Dataset columns:
  filename:          string, length 4 to 198
  content:           string, length 25 to 939k
  environment:       list
  variablearg:       list
  constarg:          list
  variableargjson:   string, 1 distinct value
  constargjson:      string, length 2 to 3.9k
  lang:              string, 3 distinct values
  constargcount:     float64, range 0 to 129
  variableargcount:  float64, range 0 to 0
  sentence:          string, 1 distinct value
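Judging from the rows below, constarg appears to hold the environment-variable names that occur as string literals in content, with constargcount mirroring its length. A minimal sketch of consuming one record under that reading; the JSON-lines export and the file name rows.jsonl are assumptions for illustration, not part of the dataset:

import json

# Iterate hypothetical JSON-lines rows that follow the column layout above.
with open("rows.jsonl") as fh:  # assumed export location
    for line in fh:
        row = json.loads(line)
        # constargcount is stored as float64 but tracks len(constarg).
        assert row["constargcount"] == len(row["constarg"])
        print(row["filename"], row["lang"], row["constarg"])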
filename: clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gitRepo/GitJobExecutor.java
/* * Copyright 2020 Armory, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.netflix.spinnaker.clouddriver.artifacts.gitRepo; import static java.nio.charset.StandardCharsets.UTF_8; import com.netflix.spinnaker.clouddriver.jobs.JobExecutor; import com.netflix.spinnaker.clouddriver.jobs.JobRequest; import com.netflix.spinnaker.clouddriver.jobs.JobResult; import java.io.File; import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; import java.net.URLEncoder; import java.nio.charset.Charset; import java.nio.file.Path; import java.nio.file.Paths; import java.util.*; import java.util.regex.Pattern; import lombok.Getter; import lombok.extern.slf4j.Slf4j; import org.apache.commons.io.FileUtils; import org.jetbrains.annotations.NotNull; import org.springframework.util.StringUtils; @Slf4j public class GitJobExecutor { private static final String SSH_KEY_PWD_ENV_VAR = "SSH_KEY_PWD"; private static final Pattern FULL_SHA_PATTERN = Pattern.compile("[0-9a-f]{40}"); private static final Pattern SHORT_SHA_PATTERN = Pattern.compile("[0-9a-f]{7}"); private static Path genericAskPassBinary; @Getter private final GitRepoArtifactAccount account; private final JobExecutor jobExecutor; private final String gitExecutable; private final AuthType authType; private final Path askPassBinary; private enum AuthType { USER_PASS, TOKEN, SSH, NONE } public GitJobExecutor( GitRepoArtifactAccount account, JobExecutor jobExecutor, String gitExecutable) throws IOException { this.account = account; this.jobExecutor = jobExecutor; this.gitExecutable = gitExecutable; if (!StringUtils.isEmpty(account.getUsername()) && !StringUtils.isEmpty(account.getPassword())) { authType = AuthType.USER_PASS; } else if (account.getTokenAsString().isPresent() && !StringUtils.isEmpty(account.getTokenAsString())) { authType = AuthType.TOKEN; } else if (!StringUtils.isEmpty(account.getSshPrivateKeyFilePath())) { authType = AuthType.SSH; } else { authType = AuthType.NONE; } askPassBinary = initAskPass(); } public void cloneOrPull(String repoUrl, String branch, Path localPath, String repoBasename) throws IOException { File localPathFile = localPath.toFile(); if (!localPathFile.exists()) { clone(repoUrl, branch, localPath, repoBasename); return; } // localPath exists if (!localPathFile.isDirectory()) { throw new IllegalArgumentException( "Local path " + localPath.toString() + " is not a directory"); } // localPath exists and is a directory File[] localPathFiles = localPathFile.listFiles(); if (localPathFiles == null || localPathFiles.length == 0) { clone(repoUrl, branch, localPath, repoBasename); return; } // localPath exists, is a directory and has files in it Path dotGitPath = Paths.get(localPath.toString(), repoBasename, ".git"); if (!dotGitPath.toFile().exists()) { log.warn( "Directory {} for git/repo {}, branch {} has files or directories but {} was not found. 
The directory will be recreated to start with a new clone.", localPath.toString(), repoUrl, branch, dotGitPath.toString()); clone(repoUrl, branch, localPath, repoBasename); return; } // localPath has "<repo>/.git" directory pull(repoUrl, branch, dotGitPath.getParent()); } private void clone(String repoUrl, String branch, Path destination, String repoBasename) throws IOException { if (!isValidReference(repoUrl)) { throw new IllegalArgumentException( "Git reference \"" + repoUrl + "\" is invalid for credentials with auth type " + authType); } File destinationFile = destination.toFile(); if (destinationFile.exists()) { FileUtils.deleteDirectory(destinationFile); } FileUtils.forceMkdir(destinationFile); if (FULL_SHA_PATTERN.matcher(branch).matches()) { fetchFullSha(repoUrl, branch, destination, repoBasename); } else { cloneBranchOrTag(repoUrl, branch, destination, repoBasename); } } private void cloneBranchOrTag( String repoUrl, String branch, Path destination, String repoBasename) throws IOException { log.info("Cloning git/repo {} into {}", repoUrl, destination.toString()); String command = gitExecutable + " clone --branch " + branch + " --depth 1 " + repoUrlWithAuth(repoUrl); JobResult<String> result = new CommandChain(destination).addCommand(command).runAll(); if (result.getResult() == JobResult.Result.SUCCESS) { return; } String errorMsg = command + " failed. Error: " + result.getError() + " Output: " + result.getOutput(); if (!SHORT_SHA_PATTERN.matcher(branch).matches()) { throw new IOException(errorMsg); } log.warn(errorMsg + ". Trying a full clone and checkout " + branch); File destFile = destination.toFile(); FileUtils.deleteDirectory(destFile); FileUtils.forceMkdir(destFile); cloneAndCheckoutSha(repoUrl, branch, destination, repoBasename); } private void fetchFullSha(String repoUrl, String sha, Path destination, String repoBasename) throws IOException { log.info("Fetching git/repo {} sha {} into {}", repoUrl, sha, destination.toString()); Path repoPath = Paths.get(destination.toString(), repoBasename); if (!repoPath.toFile().mkdirs()) { throw new IOException("Unable to create directory " + repoPath.toString()); } JobResult<String> result = new CommandChain(repoPath) .addCommand(gitExecutable + " init") .addCommand(gitExecutable + " remote add origin " + repoUrlWithAuth(repoUrl)) .addCommand(gitExecutable + " fetch --depth 1 origin " + sha) .addCommand(gitExecutable + " reset --hard FETCH_HEAD") .runAll(); if (result.getResult() == JobResult.Result.SUCCESS) { return; } // Some git servers don't allow to directly fetch specific commits // (error: Server does not allow request for unadvertised object), // fallback to full clone and checkout SHA log.warn( "Unable to directly fetch specific sha, trying full clone. 
Error: " + result.getError()); FileUtils.forceDelete(repoPath.toFile()); cloneAndCheckoutSha(repoUrl, sha, destination, repoBasename); } private void cloneAndCheckoutSha( String repoUrl, String sha, Path destination, String repoBasename) throws IOException { Path repoPath = Paths.get(destination.toString(), repoBasename); new CommandChain(destination) .addCommand(gitExecutable + " clone " + repoUrlWithAuth(repoUrl)) .runAllOrFail(); new CommandChain(repoPath).addCommand(gitExecutable + " checkout " + sha).runAllOrFail(); } private void pull(String repoUrl, String branch, Path localPath) throws IOException { if (FULL_SHA_PATTERN.matcher(branch).matches()) { log.info( "Contents of git/repo {} for sha {} already downloaded, no \"git pull\" needed.", repoUrl, branch); return; } JobResult<String> result = new CommandChain(localPath).addCommand(gitExecutable + " symbolic-ref HEAD").runAll(); if (result.getResult() != JobResult.Result.SUCCESS) { // detached HEAD state happens when "branch" is actually a short commit SHA log.info( "git/repo {} is in detached HEAD state for version {}, skipping \"git pull\"", repoUrl, branch); return; } log.info("Pulling git/repo {} into {}", repoUrl, localPath.toString()); new CommandChain(localPath).addCommand(gitExecutable + " pull").runAllOrFail(); if (!localPath.getParent().toFile().setLastModified(System.currentTimeMillis())) { log.warn("Unable to set last modified time on {}", localPath.getParent().toString()); } } public void archive(Path localClone, String branch, String subDir, Path outputFile) throws IOException { String cmd = gitExecutable + " archive --format tgz --output " + outputFile.toString() + " " + branch; if (!StringUtils.isEmpty(subDir)) { cmd += " " + subDir; } new CommandChain(localClone).addCommand(cmd).runAllOrFail(); } /** * For SSH authentication if the private key is password protected, SSH_ASKPASS binary is used to * supply the password. https://git-scm.com/docs/gitcredentials#_requesting_credentials */ private Path initAskPass() throws IOException { if (authType != AuthType.SSH) { return null; } if (!StringUtils.isEmpty(account.getSshPrivateKeyPassphraseCmd())) { File pwdCmd = new File(account.getSshPrivateKeyPassphraseCmd()); if (!pwdCmd.exists() || !pwdCmd.isFile()) { throw new IOException( "SshPrivateKeyPassphraseCmd doesn't exist or is not a file: " + account.getSshPrivateKeyPassphraseCmd()); } return Paths.get(account.getSshPrivateKeyPassphraseCmd()); } if (genericAskPassBinary == null) { File askpass = File.createTempFile("askpass", null); if (!askpass.setExecutable(true)) { throw new IOException( "Unable to make executable askpass script at " + askpass.toPath().toString()); } // Default way for supplying the password of a private ssh key is to echo an env var with the // password. // This env var is set at runtime when executing git commands that need it. 
FileUtils.writeStringToFile( askpass, "#!/bin/sh\n" + "echo \"$" + SSH_KEY_PWD_ENV_VAR + "\"", Charset.defaultCharset()); genericAskPassBinary = askpass.toPath(); } return genericAskPassBinary; } private boolean isValidReference(String reference) { if (authType == AuthType.USER_PASS || authType == AuthType.TOKEN) { return reference.startsWith("http"); } if (authType == AuthType.SSH) { return reference.startsWith("ssh://") || reference.startsWith("git@"); } return true; } private List<String> cmdToList(String cmd) { List<String> cmdList = new ArrayList<>(); switch (authType) { case USER_PASS: case TOKEN: // "sh" subshell is used so that environment variables can be used as part of the command cmdList.add("sh"); cmdList.add("-c"); cmdList.add(cmd); break; case SSH: default: cmdList.addAll(Arrays.asList(cmd.split(" "))); break; } return cmdList; } private String repoUrlWithAuth(String repoUrl) { if (authType != AuthType.USER_PASS && authType != AuthType.TOKEN) { return repoUrl; } String authPart; if (authType == AuthType.USER_PASS) { authPart = "$GIT_USER:$GIT_PASS"; } else { authPart = "token:$GIT_TOKEN"; } try { URI uri = new URI(repoUrl); return String.format( "%s://%s@%s%s%s", uri.getScheme(), authPart, uri.getHost(), (uri.getPort() > 0 ? ":" + uri.getPort() : ""), uri.getRawPath()); } catch (URISyntaxException e) { throw new IllegalArgumentException("Malformed git repo url " + repoUrl, e); } } private Map<String, String> addEnvVars(Map<String, String> env) { Map<String, String> result = new HashMap<>(env); switch (authType) { case USER_PASS: result.put("GIT_USER", encodeURIComponent(account.getUsername())); result.put("GIT_PASS", encodeURIComponent(account.getPassword())); break; case TOKEN: result.put( "GIT_TOKEN", encodeURIComponent( account .getTokenAsString() .orElseThrow( () -> new IllegalArgumentException( "Token or TokenFile must be present if using token auth.")))); break; case SSH: result.put("GIT_SSH_COMMAND", buildSshCommand()); result.put("SSH_ASKPASS", askPassBinary.toString()); result.put("DISPLAY", ":0"); if (!StringUtils.isEmpty(account.getSshPrivateKeyPassphrase())) { result.put(SSH_KEY_PWD_ENV_VAR, account.getSshPrivateKeyPassphrase()); } break; } if (log.isDebugEnabled()) { result.put("GIT_CURL_VERBOSE", "1"); result.put("GIT_TRACE", "1"); } return result; } @NotNull private String buildSshCommand() { String gitSshCmd = "setsid ssh"; if (account.isSshTrustUnknownHosts()) { gitSshCmd += " -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"; } else if (!StringUtils.isEmpty(account.getSshKnownHostsFilePath())) { gitSshCmd += " -o UserKnownHostsFile=" + account.getSshKnownHostsFilePath(); } if (!StringUtils.isEmpty(account.getSshPrivateKeyFilePath())) { gitSshCmd += " -i " + account.getSshPrivateKeyFilePath(); } return gitSshCmd; } private static String encodeURIComponent(String s) { if (StringUtils.isEmpty(s)) { return s; } String result; result = URLEncoder.encode(s, UTF_8) .replaceAll("\\+", "%20") .replaceAll("\\*", "%2A") .replaceAll("%21", "!") .replaceAll("%27", "'") .replaceAll("%28", "(") .replaceAll("%29", ")") .replaceAll("%7E", "~"); return result; } private class CommandChain { private final Collection<JobRequest> commands = new ArrayList<>(); private final Path workingDir; CommandChain(Path workingDir) { this.workingDir = workingDir; } CommandChain addCommand(String command) { commands.add( new JobRequest( cmdToList(command), addEnvVars(System.getenv()), this.workingDir.toFile())); return this; } void runAllOrFail() throws IOException { for 
(JobRequest command : commands) { log.debug("Executing command: \"{}\"", String.join(" ", command.getTokenizedCommand())); JobResult<String> result = jobExecutor.runJob(command); if (result.getResult() != JobResult.Result.SUCCESS) { throw new IOException( String.format( "%s failed. Error: %s Output: %s", command.getTokenizedCommand(), result.getError(), result.getOutput())); } } } JobResult<String> runAll() { JobResult<String> result = null; for (JobRequest command : commands) { log.debug("Executing command: \"{}\"", String.join(" ", command.getTokenizedCommand())); result = jobExecutor.runJob(command); if (result.getResult() != JobResult.Result.SUCCESS) { break; } } return result; } } }
environment: []
variablearg: []
constarg: []
variableargjson: []
constargjson: []
lang: java
constargcount: 0
variableargcount: 0
filename: molecule/default/tests/test_default.py
import os import testinfra.utils.ansible_runner testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') def test_hosts_file(host): f = host.file('/etc/hosts') assert f.exists assert f.user == 'root' assert f.group == 'root' def test_prometheus_group(host): prometheus_group = host.group('prometheus') assert prometheus_group.exists assert prometheus_group.gid == 9999 def test_prometheus_user(host): prometheus_user = host.user('prometheus') assert prometheus_user.exists def test_etc_prometheus(host): etcdir = host.file("/etc/prometheus") assert etcdir.exists assert etcdir.is_directory assert etcdir.user == 'prometheus' def test_varlib_prometheus(host): varlib = host.file("/var/lib/prometheus") assert varlib.exists assert varlib.is_directory assert varlib.user == 'prometheus' def test_file_download(host): tmpfile = host.file("/tmp/prometheus-2.6.0.linux-amd64.tar.gz") assert tmpfile.exists assert tmpfile.is_file def test_binaries_exist(host): promfile = host.file("/usr/local/bin/prometheus") promtool = host.file("/usr/local/bin/promtool") assert promfile.exists assert promfile.is_file assert promtool.exists assert promtool.is_file def test_prometheus_config_file(host): prmcfg = host.file("/etc/prometheus/prometheus.yml") assert prmcfg.exists assert prmcfg.is_file assert prmcfg.user == 'prometheus' def test_systemd_unit(host): systemdfile = host.file("/etc/systemd/system/prometheus.service") assert systemdfile.exists assert systemdfile.is_file assert systemdfile.user == 'root'
environment: []
variablearg: []
constarg: [ "MOLECULE_INVENTORY_FILE" ]
variableargjson: []
constargjson: ["MOLECULE_INVENTORY_FILE"]
lang: python
constargcount: 1
variableargcount: 0
filename: src/core/main.go
// Copyright 2018 Project Harbor Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "context" "encoding/gob" "fmt" "net/url" "os" "os/signal" "strconv" "strings" "syscall" "time" configCtl "github.com/goharbor/harbor/src/controller/config" "github.com/astaxie/beego" _ "github.com/astaxie/beego/session/redis" _ "github.com/astaxie/beego/session/redis_sentinel" "github.com/goharbor/harbor/src/common/dao" common_http "github.com/goharbor/harbor/src/common/http" "github.com/goharbor/harbor/src/common/models" _ "github.com/goharbor/harbor/src/controller/event/handler" "github.com/goharbor/harbor/src/controller/health" "github.com/goharbor/harbor/src/controller/registry" ctluser "github.com/goharbor/harbor/src/controller/user" "github.com/goharbor/harbor/src/core/api" _ "github.com/goharbor/harbor/src/core/auth/authproxy" _ "github.com/goharbor/harbor/src/core/auth/db" _ "github.com/goharbor/harbor/src/core/auth/ldap" _ "github.com/goharbor/harbor/src/core/auth/oidc" _ "github.com/goharbor/harbor/src/core/auth/uaa" "github.com/goharbor/harbor/src/core/middlewares" "github.com/goharbor/harbor/src/core/service/token" "github.com/goharbor/harbor/src/lib/cache" _ "github.com/goharbor/harbor/src/lib/cache/memory" // memory cache _ "github.com/goharbor/harbor/src/lib/cache/redis" // redis cache "github.com/goharbor/harbor/src/lib/config" "github.com/goharbor/harbor/src/lib/log" "github.com/goharbor/harbor/src/lib/metric" "github.com/goharbor/harbor/src/lib/orm" "github.com/goharbor/harbor/src/migration" "github.com/goharbor/harbor/src/pkg/notification" _ "github.com/goharbor/harbor/src/pkg/notifier/topic" "github.com/goharbor/harbor/src/pkg/scan" "github.com/goharbor/harbor/src/pkg/scan/dao/scanner" "github.com/goharbor/harbor/src/pkg/version" "github.com/goharbor/harbor/src/server" ) const ( adminUserID = 1 ) func updateInitPassword(ctx context.Context, userID int, password string) error { queryUser := models.User{UserID: userID} user, err := dao.GetUser(queryUser) if err != nil { return fmt.Errorf("Failed to get user, userID: %d %v", userID, err) } if user == nil { return fmt.Errorf("user id: %d does not exist", userID) } if user.Salt == "" { err = ctluser.Ctl.UpdatePassword(ctx, userID, password) if err != nil { return fmt.Errorf("Failed to update user encrypted password, userID: %d, err: %v", userID, err) } log.Infof("User id: %d updated its encrypted password successfully.", userID) } else { log.Infof("User id: %d already has its encrypted password.", userID) } return nil } func gracefulShutdown(closing, done chan struct{}) { signals := make(chan os.Signal, 1) signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) log.Infof("capture system signal %s, to close \"closing\" channel", <-signals) close(closing) select { case <-done: log.Infof("Goroutines exited normally") case <-time.After(time.Second * 3): log.Infof("Timeout waiting goroutines to exit") } os.Exit(0) } func main() { beego.BConfig.WebConfig.Session.SessionOn = true 
beego.BConfig.WebConfig.Session.SessionName = config.SessionCookieName redisURL := os.Getenv("_REDIS_URL_CORE") if len(redisURL) > 0 { u, err := url.Parse(redisURL) if err != nil { panic("bad _REDIS_URL:" + redisURL) } gob.Register(models.User{}) if u.Scheme == "redis+sentinel" { ps := strings.Split(u.Path, "/") if len(ps) < 2 { panic("bad redis sentinel url: no master name") } ss := make([]string, 5) ss[0] = strings.Join(strings.Split(u.Host, ","), ";") // host ss[1] = "100" // pool if u.User != nil { password, isSet := u.User.Password() if isSet { ss[2] = password } } if len(ps) > 2 { db, err := strconv.Atoi(ps[2]) if err != nil { panic("bad redis sentinel url: bad db") } if db != 0 { ss[3] = ps[2] } } ss[4] = ps[1] // monitor name beego.BConfig.WebConfig.Session.SessionProvider = "redis_sentinel" beego.BConfig.WebConfig.Session.SessionProviderConfig = strings.Join(ss, ",") } else { ss := make([]string, 5) ss[0] = u.Host // host ss[1] = "100" // pool if u.User != nil { password, isSet := u.User.Password() if isSet { ss[2] = password } } if len(u.Path) > 1 { if _, err := strconv.Atoi(u.Path[1:]); err != nil { panic("bad redis url: bad db") } ss[3] = u.Path[1:] } ss[4] = u.Query().Get("idle_timeout_seconds") beego.BConfig.WebConfig.Session.SessionProvider = "redis" beego.BConfig.WebConfig.Session.SessionProviderConfig = strings.Join(ss, ",") } log.Info("initializing cache ...") if err := cache.Initialize(u.Scheme, redisURL); err != nil { log.Fatalf("failed to initialize cache: %v", err) } } beego.AddTemplateExt("htm") log.Info("initializing configurations...") config.Init() log.Info("configurations initialization completed") metricCfg := config.Metric() if metricCfg.Enabled { metric.RegisterCollectors() go metric.ServeProm(metricCfg.Path, metricCfg.Port) } token.InitCreators() database, err := config.Database() if err != nil { log.Fatalf("failed to get database configuration: %v", err) } if err := dao.InitDatabase(database); err != nil { log.Fatalf("failed to initialize database: %v", err) } if err = migration.Migrate(database); err != nil { log.Fatalf("failed to migrate: %v", err) } ctx := orm.Context() if err := config.Load(ctx); err != nil { log.Fatalf("failed to load config: %v", err) } if err := configCtl.Ctl.OverwriteConfig(ctx); err != nil { log.Fatalf("failed to init config from CONFIG_OVERWRITE_JSON, error %v", err) } password, err := config.InitialAdminPassword() if err != nil { log.Fatalf("failed to get admin's initial password: %v", err) } if err := updateInitPassword(ctx, adminUserID, password); err != nil { log.Error(err) } // Init API handler if err := api.Init(); err != nil { log.Fatalf("Failed to initialize API handlers with error: %s", err.Error()) } health.RegisterHealthCheckers() registerScanners(orm.Context()) closing := make(chan struct{}) done := make(chan struct{}) go gracefulShutdown(closing, done) // Start health checker for registries go registry.Ctl.StartRegularHealthCheck(orm.Context(), closing, done) log.Info("initializing notification...") notification.Init() server.RegisterRoutes() if common_http.InternalTLSEnabled() { log.Info("internal TLS enabled, Init TLS ...") iTLSKeyPath := os.Getenv("INTERNAL_TLS_KEY_PATH") iTLSCertPath := os.Getenv("INTERNAL_TLS_CERT_PATH") log.Infof("load client key: %s client cert: %s", iTLSKeyPath, iTLSCertPath) beego.BConfig.Listen.EnableHTTP = false beego.BConfig.Listen.EnableHTTPS = true beego.BConfig.Listen.HTTPSPort = 8443 beego.BConfig.Listen.HTTPSKeyFile = iTLSKeyPath beego.BConfig.Listen.HTTPSCertFile = iTLSCertPath 
beego.BeeApp.Server.TLSConfig = common_http.NewServerTLSConfig() } log.Infof("Version: %s, Git commit: %s", version.ReleaseVersion, version.GitCommit) beego.RunWithMiddleWares("", middlewares.MiddleWares()...) } const ( trivyScanner = "Trivy" ) func registerScanners(ctx context.Context) { wantedScanners := make([]scanner.Registration, 0) uninstallScannerNames := make([]string, 0) if config.WithTrivy() { log.Info("Registering Trivy scanner") wantedScanners = append(wantedScanners, scanner.Registration{ Name: trivyScanner, Description: "The Trivy scanner adapter", URL: config.TrivyAdapterURL(), UseInternalAddr: true, Immutable: true, }) } else { log.Info("Removing Trivy scanner") uninstallScannerNames = append(uninstallScannerNames, trivyScanner) } if err := scan.RemoveImmutableScanners(ctx, uninstallScannerNames); err != nil { log.Warningf("failed to remove scanners: %v", err) } if err := scan.EnsureScanners(ctx, wantedScanners); err != nil { log.Fatalf("failed to register scanners: %v", err) } if defaultScannerName := getDefaultScannerName(); defaultScannerName != "" { log.Infof("Setting %s as default scanner", defaultScannerName) if err := scan.EnsureDefaultScanner(ctx, defaultScannerName); err != nil { log.Fatalf("failed to set default scanner: %v", err) } } } func getDefaultScannerName() string { if config.WithTrivy() { return trivyScanner } return "" }
[ "\"_REDIS_URL_CORE\"", "\"INTERNAL_TLS_KEY_PATH\"", "\"INTERNAL_TLS_CERT_PATH\"" ]
[]
[ "INTERNAL_TLS_CERT_PATH", "INTERNAL_TLS_KEY_PATH", "_REDIS_URL_CORE" ]
[]
["INTERNAL_TLS_CERT_PATH", "INTERNAL_TLS_KEY_PATH", "_REDIS_URL_CORE"]
go
3
0
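The pairing in this record, raw quoted arguments under environment and bare names under constarg, suggests a literal scan of calls such as Go's os.Getenv("...") or Python's os.environ['...']. A minimal sketch of that kind of extraction, using simple regular expressions as an assumption; it is illustrative, not necessarily how this dataset was produced:

import re

# Hypothetical patterns for the two access styles visible in these files:
# Go's os.Getenv("NAME") and Python's os.environ['NAME'].
GETENV = re.compile(r'os\.Getenv\("([^"]+)"\)')
ENVIRON = re.compile(r"os\.environ\['([^']+)'\]")

def const_env_args(content):
    """Return the string-literal environment variable names, deduplicated."""
    names = GETENV.findall(content) + ENVIRON.findall(content)
    return sorted(set(names))

print(const_env_args('redisURL := os.Getenv("_REDIS_URL_CORE")'))
# -> ['_REDIS_URL_CORE']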
filename: provider/aws/aws.go
package aws import ( "context" "fmt" "io" "math/rand" "net/url" "os" "path" "sort" "strings" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/autoscaling" "github.com/aws/aws-sdk-go/service/cloudformation" "github.com/aws/aws-sdk-go/service/cloudwatch" "github.com/aws/aws-sdk-go/service/cloudwatchlogs" "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/ecr" "github.com/aws/aws-sdk-go/service/ecs" "github.com/aws/aws-sdk-go/service/iam" "github.com/aws/aws-sdk-go/service/kms" "github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/simpledb" "github.com/aws/aws-sdk-go/service/sqs" "github.com/aws/aws-sdk-go/service/sts" "github.com/convox/praxis/cache" "github.com/convox/praxis/helpers" "github.com/convox/praxis/manifest" "github.com/convox/praxis/types" "github.com/fsouza/go-dockerclient" "github.com/pkg/errors" ) const () type Provider struct { Config *aws.Config Context context.Context Development bool Name string Password string Region string Session *session.Session Version string } func init() { rand.Seed(time.Now().UTC().UnixNano()) } func FromEnv() (*Provider, error) { session, err := session.NewSession() if err != nil { return nil, err } region := os.Getenv("AWS_REGION") p := &Provider{ Config: &aws.Config{Region: aws.String(region)}, Context: context.Background(), Development: os.Getenv("DEVELOPMENT") == "true", Name: os.Getenv("NAME"), Password: os.Getenv("PASSWORD"), Region: region, Session: session, Version: os.Getenv("VERSION"), } if p.Version == "" { if v, err := p.rackOutput("Version"); err == nil && v != "" { p.Version = v } } return p, nil } func (p *Provider) AutoScaling() *autoscaling.AutoScaling { return autoscaling.New(p.Session, p.Config) } func (p *Provider) CloudFormation() *cloudformation.CloudFormation { return cloudformation.New(p.Session, p.Config) } func (p *Provider) CloudWatch() *cloudwatch.CloudWatch { return cloudwatch.New(p.Session, p.Config) } func (p *Provider) CloudWatchLogs() *cloudwatchlogs.CloudWatchLogs { return cloudwatchlogs.New(p.Session, p.Config) } func (p *Provider) Docker(host string) (*docker.Client, error) { return docker.NewClient(host) } func (p *Provider) ECR() *ecr.ECR { return ecr.New(p.Session, p.Config) } func (p *Provider) ECS() *ecs.ECS { return ecs.New(p.Session, p.Config) } func (p *Provider) EC2() *ec2.EC2 { return ec2.New(p.Session, p.Config) } func (p *Provider) KMS() *kms.KMS { return kms.New(p.Session, p.Config) } func (p *Provider) IAM() *iam.IAM { return iam.New(p.Session, p.Config) } func (p *Provider) S3() *s3.S3 { return s3.New(p.Session, p.Config) } func (p *Provider) SimpleDB() *simpledb.SimpleDB { return simpledb.New(p.Session, p.Config) } func (p *Provider) SQS() *sqs.SQS { return sqs.New(p.Session, p.Config) } func (p *Provider) STS() *sts.STS { return sts.New(p.Session, p.Config) } func awsError(err error) string { if ae, ok := err.(awserr.Error); ok { return ae.Code() } return "" } func (p *Provider) accountID() (string, error) { if v, ok := cache.Get("accountID", "").(string); ok { return v, nil } res, err := p.STS().GetCallerIdentity(&sts.GetCallerIdentityInput{}) if err != nil { return "", err } a := *res.Account if err := cache.Set("accountID", "", a, 24*time.Hour); err != nil { return "", err } return *res.Account, nil } func (p *Provider) appOutput(app string, resource string) (string, error) { return p.stackOutput(fmt.Sprintf("%s-%s", p.Name, app), resource) } 
func (p *Provider) appResource(app string, resource string) (string, error) { return p.stackResource(fmt.Sprintf("%s-%s", p.Name, app), resource) } func humanStatus(status string) string { switch status { case "CREATE_COMPLETE": return "running" case "CREATE_IN_PROGRESS": return "creating" case "DELETE_IN_PROGRESS": return "deleting" case "DELETE_FAILED": return "error" case "ROLLBACK_COMPLETE": return "running" case "ROLLBACK_IN_PROGRESS": return "rollback" case "UPDATE_COMPLETE": return "running" case "UPDATE_COMPLETE_CLEANUP_IN_PROGRESS": return "updating" case "UPDATE_IN_PROGRESS": return "updating" case "UPDATE_ROLLBACK_COMPLETE": return "running" case "UPDATE_ROLLBACK_IN_PROGRESS": return "rollback" case "UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS": return "rollback" default: return status } } func (p *Provider) clusterServices() ([]*ecs.Service, error) { cluster, err := p.rackResource("RackCluster") if err != nil { return nil, err } req := &ecs.ListServicesInput{ Cluster: aws.String(cluster), } ss := []*ecs.Service{} for { res, err := p.ECS().ListServices(req) if err != nil { return nil, err } sres, err := p.ECS().DescribeServices(&ecs.DescribeServicesInput{ Cluster: aws.String(cluster), Services: res.ServiceArns, }) if err != nil { return nil, err } ss = append(ss, sres.Services...) if res.NextToken == nil { break } req.NextToken = res.NextToken } return ss, nil } func (p *Provider) containerInstance(arn string) (*ecs.ContainerInstance, error) { cluster, err := p.rackResource("RackCluster") if err != nil { return nil, err } res, err := p.ECS().DescribeContainerInstances(&ecs.DescribeContainerInstancesInput{ Cluster: aws.String(cluster), ContainerInstances: []*string{aws.String(arn)}, }) if err != nil { return nil, err } if len(res.ContainerInstances) < 1 { return nil, fmt.Errorf("could not find container instance: %s", arn) } return res.ContainerInstances[0], nil } func (p *Provider) containerInstances() ([]*ecs.ContainerInstance, error) { cluster, err := p.rackResource("RackCluster") if err != nil { return nil, err } ii := []*ecs.ContainerInstance{} req := &ecs.ListContainerInstancesInput{ Cluster: aws.String(cluster), } for { res, err := p.ECS().ListContainerInstances(req) if err != nil { return nil, err } ires, err := p.ECS().DescribeContainerInstances(&ecs.DescribeContainerInstancesInput{ Cluster: aws.String(cluster), ContainerInstances: res.ContainerInstanceArns, }) if err != nil { return nil, err } ii = append(ii, ires.ContainerInstances...) 
if res.NextToken == nil { break } req.NextToken = res.NextToken } return ii, nil } func (p *Provider) deleteBucket(bucket string) error { req := &s3.ListObjectsInput{ Bucket: aws.String(bucket), } err := p.S3().ListObjectsPages(req, func(res *s3.ListObjectsOutput, last bool) bool { objects := make([]*s3.ObjectIdentifier, len(res.Contents)) for i, o := range res.Contents { objects[i] = &s3.ObjectIdentifier{Key: o.Key} } if len(objects) == 0 { return false } p.S3().DeleteObjects(&s3.DeleteObjectsInput{ Bucket: aws.String(bucket), Delete: &s3.Delete{Objects: objects}, }) return true }) if err != nil { return err } _, err = p.S3().DeleteBucket(&s3.DeleteBucketInput{ Bucket: aws.String(bucket), }) if err != nil { return err } return nil } func (p *Provider) describeStack(name string) (*cloudformation.Stack, error) { if v, ok := cache.Get("describeStack", name).(*cloudformation.Stack); ok { return v, nil } res, err := p.CloudFormation().DescribeStacks(&cloudformation.DescribeStacksInput{ StackName: aws.String(name), }) if err != nil { return nil, err } if len(res.Stacks) < 1 { return nil, fmt.Errorf("no such stack: %s", name) } if err := cache.Set("describeStack", name, res.Stacks[0], 10*time.Second); err != nil { return nil, err } return res.Stacks[0], nil } func (p *Provider) ec2Instance(id string) (*ec2.Instance, error) { res, err := p.EC2().DescribeInstances(&ec2.DescribeInstancesInput{ InstanceIds: []*string{aws.String(id)}, }) if err != nil { return nil, err } if len(res.Reservations) < 1 || len(res.Reservations[0].Instances) < 1 { return nil, fmt.Errorf("could not find ec2 instance: %s", id) } return res.Reservations[0].Instances[0], nil } func (p *Provider) rackOutput(output string) (string, error) { return p.stackOutput(p.Name, output) } func (p *Provider) rackResource(resource string) (string, error) { return p.stackResource(p.Name, resource) } func (p *Provider) writeLogf(group, stream, format string, args ...interface{}) error { req := &cloudwatchlogs.PutLogEventsInput{ LogGroupName: aws.String(group), LogStreamName: aws.String(stream), LogEvents: []*cloudwatchlogs.InputLogEvent{ &cloudwatchlogs.InputLogEvent{ Message: aws.String(fmt.Sprintf(format, args...)), Timestamp: aws.Int64(time.Now().UTC().UnixNano() / 1000000), }, }, } for { res, err := p.CloudWatchLogs().PutLogEvents(req) switch awsError(err) { // if the log stream doesnt exist, create it case "ResourceNotFoundException": _, err := p.CloudWatchLogs().CreateLogStream(&cloudwatchlogs.CreateLogStreamInput{ LogGroupName: aws.String(group), LogStreamName: aws.String(stream), }) if err != nil { return err } continue // need to set the sequence token case "DataAlreadyAcceptedException": req.SequenceToken = res.NextSequenceToken continue case "InvalidSequenceTokenException": if ae, ok := err.(awserr.Error); ok { parts := strings.Split(ae.Message(), " ") req.SequenceToken = aws.String(parts[len(parts)-1]) continue } } return err } } func (p *Provider) subscribeLogs(group, stream string, opts types.LogsOptions, w io.WriteCloser) error { return p.subscribeLogsCallback(group, stream, opts, w, nil) } func (p *Provider) subscribeLogsCallback(group, stream string, opts types.LogsOptions, w io.WriteCloser, fn func() bool) error { defer w.Close() req := &cloudwatchlogs.FilterLogEventsInput{ Interleaved: aws.Bool(true), LogGroupName: aws.String(group), } if stream != "" { req.LogStreamNames = []*string{aws.String(stream)} } if opts.Filter != "" { req.FilterPattern = aws.String(opts.Filter) } if !opts.Since.IsZero() { req.StartTime = 
aws.Int64(opts.Since.UTC().Unix() * 1000) } for { // Always make sure there is something we can write to if _, err := fmt.Fprintf(w, ""); err != nil { if err == io.EOF { return nil } return err } events := []*cloudwatchlogs.FilteredLogEvent{} err := p.CloudWatchLogs().FilterLogEventsPages(req, func(res *cloudwatchlogs.FilterLogEventsOutput, last bool) bool { for _, e := range res.Events { events = append(events, e) } return true }) if err != nil { fmt.Fprintf(os.Stderr, "error: %s\n", err) break } sort.Slice(events, func(i, j int) bool { return *events[i].Timestamp < *events[j].Timestamp }) for _, e := range events { parts := strings.SplitN(*e.LogStreamName, "/", 3) if len(parts) == 3 { pp := strings.Split(parts[2], "-") ts := time.Unix(*e.Timestamp/1000, *e.Timestamp%1000*1000).UTC() var err error if opts.Prefix { _, err = fmt.Fprintf(w, "%s %s/%s/%s %s\n", ts.Format(helpers.PrintableTime), parts[0], parts[1], pp[len(pp)-1], *e.Message) } else { _, err = fmt.Fprintf(w, "%s\n", *e.Message) } if err != nil { if err == io.EOF { return nil } return err } } } if !opts.Follow { break } if fn != nil { if !fn() { return nil } } time.Sleep(1 * time.Second) if len(events) > 0 { req.StartTime = aws.Int64(*events[len(events)-1].Timestamp + 1) } } return nil } func (p *Provider) stackOutput(name string, output string) (string, error) { ck := fmt.Sprintf("%s/%s", name, output) if v, ok := cache.Get("stackOutput", ck).(string); ok { return v, nil } stack, err := p.describeStack(name) if err != nil { return "", err } for _, o := range stack.Outputs { if *o.OutputKey == output { ov := *o.OutputValue if err := cache.Set("stackOutput", ck, ov, 1*time.Minute); err != nil { return "", err } return ov, nil } } return "", fmt.Errorf("no such output for stack %s: %s", name, output) } func (p *Provider) stackResource(name string, resource string) (string, error) { ck := fmt.Sprintf("%s/%s", name, resource) if v, ok := cache.Get("stackResource", ck).(string); ok { return v, nil } res, err := p.CloudFormation().DescribeStackResource(&cloudformation.DescribeStackResourceInput{ LogicalResourceId: aws.String(resource), StackName: aws.String(name), }) if err != nil { return "", err } r := *res.StackResourceDetail.PhysicalResourceId if err := cache.Set("stackResource", ck, r, 1*time.Minute); err != nil { return "", err } return r, nil } func (p *Provider) fetchTaskDefinition(arn string) (*ecs.TaskDefinition, error) { if v, ok := cache.Get("taskDefinition", arn).(*ecs.TaskDefinition); ok { return v, nil } res, err := p.ECS().DescribeTaskDefinition(&ecs.DescribeTaskDefinitionInput{ TaskDefinition: aws.String(arn), }) if err != nil { return nil, err } if err := cache.Set("taskDefinition", arn, res.TaskDefinition, 24*time.Hour); err != nil { return nil, err } return res.TaskDefinition, nil } func (p *Provider) taskDefinition(app string, opts types.ProcessRunOptions) (string, error) { logs, err := p.appResource(app, "Logs") if err != nil { return "", err } req := &ecs.RegisterTaskDefinitionInput{ ContainerDefinitions: []*ecs.ContainerDefinition{ { Cpu: aws.Int64(256), Essential: aws.Bool(true), Image: aws.String(""), LogConfiguration: &ecs.LogConfiguration{ LogDriver: aws.String("awslogs"), Options: map[string]*string{ "awslogs-region": aws.String(p.Region), "awslogs-group": aws.String(logs), "awslogs-stream-prefix": aws.String("convox"), }, }, MemoryReservation: aws.Int64(int64(coalescei(opts.Memory, 256))), Name: aws.String(opts.Service), }, }, Family: aws.String(fmt.Sprintf("%s-%s", p.Name, app)), } // get release and 
manifest for initial environment and volumes var m *manifest.Manifest var release *types.Release var service *manifest.Service if opts.Release != "" { m, release, err = helpers.ReleaseManifest(p, app, opts.Release) if err != nil { return "", errors.WithStack(err) } // if service is not defined in manifest, i.e. "build", carry on service, err = m.Service(opts.Service) if err != nil && !strings.Contains(err.Error(), "no such service") { return "", errors.WithStack(err) } } if service != nil { // manifest environment env, err := m.ServiceEnvironment(opts.Service) if err != nil { return "", err } for k, v := range env { req.ContainerDefinitions[0].Environment = append(req.ContainerDefinitions[0].Environment, &ecs.KeyValuePair{ Name: aws.String(k), Value: aws.String(v), }) } // resource environment rs, err := p.ResourceList(app) if err != nil { return "", err } for _, r := range rs { req.ContainerDefinitions[0].Environment = append(req.ContainerDefinitions[0].Environment, &ecs.KeyValuePair{ Name: aws.String(strings.ToUpper(fmt.Sprintf("%s_URL", r.Name))), Value: aws.String(r.Endpoint), }) } // app environment menv, err := helpers.AppEnvironment(p, app) if err != nil { return "", err } for k, v := range menv { req.ContainerDefinitions[0].Environment = append(req.ContainerDefinitions[0].Environment, &ecs.KeyValuePair{ Name: aws.String(k), Value: aws.String(v), }) } // volumes for service s, err := m.Service(opts.Service) if err != nil { return "", err } for i, v := range s.Volumes { var from, to string parts := strings.SplitN(v, ":", 2) switch len(parts) { case 1: from = path.Join("/volumes", v) to = v case 2: from = parts[0] to = parts[1] default: return "", fmt.Errorf("invalid volume definition: %s", v) } name := fmt.Sprintf("volume-%d", i) // manifest volumes req.Volumes = append(req.Volumes, &ecs.Volume{ Host: &ecs.HostVolumeProperties{ SourcePath: aws.String(from), }, Name: aws.String(name), }) req.ContainerDefinitions[0].MountPoints = append(req.ContainerDefinitions[0].MountPoints, &ecs.MountPoint{ ContainerPath: aws.String(to), SourceVolume: aws.String(name), }) } } for k, v := range opts.Environment { req.ContainerDefinitions[0].Environment = append(req.ContainerDefinitions[0].Environment, &ecs.KeyValuePair{ Name: aws.String(k), Value: aws.String(v), }) } if opts.Command != "" { req.ContainerDefinitions[0].Command = []*string{aws.String("sh"), aws.String("-c"), aws.String(opts.Command)} } if opts.Output != nil { req.ContainerDefinitions[0].Command = []*string{aws.String("sleep"), aws.String("3600")} } endpoint, err := p.stackOutput(p.Name, "Endpoint") if err != nil { return "", err } u, err := url.Parse(endpoint) if err != nil { return "", err } u.User = url.UserPassword(p.Password, "") aenv := map[string]string{ "APP": app, "RACK_URL": u.String(), } for k, v := range aenv { req.ContainerDefinitions[0].Environment = append(req.ContainerDefinitions[0].Environment, &ecs.KeyValuePair{ Name: aws.String(k), Value: aws.String(v), }) } if opts.Service != "" && opts.Image == "" { if release == nil { return "", fmt.Errorf("no release for app: %s", app) } req.ContainerDefinitions[0].Environment = append(req.ContainerDefinitions[0].Environment, &ecs.KeyValuePair{ Name: aws.String("RELEASE"), Value: aws.String(release.Id), }) account, err := p.accountID() if err != nil { return "", err } repo, err := p.appResource(app, "Repository") if err != nil { return "", err } req.ContainerDefinitions[0].Image = aws.String(fmt.Sprintf("%s.dkr.ecr.%s.amazonaws.com/%s:%s.%s", account, p.Region, repo, opts.Service, 
release.Build)) } if opts.Image != "" { req.ContainerDefinitions[0].Image = aws.String(opts.Image) } for from, to := range opts.Ports { req.ContainerDefinitions[0].PortMappings = append(req.ContainerDefinitions[0].PortMappings, &ecs.PortMapping{ HostPort: aws.Int64(int64(from)), ContainerPort: aws.Int64(int64(to)), }) } i := 0 for from, to := range opts.Volumes { name := fmt.Sprintf("volume-o-%d", i) // one-off volumes req.Volumes = append(req.Volumes, &ecs.Volume{ Host: &ecs.HostVolumeProperties{ SourcePath: aws.String(from), }, Name: aws.String(name), }) req.ContainerDefinitions[0].MountPoints = append(req.ContainerDefinitions[0].MountPoints, &ecs.MountPoint{ ContainerPath: aws.String(to), SourceVolume: aws.String(name), }) } res, err := p.ECS().RegisterTaskDefinition(req) if err != nil { return "", err } return *res.TaskDefinition.TaskDefinitionArn, nil } func (p *Provider) taskForPid(pid string) (*ecs.Task, error) { cluster, err := p.rackResource("RackCluster") if err != nil { return nil, err } res, err := p.ECS().DescribeTasks(&ecs.DescribeTasksInput{ Cluster: aws.String(cluster), Tasks: []*string{aws.String(pid)}, }) if err != nil { return nil, err } if len(res.Tasks) < 1 { return nil, fmt.Errorf("could not find task for pid: %s", pid) } return res.Tasks[0], nil } func upperName(name string) string { // myapp -> Myapp; my-app -> MyApp us := strings.ToUpper(name[0:1]) + name[1:] for { i := strings.Index(us, "-") if i == -1 { break } s := us[0:i] if len(us) > i+1 { s += strings.ToUpper(us[i+1 : i+2]) } if len(us) > i+2 { s += us[i+2:] } us = s } return us }
[ "\"AWS_REGION\"", "\"DEVELOPMENT\"", "\"NAME\"", "\"PASSWORD\"", "\"VERSION\"" ]
[]
[ "PASSWORD", "AWS_REGION", "VERSION", "DEVELOPMENT", "NAME" ]
[]
["PASSWORD", "AWS_REGION", "VERSION", "DEVELOPMENT", "NAME"]
go
5
0
filename: tests/test_interpret_stmnt.py
from __future__ import print_function import helpers_test from cparser import * from cparser.interpreter import * from helpers_test import * import ctypes def test_interpret_c_cast(): state = parse("int f()\n { int v = (int) 42; return v; } \n") interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 42 def test_interpret_c_cast_ptr(): state = parse("void f()\n { int* v = (int*) 0; } \n") interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) def test_interpret_c_cast_ptr_2_a(): state = parse("void f()\n { unsigned int v = (unsigned int) 42; } \n") interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) def test_interpret_c_cast_ptr_2_b(): state = parse("void f()\n { void* v = (void*) 0; } \n") interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) def test_interpret_c_cast_ptr_2(): state = parse(""" void f() { int x; int* v = (int*) x; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) def test_interpret_c_cast_ptr_3(): state = parse(""" int g(int*) { return 3; } int f() { g((int*) 0); return 5; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("g", output=sys.stdout) interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 5 def test_interpret_c_cast_ptr_4(): state = parse(""" int g(unsigned char * buff) { return 3; } int f() { g((unsigned char *) "x"); return 5; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) interpreter.dumpFunc("g", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 5 def test_interpret_auto_cast(): state = parse(""" void g(unsigned long) {} int f() { g((long) 42); return 5; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 5 def test_interpret_auto_cast_2(): state = parse(""" void g(const char*, const char*) {} int f() { g(0, "foo"); return 5; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 5 def test_interpret_var_init_wrap_value(): state = cparser.State() state.autoSetupGlobalIncludeWrappers() cparser.parse_code(""" #include <stdio.h> // stdout int f() { FILE* f = stdout; return 5; } """, state) print("Parse errors:", state._errors) assert not state._errors interpreter = Interpreter() interpreter.register(state) print("Func dump:") 
interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 5 def test_interpret_var_init_wrap_value_2(): state = cparser.State() state.autoSetupGlobalIncludeWrappers() cparser.parse_code(""" #include <stdio.h> // stdout / stderr int f() { int v = 0; FILE* f = v ? stdout : stderr; return 5; } """, state) print("Parse errors:", state._errors) assert not state._errors interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 5 def test_interpret_call_void_func(): state = parse(""" int g() { return 0; } int f() { (void) g(); return 5; } """) interpreter = Interpreter() interpreter.register(state) print("Parsed funcs:") pprint(state.funcs["g"]) pprint(state.funcs["g"].args) pprint(state.funcs["g"].body) pprint(state.funcs["f"]) pprint(state.funcs["f"].args) pprint(state.funcs["f"].body) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) f = state.funcs["f"] assert isinstance(f, CFunc) assert isinstance(f.body, CBody) assert len(f.body.contentlist) == 2 call_stmnt = f.body.contentlist[0] print("Call statement:", call_stmnt) assert isinstance(call_stmnt, CStatement) assert isinstance(call_stmnt._leftexpr, CFuncCall) assert isinstance(call_stmnt._leftexpr.base, CBuiltinType) assert call_stmnt._leftexpr.base.builtinType == ("void", ) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 5 def test_interpret_goto_forward(): state = parse(""" int f() { goto final; return 3; final: return 5; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 5 def test_interpret_goto_backward(): state = parse(""" int f() { int x = 0; again: if(x > 0) return 42; x += 1; goto again; return 5; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 42 def test_interpret_do_while(): state = parse(""" int f() { int x = 0; do { x += 1; } while(0); return x; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 1 def test_interpret_inplacce_add(): state = parse(""" int f() { int x = 42; x += 1; return x; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 43 def test_interpret_do_while_while(): state = parse(""" int f() { int x = 0; do { x += 1; } while(0); while(x < 3) { x++; } return x; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 3 def test_interpret_goto_label_single_stmnt(): state = parse(""" int f() { int x = 
0; if(1) {} else label: x = 1; if(x == 0) goto label; return x; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 1 def test_interpret_goto_in_nested(): state = parse(""" int f() { int x = 0; while(1) { x = 1; again: if(x >= 5) break; x += 1; goto again; } return x; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 5 def test_interpret_goto_into_nested(): state = parse(""" int f() { int x = 1; goto here; while(1) { x += 3; break; here: x *= 2; } return x; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 5 def test_interpret_goto_into_nested_for_loop(): state = parse(""" int f() { int x = 1; goto here; for(x=0; ; x++) { x += 2; break; here: x *= 2; } return x; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 5 def test_interpret_goto_with_if_else(): state = parse(""" int f() { int x = 1; goto here; here: if(x <= 3) x = 5; else x = 13; return x; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 5 def test_interpret_for_loop_empty(): state = parse(""" int f() { for(;;) { break; } return 5; } """) interpreter = Interpreter() interpreter.register(state) print("Parsed func body:") pprint(state.funcs["f"].body) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 5 def test_interpret_nested_var(): state = parse(""" int f() { int x = 1; { int x = 2; x = 3; } x = 4; return x; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 4 def test_interpret_ptr_array(): state = parse(""" typedef struct _object { long foo; } PyObject; typedef struct _tuple { PyObject *ob_item[1]; } PyTupleObject; #define PyTuple_GET_ITEM(op, i) (((PyTupleObject *)(op))->ob_item[i]) PyObject tupleGlobal; void* f() { PyObject* tuple = &tupleGlobal; PyObject* obj = PyTuple_GET_ITEM(tuple, 0); return obj; } """) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) print("PyTupleObject:", state.typedefs["PyTupleObject"]) assert isinstance(state.typedefs["PyTupleObject"].type, CStruct) print("PyTupleObject body:") assert isinstance(state.typedefs["PyTupleObject"].type.body, CBody) pprint(state.typedefs["PyTupleObject"].type.body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", 
output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_void_p) assert r.value != 0 def test_interpret_global_obj(): state = parse(""" typedef struct _object { long foo; } PyObject; PyObject obj; void* f() { return &obj; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_void_p) assert r.value != 0 def test_interpret_array(): state = parse(""" int f() { int a[5]; a[1] = 5; a[2] = 13; return a[1]; } """) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 5 def test_interpret_func_call_auto_cast(): state = parse(""" int add(int n) { return n; } int f() { return add(3 + 2); } """) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) interpreter.dumpFunc("add", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 5 def test_interpret_init_struct(): state = parse(""" typedef struct _A { int a, b, c; } A; int f() { A s = {1, 2, 3}; return s.b; } """) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) vardecl = state.funcs["f"].body.contentlist[0] assert isinstance(vardecl, CVarDecl) assert vardecl.name == "s" print("var decl s body:") print(vardecl.body) print("_A:") print(state.structs["_A"]) print("_A body:") print(state.structs["_A"].body) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 2 def test_interpret_init_struct_via_self(): state = parse(""" #include <assert.h> typedef struct _A { void* self; int x; } A; A s = {&s, 42}; int f() { assert((&s) == s.self); return s.x; } """, withGlobalIncludeWrappers=True) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) print("s:", state.vars["s"]) print("s body:", state.vars["s"].body) s = state.vars["s"] s_body = s.body assert isinstance(s_body, CStatement) assert isinstance(s_body._leftexpr, CCurlyArrayArgs) s_body = s_body._leftexpr assert len(s_body.args) == 2 assert isinstance(s_body.args[0], CStatement) assert s_body.args[0]._leftexpr is None assert s_body.args[0]._op == COp("&") assert isinstance(s_body.args[0]._rightexpr, CStatement) s_body_ref = s_body.args[0]._rightexpr assert s_body_ref._leftexpr is state.vars["s"] interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 42 def test_interpret_init_array(): state 
= parse(""" int f() { int a[] = {1, 2, 3}; return a[2]; } """) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) vardecl = state.funcs["f"].body.contentlist[0] assert isinstance(vardecl, CVarDecl) assert vardecl.name == "a" print("var decl a body:") print(vardecl.body) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 3 def test_interpret_init_array_sizeof(): state = parse(""" int f() { int a[] = {1, 2, 3, 4, 5}; return sizeof(a); } """) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) vardecl = state.funcs["f"].body.contentlist[0] assert isinstance(vardecl, CVarDecl) assert vardecl.name == "a" print("var decl a body:") print(vardecl.body) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 5 * ctypes.sizeof(ctypes.c_int) def test_interpreter_char_array(): state = parse(""" int f() { char name[] = "foo"; return sizeof(name); } """) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) vardecl = state.funcs["f"].body.contentlist[0] assert isinstance(vardecl, CVarDecl) assert vardecl.name == "name" print("var decl a body:") print(vardecl.body) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 4 def test_interpreter_global_char_array(): state = parse(""" static char name[] = "foo"; int f() { return sizeof(name); } """) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 4 def test_interpreter_offset_of_direct(): state = parse(""" typedef struct _typeobject { long foo; long bar; } PyTypeObject; int f() { int a = (int) &((PyTypeObject*)(0))->bar; return a; } """) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) vardecl = state.funcs["f"].body.contentlist[0] assert isinstance(vardecl, CVarDecl) assert vardecl.name == "a" print("var decl a body:") print(vardecl.body) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == ctypes.sizeof(ctypes.c_long) def test_interpreter_num_cast(): state = parse(""" int f() { int a = (int) 'A'; return a; } """) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) vardecl = 
state.funcs["f"].body.contentlist[0] assert isinstance(vardecl, CVarDecl) assert vardecl.name == "a" print("var decl a body:") print(vardecl.body) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == ord('A') def test_interpreter_func_ptr(): state = parse(""" typedef int (*F) (); int i() { return 42; } int f() { F fp = i; int v = fp(); return v; } """) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 42 def test_interpreter_func_ptr_return_ptr(): state = parse(""" typedef int* (*F) (); int _i = 42; int* i() { return &_i; } int f() { F fp = i; int* vp = fp(); return *vp; } """) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 42 def test_interpreter_func_ptr_struct_init(): state = parse(""" #include <assert.h> typedef int (*F) (); typedef struct _S { int x; F f; } S; int i() { return 42; } S s = {3, i}; int f() { assert(s.x == 3); //assert(s.f == (F) i); // not sure what's needed for this return s.x + s.f(); } """, withGlobalIncludeWrappers=True) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) interpreter.dumpFunc("i", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) #s = interpreter.globalScope.vars["s"] #print s, s._fields_, s.x, s.f assert isinstance(r, ctypes.c_int) assert r.value == 45 def test_interpreter_func_ptr_struct_init_unknown(): state = parse(""" #include <assert.h> typedef long (*F) (); typedef struct _S { int x; F f; } S; long unknown_func(); S s = {3, unknown_func}; int f() { assert(s.x == 3); assert((void*) s.f != 0); return s.x + s.f(); } """, withGlobalIncludeWrappers=True) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) # Because the unknown_func will by default return 0. 
assert r.value == 3 def test_interpret_op_precedence_ref(): state = parse(""" #include <assert.h> typedef struct _A { int* x; } A; int f() { int a = 42; A b = {&a}; assert(&a == b.x); *b.x += 1; return a; } """, withGlobalIncludeWrappers=True) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 43 def test_interpret_multiple_vars(): state = parse(""" int f() { int a = 23, b, c; c = 42; return c; } """) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 42 def test_interpret_sizeof_ptr(): state = parse(""" int f() { return sizeof(int*); } """) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == ctypes.sizeof(ctypes.c_void_p) def test_interpret_multi_stmnt(): state = parse(""" int f() { int j = 0; int i, n = 1; for (i = 0; i < n; i++, j++) { } return i; } """) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 1 def test_interpret_multi_stmnt_body(): state = parse(""" int f() { int i = 1, j = 2; i++, j++; return i + j; } """) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 5 def test_interpret_prefix_inc_ret(): state = parse(""" int f() { int i = 0; return ++i; } """) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 1 def test_interpret_postfix_inc_ret(): state = parse(""" int f() { int i = 0; return i++; } """) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", 
output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 0 def test_interpret_postfix_inc(): state = parse(""" int f() { int i = 0; i++; return i; } """) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 1 def test_interpret_return_ptr(): state = parse(""" const char* g() { return "hey"; } int f() { const char* s = g(); return *s; } """) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) interpreter.dumpFunc("g", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == ord("h") def test_interpret_malloc(): state = parse(""" #include <stdlib.h> #include <string.h> char* g() { char* s = malloc(5); strcpy(s, "hey"); return s; } int f() { char* s = g(); char c = *s; free(s); return c; } """, withGlobalIncludeWrappers=True) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) interpreter.dumpFunc("g", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == ord("h") def test_interpret_malloc_with_cast(): state = parse(""" #include <stdlib.h> #include <string.h> char* g() { char* s = (char*) malloc(5); strcpy(s, "hey"); return s; } int f() { char* s = g(); char c = *s; free(s); return c; } """, withGlobalIncludeWrappers=True) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) interpreter.dumpFunc("g", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == ord("h") def test_interpret_noname_struct_init(): state = parse(""" typedef struct { int x; } S; int f() { S s; s.x = 42; return s.x; } """) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 42 def test_interpret_struct_ambig_name(): # https://github.com/albertz/PyCParser/issues/2 state = parse(""" typedef struct { int number; } Number; struct XYZ { Number Number[10]; }; int f() { struct XYZ s; s.Number[1].number = 42; s.Number[2].number = 3; return s.Number[1].number; } """) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) 
pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 42 def test_interpret_condition(): # https://github.com/albertz/PyCParser/issues/3 state = parse(""" int f() { int i = 5, j = 6, k = 1; if ((i=j && k == 1) || k > j) return i; return -17; } """) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) # Note: i = (j && (k == 1)). assert r.value == 1 def test_interpret_void_ptr_cast(): state = parse(""" int g(int *) { return 42; } int f() { void* obj = 0; return g((int *)obj); } """) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 42 def test_interpret_void_cast_two_args(): state = parse(""" int f() { int a, b; (void) (a = 1, (b = 2, &a)); return b; } """) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 2 def test_interpret_macro_file_line(): state = parse(""" void PyErr_BadInternalCall(void) {} void _PyErr_BadInternalCall(char *filename, int lineno) {} #define PyErr_BadInternalCall() _PyErr_BadInternalCall(__FILE__, __LINE__) int f() { PyErr_BadInternalCall(); return 42; } """) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 42 def test_interpret_reserved_global_varname(): state = parse(""" void h() {} int f() { h(); int g = 42; return g; } """) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 42 def test_interpret_stmnt_no_space(): state = parse(""" int f() { int foo = 6, bar = 3; if (foo/bar == 2) return 13; return 5; } """) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) 
print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 13 def test_interpret_marco_if0(): state = parse(""" int f() { #if 0 return 13; #endif return 5; } """) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 5 def test_interpret_varname_like_struct(): state = parse(""" typedef struct { int x; } PyGC_Head; typedef int node; // problematic void g(PyGC_Head *node) { node->x = 13; } int f() { PyGC_Head node; g(&node); return node.x; } """) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 13 def test_interpret_malloc_macro(): state = parse(""" #include <stdlib.h> typedef int PyGC_Head; #define PY_SSIZE_T_MAX ((long)(((size_t)-1)>>1)) #define PyObject_MALLOC PyMem_MALLOC #define PyObject_FREE PyMem_FREE #define PyMem_MALLOC(n) ((size_t)(n) > (size_t)PY_SSIZE_T_MAX ? 0 \ : malloc((n) ? (n) : 1)) #define PyMem_FREE free int f() { int basicsize = 20; PyGC_Head* g; g = (PyGC_Head *)PyObject_MALLOC( sizeof(PyGC_Head) + basicsize); PyObject_FREE(g); return 42; } """, withGlobalIncludeWrappers=True) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 42 def test_interpret_malloc_in_ternary(): state = parse(""" #include <stdlib.h> int f() { void* g = 0 ? 
0 : malloc(12); free(g); return 42; } """, withGlobalIncludeWrappers=True) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 42 def test_interpret_double_macro(): state = parse(""" #define M1 M2 #define M2(n) (n * 2) int f() { int x = M1(5); return x; } """) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 10 def test_interpret_max_uint16(): state = parse(""" #include <stdint.h> int64_t f() { int64_t x = (uint16_t) -1; return x; } """, withGlobalIncludeWrappers=True) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int64) assert r.value == 2 ** 16 - 1 def test_interpret_max_uint16_plus1(): state = parse(""" #include <stdint.h> int64_t f() { int64_t x = (int32_t)(uint16_t)(-1) + 1; return x; } """, withGlobalIncludeWrappers=True) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int64) assert r.value == 2 ** 16 def test_interpret_ternary_second(): state = parse(""" long f() { long max_ushort = (unsigned short)(-1); long x = (long)(max_ushort) + 1; long g = 0 ? 
(unsigned short)(0) : x; return g; } """) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_long) assert r.value == 256 ** ctypes.sizeof(ctypes.c_short) def test_interpret_double_cast(): state = parse(""" long f() { long x = (int)(unsigned short)(-1); return x; } """) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_long) assert r.value == 256 ** ctypes.sizeof(ctypes.c_short) - 1 def test_interpret_int_float(): state = parse(""" int f() { int x = 4 * 0.5; return x; } """) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 2 def test_interpret_float_cast(): state = parse(""" int f() { int x = (int) 2.2; return x; } """) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 2 def test_interpret_double(): state = parse(""" double f() { double x = 2.5; return x; } """) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_double) assert r.value == 2.5 def test_interpret_strlen_plus1(): state = parse(""" #include <stdint.h> #include <string.h> size_t f() { size_t x = strlen("foo") + 1; return x; } """, withGlobalIncludeWrappers=True) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_size_t) assert r.value == 4 def test_interpret_atoi(): state = parse(""" #include <stdlib.h> int f() { return atoi("42"); } """, withGlobalIncludeWrappers=True) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) 
assert isinstance(r, ctypes.c_int) assert r.value == 42 def test_interpret_env_c_str(): state = parse(""" #include <stdlib.h> const char* f() { const char* s = getenv("_cparser_test_interpret_env_c_str"); return s; } """, withGlobalIncludeWrappers=True) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) import os os.environ["_cparser_test_interpret_env_c_str"] = "ABC" interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) wrapped_c_byte = interpreter.globalsDict["ctypes_wrapped"].c_byte assert isinstance(r, ctypes.POINTER(wrapped_c_byte)) # char is always byte in the interpreter r = ctypes.cast(r, ctypes.c_char_p) assert r.value.decode("utf8") == "ABC" def test_interpret_env_non_existing_c_str(): state = parse(""" #include <stdlib.h> const char* f() { const char* s = getenv("_test_interpret_env_non_existing_c_str"); return s; } """, withGlobalIncludeWrappers=True) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) wrapped_c_byte = interpreter.globalsDict["ctypes_wrapped"].c_byte assert isinstance(r, ctypes.POINTER(wrapped_c_byte)) # char is always byte in the interpreter ptr = ctypes.cast(r, wrapCTypeClass(ctypes.c_void_p)) assert ptr.value in (0, None) def test_interpret_cond_c_str(): state = parse(""" const char* f() { const char* s = 0 ? "foo" : "bazz"; return 0 ? "blubber" : s; } """) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) wrapped_c_byte = interpreter.globalsDict["ctypes_wrapped"].c_byte assert isinstance(r, ctypes.POINTER(wrapped_c_byte)) # char is always byte in the interpreter r = ctypes.cast(r, ctypes.c_char_p) assert r.value.decode("utf8") == "bazz" def test_interpret_cstr(): state = parse(""" int f() { const char* p = 0; p = 0 ? 0 : "P"; return *p; } """) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == ord("P") def test_interpret_cstr_indirect(): state = parse(""" const char* g() { return "foo"; } int f() { const char* p = 0; p = 0 ? 
0 : g(); return *p; } """) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == ord("f") def test_interpret_struct_forward_type(): state = parse(""" typedef struct _A { struct _B *b; } A; typedef struct _B { int x; } B; int f() { A a; B b; a.b = &b; a.b->x = 42; return a.b->x + 1; } """) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 43 def test_interpret_struct_array(): state = parse(""" /* GC information is stored BEFORE the object structure. */ typedef union _gc_head { struct { union _gc_head *gc_next; union _gc_head *gc_prev; unsigned long gc_refs; } gc; long double dummy; /* force worst-case alignment */ } PyGC_Head; struct gc_generation { PyGC_Head head; int threshold; /* collection threshold */ int count; /* count of allocations or collections of younger generations */ }; #define NUM_GENERATIONS 3 #define GEN_HEAD(n) (&generations[n].head) /* linked lists of container objects */ static struct gc_generation generations[NUM_GENERATIONS] = { /* PyGC_Head, threshold, count */ {{{GEN_HEAD(0), GEN_HEAD(0), 0}}, 700, 0}, {{{GEN_HEAD(1), GEN_HEAD(1), 0}}, 10, 0}, {{{GEN_HEAD(2), GEN_HEAD(2), 0}}, 10, 0}, }; int f() { // via _PyObject_GC_Malloc generations[0].count++; /* number of allocated GC objects */ return generations[0].count; } """) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 1 def test_interpret_global_array(): state = parse(""" int x[3] = {3,2,1}; int f() { x[1]++; return x[1]; } """) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) print("x:", state.vars["x"]) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 3 def test_interpret_gc_malloc(): state = parse(""" #include <stdlib.h> typedef struct _PyObject { int x; } PyObject; /* GC information is stored BEFORE the object structure. 
*/ typedef union _gc_head { struct { union _gc_head *gc_next; union _gc_head *gc_prev; long gc_refs; } gc; long double dummy; /* force worst-case alignment */ } PyGC_Head; /* Get an object's GC head */ #define AS_GC(o) ((PyGC_Head *)(o)-1) /* Get the object given the GC head */ #define FROM_GC(g) ((PyObject *)(((PyGC_Head *)g)+1)) PyObject* PyObject_GC_Malloc(size_t basicsize) { PyObject *op; PyGC_Head *g; g = (PyGC_Head *)malloc(sizeof(PyGC_Head) + basicsize); g->gc.gc_refs = -1; op = FROM_GC(g); return op; } void PyObject_GC_Del(void *op) { PyGC_Head *g = AS_GC(op); free(g); } int f() { PyObject* obj = PyObject_GC_Malloc(16); PyObject_GC_Del(obj); return 42; } """, withGlobalIncludeWrappers=True) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) interpreter.dumpFunc("PyObject_GC_Malloc", output=sys.stdout) interpreter.dumpFunc("PyObject_GC_Del", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 42 def test_interpret_get_opt(): state = parse(""" #include <stdio.h> #include <string.h> int _PyOS_opterr = 1; /* generate error messages */ int _PyOS_optind = 1; /* index into argv array */ char *_PyOS_optarg = NULL; /* optional argument */ static char *opt_ptr = ""; int _PyOS_GetOpt(int argc, char **argv, char *optstring) { char *ptr; int option; if (*opt_ptr == '\0') { if (_PyOS_optind >= argc) return -1; else if (argv[_PyOS_optind][0] != '-' || argv[_PyOS_optind][1] == '\0' /* lone dash */ ) return -1; else if (strcmp(argv[_PyOS_optind], "--") == 0) { ++_PyOS_optind; return -1; } else if (strcmp(argv[_PyOS_optind], "--help") == 0) { ++_PyOS_optind; return 'h'; } else if (strcmp(argv[_PyOS_optind], "--version") == 0) { ++_PyOS_optind; return 'V'; } opt_ptr = &argv[_PyOS_optind++][1]; } if ((option = *opt_ptr++) == '\0') return -1; if (option == 'J') { if (_PyOS_opterr) fprintf(stderr, "-J is reserved for Jython\n"); return '_'; } if (option == 'X') { if (_PyOS_opterr) fprintf(stderr, "-X is reserved for implementation-specific arguments\n"); return '_'; } if ((ptr = strchr(optstring, option)) == NULL) { if (_PyOS_opterr) fprintf(stderr, "Unknown option: -%c\n", option); return '_'; } if (*(ptr + 1) == ':') { if (*opt_ptr != '\0') { _PyOS_optarg = opt_ptr; opt_ptr = ""; } else { if (_PyOS_optind >= argc) { if (_PyOS_opterr) fprintf(stderr, "Argument expected for the -%c option\n", option); return '_'; } _PyOS_optarg = argv[_PyOS_optind++]; } } return option; } int f() { int c; int argc = 3; char* argv[] = {"./cpython.py", "-c", "print 'hello'", 0}; while ((c = _PyOS_GetOpt(argc, argv, "3bBc:dEhiJm:OQ:RsStuUvVW:xX?")) != -1) { } return 42; } """, withGlobalIncludeWrappers=True) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) interpreter.dumpFunc("_PyOS_GetOpt", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 42 def test_interpret_char_p_substract(): state = parse(""" int f() { const char* a = "hello"; const char* b = a + 3; return (int) (b - a); } """) print("Parsed:") 
print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 3 def test_interpret_ptr_comma_tuple(): state = parse(""" int f() { const char* a = "hello"; const char* b; return (b = a), 42; } """) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 42 def test_interpret_octal(): state = parse(""" int f() { return (int) '\\014'; } """) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 12 def test_interpret_macro_version_hex(): state = parse(""" /* Values for PY_RELEASE_LEVEL */ #define PY_RELEASE_LEVEL_ALPHA 0xA #define PY_RELEASE_LEVEL_BETA 0xB #define PY_RELEASE_LEVEL_GAMMA 0xC /* For release candidates */ #define PY_RELEASE_LEVEL_FINAL 0xF /* Serial should be 0 here */ /* Higher for patch releases */ /* Version parsed out into numeric values */ /*--start constants--*/ #define PY_MAJOR_VERSION 2 #define PY_MINOR_VERSION 7 #define PY_MICRO_VERSION 5 #define PY_RELEASE_LEVEL PY_RELEASE_LEVEL_FINAL #define PY_RELEASE_SERIAL 0 /* Version as a string */ #define PY_VERSION "2.7.5" /*--end constants--*/ /* Subversion Revision number of this file (not of the repository). Empty since Mercurial migration. */ #define PY_PATCHLEVEL_REVISION "" /* Version as a single 4-byte hex number, e.g. 0x010502B2 == 1.5.2b2. Use this for numeric comparisons, e.g. #if PY_VERSION_HEX >= ... */ #define PY_VERSION_HEX ((PY_MAJOR_VERSION << 24) | \ (PY_MINOR_VERSION << 16) | \ (PY_MICRO_VERSION << 8) | \ (PY_RELEASE_LEVEL << 4) | \ (PY_RELEASE_SERIAL << 0)) long f() { return PY_VERSION_HEX; } """) print("Parsed:") print("f:", state.funcs["f"]) print("f body:") assert isinstance(state.funcs["f"].body, CBody) pprint(state.funcs["f"].body.contentlist) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r, hex(r.value)) assert isinstance(r, ctypes.c_long) assert r.value == 0x20705f0 def test_interpret_double_macro_rec(): """ Check cpre2_parse() for correctly substituting macros -- not applying the same macro twice in recursion. 
""" state = parse(""" int a() { return 2; } int b() { return 3; } #define a b #define b a int f_a() { return a(); } int f_b() { return b(); } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f_a", output=sys.stdout) interpreter.dumpFunc("f_b", output=sys.stdout) print("Run:") r_a = interpreter.runFunc("f_a") r_b = interpreter.runFunc("f_b") print("result:", r_a, r_b) assert r_a.value == 2 assert r_b.value == 3 def test_interpret_simple_add_two_b(): state = parse(""" int a() { return 2; } int f() { return 1 + a() + a(); } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run:") r = interpreter.runFunc("f") print("result:", r) assert r.value == 5 def test_interpret_double_macro_rec_linear(): state = parse(""" int a() { return 2; } #define b a #define x (1 + b() + b()) int f() { return x; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run:") r = interpreter.runFunc("f") print("result:", r) assert r.value == 5 def test_interpret_undefined_macro(): state = parse(""" int f() { #if not_defined_macro return -3; #endif return 5; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run:") r = interpreter.runFunc("f") print("result:", r) assert r.value == 5 def test_interpret_macro_call_twice(): state = parse(""" #define INC(x) (x + 1) int f(int a) { return INC(INC(a)); } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run:") r = interpreter.runFunc("f", 3) print("result:", r) assert r.value == 5 def test_interpret_macro_concat(): state = parse(""" #define PREFIX( x) foo_ ## x int f() { int foo_bar = 5; return PREFIX( bar); } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run:") r = interpreter.runFunc("f") print("result:", r) assert r.value == 5 def test_interpret_cast_const_void_p(): state = parse(""" int f(const char *target) { const void * x = 0; x = (const void *)(target); return 5; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run:") r = interpreter.runFunc("f", "x") print("result:", r) assert r.value == 5 def test_interpret_cast_const_int(): state = parse(""" int f() { int x = 0; x = (const int)(5); return x; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run:") r = interpreter.runFunc("f") print("result:", r) assert r.value == 5 def test_interpret_for_if_else(): state = parse(""" int f() { int i; for (i = 0; i < 10; ++i) if (i <= 2) {} else { return 5; } return -1; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run:") r = interpreter.runFunc("f") print("result:", r) assert r.value == 5 def test_interpret_char_array_cast_len_int(): state = parse(""" int f() { char formatbuf[(int)5]; return sizeof(formatbuf); } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run:") r = interpreter.runFunc("f") print("result:", r) assert r.value == 5 def test_interpret_char_array_cast_len_sizet(): 
state = parse(""" int f() { char formatbuf[(size_t)5]; return sizeof(formatbuf); } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run:") r = interpreter.runFunc("f") print("result:", r) assert r.value == 5 def test_interpret_int_float_cast(): state = parse(""" int f() { return int(3.2); } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run:") r = interpreter.runFunc("f") print("result:", r) assert r.value == 3 def test_interpret_char_mask_ptr_deref(): state = parse(""" typedef struct { char ob_sval[1]; } PyStringObject; #define Py_CHARMASK(c) ((unsigned char)((c) & 0xff)) int f() { PyStringObject _a, _b; _a.ob_sval[0] = 'A'; _b.ob_sval[0] = 'B'; PyStringObject *a = &_a, *b = &_b; int c = Py_CHARMASK(*a->ob_sval) - Py_CHARMASK(*b->ob_sval); return c; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run:") r = interpreter.runFunc("f") print("result:", r) assert r.value == 255 def test_interpret_char_mask_subscript(): state = parse(""" #define Py_CHARMASK(c) ((unsigned char)((c) & 0xff)) int f() { const char* s = "hello"; int c = Py_CHARMASK(s[1]); return c; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run:") r = interpreter.runFunc("f") print("result:", r) assert r.value == ord('e') def test_interpret_op_mod(): state = parse(""" int f() { int j = 11, tabsize = 8; return tabsize - (j % tabsize); } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run:") r = interpreter.runFunc("f") print("result:", r) assert r.value == 5 def test_interpret_py_init_slots_array(): state = parse(""" typedef int PyObject; typedef PyObject * (*binaryfunc)(PyObject *, PyObject *); typedef struct { binaryfunc nb_add; } PyNumberMethods; typedef struct _heaptypeobject { PyNumberMethods as_number; } PyHeapTypeObject; static PyObject * wrap_binaryfunc_l(PyObject *self, PyObject *args, void *wrapped) { return 0; } #define SLOT1BINFULL(FUNCNAME, TESTFUNC, SLOTNAME, OPSTR, ROPSTR) \\ static PyObject * FUNCNAME(PyObject *self, PyObject *other) { return 0; } #define SLOT1BIN(FUNCNAME, SLOTNAME, OPSTR, ROPSTR) \\ SLOT1BINFULL(FUNCNAME, FUNCNAME, SLOTNAME, OPSTR, ROPSTR) SLOT1BIN(slot_nb_add, nb_add, "__add__", "__radd__") typedef PyObject *(*wrapperfunc)(PyObject *self, PyObject *args, void *wrapped); struct wrapperbase { char *name; int offset; void *function; wrapperfunc wrapper; char *doc; int flags; PyObject *name_strobj; }; typedef struct wrapperbase slotdef; #define offsetof(type, member) ( (int) & ((type*)0) -> member ) #define ETSLOT(NAME, SLOT, FUNCTION, WRAPPER, DOC) \\ {NAME, offsetof(PyHeapTypeObject, SLOT), (void *)(FUNCTION), WRAPPER, \\ DOC, 42} #define BINSLOT(NAME, SLOT, FUNCTION, DOC) \\ ETSLOT(NAME, as_number.SLOT, FUNCTION, wrap_binaryfunc_l, \\ "x." 
NAME "(y) <==> x" DOC "y") static slotdef slotdefs[] = { BINSLOT("__add__", nb_add, slot_nb_add, "+"), {0} }; int f() { return slotdefs[0].flags; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run:") r = interpreter.runFunc("f") print("result:", r) assert r.value == 42 def test_interpret_py_init_slots_array_simple(): state = parse(""" typedef int PyObject; typedef PyObject * (*binaryfunc)(PyObject *, PyObject *); typedef struct { binaryfunc nb_add; } PyNumberMethods; typedef struct _heaptypeobject { PyNumberMethods as_number; } PyHeapTypeObject; typedef struct { char *name; int offset; int flags; } slotdef; #define offsetof(type, member) ( (int) & ((type*)0) -> member ) static slotdef slotdefs[] = { {"__add__", offsetof(PyHeapTypeObject, as_number.nb_add), 42}, {0} }; int f() { return slotdefs[0].flags; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run:") r = interpreter.runFunc("f") print("result:", r) assert r.value == 42 def test_interpret_offsetof(): state = parse(""" typedef int PyObject; typedef PyObject * (*binaryfunc)(PyObject *, PyObject *); typedef struct { long placeholder; binaryfunc nb_add; } PyNumberMethods; #define offsetof(type, member) ( (int) & ((type*)0) -> member ) int f() { int offset = offsetof(PyNumberMethods, nb_add); return offset; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run:") r = interpreter.runFunc("f") print("result:", r) assert r.value == ctypes.sizeof(ctypes.c_long) def test_interpret_offsetof_substruct(): state = parse(""" typedef int PyObject; typedef PyObject * (*binaryfunc)(PyObject *, PyObject *); typedef struct { long placeholder; binaryfunc nb_add; } PyNumberMethods; typedef struct _heaptypeobject { PyNumberMethods as_number; } PyHeapTypeObject; #define offsetof(type, member) ( (int) & ((type*)0) -> member ) int f() { int offset = offsetof(PyHeapTypeObject, as_number.nb_add); return offset; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run:") r = interpreter.runFunc("f") print("result:", r) assert r.value == ctypes.sizeof(ctypes.c_long) def test_interpret_offsetof_subsubstruct(): state = parse(""" typedef struct { long placeholder; long here; } SubSubStruct; typedef struct { long placeholder; SubSubStruct sub; } SubStruct; typedef struct { SubStruct sub; } BaseStruct; #define offsetof(type, member) ( (int) & ((type*)0) -> member ) int f() { int offset = offsetof(BaseStruct, sub.sub.here); return offset; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run:") r = interpreter.runFunc("f") print("result:", r) assert r.value == ctypes.sizeof(ctypes.c_long) * 2 def test_interpret_ptr_with_offset_in_array(): state = parse(""" typedef struct PyHeapTypeObject { long a, b; } PyHeapTypeObject; typedef struct slotdef { char *name; int offset; int flags; } slotdef; #define offsetof(type, member) ( (int) & ((type*)0) -> member ) static slotdef slotdefs[] = { {"a", offsetof(PyHeapTypeObject, a), 1}, {"b", offsetof(PyHeapTypeObject, b), 2}, {0} }; int f() { slotdef *p; for (p = slotdefs; p->name; p++) { if(p[1].name && p->offset > p[1].offset) return -1; } return slotdefs[1].offset; } """) interpreter = Interpreter() 
interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run:") r = interpreter.runFunc("f") print("result:", r) assert r.value == ctypes.sizeof(ctypes.c_long) def test_interpret_func_ptr_ternary(): state = parse(""" typedef int (*func)(int); static int g(int x) { return x + 1; } int f() { func fp = 1 ? g : 0; if(!fp) return -1; return (*fp)(4); } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run:") r = interpreter.runFunc("f") print("result:", r) assert r.value == 5 def test_interpret_ternary_void_p_and_int_p(): state = parse(""" int f() { int x = 5; int* xp = 1 ? &x : ((void*)0); return *xp; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run:") r = interpreter.runFunc("f") print("result:", r) assert r.value == 5 def test_interpret_if_if_else_hanging(): state = parse(""" int f() { int a = 1, b = 2, c = 3, x = -5; if (a == 2) { x = 1; if (b == 2) return -1; } else { x = 2; if (c == 2) { return -2; } } return x; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run:") r = interpreter.runFunc("f") print("result:", r) assert r.value == 2 def test_interpret_func_ptr_call_with_check(): state = parse(""" #define NULL 0 typedef struct _obj { struct _type* ob_type; } PyObject; typedef long (*hashfunc)(PyObject *); typedef struct _type { hashfunc tp_hash; } PyTypeObject; long PyObject_Hash(PyObject *v) { PyTypeObject *tp = v->ob_type; if (tp->tp_hash != NULL) return (*tp->tp_hash)(v); return -10; } static long hash1(PyObject*) { return 1; } int f() { PyTypeObject t1 = { hash1 }; PyObject o1 = { &t1 }; int x1 = PyObject_Hash(&o1); PyTypeObject t2 = { NULL }; PyObject o2 = { &t2 }; int x2 = PyObject_Hash(&o2); return x1 - x2; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) interpreter.dumpFunc("PyObject_Hash", output=sys.stdout) print("Run:") r = interpreter.runFunc("f") print("result:", r) assert r.value == 11 def test_interpret_func_ptr_via_created_obj(): state = parse(""" #include <stdlib.h> #include <assert.h> typedef struct _obj { int dummy; } PyObject; typedef long (*hashfunc)(PyObject *); typedef struct _type { PyObject base; hashfunc tp_hash; } PyTypeObject; static long hash1(PyObject*); static long hash2(PyObject*) { return -5; } PyObject* new_type() { PyObject* obj = (PyObject*) malloc(sizeof(PyTypeObject)); PyTypeObject* tobj = (PyTypeObject*) obj; tobj->tp_hash = 0; assert(tobj->tp_hash == 0); tobj->tp_hash = hash1; PyTypeObject dummy = {{}, hash1}; assert(dummy.tp_hash != 0); assert(dummy.tp_hash == hash1); assert(dummy.tp_hash != hash2); assert(tobj->tp_hash != 0); assert(tobj->tp_hash == dummy.tp_hash); return obj; } static long hash1(PyObject*) { return 42; } int f() { PyObject* obj = new_type(); PyTypeObject* tobj = (PyTypeObject*) obj; int x = tobj->tp_hash(0); free(obj); return x; } """, withGlobalIncludeWrappers=True) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) interpreter.dumpFunc("new_type", output=sys.stdout) print("Run:") r = interpreter.runFunc("f") print("result:", r) assert r.value == 42 def test_interpret_local_obj_bracy_init_func_ptr(): state = parse(""" #include <assert.h> typedef int (*hashfunc)(int); typedef struct 
_obj { hashfunc v; } PyObject; static int hash1(int) { return 42; } static int hash2(int) { return 43; } int f() { PyObject obj = {hash1}; assert(obj.v != 0); assert(obj.v == hash1); assert(obj.v != hash2); int x = obj.v(13); obj.v = 0; assert(obj.v == 0); obj.v = hash2; assert(obj.v == hash2); return x; } """, withGlobalIncludeWrappers=True) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run:") r = interpreter.runFunc("f") print("result:", r) assert r.value == 42 def test_interpret_func_ptr_bracy_init(): state = parse(""" #include <assert.h> typedef long (*hashfunc)(long); typedef struct _type { hashfunc tp_hash; } PyTypeObject; static long hash1(long) { return 42; } static long hash2(long) { return -5; } int f() { hashfunc h; h = hash1; PyTypeObject dummy = {hash1}; assert(dummy.tp_hash != 0); assert(dummy.tp_hash == hash1); assert(dummy.tp_hash != hash2); return dummy.tp_hash(13); } """, withGlobalIncludeWrappers=True) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run:") r = interpreter.runFunc("f") print("result:", r) assert r.value == 42 def test_interpret_array_access_ptr_heap(): state = parse(""" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> typedef struct _object { int v; } PyObject; typedef long Py_ssize_t; typedef struct _dictentry { Py_ssize_t me_hash; PyObject *me_key; PyObject *me_value; } PyDictEntry; #define PyDict_MINSIZE 8 typedef struct _dictobject PyDictObject; struct _dictobject { PyObject base; Py_ssize_t ma_mask; PyDictEntry *ma_table; PyDictEntry *(*ma_lookup)(PyDictObject *mp, PyObject *key, long hash); PyDictEntry ma_smalltable[PyDict_MINSIZE]; }; static int _iwashere = 0; static PyDictEntry * lookdict_string(PyDictObject *mp, PyObject *key, register long hash) { register size_t i; register size_t mask = (size_t)mp->ma_mask; PyDictEntry *ep0 = mp->ma_table; register PyDictEntry *ep; i = hash & mask; ep = &ep0[i]; _iwashere = 1; if (ep->me_key == NULL || ep->me_key == key) return ep; return 0; } typedef union _gc_head { struct { union _gc_head *gc_next; union _gc_head *gc_prev; Py_ssize_t gc_refs; } gc; long double dummy; /* force worst-case alignment */ } PyGC_Head; #define AS_GC(o) ((PyGC_Head *)(o)-1) #define FROM_GC(g) ((PyObject *)(((PyGC_Head *)g)+1)) PyObject* _PyObject_GC_Malloc(size_t basicsize) { PyObject *op; PyGC_Head *g; g = (PyGC_Head *)malloc(sizeof(PyGC_Head) + basicsize); memset(g, 0, sizeof(PyGC_Head)); g->gc.gc_refs = -1; op = FROM_GC(g); return op; } void PyObject_GC_Del(void *op) { PyGC_Head *g = AS_GC(op); free(g); } #define INIT_NONZERO_DICT_SLOTS(mp) do { \\ (mp)->ma_table = (mp)->ma_smalltable; \\ (mp)->ma_mask = PyDict_MINSIZE - 1; \\ } while(0) static PyObject* dict_new() { PyObject *self; self = (PyObject*) _PyObject_GC_Malloc(sizeof(PyDictObject)); memset(self, 0, sizeof(PyDictObject)); PyDictObject *d = (PyDictObject *)self; assert(d->ma_table == NULL); INIT_NONZERO_DICT_SLOTS(d); d->ma_lookup = lookdict_string; return self; } int f() { PyDictObject* d = (PyDictObject*) dict_new(); PyObject key_stack; PyDictEntry* entry = d->ma_lookup(d, &key_stack, 13); assert(_iwashere); assert(entry); PyObject_GC_Del(d); return 42; } """, withGlobalIncludeWrappers=True) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) interpreter.dumpFunc("dict_new", output=sys.stdout) 
interpreter.dumpFunc("lookdict_string", output=sys.stdout) print("Run:") r = interpreter.runFunc("f") print("result:", r) assert r.value == 42 def test_interpret_for_loop_continue(): state = parse(""" int f() { int i = 0; for (; i < 5; ++i) { continue; } return i; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run:") r = interpreter.runFunc("f") print("result:", r) assert r.value == 5 def test_interpret_void_p_p(): state = parse(""" static void** slotptr() { const char* s = "foo"; return (void**) s; } int f() { void** p = slotptr(); return ((const char*) p)[1]; } """) print("Parsed:") print("slotptr:", state.funcs["slotptr"]) assert isinstance(state.funcs["slotptr"].body, CBody) f_content = state.funcs["slotptr"].body.contentlist assert isinstance(f_content[1], CReturnStatement) print("slotptr return body:") pprint(f_content[1].body) assert isinstance(f_content[1].body, CStatement) assert isinstance(f_content[1].body._leftexpr, CFuncCall) cast_base = f_content[1].body._leftexpr.base pprint(cast_base) assert isType(cast_base) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) interpreter.dumpFunc("slotptr", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == ord('o') def test_interpret_void_p_p_incr(): state = parse(""" static void** slotptr() { const char* ptr = "foobar"; long offset = 1; if (ptr != 0) ptr += offset; return (void**) ptr; } int f() { void** p = slotptr(); return ((const char*) p)[2]; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) interpreter.dumpFunc("slotptr", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == ord('b') def test_interpret_static_func_ptr_to_void_p(): state = parse(""" typedef int (*unaryfunc)(int); typedef struct _typeobj { unaryfunc tp_repr; } PyTypeObject; static int type_repr(int x) { return x; } static void look(void* wrapper) { void* w; w = wrapper; } int f() { PyTypeObject my_type = {type_repr}; look((void*) my_type.tp_repr); return 42; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) interpreter.dumpFunc("look", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 42 def test_interpret_func_to_void_p(): state = parse(""" static int* type_repr(int x) { return x; } static void look(void* wrapper) { void* w; w = wrapper; } int f() { look((void*) type_repr); return 42; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) interpreter.dumpFunc("look", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 42 def test_interpret_func_addr_to_void_p(): state = parse(""" static int* type_repr(int x) { return x; } static void look(void* wrapper) { void* w; w = wrapper; } int f() { look((void*) &type_repr); return 42; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) interpreter.dumpFunc("look", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") 
print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 42 def test_interpret_func_call_pass_array(): state = parse(""" typedef struct PyMethodDef { char* name; } PyMethodDef; typedef int PyObject; static PyObject* PyCFunction_New(PyMethodDef*) { return 0; } static struct PyMethodDef tp_new_methoddef[] = { {"__new__"}, {0} }; int f() { PyObject *func; func = PyCFunction_New(tp_new_methoddef); if (func == 0) return 13; return -1; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 13 def test_interpret_struct_same_name_as_typedef(): state = parse(""" typedef struct PyMethodDef { char* ml_name; } PyMethodDef; int f() { PyMethodDef m = {"foo"}; struct PyMethodDef* mp = &m; return mp->ml_name[1]; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == ord('o') def test_interpret_struct_same_name_as_typedef_2(): state = parse(""" typedef struct PyMethodDef { char* ml_name; } PyMethodDef; int f() { struct PyMethodDef m = {"foo"}; return m.ml_name[1]; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == ord('o') def test_interpret_func_ptr_in_static_array(): state = parse(""" typedef struct _methdef { char* ml_name; } PyMethodDef; typedef struct _typeobj { char* name; PyMethodDef *tp_methods; } PyTypeObject; static PyMethodDef object_methods[] = { {"__reduce_ex__"}, {"__reduce__"}, {0} }; PyTypeObject PyBaseObject_Type = { "foo", object_methods /* tp_methods */ }; typedef int PyObject; PyObject* PyDescr_NewMethod(PyTypeObject *type, PyMethodDef *method) { PyMethodDef* m; m = method; return 0; } static int add_methods(PyTypeObject *type, PyMethodDef *meth) { for (; meth->ml_name != 0; meth++) { PyObject *descr; descr = PyDescr_NewMethod(type, meth); } return 0; } int f() { add_methods(&PyBaseObject_Type, PyBaseObject_Type.tp_methods); return 42; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) interpreter.dumpFunc("add_methods", output=sys.stdout) interpreter.dumpFunc("PyDescr_NewMethod", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 42 def test_interpret_sys_types_h(): state = parse(""" #include <sys/types.h> int f() { size_t x = 42; return (int) x; } """, withGlobalIncludeWrappers=True) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 42 def test_interpret_local_func_ptr_type(): state = parse(""" typedef int PyObject; PyObject* g(PyObject* x) { return x; } int f() { PyObject *(*fp)(PyObject *); fp = g; PyObject x = 42; PyObject* y = fp(&x); return *y; } """) pprint(state.funcs["f"].body.contentlist[0]) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = 
interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 42 def test_interpret_struct_return(): state = parse(""" typedef struct _complex { int real; int imag; } Py_complex; Py_complex c_sum(Py_complex a, Py_complex b) { Py_complex r; r.real = a.real + b.real; r.imag = a.imag + b.imag; return r; } int f() { Py_complex s; Py_complex a = {1, 2}; Py_complex b = {3, 5}; s = c_sum(a, b); return s.real + s.imag; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 11 def test_interpret_struct_init_assign(): state = parse(""" #include <assert.h> typedef struct _complex { int real; int imag; } Py_complex; typedef struct _A { int x; Py_complex a; Py_complex b; } A; int f() { Py_complex z = {1, 2}; A o1 = {1, {2, 3}, z}; assert(o1.x == 1); assert(o1.a.real == 2); assert(o1.b.imag == 2); A o2; o2 = o1; assert(o2.x == 1); assert(o2.a.real == 2); assert(o2.b.imag == 2); A o3 = o2; assert(o3.x == 1); assert(o3.a.real == 2); assert(o3.b.imag == 2); return o3.x + o3.b.imag + o2.x + o2.a.real + o1.b.real; } """, withGlobalIncludeWrappers=True) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 7 def test_interpret_var_args_noop(): state = parse(""" #include <stdarg.h> typedef int PyObject; PyObject* PyErr_Format(PyObject *exception, const char *format, ...) { va_list vargs; PyObject* string; va_start(vargs, format); va_end(vargs); return 0; } int f() { PyErr_Format(0, "foo%i%i%s", 1, 2, "bar"); return 7; } """, withGlobalIncludeWrappers=True) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) interpreter.dumpFunc("PyErr_Format", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 7 def test_interpret_var_args_vsprintf(): state = parse(""" #include <stdarg.h> #include <stdio.h> #include <assert.h> #include <string.h> typedef int PyObject; char buffer[100]; void g(const char *format, ...) { va_list vargs; va_start(vargs, format); vsprintf(buffer, format, vargs); va_end(vargs); } int f() { g("foo%i%i%s", 1, 2, "bar"); assert(strcmp(buffer, "foo12bar") == 0); return (int) buffer[4]; } """, withGlobalIncludeWrappers=True) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) interpreter.dumpFunc("g", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == ord('2') def test_interpret_var_args_va_list_param(): state = parse(""" #include <stdarg.h> void h(va_list) {} void g(const char* format, ...) 
{ va_list vargs; va_start(vargs, format); h(vargs); va_end(vargs); } int f() { g("foo"); return 42; } """, withGlobalIncludeWrappers=True) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) interpreter.dumpFunc("g", output=sys.stdout) interpreter.dumpFunc("h", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 42 def test_interpret_goto_named_func(): state = parse(""" int g() { return 42; } int f() { int a; a = g(); goto g; a = 13; g: return a; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) interpreter.dumpFunc("g", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 42 def test_interpret_enum_return(): state = parse(""" typedef enum {PyGILState_LOCKED, PyGILState_UNLOCKED} PyGILState_STATE; PyGILState_STATE PyGILState_Ensure(void) { return PyGILState_UNLOCKED; } int f() { return PyGILState_Ensure(); } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) interpreter.dumpFunc("PyGILState_Ensure", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 1 def test_interpret_enum_cast(): state = parse(""" enum why_code {A, B, C}; int f() { enum why_code why; why = (enum why_code) 2; return why; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 2 def test_interpret_enum_stmnt_bitor(): state = parse(""" enum why_code {A=1, B=2, C=4}; int f() { enum why_code why = A; if (why & (A | B)) return 42; return -1; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 42 def test_interpret_attrib_access_after_cast_in_iif(): state = parse(""" struct _typeobj; typedef struct _obj { struct _typeobj* ob_type; } PyObject; typedef struct _typeobj { PyObject base; } PyTypeObject; typedef struct _instobj { PyObject base; PyObject* in_class; } PyInstanceObject; int f() { PyInstanceObject a; PyObject *x = &a, *b; b = 1 ? 
(PyObject*)((PyInstanceObject*)(x))->in_class : (PyObject*)((x)->ob_type); return 3; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 3 def test_interpret_attrib_access_after_cast_simple(): state = parse(""" typedef struct _obj { int v; } PyObject; typedef struct _instobj { PyObject base; PyObject* in_class; } PyInstanceObject; int f() { PyInstanceObject _a; PyInstanceObject *a = &_a; PyObject *b; b = (PyObject*) (a)->in_class; return 3; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 3 def test_interpret_cast_precedence_over_op(): state = parse(""" typedef unsigned char uchar; int f() { uchar a = 240, b = 240; return (int) a + b; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 480 def test_interpret_struct_ptr_to_itself_indirect(): state = parse(""" struct B; struct A { struct B* x; }; struct B { struct A x; }; int f() { struct A a; struct B b; return 3; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 3 def test_interpret_struct_ptr_to_itself_indirect2(): state = parse(""" struct C; struct B { struct C* x; }; struct A { struct B x; }; struct C { struct A x; }; int f() { struct A a; struct B b; struct C c; return 3; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 3 def test_interpret_struct_with_itself_indirect_error(): state = parse(""" struct B; typedef struct B B; struct A { B x; }; struct B { struct A x; }; int f() { struct A a; return 3; } """) interpreter = Interpreter() interpreter.register(state) try: interpreter.dumpFunc("f", output=sys.stdout) except RecursiveStructConstruction as e: print(repr(e)) pass # ok, we expect that else: assert False, "Not expected, no error!" 
def test_interpret_py_atexit(): state = parse(""" #define NEXITFUNCS 32 static void (*exitfuncs[NEXITFUNCS])(void); static int nexitfuncs = 0; int Py_AtExit(void (*func)(void)) { if (nexitfuncs >= NEXITFUNCS) return -1; exitfuncs[nexitfuncs++] = func; return 0; } static void call_ll_exitfuncs(void) { while (nexitfuncs > 0) (*exitfuncs[--nexitfuncs])(); } static int iwashere = -1; static void g() { iwashere = 42; } int f() { Py_AtExit(g); call_ll_exitfuncs(); return iwashere; } """) print(state.vars["exitfuncs"]) print(state.vars["exitfuncs"].type) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) interpreter.dumpFunc("Py_AtExit", output=sys.stdout) interpreter.dumpFunc("call_ll_exitfuncs", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 42 def test_interpret_local_typedef_var(): state = parse(""" int f() { typedef int Int; Int x = 43; return x; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 43 def test_interpret_func_ptr_local_typedef_va_arg(): state = parse(""" #include <stdarg.h> typedef int PyObject; PyObject* p(void* a) { return (PyObject*) a; } PyObject* h(va_list *p_va) { typedef PyObject *(*converter)(void *); converter func = va_arg(*p_va, converter); void *arg = va_arg(*p_va, void *); return (*func)(arg); } int g(int x, ...) { va_list vargs; va_start(vargs, x); PyObject* r_p; r_p = h(&vargs); int r = *r_p; va_end(vargs); return r + x; } int f() { int x = 43; return g(13, p, &x) + 1; } """, withGlobalIncludeWrappers=True) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) interpreter.dumpFunc("g", output=sys.stdout) interpreter.dumpFunc("h", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 1 + 13 + 43 def test_interpret_va_arg_custom(): state = parse(""" #include <stdarg.h> #include <string.h> int g(const char* format, ...) { int res = 0; va_list vargs; va_start(vargs, format); char c; for(; c = *format; ++format) { switch(c) { case 'c': res += va_arg(vargs, char); break; case 'i': res += va_arg(vargs, int); break; case 'l': res += va_arg(vargs, long); break; case 's': res += strlen(va_arg(vargs, char*)); break; default: return -1; } } va_end(vargs); return res; } int f() { return g("iscl", 13, "foo", 'A', 11); } """, withGlobalIncludeWrappers=True) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) interpreter.dumpFunc("g", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 13 + len("foo") + ord('A') + 11 def test_interpret_va_arg_copy(): state = parse(""" #include <stdarg.h> static int va_build_value(const char *format, va_list va) { va_list lva; #ifdef VA_LIST_IS_ARRAY memcpy(lva, va, sizeof(va_list)); #else #ifdef __va_copy __va_copy(lva, va); #else lva = va; #endif #endif return va_arg(lva, int) + va_arg(lva, int); } int g(const char* format, ...) 
{ va_list vargs; va_start(vargs, format); int r = va_build_value(format, vargs); va_end(vargs); return r; } int f() { return g("iscl", 13, 11); } """, withGlobalIncludeWrappers=True) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) interpreter.dumpFunc("g", output=sys.stdout) interpreter.dumpFunc("va_build_value", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 13 + 11 def test_interpret_assign_func_ptr(): state = parse(""" typedef int PyObject; PyObject* p(void* a) { return (PyObject*) a; } int f() { int r = 0; typedef PyObject *(*converter)(void *); int x = 1; converter func = p; r += *func(&x); converter func2 = func; r += *func2(&x); func = func2; r += *func(&x); return r; } """) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 3 def test_interpret_sig_handler(): state = parse(""" #include <signal.h> typedef void (*PyOS_sighandler_t)(int); PyOS_sighandler_t PyOS_getsig(int sig) { PyOS_sighandler_t handler; handler = signal(sig, SIG_IGN); if (handler != SIG_ERR) signal(sig, handler); return handler; } int f() { PyOS_getsig(SIGINT); return 3; } """, withGlobalIncludeWrappers=True) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) interpreter.dumpFunc("PyOS_getsig", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 3 def test_interpret_locale_include(): state = parse(""" #include <locale.h> int f() { return 3; } """, withGlobalIncludeWrappers=True) interpreter = Interpreter() interpreter.register(state) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 3 def test_interpret_fcntl_open_close(): state = parse(""" #include <fcntl.h> typedef long Py_ssize_t; static void dev_urandom_noraise() { int fd; fd = open("/dev/urandom", O_RDONLY); close(fd); } int f() { dev_urandom_noraise(); return 3; } """, withGlobalIncludeWrappers=True) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) interpreter.dumpFunc("dev_urandom_noraise", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 3 def test_interpret_strcpy_str_A(): state = parse(""" #include <stdlib.h> #include <string.h> int f() { char *s = malloc(10); strcpy(s, "ABC"); int c = s[0]; free(s); return c; } """, withGlobalIncludeWrappers=True) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 65 def test_interpret_int_div(): state = parse(""" #include <stdlib.h> #include <string.h> int f() { int x; x = 1; x = x / 2; return (int) (x * 2); } """, withGlobalIncludeWrappers=True) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 0 def test_interpret_float_div(): 
state = parse(""" #include <stdlib.h> #include <string.h> int f() { float x; x = 1.; x = x / 2.; return (int) (x * 2.1); } """, withGlobalIncludeWrappers=True) interpreter = Interpreter() interpreter.register(state) print("Func dump:") interpreter.dumpFunc("f", output=sys.stdout) print("Run f:") r = interpreter.runFunc("f") print("result:", r) assert isinstance(r, ctypes.c_int) assert r.value == 1 if __name__ == '__main__': helpers_test.main(globals())
[]
[]
[ "_cparser_test_interpret_env_c_str" ]
[]
["_cparser_test_interpret_env_c_str"]
python
1
0
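
Note: the interpreter tests above all exercise the same flow — build parser state from a C snippet, register it with an Interpreter, optionally dump the translated function, then run it and check the resulting ctypes value. Below is a minimal sketch of that shared pattern for reference; the import line is hypothetical (the test module's real imports are outside this excerpt) and run_c_snippet is not a helper from the file, just an illustration.

import ctypes
import sys

# Hypothetical import path; the real cparser test module's imports are not shown above.
from cparser_test_helpers import parse, Interpreter

def run_c_snippet(source, funcname="f"):
    state = parse(source)                # parse the C source into interpreter state
    interpreter = Interpreter()
    interpreter.register(state)          # register parsed funcs/globals with the interpreter
    interpreter.dumpFunc(funcname, output=sys.stdout)  # optional: show the translated function
    return interpreter.runFunc(funcname)               # returns a ctypes value

r = run_c_snippet("int f() { return 40 + 2; }")
assert isinstance(r, ctypes.c_int) and r.value == 42
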
tests/plugins/test_tpu_spawn.py
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from unittest import mock
from unittest.mock import MagicMock

import pytest
import torch
from torch.utils.data import DataLoader

from pytorch_lightning import Trainer
from pytorch_lightning.plugins.training_type import TPUSpawnPlugin
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.helpers.boring_model import BoringModel, RandomDataset
from tests.helpers.dataloaders import CustomNotImplementedErrorDataloader
from tests.helpers.runif import RunIf
from tests.helpers.utils import pl_multi_process_test


class BoringModelNoDataloaders(BoringModel):
    def train_dataloader(self):
        raise NotImplementedError

    def val_dataloader(self):
        raise NotImplementedError

    def test_dataloader(self):
        raise NotImplementedError

    def predict_dataloader(self):
        raise NotImplementedError


_loader = DataLoader(RandomDataset(32, 64))
_loader_no_len = CustomNotImplementedErrorDataloader(_loader)


@pytest.mark.parametrize(
    "train_dataloaders, val_dataloaders, test_dataloaders, predict_dataloaders",
    [
        (_loader_no_len, None, None, None),
        (None, _loader_no_len, None, None),
        (None, None, _loader_no_len, None),
        (None, None, None, _loader_no_len),
        (None, [_loader, _loader_no_len], None, None),
    ],
)
@mock.patch("pytorch_lightning.plugins.training_type.tpu_spawn.xm")
def test_error_iterable_dataloaders_passed_to_fit(
    _, tmpdir, train_dataloaders, val_dataloaders, test_dataloaders, predict_dataloaders
):
    """Test that the TPUSpawnPlugin identifies dataloaders with iterable datasets and fails early."""
    trainer = Trainer()
    model = BoringModelNoDataloaders()
    model.trainer = trainer
    trainer._data_connector.attach_dataloaders(
        model,
        train_dataloaders=train_dataloaders,
        val_dataloaders=val_dataloaders,
        test_dataloaders=test_dataloaders,
        predict_dataloaders=predict_dataloaders,
    )

    with pytest.raises(MisconfigurationException, match="TPUs do not currently support"):
        TPUSpawnPlugin(MagicMock()).connect(model)


@mock.patch("pytorch_lightning.plugins.training_type.tpu_spawn.xm")
def test_error_process_iterable_dataloader(_):
    with pytest.raises(MisconfigurationException, match="TPUs do not currently support"):
        TPUSpawnPlugin(MagicMock()).process_dataloader(_loader_no_len)


class BoringModelTPU(BoringModel):
    def on_train_start(self) -> None:
        assert self.device == torch.device("xla", index=1)
        assert os.environ.get("PT_XLA_DEBUG") == "1"


@RunIf(tpu=True)
@pl_multi_process_test
def test_model_tpu_one_core():
    """Tests if device/debug flag is set correctly when training and after teardown for TPUSpawnPlugin."""
    trainer = Trainer(tpu_cores=1, fast_dev_run=True, strategy=TPUSpawnPlugin(debug=True))
    # assert training type plugin attributes for device setting
    assert isinstance(trainer.training_type_plugin, TPUSpawnPlugin)
    assert not trainer.training_type_plugin.on_gpu
    assert trainer.training_type_plugin.on_tpu
    assert trainer.training_type_plugin.root_device == torch.device("xla", index=1)
    model = BoringModelTPU()
    trainer.fit(model)
    assert "PT_XLA_DEBUG" not in os.environ
[]
[]
[ "PT_XLA_DEBUG" ]
[]
["PT_XLA_DEBUG"]
python
1
0
cloudfoundry_integration_test.go
// +build cf_integration

package drmaa2os_test

import (
	. "github.com/dgruber/drmaa2os"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	"github.com/dgruber/drmaa2interface"

	"os"
)

var _ = Describe("Cloud Foundry integration", func() {

	var sm drmaa2interface.SessionManager
	var js drmaa2interface.JobSession
	var jt drmaa2interface.JobTemplate

	BeforeEach(func() {
		os.Remove("cintegration_tmp.db")
		var err error

		// test expects that these environment variables are set
		Ω(os.Getenv("CF_INSTANCE_GUID")).ShouldNot(Equal(""))
		Ω(os.Getenv("CF_API")).ShouldNot(Equal(""))
		Ω(os.Getenv("CF_USER")).ShouldNot(Equal(""))
		Ω(os.Getenv("CF_PASSWORD")).ShouldNot(Equal(""))

		sm, err = NewCloudFoundrySessionManager(os.Getenv("CF_API"), os.Getenv("CF_USER"),
			os.Getenv("CF_PASSWORD"), "cintegration_tmp.db")
		Ω(err).Should(BeNil())
		js, err = sm.CreateJobSession("testsession", "")
		Ω(err).Should(BeNil())
		jt = drmaa2interface.JobTemplate{
			RemoteCommand: "/bin/sleep",
			Args:          []string{"13"},
			JobCategory:   os.Getenv("CF_INSTANCE_GUID"),
		}
	})

	It("submits a task", func() {
		job, err := js.RunJob(jt)
		Ω(err).Should(BeNil())
		Ω(job).ShouldNot(BeNil())
	})

	It("submits a task and waits for it", func() {
		jt.Args = []string{"0"}
		job, err := js.RunJob(jt)
		Ω(err).Should(BeNil())
		Ω(job).ShouldNot(BeNil())
		Ω(job.WaitTerminated(drmaa2interface.InfiniteTime)).Should(BeNil())
		Ω(job.GetState()).Should(Equal(drmaa2interface.Done))
		ji, err := job.GetJobInfo()
		Ω(err).Should(BeNil())
		Ω(ji.ExitStatus).Should(BeNumerically("==", 0))
	})

	It("submits a failing task and waits for it", func() {
		jt.RemoteCommand = "notacommand"
		job, err := js.RunJob(jt)
		Ω(err).Should(BeNil())
		Ω(job).ShouldNot(BeNil())
		Ω(job.WaitTerminated(drmaa2interface.InfiniteTime)).Should(BeNil())
		Ω(job.GetState()).Should(Equal(drmaa2interface.Failed))
	})

	It("submits a task array and waits for it", func() {
		jt.Args = []string{"0"}
		aj, err := js.RunBulkJobs(jt, 1, 3, 1, -1)
		Ω(err).Should(BeNil())
		Ω(aj).ShouldNot(BeNil())
		Ω(len(aj.GetJobs())).Should(BeNumerically("==", 3))
		for _, job := range aj.GetJobs() {
			Ω(err).Should(BeNil())
			Ω(job.WaitTerminated(drmaa2interface.InfiniteTime)).Should(BeNil())
			Ω(job.GetState()).Should(Equal(drmaa2interface.Done))
			ji, err := job.GetJobInfo()
			Ω(err).Should(BeNil())
			Ω(ji.ExitStatus).Should(BeNumerically("==", 0))
		}
	})
})
[ "\"CF_INSTANCE_GUID\"", "\"CF_API\"", "\"CF_USER\"", "\"CF_PASSWORD\"", "\"CF_API\"", "\"CF_USER\"", "\"CF_PASSWORD\"", "\"CF_INSTANCE_GUID\"" ]
[]
[ "CF_INSTANCE_GUID", "CF_USER", "CF_PASSWORD", "CF_API" ]
[]
["CF_INSTANCE_GUID", "CF_USER", "CF_PASSWORD", "CF_API"]
go
4
0
integration/integration_suite_test.go
package integration_test

import (
	"log"
	"os"
	"path/filepath"
	"testing"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
	"go.uber.org/zap/zaptest/observer"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	"sigs.k8s.io/controller-runtime/pkg/manager"

	"code.cloudfoundry.org/cf-operator/integration/machinery"
	"code.cloudfoundry.org/cf-operator/pkg/client/clientset/versioned"
	"code.cloudfoundry.org/cf-operator/pkg/operator"
)

func TestIntegration(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Integration Suite")
}

var stop chan struct{}

type IntegrationSuite struct {
	machinery.Machine

	mgr         manager.Manager
	namespace   string
	kubeConfig  *rest.Config
	logRecorded *observer.ObservedLogs
	log         *zap.SugaredLogger
}

var suite = &IntegrationSuite{
	namespace: "",
}

var _ = BeforeSuite(func() {
	suite.setup()
	err := suite.startClients(suite.kubeConfig)
	Expect(err).NotTo(HaveOccurred())
	suite.startOperator()
})

var _ = AfterSuite(func() {
	defer func() {
		if stop != nil {
			close(stop)
		}
	}()
})

func (s *IntegrationSuite) setup() {
	ns, found := os.LookupEnv("TEST_NAMESPACE")
	if !found {
		ns = "default"
	}
	s.namespace = ns

	var core zapcore.Core
	core, s.logRecorded = observer.New(zapcore.InfoLevel)
	s.log = zap.New(core).Sugar()

	err := suite.setupKube()
	Expect(err).NotTo(HaveOccurred())

	suite.mgr, err = operator.NewManager(suite.log, suite.kubeConfig, manager.Options{Namespace: suite.namespace})
	Expect(err).NotTo(HaveOccurred())
}

func (s *IntegrationSuite) setupKube() (err error) {
	location := os.Getenv("KUBE_CONFIG")
	if location == "" {
		location = filepath.Join(os.Getenv("HOME"), ".kube", "config")
	}

	s.kubeConfig, err = clientcmd.BuildConfigFromFlags("", location)
	if err != nil {
		log.Printf("INFO: cannot use kube config: %s\n", err)
		s.kubeConfig, err = rest.InClusterConfig()
		if err != nil {
			return
		}
	}
	return
}

func (s *IntegrationSuite) startClients(kubeConfig *rest.Config) (err error) {
	s.Clientset, err = kubernetes.NewForConfig(kubeConfig)
	if err != nil {
		return
	}
	s.VersionedClientset, err = versioned.NewForConfig(kubeConfig)
	return
}

func (s *IntegrationSuite) startOperator() {
	stop = make(chan struct{})
	go s.mgr.Start(stop)
}
[ "\"KUBE_CONFIG\"", "\"HOME\"" ]
[]
[ "HOME", "KUBE_CONFIG" ]
[]
["HOME", "KUBE_CONFIG"]
go
2
0
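
Note: setupKube in the file above resolves cluster credentials in a fixed order — an explicit KUBE_CONFIG path, otherwise $HOME/.kube/config, and only if that cannot be loaded, the in-cluster config. Below is a stdlib-only Python sketch of the same fallback order (Python to match the other files in this dump; the function name is made up, and the final in-cluster step is only indicated by a comment since it needs a Kubernetes client).

import os

def resolve_kubeconfig_path():
    # explicit override via the KUBE_CONFIG environment variable ...
    location = os.environ.get("KUBE_CONFIG", "")
    if not location:
        # ... otherwise the conventional per-user location under $HOME
        location = os.path.join(os.environ.get("HOME", ""), ".kube", "config")
    # if loading this path fails, the Go code above falls back to rest.InClusterConfig()
    return location
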
lib/setup.py
# --------------------------------------------------------
# FCN
# Copyright (c) 2016
# Licensed under The MIT License [see LICENSE for details]
# Written by Yu Xiang
# --------------------------------------------------------

import os
from os.path import join as pjoin
import numpy as np
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext


def find_in_path(name, path):
    "Find a file in a search path"
    # adapted from http://code.activestate.com/recipes/52224-find-a-file-given-a-search-path/
    for dir in path.split(os.pathsep):
        binpath = pjoin(dir, name)
        if os.path.exists(binpath):
            return os.path.abspath(binpath)
    return None


def locate_cuda():
    """Locate the CUDA environment on the system

    Returns a dict with keys 'home', 'nvcc', 'include', and 'lib64'
    and values giving the absolute path to each directory.

    Starts by looking for the CUDAHOME env variable. If not found, everything
    is based on finding 'nvcc' in the PATH.
    """

    # first check if the CUDAHOME env variable is in use
    if 'CUDAHOME' in os.environ:
        home = os.environ['CUDAHOME']
        nvcc = pjoin(home, 'bin', 'nvcc')
    else:
        # otherwise, search the PATH for NVCC
        default_path = pjoin(os.sep, 'usr', 'local', 'cuda', 'bin')
        nvcc = find_in_path('nvcc', os.environ['PATH'] + os.pathsep + default_path)
        if nvcc is None:
            raise EnvironmentError('The nvcc binary could not be '
                                   'located in your $PATH. Either add it to your path, or set $CUDAHOME')
        home = os.path.dirname(os.path.dirname(nvcc))

    cudaconfig = {'home': home, 'nvcc': nvcc,
                  'include': pjoin(home, 'include'),
                  'lib64': pjoin(home, 'lib64')}
    for k, v in cudaconfig.iteritems():
        if not os.path.exists(v):
            raise EnvironmentError('The CUDA %s path could not be located in %s' % (k, v))

    return cudaconfig


CUDA = locate_cuda()

# Obtain the numpy include directory. This logic works across numpy versions.
try:
    numpy_include = np.get_include()
except AttributeError:
    numpy_include = np.get_numpy_include()


def customize_compiler_for_nvcc(self):
    """inject deep into distutils to customize how the dispatch
    to gcc/nvcc works.

    If you subclass UnixCCompiler, it's not trivial to get your subclass
    injected in, and still have the right customizations (i.e.
    distutils.sysconfig.customize_compiler) run on it. So instead of going
    the OO route, I have this. Note, it's kind of like a weird functional
    subclassing going on."""

    # tell the compiler it can process .cu
    self.src_extensions.append('.cu')

    # save references to the default compiler_so and _compile methods
    default_compiler_so = self.compiler_so
    super = self._compile

    # now redefine the _compile method. This gets executed for each
    # object but distutils doesn't have the ability to change compilers
    # based on source extension: we add it.
    def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
        if os.path.splitext(src)[1] == '.cu':
            # use the cuda for .cu files
            self.set_executable('compiler_so', CUDA['nvcc'])
            # use only a subset of the extra_postargs, which are 1-1 translated
            # from the extra_compile_args in the Extension class
            postargs = extra_postargs['nvcc']
        else:
            postargs = extra_postargs['gcc']

        super(obj, src, ext, cc_args, postargs, pp_opts)
        # reset the default compiler_so, which we might have changed for cuda
        self.compiler_so = default_compiler_so

    # inject our redefined _compile method into the class
    self._compile = _compile


# run the customize_compiler
class custom_build_ext(build_ext):
    def build_extensions(self):
        customize_compiler_for_nvcc(self.compiler)
        build_ext.build_extensions(self)


ext_modules = [
    Extension('normals.gpu_normals',
              ['normals/compute_normals.cu', 'normals/gpu_normals.pyx'],
              library_dirs=[CUDA['lib64']],
              libraries=['cudart'],
              language='c++',
              runtime_library_dirs=[CUDA['lib64']],
              # this syntax is specific to this build system
              # we're only going to use certain compiler args with nvcc and not with gcc
              # the implementation of this trick is in customize_compiler() below
              extra_compile_args={'gcc': ["-Wno-unused-function"],
                                  'nvcc': ['-arch=sm_35',
                                           '--ptxas-options=-v',
                                           '-c',
                                           '--compiler-options',
                                           "'-fPIC'"]},
              include_dirs = [numpy_include, CUDA['include'], '/usr/local/include/eigen3']
              ),
    Extension(
        "utils.cython_bbox",
        ["utils/bbox.pyx"],
        extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
        include_dirs = [numpy_include]
    ),
    Extension(
        "nms.cpu_nms",
        ["nms/cpu_nms.pyx"],
        extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
        include_dirs = [numpy_include]
    ),
    Extension('nms.gpu_nms',
              ['nms/nms_kernel.cu', 'nms/gpu_nms.pyx'],
              library_dirs=[CUDA['lib64']],
              libraries=['cudart'],
              language='c++',
              runtime_library_dirs=[CUDA['lib64']],
              # this syntax is specific to this build system
              # we're only going to use certain compiler args with nvcc and not with gcc
              # the implementation of this trick is in customize_compiler() below
              extra_compile_args={'gcc': ["-Wno-unused-function"],
                                  'nvcc': ['-arch=sm_52',
                                           '--ptxas-options=-v',
                                           '-c',
                                           '--compiler-options',
                                           "'-fPIC'"]},
              include_dirs = [numpy_include, CUDA['include']]
              )
    #Extension(
    #    "kinect_fusion.kfusion",  # the extension name
    #    sources=['kinect_fusion/kfusion.pyx'],
    #    language='c++',
    #    extra_objects=["kinect_fusion/build/libkfusion.so"],
    #    extra_compile_args={'gcc': ["-Wno-unused-function"],
    #                        'nvcc': ['-arch=sm_35',
    #                                 '--ptxas-options=-v',
    #                                 '-c',
    #                                 '--compiler-options',
    #                                 "'-fPIC'"]},
    #    include_dirs = ['/usr/local/include/eigen3', '/usr/local/cuda/include', 'kinect_fusion/include']
    #)
]

setup(
    name='fcn',
    ext_modules=ext_modules,
    # inject our custom trigger
    cmdclass={'build_ext': custom_build_ext},
)
[]
[]
[ "PATH", "CUDAHOME" ]
[]
["PATH", "CUDAHOME"]
python
2
0
src/specs/integration/bosh_wait_scripts/bosh_wait_scripts_suite_test.go
package bosh_wait_scripts_test

import (
	"os"
	"testing"

	helpers "specs/test_helpers"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

func TestBoshWaitScripts(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "PXC Acceptance Tests -- BoshWaitScripts Suite")
}

var _ = BeforeSuite(func() {
	requiredEnvs := []string{
		"BOSH_ENVIRONMENT",
		"BOSH_CA_CERT",
		"BOSH_CLIENT",
		"BOSH_CLIENT_SECRET",
		"BOSH_DEPLOYMENT",
		"CREDHUB_SERVER",
		"CREDHUB_CLIENT",
		"CREDHUB_SECRET",
	}
	helpers.CheckForRequiredEnvVars(requiredEnvs)

	helpers.SetupBoshDeployment()

	if os.Getenv("BOSH_ALL_PROXY") != "" {
		helpers.SetupSocks5Proxy()
	}
})
[ "\"BOSH_ALL_PROXY\"" ]
[]
[ "BOSH_ALL_PROXY" ]
[]
["BOSH_ALL_PROXY"]
go
1
0
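
Note: the bracketed lists that follow each source file appear to enumerate the environment-variable names that file reads with a literal argument (for the Go suite above, just BOSH_ALL_PROXY). As a hedged illustration of how such names could be pulled out of raw source text, here is a small regex-based Python sketch; the patterns and the function name are mine, not part of the dump, and a regex pass only approximates what a real extractor would do — anything passed as a variable rather than a string literal is not covered.

import re

_ENV_PATTERNS = [
    r'os\.Getenv\(\s*"([^"]+)"\s*\)',              # Go: os.Getenv("NAME")
    r'os\.environ\.get\(\s*[\'"]([^\'"]+)[\'"]',   # Python: os.environ.get("NAME")
    r'os\.environ\[\s*[\'"]([^\'"]+)[\'"]\s*\]',   # Python: os.environ["NAME"]
    r'os\.getenv\(\s*[\'"]([^\'"]+)[\'"]',         # Python: os.getenv("NAME")
]

def constant_env_names(source_text):
    # collect every literal name matched by any pattern, deduplicated and sorted
    names = []
    for pattern in _ENV_PATTERNS:
        names.extend(re.findall(pattern, source_text))
    return sorted(set(names))

assert constant_env_names('if os.Getenv("BOSH_ALL_PROXY") != "" {') == ["BOSH_ALL_PROXY"]
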
bin/cqlsh.py
#!/bin/sh # -*- mode: Python -*- # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """:" # bash code here; finds a suitable python interpreter and execs this file. # prefer unqualified "python" if suitable: python -c 'import sys; sys.exit(not (0x020700b0 < sys.hexversion < 0x03000000))' 2>/dev/null \ && exec python "$0" "$@" for pyver in 2.7; do which python$pyver > /dev/null 2>&1 && exec python$pyver "$0" "$@" done echo "No appropriate python interpreter found." >&2 exit 1 ":""" from __future__ import with_statement import cmd import codecs import ConfigParser import csv import getpass import optparse import os import platform import sys import traceback import warnings import webbrowser from StringIO import StringIO from contextlib import contextmanager from glob import glob from uuid import UUID if sys.version_info[0] != 2 or sys.version_info[1] != 7: sys.exit("\nCQL Shell supports only Python 2.7\n") UTF8 = 'utf-8' CP65001 = 'cp65001' # Win utf-8 variant description = "CQL Shell for Apache Cassandra" version = "5.0.1" readline = None try: # check if tty first, cause readline doesn't check, and only cares # about $TERM. we don't want the funky escape code stuff to be # output if not a tty. if sys.stdin.isatty(): import readline except ImportError: pass CQL_LIB_PREFIX = 'cassandra-driver-internal-only-' CASSANDRA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..') CASSANDRA_CQL_HTML_FALLBACK = 'https://cassandra.apache.org/doc/cql3/CQL-3.0.html' # default location of local CQL.html if os.path.exists(CASSANDRA_PATH + '/doc/cql3/CQL.html'): # default location of local CQL.html CASSANDRA_CQL_HTML = 'file://' + CASSANDRA_PATH + '/doc/cql3/CQL.html' elif os.path.exists('/usr/share/doc/cassandra/CQL.html'): # fallback to package file CASSANDRA_CQL_HTML = 'file:///usr/share/doc/cassandra/CQL.html' else: # fallback to online version CASSANDRA_CQL_HTML = CASSANDRA_CQL_HTML_FALLBACK # On Linux, the Python webbrowser module uses the 'xdg-open' executable # to open a file/URL. But that only works, if the current session has been # opened from _within_ a desktop environment. I.e. 'xdg-open' will fail, # if the session's been opened via ssh to a remote box. # # Use 'python' to get some information about the detected browsers. # >>> import webbrowser # >>> webbrowser._tryorder # >>> webbrowser._browser # if len(webbrowser._tryorder) == 0: CASSANDRA_CQL_HTML = CASSANDRA_CQL_HTML_FALLBACK elif webbrowser._tryorder[0] == 'xdg-open' and os.environ.get('XDG_DATA_DIRS', '') == '': # only on Linux (some OS with xdg-open) webbrowser._tryorder.remove('xdg-open') webbrowser._tryorder.append('xdg-open') # use bundled libs for python-cql and thrift, if available. if there # is a ../lib dir, use bundled libs there preferentially. 
ZIPLIB_DIRS = [os.path.join(CASSANDRA_PATH, 'lib')] myplatform = platform.system() is_win = myplatform == 'Windows' # Workaround for supporting CP65001 encoding on python < 3.3 (https://bugs.python.org/issue13216) if is_win and sys.version_info < (3, 3): codecs.register(lambda name: codecs.lookup(UTF8) if name == CP65001 else None) if myplatform == 'Linux': ZIPLIB_DIRS.append('/usr/share/cassandra/lib') if os.environ.get('CQLSH_NO_BUNDLED', ''): ZIPLIB_DIRS = () def find_zip(libprefix): for ziplibdir in ZIPLIB_DIRS: zips = glob(os.path.join(ziplibdir, libprefix + '*.zip')) if zips: return max(zips) # probably the highest version, if multiple cql_zip = find_zip(CQL_LIB_PREFIX) if cql_zip: ver = os.path.splitext(os.path.basename(cql_zip))[0][len(CQL_LIB_PREFIX):] sys.path.insert(0, os.path.join(cql_zip, 'cassandra-driver-' + ver)) third_parties = ('futures-', 'six-') for lib in third_parties: lib_zip = find_zip(lib) if lib_zip: sys.path.insert(0, lib_zip) warnings.filterwarnings("ignore", r".*blist.*") try: import cassandra except ImportError, e: sys.exit("\nPython Cassandra driver not installed, or not on PYTHONPATH.\n" 'You might try "pip install cassandra-driver".\n\n' 'Python: %s\n' 'Module load path: %r\n\n' 'Error: %s\n' % (sys.executable, sys.path, e)) from cassandra.auth import PlainTextAuthProvider from cassandra.cluster import Cluster from cassandra.marshal import int64_unpack from cassandra.metadata import (ColumnMetadata, KeyspaceMetadata, TableMetadata, protect_name, protect_names) from cassandra.policies import WhiteListRoundRobinPolicy from cassandra.query import SimpleStatement, ordered_dict_factory, TraceUnavailable from cassandra.util import datetime_from_timestamp # cqlsh should run correctly when run out of a Cassandra source tree, # out of an unpacked Cassandra tarball, and after a proper package install. cqlshlibdir = os.path.join(CASSANDRA_PATH, 'pylib') if os.path.isdir(cqlshlibdir): sys.path.insert(0, cqlshlibdir) from cqlshlib import cql3handling, cqlhandling, pylexotron, sslhandling from cqlshlib.copyutil import ExportTask, ImportTask from cqlshlib.displaying import (ANSI_RESET, BLUE, COLUMN_NAME_COLORS, CYAN, RED, FormattedValue, colorme) from cqlshlib.formatting import (DEFAULT_DATE_FORMAT, DEFAULT_NANOTIME_FORMAT, DEFAULT_TIMESTAMP_FORMAT, DateTimeFormat, format_by_type, format_value_utype, formatter_for) from cqlshlib.tracing import print_trace, print_trace_session from cqlshlib.util import get_file_encoding_bomsize, trim_if_present DEFAULT_HOST = '127.0.0.1' DEFAULT_PORT = 9042 DEFAULT_CQLVER = '3.4.0' DEFAULT_PROTOCOL_VERSION = 4 DEFAULT_CONNECT_TIMEOUT_SECONDS = 5 DEFAULT_REQUEST_TIMEOUT_SECONDS = 10 DEFAULT_FLOAT_PRECISION = 5 DEFAULT_MAX_TRACE_WAIT = 10 if readline is not None and readline.__doc__ is not None and 'libedit' in readline.__doc__: DEFAULT_COMPLETEKEY = '\t' else: DEFAULT_COMPLETEKEY = 'tab' cqldocs = None cqlruleset = None epilog = """Connects to %(DEFAULT_HOST)s:%(DEFAULT_PORT)d by default. These defaults can be changed by setting $CQLSH_HOST and/or $CQLSH_PORT. 
When a host (and optional port number) are given on the command line, they take precedence over any defaults.""" % globals() parser = optparse.OptionParser(description=description, epilog=epilog, usage="Usage: %prog [options] [host [port]]", version='cqlsh ' + version) parser.add_option("-C", "--color", action='store_true', dest='color', help='Always use color output') parser.add_option("--no-color", action='store_false', dest='color', help='Never use color output') parser.add_option("--browser", dest='browser', help="""The browser to use to display CQL help, where BROWSER can be: - one of the supported browsers in https://docs.python.org/2/library/webbrowser.html. - browser path followed by %s, example: /usr/bin/google-chrome-stable %s""") parser.add_option('--ssl', action='store_true', help='Use SSL', default=False) parser.add_option("-u", "--username", help="Authenticate as user.") parser.add_option("-p", "--password", help="Authenticate using password.") parser.add_option('-k', '--keyspace', help='Authenticate to the given keyspace.') parser.add_option("-f", "--file", help="Execute commands from FILE, then exit") parser.add_option('--debug', action='store_true', help='Show additional debugging information') parser.add_option("--encoding", help="Specify a non-default encoding for output." + " (Default: %s)" % (UTF8,)) parser.add_option("--cqlshrc", help="Specify an alternative cqlshrc file location.") parser.add_option('--cqlversion', default=DEFAULT_CQLVER, help='Specify a particular CQL version (default: %default).' ' Examples: "3.0.3", "3.1.0"') parser.add_option("-e", "--execute", help='Execute the statement and quit.') parser.add_option("--connect-timeout", default=DEFAULT_CONNECT_TIMEOUT_SECONDS, dest='connect_timeout', help='Specify the connection timeout in seconds (default: %default seconds).') parser.add_option("--request-timeout", default=DEFAULT_REQUEST_TIMEOUT_SECONDS, dest='request_timeout', help='Specify the default request timeout in seconds (default: %default seconds).') parser.add_option("-t", "--tty", action='store_true', dest='tty', help='Force tty mode (command prompt).') optvalues = optparse.Values() (options, arguments) = parser.parse_args(sys.argv[1:], values=optvalues) # BEGIN history/config definition HISTORY_DIR = os.path.expanduser(os.path.join('~', '.cassandra')) if hasattr(options, 'cqlshrc'): CONFIG_FILE = options.cqlshrc if not os.path.exists(CONFIG_FILE): print '\nWarning: Specified cqlshrc location `%s` does not exist. Using `%s` instead.\n' % (CONFIG_FILE, HISTORY_DIR) CONFIG_FILE = os.path.join(HISTORY_DIR, 'cqlshrc') else: CONFIG_FILE = os.path.join(HISTORY_DIR, 'cqlshrc') HISTORY = os.path.join(HISTORY_DIR, 'cqlsh_history') if not os.path.exists(HISTORY_DIR): try: os.mkdir(HISTORY_DIR) except OSError: print '\nWarning: Cannot create directory at `%s`. Command history will not be saved.\n' % HISTORY_DIR OLD_CONFIG_FILE = os.path.expanduser(os.path.join('~', '.cqlshrc')) if os.path.exists(OLD_CONFIG_FILE): if os.path.exists(CONFIG_FILE): print '\nWarning: cqlshrc config files were found at both the old location (%s) and \ the new location (%s), the old config file will not be migrated to the new \ location, and the new location will be used for now. You should manually \ consolidate the config files at the new location and remove the old file.' 
\ % (OLD_CONFIG_FILE, CONFIG_FILE) else: os.rename(OLD_CONFIG_FILE, CONFIG_FILE) OLD_HISTORY = os.path.expanduser(os.path.join('~', '.cqlsh_history')) if os.path.exists(OLD_HISTORY): os.rename(OLD_HISTORY, HISTORY) # END history/config definition CQL_ERRORS = ( cassandra.AlreadyExists, cassandra.AuthenticationFailed, cassandra.CoordinationFailure, cassandra.InvalidRequest, cassandra.Timeout, cassandra.Unauthorized, cassandra.OperationTimedOut, cassandra.cluster.NoHostAvailable, cassandra.connection.ConnectionBusy, cassandra.connection.ProtocolError, cassandra.connection.ConnectionException, cassandra.protocol.ErrorMessage, cassandra.protocol.InternalError, cassandra.query.TraceUnavailable ) debug_completion = bool(os.environ.get('CQLSH_DEBUG_COMPLETION', '') == 'YES') # we want the cql parser to understand our cqlsh-specific commands too my_commands_ending_with_newline = ( 'help', '?', 'consistency', 'serial', 'describe', 'desc', 'show', 'source', 'capture', 'login', 'debug', 'tracing', 'expand', 'paging', 'exit', 'quit', 'clear', 'cls' ) cqlsh_syntax_completers = [] def cqlsh_syntax_completer(rulename, termname): def registrator(f): cqlsh_syntax_completers.append((rulename, termname, f)) return f return registrator cqlsh_extra_syntax_rules = r''' <cqlshCommand> ::= <CQL_Statement> | <specialCommand> ( ";" | "\n" ) ; <specialCommand> ::= <describeCommand> | <consistencyCommand> | <serialConsistencyCommand> | <showCommand> | <sourceCommand> | <captureCommand> | <copyCommand> | <loginCommand> | <debugCommand> | <helpCommand> | <tracingCommand> | <expandCommand> | <exitCommand> | <pagingCommand> | <clearCommand> ; <describeCommand> ::= ( "DESCRIBE" | "DESC" ) ( "FUNCTIONS" | "FUNCTION" udf=<anyFunctionName> | "AGGREGATES" | "AGGREGATE" uda=<userAggregateName> | "KEYSPACES" | "KEYSPACE" ksname=<keyspaceName>? | ( "COLUMNFAMILY" | "TABLE" ) cf=<columnFamilyName> | "INDEX" idx=<indexName> | "MATERIALIZED" "VIEW" mv=<materializedViewName> | ( "COLUMNFAMILIES" | "TABLES" ) | "FULL"? "SCHEMA" | "CLUSTER" | "TYPES" | "TYPE" ut=<userTypeName> | (ksname=<keyspaceName> | cf=<columnFamilyName> | idx=<indexName> | mv=<materializedViewName>)) ; <consistencyCommand> ::= "CONSISTENCY" ( level=<consistencyLevel> )? ; <consistencyLevel> ::= "ANY" | "ONE" | "TWO" | "THREE" | "QUORUM" | "ALL" | "LOCAL_QUORUM" | "EACH_QUORUM" | "SERIAL" | "LOCAL_SERIAL" | "LOCAL_ONE" ; <serialConsistencyCommand> ::= "SERIAL" "CONSISTENCY" ( level=<serialConsistencyLevel> )? ; <serialConsistencyLevel> ::= "SERIAL" | "LOCAL_SERIAL" ; <showCommand> ::= "SHOW" what=( "VERSION" | "HOST" | "SESSION" sessionid=<uuid> ) ; <sourceCommand> ::= "SOURCE" fname=<stringLiteral> ; <captureCommand> ::= "CAPTURE" ( fname=( <stringLiteral> | "OFF" ) )? ; <copyCommand> ::= "COPY" cf=<columnFamilyName> ( "(" [colnames]=<colname> ( "," [colnames]=<colname> )* ")" )? ( dir="FROM" ( fname=<stringLiteral> | "STDIN" ) | dir="TO" ( fname=<stringLiteral> | "STDOUT" ) ) ( "WITH" <copyOption> ( "AND" <copyOption> )* )? ; <copyOption> ::= [optnames]=(<identifier>|<reserved_identifier>) "=" [optvals]=<copyOptionVal> ; <copyOptionVal> ::= <identifier> | <reserved_identifier> | <term> ; # avoiding just "DEBUG" so that this rule doesn't get treated as a terminal <debugCommand> ::= "DEBUG" "THINGS"? ; <helpCommand> ::= ( "HELP" | "?" ) [topic]=( /[a-z_]*/ )* ; <tracingCommand> ::= "TRACING" ( switch=( "ON" | "OFF" ) )? ; <expandCommand> ::= "EXPAND" ( switch=( "ON" | "OFF" ) )? ; <pagingCommand> ::= "PAGING" ( switch=( "ON" | "OFF" | /[0-9]+/) )? 
; <loginCommand> ::= "LOGIN" username=<username> (password=<stringLiteral>)? ; <exitCommand> ::= "exit" | "quit" ; <clearCommand> ::= "CLEAR" | "CLS" ; <qmark> ::= "?" ; ''' @cqlsh_syntax_completer('helpCommand', 'topic') def complete_help(ctxt, cqlsh): return sorted([t.upper() for t in cqldocs.get_help_topics() + cqlsh.get_help_topics()]) def complete_source_quoted_filename(ctxt, cqlsh): partial_path = ctxt.get_binding('partial', '') head, tail = os.path.split(partial_path) exhead = os.path.expanduser(head) try: contents = os.listdir(exhead or '.') except OSError: return () matches = filter(lambda f: f.startswith(tail), contents) annotated = [] for f in matches: match = os.path.join(head, f) if os.path.isdir(os.path.join(exhead, f)): match += '/' annotated.append(match) return annotated cqlsh_syntax_completer('sourceCommand', 'fname')(complete_source_quoted_filename) cqlsh_syntax_completer('captureCommand', 'fname')(complete_source_quoted_filename) @cqlsh_syntax_completer('copyCommand', 'fname') def copy_fname_completer(ctxt, cqlsh): lasttype = ctxt.get_binding('*LASTTYPE*') if lasttype == 'unclosedString': return complete_source_quoted_filename(ctxt, cqlsh) partial_path = ctxt.get_binding('partial') if partial_path == '': return ["'"] return () @cqlsh_syntax_completer('copyCommand', 'colnames') def complete_copy_column_names(ctxt, cqlsh): existcols = map(cqlsh.cql_unprotect_name, ctxt.get_binding('colnames', ())) ks = cqlsh.cql_unprotect_name(ctxt.get_binding('ksname', None)) cf = cqlsh.cql_unprotect_name(ctxt.get_binding('cfname')) colnames = cqlsh.get_column_names(ks, cf) if len(existcols) == 0: return [colnames[0]] return set(colnames[1:]) - set(existcols) COPY_COMMON_OPTIONS = ['DELIMITER', 'QUOTE', 'ESCAPE', 'HEADER', 'NULL', 'DATETIMEFORMAT', 'MAXATTEMPTS', 'REPORTFREQUENCY', 'DECIMALSEP', 'THOUSANDSSEP', 'BOOLSTYLE', 'NUMPROCESSES', 'CONFIGFILE', 'RATEFILE'] COPY_FROM_OPTIONS = ['CHUNKSIZE', 'INGESTRATE', 'MAXBATCHSIZE', 'MINBATCHSIZE', 'MAXROWS', 'SKIPROWS', 'SKIPCOLS', 'MAXPARSEERRORS', 'MAXINSERTERRORS', 'ERRFILE', 'PREPAREDSTATEMENTS'] COPY_TO_OPTIONS = ['ENCODING', 'PAGESIZE', 'PAGETIMEOUT', 'BEGINTOKEN', 'ENDTOKEN', 'MAXOUTPUTSIZE', 'MAXREQUESTS'] @cqlsh_syntax_completer('copyOption', 'optnames') def complete_copy_options(ctxt, cqlsh): optnames = map(str.upper, ctxt.get_binding('optnames', ())) direction = ctxt.get_binding('dir').upper() if direction == 'FROM': opts = set(COPY_COMMON_OPTIONS + COPY_FROM_OPTIONS) - set(optnames) elif direction == 'TO': opts = set(COPY_COMMON_OPTIONS + COPY_TO_OPTIONS) - set(optnames) return opts @cqlsh_syntax_completer('copyOption', 'optvals') def complete_copy_opt_values(ctxt, cqlsh): optnames = ctxt.get_binding('optnames', ()) lastopt = optnames[-1].lower() if lastopt == 'header': return ['true', 'false'] return [cqlhandling.Hint('<single_character_string>')] class NoKeyspaceError(Exception): pass class KeyspaceNotFound(Exception): pass class ColumnFamilyNotFound(Exception): pass class IndexNotFound(Exception): pass class MaterializedViewNotFound(Exception): pass class ObjectNotFound(Exception): pass class VersionNotSupported(Exception): pass class UserTypeNotFound(Exception): pass class FunctionNotFound(Exception): pass class AggregateNotFound(Exception): pass class DecodeError(Exception): verb = 'decode' def __init__(self, thebytes, err, colname=None): self.thebytes = thebytes self.err = err self.colname = colname def __str__(self): return str(self.thebytes) def message(self): what = 'value %r' % (self.thebytes,) if self.colname is not 
None: what = 'value %r (for column %r)' % (self.thebytes, self.colname) return 'Failed to %s %s : %s' \ % (self.verb, what, self.err) def __repr__(self): return '<%s %s>' % (self.__class__.__name__, self.message()) class FormatError(DecodeError): verb = 'format' def full_cql_version(ver): while ver.count('.') < 2: ver += '.0' ver_parts = ver.split('-', 1) + [''] vertuple = tuple(map(int, ver_parts[0].split('.')) + [ver_parts[1]]) return ver, vertuple def format_value(val, output_encoding, addcolor=False, date_time_format=None, float_precision=None, colormap=None, nullval=None): if isinstance(val, DecodeError): if addcolor: return colorme(repr(val.thebytes), colormap, 'error') else: return FormattedValue(repr(val.thebytes)) return format_by_type(type(val), val, output_encoding, colormap=colormap, addcolor=addcolor, nullval=nullval, date_time_format=date_time_format, float_precision=float_precision) def show_warning_without_quoting_line(message, category, filename, lineno, file=None, line=None): if file is None: file = sys.stderr try: file.write(warnings.formatwarning(message, category, filename, lineno, line='')) except IOError: pass warnings.showwarning = show_warning_without_quoting_line warnings.filterwarnings('always', category=cql3handling.UnexpectedTableStructure) def insert_driver_hooks(): extend_cql_deserialization() auto_format_udts() def extend_cql_deserialization(): """ The python driver returns BLOBs as string, but we expect them as bytearrays the implementation of cassandra.cqltypes.BytesType.deserialize. The deserializers package exists only when the driver has been compiled with cython extensions and cassandra.deserializers.DesBytesType replaces cassandra.cqltypes.BytesType.deserialize. DesBytesTypeByteArray is a fast deserializer that converts blobs into bytearrays but it was only introduced recently (3.1.0). If it is available we use it, otherwise we remove cassandra.deserializers.DesBytesType so that we fall back onto cassandra.cqltypes.BytesType.deserialize just like in the case where no cython extensions are present. """ if hasattr(cassandra, 'deserializers'): if hasattr(cassandra.deserializers, 'DesBytesTypeByteArray'): cassandra.deserializers.DesBytesType = cassandra.deserializers.DesBytesTypeByteArray else: del cassandra.deserializers.DesBytesType cassandra.cqltypes.BytesType.deserialize = staticmethod(lambda byts, protocol_version: bytearray(byts)) class DateOverFlowWarning(RuntimeWarning): pass # Native datetime types blow up outside of datetime.[MIN|MAX]_YEAR. We will fall back to an int timestamp def deserialize_date_fallback_int(byts, protocol_version): timestamp_ms = int64_unpack(byts) try: return datetime_from_timestamp(timestamp_ms / 1000.0) except OverflowError: warnings.warn(DateOverFlowWarning("Some timestamps are larger than Python datetime can represent. 
Timestamps are displayed in milliseconds from epoch.")) return timestamp_ms cassandra.cqltypes.DateType.deserialize = staticmethod(deserialize_date_fallback_int) if hasattr(cassandra, 'deserializers'): del cassandra.deserializers.DesDateType # Return cassandra.cqltypes.EMPTY instead of None for empty values cassandra.cqltypes.CassandraType.support_empty_values = True def auto_format_udts(): # when we see a new user defined type, set up the shell formatting for it udt_apply_params = cassandra.cqltypes.UserType.apply_parameters def new_apply_params(cls, *args, **kwargs): udt_class = udt_apply_params(*args, **kwargs) formatter_for(udt_class.typename)(format_value_utype) return udt_class cassandra.cqltypes.UserType.udt_apply_parameters = classmethod(new_apply_params) make_udt_class = cassandra.cqltypes.UserType.make_udt_class def new_make_udt_class(cls, *args, **kwargs): udt_class = make_udt_class(*args, **kwargs) formatter_for(udt_class.tuple_type.__name__)(format_value_utype) return udt_class cassandra.cqltypes.UserType.make_udt_class = classmethod(new_make_udt_class) class FrozenType(cassandra.cqltypes._ParameterizedType): """ Needed until the bundled python driver adds FrozenType. """ typename = "frozen" num_subtypes = 1 @classmethod def deserialize_safe(cls, byts, protocol_version): subtype, = cls.subtypes return subtype.from_binary(byts) @classmethod def serialize_safe(cls, val, protocol_version): subtype, = cls.subtypes return subtype.to_binary(val, protocol_version) class Shell(cmd.Cmd): custom_prompt = os.getenv('CQLSH_PROMPT', '') if custom_prompt is not '': custom_prompt += "\n" default_prompt = custom_prompt + "cqlsh> " continue_prompt = " ... " keyspace_prompt = custom_prompt + "cqlsh:%s> " keyspace_continue_prompt = "%s ... " show_line_nums = False debug = False stop = False last_hist = None shunted_query_out = None use_paging = True default_page_size = 100 def __init__(self, hostname, port, color=False, username=None, password=None, encoding=None, stdin=None, tty=True, completekey=DEFAULT_COMPLETEKEY, browser=None, use_conn=None, cqlver=DEFAULT_CQLVER, keyspace=None, tracing_enabled=False, expand_enabled=False, display_nanotime_format=DEFAULT_NANOTIME_FORMAT, display_timestamp_format=DEFAULT_TIMESTAMP_FORMAT, display_date_format=DEFAULT_DATE_FORMAT, display_float_precision=DEFAULT_FLOAT_PRECISION, display_timezone=None, max_trace_wait=DEFAULT_MAX_TRACE_WAIT, ssl=False, single_statement=None, request_timeout=DEFAULT_REQUEST_TIMEOUT_SECONDS, protocol_version=DEFAULT_PROTOCOL_VERSION, connect_timeout=DEFAULT_CONNECT_TIMEOUT_SECONDS): cmd.Cmd.__init__(self, completekey=completekey) self.hostname = hostname self.port = port self.auth_provider = None if username: if not password: password = getpass.getpass() self.auth_provider = PlainTextAuthProvider(username=username, password=password) self.username = username self.keyspace = keyspace self.ssl = ssl self.tracing_enabled = tracing_enabled self.page_size = self.default_page_size self.expand_enabled = expand_enabled if use_conn: self.conn = use_conn else: self.conn = Cluster(contact_points=(self.hostname,), port=self.port, cql_version=cqlver, protocol_version=protocol_version, auth_provider=self.auth_provider, ssl_options=sslhandling.ssl_settings(hostname, CONFIG_FILE) if ssl else None, load_balancing_policy=WhiteListRoundRobinPolicy([self.hostname]), control_connection_timeout=connect_timeout, connect_timeout=connect_timeout) self.owns_connection = not use_conn self.set_expanded_cql_version(cqlver) if keyspace: self.session = 
self.conn.connect(keyspace) else: self.session = self.conn.connect() if browser == "": browser = None self.browser = browser self.color = color self.display_nanotime_format = display_nanotime_format self.display_timestamp_format = display_timestamp_format self.display_date_format = display_date_format self.display_float_precision = display_float_precision self.display_timezone = display_timezone self.session.default_timeout = request_timeout self.session.row_factory = ordered_dict_factory self.session.default_consistency_level = cassandra.ConsistencyLevel.ONE self.get_connection_versions() self.current_keyspace = keyspace self.display_timestamp_format = display_timestamp_format self.display_nanotime_format = display_nanotime_format self.display_date_format = display_date_format self.max_trace_wait = max_trace_wait self.session.max_trace_wait = max_trace_wait self.tty = tty self.encoding = encoding self.check_windows_encoding() self.output_codec = codecs.lookup(encoding) self.statement = StringIO() self.lineno = 1 self.in_comment = False self.prompt = '' if stdin is None: stdin = sys.stdin if tty: self.reset_prompt() self.report_connection() print 'Use HELP for help.' else: self.show_line_nums = True self.stdin = stdin self.query_out = sys.stdout self.consistency_level = cassandra.ConsistencyLevel.ONE self.serial_consistency_level = cassandra.ConsistencyLevel.SERIAL self.empty_lines = 0 self.statement_error = False self.single_statement = single_statement @property def is_using_utf8(self): # utf8 encodings from https://docs.python.org/{2,3}/library/codecs.html return self.encoding.replace('-', '_').lower() in ['utf', 'utf_8', 'u8', 'utf8', CP65001] def check_windows_encoding(self): if is_win and os.name == 'nt' and self.tty and \ self.is_using_utf8 and sys.stdout.encoding != CP65001: self.printerr("\nWARNING: console codepage must be set to cp65001 " "to support {} encoding on Windows platforms.\n" "If you experience encoding problems, change your console" " codepage with 'chcp 65001' before starting cqlsh.\n".format(self.encoding)) def set_expanded_cql_version(self, ver): ver, vertuple = full_cql_version(ver) self.cql_version = ver self.cql_ver_tuple = vertuple def cqlver_atleast(self, major, minor=0, patch=0): return self.cql_ver_tuple[:3] >= (major, minor, patch) def myformat_value(self, val, **kwargs): if isinstance(val, DecodeError): self.decoding_errors.append(val) try: dtformats = DateTimeFormat(timestamp_format=self.display_timestamp_format, date_format=self.display_date_format, nanotime_format=self.display_nanotime_format, timezone=self.display_timezone) return format_value(val, self.output_codec.name, addcolor=self.color, date_time_format=dtformats, float_precision=self.display_float_precision, **kwargs) except Exception, e: err = FormatError(val, e) self.decoding_errors.append(err) return format_value(err, self.output_codec.name, addcolor=self.color) def myformat_colname(self, name, table_meta=None): column_colors = COLUMN_NAME_COLORS.copy() # check column role and color appropriately if table_meta: if name in [col.name for col in table_meta.partition_key]: column_colors.default_factory = lambda: RED elif name in [col.name for col in table_meta.clustering_key]: column_colors.default_factory = lambda: CYAN return self.myformat_value(name, colormap=column_colors) def report_connection(self): self.show_host() self.show_version() def show_host(self): print "Connected to %s at %s:%d." 
% \ (self.applycolor(self.get_cluster_name(), BLUE), self.hostname, self.port) def show_version(self): vers = self.connection_versions.copy() vers['shver'] = version # system.Versions['cql'] apparently does not reflect changes with # set_cql_version. vers['cql'] = self.cql_version print "[cqlsh %(shver)s | Cassandra %(build)s | CQL spec %(cql)s | Native protocol v%(protocol)s]" % vers def show_session(self, sessionid, partial_session=False): print_trace_session(self, self.session, sessionid, partial_session) def get_connection_versions(self): result, = self.session.execute("select * from system.local where key = 'local'") vers = { 'build': result['release_version'], 'protocol': result['native_protocol_version'], 'cql': result['cql_version'], } self.connection_versions = vers def get_keyspace_names(self): return map(str, self.conn.metadata.keyspaces.keys()) def get_columnfamily_names(self, ksname=None): if ksname is None: ksname = self.current_keyspace return map(str, self.get_keyspace_meta(ksname).tables.keys()) def get_materialized_view_names(self, ksname=None): if ksname is None: ksname = self.current_keyspace return map(str, self.get_keyspace_meta(ksname).views.keys()) def get_index_names(self, ksname=None): if ksname is None: ksname = self.current_keyspace return map(str, self.get_keyspace_meta(ksname).indexes.keys()) def get_column_names(self, ksname, cfname): if ksname is None: ksname = self.current_keyspace layout = self.get_table_meta(ksname, cfname) return [unicode(col) for col in layout.columns] def get_usertype_names(self, ksname=None): if ksname is None: ksname = self.current_keyspace return self.get_keyspace_meta(ksname).user_types.keys() def get_usertype_layout(self, ksname, typename): if ksname is None: ksname = self.current_keyspace ks_meta = self.get_keyspace_meta(ksname) try: user_type = ks_meta.user_types[typename] except KeyError: raise UserTypeNotFound("User type %r not found" % typename) return [(field_name, field_type.cql_parameterized_type()) for field_name, field_type in zip(user_type.field_names, user_type.field_types)] def get_userfunction_names(self, ksname=None): if ksname is None: ksname = self.current_keyspace return map(lambda f: f.name, self.get_keyspace_meta(ksname).functions.values()) def get_useraggregate_names(self, ksname=None): if ksname is None: ksname = self.current_keyspace return map(lambda f: f.name, self.get_keyspace_meta(ksname).aggregates.values()) def get_cluster_name(self): return self.conn.metadata.cluster_name def get_partitioner(self): return self.conn.metadata.partitioner def get_keyspace_meta(self, ksname): if ksname not in self.conn.metadata.keyspaces: raise KeyspaceNotFound('Keyspace %r not found.' % ksname) return self.conn.metadata.keyspaces[ksname] def get_keyspaces(self): return self.conn.metadata.keyspaces.values() def get_ring(self, ks): self.conn.metadata.token_map.rebuild_keyspace(ks, build_if_absent=True) return self.conn.metadata.token_map.tokens_to_hosts_by_ks[ks] def get_table_meta(self, ksname, tablename): if ksname is None: ksname = self.current_keyspace ksmeta = self.get_keyspace_meta(ksname) if tablename not in ksmeta.tables: if ksname == 'system_auth' and tablename in ['roles', 'role_permissions']: self.get_fake_auth_table_meta(ksname, tablename) else: raise ColumnFamilyNotFound("Column family %r not found" % tablename) else: return ksmeta.tables[tablename] def get_fake_auth_table_meta(self, ksname, tablename): # may be using external auth implementation so internal tables # aren't actually defined in schema. 
In this case, we'll fake # them up if tablename == 'roles': ks_meta = KeyspaceMetadata(ksname, True, None, None) table_meta = TableMetadata(ks_meta, 'roles') table_meta.columns['role'] = ColumnMetadata(table_meta, 'role', cassandra.cqltypes.UTF8Type) table_meta.columns['is_superuser'] = ColumnMetadata(table_meta, 'is_superuser', cassandra.cqltypes.BooleanType) table_meta.columns['can_login'] = ColumnMetadata(table_meta, 'can_login', cassandra.cqltypes.BooleanType) elif tablename == 'role_permissions': ks_meta = KeyspaceMetadata(ksname, True, None, None) table_meta = TableMetadata(ks_meta, 'role_permissions') table_meta.columns['role'] = ColumnMetadata(table_meta, 'role', cassandra.cqltypes.UTF8Type) table_meta.columns['resource'] = ColumnMetadata(table_meta, 'resource', cassandra.cqltypes.UTF8Type) table_meta.columns['permission'] = ColumnMetadata(table_meta, 'permission', cassandra.cqltypes.UTF8Type) else: raise ColumnFamilyNotFound("Column family %r not found" % tablename) def get_index_meta(self, ksname, idxname): if ksname is None: ksname = self.current_keyspace ksmeta = self.get_keyspace_meta(ksname) if idxname not in ksmeta.indexes: raise IndexNotFound("Index %r not found" % idxname) return ksmeta.indexes[idxname] def get_view_meta(self, ksname, viewname): if ksname is None: ksname = self.current_keyspace ksmeta = self.get_keyspace_meta(ksname) if viewname not in ksmeta.views: raise MaterializedViewNotFound("Materialized view %r not found" % viewname) return ksmeta.views[viewname] def get_object_meta(self, ks, name): if name is None: if ks and ks in self.conn.metadata.keyspaces: return self.conn.metadata.keyspaces[ks] elif self.current_keyspace is None: raise ObjectNotFound("%r not found in keyspaces" % (ks)) else: name = ks ks = self.current_keyspace if ks is None: ks = self.current_keyspace ksmeta = self.get_keyspace_meta(ks) if name in ksmeta.tables: return ksmeta.tables[name] elif name in ksmeta.indexes: return ksmeta.indexes[name] elif name in ksmeta.views: return ksmeta.views[name] raise ObjectNotFound("%r not found in keyspace %r" % (name, ks)) def get_usertypes_meta(self): data = self.session.execute("select * from system.schema_usertypes") if not data: return cql3handling.UserTypesMeta({}) return cql3handling.UserTypesMeta.from_layout(data) def get_trigger_names(self, ksname=None): if ksname is None: ksname = self.current_keyspace return [trigger.name for table in self.get_keyspace_meta(ksname).tables.values() for trigger in table.triggers.values()] def reset_statement(self): self.reset_prompt() self.statement.truncate(0) self.empty_lines = 0 def reset_prompt(self): if self.current_keyspace is None: self.set_prompt(self.default_prompt, True) else: self.set_prompt(self.keyspace_prompt % self.current_keyspace, True) def set_continue_prompt(self): if self.empty_lines >= 3: self.set_prompt("Statements are terminated with a ';'. You can press CTRL-C to cancel an incomplete statement.") self.empty_lines = 0 return if self.current_keyspace is None: self.set_prompt(self.continue_prompt) else: spaces = ' ' * len(str(self.current_keyspace)) self.set_prompt(self.keyspace_continue_prompt % spaces) self.empty_lines = self.empty_lines + 1 if not self.lastcmd else 0 @contextmanager def prepare_loop(self): readline = None if self.tty and self.completekey: try: import readline except ImportError: if is_win: print "WARNING: pyreadline dependency missing. Install to enable tab completion." 
pass else: old_completer = readline.get_completer() readline.set_completer(self.complete) if readline.__doc__ is not None and 'libedit' in readline.__doc__: readline.parse_and_bind("bind -e") readline.parse_and_bind("bind '" + self.completekey + "' rl_complete") readline.parse_and_bind("bind ^R em-inc-search-prev") else: readline.parse_and_bind(self.completekey + ": complete") try: yield finally: if readline is not None: readline.set_completer(old_completer) def get_input_line(self, prompt=''): if self.tty: try: self.lastcmd = raw_input(prompt).decode(self.encoding) except UnicodeDecodeError: self.lastcmd = '' traceback.print_exc() self.check_windows_encoding() line = self.lastcmd + '\n' else: self.lastcmd = self.stdin.readline() line = self.lastcmd if not len(line): raise EOFError self.lineno += 1 return line def use_stdin_reader(self, until='', prompt=''): until += '\n' while True: try: newline = self.get_input_line(prompt=prompt) except EOFError: return if newline == until: return yield newline def cmdloop(self): """ Adapted from cmd.Cmd's version, because there is literally no way with cmd.Cmd.cmdloop() to tell the difference between "EOF" showing up in input and an actual EOF. """ with self.prepare_loop(): while not self.stop: try: if self.single_statement: line = self.single_statement self.stop = True else: line = self.get_input_line(self.prompt) self.statement.write(line) if self.onecmd(self.statement.getvalue()): self.reset_statement() except EOFError: self.handle_eof() except CQL_ERRORS, cqlerr: self.printerr(cqlerr.message.decode(encoding='utf-8')) except KeyboardInterrupt: self.reset_statement() print def onecmd(self, statementtext): """ Returns true if the statement is complete and was handled (meaning it can be reset). """ try: statements, endtoken_escaped = cqlruleset.cql_split_statements(statementtext) except pylexotron.LexingError, e: if self.show_line_nums: self.printerr('Invalid syntax at char %d' % (e.charnum,)) else: self.printerr('Invalid syntax at line %d, char %d' % (e.linenum, e.charnum)) statementline = statementtext.split('\n')[e.linenum - 1] self.printerr(' %s' % statementline) self.printerr(' %s^' % (' ' * e.charnum)) return True while statements and not statements[-1]: statements = statements[:-1] if not statements: return True if endtoken_escaped or statements[-1][-1][0] != 'endtoken': self.set_continue_prompt() return for st in statements: try: self.handle_statement(st, statementtext) except Exception, e: if self.debug: traceback.print_exc() else: self.printerr(e) return True def handle_eof(self): if self.tty: print statement = self.statement.getvalue() if statement.strip(): if not self.onecmd(statement): self.printerr('Incomplete statement at end of file') self.do_exit() def handle_statement(self, tokens, srcstr): # Concat multi-line statements and insert into history if readline is not None: nl_count = srcstr.count("\n") new_hist = srcstr.replace("\n", " ").rstrip() if nl_count > 1 and self.last_hist != new_hist: readline.add_history(new_hist.encode(self.encoding)) self.last_hist = new_hist cmdword = tokens[0][1] if cmdword == '?': cmdword = 'help' custom_handler = getattr(self, 'do_' + cmdword.lower(), None) if custom_handler: parsed = cqlruleset.cql_whole_parse_tokens(tokens, srcstr=srcstr, startsymbol='cqlshCommand') if parsed and not parsed.remainder: # successful complete parse return custom_handler(parsed) else: return self.handle_parse_error(cmdword, tokens, parsed, srcstr) return self.perform_statement(cqlruleset.cql_extract_orig(tokens, srcstr)) 
def handle_parse_error(self, cmdword, tokens, parsed, srcstr): if cmdword.lower() in ('select', 'insert', 'update', 'delete', 'truncate', 'create', 'drop', 'alter', 'grant', 'revoke', 'batch', 'list'): # hey, maybe they know about some new syntax we don't. type # assumptions won't work, but maybe the query will. return self.perform_statement(cqlruleset.cql_extract_orig(tokens, srcstr)) if parsed: self.printerr('Improper %s command (problem at %r).' % (cmdword, parsed.remainder[0])) else: self.printerr('Improper %s command.' % cmdword) def do_use(self, parsed): ksname = parsed.get_binding('ksname') success, _ = self.perform_simple_statement(SimpleStatement(parsed.extract_orig())) if success: if ksname[0] == '"' and ksname[-1] == '"': self.current_keyspace = self.cql_unprotect_name(ksname) else: self.current_keyspace = ksname.lower() def do_select(self, parsed): tracing_was_enabled = self.tracing_enabled ksname = parsed.get_binding('ksname') stop_tracing = ksname == 'system_traces' or (ksname is None and self.current_keyspace == 'system_traces') self.tracing_enabled = self.tracing_enabled and not stop_tracing statement = parsed.extract_orig() self.perform_statement(statement) self.tracing_enabled = tracing_was_enabled def perform_statement(self, statement): stmt = SimpleStatement(statement, consistency_level=self.consistency_level, serial_consistency_level=self.serial_consistency_level, fetch_size=self.page_size if self.use_paging else None) success, future = self.perform_simple_statement(stmt) if future: if future.warnings: self.print_warnings(future.warnings) if self.tracing_enabled: try: for trace in future.get_all_query_traces(max_wait_per=self.max_trace_wait, query_cl=self.consistency_level): print_trace(self, trace) except TraceUnavailable: msg = "Statement trace did not complete within %d seconds; trace data may be incomplete." 
% (self.session.max_trace_wait,) self.writeresult(msg, color=RED) for trace_id in future.get_query_trace_ids(): self.show_session(trace_id, partial_session=True) except Exception, err: self.printerr("Unable to fetch query trace: %s" % (str(err),)) return success def parse_for_select_meta(self, query_string): try: parsed = cqlruleset.cql_parse(query_string)[1] except IndexError: return None ks = self.cql_unprotect_name(parsed.get_binding('ksname', None)) name = self.cql_unprotect_name(parsed.get_binding('cfname', None)) try: return self.get_table_meta(ks, name) except ColumnFamilyNotFound: try: return self.get_view_meta(ks, name) except MaterializedViewNotFound: raise ObjectNotFound("%r not found in keyspace %r" % (name, ks)) def parse_for_update_meta(self, query_string): try: parsed = cqlruleset.cql_parse(query_string)[1] except IndexError: return None ks = self.cql_unprotect_name(parsed.get_binding('ksname', None)) cf = self.cql_unprotect_name(parsed.get_binding('cfname')) return self.get_table_meta(ks, cf) def perform_simple_statement(self, statement): if not statement: return False, None future = self.session.execute_async(statement, trace=self.tracing_enabled) result = None try: result = future.result() except CQL_ERRORS, err: self.printerr(unicode(err.__class__.__name__) + u": " + err.message.decode(encoding='utf-8')) except Exception: import traceback self.printerr(traceback.format_exc()) # Even if statement failed we try to refresh schema if not agreed (see CASSANDRA-9689) if not future.is_schema_agreed: try: self.conn.refresh_schema_metadata(5) # will throw exception if there is a schema mismatch except Exception: self.printerr("Warning: schema version mismatch detected; check the schema versions of your " "nodes in system.local and system.peers.") self.conn.refresh_schema_metadata(-1) if result is None: return False, None if statement.query_string[:6].lower() == 'select': self.print_result(result, self.parse_for_select_meta(statement.query_string)) elif statement.query_string.lower().startswith("list users") or statement.query_string.lower().startswith("list roles"): self.print_result(result, self.get_table_meta('system_auth', 'roles')) elif statement.query_string.lower().startswith("list"): self.print_result(result, self.get_table_meta('system_auth', 'role_permissions')) elif result: # CAS INSERT/UPDATE self.writeresult("") self.print_static_result(result.column_names, list(result), self.parse_for_update_meta(statement.query_string)) self.flush_output() return True, future def print_result(self, result, table_meta): self.decoding_errors = [] self.writeresult("") if result.has_more_pages and self.tty: num_rows = 0 while True: page = result.current_rows if page: num_rows += len(page) self.print_static_result(result.column_names, page, table_meta) if result.has_more_pages: raw_input("---MORE---") result.fetch_next_page() else: break else: rows = list(result) num_rows = len(rows) self.print_static_result(result.column_names, rows, table_meta) self.writeresult("(%d rows)" % num_rows) if self.decoding_errors: for err in self.decoding_errors[:2]: self.writeresult(err.message(), color=RED) if len(self.decoding_errors) > 2: self.writeresult('%d more decoding errors suppressed.' 
% (len(self.decoding_errors) - 2), color=RED) def print_static_result(self, column_names, rows, table_meta): if not column_names and not table_meta: return column_names = column_names or table_meta.columns.keys() formatted_names = [self.myformat_colname(name, table_meta) for name in column_names] if not rows: # print header only self.print_formatted_result(formatted_names, None) return formatted_values = [map(self.myformat_value, row.values()) for row in rows] if self.expand_enabled: self.print_formatted_result_vertically(formatted_names, formatted_values) else: self.print_formatted_result(formatted_names, formatted_values) def print_formatted_result(self, formatted_names, formatted_values): # determine column widths widths = [n.displaywidth for n in formatted_names] if formatted_values is not None: for fmtrow in formatted_values: for num, col in enumerate(fmtrow): widths[num] = max(widths[num], col.displaywidth) # print header header = ' | '.join(hdr.ljust(w, color=self.color) for (hdr, w) in zip(formatted_names, widths)) self.writeresult(' ' + header.rstrip()) self.writeresult('-%s-' % '-+-'.join('-' * w for w in widths)) # stop if there are no rows if formatted_values is None: self.writeresult("") return # print row data for row in formatted_values: line = ' | '.join(col.rjust(w, color=self.color) for (col, w) in zip(row, widths)) self.writeresult(' ' + line) self.writeresult("") def print_formatted_result_vertically(self, formatted_names, formatted_values): max_col_width = max([n.displaywidth for n in formatted_names]) max_val_width = max([n.displaywidth for row in formatted_values for n in row]) # for each row returned, list all the column-value pairs for row_id, row in enumerate(formatted_values): self.writeresult("@ Row %d" % (row_id + 1)) self.writeresult('-%s-' % '-+-'.join(['-' * max_col_width, '-' * max_val_width])) for field_id, field in enumerate(row): column = formatted_names[field_id].ljust(max_col_width, color=self.color) value = field.ljust(field.displaywidth, color=self.color) self.writeresult(' ' + " | ".join([column, value])) self.writeresult('') def print_warnings(self, warnings): if warnings is None or len(warnings) == 0: return self.writeresult('') self.writeresult('Warnings :') for warning in warnings: self.writeresult(warning) self.writeresult('') def emptyline(self): pass def parseline(self, line): # this shouldn't be needed raise NotImplementedError def complete(self, text, state): if readline is None: return if state == 0: try: self.completion_matches = self.find_completions(text) except Exception: if debug_completion: import traceback traceback.print_exc() else: raise try: return self.completion_matches[state] except IndexError: return None def find_completions(self, text): curline = readline.get_line_buffer() prevlines = self.statement.getvalue() wholestmt = prevlines + curline begidx = readline.get_begidx() + len(prevlines) stuff_to_complete = wholestmt[:begidx] return cqlruleset.cql_complete(stuff_to_complete, text, cassandra_conn=self, debug=debug_completion, startsymbol='cqlshCommand') def set_prompt(self, prompt, prepend_user=False): if prepend_user and self.username: self.prompt = "%s@%s" % (self.username, prompt) return self.prompt = prompt def cql_unprotect_name(self, namestr): if namestr is None: return return cqlruleset.dequote_name(namestr) def cql_unprotect_value(self, valstr): if valstr is not None: return cqlruleset.dequote_value(valstr) def print_recreate_keyspace(self, ksdef, out): out.write(ksdef.export_as_string()) out.write("\n") def 
print_recreate_columnfamily(self, ksname, cfname, out): """ Output CQL commands which should be pasteable back into a CQL session to recreate the given table. Writes output to the given out stream. """ out.write(self.get_table_meta(ksname, cfname).export_as_string()) out.write("\n") def print_recreate_index(self, ksname, idxname, out): """ Output CQL commands which should be pasteable back into a CQL session to recreate the given index. Writes output to the given out stream. """ out.write(self.get_index_meta(ksname, idxname).export_as_string()) out.write("\n") def print_recreate_materialized_view(self, ksname, viewname, out): """ Output CQL commands which should be pasteable back into a CQL session to recreate the given materialized view. Writes output to the given out stream. """ out.write(self.get_view_meta(ksname, viewname).export_as_string()) out.write("\n") def print_recreate_object(self, ks, name, out): """ Output CQL commands which should be pasteable back into a CQL session to recreate the given object (ks, table or index). Writes output to the given out stream. """ out.write(self.get_object_meta(ks, name).export_as_string()) out.write("\n") def describe_keyspaces(self): print cmd.Cmd.columnize(self, protect_names(self.get_keyspace_names())) print def describe_keyspace(self, ksname): print self.print_recreate_keyspace(self.get_keyspace_meta(ksname), sys.stdout) print def describe_columnfamily(self, ksname, cfname): if ksname is None: ksname = self.current_keyspace if ksname is None: raise NoKeyspaceError("No keyspace specified and no current keyspace") print self.print_recreate_columnfamily(ksname, cfname, sys.stdout) print def describe_index(self, ksname, idxname): print self.print_recreate_index(ksname, idxname, sys.stdout) print def describe_materialized_view(self, ksname, viewname): if ksname is None: ksname = self.current_keyspace if ksname is None: raise NoKeyspaceError("No keyspace specified and no current keyspace") print self.print_recreate_materialized_view(ksname, viewname, sys.stdout) print def describe_object(self, ks, name): print self.print_recreate_object(ks, name, sys.stdout) print def describe_columnfamilies(self, ksname): print if ksname is None: for k in self.get_keyspaces(): name = protect_name(k.name) print 'Keyspace %s' % (name,) print '---------%s' % ('-' * len(name)) cmd.Cmd.columnize(self, protect_names(self.get_columnfamily_names(k.name))) print else: cmd.Cmd.columnize(self, protect_names(self.get_columnfamily_names(ksname))) print def describe_functions(self, ksname): print if ksname is None: for ksmeta in self.get_keyspaces(): name = protect_name(ksmeta.name) print 'Keyspace %s' % (name,) print '---------%s' % ('-' * len(name)) cmd.Cmd.columnize(self, protect_names(ksmeta.functions.keys())) print else: ksmeta = self.get_keyspace_meta(ksname) cmd.Cmd.columnize(self, protect_names(ksmeta.functions.keys())) print def describe_function(self, ksname, functionname): if ksname is None: ksname = self.current_keyspace if ksname is None: raise NoKeyspaceError("No keyspace specified and no current keyspace") print ksmeta = self.get_keyspace_meta(ksname) functions = filter(lambda f: f.name == functionname, ksmeta.functions.values()) if len(functions) == 0: raise FunctionNotFound("User defined function %r not found" % functionname) print "\n\n".join(func.export_as_string() for func in functions) print def describe_aggregates(self, ksname): print if ksname is None: for ksmeta in self.get_keyspaces(): name = protect_name(ksmeta.name) print 'Keyspace %s' % (name,) 
print '---------%s' % ('-' * len(name)) cmd.Cmd.columnize(self, protect_names(ksmeta.aggregates.keys())) print else: ksmeta = self.get_keyspace_meta(ksname) cmd.Cmd.columnize(self, protect_names(ksmeta.aggregates.keys())) print def describe_aggregate(self, ksname, aggregatename): if ksname is None: ksname = self.current_keyspace if ksname is None: raise NoKeyspaceError("No keyspace specified and no current keyspace") print ksmeta = self.get_keyspace_meta(ksname) aggregates = filter(lambda f: f.name == aggregatename, ksmeta.aggregates.values()) if len(aggregates) == 0: raise FunctionNotFound("User defined aggregate %r not found" % aggregatename) print "\n\n".join(aggr.export_as_string() for aggr in aggregates) print def describe_usertypes(self, ksname): print if ksname is None: for ksmeta in self.get_keyspaces(): name = protect_name(ksmeta.name) print 'Keyspace %s' % (name,) print '---------%s' % ('-' * len(name)) cmd.Cmd.columnize(self, protect_names(ksmeta.user_types.keys())) print else: ksmeta = self.get_keyspace_meta(ksname) cmd.Cmd.columnize(self, protect_names(ksmeta.user_types.keys())) print def describe_usertype(self, ksname, typename): if ksname is None: ksname = self.current_keyspace if ksname is None: raise NoKeyspaceError("No keyspace specified and no current keyspace") print ksmeta = self.get_keyspace_meta(ksname) try: usertype = ksmeta.user_types[typename] except KeyError: raise UserTypeNotFound("User type %r not found" % typename) print usertype.export_as_string() print def describe_cluster(self): print '\nCluster: %s' % self.get_cluster_name() p = trim_if_present(self.get_partitioner(), 'org.apache.cassandra.dht.') print 'Partitioner: %s\n' % p # TODO: snitch? # snitch = trim_if_present(self.get_snitch(), 'org.apache.cassandra.locator.') # print 'Snitch: %s\n' % snitch if self.current_keyspace is not None and self.current_keyspace != 'system': print "Range ownership:" ring = self.get_ring(self.current_keyspace) for entry in ring.items(): print ' %39s [%s]' % (str(entry[0].value), ', '.join([host.address for host in entry[1]])) print def describe_schema(self, include_system=False): print for k in self.get_keyspaces(): if include_system or k.name not in cql3handling.SYSTEM_KEYSPACES: self.print_recreate_keyspace(k, sys.stdout) print def do_describe(self, parsed): """ DESCRIBE [cqlsh only] (DESC may be used as a shorthand.) Outputs information about the connected Cassandra cluster, or about the data objects stored in the cluster. Use in one of the following ways: DESCRIBE KEYSPACES Output the names of all keyspaces. DESCRIBE KEYSPACE [<keyspacename>] Output CQL commands that could be used to recreate the given keyspace, and the objects in it (such as tables, types, functions, etc.). In some cases, as the CQL interface matures, there will be some metadata about a keyspace that is not representable with CQL. That metadata will not be shown. The '<keyspacename>' argument may be omitted, in which case the current keyspace will be described. DESCRIBE TABLES Output the names of all tables in the current keyspace, or in all keyspaces if there is no current keyspace. DESCRIBE TABLE [<keyspace>.]<tablename> Output CQL commands that could be used to recreate the given table. In some cases, as above, there may be table metadata which is not representable and which will not be shown. DESCRIBE INDEX <indexname> Output the CQL command that could be used to recreate the given index. In some cases, there may be index metadata which is not representable and which will not be shown. 
DESCRIBE MATERIALIZED VIEW <viewname> Output the CQL command that could be used to recreate the given materialized view. In some cases, there may be materialized view metadata which is not representable and which will not be shown. DESCRIBE CLUSTER Output information about the connected Cassandra cluster, such as the cluster name, and the partitioner and snitch in use. When you are connected to a non-system keyspace, also shows endpoint-range ownership information for the Cassandra ring. DESCRIBE [FULL] SCHEMA Output CQL commands that could be used to recreate the entire (non-system) schema. Works as though "DESCRIBE KEYSPACE k" was invoked for each non-system keyspace k. Use DESCRIBE FULL SCHEMA to include the system keyspaces. DESCRIBE TYPES Output the names of all user-defined-types in the current keyspace, or in all keyspaces if there is no current keyspace. DESCRIBE TYPE [<keyspace>.]<type> Output the CQL command that could be used to recreate the given user-defined-type. DESCRIBE FUNCTIONS Output the names of all user-defined-functions in the current keyspace, or in all keyspaces if there is no current keyspace. DESCRIBE FUNCTION [<keyspace>.]<function> Output the CQL command that could be used to recreate the given user-defined-function. DESCRIBE AGGREGATES Output the names of all user-defined-aggregates in the current keyspace, or in all keyspaces if there is no current keyspace. DESCRIBE AGGREGATE [<keyspace>.]<aggregate> Output the CQL command that could be used to recreate the given user-defined-aggregate. DESCRIBE <objname> Output CQL commands that could be used to recreate the entire object schema, where object can be either a keyspace or a table or an index or a materialized view (in this order). """ what = parsed.matched[1][1].lower() if what == 'functions': self.describe_functions(self.current_keyspace) elif what == 'function': ksname = self.cql_unprotect_name(parsed.get_binding('ksname', None)) functionname = self.cql_unprotect_name(parsed.get_binding('udfname')) self.describe_function(ksname, functionname) elif what == 'aggregates': self.describe_aggregates(self.current_keyspace) elif what == 'aggregate': ksname = self.cql_unprotect_name(parsed.get_binding('ksname', None)) aggregatename = self.cql_unprotect_name(parsed.get_binding('udaname')) self.describe_aggregate(ksname, aggregatename) elif what == 'keyspaces': self.describe_keyspaces() elif what == 'keyspace': ksname = self.cql_unprotect_name(parsed.get_binding('ksname', '')) if not ksname: ksname = self.current_keyspace if ksname is None: self.printerr('Not in any keyspace.') return self.describe_keyspace(ksname) elif what in ('columnfamily', 'table'): ks = self.cql_unprotect_name(parsed.get_binding('ksname', None)) cf = self.cql_unprotect_name(parsed.get_binding('cfname')) self.describe_columnfamily(ks, cf) elif what == 'index': ks = self.cql_unprotect_name(parsed.get_binding('ksname', None)) idx = self.cql_unprotect_name(parsed.get_binding('idxname', None)) self.describe_index(ks, idx) elif what == 'materialized' and parsed.matched[2][1].lower() == 'view': ks = self.cql_unprotect_name(parsed.get_binding('ksname', None)) mv = self.cql_unprotect_name(parsed.get_binding('mvname')) self.describe_materialized_view(ks, mv) elif what in ('columnfamilies', 'tables'): self.describe_columnfamilies(self.current_keyspace) elif what == 'types': self.describe_usertypes(self.current_keyspace) elif what == 'type': ks = self.cql_unprotect_name(parsed.get_binding('ksname', None)) ut = 
self.cql_unprotect_name(parsed.get_binding('utname')) self.describe_usertype(ks, ut) elif what == 'cluster': self.describe_cluster() elif what == 'schema': self.describe_schema(False) elif what == 'full' and parsed.matched[2][1].lower() == 'schema': self.describe_schema(True) elif what: ks = self.cql_unprotect_name(parsed.get_binding('ksname', None)) name = self.cql_unprotect_name(parsed.get_binding('cfname')) if not name: name = self.cql_unprotect_name(parsed.get_binding('idxname', None)) if not name: name = self.cql_unprotect_name(parsed.get_binding('mvname', None)) self.describe_object(ks, name) do_desc = do_describe def do_copy(self, parsed): r""" COPY [cqlsh only] COPY x FROM: Imports CSV data into a Cassandra table COPY x TO: Exports data from a Cassandra table in CSV format. COPY <table_name> [ ( column [, ...] ) ] FROM ( '<file_pattern_1, file_pattern_2, ... file_pattern_n>' | STDIN ) [ WITH <option>='value' [AND ...] ]; File patterns are either file names or valid python glob expressions, e.g. *.csv or folder/*.csv. COPY <table_name> [ ( column [, ...] ) ] TO ( '<filename>' | STDOUT ) [ WITH <option>='value' [AND ...] ]; Available common COPY options and defaults: DELIMITER=',' - character that appears between records QUOTE='"' - quoting character to be used to quote fields ESCAPE='\' - character to appear before the QUOTE char when quoted HEADER=false - whether to ignore the first line NULL='' - string that represents a null value DATETIMEFORMAT= - timestamp strftime format '%Y-%m-%d %H:%M:%S%z' defaults to time_format value in cqlshrc MAXATTEMPTS=5 - the maximum number of attempts per batch or range REPORTFREQUENCY=0.25 - the frequency with which we display status updates in seconds DECIMALSEP='.' - the separator for decimal values THOUSANDSSEP='' - the separator for thousands digit groups BOOLSTYLE='True,False' - the representation for booleans, case insensitive, specify true followed by false, for example yes,no or 1,0 NUMPROCESSES=n - the number of worker processes, by default the number of cores minus one capped at 16 CONFIGFILE='' - a configuration file with the same format as .cqlshrc (see the Python ConfigParser documentation) where you can specify WITH options under the following optional sections: [copy], [copy-to], [copy-from], [copy:ks.table], [copy-to:ks.table], [copy-from:ks.table], where <ks> is your keyspace name and <table> is your table name. Options are read from these sections, in the order specified above, and command line options always override options in configuration files. Depending on the COPY direction, only the relevant copy-from or copy-to sections are used. If no configfile is specified then .cqlshrc is searched instead. 
RATEFILE='' - an optional file where to print the output statistics Available COPY FROM options and defaults: CHUNKSIZE=5000 - the size of chunks passed to worker processes INGESTRATE=100000 - an approximate ingest rate in rows per second MINBATCHSIZE=10 - the minimum size of an import batch MAXBATCHSIZE=20 - the maximum size of an import batch MAXROWS=-1 - the maximum number of rows, -1 means no maximum SKIPROWS=0 - the number of rows to skip SKIPCOLS='' - a comma separated list of column names to skip MAXPARSEERRORS=-1 - the maximum global number of parsing errors, -1 means no maximum MAXINSERTERRORS=1000 - the maximum global number of insert errors, -1 means no maximum ERRFILE='' - a file where to store all rows that could not be imported, by default this is import_ks_table.err where <ks> is your keyspace and <table> is your table name. PREPAREDSTATEMENTS=True - whether to use prepared statements when importing, by default True. Set this to False if you don't mind shifting data parsing to the cluster. The cluster will also have to compile every batch statement. For large and oversized clusters this will result in a faster import but for smaller clusters it may generate timeouts. Available COPY TO options and defaults: ENCODING='utf8' - encoding for CSV output PAGESIZE='1000' - the page size for fetching results PAGETIMEOUT=10 - the page timeout in seconds for fetching results BEGINTOKEN='' - the minimum token string to consider when exporting data ENDTOKEN='' - the maximum token string to consider when exporting data MAXREQUESTS=6 - the maximum number of requests each worker process can work on in parallel MAXOUTPUTSIZE='-1' - the maximum size of the output file measured in number of lines, beyond this maximum the output file will be split into segments, -1 means unlimited. When entering CSV data on STDIN, you can use the sequence "\." on a line by itself to end the data input. """ ks = self.cql_unprotect_name(parsed.get_binding('ksname', None)) if ks is None: ks = self.current_keyspace if ks is None: raise NoKeyspaceError("Not in any keyspace.") table = self.cql_unprotect_name(parsed.get_binding('cfname')) columns = parsed.get_binding('colnames', None) if columns is not None: columns = map(self.cql_unprotect_name, columns) else: # default to all known columns columns = self.get_column_names(ks, table) fname = parsed.get_binding('fname', None) if fname is not None: fname = self.cql_unprotect_value(fname) copyoptnames = map(str.lower, parsed.get_binding('optnames', ())) copyoptvals = map(self.cql_unprotect_value, parsed.get_binding('optvals', ())) opts = dict(zip(copyoptnames, copyoptvals)) direction = parsed.get_binding('dir').upper() if direction == 'FROM': task = ImportTask(self, ks, table, columns, fname, opts, DEFAULT_PROTOCOL_VERSION, CONFIG_FILE) elif direction == 'TO': task = ExportTask(self, ks, table, columns, fname, opts, DEFAULT_PROTOCOL_VERSION, CONFIG_FILE) else: raise SyntaxError("Unknown direction %s" % direction) task.run() def do_show(self, parsed): """ SHOW [cqlsh only] Displays information about the current cqlsh session. Can be called in the following ways: SHOW VERSION Shows the version and build of the connected Cassandra instance, as well as the versions of the CQL spec and the Thrift protocol that the connected Cassandra instance understands. SHOW HOST Shows where cqlsh is currently connected. SHOW SESSION <sessionid> Pretty-prints the requested tracing session. 
""" showwhat = parsed.get_binding('what').lower() if showwhat == 'version': self.get_connection_versions() self.show_version() elif showwhat == 'host': self.show_host() elif showwhat.startswith('session'): session_id = parsed.get_binding('sessionid').lower() self.show_session(UUID(session_id)) else: self.printerr('Wait, how do I show %r?' % (showwhat,)) def do_source(self, parsed): """ SOURCE [cqlsh only] Executes a file containing CQL statements. Gives the output for each statement in turn, if any, or any errors that occur along the way. Errors do NOT abort execution of the CQL source file. Usage: SOURCE '<file>'; That is, the path to the file to be executed must be given inside a string literal. The path is interpreted relative to the current working directory. The tilde shorthand notation ('~/mydir') is supported for referring to $HOME. See also the --file option to cqlsh. """ fname = parsed.get_binding('fname') fname = os.path.expanduser(self.cql_unprotect_value(fname)) try: encoding, bom_size = get_file_encoding_bomsize(fname) f = codecs.open(fname, 'r', encoding) f.seek(bom_size) except IOError, e: self.printerr('Could not open %r: %s' % (fname, e)) return username = self.auth_provider.username if self.auth_provider else None password = self.auth_provider.password if self.auth_provider else None subshell = Shell(self.hostname, self.port, color=self.color, username=username, password=password, encoding=self.encoding, stdin=f, tty=False, use_conn=self.conn, cqlver=self.cql_version, keyspace=self.current_keyspace, tracing_enabled=self.tracing_enabled, display_nanotime_format=self.display_nanotime_format, display_timestamp_format=self.display_timestamp_format, display_date_format=self.display_date_format, display_float_precision=self.display_float_precision, display_timezone=self.display_timezone, max_trace_wait=self.max_trace_wait, ssl=self.ssl, request_timeout=self.session.default_timeout, connect_timeout=self.conn.connect_timeout) subshell.cmdloop() f.close() def do_capture(self, parsed): """ CAPTURE [cqlsh only] Begins capturing command output and appending it to a specified file. Output will not be shown at the console while it is captured. Usage: CAPTURE '<file>'; CAPTURE OFF; CAPTURE; That is, the path to the file to be appended to must be given inside a string literal. The path is interpreted relative to the current working directory. The tilde shorthand notation ('~/mydir') is supported for referring to $HOME. Only query result output is captured. Errors and output from cqlsh-only commands will still be shown in the cqlsh session. To stop capturing output and show it in the cqlsh session again, use CAPTURE OFF. To inspect the current capture configuration, use CAPTURE with no arguments. """ fname = parsed.get_binding('fname') if fname is None: if self.shunted_query_out is not None: print "Currently capturing query output to %r." % (self.query_out.name,) else: print "Currently not capturing query output." return if fname.upper() == 'OFF': if self.shunted_query_out is None: self.printerr('Not currently capturing output.') return self.query_out.close() self.query_out = self.shunted_query_out self.color = self.shunted_color self.shunted_query_out = None del self.shunted_color return if self.shunted_query_out is not None: self.printerr('Already capturing output to %s. Use CAPTURE OFF' ' to disable.' 
% (self.query_out.name,)) return fname = os.path.expanduser(self.cql_unprotect_value(fname)) try: f = open(fname, 'a') except IOError, e: self.printerr('Could not open %r for append: %s' % (fname, e)) return self.shunted_query_out = self.query_out self.shunted_color = self.color self.query_out = f self.color = False print 'Now capturing query output to %r.' % (fname,) def do_tracing(self, parsed): """ TRACING [cqlsh] Enables or disables request tracing. TRACING ON Enables tracing for all further requests. TRACING OFF Disables tracing. TRACING TRACING with no arguments shows the current tracing status. """ self.tracing_enabled = SwitchCommand("TRACING", "Tracing").execute(self.tracing_enabled, parsed, self.printerr) def do_expand(self, parsed): """ EXPAND [cqlsh] Enables or disables expanded (vertical) output. EXPAND ON Enables expanded (vertical) output. EXPAND OFF Disables expanded (vertical) output. EXPAND EXPAND with no arguments shows the current value of expand setting. """ self.expand_enabled = SwitchCommand("EXPAND", "Expanded output").execute(self.expand_enabled, parsed, self.printerr) def do_consistency(self, parsed): """ CONSISTENCY [cqlsh only] Overrides default consistency level (default level is ONE). CONSISTENCY <level> Sets consistency level for future requests. Valid consistency levels: ANY, ONE, TWO, THREE, QUORUM, ALL, LOCAL_ONE, LOCAL_QUORUM, EACH_QUORUM, SERIAL and LOCAL_SERIAL. SERIAL and LOCAL_SERIAL may be used only for SELECTs; will be rejected with updates. CONSISTENCY CONSISTENCY with no arguments shows the current consistency level. """ level = parsed.get_binding('level') if level is None: print 'Current consistency level is %s.' % (cassandra.ConsistencyLevel.value_to_name[self.consistency_level]) return self.consistency_level = cassandra.ConsistencyLevel.name_to_value[level.upper()] print 'Consistency level set to %s.' % (level.upper(),) def do_serial(self, parsed): """ SERIAL CONSISTENCY [cqlsh only] Overrides serial consistency level (default level is SERIAL). SERIAL CONSISTENCY <level> Sets consistency level for future conditional updates. Valid consistency levels: SERIAL, LOCAL_SERIAL. SERIAL CONSISTENCY SERIAL CONSISTENCY with no arguments shows the current consistency level. """ level = parsed.get_binding('level') if level is None: print 'Current serial consistency level is %s.' % (cassandra.ConsistencyLevel.value_to_name[self.serial_consistency_level]) return self.serial_consistency_level = cassandra.ConsistencyLevel.name_to_value[level.upper()] print 'Serial consistency level set to %s.' % (level.upper(),) def do_login(self, parsed): """ LOGIN [cqlsh only] Changes login information without requiring restart. LOGIN <username> (<password>) Login using the specified username. If password is specified, it will be used otherwise, you will be prompted to enter. 
""" username = parsed.get_binding('username') password = parsed.get_binding('password') if password is None: password = getpass.getpass() else: password = password[1:-1] auth_provider = PlainTextAuthProvider(username=username, password=password) conn = Cluster(contact_points=(self.hostname,), port=self.port, cql_version=self.conn.cql_version, protocol_version=self.conn.protocol_version, auth_provider=auth_provider, ssl_options=self.conn.ssl_options, load_balancing_policy=WhiteListRoundRobinPolicy([self.hostname]), control_connection_timeout=self.conn.connect_timeout, connect_timeout=self.conn.connect_timeout) if self.current_keyspace: session = conn.connect(self.current_keyspace) else: session = conn.connect() # Update after we've connected in case we fail to authenticate self.conn = conn self.auth_provider = auth_provider self.username = username self.session = session def do_exit(self, parsed=None): """ EXIT/QUIT [cqlsh only] Exits cqlsh. """ self.stop = True if self.owns_connection: self.conn.shutdown() do_quit = do_exit def do_clear(self, parsed): """ CLEAR/CLS [cqlsh only] Clears the console. """ import subprocess subprocess.call(['clear', 'cls'][is_win], shell=True) do_cls = do_clear def do_debug(self, parsed): import pdb pdb.set_trace() def get_help_topics(self): topics = [t[3:] for t in dir(self) if t.startswith('do_') and getattr(self, t, None).__doc__] for hide_from_help in ('quit',): topics.remove(hide_from_help) return topics def columnize(self, slist, *a, **kw): return cmd.Cmd.columnize(self, sorted([u.upper() for u in slist]), *a, **kw) def do_help(self, parsed): """ HELP [cqlsh only] Gives information about cqlsh commands. To see available topics, enter "HELP" without any arguments. To see help on a topic, use "HELP <topic>". """ topics = parsed.get_binding('topic', ()) if not topics: shell_topics = [t.upper() for t in self.get_help_topics()] self.print_topics("\nDocumented shell commands:", shell_topics, 15, 80) cql_topics = [t.upper() for t in cqldocs.get_help_topics()] self.print_topics("CQL help topics:", cql_topics, 15, 80) return for t in topics: if t.lower() in self.get_help_topics(): doc = getattr(self, 'do_' + t.lower()).__doc__ self.stdout.write(doc + "\n") elif t.lower() in cqldocs.get_help_topics(): urlpart = cqldocs.get_help_topic(t) if urlpart is not None: url = "%s#%s" % (CASSANDRA_CQL_HTML, urlpart) if len(webbrowser._tryorder) == 0: self.printerr("*** No browser to display CQL help. URL for help topic %s : %s" % (t, url)) elif self.browser is not None: webbrowser.get(self.browser).open_new_tab(url) else: webbrowser.open_new_tab(url) else: self.printerr("*** No help on %s" % (t,)) def do_unicode(self, parsed): """ Textual input/output When control characters, or other characters which can't be encoded in your current locale, are found in values of 'text' or 'ascii' types, it will be shown as a backslash escape. If color is enabled, any such backslash escapes will be shown in a different color from the surrounding text. Unicode code points in your data will be output intact, if the encoding for your locale is capable of decoding them. If you prefer that non-ascii characters be shown with Python-style "\\uABCD" escape sequences, invoke cqlsh with an ASCII locale (for example, by setting the $LANG environment variable to "C"). """ def do_paging(self, parsed): """ PAGING [cqlsh] Enables or disables query paging. PAGING ON Enables query paging for all further queries. PAGING OFF Disables paging. 
PAGING PAGING with no arguments shows the current query paging status. """ (self.use_paging, requested_page_size) = SwitchCommandWithValue( "PAGING", "Query paging", value_type=int).execute(self.use_paging, parsed, self.printerr) if self.use_paging and requested_page_size is not None: self.page_size = requested_page_size if self.use_paging: print("Page size: {}".format(self.page_size)) else: self.page_size = self.default_page_size def applycolor(self, text, color=None): if not color or not self.color: return text return color + text + ANSI_RESET def writeresult(self, text, color=None, newline=True, out=None): if out is None: out = self.query_out # convert Exceptions, etc to text if not isinstance(text, (unicode, str)): text = unicode(text) if isinstance(text, unicode): text = text.encode(self.encoding) to_write = self.applycolor(text, color) + ('\n' if newline else '') out.write(to_write) def flush_output(self): self.query_out.flush() def printerr(self, text, color=RED, newline=True, shownum=None): self.statement_error = True if shownum is None: shownum = self.show_line_nums if shownum: text = '%s:%d:%s' % (self.stdin.name, self.lineno, text) self.writeresult(text, color, newline=newline, out=sys.stderr) class SwitchCommand(object): command = None description = None def __init__(self, command, desc): self.command = command self.description = desc def execute(self, state, parsed, printerr): switch = parsed.get_binding('switch') if switch is None: if state: print "%s is currently enabled. Use %s OFF to disable" \ % (self.description, self.command) else: print "%s is currently disabled. Use %s ON to enable." \ % (self.description, self.command) return state if switch.upper() == 'ON': if state: printerr('%s is already enabled. Use %s OFF to disable.' % (self.description, self.command)) return state print 'Now %s is enabled' % (self.description,) return True if switch.upper() == 'OFF': if not state: printerr('%s is not enabled.' % (self.description,)) return state print 'Disabled %s.' % (self.description,) return False class SwitchCommandWithValue(SwitchCommand): """The same as SwitchCommand except it also accepts a value in place of ON. This returns a tuple of the form: (SWITCH_VALUE, PASSED_VALUE) eg: PAGING 50 returns (True, 50) PAGING OFF returns (False, None) PAGING ON returns (True, None) The value_type must match for the PASSED_VALUE, otherwise it will return None. """ def __init__(self, command, desc, value_type=int): SwitchCommand.__init__(self, command, desc) self.value_type = value_type def execute(self, state, parsed, printerr): binary_switch_value = SwitchCommand.execute(self, state, parsed, printerr) switch = parsed.get_binding('switch') try: value = self.value_type(switch) binary_switch_value = True except (ValueError, TypeError): value = None return (binary_switch_value, value) def option_with_default(cparser_getter, section, option, default=None): try: return cparser_getter(section, option) except ConfigParser.Error: return default def raw_option_with_default(configs, section, option, default=None): """ Same (almost) as option_with_default() but won't do any string interpolation. Useful for config values that include '%' symbol, e.g. time format string. 
""" try: return configs.get(section, option, raw=True) except ConfigParser.Error: return default def should_use_color(): if not sys.stdout.isatty(): return False if os.environ.get('TERM', '') in ('dumb', ''): return False try: import subprocess p = subprocess.Popen(['tput', 'colors'], stdout=subprocess.PIPE) stdout, _ = p.communicate() if int(stdout.strip()) < 8: return False except (OSError, ImportError, ValueError): # oh well, we tried. at least we know there's a $TERM and it's # not "dumb". pass return True def read_options(cmdlineargs, environment): configs = ConfigParser.SafeConfigParser() configs.read(CONFIG_FILE) rawconfigs = ConfigParser.RawConfigParser() rawconfigs.read(CONFIG_FILE) optvalues = optparse.Values() optvalues.username = option_with_default(configs.get, 'authentication', 'username') optvalues.password = option_with_default(rawconfigs.get, 'authentication', 'password') optvalues.keyspace = option_with_default(configs.get, 'authentication', 'keyspace') optvalues.browser = option_with_default(configs.get, 'ui', 'browser', None) optvalues.completekey = option_with_default(configs.get, 'ui', 'completekey', DEFAULT_COMPLETEKEY) optvalues.color = option_with_default(configs.getboolean, 'ui', 'color') optvalues.time_format = raw_option_with_default(configs, 'ui', 'time_format', DEFAULT_TIMESTAMP_FORMAT) optvalues.nanotime_format = raw_option_with_default(configs, 'ui', 'nanotime_format', DEFAULT_NANOTIME_FORMAT) optvalues.date_format = raw_option_with_default(configs, 'ui', 'date_format', DEFAULT_DATE_FORMAT) optvalues.float_precision = option_with_default(configs.getint, 'ui', 'float_precision', DEFAULT_FLOAT_PRECISION) optvalues.field_size_limit = option_with_default(configs.getint, 'csv', 'field_size_limit', csv.field_size_limit()) optvalues.max_trace_wait = option_with_default(configs.getfloat, 'tracing', 'max_trace_wait', DEFAULT_MAX_TRACE_WAIT) optvalues.timezone = option_with_default(configs.get, 'ui', 'timezone', None) optvalues.debug = False optvalues.file = None optvalues.ssl = False optvalues.encoding = option_with_default(configs.get, 'ui', 'encoding', UTF8) optvalues.tty = option_with_default(configs.getboolean, 'ui', 'tty', sys.stdin.isatty()) optvalues.cqlversion = option_with_default(configs.get, 'cql', 'version', DEFAULT_CQLVER) optvalues.connect_timeout = option_with_default(configs.getint, 'connection', 'timeout', DEFAULT_CONNECT_TIMEOUT_SECONDS) optvalues.request_timeout = option_with_default(configs.getint, 'connection', 'request_timeout', DEFAULT_REQUEST_TIMEOUT_SECONDS) optvalues.execute = None (options, arguments) = parser.parse_args(cmdlineargs, values=optvalues) hostname = option_with_default(configs.get, 'connection', 'hostname', DEFAULT_HOST) port = option_with_default(configs.get, 'connection', 'port', DEFAULT_PORT) try: options.connect_timeout = int(options.connect_timeout) except ValueError: parser.error('"%s" is not a valid connect timeout.' % (options.connect_timeout,)) options.connect_timeout = DEFAULT_CONNECT_TIMEOUT_SECONDS try: options.request_timeout = int(options.request_timeout) except ValueError: parser.error('"%s" is not a valid request timeout.' 
% (options.request_timeout,)) options.request_timeout = DEFAULT_REQUEST_TIMEOUT_SECONDS hostname = environment.get('CQLSH_HOST', hostname) port = environment.get('CQLSH_PORT', port) if len(arguments) > 0: hostname = arguments[0] if len(arguments) > 1: port = arguments[1] if options.file or options.execute: options.tty = False if options.execute and not options.execute.endswith(';'): options.execute += ';' if optvalues.color in (True, False): options.color = optvalues.color else: if options.file is not None: options.color = False else: options.color = should_use_color() options.cqlversion, cqlvertup = full_cql_version(options.cqlversion) if cqlvertup[0] < 3: parser.error('%r is not a supported CQL version.' % options.cqlversion) else: options.cqlmodule = cql3handling try: port = int(port) except ValueError: parser.error('%r is not a valid port number.' % port) return options, hostname, port def setup_cqlruleset(cqlmodule): global cqlruleset cqlruleset = cqlmodule.CqlRuleSet cqlruleset.append_rules(cqlsh_extra_syntax_rules) for rulename, termname, func in cqlsh_syntax_completers: cqlruleset.completer_for(rulename, termname)(func) cqlruleset.commands_end_with_newline.update(my_commands_ending_with_newline) def setup_cqldocs(cqlmodule): global cqldocs cqldocs = cqlmodule.cqldocs def init_history(): if readline is not None: try: readline.read_history_file(HISTORY) except IOError: pass delims = readline.get_completer_delims() delims.replace("'", "") delims += '.' readline.set_completer_delims(delims) def save_history(): if readline is not None: try: readline.write_history_file(HISTORY) except IOError: pass def main(options, hostname, port): setup_cqlruleset(options.cqlmodule) setup_cqldocs(options.cqlmodule) init_history() csv.field_size_limit(options.field_size_limit) if options.file is None: stdin = None else: try: encoding, bom_size = get_file_encoding_bomsize(options.file) stdin = codecs.open(options.file, 'r', encoding) stdin.seek(bom_size) except IOError, e: sys.exit("Can't open %r: %s" % (options.file, e)) if options.debug: sys.stderr.write("Using CQL driver: %s\n" % (cassandra,)) sys.stderr.write("Using connect timeout: %s seconds\n" % (options.connect_timeout,)) sys.stderr.write("Using '%s' encoding\n" % (options.encoding,)) # create timezone based on settings, environment or auto-detection timezone = None if options.timezone or 'TZ' in os.environ: try: import pytz if options.timezone: try: timezone = pytz.timezone(options.timezone) except: sys.stderr.write("Warning: could not recognize timezone '%s' specified in cqlshrc\n\n" % (options.timezone)) if 'TZ' in os.environ: try: timezone = pytz.timezone(os.environ['TZ']) except: sys.stderr.write("Warning: could not recognize timezone '%s' from environment value TZ\n\n" % (os.environ['TZ'])) except ImportError: sys.stderr.write("Warning: Timezone defined and 'pytz' module for timezone conversion not installed. 
Timestamps will be displayed in UTC timezone.\n\n")

    # try auto-detect timezone if tzlocal is installed
    if not timezone:
        try:
            from tzlocal import get_localzone
            timezone = get_localzone()
        except ImportError:
            # we silently ignore and fallback to UTC unless a custom timestamp format (which likely
            # does contain a TZ part) was specified
            if options.time_format != DEFAULT_TIMESTAMP_FORMAT:
                sys.stderr.write("Warning: custom timestamp format specified in cqlshrc, but local timezone could not be detected.\n" +
                                 "Either install Python 'tzlocal' module for auto-detection or specify client timezone in your cqlshrc.\n\n")

    try:
        shell = Shell(hostname, port,
                      color=options.color,
                      username=options.username,
                      password=options.password,
                      stdin=stdin,
                      tty=options.tty,
                      completekey=options.completekey,
                      browser=options.browser,
                      cqlver=options.cqlversion,
                      keyspace=options.keyspace,
                      display_timestamp_format=options.time_format,
                      display_nanotime_format=options.nanotime_format,
                      display_date_format=options.date_format,
                      display_float_precision=options.float_precision,
                      display_timezone=timezone,
                      max_trace_wait=options.max_trace_wait,
                      ssl=options.ssl,
                      single_statement=options.execute,
                      request_timeout=options.request_timeout,
                      connect_timeout=options.connect_timeout,
                      encoding=options.encoding)
    except KeyboardInterrupt:
        sys.exit('Connection aborted.')
    except CQL_ERRORS, e:
        sys.exit('Connection error: %s' % (e,))
    except VersionNotSupported, e:
        sys.exit('Unsupported CQL version: %s' % (e,))

    if options.debug:
        shell.debug = True

    shell.cmdloop()
    save_history()
    batch_mode = options.file or options.execute
    if batch_mode and shell.statement_error:
        sys.exit(2)


# always call this regardless of module name: when a sub-process is spawned
# on Windows then the module name is not __main__, see CASSANDRA-9304
insert_driver_hooks()

if __name__ == '__main__':
    main(*read_options(sys.argv[1:], os.environ))

# vim: set ft=python et ts=4 sw=4 :
[]
[]
[ "CQLSH_DEBUG_COMPLETION", "XDG_DATA_DIRS", "CQLSH_PROMPT", "TERM", "TZ", "CQLSH_NO_BUNDLED" ]
[]
["CQLSH_DEBUG_COMPLETION", "XDG_DATA_DIRS", "CQLSH_PROMPT", "TERM", "TZ", "CQLSH_NO_BUNDLED"]
python
6
0
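For context on the timezone entries recorded for this row: the cqlsh main() above resolves the display timezone from the cqlshrc setting, then the TZ environment variable, then tzlocal auto-detection, and otherwise leaves timestamps in UTC. A minimal standalone sketch of that resolution order, assuming the optional pytz and tzlocal packages (the function name is illustrative, not part of cqlsh):
import os

def resolve_display_timezone(cqlshrc_timezone=None):
    # Resolution order used by cqlsh's main(): explicit cqlshrc setting,
    # then the TZ environment variable, then tzlocal auto-detection.
    timezone = None
    if cqlshrc_timezone or 'TZ' in os.environ:
        try:
            import pytz
            for name in (cqlshrc_timezone, os.environ.get('TZ')):
                if not name:
                    continue
                try:
                    timezone = pytz.timezone(name)  # last recognized name wins
                except pytz.UnknownTimeZoneError:
                    pass  # unrecognized name: keep any previously resolved value
        except ImportError:
            pass  # pytz missing: timestamps are displayed in UTC
    if not timezone:
        try:
            from tzlocal import get_localzone
            timezone = get_localzone()  # auto-detect the client timezone
        except ImportError:
            pass  # silently fall back to UTC
    return timezone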
profile_test.go
package profile_test import ( "bufio" "bytes" "io/ioutil" "os" "os/exec" "path/filepath" "strings" "testing" "github.com/stretchr/testify/assert" ) func TestProfiles(t *testing.T) { for _, profTest := range profileTests { t.Logf("Run profile test '%s'", profTest.name) stdout, stderr, err := runTest(t, profTest.code) for _, check := range profTest.checks { check(t, stdout, stderr, err) } } checkPprofFiles(t, []string{ "./cpu.pprof", "./mem.pprof", "./mutex.pprof", "./block.pprof", "./trace.pprof", "./thread.pprof", "./goroutine.pprof", }) cleanupPprofFiles(t, []string{ "./cpu.pprof", "./mem.pprof", "./mutex.pprof", "./block.pprof", "./trace.pprof", "./thread.pprof", "./goroutine.pprof", }) } func TestOptions(t *testing.T) { for _, profTest := range optionsTests { t.Logf("Run option test '%s'", profTest.name) stdout, stderr, err := runTest(t, profTest.code) for _, check := range profTest.checks { check(t, stdout, stderr, err) } } checkPprofFiles(t, []string{ "./cpu.pprof", os.Getenv("HOME") + "/cpu.pprof", }) cleanupPprofFiles(t, []string{ "./cpu.pprof", os.Getenv("HOME") + "/cpu.pprof", }) } type profileTest struct { name string code string checks []checkFn } type checkFn func(t *testing.T, stdout, stderr []byte, err error) // Stdout verifies that the given lines match the output from stdout func Stdout(expectedLines ...string) checkFn { return func(t *testing.T, stdout, stderr []byte, err error) { for _, expected := range expectedLines { if !validateOutput(stdout, expected) { t.Errorf("stdout: expected '%s', actual '%s'", expected, stdout) } } } } // NotInStdout verifies that the given lines do not match the output from stdout func NotInStdout(expectedLines ...string) checkFn { return func(t *testing.T, stdout, stderr []byte, err error) { for _, expected := range expectedLines { if validateOutput(stdout, expected) { t.Errorf("stdout: '%s' was not expected, but found in stdout '%s'", expected, stdout) } } } } // NoStdout checks that stdout was blank func NoStdout(t *testing.T, stdout, stderr []byte, err error) { if len(stdout) > 0 { t.Errorf("stdout: expected 0 bytes, actual %d bytes - bytes to string: '%s'", len(stdout), string(stdout)) } } // Stderr verifies that the given lines match the output from stderr func Stderr(expectedLines ...string) checkFn { return func(t *testing.T, stdout, stderr []byte, err error) { for _, expected := range expectedLines { if !validateOutput(stderr, expected) { t.Errorf("stderr: expected '%s', actual '%s'", expected, stderr) } } } } // NoStderr checks that stderr was blank func NoStderr(t *testing.T, stdout, stderr []byte, err error) { if len(stderr) > 0 { t.Errorf("stderr: expected 0 bytes, actual %d bytes - bytes to string: '%s'", len(stderr), string(stderr)) } } // Err checks that there was an error returned func Err(t *testing.T, stdout, stderr []byte, err error) { if err == nil { t.Errorf("expected error") } } // NoErr checks that err was nil func NoErr(t *testing.T, stdout, stderr []byte, err error) { if err != nil { t.Errorf("error: expected nil, actual '%v'", err) } } // validateOutput checks if the expected input line is among data from stdout/stderr func validateOutput(std []byte, expected string) bool { scanner := bufio.NewScanner(bytes.NewReader(std)) for scanner.Scan() { if strings.Contains(strings.ToLower(scanner.Text()), strings.ToLower(expected)) { return true } } return false } /* runTest executes the go program supplied and returns the contents of stdout, stderr and an error which may contain status information about the result of the 
execution. */ func runTest(t *testing.T, codeToTest string) ([]byte, []byte, error) { tempGopathDir, goPathErr := ioutil.TempDir("", "profile_tests_") checkErr(t, goPathErr) defer os.RemoveAll(tempGopathDir) tempSrcDir := filepath.Join(tempGopathDir, "src") mkdirErr := os.Mkdir(tempSrcDir, 0755) checkErr(t, mkdirErr) tempMainPath := filepath.Join(tempSrcDir, "main.go") mainErr := ioutil.WriteFile(tempMainPath, []byte(codeToTest), 0644) checkErr(t, mainErr) var stdout, stderr bytes.Buffer cmd := exec.Command("go", "run", tempMainPath) cmd.Stdout = &stdout cmd.Stderr = &stderr runErr := cmd.Run() return stdout.Bytes(), stderr.Bytes(), runErr } /* checkErr checks if the error provided as input is different than nil. In case the error is not nil, the test will fail. */ func checkErr(t *testing.T, err error) { if err != nil { t.Fatal(err) } } // checkPprofFile checks if input pprof files exist func checkPprofFiles(t *testing.T, pprofFilesPath []string) { for _, pprof := range pprofFilesPath { info, err := os.Stat(pprof) assert.Nil(t, err) assert.False(t, os.IsNotExist(err)) assert.False(t, info.IsDir()) } } // cleanupPprofFiles deletes all specified pprof files func cleanupPprofFiles(t *testing.T, pprofFilesPath []string) { for _, pprof := range pprofFilesPath { err := os.Remove(pprof) if err != nil { t.Fatal(err) } } }
[ "\"HOME\"", "\"HOME\"" ]
[]
[ "HOME" ]
[]
["HOME"]
go
1
0
bazel/_gevent_test_main.py
# Copyright 2021 The gRPC Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gevent from gevent import monkey monkey.patch_all() threadpool = gevent.hub.get_hub().threadpool # Currently, each channel corresponds to a single native thread in the # gevent threadpool. Thus, when the unit test suite spins up hundreds of # channels concurrently, some will be starved out, causing the test to # increase in duration. We increase the max size here so this does not # happen. threadpool.maxsize = 1024 threadpool.size = 32 import traceback, signal from typing import Sequence import grpc.experimental.gevent grpc.experimental.gevent.init_gevent() import gevent import greenlet import datetime import grpc import unittest import sys import os import pkgutil def trace_callback(event, args): if event in ("switch", "throw"): origin, target = args sys.stderr.write("{} Transfer from {} to {} with {}\n".format(datetime.datetime.now(), origin, target, event)) else: sys.stderr.write("Unknown event {}.\n".format(event)) sys.stderr.flush() if os.getenv("GREENLET_TRACE") is not None: greenlet.settrace(trace_callback) def debug(sig, frame): d={'_frame':frame} d.update(frame.f_globals) d.update(frame.f_locals) sys.stderr.write("Traceback:\n{}".format("\n".join(traceback.format_stack(frame)))) import gevent.util; gevent.util.print_run_info() sys.stderr.flush() signal.signal(signal.SIGTERM, debug) class SingleLoader(object): def __init__(self, pattern: str): loader = unittest.TestLoader() self.suite = unittest.TestSuite() tests = [] for importer, module_name, is_package in pkgutil.walk_packages([os.path.dirname(os.path.relpath(__file__))]): if pattern in module_name: module = importer.find_module(module_name).load_module(module_name) tests.append(loader.loadTestsFromModule(module)) if len(tests) != 1: raise AssertionError("Expected only 1 test module. Found {}".format(tests)) self.suite.addTest(tests[0]) def loadTestsFromNames(self, names: Sequence[str], module: str = None) -> unittest.TestSuite: return self.suite if __name__ == "__main__": if len(sys.argv) != 2: print(f"USAGE: {sys.argv[0]} TARGET_MODULE", file=sys.stderr) target_module = sys.argv[1] loader = SingleLoader(target_module) runner = unittest.TextTestRunner() result = gevent.spawn(runner.run, loader.suite) result.join() if not result.value.wasSuccessful(): sys.exit("Test failure.")
[]
[]
[ "GREENLET_TRACE" ]
[]
["GREENLET_TRACE"]
python
1
0
internal/metrics.go
package internal import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "mqtg-bot/internal/common" "net/http" _ "net/http/pprof" "os" ) const DEFAULT_PORT = "80" type Metrics struct { common.MetricCollectors numOfIncMessagesFromTelegram prometheus.Gauge numOfOutMessagesToTelegram prometheus.Gauge } func InitPrometheusMetrics() Metrics { var m Metrics m.numOfIncMessagesFromTelegram = m.MetricCollectors.InitMetric("Number of incoming messages from Telegram") m.numOfOutMessagesToTelegram = m.MetricCollectors.InitMetric("Number of outgoing messages to Telegram") return m } func (bot *TelegramBot) StartPprofAndMetricsListener() { http.Handle("/metrics", promhttp.Handler()) port := os.Getenv("PORT") if port == "" { port = DEFAULT_PORT } http.ListenAndServe(":"+port, nil) }
[ "\"PORT\"" ]
[]
[ "PORT" ]
[]
["PORT"]
go
1
0
operator/controllers/utilities.go
package controllers import ( "bytes" "context" "fmt" "math/rand" "os" "strconv" "strings" "time" mpsv1alpha1 "github.com/playfab/thundernetes/operator/api/v1alpha1" corev1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" ) const ( SidecarContainerName = "thundernetes-sidecar" InitContainerName = "initcontainer" GameServerKind = "GameServer" GameServerBuildKind = "GameServerBuild" DataVolumeName = "data" DataVolumeMountPath = "/data" // MinPort is minimum Port Number MinPort int32 = 10000 // MaxPort is maximum Port Number MaxPort int32 = 50000 RandStringSize = 5 LabelBuildID = "BuildID" LabelBuildName = "BuildName" LabelOwningGameServer = "OwningGameServer" LabelOwningOperator = "OwningOperator" serviceAccountGameServerEditor = "thundernetes-gameserver-editor" GsdkConfigFile = "/data/Config/gsdkConfig.json" LogDirectory = "/data/GameLogs/" CertificatesDirectory = "/data/GameCertificates" GameSharedContentDirectory = "/data/GameSharedContent" SidecarPort int32 = 56001 ) var SidecarImage string var InitContainerImage string func init() { rand.Seed(time.Now().UTC().UnixNano()) //randomize name creation SidecarImage = os.Getenv("THUNDERNETES_SIDECAR_IMAGE") if SidecarImage == "" { panic("THUNDERNETES_SIDECAR_IMAGE cannot be empty") } InitContainerImage = os.Getenv("THUNDERNETES_INIT_CONTAINER_IMAGE") if InitContainerImage == "" { panic("THUNDERNETES_INIT_CONTAINER_IMAGE cannot be empty") } addMetricsToRegistry() } // generateName generates a random string concatenated with prefix and a dash func generateName(prefix string) string { return prefix + "-" + randString(RandStringSize) } // randString creates a random string with lowercase characters func randString(n int) string { letters := []rune("abcdefghijklmnopqrstuvwxyz") b := make([]rune, n) for i := range b { b[i] = letters[rand.Intn(len(letters))] } return string(b) } // GetPublicIPForNode returns the Public IP of the node // if the Node does not have a Public IP, method returns the internal one func GetPublicIPForNode(ctx context.Context, r client.Reader, nodeName string) (string, error) { log := log.FromContext(ctx) var node corev1.Node if err := r.Get(ctx, client.ObjectKey{Name: nodeName}, &node); err != nil { return "", err } for _, x := range node.Status.Addresses { if x.Type == corev1.NodeExternalIP { return x.Address, nil } } log.Info(fmt.Sprintf("Node with name %s does not have a Public IP, will try to return the internal IP", nodeName)) // externalIP not found, try InternalIP for _, x := range node.Status.Addresses { if x.Type == corev1.NodeInternalIP { return x.Address, nil } } return "", fmt.Errorf("node %s does not have a Public or Internal IP", nodeName) } // NewGameServerForGameServerBuild creates a GameServer for a GameServerBuild func NewGameServerForGameServerBuild(gsb *mpsv1alpha1.GameServerBuild, portRegistry *PortRegistry) (*mpsv1alpha1.GameServer, error) { gs := &mpsv1alpha1.GameServer{ ObjectMeta: metav1.ObjectMeta{ Name: generateName(gsb.Name), Namespace: gsb.Namespace, OwnerReferences: []metav1.OwnerReference{ *metav1.NewControllerRef(gsb, schema.GroupVersionKind{ Group: mpsv1alpha1.GroupVersion.Group, Version: mpsv1alpha1.GroupVersion.Version, Kind: GameServerBuildKind, }), }, Labels: map[string]string{LabelBuildID: gsb.Spec.BuildID, LabelBuildName: gsb.Name}, }, Spec: mpsv1alpha1.GameServerSpec{ PodSpec: gsb.Spec.PodSpec, BuildID: gsb.Spec.BuildID, TitleID: 
gsb.Spec.TitleID, PortsToExpose: gsb.Spec.PortsToExpose, BuildMetadata: gsb.Spec.BuildMetadata, }, // we don't create any status since we have the .Status subresource enabled } // assigning host ports for all the containers in the PodSpec for i := 0; i < len(gsb.Spec.PodSpec.Containers); i++ { container := gsb.Spec.PodSpec.Containers[i] for i := 0; i < len(container.Ports); i++ { if sliceContainsPortToExpose(gsb.Spec.PortsToExpose, container.Name, container.Ports[i].Name) { port, err := portRegistry.GetNewPort() if err != nil { return nil, err } container.Ports[i].HostPort = port } } } return gs, nil } // NewPodForGameServer returns a Kubernetes Pod struct for a specified GameServer // Pod has the same name as the GameServer // It also sets a label called "GameServer" with the value of the corresponding GameServer resource func NewPodForGameServer(gs *mpsv1alpha1.GameServer) *corev1.Pod { pod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: gs.Name, // same Name as the GameServer Namespace: gs.Namespace, Labels: map[string]string{ LabelBuildID: gs.Spec.BuildID, LabelBuildName: gs.Labels[LabelBuildName], LabelOwningGameServer: gs.Name, LabelOwningOperator: "thundernetes", }, OwnerReferences: []metav1.OwnerReference{ *metav1.NewControllerRef(gs, schema.GroupVersionKind{ Group: mpsv1alpha1.GroupVersion.Group, Version: mpsv1alpha1.GroupVersion.Version, Kind: GameServerKind, }), }, }, Spec: gs.Spec.PodSpec, } // following methods should be called in this exact order modifyRestartPolicy(pod) createDataVolumeOnPod(pod) // attach data volume and env for all containers in the Pod for i := 0; i < len(pod.Spec.Containers); i++ { attachDataVolumeOnContainer(&pod.Spec.Containers[i]) pod.Spec.Containers[i].Env = append(pod.Spec.Containers[i].Env, getGameServerEnvVariables(gs)...) 
} attachSidecar(gs, pod) attachInitContainer(gs, pod) addServiceAccountName(pod) return pod } func modifyRestartPolicy(pod *corev1.Pod) { pod.Spec.RestartPolicy = corev1.RestartPolicyNever } // attachSidecar attaches the sidecar container to the GameServer Pod func attachSidecar(gs *mpsv1alpha1.GameServer, pod *corev1.Pod) { sidecar := corev1.Container{ Name: SidecarContainerName, ImagePullPolicy: corev1.PullIfNotPresent, Image: SidecarImage, Ports: []corev1.ContainerPort{ { Name: "port", ContainerPort: SidecarPort, Protocol: corev1.ProtocolTCP, }, }, Env: getGameServerEnvVariables(gs), VolumeMounts: []corev1.VolumeMount{ { Name: DataVolumeName, MountPath: DataVolumeMountPath, }, }, } pod.Spec.Containers = append(pod.Spec.Containers, sidecar) } // attachInitContainer attaches the init container to the GameServer Pod func attachInitContainer(gs *mpsv1alpha1.GameServer, pod *corev1.Pod) { initcontainer := corev1.Container{ Name: InitContainerName, ImagePullPolicy: corev1.PullIfNotPresent, Image: InitContainerImage, Env: getInitContainerEnvVariables(gs), VolumeMounts: []corev1.VolumeMount{ { Name: DataVolumeName, MountPath: DataVolumeMountPath, }, }, } pod.Spec.InitContainers = append(pod.Spec.InitContainers, initcontainer) } // createDataVolumeOnPod creates a Volume that will be mounted to the GameServer Pod // The init container writes to this volume whereas the GameServer container reads from it (the GSDK methods) func createDataVolumeOnPod(pod *corev1.Pod) { dataDir := corev1.Volume{ Name: DataVolumeName, VolumeSource: corev1.VolumeSource{ EmptyDir: &corev1.EmptyDirVolumeSource{}, }, } pod.Spec.Volumes = append(pod.Spec.Volumes, dataDir) } // attachDataVolumeOnContainer attaches the data volume to the specified container func attachDataVolumeOnContainer(container *corev1.Container) { container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{ Name: DataVolumeName, MountPath: DataVolumeMountPath, }) } // addServiceAccountName customizes the ServiceAccountName field of the Pod // We add special RBAC permissions since the sidecar has to modify the GameServer.Status.State field func addServiceAccountName(pod *corev1.Pod) { pod.Spec.ServiceAccountName = serviceAccountGameServerEditor } // getInitContainerEnvVariables returns the environment variables for the init container func getInitContainerEnvVariables(gs *mpsv1alpha1.GameServer) []corev1.EnvVar { envList := []corev1.EnvVar{ { Name: "HEARTBEAT_ENDPOINT", Value: fmt.Sprintf("localhost:%d", SidecarPort), }, { Name: "GSDK_CONFIG_FILE", Value: GsdkConfigFile, }, { Name: "PF_SHARED_CONTENT_FOLDER", Value: GameSharedContentDirectory, }, { Name: "CERTIFICATE_FOLDER", Value: CertificatesDirectory, }, { Name: "PF_SERVER_LOG_DIRECTORY", Value: LogDirectory, }, { Name: "PF_VM_ID", Value: "thundernetes-aks-cluster", }, { Name: "PF_GAMESERVER_NAME", // this becomes SessionHostId in gsdkConfig.json file Value: gs.Name, // GameServer.Name is the same as Pod.Name }, } var b bytes.Buffer // get game ports for _, container := range gs.Spec.PodSpec.Containers { if container.Name == SidecarContainerName { continue } for _, port := range container.Ports { containerPort := strconv.Itoa(int(port.ContainerPort)) hostPort := strconv.Itoa(int(port.HostPort)) if sliceContainsPortToExpose(gs.Spec.PortsToExpose, container.Name, port.Name) { b.WriteString(port.Name + "," + containerPort + "," + hostPort + "?") } } } envList = append(envList, corev1.EnvVar{ Name: "PF_GAMESERVER_PORTS", Value: strings.TrimSuffix(b.String(), "?"), }) var buildMetada string 
for _, metadataItem := range gs.Spec.BuildMetadata { buildMetada += metadataItem.Key + "," + metadataItem.Value + "?" } envList = append(envList, corev1.EnvVar{ Name: "PF_GAMESERVER_BUILD_METADATA", Value: strings.TrimSuffix(buildMetada, "?"), }) return envList } // ger getGameServerEnvVariables returns the environment variables for the GameServer container func getGameServerEnvVariables(gs *mpsv1alpha1.GameServer) []corev1.EnvVar { envList := []corev1.EnvVar{ { Name: "PF_GAMESERVER_NAME", Value: gs.Name, }, { Name: "GSDK_CONFIG_FILE", Value: GsdkConfigFile, }, { Name: "PF_GAMESERVER_NAMESPACE", Value: gs.Namespace, }, { Name: "PF_BUILD_ID", Value: gs.Spec.BuildID, }, { Name: "PF_TITLE_ID", Value: gs.Spec.TitleID, }, } return envList } // sliceContainsPortToExpose returns true if the specific containerName/tuple value is contained in the slice func sliceContainsPortToExpose(slice []mpsv1alpha1.PortToExpose, containerName, portName string) bool { for _, item := range slice { if item.ContainerName == containerName && item.PortName == portName { return true } } return false } // containsString returns true if the specific string value is contained in the slice func containsString(slice []string, s string) bool { for _, item := range slice { if item == s { return true } } return false } // getContainerHostPortTuples returns a concatenated of hostPort:containerPort tuples func getContainerHostPortTuples(pod *corev1.Pod) string { var ports strings.Builder for _, container := range pod.Spec.Containers { // ignore the sidecar, since we don't want its ports to be visible if container.Name == SidecarContainerName { continue } for _, portInfo := range container.Ports { ports.WriteString(fmt.Sprintf("%d:%d,", portInfo.ContainerPort, portInfo.HostPort)) } } return strings.TrimSuffix(ports.String(), ",") }
[ "\"THUNDERNETES_SIDECAR_IMAGE\"", "\"THUNDERNETES_INIT_CONTAINER_IMAGE\"" ]
[]
[ "THUNDERNETES_SIDECAR_IMAGE", "THUNDERNETES_INIT_CONTAINER_IMAGE" ]
[]
["THUNDERNETES_SIDECAR_IMAGE", "THUNDERNETES_INIT_CONTAINER_IMAGE"]
go
2
0
vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker_test.go
// Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +build go1.12 package unitchecker_test // This test depends on features such as // go vet's support for vetx files (1.11) and // the (*os.ProcessState).ExitCode method (1.12). import ( "flag" "os" "os/exec" "regexp" "runtime" "strings" "testing" "golang.org/x/tools/go/analysis/passes/findcall" "golang.org/x/tools/go/analysis/passes/printf" "golang.org/x/tools/go/analysis/unitchecker" "golang.org/x/tools/go/packages/packagestest" ) func TestMain(m *testing.M) { if os.Getenv("UNITCHECKER_CHILD") == "1" { // child process main() panic("unreachable") } flag.Parse() os.Exit(m.Run()) } func main() { unitchecker.Main( findcall.Analyzer, printf.Analyzer, ) } // This is a very basic integration test of modular // analysis with facts using unitchecker under "go vet". // It fork/execs the main function above. func TestIntegration(t *testing.T) { packagestest.TestAll(t, testIntegration) } func testIntegration(t *testing.T, exporter packagestest.Exporter) { if runtime.GOOS != "linux" && runtime.GOOS != "darwin" { t.Skipf("skipping fork/exec test on this platform") } exported := packagestest.Export(t, exporter, []packagestest.Module{{ Name: "golang.org/fake", Files: map[string]interface{}{ "a/a.go": `package a func _() { MyFunc123() } func MyFunc123() {} `, "b/b.go": `package b import "golang.org/fake/a" func _() { a.MyFunc123() MyFunc123() } func MyFunc123() {} `, }}}) defer exported.Cleanup() const wantA = `# golang.org/fake/a ([/._\-a-zA-Z0-9]+[\\/]fake[\\/])?a/a.go:4:11: call of MyFunc123\(...\) ` const wantB = `# golang.org/fake/b ([/._\-a-zA-Z0-9]+[\\/]fake[\\/])?b/b.go:6:13: call of MyFunc123\(...\) ([/._\-a-zA-Z0-9]+[\\/]fake[\\/])?b/b.go:7:11: call of MyFunc123\(...\) ` const wantAJSON = `# golang.org/fake/a \{ "golang.org/fake/a": \{ "findcall": \[ \{ "posn": "([/._\-a-zA-Z0-9]+[\\/]fake[\\/])?a/a.go:4:11", "message": "call of MyFunc123\(...\)" \} \] \} \} ` for _, test := range []struct { args string wantOut string wantExit int }{ {args: "golang.org/fake/a", wantOut: wantA, wantExit: 2}, {args: "golang.org/fake/b", wantOut: wantB, wantExit: 2}, {args: "golang.org/fake/a golang.org/fake/b", wantOut: wantA + wantB, wantExit: 2}, {args: "-json golang.org/fake/a", wantOut: wantAJSON, wantExit: 0}, {args: "-c=0 golang.org/fake/a", wantOut: wantA + "4 MyFunc123\\(\\)\n", wantExit: 2}, } { cmd := exec.Command("go", "vet", "-vettool="+os.Args[0], "-findcall.name=MyFunc123") cmd.Args = append(cmd.Args, strings.Fields(test.args)...) cmd.Env = append(exported.Config.Env, "UNITCHECKER_CHILD=1") cmd.Dir = exported.Config.Dir out, err := cmd.CombinedOutput() exitcode := 0 if exitErr, ok := err.(*exec.ExitError); ok { exitcode = exitErr.ExitCode() } if exitcode != test.wantExit { t.Errorf("%s: got exit code %d, want %d", test.args, exitcode, test.wantExit) } matched, err := regexp.Match(test.wantOut, out) if err != nil { t.Fatal(err) } if !matched { t.Errorf("%s: got <<%s>>, want match of regexp <<%s>>", test.args, out, test.wantOut) } } }
[ "\"UNITCHECKER_CHILD\"" ]
[]
[ "UNITCHECKER_CHILD" ]
[]
["UNITCHECKER_CHILD"]
go
1
0
jfrog-cli/bintray/cli.go
package bintray import ( "github.com/codegangsta/cli" "github.com/jfrog/jfrog-cli-go/jfrog-cli/bintray/commands" accesskeysdoc "github.com/jfrog/jfrog-cli-go/jfrog-cli/docs/bintray/accesskeys" configdocs "github.com/jfrog/jfrog-cli-go/jfrog-cli/docs/bintray/config" "github.com/jfrog/jfrog-cli-go/jfrog-cli/docs/bintray/downloadfile" "github.com/jfrog/jfrog-cli-go/jfrog-cli/docs/bintray/downloadver" entitlementsdocs "github.com/jfrog/jfrog-cli-go/jfrog-cli/docs/bintray/entitlements" "github.com/jfrog/jfrog-cli-go/jfrog-cli/docs/bintray/gpgsignfile" "github.com/jfrog/jfrog-cli-go/jfrog-cli/docs/bintray/gpgsignver" logsdocs "github.com/jfrog/jfrog-cli-go/jfrog-cli/docs/bintray/logs" "github.com/jfrog/jfrog-cli-go/jfrog-cli/docs/bintray/packagecreate" "github.com/jfrog/jfrog-cli-go/jfrog-cli/docs/bintray/packagedelete" "github.com/jfrog/jfrog-cli-go/jfrog-cli/docs/bintray/packageshow" "github.com/jfrog/jfrog-cli-go/jfrog-cli/docs/bintray/packageupdate" streamdocs "github.com/jfrog/jfrog-cli-go/jfrog-cli/docs/bintray/stream" uploaddocs "github.com/jfrog/jfrog-cli-go/jfrog-cli/docs/bintray/upload" "github.com/jfrog/jfrog-cli-go/jfrog-cli/docs/bintray/urlsign" "github.com/jfrog/jfrog-cli-go/jfrog-cli/docs/bintray/versioncreate" "github.com/jfrog/jfrog-cli-go/jfrog-cli/docs/bintray/versiondelete" "github.com/jfrog/jfrog-cli-go/jfrog-cli/docs/bintray/versionpublish" "github.com/jfrog/jfrog-cli-go/jfrog-cli/docs/bintray/versionshow" "github.com/jfrog/jfrog-cli-go/jfrog-cli/docs/bintray/versionupdate" "github.com/jfrog/jfrog-cli-go/jfrog-cli/docs/common" "github.com/jfrog/jfrog-cli-go/jfrog-cli/utils/cliutils" "github.com/jfrog/jfrog-cli-go/jfrog-cli/utils/config" "github.com/jfrog/jfrog-client-go/bintray" "github.com/jfrog/jfrog-client-go/bintray/auth" "github.com/jfrog/jfrog-client-go/bintray/services" "github.com/jfrog/jfrog-client-go/bintray/services/accesskeys" "github.com/jfrog/jfrog-client-go/bintray/services/entitlements" "github.com/jfrog/jfrog-client-go/bintray/services/packages" "github.com/jfrog/jfrog-client-go/bintray/services/url" "github.com/jfrog/jfrog-client-go/bintray/services/utils" "github.com/jfrog/jfrog-client-go/bintray/services/versions" clientutils "github.com/jfrog/jfrog-client-go/utils" "github.com/jfrog/jfrog-client-go/utils/log" "os" "strconv" "strings" "errors" ) func GetCommands() []cli.Command { return []cli.Command{ { Name: "config", Flags: getConfigFlags(), Aliases: []string{"c"}, Usage: configdocs.Description, HelpName: common.CreateUsage("bt config", configdocs.Description, configdocs.Usage), ArgsUsage: common.CreateEnvVars(), Action: func(c *cli.Context) { configure(c) }, }, { Name: "upload", Flags: getUploadFlags(), Aliases: []string{"u"}, Usage: uploaddocs.Description, HelpName: common.CreateUsage("bt upload", uploaddocs.Description, uploaddocs.Usage), UsageText: uploaddocs.Arguments, ArgsUsage: common.CreateEnvVars(), Action: func(c *cli.Context) { upload(c) }, }, { Name: "download-file", Flags: getDownloadFileFlags(), Aliases: []string{"dlf"}, Usage: downloadfile.Description, HelpName: common.CreateUsage("bt download-file", downloadfile.Description, downloadfile.Usage), UsageText: downloadfile.Arguments, ArgsUsage: common.CreateEnvVars(), Action: func(c *cli.Context) { downloadFile(c) }, }, { Name: "download-ver", Flags: getDownloadVersionFlags(), Aliases: []string{"dlv"}, Usage: downloadver.Description, HelpName: common.CreateUsage("bt download-ver", downloadver.Description, downloadver.Usage), UsageText: downloadver.Arguments, ArgsUsage: 
common.CreateEnvVars(), Action: func(c *cli.Context) { downloadVersion(c) }, }, { Name: "package-show", Flags: getFlags(), Aliases: []string{"ps"}, Usage: packageshow.Description, HelpName: common.CreateUsage("bt package-show", packageshow.Description, packageshow.Usage), UsageText: packageshow.Arguments, ArgsUsage: common.CreateEnvVars(), Action: func(c *cli.Context) { showPackage(c) }, }, { Name: "package-create", Flags: getCreateAndUpdatePackageFlags(), Aliases: []string{"pc"}, Usage: packagecreate.Description, HelpName: common.CreateUsage("bt package-create", packagecreate.Description, packagecreate.Usage), UsageText: packagecreate.Arguments, ArgsUsage: common.CreateEnvVars(), Action: func(c *cli.Context) { createPackage(c) }, }, { Name: "package-update", Flags: getCreateAndUpdatePackageFlags(), Aliases: []string{"pu"}, Usage: packageupdate.Description, HelpName: common.CreateUsage("bt package-update", packageupdate.Description, packageupdate.Usage), UsageText: packageupdate.Arguments, ArgsUsage: common.CreateEnvVars(), Action: func(c *cli.Context) { updatePackage(c) }, }, { Name: "package-delete", Flags: getDeletePackageAndVersionFlags(), Aliases: []string{"pd"}, Usage: packagedelete.Description, HelpName: common.CreateUsage("bt package-delete", packagedelete.Description, packagedelete.Usage), UsageText: packagedelete.Arguments, ArgsUsage: common.CreateEnvVars(), Action: func(c *cli.Context) { deletePackage(c) }, }, { Name: "version-show", Flags: getFlags(), Aliases: []string{"vs"}, Usage: versionshow.Description, HelpName: common.CreateUsage("bt version-show", versionshow.Description, versionshow.Usage), UsageText: versionshow.Arguments, ArgsUsage: common.CreateEnvVars(), Action: func(c *cli.Context) { showVersion(c) }, }, { Name: "version-create", Flags: getCreateAndUpdateVersionFlags(), Aliases: []string{"vc"}, Usage: versioncreate.Description, HelpName: common.CreateUsage("bt version-create", versioncreate.Description, versioncreate.Usage), UsageText: versioncreate.Arguments, ArgsUsage: common.CreateEnvVars(), Action: func(c *cli.Context) { createVersion(c) }, }, { Name: "version-update", Flags: getCreateAndUpdateVersionFlags(), Aliases: []string{"vu"}, Usage: versionupdate.Description, HelpName: common.CreateUsage("bt version-update", versionupdate.Description, versionupdate.Usage), UsageText: versionupdate.Arguments, ArgsUsage: common.CreateEnvVars(), Action: func(c *cli.Context) { updateVersion(c) }, }, { Name: "version-delete", Flags: getDeletePackageAndVersionFlags(), Aliases: []string{"vd"}, Usage: versiondelete.Description, HelpName: common.CreateUsage("bt version-delete", versiondelete.Description, versiondelete.Usage), UsageText: versiondelete.Arguments, ArgsUsage: common.CreateEnvVars(), Action: func(c *cli.Context) { deleteVersion(c) }, }, { Name: "version-publish", Flags: getFlags(), Aliases: []string{"vp"}, Usage: versionpublish.Description, HelpName: common.CreateUsage("bt version-publish", versionpublish.Description, versionpublish.Usage), UsageText: versionpublish.Arguments, ArgsUsage: common.CreateEnvVars(), Action: func(c *cli.Context) { publishVersion(c) }, }, { Name: "entitlements", Flags: getEntitlementsFlags(), Aliases: []string{"ent"}, Usage: entitlementsdocs.Description, HelpName: common.CreateUsage("bt entitlements", entitlementsdocs.Description, entitlementsdocs.Usage), UsageText: entitlementsdocs.Arguments, ArgsUsage: common.CreateEnvVars(), Action: func(c *cli.Context) { handleEntitlements(c) }, }, { Name: "access-keys", Flags: getAccessKeysFlags(), 
Aliases: []string{"acc-keys"}, Usage: accesskeysdoc.Description, HelpName: common.CreateUsage("bt access-keys", accesskeysdoc.Description, accesskeysdoc.Usage), UsageText: accesskeysdoc.Arguments, ArgsUsage: common.CreateEnvVars(), Action: func(c *cli.Context) { accessKeys(c) }, }, { Name: "url-sign", Flags: getUrlSigningFlags(), Aliases: []string{"us"}, Usage: urlsign.Description, HelpName: common.CreateUsage("bt url-sign", urlsign.Description, urlsign.Usage), UsageText: urlsign.Arguments, ArgsUsage: common.CreateEnvVars(), Action: func(c *cli.Context) { signUrl(c) }, }, { Name: "gpg-sign-file", Flags: getGpgSigningFlags(), Aliases: []string{"gsf"}, Usage: gpgsignfile.Description, HelpName: common.CreateUsage("bt gpg-sign-file", gpgsignfile.Description, gpgsignfile.Usage), UsageText: gpgsignfile.Arguments, ArgsUsage: common.CreateEnvVars(), Action: func(c *cli.Context) { gpgSignFile(c) }, }, { Name: "gpg-sign-ver", Flags: getGpgSigningFlags(), Aliases: []string{"gsv"}, Usage: gpgsignver.Description, HelpName: common.CreateUsage("bt gpg-sign-ver", gpgsignver.Description, gpgsignver.Usage), UsageText: gpgsignver.Arguments, ArgsUsage: common.CreateEnvVars(), Action: func(c *cli.Context) { gpgSignVersion(c) }, }, { Name: "logs", Flags: getFlags(), Aliases: []string{"l"}, Usage: logsdocs.Description, HelpName: common.CreateUsage("bt logs", logsdocs.Description, logsdocs.Usage), UsageText: logsdocs.Arguments, ArgsUsage: common.CreateEnvVars(), Action: func(c *cli.Context) { logs(c) }, }, { Name: "stream", Flags: getStreamFlags(), Aliases: []string{"st"}, Usage: streamdocs.Description, HelpName: common.CreateUsage("bt stream", streamdocs.Description, streamdocs.Usage), UsageText: streamdocs.Arguments, ArgsUsage: common.CreateEnvVars(), Action: func(c *cli.Context) { stream(c) }, }, } } func getFlags() []cli.Flag { return []cli.Flag{ cli.StringFlag{ Name: "user", Value: "", Usage: "[Optional] Bintray username. If not set, the subject sent as part of the command argument is used for authentication.", }, cli.StringFlag{ Name: "key", Value: "", Usage: "[Mandatory] Bintray API key", }, } } func getStreamFlags() []cli.Flag { return append(getFlags(), []cli.Flag{ cli.StringFlag{ Name: "include", Value: "", Usage: "[Optional] List of events type in the form of \"value1;value2;...\" leave empty to include all.", }, }...) } func getConfigFlags() []cli.Flag { flags := []cli.Flag{ cli.BoolTFlag{ Name: "interactive", Usage: "[Default: true] Set to false if you do not want the config command to be interactive.", }, } flags = append(flags, getFlags()...) 
return append(flags, cli.StringFlag{ Name: "licenses", Value: "", Usage: "[Optional] Default package licenses in the form of Apache-2.0,GPL-3.0...", }) } func getPackageFlags() []cli.Flag { return []cli.Flag{ cli.StringFlag{ Name: "licenses", Value: "", Usage: "[Mandatory for OSS] Package licenses in the form of Apache-2.0,GPL-3.0...", }, cli.StringFlag{ Name: "vcs-url", Value: "", Usage: "[Mandatory for OSS] Package VCS URL.", }, cli.BoolFlag{ Name: "pub-dn", Usage: "[Default: false] Public download numbers.", }, cli.BoolTFlag{ Name: "pub-stats", Usage: "[Default: true] Public statistics.", }, cli.StringFlag{ Name: "desc", Value: "", Usage: "[Optional] Package description.", }, cli.StringFlag{ Name: "labels", Value: "", Usage: "[Optional] Package lables in the form of \"lable11\",\"lable2\"...", }, cli.StringFlag{ Name: "cust-licenses", Value: "", Usage: "[Optional] Package custom licenses in the form of \"my-license-1\",\"my-license-2\"...", }, cli.StringFlag{ Name: "website-url", Value: "", Usage: "[Optional] Package web site URL.", }, cli.StringFlag{ Name: "issuetracker-url", Value: "", Usage: "[Optional] Package Issues Tracker URL.", }, cli.StringFlag{ Name: "github-repo", Value: "", Usage: "[Optional] Package Github repository.", }, cli.StringFlag{ Name: "github-rel-notes", Value: "", Usage: "[Optional] Github release notes file.", }, } } func getVersionFlags() []cli.Flag { return []cli.Flag{ cli.BoolFlag{ Name: "github-tag-rel-notes", Usage: "[Default: false] Set to true if you wish to use a Github tag release notes.", }, cli.StringFlag{ Name: "desc", Value: "", Usage: "[Optional] Version description.", }, cli.StringFlag{ Name: "released", Value: "", Usage: "[Optional] Release date in ISO8601 format (yyyy-MM-dd'T'HH:mm:ss.SSSZ)", }, cli.StringFlag{ Name: "github-rel-notes", Value: "", Usage: "[Optional] Github release notes file.", }, cli.StringFlag{ Name: "vcs-tag", Value: "", Usage: "[Optional] VCS tag.", }, } } func getCreateAndUpdatePackageFlags() []cli.Flag { return append(getFlags(), getPackageFlags()...) } func getCreateAndUpdateVersionFlags() []cli.Flag { return append(getFlags(), getVersionFlags()...) } func getDeletePackageAndVersionFlags() []cli.Flag { return append(getFlags(), cli.BoolFlag{ Name: "quiet", Usage: "[Default: false] Set to true to skip the delete confirmation message.", }) } func getDownloadFlags() []cli.Flag { return []cli.Flag{ cli.BoolFlag{ Name: "flat", Usage: "[Default: false] Set to true if you do not wish to have the Bintray path structure created locally for your downloaded files.", }, cli.StringFlag{ Name: "min-split", Value: "", Usage: "[Default: 5120] Minimum file size in KB to split into ranges when downloading. Set to -1 for no splits.", }, cli.StringFlag{ Name: "split-count", Value: "", Usage: "[Default: 3] Number of parts to split a file when downloading. Set to 0 for no splits.", }, cli.BoolFlag{ Name: "unpublished", Usage: "[Default: false] Download both published and unpublished files.", }, } } func getDownloadFileFlags() []cli.Flag { return append(getFlags(), getDownloadFlags()...) } func getDownloadVersionFlags() []cli.Flag { flags := append(getFlags(), cli.StringFlag{ Name: "threads", Value: "", Usage: "[Default: 3] Number of artifacts to download in parallel.", }) return append(flags, getDownloadFlags()...) 
} func getUploadFlags() []cli.Flag { return append(getFlags(), []cli.Flag{ cli.BoolTFlag{ Name: "recursive", Usage: "[Default: true] Set to false if you do not wish to collect files in sub-folders to be uploaded to Bintray.", }, cli.BoolTFlag{ Name: "flat", Usage: "[Default: true] If set to false, files are uploaded according to their file system hierarchy.", }, cli.BoolFlag{ Name: "regexp", Usage: "[Default: false] Set to true to use a regular expression instead of wildcards expression to collect files to upload.", }, cli.BoolFlag{ Name: "publish", Usage: "[Default: false] Set to true to publish the uploaded files.", }, cli.BoolFlag{ Name: "override", Usage: "[Default: false] Set to true to enable overriding existing published files.", }, cli.BoolFlag{ Name: "explode", Usage: "[Default: false] Set to true to explode archived files after upload.", }, cli.StringFlag{ Name: "threads", Value: "", Usage: "[Default: 3] Number of artifacts to upload in parallel.", }, cli.BoolFlag{ Name: "dry-run", Usage: "[Default: false] Set to true to disable communication with Bintray.", }, cli.StringFlag{ Name: "deb", Value: "", Usage: "[Optional] Used for Debian packages in the form of distribution/component/architecture.", }, }...) } func getEntitlementsFlags() []cli.Flag { return append(getFlags(), []cli.Flag{ cli.StringFlag{ Name: "id", Usage: "[Optional] Entitlement ID. Used for entitlements update.", }, cli.StringFlag{ Name: "access", Usage: "[Optional] Entitlement access. Used for entitlements creation and update.", }, cli.StringFlag{ Name: "keys", Usage: "[Optional] Used for entitlements creation and update. List of Access Keys in the form of \"key1\",\"key2\"...", }, cli.StringFlag{ Name: "path", Usage: "[Optional] Entitlement path. Used for entitlements creating and update.", }, }...) } func getAccessKeysFlags() []cli.Flag { return append(getFlags(), []cli.Flag{ cli.StringFlag{ Name: "org", Usage: "[Optional] Bintray organization", }, cli.StringFlag{ Name: "password", Usage: "[Optional] Access Key password.", }, cli.StringFlag{ Name: "expiry", Usage: "[Optional] Access Key expiry (required for 'jfrog bt acc-keys show/create/update/delete'", }, cli.StringFlag{ Name: "ex-check-url", Usage: "[Optional] You can optionally provide an existence check directive, in the form of a callback URL, to verify whether the source identity of the Access Key still exists.", }, cli.StringFlag{ Name: "ex-check-cache", Usage: "[Optional] You can optionally provide the period in seconds for the callback URL results cache.", }, cli.StringFlag{ Name: "white-cidrs", Usage: "[Optional] Specifying white CIDRs in the form of 127.0.0.1/22,193.5.0.1/92 will allow access only for those IPs that exist in that address range.", }, cli.StringFlag{ Name: "black-cidrs", Usage: "[Optional] Specifying black CIDRs in the form of 127.0.0.1/22,193.5.0.1/92 will block access for all IPs that exist in the specified range.", }, cli.BoolTFlag{ Name: "api-only", Usage: "[Default: true] You can set api_only to false to allow access keys access to Bintray UI as well as to the API.", }, }...) } func getUrlSigningFlags() []cli.Flag { return append(getFlags(), []cli.Flag{ cli.StringFlag{ Name: "expiry", Usage: "[Optional] An expiry date for the URL, in Unix epoch time in milliseconds, after which the URL will be invalid. By default, expiry date will be 24 hours.", }, cli.StringFlag{ Name: "valid-for", Usage: "[Optional] The number of seconds since generation before the URL expires. 
Mutually exclusive with the --expiry option.", }, cli.StringFlag{ Name: "callback-id", Usage: "[Optional] An applicative identifier for the request. This identifier appears in download logs and is used in email and download webhook notifications.", }, cli.StringFlag{ Name: "callback-email", Usage: "[Optional] An email address to send mail to when a user has used the download URL. This requiers a callback_id. The callback-id will be included in the mail message.", }, cli.StringFlag{ Name: "callback-url", Usage: "[Optional] A webhook URL to call when a user has used the download URL.", }, cli.StringFlag{ Name: "callback-method", Usage: "[Optional] HTTP method to use for making the callback. Will use POST by default. Supported methods are: GET, POST, PUT and HEAD.", }, }...) } func getGpgSigningFlags() []cli.Flag { return append(getFlags(), cli.StringFlag{ Name: "passphrase", Usage: "[Optional] GPG passphrase.", }) } func configure(c *cli.Context) { if c.NArg() > 1 { cliutils.PrintHelpAndExitWithError("Wrong number of arguments.", c) } else if c.NArg() == 1 { if c.Args().Get(0) == "show" { commands.ShowConfig() } else if c.Args().Get(0) == "clear" { commands.ClearConfig() } else { cliutils.ExitOnErr(errors.New("Unknown argument '"+c.Args().Get(0)+"'. Available arguments are 'show' and 'clear'.")) } } else { interactive := c.BoolT("interactive") if !interactive { if c.String("user") == "" || c.String("key") == "" { cliutils.ExitOnErr(errors.New("The --user and --key options are mandatory when the --interactive option is set to false")) } } bintrayDetails, err := createBintrayDetails(c, false) cliutils.ExitOnErr(err) cliBtDetails := &config.BintrayDetails{ User: bintrayDetails.GetUser(), Key: bintrayDetails.GetKey(), ApiUrl: bintrayDetails.GetApiUrl(), DownloadServerUrl: bintrayDetails.GetDownloadServerUrl(), DefPackageLicense: bintrayDetails.GetDefPackageLicense(), } commands.Config(cliBtDetails, nil, interactive) } } func showPackage(c *cli.Context) { if c.NArg() != 1 { cliutils.PrintHelpAndExitWithError("Wrong number of arguments.", c) } packagePath, err := packages.CreatePath(c.Args().Get(0)) cliutils.ExitOnErr(err) btConfig := newBintrayConfig(c) err = commands.ShowPackage(btConfig, packagePath) cliutils.ExitOnErr(err) } func showVersion(c *cli.Context) { if c.NArg() != 1 { cliutils.PrintHelpAndExitWithError("Wrong number of arguments.", c) } versionPath, err := versions.CreatePath(c.Args().Get(0)) cliutils.ExitOnErr(err) btConfig := newBintrayConfig(c) err = commands.ShowVersion(btConfig, versionPath) cliutils.ExitOnErr(err) } func createPackage(c *cli.Context) { if c.NArg() != 1 { cliutils.PrintHelpAndExitWithError("Wrong number of arguments.", c) } packageParams, err := createPackageParams(c) cliutils.ExitOnErr(err) btConfig := newBintrayConfig(c) err = commands.CreatePackage(btConfig, packageParams) cliutils.ExitOnErr(err) } func createVersion(c *cli.Context) { if c.NArg() != 1 { cliutils.PrintHelpAndExitWithError("Wrong number of arguments.", c) } versionParams, err := createVersionParams(c) cliutils.ExitOnErr(err) btConfig := newBintrayConfig(c) err = commands.CreateVersion(btConfig, versionParams) cliutils.ExitOnErr(err) } func updateVersion(c *cli.Context) { if c.NArg() != 1 { cliutils.PrintHelpAndExitWithError("Wrong number of arguments.", c) } versionParams, err := createVersionParams(c) cliutils.ExitOnErr(err) btConfig := newBintrayConfig(c) err = commands.UpdateVersion(btConfig, versionParams) cliutils.ExitOnErr(err) } func updatePackage(c *cli.Context) { if c.NArg() != 1 { 
cliutils.PrintHelpAndExitWithError("Wrong number of arguments.", c) } packageParams, err := createPackageParams(c) cliutils.ExitOnErr(err) btConfig := newBintrayConfig(c) err = commands.UpdatePackage(btConfig, packageParams) cliutils.ExitOnErr(err) } func deletePackage(c *cli.Context) { if c.NArg() != 1 { cliutils.PrintHelpAndExitWithError("Wrong number of arguments.", c) } packagePath, err := packages.CreatePath(c.Args().Get(0)) cliutils.ExitOnErr(err) btConfig := newBintrayConfig(c) if !c.Bool("quiet") { confirmed := cliutils.InteractiveConfirm("Delete package " + packagePath.Package + "?") if !confirmed { return } } err = commands.DeletePackage(btConfig, packagePath) cliutils.ExitOnErr(err) } func deleteVersion(c *cli.Context) { if c.NArg() != 1 { cliutils.PrintHelpAndExitWithError("Wrong number of arguments.", c) } versionPath, err := versions.CreatePath(c.Args().Get(0)) cliutils.ExitOnErr(err) btConfig := newBintrayConfig(c) if !c.Bool("quiet") { confirmed := cliutils.InteractiveConfirm("Delete version " + versionPath.Version + " of package " + versionPath.Package + "?") if !confirmed { return } } err = commands.DeleteVersion(btConfig, versionPath) cliutils.ExitOnErr(err) } func publishVersion(c *cli.Context) { if c.NArg() != 1 { cliutils.PrintHelpAndExitWithError("Wrong number of arguments.", c) } versionPath, err := versions.CreatePath(c.Args().Get(0)) cliutils.ExitOnErr(err) btConfig := newBintrayConfig(c) err = commands.PublishVersion(btConfig, versionPath) cliutils.ExitOnErr(err) } func downloadVersion(c *cli.Context) { if c.NArg() < 1 || c.NArg() > 2 { cliutils.PrintHelpAndExitWithError("Wrong number of arguments.", c) } var err error params := services.NewDownloadVersionParams() params.IncludeUnpublished = c.Bool("unpublished") params.Path, err = services.CreateVersionDetailsForDownloadVersion(c.Args().Get(0)) cliutils.ExitOnErr(err) params.TargetPath = c.Args().Get(1) if strings.HasPrefix(params.TargetPath, "/") { params.TargetPath = params.TargetPath[1:] } btConfig := newBintrayConfig(c) downloaded, failed, err := commands.DownloadVersion(btConfig, params) err = cliutils.PrintSummaryReport(downloaded, failed, err) cliutils.ExitOnErr(err) if failed > 0 { cliutils.ExitOnErr(errors.New("")) } } func upload(c *cli.Context) { if c.NArg() < 2 || c.NArg() > 3 { cliutils.PrintHelpAndExitWithError("Wrong number of arguments.", c) } params := services.NewUploadParams() params.Pattern = c.Args().Get(0) var err error params.Path, err = versions.CreatePath(c.Args().Get(1)) cliutils.ExitOnErr(err) params.TargetPath = c.Args().Get(2) if strings.HasPrefix(params.TargetPath, "/") { params.TargetPath = params.TargetPath[1:] } params.Deb = c.String("deb") if params.Deb != "" && len(strings.Split(params.Deb, "/")) != 3 { cliutils.ExitOnErr(errors.New("The --deb option should be in the form of distribution/component/architecture")) } params.Recursive = c.BoolT("recursive") params.Flat = c.BoolT("flat") params.Publish = c.Bool("publish") params.Override = c.Bool("override") params.Explode = c.Bool("explode") params.UseRegExp = c.Bool("regexp") uploadConfig := newBintrayConfig(c) uploaded, failed, err := commands.Upload(uploadConfig, params) err = cliutils.PrintSummaryReport(uploaded, failed, err) cliutils.ExitOnErr(err) if failed > 0 { cliutils.ExitOnErr(errors.New("")) } } func downloadFile(c *cli.Context) { if c.NArg() < 1 || c.NArg() > 2 { cliutils.PrintHelpAndExitWithError("Wrong number of arguments.", c) } var err error params := services.NewDownloadFileParams() params.Flat = c.Bool("flat") 
params.IncludeUnpublished = c.Bool("unpublished") params.PathDetails, err = utils.CreatePathDetails(c.Args().Get(0)) cliutils.ExitOnErr(err) params.TargetPath = c.Args().Get(1) if strings.HasPrefix(params.TargetPath, "/") { params.TargetPath = params.TargetPath[1:] } btConfig := newBintrayConfig(c) downloaded, failed, err := commands.DownloadFile(btConfig, params) err = cliutils.PrintSummaryReport(downloaded, failed, err) cliutils.ExitOnErr(err) } func signUrl(c *cli.Context) { if c.NArg() != 1 { cliutils.PrintHelpAndExitWithError("Wrong number of arguments.", c) } btConfig := newBintrayConfig(c) signUrlParams := createUrlSigningFlags(c) err := commands.SignVersion(btConfig, signUrlParams) cliutils.ExitOnErr(err) } func gpgSignFile(c *cli.Context) { if c.NArg() != 1 { cliutils.PrintHelpAndExitWithError("Wrong number of arguments.", c) } pathDetails, err := utils.CreatePathDetails(c.Args().Get(0)) cliutils.ExitOnErr(err) btConfig := newBintrayConfig(c) err = commands.GpgSignFile(btConfig, pathDetails, c.String("passphrase")) cliutils.ExitOnErr(err) } func logs(c *cli.Context) { btConfig := newBintrayConfig(c) if c.NArg() == 1 { versionPath, err := versions.CreatePath(c.Args().Get(0)) cliutils.ExitOnErr(err) err = commands.LogsList(btConfig, versionPath) cliutils.ExitOnErr(err) } else if c.NArg() == 3 && c.Args().Get(0) == "download" { versionPath, err := versions.CreatePath(c.Args().Get(1)) cliutils.ExitOnErr(err) err = commands.DownloadLog(btConfig, versionPath, c.Args().Get(2)) cliutils.ExitOnErr(err) } else { cliutils.PrintHelpAndExitWithError("Wrong number of arguments.", c) } } func stream(c *cli.Context) { bintrayDetails, err := createBintrayDetails(c, true) if err != nil { cliutils.ExitOnErr(err) } if c.NArg() != 1 { cliutils.PrintHelpAndExitWithError("Wrong number of arguments.", c) } streamDetails := &commands.StreamDetails{ BintrayDetails: bintrayDetails, Subject: c.Args().Get(0), Include: c.String("include"), } commands.Stream(streamDetails, os.Stdout) } func gpgSignVersion(c *cli.Context) { if c.NArg() != 1 { cliutils.PrintHelpAndExitWithError("Wrong number of arguments.", c) } versionDetails, err := versions.CreatePath(c.Args().Get(0)) cliutils.ExitOnErr(err) btConfig := newBintrayConfig(c) err = commands.GpgSignVersion(btConfig, versionDetails, c.String("passphrase")) cliutils.ExitOnErr(err) } func accessKeys(c *cli.Context) { var err error org := c.String("org") btConfig := newBintrayConfig(c) if c.NArg() == 0 { err = commands.ShowAllAccessKeys(btConfig, org) cliutils.ExitOnErr(err) return } if c.NArg() != 2 { cliutils.PrintHelpAndExitWithError("Wrong number of arguments.", c) } keyId := c.Args().Get(1) switch c.Args().Get(0) { case "show": err = commands.ShowAccessKey(btConfig, org, keyId) case "delete": err = commands.DeleteAccessKey(btConfig, org, keyId) case "create": err = commands.CreateAccessKey(btConfig, createAccessKeysParams(c, org, keyId)) case "update": err = commands.UpdateAccessKey(btConfig, createAccessKeysParams(c, org, keyId)) default: cliutils.ExitOnErr(errors.New("Expecting show, create, update or delete before the key argument. 
Got "+c.Args().Get(0))) } cliutils.ExitOnErr(err) } func handleEntitlements(c *cli.Context) { if c.NArg() == 0 { cliutils.PrintHelpAndExitWithError("Wrong number of arguments.", c) } btConfig := newBintrayConfig(c) if c.NArg() == 1 { details, err := entitlements.CreateVersionDetails(c.Args().Get(0)) cliutils.ExitOnErr(err) err = commands.ShowAllEntitlements(btConfig, details) cliutils.ExitOnErr(err) return } if c.NArg() != 2 { cliutils.PrintHelpAndExitWithError("Wrong number of arguments.", c) } versionPath, err := entitlements.CreateVersionDetails(c.Args().Get(1)) cliutils.ExitOnErr(err) switch c.Args().Get(0) { case "show": id := c.String("id") if id == "" { cliutils.ExitOnErr(errors.New("Please add the --id option")) } err = commands.ShowEntitlement(btConfig, id, versionPath) case "create": params := createEntitlementFlagsForCreate(c, versionPath) err = commands.CreateEntitlement(btConfig, params) case "update": params := createEntitlementFlagsForUpdate(c, versionPath) err = commands.UpdateEntitlement(btConfig, params) case "delete": id := c.String("id") if id == "" { cliutils.ExitOnErr(errors.New("Please add the --id option")) } err = commands.DeleteEntitlement(btConfig, id, versionPath) default: cliutils.ExitOnErr(errors.New("Expecting show, create, update or delete before "+c.Args().Get(1)+". Got "+c.Args().Get(0))) } cliutils.ExitOnErr(err) } func createPackageParams(c *cli.Context) (*packages.Params, error) { licenses := c.String("licenses") if licenses == "" { confDetails, err := commands.GetConfig() if err != nil { return nil, err } licenses = confDetails.DefPackageLicense } packagePath, err := packages.CreatePath(c.Args().Get(0)) if err != nil { return nil, err } params := packages.NewPackageParams() params.Path = packagePath params.Desc = c.String("desc") params.Labels = c.String("labels") params.Licenses = licenses params.CustomLicenses = c.String("cust-licenses") params.VcsUrl = c.String("vcs-url") params.WebsiteUrl = c.String("website-url") params.IssueTrackerUrl = c.String("issuetracker-url") params.GithubRepo = c.String("github-repo") params.GithubReleaseNotesFile = c.String("github-rel-notes") params.PublicDownloadNumbers = c.Bool("pub-dn") params.PublicStats = c.BoolT("pub-stats") return params, nil } func newBintrayConfig(c *cli.Context) bintray.Config { btDetails, err := createBintrayDetails(c, true) cliutils.ExitOnErr(err) btConfig := bintray.NewConfigBuilder(). SetBintrayDetails(btDetails). SetDryRun(c.Bool("dry-run")). SetThreads(getThreadsOptionValue(c)). SetMinSplitSize(getMinSplitFlag(c)). SetSplitCount(getSplitCountFlag(c)). SetLogger(log.Logger). 
Build() return btConfig } func createVersionParams(c *cli.Context) (*versions.Params, error) { versionDetails, err := versions.CreatePath(c.Args().Get(0)) if err != nil { return nil, err } params := versions.NewVersionParams() params.Path = versionDetails params.Desc = c.String("desc") params.VcsTag = c.String("vcs-tag") params.Released = c.String("released") params.GithubReleaseNotesFile = c.String("github-rel-notes") params.GithubUseTagReleaseNotes = c.Bool("github-tag-rel-notes") return params, nil } func createUrlSigningFlags(c *cli.Context) *url.Params { if c.String("valid-for") != "" { _, err := strconv.ParseInt(c.String("valid-for"), 10, 64) if err != nil { cliutils.ExitOnErr(errors.New("The '--valid-for' option should have a numeric value.")) } } urlSigningDetails, err := utils.CreatePathDetails(c.Args().Get(0)) cliutils.ExitOnErr(err) var expiry int64 if c.String("expiry") != "" { var err error expiry, err = strconv.ParseInt(c.String("expiry"), 10, 64) if err != nil { cliutils.ExitOnErr(errors.New("The --expiry option should have a numeric value.")) } } params := url.NewURLParams() params.PathDetails = urlSigningDetails params.Expiry = expiry params.ValidFor = c.Int("valid-for") params.CallbackId = c.String("callback-id") params.CallbackEmail = c.String("callback-email") params.CallbackUrl = c.String("callback-url") params.CallbackMethod = c.String("callback-method") return params } func getThreadsOptionValue(c *cli.Context) (threads int) { if c.String("threads") == "" { threads = 3 } else { var err error threads, err = strconv.Atoi(c.String("threads")) if err != nil || threads < 1 { cliutils.ExitOnErr(errors.New("The '--threads' option should have a numeric positive value.")) } } return } func createEntitlementFlagsForCreate(c *cli.Context, path *versions.Path) *entitlements.Params { if c.String("access") == "" { cliutils.ExitOnErr(errors.New("Please add the --access option")) } params := entitlements.NewEntitlementsParams() params.VersionPath = path params.Path = c.String("path") params.Access = c.String("access") params.Keys = c.String("keys") return params } func createEntitlementFlagsForUpdate(c *cli.Context, path *versions.Path) *entitlements.Params { if c.String("id") == "" { cliutils.ExitOnErr(errors.New("Please add the --id option")) } if c.String("access") == "" { cliutils.ExitOnErr(errors.New("Please add the --access option")) } params := entitlements.NewEntitlementsParams() params.VersionPath = path params.Id = c.String("id") params.Path = c.String("path") params.Access = c.String("access") params.Keys = c.String("keys") return params } func createAccessKeysParams(c *cli.Context, org, keyId string) *accesskeys.Params { var cachePeriod int if c.String("ex-check-cache") != "" { var err error cachePeriod, err = strconv.Atoi(c.String("ex-check-cache")) if err != nil { cliutils.ExitOnErr(errors.New("The --ex-check-cache option should have a numeric value.")) } } var expiry int64 if c.String("expiry") != "" { var err error expiry, err = strconv.ParseInt(c.String("expiry"), 10, 64) if err != nil { cliutils.ExitOnErr(errors.New("The --expiry option should have a numeric value.")) } } params := accesskeys.NewAccessKeysParams() params.Id = keyId params.Password = c.String("password") params.Org = org params.Expiry = expiry params.ExistenceCheckUrl = c.String("ex-check-url") params.ExistenceCheckCache = cachePeriod params.WhiteCidrs = c.String("white-cidrs") params.BlackCidrs = c.String("black-cidrs") params.ApiOnly = c.BoolT("recursive") return params } func offerConfig(c 
*cli.Context) (*config.BintrayDetails, error) { exists, err := config.IsBintrayConfExists() if err != nil { return nil, err } if exists { return nil, nil } val, err := cliutils.GetBoolEnvValue("JFROG_CLI_OFFER_CONFIG", true) if err != nil { return nil, err } if !val { config.SaveBintrayConf(new(config.BintrayDetails)) return nil, nil } msg := "Some CLI commands require the following common options:\n" + "- User\n" + "- API Key\n" + "- Default Package Licenses\n" + "Configuring JFrog CLI with these parameters now will save you having to include them as command options.\n" + "You can also configure these parameters later using the 'config' command.\n" + "Configure now?" confirmed := cliutils.InteractiveConfirm(msg) if !confirmed { config.SaveBintrayConf(new(config.BintrayDetails)) return nil, nil } bintrayDetails, err := createBintrayDetails(c, false) if err != nil { return nil, err } cliBtDetails := &config.BintrayDetails{ ApiUrl: bintrayDetails.GetApiUrl(), DownloadServerUrl: bintrayDetails.GetDownloadServerUrl(), User: bintrayDetails.GetUser(), Key: bintrayDetails.GetKey(), DefPackageLicense: bintrayDetails.GetDefPackageLicense()} details, err := commands.Config(nil, cliBtDetails, true) cliutils.ExitOnErr(err) details.ApiUrl = bintrayDetails.GetApiUrl() details.DownloadServerUrl = bintrayDetails.GetDownloadServerUrl() return details, nil } func createBintrayDetails(c *cli.Context, includeConfig bool) (auth.BintrayDetails, error) { if includeConfig { bintrayDetails, err := offerConfig(c) if err != nil { return nil, err } if bintrayDetails != nil { btDetails := auth.NewBintrayDetails() btDetails.SetApiUrl(bintrayDetails.ApiUrl) btDetails.SetDownloadServerUrl(bintrayDetails.DownloadServerUrl) btDetails.SetUser(bintrayDetails.User) btDetails.SetKey(bintrayDetails.Key) btDetails.SetDefPackageLicense(bintrayDetails.DefPackageLicense) return btDetails, nil } } user := c.String("user") key := c.String("key") defaultPackageLicenses := c.String("licenses") if includeConfig && (user == "" || key == "" || defaultPackageLicenses == "") { confDetails, err := commands.GetConfig() if err != nil { return nil, err } if user == "" { user = confDetails.User } if key == "" { key = confDetails.Key } if key == "" { cliutils.ExitOnErr(errors.New("Please set your Bintray API key using the config command or send it as the --key option.")) } if defaultPackageLicenses == "" { defaultPackageLicenses = confDetails.DefPackageLicense } } btDetails := auth.NewBintrayDetails() apiUrl := os.Getenv("JFROG_CLI_BINTRAY_API_URL") if apiUrl != "" { apiUrl = clientutils.AddTrailingSlashIfNeeded(apiUrl) btDetails.SetApiUrl(apiUrl) } downloadServerUrl := os.Getenv("JFROG_CLI_BINTRAY_DOWNLOAD_URL") if downloadServerUrl != "" { downloadServerUrl = clientutils.AddTrailingSlashIfNeeded(downloadServerUrl) btDetails.SetDownloadServerUrl(downloadServerUrl) } btDetails.SetUser(user) btDetails.SetKey(key) btDetails.SetDefPackageLicense(defaultPackageLicenses) return btDetails, nil } func getMinSplitFlag(c *cli.Context) int64 { if c.String("min-split") == "" { return 5120 } minSplit, err := strconv.ParseInt(c.String("min-split"), 10, 64) if err != nil { cliutils.PrintHelpAndExitWithError("The '--min-split' option should have a numeric value.", c) } return minSplit } func getSplitCountFlag(c *cli.Context) int { if c.String("split-count") == "" { return 3 } splitCount, err := strconv.Atoi(c.String("split-count")) if err != nil { cliutils.PrintHelpAndExitWithError("The '--split-count' option should have a numeric value.", c) } if splitCount > 
15 { cliutils.ExitOnErr(errors.New("The '--split-count' option value is limited to a maximum of 15.")) } if splitCount < 0 { cliutils.ExitOnErr(errors.New("The '--split-count' option cannot have a negative value.")) } return splitCount }
[ "\"JFROG_CLI_BINTRAY_API_URL\"", "\"JFROG_CLI_BINTRAY_DOWNLOAD_URL\"" ]
[]
[ "JFROG_CLI_BINTRAY_API_URL", "JFROG_CLI_BINTRAY_DOWNLOAD_URL" ]
[]
["JFROG_CLI_BINTRAY_API_URL", "JFROG_CLI_BINTRAY_DOWNLOAD_URL"]
go
2
0
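
For reference, a minimal Python sketch of the two patterns in the Go snippet above: an optional base-URL override read from JFROG_CLI_BINTRAY_API_URL with trailing-slash normalization, and a '--split-count' style value that defaults to 3 and is bounded to 0..15. The helper names (env_url, parse_split_count) and the fallback URL are illustrative assumptions, not part of the original file.

    import os

    def env_url(name, fallback):
        # Optional base-URL override, normalised to end with a trailing slash
        # (the Go code calls AddTrailingSlashIfNeeded for the same purpose).
        value = os.environ.get(name, fallback)
        if value and not value.endswith("/"):
            value += "/"
        return value

    def parse_split_count(raw, default=3, maximum=15):
        # Absent flag -> default; non-numeric, negative or too-large values are
        # rejected, matching the bounds enforced by getSplitCountFlag above.
        if not raw:
            return default
        try:
            count = int(raw)
        except ValueError:
            raise SystemExit("The '--split-count' option should have a numeric value.")
        if count > maximum:
            raise SystemExit("The '--split-count' option value is limited to a maximum of %d." % maximum)
        if count < 0:
            raise SystemExit("The '--split-count' option cannot have a negative value.")
        return count

    print(env_url("JFROG_CLI_BINTRAY_API_URL", "https://api.bintray.com"))  # trailing slash added
    print(parse_split_count(""))    # 3
    print(parse_split_count("10"))  # 10
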
cloudpebble/wsgi.py
""" WSGI config for cloudpebble project. This module contains the WSGI application used by Django's development server and any production WSGI deployments. It should expose a module-level variable named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover this application via the ``WSGI_APPLICATION`` setting. Usually you will have the standard Django WSGI application here, but it also might make sense to replace the whole Django WSGI application with a custom one that later delegates to the Django one. For example, you could introduce WSGI middleware here, or combine a Django application with an application of another framework. """ import os # We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks # if running multiple sites in the same mod_wsgi process. To fix this, use # mod_wsgi daemon mode with each site in its own daemon process, or use # os.environ["DJANGO_SETTINGS_MODULE"] = "cloudpebble.settings" os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cloudpebble.settings") # This application object is used by any WSGI server configured to use this # file. This includes Django's development server, if the WSGI_APPLICATION # setting points here. from django.core.wsgi import get_wsgi_application from dj_static import Cling application = Cling(get_wsgi_application()) # Apply WSGI middleware here. # from helloworld.wsgi import HelloWorldApplication # application = HelloWorldApplication(application)
[]
[]
[ "DJANGO_SETTINGS_MODULE" ]
[]
["DJANGO_SETTINGS_MODULE"]
python
1
0
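
The key line in the WSGI module above is os.environ.setdefault, which only supplies a fallback when DJANGO_SETTINGS_MODULE is not already exported. A short, framework-free sketch of that behaviour; the second settings module name is a placeholder, not something from the original project.

    import os

    # setdefault only fills in a value when the variable is not already set,
    # so an externally exported DJANGO_SETTINGS_MODULE wins over the fallback.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cloudpebble.settings")
    print(os.environ["DJANGO_SETTINGS_MODULE"])  # cloudpebble.settings (if nothing was exported)

    # A later setdefault never overwrites an explicit assignment.
    os.environ["DJANGO_SETTINGS_MODULE"] = "cloudpebble.settings_production"
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cloudpebble.settings")
    print(os.environ["DJANGO_SETTINGS_MODULE"])  # cloudpebble.settings_production
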
src/data/get_raw_data.py
# the content below is written into a file using writefile magic function import os from dotenv import load_dotenv,find_dotenv from requests import session import logging payload = { 'action' : 'login', 'username' : os.environ.get("KAGGLE_USERNAME"), 'password' : os.environ.get("KAGGLE_PASSWORD") } def extract_data(url, file_path): with session() as c: c.post("https://www.kaggle.com/account/login",data=payload) with open(file_path,'wb') as handle: response = c.get(url,stream=True) for block in response.iter_content(1024): handle.write(block) def main(project_dir): logger = logging.getLogger(__name__) # obtaining the instance of logger logger.info('getting raw data') train_url = 'https://www.kaggle.com/c/titanic/download/train.csv' test_url = 'https://www.kaggle.com/c/titanic/download/test.csv' raw_data_path = os.path.join(project_dir,'data','raw') train_data_path = os.path.join(raw_data_path,'train.csv') test_data_path = os.path.join(raw_data_path,'test.csv') extract_data(train_url,train_data_path) extract_data(test_url,test_data_path) logger.info('downloaded raw training and test data') if __name__ == '__main__': project_dir = os.path.join(os.path.dirname(__file__),os.pardir,os.pardir) # two levels up -- data and then src and then titanic log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s' logging.basicConfig(level=logging.INFO,format=log_fmt) dotenv_path = find_dotenv() load_dotenv(dotenv_path) main(project_dir)
[]
[]
[ "KAGGLE_PASSWORD", "KAGGLE_USERNAME" ]
[]
["KAGGLE_PASSWORD", "KAGGLE_USERNAME"]
python
2
0
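
A hedged sketch of the credential-loading pattern from the script above: python-dotenv copies values from a local .env file into os.environ, after which KAGGLE_USERNAME and KAGGLE_PASSWORD are read back with os.environ.get. The load_credentials helper and its error message are illustrative additions, not part of the original.

    import os
    from dotenv import load_dotenv, find_dotenv  # requires the python-dotenv package

    def load_credentials():
        # Copy any KEY=VALUE pairs from a local .env file into os.environ,
        # then read the Kaggle credentials back, failing loudly if absent.
        load_dotenv(find_dotenv())
        username = os.environ.get("KAGGLE_USERNAME")
        password = os.environ.get("KAGGLE_PASSWORD")
        if not username or not password:
            raise RuntimeError("KAGGLE_USERNAME and KAGGLE_PASSWORD must be set, e.g. in a .env file")
        return username, password

    # Usage:
    # username, password = load_credentials()
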
dbo/dbo.go
package dbo import ( "bytes" "encoding/json" "errors" "fmt" "io/ioutil" "net/http" "os" "github.com/byuoitav/authmiddleware/bearertoken" "github.com/byuoitav/common/log" "github.com/byuoitav/configuration-database-microservice/structs" "github.com/fatih/color" ) // GetData will run a get on the url, and attempt to fill the interface provided from the returned JSON. func GetData(url string, structToFill interface{}) error { log.L.Infof("[dbo] getting data from URL: %s...", url) // Make an HTTP client so we can add custom headers (currently used for adding in the Bearer token for inter-microservice communication) client := &http.Client{} req, err := http.NewRequest("GET", url, nil) if err != nil { return err } err = setToken(req) if err != nil { return err } if req == nil { fmt.Printf("Alert! req is nil!") } resp, err := client.Do(req) if err != nil { color.Set(color.FgHiRed, color.Bold) log.L.Infof("Error on request: %s", err.Error()) color.Unset() return err } b, err := ioutil.ReadAll(resp.Body) if err != nil { return err } if resp.StatusCode != http.StatusOK { errorBytes, err := ioutil.ReadAll(resp.Body) errorString := fmt.Sprintf("Error Code %v. Error String: %s", resp.StatusCode, errorBytes) if err != nil { return err } return errors.New(string(errorString)) } err = json.Unmarshal(b, structToFill) if err != nil { return err } log.L.Infof("[dbo] done getting data from url: %s", url) return nil } func SendData(url string, structToAdd interface{}, structToFill interface{}, method string) error { body, err := json.Marshal(structToAdd) if err != nil { return err } client := &http.Client{} req, _ := http.NewRequest(method, url, bytes.NewBuffer(body)) req.Header.Set("Content-Type", "application/json") err = setToken(req) if err != nil { return err } response, err := client.Do(req) if err != nil { return err } if response.StatusCode != http.StatusOK { errorString, err := ioutil.ReadAll(response.Body) if err != nil { return err } return errors.New(string(errorString)) } jsonArray, err := ioutil.ReadAll(response.Body) if err != nil { return err } err = json.Unmarshal(jsonArray, structToFill) if err != nil { return err } return nil } //PostData hits POST endpoints func PostData(url string, structToAdd interface{}, structToFill interface{}) error { log.L.Infof("[dbo Posting data to URL: %s...", url) return SendData(url, structToAdd, structToFill, "POST") } //PutData hits PUT endpoints func PutData(url string, structToAdd interface{}, structToFill interface{}) error { log.L.Infof("[dbo] Putting data to URL: %v...", url) return SendData(url, structToAdd, structToFill, "PUT") } func setToken(request *http.Request) error { if len(os.Getenv("LOCAL_ENVIRONMENT")) == 0 { log.L.Info("[dbo] adding the bearer token for inter-service communication") token, err := bearertoken.GetToken() if err != nil { return err } request.Header.Set("Authorization", "Bearer "+token.Token) } return nil } //GetAllRawCommands retrieves all the commands func GetAllRawCommands() (commands []structs.RawCommand, err error) { log.L.Info("[dbo] getting all commands.") url := os.Getenv("CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS") + "/devices/commands" err = GetData(url, &commands) if err != nil { color.Set(color.FgHiRed, color.Bold) log.L.Infof("[error]: %s", err.Error()) color.Unset() return } log.L.Info("[dbo] Done.") return } func AddRawCommand(toAdd structs.RawCommand) (structs.RawCommand, error) { log.L.Infof("[dbo] adding raw command: %v to database", toAdd.Name) url := os.Getenv("CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS") + 
"/devices/commands/" + toAdd.Name var toFill structs.RawCommand err := PostData(url, toAdd, &toFill) if err != nil { return structs.RawCommand{}, err } return toFill, nil } func GetRoomByInfo(buildingName string, roomName string) (toReturn structs.Room, err error) { log.L.Infof("[dbo] getting room %s in building %s...", roomName, buildingName) url := fmt.Sprintf("%s/buildings/%s/rooms/%s", os.Getenv("CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS"), buildingName, roomName) err = GetData(url, &toReturn) return } func GetRoomById(roomId int) (*structs.Room, error) { url := fmt.Sprintf("%s/rooms/id/%d", os.Getenv("CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS"), roomId) var room structs.Room err := GetData(url, &room) if err != nil { return &structs.Room{}, err } return &room, nil } // GetDeviceByName simply retrieves a device's information from the databse. func GetDeviceByName(buildingName string, roomName string, deviceName string) (toReturn structs.Device, err error) { err = GetData(os.Getenv("CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS")+"/buildings/"+buildingName+"/rooms/"+roomName+"/devices/"+deviceName, &toReturn) return } func GetDeviceById(id int) (toReturn structs.Device, err error) { url := fmt.Sprintf("%s/devices/%d", os.Getenv("CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS"), id) err = GetData(url, &toReturn) return } // GetDevicesByRoom will jut get the devices based on the room. func GetDevicesByRoom(buildingName string, roomName string) (toReturn []structs.Device, err error) { err = GetData(os.Getenv("CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS")+"/buildings/"+buildingName+"/rooms/"+roomName+"/devices", &toReturn) return } func GetDevicesByRoomId(roomId int) ([]structs.Device, error) { url := fmt.Sprintf("%s/rooms/%d/devices", os.Getenv("CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS"), roomId) var devices []structs.Device err := GetData(url, &devices) if err != nil { return []structs.Device{}, err } return devices, nil } // GetDevicesByBuildingAndRoomAndRole will get the devices with the given role from the DB func GetDevicesByBuildingAndRoomAndRole(building string, room string, roleName string) (toReturn []structs.Device, err error) { err = GetData(os.Getenv("CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS")+"/buildings/"+building+"/rooms/"+room+"/devices/roles/"+roleName, &toReturn) if err != nil { log.L.Infof("%s", color.HiRedString("[error] problem getting device by role: %s", err.Error())) } return } func GetDevicesByRoomIdAndRoleId(roomId, roleId int) ([]structs.Device, error) { url := fmt.Sprintf("%s/rooms/%d/roles/%d", os.Getenv("CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS"), roomId, roleId) var devices []structs.Device err := GetData(url, &devices) if err != nil { return []structs.Device{}, err } return devices, nil } // GetBuildings will return all buildings func GetBuildings() ([]structs.Building, error) { log.L.Info("[dbo] getting all buildings...") url := os.Getenv("CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS") + "/buildings" log.L.Infof("[dbo] url: %s", url) var buildings []structs.Building err := GetData(url, &buildings) return buildings, err } func GetRooms() ([]structs.Room, error) { url := fmt.Sprintf("%s/rooms", os.Getenv("CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS")) var rooms []structs.Room err := GetData(url, &rooms) return rooms, err } // GetRooms returns all the rooms in a given building func GetRoomsByBuilding(building string) ([]structs.Room, error) { log.L.Infof("[dbo] getting all rooms from %v ...", building) url := 
os.Getenv("CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS") + "/buildings/" + building + "/rooms" var rooms []structs.Room err := GetData(url, &rooms) return rooms, err } // GetBuildingByShortname returns a building with a given shortname func GetBuildingByShortname(building string) (structs.Building, error) { url := os.Getenv("CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS") + "/buildings/shortname/" + building var output structs.Building err := GetData(url, &output) if err != nil { return output, err } return output, nil } // AddBuilding func AddBuilding(buildingToAdd structs.Building) (structs.Building, error) { log.L.Infof("[dbo] adding building %v to database", buildingToAdd.Shortname) url := os.Getenv("CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS") + "/buildings/" + buildingToAdd.Shortname var buildingToFill structs.Building err := PostData(url, buildingToAdd, &buildingToFill) if err != nil { return structs.Building{}, err } return buildingToFill, nil } func AddRoom(building string, roomToAdd structs.Room) (structs.Room, error) { log.L.Infof("[dbo] adding room %v to building %v in database", roomToAdd.Name, building) url := os.Getenv("CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS") + "/buildings/" + building + "/rooms/" + roomToAdd.Name var roomToFill structs.Room err := PostData(url, roomToAdd, &roomToFill) if err != nil { return structs.Room{}, err } return roomToFill, nil } func GetDeviceTypes() ([]structs.DeviceType, error) { log.L.Info("[dbo] getting all device types") url := os.Getenv("CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS") + "/devices/types/" var DeviceTypes []structs.DeviceType err := GetData(url, &DeviceTypes) if err != nil { return []structs.DeviceType{}, err } return DeviceTypes, nil } func AddDeviceType(toAdd structs.DeviceType) (structs.DeviceType, error) { log.L.Infof("[dbo] adding device type: %v to database", toAdd.Name) url := os.Getenv("CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS") + "/devices/types/" + toAdd.Name var toFill structs.DeviceType err := PostData(url, toAdd, &toFill) if err != nil { return structs.DeviceType{}, err } return toFill, nil } func GetPowerStates() ([]structs.PowerState, error) { log.L.Info("[dbo] getting all power states") url := os.Getenv("CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS") + "/devices/powerstates/" var powerStates []structs.PowerState err := GetData(url, &powerStates) if err != nil { return []structs.PowerState{}, err } return powerStates, nil } func AddPowerState(toAdd structs.PowerState) (structs.PowerState, error) { log.L.Infof("[dbo] adding power state: %v to database", toAdd.Name) url := os.Getenv("CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS") + "/devices/powerstates/" + toAdd.Name var toFill structs.PowerState err := PostData(url, toAdd, &toFill) if err != nil { return structs.PowerState{}, err } return toFill, nil } func GetMicroservices() ([]structs.Microservice, error) { log.L.Info("[dbo] getting all microservices") url := os.Getenv("CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS") + "/devices/microservices" var microservices []structs.Microservice err := GetData(url, &microservices) if err != nil { return []structs.Microservice{}, err } return microservices, nil } func AddMicroservice(toAdd structs.Microservice) (structs.Microservice, error) { log.L.Infof("[dbo] adding microservice: %v to database", toAdd.Name) url := os.Getenv("CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS") + "/devices/microservices/" + toAdd.Name var toFill structs.Microservice err := PostData(url, toAdd, &toFill) if err != nil { return 
structs.Microservice{}, err } return toFill, nil } func GetEndpoints() ([]structs.Endpoint, error) { log.L.Info("[dbo] getting all endpoints") url := os.Getenv("CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS") + "/devices/endpoints" var endpoints []structs.Endpoint err := GetData(url, &endpoints) if err != nil { return []structs.Endpoint{}, err } return endpoints, nil } func AddEndpoint(toAdd structs.Endpoint) (structs.Endpoint, error) { log.L.Infof("[dbo] adding endpoint: %v to database", toAdd.Name) url := os.Getenv("CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS") + "/devices/endpoints/" + toAdd.Name var toFill structs.Endpoint err := PostData(url, toAdd, &toFill) if err != nil { return structs.Endpoint{}, err } return toFill, nil } func GetPortsByClass(class string) ([]structs.DeviceTypePort, error) { log.L.Infof("[dbo] Getting ports for class %v", class) url := os.Getenv("CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS") + fmt.Sprintf("/classes/%v/ports", class) var ports []structs.DeviceTypePort err := GetData(url, &ports) return ports, err } func GetPorts() ([]structs.PortType, error) { log.L.Info("[dbo] getting all ports") url := os.Getenv("CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS") + "/devices/ports" var ports []structs.PortType err := GetData(url, &ports) if err != nil { return []structs.PortType{}, err } return ports, nil } func AddPort(portToAdd structs.PortType) (structs.PortType, error) { log.L.Infof("[dbo] adding Port: %v to database", portToAdd.Name) url := os.Getenv("CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS") + "/devices/ports/" + portToAdd.Name var portToFill structs.PortType err := PostData(url, portToAdd, &portToFill) if err != nil { return structs.PortType{}, err } return portToFill, nil } func GetDeviceRoleDefinitions() ([]structs.DeviceRoleDef, error) { log.L.Info("[dbo] getting device role definitions") url := os.Getenv("CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS") + "/devices/roledefinitions" var definitions []structs.DeviceRoleDef err := GetData(url, &definitions) if err != nil { return []structs.DeviceRoleDef{}, err } return definitions, nil } func GetDeviceRoleDefinitionById(roleId int) (structs.DeviceRoleDef, error) { url := fmt.Sprintf("%s/devices/roledefinitions/%d", os.Getenv("CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS"), roleId) var toFill structs.DeviceRoleDef err := GetData(url, &toFill) if err != nil { return structs.DeviceRoleDef{}, err } return toFill, nil } func AddRoleDefinition(toAdd structs.DeviceRoleDef) (structs.DeviceRoleDef, error) { log.L.Infof("[dbo] adding role definition: %v to database", toAdd.Name) url := os.Getenv("CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS") + "/devices/roledefinitions/" + toAdd.Name var toFill structs.DeviceRoleDef err := PostData(url, toAdd, &toFill) if err != nil { return structs.DeviceRoleDef{}, err } return toFill, nil } func GetRoomConfigurations() ([]structs.RoomConfiguration, error) { log.L.Info("[dbo] getting room configurations") url := os.Getenv("CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS") + "/configurations" var rcs []structs.RoomConfiguration err := GetData(url, &rcs) if err != nil { return []structs.RoomConfiguration{}, err } return rcs, nil } func GetRoomDesignations() ([]string, error) { log.L.Info("[dbo] getting room designations") url := os.Getenv("CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS") + "/rooms/designations" var toReturn []string err := GetData(url, &toReturn) if err != nil { log.L.Errorf("err: %v", err.Error()) return toReturn, err } return toReturn, nil } func AddDevice(toAdd 
structs.Device) (structs.Device, error) { log.L.Infof("[dbo] adding device: %v to database", toAdd.Name) url := os.Getenv("CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS") + "/buildings/" + toAdd.Building.Shortname + "/rooms/" + toAdd.Room.Name + "/devices/" + toAdd.Name var toFill structs.Device err := PostData(url, toAdd, &toFill) if err != nil { return structs.Device{}, err } return toFill, nil } func GetDeviceClasses() ([]structs.DeviceClass, error) { log.L.Info("[dbo] getting all classes") url := os.Getenv("CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS") + "/devices/classes" var classes []structs.DeviceClass err := GetData(url, &classes) return classes, err } func SetDeviceAttribute(attributeInfo structs.DeviceAttributeInfo) (structs.Device, error) { log.L.Infof("[dbo] Setting device attrbute %v to %v for device %v", attributeInfo.AttributeName, attributeInfo.AttributeValue, attributeInfo.AttributeValue) url := os.Getenv("CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS") + fmt.Sprintf("/devices/attribute") device := structs.Device{} err := PutData(url, attributeInfo, &device) if err != nil { log.L.Errorf("[error] %v", err.Error()) } else { log.L.Info("[dbo] Done.") } return device, err }
[ "\"LOCAL_ENVIRONMENT\"", "\"CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS\"", "\"CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS\"", "\"CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS\"", "\"CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS\"", "\"CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS\"", "\"CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS\"", "\"CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS\"", "\"CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS\"", "\"CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS\"", "\"CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS\"", "\"CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS\"", "\"CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS\"", "\"CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS\"", "\"CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS\"", "\"CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS\"", "\"CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS\"", "\"CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS\"", "\"CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS\"", "\"CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS\"", "\"CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS\"", "\"CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS\"", "\"CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS\"", "\"CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS\"", "\"CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS\"", "\"CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS\"", "\"CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS\"", "\"CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS\"", "\"CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS\"", "\"CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS\"", "\"CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS\"", "\"CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS\"", "\"CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS\"", "\"CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS\"", "\"CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS\"", "\"CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS\"" ]
[]
[ "CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS", "LOCAL_ENVIRONMENT" ]
[]
["CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS", "LOCAL_ENVIRONMENT"]
go
2
0
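
The Go client above builds every URL from CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS and attaches a bearer token only when LOCAL_ENVIRONMENT is unset. Below is a rough stdlib-Python analogue of that pattern, assuming hypothetical config_url/get_data helpers; urllib already raises on HTTP errors, so the explicit status check is kept only to mirror the Go code's handling.

    import json
    import os
    import urllib.request

    def config_url(path):
        # Every endpoint is the microservice address plus a path, as in the Go client.
        base = os.environ.get("CONFIGURATION_DATABASE_MICROSERVICE_ADDRESS", "")
        return base.rstrip("/") + path

    def get_data(path, token=None):
        # Build the request, add the bearer token only outside a local environment,
        # and decode the JSON body into plain Python objects.
        request = urllib.request.Request(config_url(path))
        if not os.environ.get("LOCAL_ENVIRONMENT") and token:
            request.add_header("Authorization", "Bearer " + token)
        with urllib.request.urlopen(request) as response:
            if response.status != 200:  # urlopen raises for HTTP errors; kept to mirror the Go check
                raise RuntimeError("Error Code %d" % response.status)
            return json.loads(response.read())

    # Example (performs a real HTTP call):
    # buildings = get_data("/buildings", token="...")
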
Data Structures/Stack/Balanced Bracket/balanced_bracket.py
#!/bin/python3 import math import os import random import re import sys # Complete the isBalanced function below. def isBalanced(s): left_symbol = [ '{', '[', '('] right_symbol = [ '}', ']', ')'] # fast checking of symbol counting equality for i in range(3): left_count = s.count( left_symbol[i] ) right_count = s.count( right_symbol[i] ) if left_count != right_count: return "NO" _stack = [] for i in range( len(s) ): char = s[i] if char in { '{', '[', '(' } : # push into stack _stack.append( char ) if char in { '}', ']', ')' } : # pop from stack and compare with left symbol index_of_right = right_symbol.index( char ) index_of_left = left_symbol.index( _stack.pop(-1) ) if index_of_left == index_of_right: # match of {}, [], or () pass else: return "NO" if len(_stack) == 0: return "YES" else: return "NO" if __name__ == '__main__': fptr = open(os.environ['OUTPUT_PATH'], 'w') t = int(input()) for t_itr in range(t): s = input() result = isBalanced(s) fptr.write(result + '\n') fptr.close()
[]
[]
[ "OUTPUT_PATH" ]
[]
["OUTPUT_PATH"]
python
1
0
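
The solution above pre-checks per-symbol counts and then pops from a stack, but it can still call _stack.pop() on an empty stack for inputs such as "][" (equal counts, wrong order). A compact variant of the same stack-matching idea with that guard added; the dict-based pairing is an editorial choice, not the original author's code.

    def is_balanced(s):
        # Push opening brackets; on a closer, pop and compare against its partner.
        # The empty-stack guard covers inputs like "][" that pass a per-symbol
        # count check but are still unbalanced.
        pairs = {')': '(', ']': '[', '}': '{'}
        stack = []
        for char in s:
            if char in '([{':
                stack.append(char)
            elif char in pairs:
                if not stack or stack.pop() != pairs[char]:
                    return "NO"
        return "YES" if not stack else "NO"

    assert is_balanced("{[()]}") == "YES"
    assert is_balanced("{[(])}") == "NO"
    assert is_balanced("][") == "NO"
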
PaddleNLP/Research/ACL2018-DAM/main.py
""" Deep Attention Matching Network """ import sys import os import six import numpy as np import time import multiprocessing import paddle import paddle.fluid as fluid import reader as reader from util import mkdir import evaluation as eva import config try: import cPickle as pickle #python 2 except ImportError as e: import pickle #python 3 from model_check import check_cuda from net import Net def evaluate(score_path, result_file_path): """ Evaluate both douban and ubuntu dataset """ if args.ext_eval: result = eva.evaluate_douban(score_path) else: result = eva.evaluate_ubuntu(score_path) #write evaluation result with open(result_file_path, 'w') as out_file: for p_at in result: out_file.write(p_at + '\t' + str(result[p_at]) + '\n') print('finish evaluation') print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))) def test_with_feed(exe, program, feed_names, fetch_list, score_path, batches, batch_num, dev_count): """ Test with feed """ score_file = open(score_path, 'w') for it in six.moves.xrange(batch_num // dev_count): feed_list = [] for dev in six.moves.xrange(dev_count): val_index = it * dev_count + dev batch_data = reader.make_one_batch_input(batches, val_index) feed_dict = dict(zip(feed_names, batch_data)) feed_list.append(feed_dict) predicts = exe.run(feed=feed_list, fetch_list=fetch_list) scores = np.array(predicts[0]) for dev in six.moves.xrange(dev_count): val_index = it * dev_count + dev for i in six.moves.xrange(args.batch_size): score_file.write( str(scores[args.batch_size * dev + i][0]) + '\t' + str( batches["label"][val_index][i]) + '\n') score_file.close() def test_with_pyreader(exe, program, pyreader, fetch_list, score_path, batches, batch_num, dev_count): """ Test with pyreader """ def data_provider(): """ Data reader """ for index in six.moves.xrange(batch_num): yield reader.make_one_batch_input(batches, index) score_file = open(score_path, 'w') pyreader.decorate_tensor_provider(data_provider) it = 0 pyreader.start() while True: try: predicts = exe.run(fetch_list=fetch_list) scores = np.array(predicts[0]) for dev in six.moves.xrange(dev_count): val_index = it * dev_count + dev for i in six.moves.xrange(args.batch_size): score_file.write( str(scores[args.batch_size * dev + i][0]) + '\t' + str( batches["label"][val_index][i]) + '\n') it += 1 except fluid.core.EOFException: pyreader.reset() break score_file.close() def train(args): """ Train Program """ if not os.path.exists(args.save_path): os.makedirs(args.save_path) # data data_config data_conf = { "batch_size": args.batch_size, "max_turn_num": args.max_turn_num, "max_turn_len": args.max_turn_len, "_EOS_": args._EOS_, } dam = Net(args.max_turn_num, args.max_turn_len, args.vocab_size, args.emb_size, args.stack_num, args.channel1_num, args.channel2_num) train_program = fluid.Program() train_startup = fluid.Program() if "CE_MODE_X" in os.environ: train_program.random_seed = 110 train_startup.random_seed = 110 with fluid.program_guard(train_program, train_startup): with fluid.unique_name.guard(): if args.use_pyreader: train_pyreader = dam.create_py_reader( capacity=10, name='train_reader') else: dam.create_data_layers() loss, logits = dam.create_network() loss.persistable = True logits.persistable = True # gradient clipping fluid.clip.set_gradient_clip(clip=fluid.clip.GradientClipByValue( max=1.0, min=-1.0)) optimizer = fluid.optimizer.Adam( learning_rate=fluid.layers.exponential_decay( learning_rate=args.learning_rate, decay_steps=400, decay_rate=0.9, staircase=True)) optimizer.minimize(loss) print("begin 
memory optimization ...") print( time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))) fluid.memory_optimize(train_program) print("end memory optimization ...") print( time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))) test_program = fluid.Program() test_startup = fluid.Program() if "CE_MODE_X" in os.environ: test_program.random_seed = 110 test_startup.random_seed = 110 with fluid.program_guard(test_program, test_startup): with fluid.unique_name.guard(): if args.use_pyreader: test_pyreader = dam.create_py_reader( capacity=10, name='test_reader') else: dam.create_data_layers() loss, logits = dam.create_network() loss.persistable = True logits.persistable = True test_program = test_program.clone(for_test=True) if args.use_cuda: place = fluid.CUDAPlace(0) dev_count = fluid.core.get_cuda_device_count() else: place = fluid.CPUPlace() dev_count = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count())) print("device count %d" % dev_count) print("theoretical memory usage: ") print( fluid.contrib.memory_usage( program=train_program, batch_size=args.batch_size)) exe = fluid.Executor(place) exe.run(train_startup) exe.run(test_startup) train_exe = fluid.ParallelExecutor( use_cuda=args.use_cuda, loss_name=loss.name, main_program=train_program) test_exe = fluid.ParallelExecutor( use_cuda=args.use_cuda, main_program=test_program, share_vars_from=train_exe) if args.word_emb_init is not None: print("start loading word embedding init ...") if six.PY2: word_emb = np.array(pickle.load(open(args.word_emb_init, 'rb'))).astype('float32') else: word_emb = np.array( pickle.load( open(args.word_emb_init, 'rb'), encoding="bytes")).astype( 'float32') dam.set_word_embedding(word_emb, place) print("finish init word embedding ...") print("start loading data ...") with open(args.data_path, 'rb') as f: if six.PY2: train_data, val_data, test_data = pickle.load(f) else: train_data, val_data, test_data = pickle.load(f, encoding="bytes") print("finish loading data ...") val_batches = reader.build_batches(val_data, data_conf) batch_num = len(train_data[six.b('y')]) // args.batch_size val_batch_num = len(val_batches["response"]) print_step = max(1, batch_num // (dev_count * 100)) save_step = max(1, batch_num // (dev_count * 10)) print("begin model training ...") print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))) def train_with_feed(step): """ Train on one epoch data by feeding """ ave_cost = 0.0 for it in six.moves.xrange(batch_num // dev_count): feed_list = [] for dev in six.moves.xrange(dev_count): index = it * dev_count + dev batch_data = reader.make_one_batch_input(train_batches, index) feed_dict = dict(zip(dam.get_feed_names(), batch_data)) feed_list.append(feed_dict) cost = train_exe.run(feed=feed_list, fetch_list=[loss.name]) ave_cost += np.array(cost[0]).mean() step = step + 1 if step % print_step == 0: print("processed: [" + str(step * dev_count * 1.0 / batch_num) + "] ave loss: [" + str(ave_cost / print_step) + "]") ave_cost = 0.0 if (args.save_path is not None) and (step % save_step == 0): save_path = os.path.join(args.save_path, "step_" + str(step)) print("Save model at step %d ... " % step) print( time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))) fluid.io.save_persistables(exe, save_path, train_program) score_path = os.path.join(args.save_path, 'score.' + str(step)) test_with_feed(test_exe, test_program, dam.get_feed_names(), [logits.name], score_path, val_batches, val_batch_num, dev_count) result_file_path = os.path.join(args.save_path, 'result.' 
+ str(step)) evaluate(score_path, result_file_path) return step, np.array(cost[0]).mean() def train_with_pyreader(step): """ Train on one epoch with pyreader """ def data_provider(): """ Data reader """ for index in six.moves.xrange(batch_num): yield reader.make_one_batch_input(train_batches, index) train_pyreader.decorate_tensor_provider(data_provider) ave_cost = 0.0 train_pyreader.start() while True: try: cost = train_exe.run(fetch_list=[loss.name]) ave_cost += np.array(cost[0]).mean() step = step + 1 if step % print_step == 0: print("processed: [" + str(step * dev_count * 1.0 / batch_num) + "] ave loss: [" + str(ave_cost / print_step) + "]") ave_cost = 0.0 if (args.save_path is not None) and (step % save_step == 0): save_path = os.path.join(args.save_path, "step_" + str(step)) print("Save model at step %d ... " % step) print( time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))) fluid.io.save_persistables(exe, save_path, train_program) score_path = os.path.join(args.save_path, 'score.' + str(step)) test_with_pyreader(test_exe, test_program, test_pyreader, [logits.name], score_path, val_batches, val_batch_num, dev_count) result_file_path = os.path.join(args.save_path, 'result.' + str(step)) evaluate(score_path, result_file_path) except fluid.core.EOFException: train_pyreader.reset() break return step, np.array(cost[0]).mean() # train over different epoches global_step, train_time = 0, 0.0 for epoch in six.moves.xrange(args.num_scan_data): shuffle_train = reader.unison_shuffle( train_data, seed=110 if ("CE_MODE_X" in os.environ) else None) train_batches = reader.build_batches(shuffle_train, data_conf) begin_time = time.time() if args.use_pyreader: global_step, last_cost = train_with_pyreader(global_step) else: global_step, last_cost = train_with_feed(global_step) pass_time_cost = time.time() - begin_time train_time += pass_time_cost print("Pass {0}, pass_time_cost {1}" .format(epoch, "%2.2f sec" % pass_time_cost)) # For internal continuous evaluation if "CE_MODE_X" in os.environ: card_num = get_cards() print("kpis\ttrain_cost_card%d\t%f" % (card_num, last_cost)) print("kpis\ttrain_duration_card%d\t%f" % (card_num, train_time)) def test(args): """ Test """ if not os.path.exists(args.save_path): mkdir(args.save_path) if not os.path.exists(args.model_path): raise ValueError("Invalid model init path %s" % args.model_path) # data data_config data_conf = { "batch_size": args.batch_size, "max_turn_num": args.max_turn_num, "max_turn_len": args.max_turn_len, "_EOS_": args._EOS_, } dam = Net(args.max_turn_num, args.max_turn_len, args.vocab_size, args.emb_size, args.stack_num, args.channel1_num, args.channel2_num) dam.create_data_layers() loss, logits = dam.create_network() loss.persistable = True logits.persistable = True # gradient clipping fluid.clip.set_gradient_clip(clip=fluid.clip.GradientClipByValue( max=1.0, min=-1.0)) test_program = fluid.default_main_program().clone(for_test=True) optimizer = fluid.optimizer.Adam( learning_rate=fluid.layers.exponential_decay( learning_rate=args.learning_rate, decay_steps=400, decay_rate=0.9, staircase=True)) optimizer.minimize(loss) print("begin memory optimization ...") print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))) fluid.memory_optimize(fluid.default_main_program()) print("end memory optimization ...") print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))) if args.use_cuda: place = fluid.CUDAPlace(0) dev_count = fluid.core.get_cuda_device_count() else: place = fluid.CPUPlace() #dev_count = 
multiprocessing.cpu_count() dev_count = 1 exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) fluid.io.load_persistables(exe, args.model_path) test_exe = fluid.ParallelExecutor( use_cuda=args.use_cuda, main_program=test_program) print("start loading data ...") with open(args.data_path, 'rb') as f: if six.PY2: train_data, val_data, test_data = pickle.load(f) else: train_data, val_data, test_data = pickle.load(f, encoding="bytes") print("finish loading data ...") test_batches = reader.build_batches(test_data, data_conf) test_batch_num = len(test_batches["response"]) print("test batch num: %d" % test_batch_num) print("begin inference ...") print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))) score_path = os.path.join(args.save_path, 'score.txt') score_file = open(score_path, 'w') for it in six.moves.xrange(test_batch_num // dev_count): feed_list = [] for dev in six.moves.xrange(dev_count): index = it * dev_count + dev batch_data = reader.make_one_batch_input(test_batches, index) feed_dict = dict(zip(dam.get_feed_names(), batch_data)) feed_list.append(feed_dict) predicts = test_exe.run(feed=feed_list, fetch_list=[logits.name]) scores = np.array(predicts[0]) print("step = %d" % it) for dev in six.moves.xrange(dev_count): index = it * dev_count + dev for i in six.moves.xrange(args.batch_size): score_file.write( str(scores[args.batch_size * dev + i][0]) + '\t' + str( test_batches["label"][index][i]) + '\n') score_file.close() #write evaluation result if args.ext_eval: result = eva.evaluate_douban(score_path) else: result = eva.evaluate_ubuntu(score_path) result_file_path = os.path.join(args.save_path, 'result.txt') with open(result_file_path, 'w') as out_file: for metric in result: out_file.write(metric + '\t' + str(result[metric]) + '\n') print('finish test') print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))) def get_cards(): num = 0 cards = os.environ.get('CUDA_VISIBLE_DEVICES', '') if cards != '': num = len(cards.split(",")) return num if __name__ == '__main__': args = config.parse_args() config.print_arguments(args) check_cuda(args.use_cuda) if args.do_train: train(args) if args.do_test: test(args)
[]
[]
[ "CUDA_VISIBLE_DEVICES", "CPU_NUM" ]
[]
["CUDA_VISIBLE_DEVICES", "CPU_NUM"]
python
2
0
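
The training script above reads two environment variables for device selection: CPU_NUM as the CPU fallback for the number of executor places, and CUDA_VISIBLE_DEVICES, counted by get_cards for continuous-evaluation reporting. A small framework-free sketch of just that environment handling; it does not reproduce the PaddlePaddle executor setup.

    import multiprocessing
    import os

    def get_cards():
        # Same counting rule as get_cards() in the script: the number of GPUs
        # named in CUDA_VISIBLE_DEVICES, or 0 when the variable is empty or unset.
        cards = os.environ.get("CUDA_VISIBLE_DEVICES", "")
        return len(cards.split(",")) if cards else 0

    def cpu_dev_count():
        # CPU fallback used when use_cuda is false: CPU_NUM if exported,
        # otherwise the machine's core count.
        return int(os.environ.get("CPU_NUM", multiprocessing.cpu_count()))

    if __name__ == "__main__":
        print("gpu cards:", get_cards(), "cpu places:", cpu_dev_count())
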
pkg/servicemesh/apis/servicemesh/v1/doc.go
// +k8s:deepcopy-gen=package // +k8s:defaulter-gen=TypeMeta // +groupName=maistra.io package v1
[]
[]
[]
[]
[]
go
null
null
null
vendor/github.com/anacrolix/utp/utp.go
// Package utp implements uTP, the micro transport protocol as used with // Bittorrent. It opts for simplicity and reliability over strict adherence to // the (poor) spec. It allows using the underlying OS-level transport despite // dispatching uTP on top to allow for example, shared socket use with DHT. // Additionally, multiple uTP connections can share the same OS socket, to // truly realize uTP's claim to be light on system and network switching // resources. // // Socket is a wrapper of net.UDPConn, and performs dispatching of uTP packets // to attached uTP Conns. Dial and Accept is done via Socket. Conn implements // net.Conn over uTP, via aforementioned Socket. package utp import ( "context" "errors" "fmt" "net" "os" "strconv" "sync" "time" pprofsync "github.com/anacrolix/sync" ) const ( // Maximum received SYNs that haven't been accepted. If more SYNs are // received, a pseudo randomly selected SYN is replied to with a reset to // make room. backlog = 50 // IPv6 min MTU is 1280, -40 for IPv6 header, and ~8 for fragment header? minMTU = 1438 // Why? // uTP header of 20, +2 for the next extension, and an optional selective // ACK. maxHeaderSize = 20 + 2 + (((maxUnackedInbound+7)/8)+3)/4*4 maxPayloadSize = minMTU - maxHeaderSize maxRecvSize = 0x2000 // Maximum out-of-order packets to buffer. maxUnackedInbound = 256 maxUnackedSends = 256 readBufferLen = 1 << 20 // ~1MiB // How long to wait before sending a state packet, after one is required. // This prevents spamming a state packet for every packet received, and // non-state packets that are being sent also fill the role. pendingSendStateDelay = 500 * time.Microsecond ) var ( sendBufferPool = sync.Pool{ New: func() interface{} { return make([]byte, minMTU) }, } // This is the latency we assume on new connections. It should be higher // than the latency we expect on most connections to prevent excessive // resending to peers that take a long time to respond, before we've got a // better idea of their actual latency. initialLatency time.Duration // If a write isn't acked within this period, destroy the connection. writeTimeout time.Duration // Assume the connection has been closed by the peer getting no packets of // any kind for this time. packetReadTimeout time.Duration ) func setDefaultDurations() { // An approximate upper bound for most connections across the world. initialLatency = 400 * time.Millisecond // Getting no reply for this period for a packet, we can probably rule out // latency and client lag. writeTimeout = 15 * time.Second // Somewhere longer than the BitTorrent grace period (90-120s), and less // than default TCP reset (4min). packetReadTimeout = 2 * time.Minute } func init() { setDefaultDurations() } // Strongly-type guarantee of resolved network address. type resolvedAddrStr string type read struct { data []byte from net.Addr } type syn struct { seq_nr, conn_id uint16 // net.Addr.String() of a Socket's real net.PacketConn. 
addr string } var ( mu pprofsync.RWMutex sockets = map[*Socket]struct{}{} logLevel = 0 artificialPacketDropChance = 0.0 ) func init() { logLevel, _ = strconv.Atoi(os.Getenv("GO_UTP_LOGGING")) fmt.Sscanf(os.Getenv("GO_UTP_PACKET_DROP"), "%f", &artificialPacketDropChance) } var ( errClosed = errors.New("closed") errTimeout net.Error = timeoutError{"i/o timeout"} errAckTimeout = timeoutError{"timed out waiting for ack"} ) type timeoutError struct { msg string } func (me timeoutError) Timeout() bool { return true } func (me timeoutError) Error() string { return me.msg } func (me timeoutError) Temporary() bool { return false } type st int func (me st) String() string { switch me { case stData: return "stData" case stFin: return "stFin" case stState: return "stState" case stReset: return "stReset" case stSyn: return "stSyn" default: panic(fmt.Sprintf("%d", me)) } } const ( stData st = 0 stFin = 1 stState = 2 stReset = 3 stSyn = 4 // Used for validating packet headers. stMax = stSyn ) type recv struct { seen bool data []byte Type st } // Attempt to connect to a remote uTP listener, creating a Socket just for // this connection. func Dial(addr string) (net.Conn, error) { return DialContext(context.Background(), addr) } // Same as Dial with a timeout parameter. Creates a Socket just for the // connection, which will be closed with the Conn is. To reuse another Socket, // see Socket.Dial. func DialContext(ctx context.Context, addr string) (nc net.Conn, err error) { s, err := NewSocket("udp", ":0") if err != nil { return } defer s.Close() return s.DialContext(ctx, addr) } // Listen creates listener Socket to accept incoming connections. func Listen(laddr string) (net.Listener, error) { return NewSocket("udp", laddr) } func nowTimestamp() uint32 { return uint32(time.Now().UnixNano() / int64(time.Microsecond)) } func seqLess(a, b uint16) bool { if b < 0x8000 { return a < b || a >= b-0x8000 } else { return a < b && a >= b-0x8000 } }
[ "\"GO_UTP_LOGGING\"", "\"GO_UTP_PACKET_DROP\"" ]
[]
[ "GO_UTP_PACKET_DROP", "GO_UTP_LOGGING" ]
[]
["GO_UTP_PACKET_DROP", "GO_UTP_LOGGING"]
go
2
0
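
Two small pieces of the uTP code above translate cleanly: the init() that reads GO_UTP_LOGGING as an int and GO_UTP_PACKET_DROP as a float (both defaulting silently), and seqLess, the wrap-around comparison of 16-bit sequence numbers. The sketch below covers both, with the comparison rewritten as a modular-distance check intended to be equivalent to the Go branches; the function names are illustrative.

    import os

    def parse_utp_env():
        # Logging level as an int, artificial packet-drop chance as a float;
        # both fall back to zero on missing or malformed values, as in init().
        try:
            log_level = int(os.environ.get("GO_UTP_LOGGING", "0"))
        except ValueError:
            log_level = 0
        try:
            drop_chance = float(os.environ.get("GO_UTP_PACKET_DROP", "0"))
        except ValueError:
            drop_chance = 0.0
        return log_level, drop_chance

    def seq_less(a, b):
        # a precedes b when b is at most half the 16-bit sequence space ahead,
        # i.e. the forward distance (b - a) mod 2^16 lies in [1, 0x8000].
        return 1 <= (b - a) % 0x10000 <= 0x8000

    assert seq_less(1, 2)
    assert not seq_less(2, 1)
    assert seq_less(0xFFFF, 0x0001)      # wraps around zero
    assert not seq_less(0x0001, 0xFFFF)
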
api/api/wsgi.py
""" WSGI config for api project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'api.settings') application = get_wsgi_application()
[]
[]
[]
[]
[]
python
0
0
utils/example_plots/methods_paper_plots/fig_5_HR_diagram/pythonSubmit.py
import numpy as np import sys import os import re import ntpath from subprocess import call #### DISCLAIMER: This script uses the `pythonSubmit.py` format #### that has been replaced by the `runSubmit.py` and #### `compasConfigDefault.yaml` combo as of v02.25.10. #### The `pythonSubmit.py` format will eventually become deprecated. # Check if we are using python 3 python_version = sys.version_info[0] print("python_version =", python_version) class pythonProgramOptions: """ A class to store and access COMPAS program options in python """ # Do './COMPAS --help' to see all options #-- Define variables # environment variable COMPAS_EXECUTABLE_PATH is used for docker runs # if COMPAS_EXECUTABLE_PATH is not set (== None) we assume this is an # interactive run with python3 # if COMPAS_EXECUTABLE_PATH is set (!= None) we assume this is a run # inside a docker container - we have different directories inside a # docker container (src, obj, bin), and the COMPAS executable resides # in the bin directory (rather than the src directory) compas_executable_override = os.environ.get('COMPAS_EXECUTABLE_PATH') if (compas_executable_override is None): # we should fix this one day - we should not assume that the COMPAS executable # is in the 'src' directory. The standard is to put the object files created # by the compile into the 'obj' directory, and the executable files created by # the link in the 'bin' directory. # # for now though, because this is how everybody expects it to be, we'll just check # that the path to the root directory (the parent directory of the directory in # which we expect the executable to reside - for now, 'src') is set to something. compas_root_dir = os.environ.get('COMPAS_ROOT_DIR') assert compas_root_dir is not None, "Unable to locate the COMPAS executable: check that the environment variable COMPAS_ROOT_DIR is set correctly, and the COMPAS executable exists." # construct path to executable # # ideally we wouldn't have the 'src' directory name (or any other directory name) # prepended to the executable name - if we just execute the executable name on its # own, as long as the user navigates to the directory in which the executable resides # they don't need to set the COMPAS_ROOT_DIR environment variable compas_executable = os.path.join(compas_root_dir, 'src/COMPAS') else: compas_executable = compas_executable_override # check that a file with the correct name exists where we expect it to assert os.path.isfile(compas_executable), "Unable to locate the COMPAS executable: check that the environment variable COMPAS_ROOT_DIR is set correctly, and the COMPAS executable exists." enable_warnings = False # option to enable/disable warning messages number_of_systems = 10 # number of systems per batch populationPrinting = False randomSeedFileName = 'randomSeed.txt' if os.path.isfile(randomSeedFileName): random_seed = int(np.loadtxt(randomSeedFileName)) else: random_seed = 0 # If you want a random seed, use: np.random.randint(2,2**63-1) # environment variable COMPAS_LOGS_OUTPUT_DIR_PATH is used primarily for docker runs # if COMPAS_LOGS_OUTPUT_DIR_PATH is set (!= None) it is used as the value for the # --output-path option # if COMPAS_LOGS_OUTPUT_DIR_PATH is not set (== None) the current working directory # is used as the value for the --output-path option compas_logs_output_override = os.environ.get('COMPAS_LOGS_OUTPUT_DIR_PATH') if (compas_logs_output_override is None): output = os.getcwd() output_container = None # names the directory to be created and in which log files are created. 
Default in COMPAS is "COMPAS_Output" else: output = compas_logs_output_override output_container = None # environment variable COMPAS_INPUT_DIR_PATH is used primarily for docker runs # if COMPAS_INPUT_DIR_PATH is set (!= None) it is prepended to input filenames # (such as grid_filename and logfile_definitions) # if COMPAS_INPUT_DIR_PATH is not set (== None) the current working directory # is prepended to input filenames compas_input_path_override = os.environ.get('COMPAS_INPUT_DIR_PATH') #-- option to make a grid of hyperparameter values at which to produce populations. #-- If this is set to true, it will divide the number_of_binaries parameter equally #-- amoungst the grid points (as closely as possible). See the hyperparameterGrid method below #-- for more details. If this is set to True, some hyperparameter values defined in this method'gridOutputs/'+str(i) #-- will be overwritten hyperparameterGrid = False hyperparameterList = False shareSeeds = False notes_hdrs = None # no annotations header strings (no annotations) notes = None # no annotations mode = 'BSE' # evolving single stars (SSE) or binaries (BSE)? grid_filename = 'grid.txt' # grid file name (e.g. 'mygrid.txt') if grid_filename != None: # if the grid filename supplied is already fully-qualified, leave it as is head, tail = ntpath.split(grid_filename) # split into pathname and base filename if head == '' or head == '.': # no path (or CWD) - add path as required grid_filename = tail or ntpath.basename(head) if compas_input_path_override == None: grid_filename = os.getcwd() + '/' + grid_filename.strip("'\"") else: grid_filename = compas_input_path_override + '/' + grid_filename.strip("'\"") logfile_definitions = None # logfile record definitions file name (e.g. 'logdefs.txt') if logfile_definitions != None: # if the grid filename supplied is already fully-qualified, leave it as is head, tail = ntpath.split(logfile_definitions) # split into pathname and base filename if head == '' or head == '.': # no path (or CWD) - add path as required logfile_definitions = tail or ntpath.basename(head) if compas_input_path_override == None: logfile_definitions = os.getcwd() + '/' + logfile_definitions.strip("'\"") else: logfile_definitions = compas_input_path_override + '/' + logfile_definitions.strip("'\"") initial_mass = None # initial mass for SSE initial_mass_1 = None # primary initial mass for BSE initial_mass_2 = None # secondary initial mass for BSE mass_ratio = None eccentricity = None # eccentricity for BSE semi_major_axis = None # semi-major axis for BSE orbital_period = None # orbital period for BSE use_mass_loss = True mass_transfer = True detailed_output = True # WARNING: this creates a data heavy file RLOFPrinting = True evolve_unbound_systems = False quiet = False metallicity = 0.0142 # metallicity for both SSE and BSE - Solar metallicity Asplund+2010 allow_rlof_at_birth = True # allow binaries that have one or both stars in RLOF at birth to evolve? allow_touching_at_birth = False # record binaries that have stars touching at birth in output files? chemically_homogeneous_evolution = 'PESSIMISTIC' # chemically homogeneous evolution. 
Options are 'NONE', 'OPTIMISTIC' and 'PESSIMISTIC' switch_log = False common_envelope_alpha = 1.0 common_envelope_lambda = 0.1 # Only if using 'LAMBDA_FIXED' common_envelope_lambda_prescription = 'LAMBDA_NANJING' # Xu & Li 2010 common_envelope_slope_Kruckow = -5.0/6.0 stellar_zeta_prescription = 'SOBERMAN' common_envelope_revised_energy_formalism = False common_envelope_maximum_donor_mass_revised_energy_formalism = 2.0 common_envelope_recombination_energy_density = 1.5E13 common_envelope_alpha_thermal = 1.0 # lambda = alpha_th*lambda_b + (1-alpha_th)*lambda_g common_envelope_lambda_multiplier = 1.0 # Multiply common envelope lambda by some constant common_envelope_allow_main_sequence_survive = True # Allow main sequence stars to survive CE. Was previously False by default common_envelope_mass_accretion_prescription = 'ZERO' common_envelope_mass_accretion_min = 0.04 # For 'MACLEOD+2014' [Msol] common_envelope_mass_accretion_max = 0.10 # For 'MACLEOD+2014' [Msol] envelope_state_prescription = 'LEGACY' common_envelope_allow_radiative_envelope_survive = False common_envelope_allow_immediate_RLOF_post_CE_survive = False mass_loss_prescription = 'VINK' luminous_blue_variable_prescription = 'HURLEY_ADD' luminous_blue_variable_multiplier = 1.5 overall_wind_mass_loss_multiplier = 1.0 wolf_rayet_multiplier = 1.0 cool_wind_mass_loss_multiplier = 1.0 check_photon_tiring_limit = False circularise_binary_during_mass_transfer = True angular_momentum_conservation_during_circularisation = False mass_transfer_angular_momentum_loss_prescription = 'ISOTROPIC' mass_transfer_accretion_efficiency_prescription = 'THERMAL' mass_transfer_fa = 0.5 # Only if using mass_transfer_accretion_efficiency_prescription = 'FIXED' mass_transfer_jloss = 1.0 # Only if using mass_transfer_angular_momentum_loss_prescription = 'FIXED' mass_transfer_rejuvenation_prescription = 'STARTRACK' mass_transfer_thermal_limit_accretor= 'CFACTOR' mass_transfer_thermal_limit_C= 10.0 eddington_accretion_factor = 1 # multiplication Factor for eddington accretion onto NS&BH case_BB_stability_prescription = 'ALWAYS_STABLE' zeta_Main_Sequence = 2.0 zeta_Radiative_Envelope_Giant = 6.5 maximum_evolution_time = 13700.0 # Maximum physical time a system can be evolved [Myrs] maximum_number_timesteps = 99999 timestep_multiplier = 0.1 # Optional multiplier relative to default time step duration initial_mass_function = 'KROUPA' initial_mass_min = 5.0 # Use 1.0 for LRNe, 5.0 for DCOs [Msol] initial_mass_max = 150.0 # Stellar tracks extrapolated above 50 Msol (Hurley+2000) [Msol] initial_mass_power = 0.0 semi_major_axis_distribution = 'FLATINLOG' semi_major_axis_min = 0.01 # [AU] semi_major_axis_max = 1000.0 # [AU] orbital_period_distribution = 'FLATINLOG' orbital_period_min = 1.1 # [days] orbital_period_max = 1000 # [days] mass_ratio_distribution = 'FLAT' mass_ratio_min = 0.01 mass_ratio_max = 1.0 minimum_secondary_mass = 0.1 # Brown dwarf limit [Msol] eccentricity_distribution = 'ZERO' eccentricity_min = 0.0 eccentricity_max = 1.0 metallicity_distribution = 'ZSOLAR' metallicity_min = 0.0001 metallicity_max = 0.03 pulsar_birth_magnetic_field_distribution = 'ZERO' pulsar_birth_magnetic_field_min = 11.0 # [log10(B/G)] pulsar_birth_magnetic_field_max = 13.0 # [log10(B/G)] pulsar_birth_spin_period_distribution = "ZERO" pulsar_birth_spin_period_min = 10.0 # [ms] pulsar_birth_spin_period_max = 100.0 # [ms] pulsar_magnetic_field_decay_timescale = 1000.0 # [Myr] pulsar_magnetic_field_decay_massscale = 0.025 # [Msol] pulsar_minimum_magnetic_field = 8.0 # [log10(B/G)] 
evolvePulsars = False rotational_velocity_distribution = 'ZERO' neutron_star_equation_of_state = 'SSE' neutrino_mass_loss_BH_formation = "FIXED_MASS" # "FIXED_FRACTION" neutrino_mass_loss_BH_formation_value = 0.1 # Either fraction or mass (Msol) to lose remnant_mass_prescription = 'FRYER2012' # fryer_supernova_engine = 'DELAYED' black_hole_kicks = 'FALLBACK' kick_magnitude_distribution = 'MAXWELLIAN' kick_magnitude_sigma_CCSN_NS = 265.0 # [km/s] kick_magnitude_sigma_CCSN_BH = 265.0 # [km/s] kick_magnitude_sigma_ECSN = 30.0 # [km/s] kick_magnitude_sigma_USSN = 30.0 # [km/s] fix_dimensionless_kick_magnitude = -1 kick_direction = 'ISOTROPIC' kick_direction_power = 0.0 kick_scaling_factor = 1.0 kick_magnitude_maximum = -1.0 kick_magnitude_random = None # (SSE) used to draw the kick magnitude for the star should it undergo a supernova event kick_magnitude = None # (SSE) (drawn) kick magnitude for the star should it undergo a supernova event [km/s] kick_magnitude_random_1 = None # (BSE) used to draw the kick magnitude for the primary star should it undergo a supernova event kick_magnitude_1 = None # (BSE) (drawn) kick magnitude for the primary star should it undergo a supernova event [km/s] kick_theta_1 = None # (BSE) angle between the orbital plane and the 'z' axis of the supernova vector for the primary star should it undergo a supernova event [radians] kick_phi_1 = None # (BSE) angle between 'x' and 'y', both in the orbital plane of the supernova vector, for the primary star should it undergo a supernova event [radians] kick_mean_anomaly_1 = None # (BSE) mean anomaly at the instant of the supernova for the primary star should it undergo a supernova event - should be uniform in [0, 2pi) [radians] kick_magnitude_random_2 = None # (BSE) used to draw the kick velocity for the secondary star should it undergo a supernova event kick_magnitude_2 = None # (BSE) (drawn) kick magnitude for the secondary star should it undergo a supernova event [km/s] kick_theta_2 = None # (BSE) angle between the orbital plane and the 'z' axis of the supernova vector for the secondary star should it undergo a supernova event [radians] kick_phi_2 = None # (BSE) angle between 'x' and 'y', both in the orbital plane of the supernova vector, for the secondary star should it undergo a supernova event [radians] kick_mean_anomaly_2 = None # (BSE) mean anomaly at the instant of the supernova for the secondary star should it undergo a supernova event - should be uniform in [0, 2pi) [radians] muller_mandel_kick_multiplier_BH = 200.0 # scaling prefactor for BH kicks when using the 'MULLERMANDEL' kick magnitude distribution muller_mandel_kick_multiplier_NS = 400.0 # scaling prefactor for NS kicks when using the 'MULLERMANDEL' kick magnitude distribution pair_instability_supernovae = True PISN_lower_limit = 60.0 # Minimum core mass for PISN [Msol] PISN_upper_limit = 135.0 # Maximum core mass for PISN [Msol] pulsation_pair_instability = True PPI_lower_limit = 35.0 # Minimum core mass for PPI [Msol] PPI_upper_limit = 60.0 # Maximum core mass for PPI [Msol] pulsational_pair_instability_prescription = 'MARCHANT' maximum_neutron_star_mass = 2.5 # [Msol] add_options_to_sysparms = 'GRID' # should all option values be added to system parameters files? options are 'ALWAYS', 'GRID', and 'NEVER' log_level = 0 log_classes = [] debug_level = 0 debug_classes = [] logfile_name_prefix = None logfile_type = 'HDF5' hdf5_chunk_size = 100000 hdf5_buffer_size = 1 # set the logfile names here # # set to None (e.g. 
logfile_BSE_supernovae = None) to use the default filename # set to a string (e.g. logfile_BSE_supernovae = 'mySNfilename') to use that string as the filename # set to empty string (e.g. logfile_BSE_supernovae = '""') to disable logging for that file (the file will not be created) # # We don't really need the 'BSE' or 'SSE' prefixes any more - they were put there because # prior to the implementation of the containing folder it was too hard to locate the files # created by a COMPAS run - especially the detailed output files. Now that the output # files are created inside a containing folder for each run there is really no need for # the prefixes - and if we don't have the prefixes we can share some of the options # (e.g. specifying the supernovae filename doesn't need to have separate options for # SSE and BSE - we really just need one (we only ever run in one mode or the other)) # # For now though, I'll leave them as is - we can change this when (if) we decide to # drop the prefixes logfile_common_envelopes = None logfile_detailed_output = None logfile_double_compact_objects = None logfile_rlof_parameters = None logfile_pulsar_evolution = None logfile_supernovae = None logfile_switch_log = None logfile_system_parameters = None debug_to_file = False errors_to_file = False def booleanChoices(self): booleanChoices = [ self.enable_warnings, self.use_mass_loss, self.mass_transfer, self.detailed_output, self.evolve_unbound_systems, self.populationPrinting, self.RLOFPrinting, self.circularise_binary_during_mass_transfer, self.angular_momentum_conservation_during_circularisation, self.pair_instability_supernovae, self.pulsation_pair_instability, self.quiet, self.common_envelope_allow_main_sequence_survive, self.common_envelope_allow_radiative_envelope_survive, self.common_envelope_allow_immediate_RLOF_post_CE_survive, self.evolvePulsars, self.debug_to_file, self.errors_to_file, self.allow_rlof_at_birth, self.allow_touching_at_birth, self.switch_log, self.check_photon_tiring_limit ] return booleanChoices def booleanCommands(self): booleanCommands = [ '--enable-warnings', '--use-mass-loss', '--mass-transfer', '--detailed-output', '--evolve-unbound-systems', '--population-data-printing', '--rlof-printing', '--circularise-binary-during-mass-transfer', '--angular-momentum-conservation-during-circularisation', '--pair-instability-supernovae', '--pulsational-pair-instability', '--quiet', '--common-envelope-allow-main-sequence-survive', '--common-envelope-allow-radiative-envelope-survive', '--common-envelope-allow-immediate-rlof-post-ce-survive', '--evolve-pulsars', '--debug-to-file', '--errors-to-file', '--allow-rlof-at-birth', '--allow-touching-at-birth', '--switch-log', '--check-photon-tiring-limit' ] return booleanCommands def numericalChoices(self): numericalChoices = [ self.number_of_systems, self.initial_mass, self.initial_mass_1, self.initial_mass_2, self.eccentricity, self.semi_major_axis, self.orbital_period, self.metallicity, self.common_envelope_alpha, self.common_envelope_lambda, self.common_envelope_slope_Kruckow, self.common_envelope_alpha_thermal, self.common_envelope_lambda_multiplier, self.luminous_blue_variable_multiplier, self.overall_wind_mass_loss_multiplier, self.wolf_rayet_multiplier, self.cool_wind_mass_loss_multiplier, self.mass_transfer_fa, self.mass_transfer_jloss, self.maximum_evolution_time, self.maximum_number_timesteps, self.timestep_multiplier, self.initial_mass_min, self.initial_mass_max, self.initial_mass_power, self.semi_major_axis_min, self.semi_major_axis_max, 
self.mass_ratio, self.mass_ratio_min, self.mass_ratio_max, self.minimum_secondary_mass, self.eccentricity_min, self.eccentricity_max, self.metallicity_min, self.metallicity_max, self.pulsar_birth_magnetic_field_min, self.pulsar_birth_magnetic_field_max, self.pulsar_birth_spin_period_min, self.pulsar_birth_spin_period_max, self.pulsar_magnetic_field_decay_timescale, self.pulsar_magnetic_field_decay_massscale, self.pulsar_minimum_magnetic_field, self.orbital_period_min, self.orbital_period_max, self.kick_magnitude_sigma_CCSN_NS, self.kick_magnitude_sigma_CCSN_BH, self.fix_dimensionless_kick_magnitude, self.kick_direction_power, self.random_seed, self.mass_transfer_thermal_limit_C, self.eddington_accretion_factor, self.PISN_lower_limit, self.PISN_upper_limit, self.PPI_lower_limit, self.PPI_upper_limit, self.maximum_neutron_star_mass, self.kick_magnitude_sigma_ECSN, self.kick_magnitude_sigma_USSN, self.kick_scaling_factor, self.common_envelope_maximum_donor_mass_revised_energy_formalism, self.common_envelope_recombination_energy_density, self.common_envelope_mass_accretion_max, self.common_envelope_mass_accretion_min, self.zeta_Main_Sequence, self.zeta_Radiative_Envelope_Giant, self.kick_magnitude_maximum, self.kick_magnitude_random, self.kick_magnitude, self.kick_magnitude_random_1, self.kick_magnitude_1, self.kick_theta_1, self.kick_phi_1, self.kick_mean_anomaly_1, self.kick_magnitude_random_2, self.kick_magnitude_2, self.kick_theta_2, self.kick_phi_2, self.kick_mean_anomaly_2, self.muller_mandel_kick_multiplier_BH, self.muller_mandel_kick_multiplier_NS, self.log_level, self.debug_level, self.hdf5_chunk_size, self.hdf5_buffer_size, self.neutrino_mass_loss_BH_formation_value ] return numericalChoices def numericalCommands(self): numericalCommands = [ '--number-of-systems', '--initial-mass', '--initial-mass-1', '--initial-mass-2', '--eccentricity', '--semi-major-axis', '--orbital-period', '--metallicity', '--common-envelope-alpha', '--common-envelope-lambda', '--common-envelope-slope-kruckow', '--common-envelope-alpha-thermal', '--common-envelope-lambda-multiplier', '--luminous-blue-variable-multiplier', '--overall-wind-mass-loss-multiplier', '--wolf-rayet-multiplier', '--cool-wind-mass-loss-multiplier', '--mass-transfer-fa', '--mass-transfer-jloss', '--maximum-evolution-time', '--maximum-number-timestep-iterations', '--timestep-multiplier', '--initial-mass-min', '--initial-mass-max', '--initial-mass-power', '--semi-major-axis-min', '--semi-major-axis-max', '--mass-ratio', '--mass-ratio-min', '--mass-ratio-max', '--minimum-secondary-mass', '--eccentricity-min', '--eccentricity-max', '--metallicity-min', '--metallicity-max', '--pulsar-birth-magnetic-field-distribution-min', '--pulsar-birth-magnetic-field-distribution-max', '--pulsar-birth-spin-period-distribution-min', '--pulsar-birth-spin-period-distribution-max', '--pulsar-magnetic-field-decay-timescale', '--pulsar-magnetic-field-decay-massscale', '--pulsar-minimum-magnetic-field', '--orbital-period-min', '--orbital-period-max', '--kick-magnitude-sigma-CCSN-NS', '--kick-magnitude-sigma-CCSN-BH', '--fix-dimensionless-kick-magnitude', '--kick-direction-power', '--random-seed', '--mass-transfer-thermal-limit-C', '--eddington-accretion-factor', '--pisn-lower-limit', '--pisn-upper-limit', '--ppi-lower-limit', '--ppi-upper-limit', '--maximum-neutron-star-mass', '--kick-magnitude-sigma-ECSN', '--kick-magnitude-sigma-USSN', '--kick-scaling-factor', '--maximum-mass-donor-nandez-ivanova', '--common-envelope-recombination-energy-density', 
'--common-envelope-mass-accretion-max', '--common-envelope-mass-accretion-min', '--zeta-main-sequence', '--zeta-radiative-envelope-giant', '--kick-magnitude-max', '--kick-magnitude-random', '--kick-magnitude', '--kick-magnitude-random-1', '--kick-magnitude-1', '--kick-theta-1', '--kick-phi-1', '--kick-mean-anomaly-1', '--kick-magnitude-random-2', '--kick-magnitude-2', '--kick-theta-2', '--kick-phi-2', '--kick-mean-anomaly-2', '--muller-mandel-kick-multiplier-BH', '--muller-mandel-kick-multiplier-NS', '--log-level', '--debug-level', '--hdf5-chunk-size', '--hdf5-buffer-size', '--neutrino-mass-loss-BH-formation-value' ] return numericalCommands def stringChoices(self): stringChoices = [ self.notes_hdrs, self.notes, self.mode, self.case_BB_stability_prescription, self.chemically_homogeneous_evolution, self.luminous_blue_variable_prescription, self.mass_loss_prescription, self.mass_transfer_angular_momentum_loss_prescription, self.mass_transfer_accretion_efficiency_prescription, self.mass_transfer_rejuvenation_prescription, self.initial_mass_function, self.semi_major_axis_distribution, self.orbital_period_distribution, self.mass_ratio_distribution, self.eccentricity_distribution, self.metallicity_distribution, self.rotational_velocity_distribution, self.remnant_mass_prescription, self.fryer_supernova_engine, self.black_hole_kicks, self.kick_magnitude_distribution, self.kick_direction, self.output, self.output_container, self.common_envelope_lambda_prescription, self.stellar_zeta_prescription, self.mass_transfer_thermal_limit_accretor, self.pulsational_pair_instability_prescription, self.neutron_star_equation_of_state, self.pulsar_birth_magnetic_field_distribution, self.pulsar_birth_spin_period_distribution, self.common_envelope_mass_accretion_prescription, self.envelope_state_prescription, self.logfile_name_prefix, self.logfile_type, self.logfile_definitions, self.grid_filename, self.logfile_common_envelopes, self.logfile_detailed_output, self.logfile_double_compact_objects, self.logfile_pulsar_evolution, self.logfile_rlof_parameters, self.logfile_supernovae, self.logfile_switch_log, self.logfile_system_parameters, self.neutrino_mass_loss_BH_formation, self.add_options_to_sysparms ] return stringChoices def stringCommands(self): stringCommands = [ '--notes-hdrs', '--notes', '--mode', '--case-BB-stability-prescription', '--chemically-homogeneous-evolution', '--luminous-blue-variable-prescription', '--mass-loss-prescription', '--mass-transfer-angular-momentum-loss-prescription', '--mass-transfer-accretion-efficiency-prescription', '--mass-transfer-rejuvenation-prescription', '--initial-mass-function', '--semi-major-axis-distribution', '--orbital-period-distribution', '--mass-ratio-distribution', '--eccentricity-distribution', '--metallicity-distribution', '--rotational-velocity-distribution', '--remnant-mass-prescription', '--fryer-supernova-engine', '--black-hole-kicks', '--kick-magnitude-distribution', '--kick-direction', '--output-path', '--output-container', '--common-envelope-lambda-prescription', '--stellar-zeta-prescription', '--mass-transfer-thermal-limit-accretor', '--pulsational-pair-instability-prescription', '--neutron-star-equation-of-state', '--pulsar-birth-magnetic-field-distribution', '--pulsar-birth-spin-period-distribution', '--common-envelope-mass-accretion-prescription', '--envelope-state-prescription', '--logfile-name-prefix', '--logfile-type', '--logfile-definitions', '--grid', '--logfile-common-envelopes', '--logfile-detailed-output', '--logfile-double-compact-objects', 
'--logfile-pulsar-evolution', '--logfile-rlof-parameters', '--logfile-supernovae', '--logfile-switch-log', '--logfile-system-parameters', '--neutrino-mass-loss-BH-formation', '--add-options-to-sysparms' ] return stringCommands def listChoices(self): listChoices = [ self.log_classes, self.debug_classes ] return listChoices def listCommands(self): listCommands = [ '--log-classes', '--debug-classes' ] return listCommands def generateCommandLineOptionsDict(self): """ This function generates a dictionary mapping COMPAS options to their specified values (or empty strings for boolean options). These can be combined into a string and run directly as a terminal command, or passed to the stroopwafel interface where some of them may be overwritten. Options not to be included in the command line should be set to pythons None (except booleans, which should be set to False) Parameters ----------- self : pythonProgramOptions Contains program options Returns -------- commands : str or list of strs """ booleanChoices = self.booleanChoices() booleanCommands = self.booleanCommands() nBoolean = len(booleanChoices) assert len(booleanCommands) == nBoolean numericalChoices = self.numericalChoices() numericalCommands = self.numericalCommands() nNumerical = len(numericalChoices) assert len(numericalCommands) == nNumerical stringChoices = self.stringChoices() stringCommands = self.stringCommands() nString = len(stringChoices) assert len(stringCommands) == nString listChoices = self.listChoices() listCommands = self.listCommands() nList = len(listChoices) assert len(listCommands) == nList ### Collect all options into a dictionary mapping option name to option value command = {'compas_executable' : self.compas_executable} for i in range(nBoolean): if booleanChoices[i] == True: command.update({booleanCommands[i] : ''}) elif booleanChoices[i] == False: command.update({booleanCommands[i] : 'False'}) for i in range(nNumerical): if not numericalChoices[i] == None: command.update({numericalCommands[i] : str(numericalChoices[i])}) for i in range(nString): if not stringChoices[i] == None: command.update({stringCommands[i] : cleanStringParameter(stringChoices[i])}) for i in range(nList): if listChoices[i]: command.update({listCommands[i] : ' '.join(map(str,listChoices[i]))}) return command def combineCommandLineOptionsDictIntoShellCommand(commandOptions): """ Write out the compas input parameters into a shell string. Ensure the Compas executable is first, and not repeated. Options are non-ordered. """ shellCommand = commandOptions['compas_executable'] del commandOptions['compas_executable'] for key, val in commandOptions.items(): shellCommand += ' ' + key + ' ' + val return shellCommand def cleanStringParameter(str_param): """ clean up string parameters to avoid confusing Boost """ if str_param is not None: # strip any quotes from the ends of the string str_param = str_param.strip("'\"") # escape any unescaped spaces or quotes within the string escapes = [" ", "'", "\""] for escape in escapes: str_param = re.sub(r"(?<!\\){}".format(escape), r"\{}".format(escape), str_param) return str_param if __name__ == "__main__": #-- Get the program options programOptions = pythonProgramOptions() commandOptions = programOptions.generateCommandLineOptionsDict() #-- Convert options into a shell string shellCommand = combineCommandLineOptionsDictIntoShellCommand(commandOptions) #-- Run exectute COMPAS shell string print(shellCommand) call(shellCommand,shell=True)
[]
[]
[ "COMPAS_INPUT_DIR_PATH", "COMPAS_EXECUTABLE_PATH", "COMPAS_ROOT_DIR", "COMPAS_LOGS_OUTPUT_DIR_PATH" ]
[]
["COMPAS_INPUT_DIR_PATH", "COMPAS_EXECUTABLE_PATH", "COMPAS_ROOT_DIR", "COMPAS_LOGS_OUTPUT_DIR_PATH"]
python
4
0
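The four COMPAS_* constants listed for the row above are environment variables consumed by the submit script (in the portion of its content that precedes this section). A minimal sketch of that lookup pattern, assuming os.environ.get-style access; the fallback paths here are placeholders, not the script's real defaults:

import os

compas_root = os.environ.get("COMPAS_ROOT_DIR", ".")                      # placeholder fallback
compas_executable = os.environ.get("COMPAS_EXECUTABLE_PATH",
                                   os.path.join(compas_root, "src", "COMPAS"))
grid_input_dir = os.environ.get("COMPAS_INPUT_DIR_PATH", os.getcwd())
logs_output_dir = os.environ.get("COMPAS_LOGS_OUTPUT_DIR_PATH", os.getcwd())
print(compas_executable, grid_input_dir, logs_output_dir)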
profile_test.go
package gosquare import ( "os" "testing" ) func TestGetProfile(t *testing.T) { api := new(Profile) api.Get(os.Getenv("FOURSQUARE_TOKEN")) if api.Meta.Code != 200 { t.Errorf("Has errors %v", api) } }
[ "\"FOURSQUARE_TOKEN\"" ]
[]
[ "FOURSQUARE_TOKEN" ]
[]
["FOURSQUARE_TOKEN"]
go
1
0
src/main/java/com/vicchern/deliveryservice/security/AuthenticationFilter.java
package com.vicchern.deliveryservice.security; import javax.servlet.FilterChain; import javax.servlet.ServletException; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import javax.xml.bind.DatatypeConverter; import java.io.IOException; import java.util.ArrayList; import java.util.List; import io.jsonwebtoken.Claims; import io.jsonwebtoken.Jwts; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.security.authentication.UsernamePasswordAuthenticationToken; import org.springframework.security.core.Authentication; import org.springframework.security.core.AuthenticationException; import org.springframework.security.core.GrantedAuthority; import org.springframework.security.core.authority.SimpleGrantedAuthority; import org.springframework.security.core.context.SecurityContextHolder; import org.springframework.security.web.authentication.AbstractAuthenticationProcessingFilter; import org.springframework.security.web.util.matcher.RequestMatcher; import static org.springframework.http.HttpHeaders.AUTHORIZATION; public class AuthenticationFilter extends AbstractAuthenticationProcessingFilter { private static final Logger logger = LoggerFactory.getLogger(AuthenticationFilter.class); AuthenticationFilter(final RequestMatcher requiresAuth) { super(requiresAuth); } @Override public Authentication attemptAuthentication(HttpServletRequest httpServletRequest, HttpServletResponse httpServletResponse) throws AuthenticationException, IOException { String token = httpServletRequest.getHeader(AUTHORIZATION); List<GrantedAuthority> roles = new ArrayList<>(); Claims claims = Jwts.parser() .setSigningKey(DatatypeConverter.parseBase64Binary(System.getenv("DeliveryServiceSecurityKey"))) .parseClaimsJws(token).getBody(); String authorities = claims.get("authorities").toString(); if (authorities.contains("ROLE_USER")) { roles.add(new SimpleGrantedAuthority("ROLE_USER")); } if (authorities.contains("ROLE_TENANT")) { roles.add(new SimpleGrantedAuthority("ROLE_TENANT")); } if (authorities.contains("ROLE_ACTOR")) { roles.add(new SimpleGrantedAuthority("ROLE_ACTOR")); } if (authorities.contains("ROLE_ADMIN")) { roles.add(new SimpleGrantedAuthority("ROLE_ADMIN")); } Authentication requestAuthentication = new UsernamePasswordAuthenticationToken(claims.get("email"), null, roles); return getAuthenticationManager().authenticate(requestAuthentication); } @Override protected void successfulAuthentication(final HttpServletRequest request, final HttpServletResponse response, final FilterChain chain, final Authentication authResult) throws IOException, ServletException { SecurityContextHolder.getContext().setAuthentication(authResult); chain.doFilter(request, response); } }
[ "\"DeliveryServiceSecurityKey\"" ]
[]
[ "DeliveryServiceSecurityKey" ]
[]
["DeliveryServiceSecurityKey"]
java
1
0
data/rapid_bq_to_pg.py
from google.cloud import bigquery from sqlalchemy import create_engine from io import StringIO import csv import numpy as np import os from decouple import config from score_update import sentiment_score import tqdm os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = config('GOOGLE_APPLICATION_CREDENTIALS') client = bigquery.Client() hn_dataset_ref = client.dataset('hacker_news', project='bigquery-public-data') comment_ref = hn_dataset_ref.table('comments') comments = client.get_table(comment_ref) engine = create_engine('postgres://<user>:<password>@<host>:5432/<dbname>') def psql_insert_copy(table, conn, keys, data_iter): """Uses postgres insert method to convert a dataframe to a csv and copy directly into a sql table. Refs: https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#io-sql-method https://www.postgresql.org/docs/current/static/sql-copy.html """ # gets a DBAPI connection that can provide a cursor dbapi_conn = conn.connection with dbapi_conn.cursor() as cur: s_buf = StringIO() writer = csv.writer(s_buf) writer.writerows(data_iter) s_buf.seek(0) columns = ', '.join('"{}"'.format(k) for k in keys) if table.schema: table_name = '{}.{}'.format(table.schema, table.name) else: table_name = table.name sql = 'COPY {} ({}) FROM STDIN WITH CSV'.format( table_name, columns) cur.copy_expert(sql=sql, file=s_buf) def rapid_merge(client, ds_ref, table_name='comments', n_rows=30000, range=5): """ Copy portions of a database num_rows long into another database. Resulting tables can be UNION'd to create the final table equal to the initial table. """ for n in tqdm.tqdm(range): df = client.list_rows(table_name, max_results=n_rows, start_index=n * n_rows).to_dataframe() df['score'] = df['text'].apply(sentiment_score) df.to_sql(f'table_{n:02}', engine, method=psql_insert_copy)
[]
[]
[ "GOOGLE_APPLICATION_CREDENTIALS" ]
[]
["GOOGLE_APPLICATION_CREDENTIALS"]
python
1
0
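The psql_insert_copy hook documented in the row above is designed to be passed as the method argument of pandas DataFrame.to_sql, so rows are loaded with Postgres COPY instead of per-row INSERTs. A minimal usage sketch under assumed conditions (a reachable Postgres instance, a placeholder DSN, and psql_insert_copy available from the module above):

import pandas as pd
from sqlalchemy import create_engine

# Placeholder DSN; newer SQLAlchemy expects the "postgresql://" scheme rather than "postgres://".
engine = create_engine("postgresql://user:password@localhost:5432/example")

df = pd.DataFrame({"text": ["hello", "world"], "score": [0.5, -0.5]})
# psql_insert_copy is the callable defined in data/rapid_bq_to_pg.py above.
df.to_sql("comments_copy", engine, index=False, method=psql_insert_copy)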
IPython/frontend/terminal/interactiveshell.py
# -*- coding: utf-8 -*- """Subclass of InteractiveShell for terminal based frontends.""" #----------------------------------------------------------------------------- # Copyright (C) 2001 Janko Hauser <[email protected]> # Copyright (C) 2001-2007 Fernando Perez. <[email protected]> # Copyright (C) 2008-2011 The IPython Development Team # # Distributed under the terms of the BSD License. The full license is in # the file COPYING, distributed as part of this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- from __future__ import print_function import bdb import os import re import sys import textwrap # We need to use nested to support python 2.6, once we move to >=2.7, we can # use the with keyword's new builtin support for nested managers try: from contextlib import nested except: from IPython.utils.nested_context import nested from IPython.core.error import TryNext, UsageError from IPython.core.usage import interactive_usage, default_banner from IPython.core.inputsplitter import IPythonInputSplitter from IPython.core.interactiveshell import InteractiveShell, InteractiveShellABC from IPython.core.magic import Magics, magics_class, line_magic from IPython.testing.skipdoctest import skip_doctest from IPython.utils.encoding import get_stream_enc from IPython.utils import py3compat from IPython.utils.terminal import toggle_set_term_title, set_term_title from IPython.utils.process import abbrev_cwd from IPython.utils.warn import warn, error from IPython.utils.text import num_ini_spaces, SList, strip_email_quotes from IPython.utils.traitlets import Integer, CBool, Unicode #----------------------------------------------------------------------------- # Utilities #----------------------------------------------------------------------------- def get_default_editor(): try: ed = os.environ['EDITOR'] except KeyError: if os.name == 'posix': ed = 'vi' # the only one guaranteed to be there! else: ed = 'notepad' # same in Windows! return ed def get_pasted_lines(sentinel, l_input=py3compat.input): """ Yield pasted lines until the user enters the given sentinel value. """ print("Pasting code; enter '%s' alone on the line to stop or use Ctrl-D." \ % sentinel) while True: try: l = l_input(':') if l == sentinel: return else: yield l except EOFError: print('<EOF>') return #------------------------------------------------------------------------ # Terminal-specific magics #------------------------------------------------------------------------ @magics_class class TerminalMagics(Magics): def __init__(self, shell): super(TerminalMagics, self).__init__(shell) self.input_splitter = IPythonInputSplitter(input_mode='line') def cleanup_input(self, block): """Apply all possible IPython cleanups to an input block. This means: - remove any global leading whitespace (dedent) - remove any email quotes ('>') if they are present in *all* lines - apply all static inputsplitter transforms and break into sub-blocks - apply prefilter() to each sub-block that is a single line. Parameters ---------- block : str A possibly multiline input string of code. Returns ------- transformed block : str The input, with all transformations above applied. 
""" # We have to effectively implement client-side the loop that is done by # the terminal frontend, and furthermore do it on a block that can # possibly contain multiple statments pasted in one go. # First, run the input through the block splitting code. We should # eventually make this a self-contained method in the inputsplitter. isp = self.input_splitter isp.reset() b = textwrap.dedent(block) # Remove email quotes first. These must be consistently applied to # *all* lines to be removed b = strip_email_quotes(b) # Split the input into independent sub-blocks so we can later do # prefiltering (which must be done *only* to single-line inputs) blocks = [] last_block = [] for line in b.splitlines(): isp.push(line) last_block.append(line) if not isp.push_accepts_more(): blocks.append(isp.source_reset()) last_block = [] if last_block: blocks.append('\n'.join(last_block)) # Now, apply prefiltering to any one-line block to match the behavior # of the interactive terminal final_blocks = [] for block in blocks: lines = block.splitlines() if len(lines) == 1: final_blocks.append(self.shell.prefilter(lines[0])) else: final_blocks.append(block) # We now have the final version of the input code as a list of blocks, # with all inputsplitter transformations applied and single-line blocks # run through prefilter. For further processing, turn into a single # string as the rest of our apis use string inputs. return '\n'.join(final_blocks) def store_or_execute(self, block, name): """ Execute a block, or store it in a variable, per the user's request. """ b = self.cleanup_input(block) if name: # If storing it for further editing self.shell.user_ns[name] = SList(b.splitlines()) print("Block assigned to '%s'" % name) else: self.shell.user_ns['pasted_block'] = b self.shell.using_paste_magics = True try: self.shell.run_cell(b) finally: self.shell.using_paste_magics = False def rerun_pasted(self, name='pasted_block'): """ Rerun a previously pasted command. """ b = self.shell.user_ns.get(name) # Sanity checks if b is None: raise UsageError('No previous pasted block available') if not isinstance(b, basestring): raise UsageError( "Variable 'pasted_block' is not a string, can't execute") print("Re-executing '%s...' (%d chars)"% (b.split('\n',1)[0], len(b))) self.shell.run_cell(b) @line_magic def autoindent(self, parameter_s = ''): """Toggle autoindent on/off (if available).""" self.shell.set_autoindent() print("Automatic indentation is:",['OFF','ON'][self.shell.autoindent]) @skip_doctest @line_magic def cpaste(self, parameter_s=''): """Paste & execute a pre-formatted code block from clipboard. You must terminate the block with '--' (two minus-signs) or Ctrl-D alone on the line. You can also provide your own sentinel with '%paste -s %%' ('%%' is the new sentinel for this operation) The block is dedented prior to execution to enable execution of method definitions. '>' and '+' characters at the beginning of a line are ignored, to allow pasting directly from e-mails, diff files and doctests (the '...' continuation prompt is also stripped). The executed block is also assigned to variable named 'pasted_block' for later editing with '%edit pasted_block'. You can also pass a variable name as an argument, e.g. '%cpaste foo'. This assigns the pasted block to variable 'foo' as string, without dedenting or executing it (preceding >>> and + is still stripped) '%cpaste -r' re-executes the block previously entered by cpaste. Do not be alarmed by garbled output on Windows (it's a readline bug). 
Just press enter and type -- (and press enter again) and the block will be what was just pasted. IPython statements (magics, shell escapes) are not supported (yet). See also -------- paste: automatically pull code from clipboard. Examples -------- :: In [8]: %cpaste Pasting code; enter '--' alone on the line to stop. :>>> a = ["world!", "Hello"] :>>> print " ".join(sorted(a)) :-- Hello world! """ opts, name = self.parse_options(parameter_s, 'rs:', mode='string') if 'r' in opts: self.rerun_pasted() return sentinel = opts.get('s', '--') block = '\n'.join(get_pasted_lines(sentinel)) self.store_or_execute(block, name) @line_magic def paste(self, parameter_s=''): """Paste & execute a pre-formatted code block from clipboard. The text is pulled directly from the clipboard without user intervention and printed back on the screen before execution (unless the -q flag is given to force quiet mode). The block is dedented prior to execution to enable execution of method definitions. '>' and '+' characters at the beginning of a line are ignored, to allow pasting directly from e-mails, diff files and doctests (the '...' continuation prompt is also stripped). The executed block is also assigned to variable named 'pasted_block' for later editing with '%edit pasted_block'. You can also pass a variable name as an argument, e.g. '%paste foo'. This assigns the pasted block to variable 'foo' as string, without executing it (preceding >>> and + is still stripped). Options ------- -r: re-executes the block previously entered by cpaste. -q: quiet mode: do not echo the pasted text back to the terminal. IPython statements (magics, shell escapes) are not supported (yet). See also -------- cpaste: manually paste code into terminal until you mark its end. """ opts, name = self.parse_options(parameter_s, 'rq', mode='string') if 'r' in opts: self.rerun_pasted() return try: block = self.shell.hooks.clipboard_get() except TryNext as clipboard_exc: message = getattr(clipboard_exc, 'args') if message: error(message[0]) else: error('Could not get text from the clipboard.') return # By default, echo back to terminal unless quiet mode is requested if 'q' not in opts: write = self.shell.write write(self.shell.pycolorize(block)) if not block.endswith('\n'): write('\n') write("## -- End pasted text --\n") self.store_or_execute(block, name) # Class-level: add a '%cls' magic only on Windows if sys.platform == 'win32': @line_magic def cls(self, s): """Clear screen. """ os.system("cls") #----------------------------------------------------------------------------- # Main class #----------------------------------------------------------------------------- class TerminalInteractiveShell(InteractiveShell): autoedit_syntax = CBool(False, config=True, help="auto editing of files with syntax errors.") banner = Unicode('') banner1 = Unicode(default_banner, config=True, help="""The part of the banner to be printed before the profile""" ) banner2 = Unicode('', config=True, help="""The part of the banner to be printed after the profile""" ) confirm_exit = CBool(True, config=True, help=""" Set to confirm when you try to exit IPython with an EOF (Control-D in Unix, Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a direct exit without any confirmation.""", ) # This display_banner only controls whether or not self.show_banner() # is called when mainloop/interact are called. 
The default is False # because for the terminal based application, the banner behavior # is controlled by Global.display_banner, which IPythonApp looks at # to determine if *it* should call show_banner() by hand or not. display_banner = CBool(False) # This isn't configurable! embedded = CBool(False) embedded_active = CBool(False) editor = Unicode(get_default_editor(), config=True, help="Set the editor used by IPython (default to $EDITOR/vi/notepad)." ) pager = Unicode('less', config=True, help="The shell program to be used for paging.") screen_length = Integer(0, config=True, help= """Number of lines of your screen, used to control printing of very long strings. Strings longer than this number of lines will be sent through a pager instead of directly printed. The default value for this is 0, which means IPython will auto-detect your screen size every time it needs to print certain potentially long strings (this doesn't change the behavior of the 'print' keyword, it's only triggered internally). If for some reason this isn't working well (it needs curses support), specify it yourself. Otherwise don't change the default.""", ) term_title = CBool(False, config=True, help="Enable auto setting the terminal title." ) # This `using_paste_magics` is used to detect whether the code is being # executed via paste magics functions using_paste_magics = CBool(False) # In the terminal, GUI control is done via PyOS_InputHook @staticmethod def enable_gui(gui=None, app=None): """Switch amongst GUI input hooks by name. """ # Deferred import from IPython.lib.inputhook import enable_gui as real_enable_gui return real_enable_gui(gui, app) def __init__(self, config=None, ipython_dir=None, profile_dir=None, user_ns=None, user_module=None, custom_exceptions=((),None), usage=None, banner1=None, banner2=None, display_banner=None): super(TerminalInteractiveShell, self).__init__( config=config, ipython_dir=ipython_dir, profile_dir=profile_dir, user_ns=user_ns, user_module=user_module, custom_exceptions=custom_exceptions ) # use os.system instead of utils.process.system by default, # because piped system doesn't make sense in the Terminal: self.system = self.system_raw self.init_term_title() self.init_usage(usage) self.init_banner(banner1, banner2, display_banner) #------------------------------------------------------------------------- # Overrides of init stages #------------------------------------------------------------------------- def init_display_formatter(self): super(TerminalInteractiveShell, self).init_display_formatter() # terminal only supports plaintext self.display_formatter.active_types = ['text/plain'] #------------------------------------------------------------------------- # Things related to the terminal #------------------------------------------------------------------------- @property def usable_screen_length(self): if self.screen_length == 0: return 0 else: num_lines_bot = self.separate_in.count('\n')+1 return self.screen_length - num_lines_bot def init_term_title(self): # Enable or disable the terminal title. if self.term_title: toggle_set_term_title(True) set_term_title('IPython: ' + abbrev_cwd()) else: toggle_set_term_title(False) #------------------------------------------------------------------------- # Things related to aliases #------------------------------------------------------------------------- def init_alias(self): # The parent class defines aliases that can be safely used with any # frontend. 
super(TerminalInteractiveShell, self).init_alias() # Now define aliases that only make sense on the terminal, because they # need direct access to the console in a way that we can't emulate in # GUI or web frontend if os.name == 'posix': aliases = [('clear', 'clear'), ('more', 'more'), ('less', 'less'), ('man', 'man')] elif os.name == 'nt': aliases = [('cls', 'cls')] for name, cmd in aliases: self.alias_manager.define_alias(name, cmd) #------------------------------------------------------------------------- # Things related to the banner and usage #------------------------------------------------------------------------- def _banner1_changed(self): self.compute_banner() def _banner2_changed(self): self.compute_banner() def _term_title_changed(self, name, new_value): self.init_term_title() def init_banner(self, banner1, banner2, display_banner): if banner1 is not None: self.banner1 = banner1 if banner2 is not None: self.banner2 = banner2 if display_banner is not None: self.display_banner = display_banner self.compute_banner() def show_banner(self, banner=None): if banner is None: banner = self.banner self.write(banner) def compute_banner(self): self.banner = self.banner1 if self.profile and self.profile != 'default': self.banner += '\nIPython profile: %s\n' % self.profile if self.banner2: self.banner += '\n' + self.banner2 def init_usage(self, usage=None): if usage is None: self.usage = interactive_usage else: self.usage = usage #------------------------------------------------------------------------- # Mainloop and code execution logic #------------------------------------------------------------------------- def mainloop(self, display_banner=None): """Start the mainloop. If an optional banner argument is given, it will override the internally created default banner. """ with nested(self.builtin_trap, self.display_trap): while 1: try: self.interact(display_banner=display_banner) #self.interact_with_readline() # XXX for testing of a readline-decoupled repl loop, call # interact_with_readline above break except KeyboardInterrupt: # this should not be necessary, but KeyboardInterrupt # handling seems rather unpredictable... self.write("\nKeyboardInterrupt in interact()\n") def _replace_rlhist_multiline(self, source_raw, hlen_before_cell): """Store multiple lines as a single entry in history""" # do nothing without readline or disabled multiline if not self.has_readline or not self.multiline_history: return hlen_before_cell # windows rl has no remove_history_item if not hasattr(self.readline, "remove_history_item"): return hlen_before_cell # skip empty cells if not source_raw.rstrip(): return hlen_before_cell # nothing changed do nothing, e.g. 
when rl removes consecutive dups hlen = self.readline.get_current_history_length() if hlen == hlen_before_cell: return hlen_before_cell for i in range(hlen - hlen_before_cell): self.readline.remove_history_item(hlen - i - 1) stdin_encoding = get_stream_enc(sys.stdin, 'utf-8') self.readline.add_history(py3compat.unicode_to_str(source_raw.rstrip(), stdin_encoding)) return self.readline.get_current_history_length() def interact(self, display_banner=None): """Closely emulate the interactive Python console.""" # batch run -> do not interact if self.exit_now: return if display_banner is None: display_banner = self.display_banner if isinstance(display_banner, basestring): self.show_banner(display_banner) elif display_banner: self.show_banner() more = False if self.has_readline: self.readline_startup_hook(self.pre_readline) hlen_b4_cell = self.readline.get_current_history_length() else: hlen_b4_cell = 0 # exit_now is set by a call to %Exit or %Quit, through the # ask_exit callback. while not self.exit_now: self.hooks.pre_prompt_hook() if more: try: prompt = self.prompt_manager.render('in2') except: self.showtraceback() if self.autoindent: self.rl_do_indent = True else: try: prompt = self.separate_in + self.prompt_manager.render('in') except: self.showtraceback() try: line = self.raw_input(prompt) if self.exit_now: # quick exit on sys.std[in|out] close break if self.autoindent: self.rl_do_indent = False except KeyboardInterrupt: #double-guard against keyboardinterrupts during kbdint handling try: self.write('\nKeyboardInterrupt\n') source_raw = self.input_splitter.source_raw_reset()[1] hlen_b4_cell = \ self._replace_rlhist_multiline(source_raw, hlen_b4_cell) more = False except KeyboardInterrupt: pass except EOFError: if self.autoindent: self.rl_do_indent = False if self.has_readline: self.readline_startup_hook(None) self.write('\n') self.exit() except bdb.BdbQuit: warn('The Python debugger has exited with a BdbQuit exception.\n' 'Because of how pdb handles the stack, it is impossible\n' 'for IPython to properly format this particular exception.\n' 'IPython will resume normal operation.') except: # exceptions here are VERY RARE, but they can be triggered # asynchronously by signal handlers, for example. self.showtraceback() else: self.input_splitter.push(line) more = self.input_splitter.push_accepts_more() if (self.SyntaxTB.last_syntax_error and self.autoedit_syntax): self.edit_syntax_error() if not more: source_raw = self.input_splitter.source_raw_reset()[1] self.run_cell(source_raw, store_history=True) hlen_b4_cell = \ self._replace_rlhist_multiline(source_raw, hlen_b4_cell) # Turn off the exit flag, so the mainloop can be restarted if desired self.exit_now = False def raw_input(self, prompt=''): """Write a prompt and read a line. The returned line does not include the trailing newline. When the user enters the EOF key sequence, EOFError is raised. Optional inputs: - prompt(''): a string to be printed to prompt the user. - continue_prompt(False): whether this line is the first one or a continuation in a sequence of inputs. """ # Code run by the user may have modified the readline completer state. # We must ensure that our completer is back in place. 
if self.has_readline: self.set_readline_completer() # raw_input expects str, but we pass it unicode sometimes prompt = py3compat.cast_bytes_py2(prompt) try: line = py3compat.str_to_unicode(self.raw_input_original(prompt)) except ValueError: warn("\n********\nYou or a %run:ed script called sys.stdin.close()" " or sys.stdout.close()!\nExiting IPython!\n") self.ask_exit() return "" # Try to be reasonably smart about not re-indenting pasted input more # than necessary. We do this by trimming out the auto-indent initial # spaces, if the user's actual input started itself with whitespace. if self.autoindent: if num_ini_spaces(line) > self.indent_current_nsp: line = line[self.indent_current_nsp:] self.indent_current_nsp = 0 return line #------------------------------------------------------------------------- # Methods to support auto-editing of SyntaxErrors. #------------------------------------------------------------------------- def edit_syntax_error(self): """The bottom half of the syntax error handler called in the main loop. Loop until syntax error is fixed or user cancels. """ while self.SyntaxTB.last_syntax_error: # copy and clear last_syntax_error err = self.SyntaxTB.clear_err_state() if not self._should_recompile(err): return try: # may set last_syntax_error again if a SyntaxError is raised self.safe_execfile(err.filename,self.user_ns) except: self.showtraceback() else: try: f = open(err.filename) try: # This should be inside a display_trap block and I # think it is. sys.displayhook(f.read()) finally: f.close() except: self.showtraceback() def _should_recompile(self,e): """Utility routine for edit_syntax_error""" if e.filename in ('<ipython console>','<input>','<string>', '<console>','<BackgroundJob compilation>', None): return False try: if (self.autoedit_syntax and not self.ask_yes_no('Return to editor to correct syntax error? ' '[Y/n] ','y')): return False except EOFError: return False def int0(x): try: return int(x) except TypeError: return 0 # always pass integer line and offset values to editor hook try: self.hooks.fix_error_editor(e.filename, int0(e.lineno),int0(e.offset),e.msg) except TryNext: warn('Could not open editor') return False return True #------------------------------------------------------------------------- # Things related to exiting #------------------------------------------------------------------------- def ask_exit(self): """ Ask the shell to exit. Can be overiden and used as a callback. """ self.exit_now = True def exit(self): """Handle interactive exit. This method calls the ask_exit callback.""" if self.confirm_exit: if self.ask_yes_no('Do you really want to exit ([y]/n)?','y'): self.ask_exit() else: self.ask_exit() #------------------------------------------------------------------------- # Things related to magics #------------------------------------------------------------------------- def init_magics(self): super(TerminalInteractiveShell, self).init_magics() self.register_magics(TerminalMagics) def showindentationerror(self): super(TerminalInteractiveShell, self).showindentationerror() if not self.using_paste_magics: print("If you want to paste code into IPython, try the " "%paste and %cpaste magic functions.") InteractiveShellABC.register(TerminalInteractiveShell)
[]
[]
[ "EDITOR" ]
[]
["EDITOR"]
python
1
0
examples/FasterRCNN/config.py
# -*- coding: utf-8 -*- # File: config.py import numpy as np import os import pprint import six from tensorpack.utils import logger from tensorpack.utils.gpu import get_num_gpu __all__ = ['config', 'finalize_configs'] class AttrDict(): _freezed = False """ Avoid accidental creation of new hierarchies. """ def __getattr__(self, name): if self._freezed: raise AttributeError(name) if name.startswith('_'): # Do not mess with internals. Otherwise copy/pickle will fail raise AttributeError(name) ret = AttrDict() setattr(self, name, ret) return ret def __setattr__(self, name, value): if self._freezed and name not in self.__dict__: raise AttributeError( "Config was freezed! Unknown config: {}".format(name)) super().__setattr__(name, value) def __str__(self): return pprint.pformat(self.to_dict(), indent=1, width=100, compact=True) __repr__ = __str__ def to_dict(self): """Convert to a nested dict. """ return {k: v.to_dict() if isinstance(v, AttrDict) else v for k, v in self.__dict__.items() if not k.startswith('_')} def from_dict(self, d): self.freeze(False) for k, v in d.items(): self_v = getattr(self, k) if isinstance(self_v, AttrDict): self_v.from_dict(v) else: setattr(self, k, v) def update_args(self, args): """Update from command line args. """ for cfg in args: keys, v = cfg.split('=', maxsplit=1) keylist = keys.split('.') dic = self for k in keylist[:-1]: assert k in dir(dic), "Unknown config key: {}".format(keys) dic = getattr(dic, k) key = keylist[-1] oldv = getattr(dic, key) if not isinstance(oldv, str): v = eval(v) setattr(dic, key, v) def freeze(self, freezed=True): self._freezed = freezed for v in self.__dict__.values(): if isinstance(v, AttrDict): v.freeze(freezed) # avoid silent bugs def __eq__(self, _): raise NotImplementedError() def __ne__(self, _): raise NotImplementedError() config = AttrDict() _C = config # short alias to avoid coding # mode flags --------------------- _C.TRAINER = 'replicated' # options: 'horovod', 'replicated' _C.MODE_MASK = True # Faster R-CNN or Mask R-CNN _C.MODE_FPN = True # dataset ----------------------- _C.DATA.BASEDIR = '/path/to/your/DATA/DIR' # All available dataset names are defined in `dataset/coco.py:register_coco`. # All TRAIN dataset will be concatenated for training. _C.DATA.TRAIN = ('coco_train2017',) # i.e. trainval35k # Each VAL dataset will be evaluated separately (instead of concatenated) _C.DATA.VAL = ('coco_val2017',) # AKA minival2014 # These two configs will be populated later inside `finalize_configs`. _C.DATA.NUM_CATEGORY = -1 # without the background class (e.g., 80 for COCO) _C.DATA.CLASS_NAMES = [] # NUM_CLASS (NUM_CATEGORY+1) strings, the first is "BG". # whether the coordinates in your registered dataset are # absolute pixel values in range [0, W or H] or relative values in [0, 1] _C.DATA.ABSOLUTE_COORD = True # Filter Negative Samples from dataset _C.DATA.FILTER_EMPTY_ANNOTATIONS = True # Number of data loading workers. # In case of horovod training, this is the number of workers per-GPU (so you may want to use a smaller number). # Set to 0 to disable parallel data loading _C.DATA.NUM_WORKERS = 10 # backbone ---------------------- _C.BACKBONE.WEIGHTS = '' # To train from scratch, set it to empty, and set FREEZE_AT to 0 # To train from ImageNet pre-trained models, use the one that matches your # architecture from http://models.tensorpack.com under the 'FasterRCNN' section. # To train from an existing COCO model, use the path to that file, and change # the other configurations according to that model. 
_C.BACKBONE.RESNET_NUM_BLOCKS = [3, 4, 23, 3] # for resnet50 # RESNET_NUM_BLOCKS = [3, 4, 23, 3] # for resnet101 _C.BACKBONE.FREEZE_AFFINE = False # do not train affine parameters inside norm layers _C.BACKBONE.NORM = 'FreezeBN' # options: FreezeBN, SyncBN, GN, None _C.BACKBONE.FREEZE_AT = 2 # options: 0, 1, 2. How many stages in backbone to freeze (not training) # Use a base model with TF-preferred padding mode, # which may pad more pixels on right/bottom than top/left. # See https://github.com/tensorflow/tensorflow/issues/18213 # In tensorpack model zoo, ResNet models with TF_PAD_MODE=False are marked with "-AlignPadding". # All other models under `ResNet/` in the model zoo are using TF_PAD_MODE=True. # Using either one should probably give the same performance. # We use the "AlignPadding" one just to be consistent with caffe2. _C.BACKBONE.TF_PAD_MODE = False _C.BACKBONE.STRIDE_1X1 = False # True for MSRA models # schedule ----------------------- _C.TRAIN.NUM_GPUS = None # by default, will be set from code _C.TRAIN.WEIGHT_DECAY = 1e-4 _C.TRAIN.BASE_LR = 1e-2 # defined for total batch size=8. Otherwise it will be adjusted automatically _C.TRAIN.WARMUP = 1000 # in terms of iterations. This is not affected by #GPUs _C.TRAIN.WARMUP_INIT_LR = 1e-5 # defined for total batch size=8. Otherwise it will be adjusted automatically _C.TRAIN.STEPS_PER_EPOCH = 500 _C.TRAIN.STARTING_EPOCH = 1 # the first epoch to start with, useful to continue a training # LR_SCHEDULE means equivalent steps when the total batch size is 8. # It can be either a string like "3x" that refers to standard convention, or a list of int. # LR_SCHEDULE=3x is the same as LR_SCHEDULE=[420000, 500000, 540000], which # means to decrease LR at steps 420k and 500k and stop training at 540k. # When the total bs!=8, the actual iterations to decrease learning rate, and # the base learning rate are computed from BASE_LR and LR_SCHEDULE. # Therefore, there is *no need* to modify the config if you only change the number of GPUs. _C.TRAIN.LR_SCHEDULE = "3x" # "1x" schedule in detectron _C.TRAIN.EVAL_PERIOD = 50 # period (epochs) to run evaluation _C.TRAIN.CHECKPOINT_PERIOD = 20 # period (epochs) to save model # preprocessing -------------------- # Alternative old (worse & faster) setting: 600 _C.PREPROC.TRAIN_SHORT_EDGE_SIZE = [640, 800] # [min, max] to sample from _C.PREPROC.TEST_SHORT_EDGE_SIZE = 1600 _C.PREPROC.MAX_SIZE = 1600 # mean and std in RGB order. # Un-scaled version: [0.485, 0.456, 0.406], [0.229, 0.224, 0.225] _C.PREPROC.PIXEL_MEAN = [123.675, 116.28, 103.53] _C.PREPROC.PIXEL_STD = [58.395, 57.12, 57.375] # anchors ------------------------- _C.RPN.ANCHOR_STRIDE = 16 _C.RPN.ANCHOR_SIZES = (32, 64, 128, 256, 512) # sqrtarea of the anchor box _C.RPN.ANCHOR_RATIOS = (0.5, 1., 2.) _C.RPN.POSITIVE_ANCHOR_THRESH = 0.7 _C.RPN.NEGATIVE_ANCHOR_THRESH = 0.3 # rpn training ------------------------- _C.RPN.FG_RATIO = 0.5 # fg ratio among selected RPN anchors _C.RPN.BATCH_PER_IM = 256 # total (across FPN levels) number of anchors that are marked valid _C.RPN.MIN_SIZE = 0 _C.RPN.PROPOSAL_NMS_THRESH = 0.7 # Anchors which overlap with a crowd box (IOA larger than threshold) will be ignored. # Setting this to a value larger than 1.0 will disable the feature. # It is disabled by default because Detectron does not do this. 
_C.RPN.CROWD_OVERLAP_THRESH = 9.99 _C.RPN.HEAD_DIM = 1024 # used in C4 only # RPN proposal selection ------------------------------- # for C4 _C.RPN.TRAIN_PRE_NMS_TOPK = 12000 _C.RPN.TRAIN_POST_NMS_TOPK = 2000 _C.RPN.TEST_PRE_NMS_TOPK = 6000 _C.RPN.TEST_POST_NMS_TOPK = 1000 # if you encounter OOM in inference, set this to a smaller number # for FPN, #proposals per-level and #proposals after merging are (for now) the same # if FPN.PROPOSAL_MODE = 'Joint', these options have no effect _C.RPN.TRAIN_PER_LEVEL_NMS_TOPK = 2000 _C.RPN.TEST_PER_LEVEL_NMS_TOPK = 1000 # fastrcnn training --------------------- _C.FRCNN.BATCH_PER_IM = 512 _C.FRCNN.BBOX_REG_WEIGHTS = [10., 10., 5., 5.] # Slightly better setting: 20, 20, 10, 10 _C.FRCNN.FG_THRESH = 0.5 _C.FRCNN.FG_RATIO = 0.25 # fg ratio in a ROI batch # FPN ------------------------- _C.FPN.ANCHOR_STRIDES = (4, 8, 16, 32, 64) # strides for each FPN level. Must be the same length as ANCHOR_SIZES _C.FPN.PROPOSAL_MODE = 'Level' # 'Level', 'Joint' _C.FPN.NUM_CHANNEL = 256 _C.FPN.NORM = 'None' # 'None', 'GN' # The head option is only used in FPN. For C4 models, the head is C5 _C.FPN.FRCNN_HEAD_FUNC = 'fastrcnn_2fc_head' # choices: fastrcnn_2fc_head, fastrcnn_4conv1fc_{,gn_}head _C.FPN.FRCNN_CONV_HEAD_DIM = 256 _C.FPN.FRCNN_FC_HEAD_DIM = 1024 _C.FPN.MRCNN_HEAD_FUNC = 'maskrcnn_up4conv_head' # choices: maskrcnn_up4conv_{,gn_}head # Mask R-CNN _C.MRCNN.HEAD_DIM = 256 _C.MRCNN.ACCURATE_PASTE = True # slightly more aligned results, but very slow on numpy # Cascade R-CNN, only available in FPN mode _C.FPN.CASCADE = True _C.CASCADE.IOUS = [0.5, 0.6, 0.7] _C.CASCADE.BBOX_REG_WEIGHTS = [[10., 10., 5., 5.], [20., 20., 10., 10.], [30., 30., 15., 15.]] # testing ----------------------- _C.TEST.FRCNN_NMS_THRESH = 0.5 # Smaller threshold value gives significantly better mAP. But we use 0.05 for consistency with Detectron. # mAP with 1e-4 threshold can be found at https://github.com/tensorpack/tensorpack/commit/26321ae58120af2568bdbf2269f32aa708d425a8#diff-61085c48abee915b584027e1085e1043 # noqa _C.TEST.RESULT_SCORE_THRESH = 1e-4 _C.TEST.RESULT_SCORE_THRESH_VIS = 0.5 # only visualize confident results _C.TEST.RESULTS_PER_IM = 100 _C.freeze() # avoid typo / wrong config keys def finalize_configs(is_training): """ Run some sanity checks, and populate some configs from others """ _C.freeze(False) # populate new keys now if isinstance(_C.DATA.VAL, six.string_types): # support single string (the typical case) as well _C.DATA.VAL = (_C.DATA.VAL, ) if isinstance(_C.DATA.TRAIN, six.string_types): # support single string _C.DATA.TRAIN = (_C.DATA.TRAIN, ) # finalize dataset definitions ... from dataset import DatasetRegistry datasets = list(_C.DATA.TRAIN) + list(_C.DATA.VAL) _C.DATA.CLASS_NAMES = DatasetRegistry.get_metadata(datasets[0], "class_names") _C.DATA.NUM_CATEGORY = len(_C.DATA.CLASS_NAMES) - 1 assert _C.BACKBONE.NORM in ['FreezeBN', 'SyncBN', 'GN', 'None'], _C.BACKBONE.NORM if _C.BACKBONE.NORM != 'FreezeBN': assert not _C.BACKBONE.FREEZE_AFFINE assert _C.BACKBONE.FREEZE_AT in [0, 1, 2] _C.RPN.NUM_ANCHOR = len(_C.RPN.ANCHOR_SIZES) * len(_C.RPN.ANCHOR_RATIOS) assert len(_C.FPN.ANCHOR_STRIDES) == len(_C.RPN.ANCHOR_SIZES) # image size into the backbone has to be multiple of this number _C.FPN.RESOLUTION_REQUIREMENT = _C.FPN.ANCHOR_STRIDES[3] # [3] because we build FPN with features r2,r3,r4,r5 if _C.MODE_FPN: size_mult = _C.FPN.RESOLUTION_REQUIREMENT * 1. 
_C.PREPROC.MAX_SIZE = np.ceil(_C.PREPROC.MAX_SIZE / size_mult) * size_mult assert _C.FPN.PROPOSAL_MODE in ['Level', 'Joint'] assert _C.FPN.FRCNN_HEAD_FUNC.endswith('_head') assert _C.FPN.MRCNN_HEAD_FUNC.endswith('_head') assert _C.FPN.NORM in ['None', 'GN'] if _C.FPN.CASCADE: # the first threshold is the proposal sampling threshold assert _C.CASCADE.IOUS[0] == _C.FRCNN.FG_THRESH assert len(_C.CASCADE.BBOX_REG_WEIGHTS) == len(_C.CASCADE.IOUS) if is_training: train_scales = _C.PREPROC.TRAIN_SHORT_EDGE_SIZE if isinstance(train_scales, (list, tuple)) and train_scales[1] - train_scales[0] > 100: # don't autotune if augmentation is on os.environ['TF_CUDNN_USE_AUTOTUNE'] = '0' os.environ['TF_AUTOTUNE_THRESHOLD'] = '1' assert _C.TRAINER in ['horovod', 'replicated'], _C.TRAINER lr = _C.TRAIN.LR_SCHEDULE if isinstance(lr, six.string_types): if lr.endswith("x"): LR_SCHEDULE_KITER = { "{}x".format(k): [180 * k - 120, 180 * k - 40, 180 * k] for k in range(2, 10)} LR_SCHEDULE_KITER["1x"] = [120, 160, 180] _C.TRAIN.LR_SCHEDULE = [x * 1000 for x in LR_SCHEDULE_KITER[lr]] else: _C.TRAIN.LR_SCHEDULE = eval(lr) # setup NUM_GPUS if _C.TRAINER == 'horovod': import horovod.tensorflow as hvd ngpu = hvd.size() logger.info("Horovod Rank={}, Size={}, LocalRank={}".format( hvd.rank(), hvd.size(), hvd.local_rank())) else: assert 'OMPI_COMM_WORLD_SIZE' not in os.environ ngpu = get_num_gpu() assert ngpu > 0, "Has to train with GPU!" assert ngpu % 8 == 0 or 8 % ngpu == 0, "Can only train with 1,2,4 or >=8 GPUs, but found {} GPUs".format(ngpu) else: # autotune is too slow for inference os.environ['TF_CUDNN_USE_AUTOTUNE'] = '0' ngpu = get_num_gpu() if _C.TRAIN.NUM_GPUS is None: _C.TRAIN.NUM_GPUS = ngpu else: if _C.TRAINER == 'horovod': assert _C.TRAIN.NUM_GPUS == ngpu else: assert _C.TRAIN.NUM_GPUS <= ngpu _C.freeze() logger.info("Config: ------------------------------------------\n" + str(_C))
[]
[]
[ "TF_AUTOTUNE_THRESHOLD", "TF_CUDNN_USE_AUTOTUNE" ]
[]
["TF_AUTOTUNE_THRESHOLD", "TF_CUDNN_USE_AUTOTUNE"]
python
2
0
github_team_organizer/classes/github.py
import os from cached_property import cached_property from github import Github as PyGithub from github.Organization import Organization class GitHubWrapper(PyGithub): __instance = None def __new__(cls, *args, **kwargs): if GitHubWrapper.__instance is None: GitHubWrapper.__instance = super().__new__(cls, *args, **kwargs) return GitHubWrapper.__instance def __init__(self, login_or_token: str = None): if not hasattr(self, 'login_or_token'): self.login_or_token = login_or_token or os.getenv('GITHUB_API_KEY') super().__init__(login_or_token=self.login_or_token) @cached_property def default_organization(self) -> Organization: return self.get_organization(os.getenv('GITHUB_ORGANIZATION'))
[]
[]
[ "GITHUB_API_KEY", "GITHUB_ORGANIZATION" ]
[]
["GITHUB_API_KEY", "GITHUB_ORGANIZATION"]
python
2
0
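The wrapper in the row above resolves its token and organization from GITHUB_API_KEY and GITHUB_ORGANIZATION. A hypothetical usage sketch with placeholder values (requires the PyGithub dependency and network access, and assumes GitHubWrapper is importable from the module above):

import os

os.environ.setdefault("GITHUB_API_KEY", "ghp_exampletoken")   # placeholder token
os.environ.setdefault("GITHUB_ORGANIZATION", "example-org")   # placeholder org name

gh = GitHubWrapper()            # token read from GITHUB_API_KEY
print(gh.default_organization)  # organization resolved via GITHUB_ORGANIZATION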
mpm/bin/build.py
import argparse import logging import os import subprocess as sp import sys import zipfile import path_helpers as ph import yaml logger = logging.getLogger(__name__) def parse_args(args=None): ''' Parses arguments, returns ``(options, args)``. .. versionchanged:: 0.24.1 Fix handling of optional :data:`args`. ''' if args is None: args = sys.argv[1:] parser = argparse.ArgumentParser(description='MicroDrop plugin ' 'Conda recipe builder') parser.add_argument('-s', '--source-dir', type=ph.path, nargs='?') parser.add_argument('-t', '--target-dir', type=ph.path, nargs='?') parser.add_argument('-p', '--package-name', nargs='?') # Use `-V` for version (from [common Unix flags][1]). # # [1]: https://unix.stackexchange.com/a/108141/187716 parser.add_argument('-V', '--version-number', nargs='?') parsed_args = parser.parse_args() if not parsed_args.source_dir: parsed_args.source_dir = ph.path(os.environ['SRC_DIR']) if not parsed_args.target_dir: prefix_dir = ph.path(os.environ['PREFIX']) # Extract module name from Conda package name. # # For example, the module name for a package named # `microdrop.droplet_planning_plugin` would be # `droplet_planning_plugin`. module_name = os.environ['PKG_NAME'].split('.')[-1].replace('-', '_') parsed_args.target_dir = prefix_dir.joinpath('share', 'microdrop', 'plugins', 'available', module_name) if not parsed_args.package_name: parsed_args.package_name = os.environ['PKG_NAME'] return parsed_args def build(source_dir, target_dir, package_name=None, version_number=None): ''' Create a release of a MicroDrop plugin source directory in the target directory path. Skip the following patterns: - ``bld.bat`` - ``.conda-recipe/*`` - ``.git/*`` .. versionchanged:: 0.24.1 Remove temporary archive after extraction. Change directory into source directory before running ``git archive``. .. versionchanged:: 0.25 Add optional :data:`version_number` argument. Parameters ---------- source_dir : str Source directory. target_dir : str Target directory. package_name : str, optional Name of plugin Conda package (defaults to name of :data:`target_dir`). version_number : str, optional Package version number. If not specified, assume version package exposes version using `versioneer <https://github.com/warner/python-versioneer>`_. ''' source_dir = ph.path(source_dir).realpath() target_dir = ph.path(target_dir).realpath() target_dir.makedirs_p() source_archive = source_dir.joinpath(source_dir.name + '.zip') if package_name is None: package_name = str(target_dir.name) logger.info('Source directory: %s', source_dir) logger.info('Source archive: %s', source_archive) logger.info('Target directory: %s', target_dir) logger.info('Package name: %s', package_name) # Export git archive, which substitutes version expressions in # `_version.py` to reflect the state (i.e., revision and tag info) of the # git repository. original_dir = ph.path(os.getcwd()) try: os.chdir(source_dir) sp.check_call(['git', 'archive', '-o', source_archive, 'HEAD'], shell=True) finally: os.chdir(original_dir) # Extract exported git archive to Conda MicroDrop plugins directory. with zipfile.ZipFile(source_archive, 'r') as zip_ref: zip_ref.extractall(target_dir) # Extraction is complete. Remove temporary archive. source_archive.remove() # Delete Conda build recipe from installed package. target_dir.joinpath('.conda-recipe').rmtree() # Delete Conda build recipe from installed package. for p in target_dir.files('.git*'): p.remove() # Write package information to (legacy) `properties.yml` file. 
original_dir = ph.path(os.getcwd()) try: os.chdir(source_dir) if version_number is None: # Assume versioneer is being used for managing version. import _version as v version_info = {'version': v.get_versions()['version'], 'versioneer': v.get_versions()} else: # Version number was specified explicitly. version_info = {'version': version_number} finally: os.chdir(original_dir) # Create properties dictionary object (cast types, e.g., `ph.path`, to # strings for cleaner YAML dump). properties = {'package_name': package_name, 'plugin_name': str(target_dir.name)} properties.update(version_info) with target_dir.joinpath('properties.yml').open('w') as properties_yml: # Dump properties to YAML-formatted file. # Setting `default_flow_style=False` writes each property on a separate # line (cosmetic change only). yaml.dump(properties, properties_yml, default_flow_style=False) def main(args=None): if args is None: args = parse_args() logger.debug('Arguments: %s', args) build(args.source_dir, args.target_dir, package_name=args.package_name, version_number=args.version_number) if __name__ == '__main__': import logging logging.basicConfig(level=logging.DEBUG) main()
[]
[]
[ "PREFIX", "SRC_DIR", "PKG_NAME" ]
[]
["PREFIX", "SRC_DIR", "PKG_NAME"]
python
3
0
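A minimal Go sketch of the fallback behaviour recorded for the recipe builder above: when no flags are passed, SRC_DIR, PREFIX and PKG_NAME drive the source and target paths. The helper name defaultDirs and the Go rendering itself are illustrative assumptions; the actual tool is the Python script in this row.

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// defaultDirs mirrors the flag fallbacks in the row above: SRC_DIR is the
// source, and the target is PREFIX/share/microdrop/plugins/available/<module>,
// where <module> is the last dot-separated piece of PKG_NAME with '-' replaced by '_'.
func defaultDirs() (sourceDir, targetDir string) {
	sourceDir = os.Getenv("SRC_DIR")
	parts := strings.Split(os.Getenv("PKG_NAME"), ".")
	module := strings.ReplaceAll(parts[len(parts)-1], "-", "_")
	targetDir = filepath.Join(os.Getenv("PREFIX"),
		"share", "microdrop", "plugins", "available", module)
	return sourceDir, targetDir
}

func main() {
	src, dst := defaultDirs()
	fmt.Println("source:", src)
	fmt.Println("target:", dst)
}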
vendor/github.com/armory/go-yaml-tools/pkg/secrets/vault.go
package secrets import ( "fmt" "github.com/hashicorp/vault/api" "io/ioutil" "os" "strings" ) type VaultConfig struct { Enabled bool `json:"enabled" yaml:"enabled"` Url string `json:"url" yaml:"url"` AuthMethod string `json:"authMethod" yaml:"authMethod"` Role string `json:"role" yaml:"role"` Path string `json:"path" yaml:"path"` Token string } type VaultSecret struct { engine string path string key string base64Encoded string } type VaultDecrypter struct { params map[string]string } func NewVaultDecrypter(params map[string]string) *VaultDecrypter { return &VaultDecrypter{params} } func (decrypter *VaultDecrypter) Decrypt() (string, error) { if (VaultConfig{}) == Registry.VaultConfig { return "", fmt.Errorf("error: vault secrets configuration not found") } //vaultSecret, err := ParseVaultSecret(v.encryptedSecret) vaultSecret, err := ParseVaultSecret(decrypter.params) if err != nil { return "", fmt.Errorf("error parsing vault secret syntax - %s", err) } if Registry.VaultConfig.Token == "" { token, err := decrypter.FetchVaultToken() if err != nil { return "", fmt.Errorf("error fetching vault token - %s", err) } Registry.VaultConfig.Token = token } secret, err := decrypter.FetchSecret(vaultSecret) if err != nil && strings.Contains(err.Error(), "403") { // get new token and retry in case our saved token is no longer valid return decrypter.RetryFetchSecret(vaultSecret) } return secret, err } func ValidateVaultConfig(vaultConfig VaultConfig) error { if (VaultConfig{}) == vaultConfig { return fmt.Errorf("vault secrets not configured in service profile yaml") } if vaultConfig.Enabled == false { return fmt.Errorf("vault secrets disabled") } if vaultConfig.Url == "" { return fmt.Errorf("vault url required") } if vaultConfig.AuthMethod == "" { return fmt.Errorf("auth method required") } if vaultConfig.AuthMethod == "TOKEN" { if token := os.Getenv("VAULT_TOKEN"); token == "" { return fmt.Errorf("VAULT_TOKEN environment variable not set") } } else if vaultConfig.AuthMethod == "KUBERNETES" { if vaultConfig.Path == "" || vaultConfig.Role == "" { return fmt.Errorf("path and role both required for Kubernetes auth method") } } else { return fmt.Errorf("unknown Vault secrets auth method: %q", vaultConfig.AuthMethod) } return nil } func ParseVaultSecret(params map[string]string) (VaultSecret, error) { var vaultSecret VaultSecret engine, ok := params["e"] if !ok { return VaultSecret{}, fmt.Errorf("secret format error - 'r' for region is required") } vaultSecret.engine = engine path, ok := params["n"] if !ok { return VaultSecret{}, fmt.Errorf("secret format error - 'b' for bucket is required") } vaultSecret.path = path key, ok := params["k"] if !ok { return VaultSecret{}, fmt.Errorf("secret format error - 'f' for file is required") } vaultSecret.key = key base64, ok := params["b"] if ok { vaultSecret.base64Encoded = base64 } return vaultSecret, nil } func (decrypter *VaultDecrypter) FetchVaultToken() (string, error) { if Registry.VaultConfig.AuthMethod == "TOKEN" { return os.Getenv("VAULT_TOKEN"), nil } else if Registry.VaultConfig.AuthMethod == "KUBERNETES" { return decrypter.FetchServiceAccountToken() } else { return "", fmt.Errorf("unknown Vault auth method: %q", Registry.VaultConfig.AuthMethod) } } func (decrypter *VaultDecrypter) FetchServiceAccountToken() (string, error) { client, err := api.NewClient(&api.Config{ Address: Registry.VaultConfig.Url, }) if err != nil { return "", fmt.Errorf("error fetching vault client: %s", err) } tokenFile, err := 
ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/token") if err != nil { return "", fmt.Errorf("error reading service account token: %s", err) } token := string(tokenFile) data := map[string]interface{}{ "role": Registry.VaultConfig.Role, "jwt": token, } secret, err := client.Logical().Write("auth/" + Registry.VaultConfig.Path + "/login", data) if err != nil { return "", fmt.Errorf("error logging into vault using kubernetes auth: %s", err) } return secret.Auth.ClientToken, nil } func (decrypter *VaultDecrypter) FetchVaultClient(token string) (*api.Client, error) { client, err := api.NewClient(&api.Config{ Address: Registry.VaultConfig.Url, }) if err != nil { return nil, err } client.SetToken(token) return client, nil } func (decrypter *VaultDecrypter) FetchSecret(secret VaultSecret) (string, error) { client, err := decrypter.FetchVaultClient(Registry.VaultConfig.Token) if err != nil { return "", fmt.Errorf("error fetching vault client - %s", err) } secretMapping, err := client.Logical().Read(secret.engine + "/" + secret.path) if err != nil { if strings.Contains(err.Error(), "invalid character '<' looking for beginning of value") { // some connection errors aren't properly caught, and the vault client tries to parse <nil> return "", fmt.Errorf("error fetching secret from vault - check connection to the server: %s", Registry.VaultConfig.Url) } return "", fmt.Errorf("error fetching secret from vault: %s", err) } warnings := secretMapping.Warnings if warnings != nil { for i := range warnings { if strings.Contains(warnings[i], "Invalid path for a versioned K/V secrets engine") { // try again using K/V v2 path secretMapping, err = client.Logical().Read(secret.engine + "/data/" + secret.path) if err != nil { return "", fmt.Errorf("error fetching secret from vault: %s", err) } else if secretMapping == nil { return "", fmt.Errorf("couldn't find vault path %q under engine %q", secret.path, secret.engine) } break } } } if secretMapping != nil { mapping := secretMapping.Data if data, ok := mapping["data"]; ok { // one more nesting of "data" if using K/V v2 if submap, ok := data.(map[string]interface{}); ok { mapping = submap } } decrypted, ok := mapping[secret.key].(string) if !ok { return "", fmt.Errorf("error fetching key %q", secret.key) } return decrypted, nil } return "", nil } func (decrypter *VaultDecrypter) RetryFetchSecret(secret VaultSecret) (string, error) { token, err := decrypter.FetchVaultToken() if err != nil { return "", fmt.Errorf("error fetching vault token - %s", err) } Registry.VaultConfig.Token = token return decrypter.FetchSecret(secret) }
[ "\"VAULT_TOKEN\"", "\"VAULT_TOKEN\"" ]
[]
[ "VAULT_TOKEN" ]
[]
["VAULT_TOKEN"]
go
1
0
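The single constarg entry for vault.go comes from the TOKEN auth-method check. A self-contained sketch of that check, assuming only the VAULT_TOKEN variable from the row above (the surrounding function and messages are illustrative):

package main

import (
	"fmt"
	"os"
)

// validateTokenAuth reproduces only the TOKEN branch of the config validation:
// when the auth method is TOKEN, VAULT_TOKEN must be present in the environment.
func validateTokenAuth() error {
	if token := os.Getenv("VAULT_TOKEN"); token == "" {
		return fmt.Errorf("VAULT_TOKEN environment variable not set")
	}
	return nil
}

func main() {
	if err := validateTokenAuth(); err != nil {
		fmt.Println("vault config invalid:", err)
		return
	}
	fmt.Println("token auth configured")
}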
main/main.go
package main import ( "log" "os" "github.com/TerrexTech/go-agg-framer/framer" "github.com/TerrexTech/go-kafkautils/kafka" "github.com/TerrexTech/agg-warning-cmd/warning" "github.com/TerrexTech/go-commonutils/commonutil" "github.com/TerrexTech/go-eventspoll/poll" "github.com/joho/godotenv" "github.com/pkg/errors" ) func validateEnv() error { missingVar, err := commonutil.ValidateEnv( "ETCD_HOSTS", "KAFKA_BROKERS", "KAFKA_CONSUMER_EVENT_GROUP", "KAFKA_CONSUMER_EVENT_QUERY_GROUP", "KAFKA_CONSUMER_EVENT_TOPIC", "KAFKA_CONSUMER_EVENT_QUERY_TOPIC", "KAFKA_PRODUCER_EVENT_QUERY_TOPIC", "KAFKA_PRODUCER_RESPONSE_TOPIC", "MONGO_HOSTS", "MONGO_DATABASE", "MONGO_AGG_COLLECTION", "MONGO_META_COLLECTION", "MONGO_CONNECTION_TIMEOUT_MS", "MONGO_RESOURCE_TIMEOUT_MS", ) if err != nil { err = errors.Wrapf(err, "Env-var %s is required for testing, but is not set", missingVar) return err } return nil } func main() { log.Println("Reading environment file") err := godotenv.Load("./.env") if err != nil { err = errors.Wrap(err, ".env file not found, env-vars will be read as set in environment", ) log.Println(err) } err = validateEnv() if err != nil { log.Fatalln(err) } kc, err := loadKafkaConfig() if err != nil { err = errors.Wrap(err, "Error in KafkaConfig") log.Fatalln(err) } mc, err := loadMongoConfig() if err != nil { err = errors.Wrap(err, "Error in MongoConfig") log.Fatalln(err) } ioConfig := poll.IOConfig{ ReadConfig: poll.ReadConfig{ EnableInsert: true, EnableUpdate: true, EnableDelete: true, }, KafkaConfig: *kc, MongoConfig: *mc, } eventPoll, err := poll.Init(ioConfig) if err != nil { err = errors.Wrap(err, "Error creating EventPoll service") log.Fatalln(err) } // etcdHostsStr := os.Getenv("ETCD_HOSTS") // etcdConfig := clientv3.Config{ // DialTimeout: 5 * time.Second, // Endpoints: *commonutil.ParseHosts(etcdHostsStr), // } // etcdUsername := os.Getenv("ETCD_USERNAME") // etcdPassword := os.Getenv("ETCD_PASSWORD") // if etcdUsername != "" { // etcdConfig.Username = etcdUsername // } // if etcdPassword != "" { // etcdConfig.Password = etcdPassword // } // etcd, err := clientv3.New(etcdConfig) // if err != nil { // err = errors.Wrap(err, "Failed to connect to ETCD") // log.Fatalln(err) // } // log.Println("ETCD Ready") kafkaBrokers := *commonutil.ParseHosts( os.Getenv("KAFKA_BROKERS"), ) producerConfig := &kafka.ProducerConfig{ KafkaBrokers: kafkaBrokers, } topicConfig := &framer.TopicConfig{ DocumentTopic: os.Getenv("KAFKA_PRODUCER_RESPONSE_TOPIC"), } frm, err := framer.New(eventPoll.Context(), producerConfig, topicConfig) if err != nil { err = errors.Wrap(err, "Failed initializing Framer") log.Fatalln(err) } for { select { case <-eventPoll.Context().Done(): err = errors.New("service-context closed") log.Fatalln(err) case eventResp := <-eventPoll.Delete(): go func(eventResp *poll.EventResponse) { if eventResp == nil { return } err := eventResp.Error if err != nil { err = errors.Wrap(err, "Error in Delete-EventResponse") log.Println(err) return } frm.Document <- warning.Delete(mc.AggCollection, &eventResp.Event) }(eventResp) case eventResp := <-eventPoll.Insert(): go func(eventResp *poll.EventResponse) { if eventResp == nil { return } err := eventResp.Error if err != nil { err = errors.Wrap(eventResp.Error, "Error in Insert-EventResponse") log.Println(err) return } frm.Document <- warning.Insert(mc.AggCollection, &eventResp.Event) }(eventResp) case eventResp := <-eventPoll.Update(): go func(eventResp *poll.EventResponse) { if eventResp == nil { return } err := eventResp.Error if err != nil { err = 
errors.Wrap(err, "Error in Update-EventResponse") log.Println(err) return } frm.Document <- warning.Update(mc.AggCollection, &eventResp.Event) }(eventResp) } } }
[ "\"ETCD_HOSTS\"", "\"ETCD_USERNAME\"", "\"ETCD_PASSWORD\"", "\"KAFKA_BROKERS\"", "\"KAFKA_PRODUCER_RESPONSE_TOPIC\"" ]
[]
[ "ETCD_HOSTS", "ETCD_USERNAME", "ETCD_PASSWORD", "KAFKA_PRODUCER_RESPONSE_TOPIC", "KAFKA_BROKERS" ]
[]
["ETCD_HOSTS", "ETCD_USERNAME", "ETCD_PASSWORD", "KAFKA_PRODUCER_RESPONSE_TOPIC", "KAFKA_BROKERS"]
go
5
0
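The five variables recorded for main/main.go are read with plain os.Getenv after an up-front validation pass. A hedged sketch of that read-then-validate pattern for two of them; lookupRequired is an illustrative helper, not part of the original service:

package main

import (
	"fmt"
	"os"
	"strings"
)

// lookupRequired returns the value of a required variable or an error naming
// the missing one, mirroring the validateEnv pass in the row above.
func lookupRequired(name string) (string, error) {
	v, ok := os.LookupEnv(name)
	if !ok || v == "" {
		return "", fmt.Errorf("env-var %s is required, but is not set", name)
	}
	return v, nil
}

func main() {
	brokers, err := lookupRequired("KAFKA_BROKERS")
	if err != nil {
		fmt.Println(err)
		return
	}
	topic, err := lookupRequired("KAFKA_PRODUCER_RESPONSE_TOPIC")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("brokers:", strings.Split(brokers, ","))
	fmt.Println("response topic:", topic)
}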
smoketest/harness/harness.go
package harness import ( "bytes" "context" "encoding/json" "fmt" "io" stdlog "log" "net/http/httptest" "net/url" "os" "path/filepath" "reflect" "sort" "strings" "sync" "testing" "text/template" "time" "github.com/jackc/pgx/v4" "github.com/jackc/pgx/v4/pgxpool" "github.com/jackc/pgx/v4/stdlib" "github.com/pkg/errors" uuid "github.com/satori/go.uuid" "github.com/target/goalert/alert" "github.com/target/goalert/app" "github.com/target/goalert/config" "github.com/target/goalert/devtools/mockslack" "github.com/target/goalert/devtools/mocktwilio" "github.com/target/goalert/devtools/pgdump-lite" "github.com/target/goalert/migrate" "github.com/target/goalert/notification/twilio" "github.com/target/goalert/permission" "github.com/target/goalert/user" "github.com/target/goalert/user/notificationrule" "github.com/target/goalert/util/log" "github.com/target/goalert/util/sqlutil" ) const dbTimeFormat = "2006-01-02 15:04:05.999999-07:00" var ( dbURLStr string dbURL *url.URL ) func init() { dbURLStr = os.Getenv("DB_URL") if dbURLStr == "" { dbURLStr = "postgres://[email protected]:5432?sslmode=disable" } var err error dbURL, err = url.Parse(dbURLStr) if err != nil { panic(err) } } func DBURL(name string) string { if name == "" { return dbURLStr } u := *dbURL u.Path = "/" + url.PathEscape(name) return u.String() } // Harness is a helper for smoketests. It deals with assertions, database management, and backend monitoring during tests. type Harness struct { phoneCCG, uuidG, emailG *DataGen t *testing.T closing bool tw *twilioAssertionAPI twS *httptest.Server cfg config.Config email *emailServer slack *slackServer slackS *httptest.Server slackApp mockslack.AppInfo slackUser mockslack.UserInfo ignoreErrors []string backend *app.App backendLogs io.Closer dbURL string dbName string delayOffset time.Duration mx sync.Mutex start time.Time resumed time.Time lastTimeChange time.Time pgResume time.Time db *pgxpool.Pool userGeneratedIndex int gqlSessions map[string]string } func (h *Harness) Config() config.Config { return h.cfg } // NewHarness will create a new database, perform `migrateSteps` migrations, inject `initSQL` and return a new Harness bound to // the result. It starts a backend process pre-configured to a mock twilio server for monitoring notifications as well. func NewHarness(t *testing.T, initSQL, migrationName string) *Harness { stdlog.SetOutput(io.Discard) t.Helper() h := NewStoppedHarness(t, initSQL, nil, migrationName) h.Start() return h } func (h *Harness) App() *app.App { return h.backend } func NewHarnessWithData(t *testing.T, initSQL string, sqlData interface{}, migrationName string) *Harness { t.Helper() h := NewStoppedHarness(t, initSQL, sqlData, migrationName) h.Start() return h } // NewHarnessDebugDB works like NewHarness, but fails the test immediately after // migrations have been run. It is used to debug data & queries from a smoketest. // // Note that the now() function will be locked to the init timestamp for inspection. func NewHarnessDebugDB(t *testing.T, initSQL, migrationName string) *Harness { t.Helper() h := NewStoppedHarness(t, initSQL, nil, migrationName) h.Migrate("") t.Fatal("DEBUG DB ::", h.dbURL) return nil } const ( twilioAuthToken = "11111111111111111111111111111111" twilioAccountSID = "AC00000000000000000000000000000000" mailgunAPIKey = "key-00000000000000000000000000000000" ) // NewStoppedHarness will create a NewHarness, but will not call Start. 
func NewStoppedHarness(t *testing.T, initSQL string, sqlData interface{}, migrationName string) *Harness { t.Helper() if testing.Short() { t.Skip("skipping Harness tests for short mode") } t.Logf("Using DB URL: %s", dbURL) start := time.Now() name := strings.Replace("smoketest_"+time.Now().Format("2006_01_02_15_04_05")+uuid.NewV4().String(), "-", "", -1) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() conn, err := pgx.Connect(ctx, DBURL("")) if err != nil { t.Fatal("connect to db:", err) } defer conn.Close(ctx) _, err = conn.Exec(ctx, "create database "+sqlutil.QuoteID(name)) if err != nil { t.Fatal("create db:", err) } conn.Close(ctx) t.Logf("created test database '%s': %s", name, dbURL) twCfg := mocktwilio.Config{ AuthToken: twilioAuthToken, AccountSID: twilioAccountSID, MinQueueTime: 100 * time.Millisecond, // until we have a stateless backend for answering calls } h := &Harness{ uuidG: NewDataGen(t, "UUID", DataGenFunc(GenUUID)), phoneCCG: NewDataGen(t, "Phone", DataGenArgFunc(GenPhoneCC)), emailG: NewDataGen(t, "Email", DataGenFunc(func() string { return GenUUID() + "@example.com" })), dbName: name, dbURL: DBURL(name), lastTimeChange: start, start: start, gqlSessions: make(map[string]string), t: t, } h.email = newEmailServer(h) h.tw = newTwilioAssertionAPI(func() { h.FastForward(time.Minute) h.Trigger() }, func(num string) string { id, ok := h.phoneCCG.names[num] if !ok { return num } return fmt.Sprintf("%s/Phone(%s)", num, id) }, mocktwilio.NewServer(twCfg), h.phoneCCG.Get("twilio")) h.twS = httptest.NewServer(h.tw) // freeze DB time until backend starts h.execQuery(` create schema testing_overrides; alter database `+sqlutil.QuoteID(name)+` set search_path = "$user", public,testing_overrides, pg_catalog; create or replace function testing_overrides.now() returns timestamp with time zone as $$ begin return '`+start.Format(dbTimeFormat)+`'; end; $$ language plpgsql; `, nil) h.Migrate(migrationName) h.initSlack() h.execQuery(initSQL, sqlData) return h } func (h *Harness) Start() { h.t.Helper() var cfg config.Config cfg.Slack.Enable = true cfg.Slack.AccessToken = h.slackApp.AccessToken cfg.Slack.ClientID = h.slackApp.ClientID cfg.Slack.ClientSecret = h.slackApp.ClientSecret cfg.Twilio.Enable = true cfg.Twilio.AccountSID = twilioAccountSID cfg.Twilio.AuthToken = twilioAuthToken cfg.Twilio.FromNumber = h.phoneCCG.Get("twilio") cfg.SMTP.Enable = true cfg.SMTP.Address = h.email.Addr() cfg.SMTP.DisableTLS = true cfg.SMTP.From = "goalert-test@localhost" cfg.Mailgun.Enable = true cfg.Mailgun.APIKey = mailgunAPIKey cfg.Mailgun.EmailDomain = "smoketest.example.com" h.cfg = cfg _, err := migrate.ApplyAll(context.Background(), h.dbURL) if err != nil { h.t.Fatalf("failed to migrate backend: %v\n", err) } ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() poolCfg, err := pgxpool.ParseConfig(h.dbURL) if err != nil { h.t.Fatalf("failed to parse db url: %v", err) } poolCfg.MaxConns = 2 h.db, err = pgxpool.ConnectConfig(ctx, poolCfg) if err != nil { h.t.Fatalf("failed to connect to db: %v", err) } // resume the flow of time err = h.db.QueryRow(ctx, `select pg_catalog.now()`).Scan(&h.pgResume) if err != nil { h.t.Fatalf("failed to get postgres timestamp: %v", err) } h.resumed = time.Now() h.lastTimeChange = time.Now().Add(100 * time.Millisecond) h.modifyDBOffset(0) appCfg := app.Defaults() appCfg.ListenAddr = "localhost:0" appCfg.Verbose = true appCfg.JSON = true appCfg.DBURL = h.dbURL appCfg.TwilioBaseURL = h.twS.URL 
appCfg.DBMaxOpen = 5 appCfg.SlackBaseURL = h.slackS.URL appCfg.InitialConfig = &h.cfg r, w := io.Pipe() h.backendLogs = w log.EnableJSON() log.SetOutput(w) go h.watchBackendLogs(r) dbCfg, err := pgx.ParseConfig(h.dbURL) if err != nil { h.t.Fatalf("failed to parse db url: %v", err) } h.backend, err = app.NewApp(appCfg, stdlib.OpenDB(*dbCfg)) if err != nil { h.t.Fatalf("failed to start backend: %v", err) } h.TwilioNumber("") // register default number go h.backend.Run(context.Background()) err = h.backend.WaitForStartup(ctx) if err != nil { h.t.Fatalf("failed to start backend: %v", err) } } // URL returns the backend server's URL func (h *Harness) URL() string { return h.backend.URL() } // Migrate will perform `steps` number of migrations. func (h *Harness) Migrate(migrationName string) { h.t.Helper() h.t.Logf("Running migrations (target: %s)", migrationName) _, err := migrate.Up(context.Background(), h.dbURL, migrationName) if err != nil { h.t.Fatalf("failed to run migration: %v", err) } } // IgnoreErrorsWith will cause the Harness to ignore backend errors containing the specified substring. func (h *Harness) IgnoreErrorsWith(substr string) { h.mx.Lock() defer h.mx.Unlock() h.ignoreErrors = append(h.ignoreErrors, substr) } func (h *Harness) modifyDBOffset(d time.Duration) { n := time.Now() d -= n.Sub(h.lastTimeChange) if n.After(h.lastTimeChange) { h.lastTimeChange = n } h.delayOffset += d h.setDBOffset(h.delayOffset) } func (h *Harness) setDBOffset(d time.Duration) { h.mx.Lock() defer h.mx.Unlock() elapsed := time.Since(h.resumed) h.t.Logf("Updating DB time offset to: %s (+ %s elapsed = %s since test start)", h.delayOffset.String(), elapsed.String(), (h.delayOffset + elapsed).String()) h.execQuery(fmt.Sprintf(` create or replace function testing_overrides.now() returns timestamp with time zone as $$ begin return cast('%s' as timestamp with time zone) + (pg_catalog.now() - cast('%s' as timestamp with time zone))::interval; end; $$ language plpgsql; `, h.start.Add(d).Format(dbTimeFormat), h.pgResume.Format(dbTimeFormat), ), nil) } func (h *Harness) FastForward(d time.Duration) { h.t.Helper() h.t.Logf("Fast-forward %s", d.String()) h.delayOffset += d h.setDBOffset(h.delayOffset) } func (h *Harness) execQuery(sql string, data interface{}) { h.t.Helper() t := template.New("sql") t.Funcs(template.FuncMap{ "uuidJSON": func(id string) string { return fmt.Sprintf(`"%s"`, h.uuidG.Get(id)) }, "uuid": func(id string) string { return fmt.Sprintf("'%s'", h.uuidG.Get(id)) }, "phone": func(id string) string { return fmt.Sprintf("'%s'", h.phoneCCG.Get(id)) }, "email": func(id string) string { return fmt.Sprintf("'%s'", h.emailG.Get(id)) }, "phoneCC": func(cc, id string) string { return fmt.Sprintf("'%s'", h.phoneCCG.GetWithArg(cc, id)) }, "slackChannelID": func(name string) string { return fmt.Sprintf("'%s'", h.Slack().Channel(name).ID()) }, }) _, err := t.Parse(sql) if err != nil { h.t.Fatalf("failed to parse query template: %v", err) } b := new(bytes.Buffer) err = t.Execute(b, data) if err != nil { h.t.Fatalf("failed to render query template: %v", err) } err = ExecSQLBatch(context.Background(), h.dbURL, b.String()) if err != nil { h.t.Fatalf("failed to exec query: %v", err) } } // CreateAlert will create one or more unacknowledged alerts for a service. 
func (h *Harness) CreateAlert(serviceID string, summary ...string) { h.t.Helper() permission.SudoContext(context.Background(), func(ctx context.Context) { h.t.Helper() tx, err := h.backend.DB().BeginTx(ctx, nil) if err != nil { h.t.Fatalf("failed to start tx: %v", err) } defer tx.Rollback() for _, sum := range summary { a := &alert.Alert{ ServiceID: serviceID, Summary: sum, } h.t.Logf("insert alert: %v", a) _, isNew, err := h.backend.AlertStore.CreateOrUpdateTx(ctx, tx, a) if err != nil { h.t.Fatalf("failed to insert alert: %v", err) } if !isNew { h.t.Fatal("could not create duplicate alert with summary: " + sum) } } err = tx.Commit() if err != nil { h.t.Fatalf("failed to commit tx: %v", err) } }) } // CreateManyAlert will create multiple new unacknowledged alerts for a given service. func (h *Harness) CreateManyAlert(serviceID, summary string) { h.t.Helper() a := &alert.Alert{ ServiceID: serviceID, Summary: summary, } h.t.Logf("insert alert: %v", a) permission.SudoContext(context.Background(), func(ctx context.Context) { h.t.Helper() _, err := h.backend.AlertStore.Create(ctx, a) if err != nil { h.t.Fatalf("failed to insert alert: %v", err) } }) } // AddNotificationRule will add a notification rule to the database. func (h *Harness) AddNotificationRule(userID, cmID string, delayMinutes int) { h.t.Helper() nr := &notificationrule.NotificationRule{ DelayMinutes: delayMinutes, UserID: userID, ContactMethodID: cmID, } h.t.Logf("insert notification rule: %v", nr) permission.SudoContext(context.Background(), func(ctx context.Context) { h.t.Helper() _, err := h.backend.NotificationRuleStore.Insert(ctx, nr) if err != nil { h.t.Fatalf("failed to insert notification rule: %v", err) } }) } // Trigger will trigger, and wait for, an engine cycle. func (h *Harness) Trigger() { h.backend.Engine.TriggerAndWaitNextCycle(context.Background()) } // Escalate will escalate an alert in the database, when 'level' matches. func (h *Harness) Escalate(alertID, level int) { h.t.Helper() h.t.Logf("escalate alert #%d (from level %d)", alertID, level) permission.SudoContext(context.Background(), func(ctx context.Context) { err := h.backend.AlertStore.Escalate(ctx, alertID, level) if err != nil { h.t.Fatalf("failed to escalate alert: %v", err) } }) } // Phone will return the generated phone number for the id provided. func (h *Harness) Phone(id string) string { return h.phoneCCG.Get(id) } // PhoneCC will return the generated phone number for the id provided. func (h *Harness) PhoneCC(cc, id string) string { return h.phoneCCG.GetWithArg(cc, id) } // UUID will return the generated UUID for the id provided. 
func (h *Harness) UUID(id string) string { return h.uuidG.Get(id) } func (h *Harness) isClosing() bool { h.mx.Lock() defer h.mx.Unlock() return h.closing } func (h *Harness) dumpDB() { testName := reflect.ValueOf(h.t).Elem().FieldByName("name").String() file := filepath.Join("smoketest_db_dump", testName+".sql") file, err := filepath.Abs(file) if err != nil { h.t.Fatalf("failed to get abs dump path: %v", err) } os.MkdirAll(filepath.Dir(file), 0755) var t time.Time err = h.db.QueryRow(context.Background(), "select now()").Scan(&t) if err != nil { h.t.Fatalf("failed to get current timestamp: %v", err) } conn, err := h.db.Acquire(context.Background()) if err != nil { h.t.Fatalf("failed to get db connection: %v", err) } defer conn.Release() fd, err := os.Create(file) if err != nil { h.t.Fatalf("failed to open dump file: %v", err) } defer fd.Close() err = pgdump.DumpData(context.Background(), conn.Conn(), fd) if err != nil { h.t.Errorf("failed to dump database '%s': %v", h.dbName, err) } _, err = fmt.Fprintf(fd, "\n-- Last Timestamp: %s\n", t.Format(time.RFC3339Nano)) if err != nil { h.t.Fatalf("failed to open DB dump: %v", err) } } // Close terminates any background processes, and drops the testing database. // It should be called at the end of all tests (usually with `defer h.Close()`). func (h *Harness) Close() error { h.t.Helper() if recErr := recover(); recErr != nil { defer panic(recErr) } h.tw.WaitAndAssert(h.t) h.slack.WaitAndAssert() h.email.WaitAndAssert() h.mx.Lock() h.closing = true h.mx.Unlock() ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() err := h.backend.Shutdown(ctx) if err != nil { h.t.Error("failed to shutdown backend cleanly:", err) } h.backendLogs.Close() h.slackS.Close() h.twS.Close() h.tw.Close() h.dumpDB() h.db.Close() conn, err := pgx.Connect(ctx, DBURL("")) if err != nil { h.t.Error("failed to connect to DB:", err) } defer conn.Close(ctx) _, err = conn.Exec(ctx, "drop database "+sqlutil.QuoteID(h.dbName)) if err != nil { h.t.Errorf("failed to drop database '%s': %v", h.dbName, err) } return nil } // SetCarrierName will set the carrier name for the given phone number. func (h *Harness) SetCarrierName(number, name string) { h.tw.Server.SetCarrierInfo(number, twilio.CarrierInfo{Name: name}) } // TwilioNumber will return a registered (or register if missing) Twilio number for the given ID. // The default FromNumber will always be the empty ID. func (h *Harness) TwilioNumber(id string) string { num := h.phoneCCG.Get("twilio" + id) err := h.tw.RegisterSMSCallback(num, h.URL()+"/v1/twilio/sms/messages") if err != nil { h.t.Fatalf("failed to init twilio (SMS callback): %v", err) } err = h.tw.RegisterVoiceCallback(num, h.URL()+"/v1/twilio/voice/call") if err != nil { h.t.Fatalf("failed to init twilio (voice callback): %v", err) } return num } // CreateUser generates a random user. func (h *Harness) CreateUser() (u *user.User) { h.t.Helper() var err error permission.SudoContext(context.Background(), func(ctx context.Context) { u, err = h.backend.UserStore.Insert(ctx, &user.User{ Name: fmt.Sprintf("Generated%d", h.userGeneratedIndex), ID: uuid.NewV4().String(), Role: permission.RoleUser, Email: fmt.Sprintf("generated%[email protected]", h.userGeneratedIndex), }) }) if err != nil { h.t.Fatal(errors.Wrap(err, "generate random user")) } h.userGeneratedIndex++ return u } // WaitAndAssertOnCallUsers will ensure the correct set of users as on-call for the given serviceID. 
func (h *Harness) WaitAndAssertOnCallUsers(serviceID string, userIDs ...string) { h.t.Helper() doQL := func(query string, res interface{}) { g := h.GraphQLQuery2(query) for _, err := range g.Errors { h.t.Error("GraphQL Error:", err.Message) } if len(g.Errors) > 0 { h.t.Fatal("errors returned from GraphQL") } if res == nil { return } err := json.Unmarshal(g.Data, &res) if err != nil { h.t.Fatal("failed to parse response:", err) } } getUsers := func() []string { var result struct { Service struct { OnCallUsers []struct { UserID string UserName string } } } doQL(fmt.Sprintf(` query{ service(id: "%s"){ onCallUsers{ userID userName } } } `, serviceID), &result) var ids []string for _, oc := range result.Service.OnCallUsers { ids = append(ids, oc.UserID) } if len(ids) == 0 { return nil } sort.Strings(ids) uniq := ids[:1] last := ids[0] for _, id := range ids[1:] { if id == last { continue } uniq = append(uniq, id) last = id } return uniq } sort.Strings(userIDs) match := func(final bool) bool { ids := getUsers() if len(ids) != len(userIDs) { if final { h.t.Fatalf("got %d on-call users; want %d", len(ids), len(userIDs)) } return false } for i, id := range userIDs { if ids[i] != id { if final { h.t.Fatalf("on-call[%d] = %s; want %s", i, ids[i], id) } return false } } return true } h.Trigger() // run engine cycle match(true) // assert result }
[ "\"DB_URL\"" ]
[]
[ "DB_URL" ]
[]
["DB_URL"]
go
1
0
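harness.go reads only DB_URL, falling back to a local Postgres URL when it is unset. A small sketch of that pattern; the fallback string below is an assumption, since the harness's own default has been obfuscated in the dump above:

package main

import (
	"fmt"
	"net/url"
	"os"
)

// dbURL reads DB_URL and falls back to an assumed local Postgres URL when it
// is unset, then parses it the same way the harness does at init time.
func dbURL() (*url.URL, error) {
	s := os.Getenv("DB_URL")
	if s == "" {
		s = "postgres://postgres@localhost:5432?sslmode=disable"
	}
	return url.Parse(s)
}

func main() {
	u, err := dbURL()
	if err != nil {
		fmt.Println("parse DB_URL:", err)
		return
	}
	fmt.Println("using database:", u.String())
}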
src/main/java/com/hacker/rank/algorithms/easy/SherlockSquares.java
/* * Copyright (C) 2020 Iván Camilo Sanabria. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.hacker.rank.algorithms.easy; import java.io.BufferedWriter; import java.io.FileWriter; import java.io.IOException; import java.util.Scanner; /** * Class that is executed in hacker rank website as solution. * * @author Iván Camilo Sanabria ([email protected]) * @since 1.0.0 */ public class SherlockSquares { /** * Return the number of squares (1,4,9..) of given range. * * @param a Start of the range. * @param b End of the range. * @return Number of squares in the given range. */ private static int squares(int a, int b) { final int highSqrt = (int) Math.floor(Math.sqrt(b)); int lowSqrt = (int) Math.floor(Math.sqrt(a)); if (lowSqrt * lowSqrt == a) lowSqrt--; return highSqrt - lowSqrt; } /** * Main function provided by hacker rank website. * * @param args Arguments of the program. * @throws IOException Thrown when the application is not able to read or write data in the OUTPUT_PATH. */ @SuppressWarnings("Duplicates") public static void main(String[] args) throws IOException { final Scanner scanner = new Scanner(System.in); final FileWriter fileWriter = new FileWriter(System.getenv("OUTPUT_PATH")); final BufferedWriter bufferedWriter = new BufferedWriter(fileWriter); final int q = scanner.nextInt(); for (int qItr = 0; qItr < q; qItr++) { final int a = scanner.nextInt(); final int b = scanner.nextInt(); final int result = squares(a, b); bufferedWriter.write( String.valueOf(result)); bufferedWriter.newLine(); } bufferedWriter.close(); scanner.close(); } }
[ "\"OUTPUT_PATH\"" ]
[]
[ "OUTPUT_PATH" ]
[]
["OUTPUT_PATH"]
java
1
0
store/cockroach/cockroach_test.go
package cockroach import ( "database/sql" "fmt" "os" "testing" "time" "github.com/kr/pretty" "github.com/micro/go-micro/v3/store" ) func TestSQL(t *testing.T) { if len(os.Getenv("IN_TRAVIS_CI")) != 0 { t.Skip() } connection := fmt.Sprintf( "host=%s port=%d user=%s sslmode=disable dbname=%s", "localhost", 26257, "root", "test", ) db, err := sql.Open("postgres", connection) if err != nil { t.Fatal(err) } if err := db.Ping(); err != nil { t.Skip("store/cockroach: can't connect to db") } db.Close() sqlStore := NewStore( store.Database("testsql"), store.Nodes(connection), ) if err := sqlStore.Init(); err != nil { t.Fatal(err) } keys, err := sqlStore.List() if err != nil { t.Error(err) } else { t.Logf("%# v\n", pretty.Formatter(keys)) } err = sqlStore.Write( &store.Record{ Key: "test", Value: []byte("foo"), }, ) if err != nil { t.Error(err) } err = sqlStore.Write( &store.Record{ Key: "bar", Value: []byte("baz"), }, ) if err != nil { t.Error(err) } err = sqlStore.Write( &store.Record{ Key: "qux", Value: []byte("aasad"), }, ) if err != nil { t.Error(err) } err = sqlStore.Delete("qux") if err != nil { t.Error(err) } err = sqlStore.Write(&store.Record{ Key: "test", Value: []byte("bar"), Expiry: time.Second * 10, }) if err != nil { t.Error(err) } records, err := sqlStore.Read("test") if err != nil { t.Error(err) } t.Logf("%# v\n", pretty.Formatter(records)) if string(records[0].Value) != "bar" { t.Error("Expected bar, got ", string(records[0].Value)) } time.Sleep(11 * time.Second) _, err = sqlStore.Read("test") switch err { case nil: t.Error("Key test should have expired") default: t.Error(err) case store.ErrNotFound: break } sqlStore.Delete("bar") sqlStore.Write(&store.Record{Key: "aaa", Value: []byte("bbb"), Expiry: 5 * time.Second}) sqlStore.Write(&store.Record{Key: "aaaa", Value: []byte("bbb"), Expiry: 5 * time.Second}) sqlStore.Write(&store.Record{Key: "aaaaa", Value: []byte("bbb"), Expiry: 5 * time.Second}) results, err := sqlStore.Read("a", store.ReadPrefix()) if err != nil { t.Error(err) } if len(results) != 3 { t.Fatal("Results should have returned 3 records") } time.Sleep(6 * time.Second) results, err = sqlStore.Read("a", store.ReadPrefix()) if err != nil { t.Error(err) } if len(results) != 0 { t.Fatal("Results should have returned 0 records") } }
[ "\"IN_TRAVIS_CI\"" ]
[]
[ "IN_TRAVIS_CI" ]
[]
["IN_TRAVIS_CI"]
go
1
0
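The cockroach test's single variable, IN_TRAVIS_CI, is a CI guard rather than configuration. A minimal sketch of the same guard in an isolated test (test name and skip message are illustrative):

package cockroach

import (
	"os"
	"testing"
)

// TestSkipInCI is an illustrative stand-in for TestSQL: the store test is
// skipped outright whenever IN_TRAVIS_CI is set to a non-empty value.
func TestSkipInCI(t *testing.T) {
	if len(os.Getenv("IN_TRAVIS_CI")) != 0 {
		t.Skip("skipping store/cockroach tests in CI")
	}
	// The real test would open and ping the local CockroachDB node here.
}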
Java/Java-CachingSamples/AirportSample/src/com/ibm/websphere/xs/sample/airport/client/AirportClient.java
/* This sample program is provided AS IS and may be used, executed, copied and modified without royalty payment by customer (a) for its own instruction and study, (b) in order to develop applications designed to run with an IBM WebSphere product, either for customer's own internal use or for redistribution by customer, as part of such an application, in customer's own products. " 5724-J34 (C) COPYRIGHT International Business Machines Corp. 2014 All Rights Reserved * Licensed Materials - Property of IBM */ package com.ibm.websphere.xs.sample.airport.client; import java.io.IOException; import java.util.Properties; import java.util.SortedSet; import java.util.TreeSet; import org.json.JSONArray; import org.json.JSONException; import org.json.JSONObject; import com.ibm.websphere.objectgrid.ClientClusterContext; import com.ibm.websphere.objectgrid.ConnectException; import com.ibm.websphere.objectgrid.ObjectGrid; import com.ibm.websphere.objectgrid.ObjectGridException; import com.ibm.websphere.objectgrid.ObjectGridManager; import com.ibm.websphere.objectgrid.ObjectGridManagerFactory; import com.ibm.websphere.objectgrid.ObjectMap; import com.ibm.websphere.objectgrid.Session; import com.ibm.websphere.objectgrid.plugins.TransactionCallbackException; import com.ibm.websphere.objectgrid.security.config.ClientSecurityConfiguration; import com.ibm.websphere.objectgrid.security.config.ClientSecurityConfigurationFactory; import com.ibm.websphere.objectgrid.security.plugins.builtins.UserPasswordCredentialGenerator; import com.ibm.websphere.xs.sample.airport.domain.AirportCodes; import com.ibm.websphere.xs.sample.airport.domain.AirportData; /** * This AirportClient class provides functions * to access Data Cache service * */ public class AirportClient { private String mapName = null; private ObjectGrid ivObjectGrid; private final static String dataServiceMapName = "sample.NONE.P"; private final static String testGridName = "Airports"; private final static String testCSEndpoints = "localhost:2809"; private final static String testMapName = "AirportCodeInfo"; /** * Connects to a remote data cache specified by the passed-in string, or * using VCAP environment variables if the passed-in string is null. 
* * @param catalogServiceEndpoints * @return */ public void connect(String catalogServiceEndpoints) throws ConnectException { String username = null; String password = null; String endpoint = catalogServiceEndpoints; String gridName = null; // If there is no CS end points given if (endpoint == null) { // Try to get connection information from environment java.util.Map<String, String> env = System.getenv(); String vcap = env.get("VCAP_SERVICES"); if (vcap != null) { try { JSONObject obj = new JSONObject(vcap); String[] names = JSONObject.getNames(obj); if (names != null) { for (String name:names) { if (name.startsWith("DataCache")) { JSONArray val = obj.getJSONArray(name); JSONObject serviceAttr = val.getJSONObject(0); JSONObject credentials = serviceAttr.getJSONObject("credentials"); username = credentials.getString("username"); password = credentials.getString("password"); endpoint = credentials.getString("catalogEndPoint"); gridName = credentials.getString("gridName"); mapName = dataServiceMapName; break; } } } } catch (JSONException e) { System.err.println("Error reading VCAP Variables: " + vcap); } } else { endpoint = testCSEndpoints; gridName = testGridName; mapName = testMapName; } } else { gridName = testGridName; mapName = testMapName; } System.out.println("Endpoints: " + endpoint + ", Gridname: " + gridName + ", MapName: " + mapName); // Create an ObjectGridManager instance. ObjectGridManager ogm = ObjectGridManagerFactory.getObjectGridManager(); // Create client security configuration objects ClientSecurityConfiguration csc = null; if (username != null) { csc = ClientSecurityConfigurationFactory.getClientSecurityConfiguration(); csc.setCredentialGenerator(new UserPasswordCredentialGenerator(username,password)); csc.setSecurityEnabled(true); } // Obtain a ClientClusterContext by connecting to data cache service's catalog server ClientClusterContext ccc = ogm.connect(endpoint, csc, null); // Obtain a distributed ObjectGrid using ObjectGridManager and providing // the ClientClusterContext. ObjectGrid og = ogm.getObjectGrid(ccc, gridName); ivObjectGrid = og; } /** * Return true if the data cache is pre-loaded with airport information * * @return */ public boolean isMapPreloaded() { boolean isPreloaded = false; try { if (ivObjectGrid == null) { connect(null); } Session session = ivObjectGrid.getSession(); ObjectMap map = session.getMap(mapName); // Try to get the airport codes from the data cache. // If it is available, the data cache is pre-loaded String allCodeString = (String) map.get(AirportCodes.ALL_AIRPORT_CODE); if (allCodeString != null && !"".equals(allCodeString)) { isPreloaded = true; } } catch (Exception e) { isPreloaded = false; } return isPreloaded; } /** * pre-load the data cache with airport information. 
* * @return * @throws ObjectGridException * @throws IOException */ public AirportCodes preloadMap() throws ObjectGridException, IOException { AirportCodes apCodes = new AirportCodes(); Properties ivData = new Properties(); ivData.load(this.getClass().getClassLoader().getResourceAsStream("WEB-INF/airport.props")); if (ivObjectGrid == null) { connect(null); } // Create a session to the specified grid Session sess = ivObjectGrid.getSession(); // Create a map to load data in to ObjectMap map = sess.getMap(mapName); // Load the airport information into the map, entry by entry for (Object key : ivData.keySet()) { String raw = key + "=" + ivData.getProperty((String) key); AirportData data = AirportData.newAirportData(raw); if (data != null) { try { map.insert(key, data); apCodes.addCode(key.toString()); } catch (ObjectGridException e) { System.err.println("Problem inserting " + raw + " into the map. " + e.getClass().getName()); } } } return apCodes; } /** * Return the airport information of the given airport code * * @param airportCode * @return * @throws ObjectGridException */ public AirportData getAirportData(String airportCode) throws ObjectGridException { if (ivObjectGrid == null) { connect(null); } Session session = ivObjectGrid.getSession(); ObjectMap map = session.getMap(mapName); Object o = map.get(airportCode); AirportData data = null; if (o instanceof AirportData) { data = (AirportData) o; } return data; } /** * Add the given airport information to data cache * * @param code * @param data * @throws ObjectGridException */ public void addNewAirport(String code, AirportData data) throws ObjectGridException { if (ivObjectGrid == null) { connect(null); } Session session = ivObjectGrid.getSession(); ObjectMap map = session.getMap(mapName); map.insert(code, data); } /** * Update the given airport information in data cache * * @param code * @param data * @throws ObjectGridException */ public void updateAirport(String code, AirportData data) throws ObjectGridException { if (ivObjectGrid == null) { connect(null); } Session session = ivObjectGrid.getSession(); ObjectMap map = session.getMap(mapName); map.update(code, data); } /** * Delete airport information corresponding the given airport code from data cache * * @param code * @throws ObjectGridException */ public void deleteAirport(String code) throws ObjectGridException { if (ivObjectGrid == null) { connect(null); } Session session = ivObjectGrid.getSession(); ObjectMap map = session.getMap(mapName); map.remove(code); } /** * Add all airport codes into data cache as a single entry * * @param allCodes * @throws ObjectGridException */ public void addAllCodes(AirportCodes allCodes) throws ObjectGridException { if (ivObjectGrid == null) { connect(null); } Session session = ivObjectGrid.getSession(); ObjectMap map = session.getMap(mapName); map.insert(AirportCodes.ALL_AIRPORT_CODE, allCodes.toString()); } /** * Return all airport codes from data cache * * @return * @throws TransactionCallbackException * @throws ObjectGridException */ public SortedSet<String> getAllCodes() throws TransactionCallbackException, ObjectGridException { if (ivObjectGrid == null) { connect(null); } SortedSet<String> allCodes = null; Session session = ivObjectGrid.getSession(); ObjectMap map = session.getMap(mapName); String allCodeString = (String) map.get(AirportCodes.ALL_AIRPORT_CODE); AirportCodes apCode = AirportCodes.load(allCodeString); allCodes = apCode.getAllCodes(); if (allCodes == null) { allCodes = new TreeSet<String>(); } return allCodes; } }
[]
[]
[]
[]
[]
java
0
0
server/data/cache.go
package data import ( "fmt" "log" "os" "strconv" "github.com/go-redis/redis/v8" ) var RS *redis.Client func ConnectCache() { dbn, e := strconv.Atoi(os.Getenv("REDIS_DB")) if e != nil { log.Fatal(e.Error()) } if rdb := redis.NewClient(&redis.Options{ Addr: fmt.Sprintf("%s:%s", os.Getenv("REDIS_HOST"), os.Getenv("REDIS_PORT")), Password: os.Getenv("REDIS_PASSWORD"), DB: dbn, }); rdb == nil { log.Fatal("Failed to connect to redis host") } else { RS = rdb fmt.Println("Successfully connected to Redis server") } }
[ "\"REDIS_DB\"", "\"REDIS_HOST\"", "\"REDIS_PORT\"", "\"REDIS_PASSWORD\"" ]
[]
[ "REDIS_PASSWORD", "REDIS_DB", "REDIS_PORT", "REDIS_HOST" ]
[]
["REDIS_PASSWORD", "REDIS_DB", "REDIS_PORT", "REDIS_HOST"]
go
4
0
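cache.go reads four REDIS_* variables and then checks the client for nil, but redis.NewClient never returns nil, so that check cannot detect a bad host, port, or password. A hedged sketch using the same variables with a Ping-based connectivity check instead (the function name connectCacheSketch is illustrative):

package data

import (
	"context"
	"fmt"
	"log"
	"os"
	"strconv"

	"github.com/go-redis/redis/v8"
)

// connectCacheSketch reads the same four REDIS_* variables as ConnectCache,
// but verifies connectivity with Ping instead of a nil check.
func connectCacheSketch(ctx context.Context) (*redis.Client, error) {
	dbn, err := strconv.Atoi(os.Getenv("REDIS_DB"))
	if err != nil {
		return nil, fmt.Errorf("REDIS_DB must be an integer: %w", err)
	}
	rdb := redis.NewClient(&redis.Options{
		Addr:     fmt.Sprintf("%s:%s", os.Getenv("REDIS_HOST"), os.Getenv("REDIS_PORT")),
		Password: os.Getenv("REDIS_PASSWORD"),
		DB:       dbn,
	})
	if err := rdb.Ping(ctx).Err(); err != nil {
		return nil, fmt.Errorf("failed to connect to redis: %w", err)
	}
	log.Println("connected to Redis")
	return rdb, nil
}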
src/version.py
__version__ = '0.7.3-dev'
[]
[]
[]
[]
[]
python
null
null
null
utils/config-generator.py
import argparse import collections import json import os import requests import socket from hashlib import blake2b from json.decoder import JSONDecodeError from operator import itemgetter from pathlib import Path from re import sub from pytezos import pytezos from base58 import b58encode_check ACCOUNTS = json.loads(os.environ["ACCOUNTS"]) CHAIN_PARAMS = json.loads(os.environ["CHAIN_PARAMS"]) NODES = json.loads(os.environ["NODES"]) SIGNERS = json.loads(os.environ["SIGNERS"]) OPEN_ACLS = os.environ["OPEN_ACLS"] MY_POD_NAME = os.environ["MY_POD_NAME"] MY_POD_TYPE = os.environ["MY_POD_TYPE"] MY_POD_CONFIG = None ALL_NODES = {} BAKING_NODES = {} for cl, val in NODES.items(): if val != None: for i, inst in enumerate(val["instances"]): name = f"{cl}-{i}" ALL_NODES[name] = inst if name == MY_POD_NAME: MY_POD_CONFIG = inst if "runs" in val: if "baker" in val["runs"]: BAKING_NODES[name] = inst if MY_POD_TYPE == "signing": MY_POD_CONFIG = SIGNERS[MY_POD_NAME] NETWORK_CONFIG = CHAIN_PARAMS["network"] SHOULD_GENERATE_UNSAFE_DETERMINISTIC_DATA = CHAIN_PARAMS.get( "should_generate_unsafe_deterministic_data" ) # If there are no genesis params, this is a public chain. THIS_IS_A_PUBLIC_NET = True if not NETWORK_CONFIG.get("genesis") else False def main(): all_accounts = ACCOUNTS if SHOULD_GENERATE_UNSAFE_DETERMINISTIC_DATA: fill_in_missing_genesis_block() all_accounts = fill_in_missing_baker_accounts() fill_in_missing_keys(all_accounts) import_keys(all_accounts) if MY_POD_NAME in BAKING_NODES: # If this node is a baker, it must have an account with a secret key. verify_this_bakers_account(all_accounts) main_parser = argparse.ArgumentParser() main_parser.add_argument( "--generate-parameters-json", action="store_true", help="generate parameters.json", ) main_parser.add_argument( "--generate-config-json", action="store_true", help="generate config.json" ) main_args = main_parser.parse_args() # Create parameters.json if main_args.generate_parameters_json: print("Starting parameters.json file generation") protocol_parameters = create_protocol_parameters_json(all_accounts) protocol_params_json = json.dumps(protocol_parameters, indent=2) with open("/etc/tezos/parameters.json", "w") as json_file: print(protocol_params_json, file=json_file) # Create config.json if main_args.generate_config_json: print("\nStarting config.json file generation") bootstrap_peers = CHAIN_PARAMS.get("bootstrap_peers", []) my_zerotier_ip = None zerotier_data_file_path = Path("/var/tezos/zerotier_data.json") if is_chain_running_on_zerotier_net(zerotier_data_file_path): my_zerotier_ip = get_my_pods_zerotier_ip(zerotier_data_file_path) if bootstrap_peers == []: bootstrap_peers.extend(get_zerotier_bootstrap_peer_ips()) if THIS_IS_A_PUBLIC_NET: with open("/etc/tezos/data/config.json", "r") as f: bootstrap_peers.extend(json.load(f)["p2p"]["bootstrap-peers"]) else: local_bootstrap_peers = [] for name, settings in ALL_NODES.items(): print(" -- is " + name + " a bootstrap peer?\n") my_pod_fqdn_with_port = f"{socket.getfqdn()}:9732" if ( settings.get("is_bootstrap_node", False) and name not in my_pod_fqdn_with_port ): # Construct the FBN of the bootstrap node for all node's bootstrap_peers print(" -- YES!\n") bootstrap_peer_domain = sub(r"-\d+$", "", name) bootstrap_peer_fbn_with_port = ( f"{name}.{bootstrap_peer_domain}:9732" ) local_bootstrap_peers.append(bootstrap_peer_fbn_with_port) bootstrap_peers.extend(local_bootstrap_peers) if not bootstrap_peers and not MY_POD_CONFIG.get("is_bootstrap_node", False): raise Exception( "ERROR: No bootstrap 
peers found for this non-bootstrap node" ) config_json = json.dumps( create_node_config_json( bootstrap_peers, my_zerotier_ip, ), indent=2, ) print("Generated config.json :") print(config_json) with open("/etc/tezos/config.json", "w") as json_file: print(config_json, file=json_file) # If NETWORK_CONFIG["genesis"]["block"] hasn't been specified, we generate a # deterministic one. def fill_in_missing_genesis_block(): print("\nEnsure that we have genesis_block") genesis_config = NETWORK_CONFIG["genesis"] genesis_block_placeholder = "YOUR_GENESIS_BLOCK_HASH_HERE" if ( genesis_config.get("block", genesis_block_placeholder) == genesis_block_placeholder ): print("Deterministically generating missing genesis_block") seed = "foo" gbk = blake2b(seed.encode(), digest_size=32).digest() gbk_b58 = b58encode_check(b"\x01\x34" + gbk).decode("utf-8") genesis_config["block"] = gbk_b58 def get_baking_accounts(baker_values): acct = baker_values.get("bake_using_account") accts = baker_values.get("bake_using_accounts") if acct and accts: raise ValueError( 'Mustn\'t specify both "bake_using_account" and "bake_using_accounts"' ) if acct: accts = [acct] return accts # Secret and public keys are matches and need be processed together. Neither key # must be specified, as later code will fill in the details if they are not. # # We create any missing accounts that are refered to by a node at # BAKING_NODES to ensure that all named accounts exist. def fill_in_missing_baker_accounts(): print("\nFilling in any missing baker accounts...") new_accounts = {} init_balance = CHAIN_PARAMS["default_bootstrap_mutez"] for baker_name, baker_values in BAKING_NODES.items(): accts = get_baking_accounts(baker_values) if not accts: print(f"Defaulting to baking with account: {baker_name}") accts = [baker_name] baker_values["bake_using_account"] = None baker_values["bake_using_accounts"] = accts for acct in accts: if acct not in ACCOUNTS: print(f"Creating account: {acct}") new_accounts[acct] = { "bootstrap_balance": init_balance, "is_bootstrap_baker_account": True, } return {**new_accounts, **ACCOUNTS} # Verify that the current baker has a baker account with secret key def verify_this_bakers_account(accounts): accts = get_baking_accounts(MY_POD_CONFIG) if not accts or len(accts) < 1: raise Exception("ERROR: No baker accounts specified") for acct in accts: if not accounts.get(acct): raise Exception(f"ERROR: No account named {acct} found.") # We can count on accounts[acct]["type"] because import_keys will # fill it in when it is missing. if accounts[acct]["type"] != "secret": raise Exception(f"ERROR: Either a secret key was not provided for {acct}") # # import_keys() creates three files in /var/tezos/client which specify # the keys for each of the accounts: secret_keys, public_keys, and # public_key_hashs. # # We iterate over fill_in_missing_baker_accounts() which ensures that we # have a full set of accounts for which to write keys. # # If the account has a private key specified, we parse it and use it to # derive the public key and its hash. If a public key is also specified, # we check to ensure that it matches the secret key. If neither a secret # nor a public key are specified, then we derive one from a hash of # the account name and the gensis_block (which may be generated above.) # # Both specified and generated keys are stable for the same _values.yaml # files. The specified keys for obvious reasons. 
The generated keys # are stable because we take care not to use any information that is not # specified in the _values.yaml file in the seed used to generate them. # # import_keys() also fills in "pk" and "pkh" as the public key and # public key hash as a side-effect. These are used later. edsk = b"\x0d\x0f\x3a\x07" def fill_in_missing_keys(all_accounts): print("\nFill in missing keys") for account_name, account_values in all_accounts.items(): account_key_type = account_values.get("type") account_key = account_values.get("key") if account_key == None and account_key_type != None: raise Exception( f"ERROR: {account_name} specifies " + f"type {account_key_type} without " + f"a key" ) if account_key == None: print( f" Deriving secret key for account " + f"{account_name} from genesis_block" ) seed = account_name + ":" + NETWORK_CONFIG["genesis"]["block"] sk = blake2b(seed.encode(), digest_size=32).digest() sk_b58 = b58encode_check(edsk + sk).decode("utf-8") account_values["key"] = sk_b58 account_values["type"] = "secret" # # expose_secret_key() decides if an account needs to have its secret # key exposed on the current pod. It returns the obvious Boolean. def expose_secret_key(account_name): if MY_POD_TYPE == "activating": return NETWORK_CONFIG["activation_account_name"] == account_name if MY_POD_TYPE == "signing": return account_name in MY_POD_CONFIG.get("sign_for_accounts") if MY_POD_TYPE == "node": if MY_POD_CONFIG.get("bake_using_account", "") == account_name: return True return account_name in MY_POD_CONFIG.get("bake_using_accounts", {}) return False # # pod_requires_secret_key() decides if a pod requires the secret key, # regardless of a remote_signer being present. E.g. the remote signer # needs to have the keys not a URL to itself. def pod_requires_secret_key(account_name): return MY_POD_TYPE in ["activating", "signing"] # # remote_signer() picks the first signer, if any, that claims to sign # for account_name and returns a URL to locate it. 
def remote_signer(account_name, key): for k, v in SIGNERS.items(): if account_name in v["sign_for_accounts"]: return f"http://{k}.tezos-signer:6732/{key.public_key_hash()}" return None def import_keys(all_accounts): print("\nImporting keys") tezdir = "/var/tezos/client" secret_keys = [] public_keys = [] public_key_hashs = [] for account_name, account_values in all_accounts.items(): print("\n Importing keys for account: " + account_name) account_key_type = account_values.get("type") account_key = account_values.get("key") if account_key == None: raise Exception(f"{account_name} defined w/o a key") key = pytezos.key.from_encoded_key(account_key) try: key.secret_key() except ValueError: account_values["type"] = "public" if account_key_type == "secret": raise ValueError( account_name + "'s key marked as " + "secret, but it is public" ) else: account_values["type"] = "secret" if account_key_type == "public": raise ValueError( account_name + "'s key marked as " + "public, but it is secret" ) # restrict which private key is exposed to which pod if expose_secret_key(account_name): sk = remote_signer(account_name, key) if sk == None or pod_requires_secret_key(account_name): try: sk = "unencrypted:" + key.secret_key() except ValueError: raise ("Secret key required but not provided.") print(" Appending secret key") else: print(" Using remote signer: " + sk) secret_keys.append({"name": account_name, "value": sk}) pk_b58 = key.public_key() print(f" Appending public key: {pk_b58}") public_keys.append( { "name": account_name, "value": {"locator": "unencrypted:" + pk_b58, "key": pk_b58}, } ) account_values["pk"] = pk_b58 pkh_b58 = key.public_key_hash() print(f" Appending public key hash: {pkh_b58}") public_key_hashs.append({"name": account_name, "value": pkh_b58}) account_values["pkh"] = pkh_b58 # XXXrcd: fix this print! print(f" Account key type: {account_values.get('type')}") print( f" Account bootstrap balance: " + f"{account_values.get('bootstrap_balance')}" ) print( f" Is account a bootstrap baker: " + f"{account_values.get('is_bootstrap_baker_account', False)}" ) print("\n Writing " + tezdir + "/secret_keys") json.dump(secret_keys, open(tezdir + "/secret_keys", "w"), indent=4) print(" Writing " + tezdir + "/public_keys") json.dump(public_keys, open(tezdir + "/public_keys", "w"), indent=4) print(" Writing " + tezdir + "/public_key_hashs") json.dump(public_key_hashs, open(tezdir + "/public_key_hashs", "w"), indent=4) # # get_genesis_accounts_pubkey_and_balance(accounts) returns a list # of lists: [ [key1, balance2], [key2, balance2], ... ] for all of # the accounts prepopulated on our new chain. Currently, if a public # key is provided then the account is signed up as a baker from the # start. If just a public key hash is provided, then it is not. We # use a public key if the property "is_bootstrap_baker_account" is # either absent or true. def get_genesis_accounts_pubkey_and_balance(accounts): pubkey_and_balance_pairs = [] for v in accounts.values(): if "bootstrap_balance" in v and v["bootstrap_balance"] != "0": if not v.get("is_bootstrap_baker_account", True): key = v.get("pkh") else: key = v.get("pk") pubkey_and_balance_pairs.append([key, v["bootstrap_balance"]]) return pubkey_and_balance_pairs # # commitments and bootstrap_accounts are not part of # `CHAIN_PARAMS["protocol_parameters"]`. The commitment size for Florence was # too large to load from Helm to k8s. So we are mounting a file containing them. # bootstrap accounts always needs massaging so they are passed as arguments. 
def create_protocol_parameters_json(accounts): """Create the protocol's parameters.json file""" pubkeys_with_balances = get_genesis_accounts_pubkey_and_balance(accounts) protocol_activation = CHAIN_PARAMS["protocol_activation"] protocol_params = protocol_activation["protocol_parameters"] protocol_params["bootstrap_accounts"] = pubkeys_with_balances print(json.dumps(protocol_activation, indent=4)) # genesis contracts and commitments are downloaded from a http location (like a bucket) # they are typically too big to be passed directly to helm if protocol_activation.get("bootstrap_contract_urls"): protocol_params["bootstrap_contracts"] = [] for url in protocol_activation["bootstrap_contract_urls"]: print(f"Injecting bootstrap contract from {url}") protocol_params["bootstrap_contracts"].append(requests.get(url).json()) if protocol_activation.get("commitments_url"): print( f"Injecting commitments (faucet account precursors) from {protocol_activation['commitments_url']}" ) protocol_params["commitments"] = requests.get( protocol_activation["commitments_url"] ).json() return protocol_params def is_chain_running_on_zerotier_net(file): return file.is_file() def get_my_pods_zerotier_ip(zerotier_data_file_path): with open(zerotier_data_file_path, "r") as f: return json.load(f)[0]["assignedAddresses"][0].split("/")[0] def get_zerotier_bootstrap_peer_ips(): with open("/var/tezos/zerotier_network_members.json", "r") as f: network_members = json.load(f) return [ n["config"]["ipAssignments"][0] for n in network_members if "ipAssignments" in n["config"] and n["name"] == f"{CHAIN_PARAMS['network']['chain_name']}_bootstrap" ] def get_genesis_pubkey(): with open("/var/tezos/client/public_keys", "r") as f: pubkeys = json.load(f) genesis_pubkey = None for _, pubkey in enumerate(pubkeys): if pubkey["name"] == NETWORK_CONFIG["activation_account_name"]: genesis_pubkey = pubkey["value"]["key"] break if not genesis_pubkey: raise Exception("ERROR: Couldn't find the genesis_pubkey") return genesis_pubkey def recursive_update(d, u): """ Recursive dict update Used to merge node's config passed as chart values and computed values https://stackoverflow.com/a/3233356/207209 """ for k, v in u.items(): if isinstance(v, collections.abc.Mapping): d[k] = recursive_update(d.get(k, {}), v) else: d[k] = v return d def create_node_config_json( bootstrap_peers, net_addr=None, ): """Create the node's config.json file""" values_node_config = MY_POD_CONFIG.get("config", {}) computed_node_config = { "data-dir": "/var/tezos/node/data", "rpc": { "listen-addrs": [f"{os.getenv('MY_POD_IP')}:8732", "127.0.0.1:8732"], }, "p2p": { "bootstrap-peers": bootstrap_peers, "listen-addr": (net_addr + ":9732" if net_addr else "[::]:9732"), }, # "log": {"level": "debug"}, } if OPEN_ACLS == "true": computed_node_config["rpc"]["acl"] = [ { "address": os.getenv('MY_POD_IP'), "blacklist": [] } ] node_config = recursive_update(values_node_config, computed_node_config) if THIS_IS_A_PUBLIC_NET: # `tezos-node config --network ...` will have been run in config-init.sh # producing a config.json. The value passed to the `--network` flag may # have been the chain name or a url to the config.json of the chain. # Either way, set the `network` field here as the `network` object of the # produced config.json. 
with open("/etc/tezos/data/config.json", "r") as f: node_config["network"] = json.load(f)["network"] else: if CHAIN_PARAMS.get("expected-proof-of-work") != None: node_config["p2p"]["expected-proof-of-work"] = CHAIN_PARAMS[ "expected-proof-of-work" ] node_config["network"] = NETWORK_CONFIG node_config["network"]["sandboxed_chain_name"] = "SANDBOXED_TEZOS" node_config["network"]["default_bootstrap_peers"] = [] node_config["network"]["genesis_parameters"] = { "values": {"genesis_pubkey": get_genesis_pubkey()} } node_config["network"].pop("activation_account_name") return node_config if __name__ == "__main__": main()
[]
[]
[ "SIGNERS", "MY_POD_IP", "OPEN_ACLS", "MY_POD_TYPE", "ACCOUNTS", "MY_POD_NAME", "CHAIN_PARAMS", "NODES" ]
[]
["SIGNERS", "MY_POD_IP", "OPEN_ACLS", "MY_POD_TYPE", "ACCOUNTS", "MY_POD_NAME", "CHAIN_PARAMS", "NODES"]
python
8
0
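A minimal sketch, not part of the row above, of how the recursive_update helper defined in that file behaves when create_node_config_json merges chart-supplied node config with computed values (the computed dict wins on conflicts). The example dicts are hypothetical; the function body is copied from the record.

import collections.abc

def recursive_update(d, u):
    # merge u into d, recursing into nested mappings instead of replacing them
    for k, v in u.items():
        if isinstance(v, collections.abc.Mapping):
            d[k] = recursive_update(d.get(k, {}), v)
        else:
            d[k] = v
    return d

values_node_config = {"rpc": {"acl": []}, "shell": {"history_mode": "rolling"}}   # hypothetical chart values
computed_node_config = {"rpc": {"listen-addrs": ["127.0.0.1:8732"]}, "data-dir": "/var/tezos/node/data"}
merged = recursive_update(values_node_config, computed_node_config)
# merged keeps "shell" and "rpc.acl" from the chart values and gains the computed keys
print(merged)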
templates/root.go
package cmd import ( "fmt" "os" {{ if .CLI.EnablePProf }} "log" "runtime/pprof" {{end}} {{ if .CLI.CustomHelp }} "strings" {{ end }} "github.com/hofstadter-io/hof/script/runtime" "github.com/spf13/cobra" {{ if .CLI.Imports }} {{ range $i, $I := .CLI.Imports }} {{ $I.As }} "{{ $I.Path }}" {{ end }} {{ end }} {{/* hack */}} {{ if .CLI.Flags }} "{{ .CLI.Package }}/flags" {{ else if .CLI.Pflags }} "{{ .CLI.Package }}/flags" {{ else if .CLI.Topics }} "{{ .CLI.Package }}/flags" {{ else if .CLI.Examples }} "{{ .CLI.Package }}/flags" {{ else if .CLI.Tutorials }} "{{ .CLI.Package }}/flags" {{ end }} {{ if .CLI.Telemetry }} "{{ .CLI.Package }}/ga" {{end}} ) {{ if .CLI.Long }} var {{ .CLI.Name }}Long = `{{ .CLI.Long }}` {{ end }} {{ template "flag-init" .CLI }} {{ template "pflag-init" .CLI }} {{ if .CLI.PersistentPrerun }} func RootPersistentPreRun({{- template "lib-args.go" . -}}) (err error) { {{ if .CLI.PersistentPrerunBody }} {{ .CLI.PersistentPrerunBody }} {{ end }} return err } {{ end }} {{ if .CLI.Prerun }} func RootPreRun({{- template "lib-args.go" . -}}) (err error) { {{ if .CLI.PrerunBody }} {{ .CLI.PrerunBody }} {{ end }} return err } {{ end }} {{ if not .CLI.OmitRun}} func RootRun({{ template "lib-args.go" . -}}) (err error) { {{ if .CLI.Body}} {{ .CLI.Body}} {{ end }} return err } {{ end }} {{ if or .CLI.PersistentPostrun .CLI.Updates}} func RootPersistentPostRun({{- template "lib-args.go" . -}}) (err error) { {{ if .CLI.Updates }} WaitPrintUpdateAvailable() {{ end }} {{ if .CLI.PersistentPostrunBody}} {{ .CLI.PersistentPostrunBody}} {{ end }} return err } {{ end }} {{ if .CLI.Postrun}} func RootPostRun({{- template "lib-args.go" . -}}) (err error) { {{ if .CLI.PostrunBody }} {{ .CLI.PostrunBody }} {{ end }} return err } {{ end }} var RootCmd = &cobra.Command{ {{ if .CLI.Usage}} Use: "{{ .CLI.Usage }}", {{ else }} Use: "{{ .CLI.Name }}", {{ end }} {{ if .CLI.Short}} Short: "{{ .CLI.Short }}", {{ end }} {{ if .CLI.Long }} Long: {{ .CLI.Name }}Long, {{ end }} {{ if .CLI.PersistentPrerun }} PersistentPreRun: func(cmd *cobra.Command, args []string) { var err error {{ template "args-parse" .CLI.Args }} err = RootPersistentPreRun({{ template "lib-call.go" .CLI.Args }}) if err != nil { fmt.Println(err) os.Exit(1) } }, {{ end }} {{ if or .CLI.Prerun .CLI.Telemetry}} PreRun: func(cmd *cobra.Command, args []string) { {{ if .CLI.Telemetry }} ga.SendCommandPath("root") {{ end }} {{ if .CLI.Prerun}} var err error {{ template "args-parse" .CLI.Args }} err = RootPreRun({{ template "lib-call.go" .CLI.Args }}) if err != nil { fmt.Println(err) os.Exit(1) } {{ end }} }, {{ end }} {{ if not .CLI.OmitRun}} Run: func(cmd *cobra.Command, args []string) { var err error {{ template "args-parse" .CLI.Args }} err = RootRun({{ template "lib-call.go" .CLI.Args }}) if err != nil { fmt.Println(err) os.Exit(1) } }, {{ end }} {{ if or .CLI.PersistentPostrun .CLI.Updates }} PersistentPostRun: func(cmd *cobra.Command, args []string) { var err error {{ template "args-parse" .CLI.Args }} err = RootPersistentPostRun({{ template "lib-call.go" .CLI.Args }}) if err != nil { fmt.Println(err) os.Exit(1) } }, {{ end }} {{ if .CLI.Postrun }} PostRun: func(cmd *cobra.Command, args []string) { var err error {{ template "args-parse" .CLI.Args }} err = RootPostRun({{ template "lib-call.go" .CLI.Args }}) if err != nil { fmt.Println(err) os.Exit(1) } }, {{ end }} } func RootInit() { extra := func(cmd *cobra.Command) bool { {{ if .CLI.Topics }} if flags.PrintSubject("Topics", " ", flags.RootPflags.Topic, RootTopics) { return true } {{ 
end }} {{ if .CLI.Examples }} if flags.PrintSubject("Examples", " ", flags.RootPflags.Example, RootExamples) { return true } {{ end }} {{ if .CLI.Tutorials }} if flags.PrintSubject("Tutorials", " ", flags.RootPflags.Tutorial, RootTutorials) { return true } {{ end }} return false } {{ if .CLI.CustomHelp }} help := func (cmd *cobra.Command, args []string) { if extra(cmd) { return } fu := RootCmd.Flags().FlagUsages() rh := strings.Replace(RootCustomHelp, "<<flag-usage>>", fu, 1) fmt.Println(rh) } usage := func(cmd *cobra.Command) error { if extra(cmd) { return nil } fu := RootCmd.Flags().FlagUsages() rh := strings.Replace(RootCustomHelp, "<<flag-usage>>", fu, 1) fmt.Println(rh) return fmt.Errorf("unknown {{ .CLI.cliName }} command") } {{ else }} ohelp := RootCmd.HelpFunc() ousage := RootCmd.UsageFunc() help := func (cmd *cobra.Command, args []string) { if extra(cmd) { return } ohelp(cmd, args) } usage := func(cmd *cobra.Command) error { if extra(cmd) { return nil } return ousage(cmd) } {{ end }} {{ if .CLI.Telemetry }} thelp := func (cmd *cobra.Command, args []string) { if RootCmd.Name() == cmd.Name() { ga.SendCommandPath("root help") } help(cmd, args) } tusage := func (cmd *cobra.Command) error { if RootCmd.Name() == cmd.Name() { ga.SendCommandPath("root usage") } return usage(cmd) } RootCmd.SetHelpFunc(thelp) RootCmd.SetUsageFunc(tusage) {{ else }} RootCmd.SetHelpFunc(help) RootCmd.SetUsageFunc(usage) {{ end }} {{if .CLI.Updates}} RootCmd.AddCommand(UpdateCmd) {{end}} {{if .CLI.VersionCommand}} RootCmd.AddCommand(VersionCmd) {{end}} {{if .CLI.CompletionCommands}} RootCmd.AddCommand(CompletionCmd) {{end}} {{if .CLI.Commands}} {{range $i, $C := .CLI.Commands }} RootCmd.AddCommand({{ $C.CmdName }}Cmd) {{- end }} {{ end }} } func RunExit() { if err := RunErr(); err != nil { fmt.Println(err) os.Exit(1) } } func RunInt() int { if err := RunErr(); err != nil { fmt.Println(err) return 1 } return 0 } func RunErr() error { {{ if .CLI.EnablePProf }} if fn := os.Getenv("{{.CLI.CLI_NAME}}_CPU_PROFILE"); fn != "" { f, err := os.Create(fn) if err != nil { log.Fatal("Could not create file for CPU profile:", err) } defer f.Close() err = pprof.StartCPUProfile(f) if err != nil { log.Fatal("Could not start CPU profile process:", err) } defer pprof.StopCPUProfile() } {{ end }} RootInit() return RootCmd.Execute() } func CallTS(ts *runtime.Script, args[]string) error { RootCmd.SetArgs(args) err := RootCmd.Execute() ts.Check(err) return err } {{ if .CLI.CustomHelp }} const RootCustomHelp = `{{ .CLI.CustomHelp }}` {{ end }} {{ if .CLI.Topics }} var RootTopics = map[string]string { {{- range $k, $v := .CLI.Topics }} "{{ $k }}": `{{ replace $v "`" "¡" -1 }}`, {{- end}} } {{ end }} {{ if .CLI.Examples }} var RootExamples = map[string]string { {{- range $k, $v := .CLI.Examples }} "{{ $k }}": `{{ replace $v "`" "¡" -1 }}`, {{- end}} } {{ end }} {{ if .CLI.Tutorials }} var RootTutorials = map[string]string { {{- range $k, $v := .CLI.Tutorials }} "{{ $k }}": `{{ replace $v "`" "¡" -1 }}`, {{- end}} } {{ end }}
[ "\"{{.CLI.CLI_NAME}}_CPU_PROFILE\"" ]
[]
[ "{{.CLI.CLI_NAME}}_CPU_PROFILE" ]
[]
["{{.CLI.CLI_NAME}}_CPU_PROFILE"]
go
1
0
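A hedged illustration of how the environment, constarg and constargcount fields of the row above relate to the os.Getenv literal in its content. The dataset's actual extraction pipeline is not shown in this document; this only demonstrates the relationship using a single hypothetical source line.

import re

source = 'if fn := os.Getenv("{{.CLI.CLI_NAME}}_CPU_PROFILE"); fn != "" {'   # excerpt mirroring the row's content
literals = re.findall(r'os\.Getenv\(("[^"]*")\)', source)

environment = literals                                    # quoted literals, as they appear in the code
constarg = sorted({lit.strip('"') for lit in literals})   # distinct unquoted names
constargcount = len(constarg)
print(environment)               # ['"{{.CLI.CLI_NAME}}_CPU_PROFILE"']
print(constarg, constargcount)   # ['{{.CLI.CLI_NAME}}_CPU_PROFILE'] 1, matching the row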
chezmoi2/internal/chezmoitest/chezmoitest.go
package chezmoitest import ( "fmt" "io/ioutil" "os" "os/exec" "path/filepath" "regexp" "runtime" "strings" "testing" "github.com/rs/zerolog/log" "github.com/stretchr/testify/require" "github.com/twpayne/go-vfs" "github.com/twpayne/go-vfs/vfst" "github.com/twpayne/chezmoi/chezmoi2/internal/chezmoilog" ) var ( agePublicKeyRx = regexp.MustCompile(`(?m)^Public key: ([0-9a-z]+)\s*$`) gpgKeyMarkedAsUltimatelyTrustedRx = regexp.MustCompile(`(?m)^gpg: key ([0-9A-F]+) marked as ultimately trusted\s*$`) ) // AGEGenerateKey generates and returns an age public key and the path to the // private key. If filename is non-zero then the private key is written to it, // otherwise a new file is created in a temporary directory and the caller is // responsible for removing the temporary directory. func AGEGenerateKey(filename string) (publicKey, privateKeyFile string, err error) { if filename == "" { var tempDir string tempDir, err = ioutil.TempDir("", "chezmoi-test-age-key") if err != nil { return "", "", err } defer func() { if err != nil { os.RemoveAll(tempDir) } }() if runtime.GOOS != "windows" { if err = os.Chmod(tempDir, 0o700); err != nil { return } } filename = filepath.Join(tempDir, "key.txt") } privateKeyFile = filename var output []byte cmd := exec.Command("age-keygen", "--output", privateKeyFile) output, err = chezmoilog.LogCmdCombinedOutput(log.Logger, cmd) if err != nil { return } match := agePublicKeyRx.FindSubmatch(output) if match == nil { err = fmt.Errorf("public key not found in %q", output) return } publicKey = string(match[1]) return } // GPGCommand returns the GPG command, if it can be found. func GPGCommand() (string, error) { return exec.LookPath("gpg") } // GPGGenerateKey generates and returns a GPG key in homeDir. func GPGGenerateKey(command, homeDir string) (key, passphrase string, err error) { //nolint:gosec passphrase = "chezmoi-test-gpg-passphrase" cmd := exec.Command( command, "--batch", "--homedir", homeDir, "--no-tty", "--passphrase", passphrase, "--pinentry-mode", "loopback", "--quick-generate-key", "chezmoi-test-gpg-key", ) output, err := chezmoilog.LogCmdCombinedOutput(log.Logger, cmd) if err != nil { return "", "", err } submatch := gpgKeyMarkedAsUltimatelyTrustedRx.FindSubmatch(output) if submatch == nil { return "", "", fmt.Errorf("key not found in %q", output) } return string(submatch[1]), passphrase, nil } // GitHubActionsOnWindows returns if running in GitHub Actions on Windows. func GitHubActionsOnWindows() bool { return runtime.GOOS == "windows" && os.Getenv("GITHUB_ACTIONS") == "true" } // HomeDir returns the home directory. func HomeDir() string { switch runtime.GOOS { case "windows": return "C:/home/user" default: return "/home/user" } } // JoinLines joins lines with newlines. func JoinLines(lines ...string) string { return strings.Join(lines, "\n") + "\n" } // SkipUnlessGOOS calls t.Skip() if name does not match runtime.GOOS. func SkipUnlessGOOS(t *testing.T, name string) { t.Helper() switch { case strings.HasSuffix(name, "_windows") && runtime.GOOS != "windows": t.Skip("skipping Windows test on UNIX") case strings.HasSuffix(name, "_unix") && runtime.GOOS == "windows": t.Skip("skipping UNIX test on Windows") } } // WithTestFS calls f with a test filesystem populated with root. func WithTestFS(t *testing.T, root interface{}, f func(fs vfs.FS)) { t.Helper() fs, cleanup, err := vfst.NewTestFS(root) require.NoError(t, err) t.Cleanup(cleanup) f(fs) }
[ "\"GITHUB_ACTIONS\"" ]
[]
[ "GITHUB_ACTIONS" ]
[]
["GITHUB_ACTIONS"]
go
1
0
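A small sketch, under stated assumptions, of the public-key extraction that AGEGenerateKey in the row above performs: it runs age-keygen and pulls the key out of the command output with the regex shown in the record. The output string here is hypothetical.

import re

age_public_key_rx = re.compile(r"(?m)^Public key: ([0-9a-z]+)\s*$")
output = "created: 2021-01-01T00:00:00Z\nPublic key: age1qxyzexamplekey0123456789\n"   # hypothetical age-keygen output
match = age_public_key_rx.search(output)
public_key = match.group(1) if match else None
print(public_key)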
src/main/java/com/newrelic/as400/GetMemoryStatus.java
// ---------------------------------------------------------------------- // // Name: GetMemoryStatus // // ---------------------------------------------------------------------- // Modification Log // ---------------------------------------------------------------------- // 9/2/2020 - Initial creation - EK // ---------------------------------------------------------------------- /////////////////////////////////////////////////////////////////////////// package com.newrelic.as400; import java.io.*; import java.nio.ByteBuffer; import java.util.*; import com.ibm.as400.access.*; public class GetMemoryStatus { private static ProgramCall s_getSystemStatus = null; private static boolean s_getSystemStatus_Result = false; // SSTS0400 Common Header Area private static int s_ssts0400_bytesAvailable; // offset: 0 private static int s_ssts0400_bytesReturned; // offset: 4 private static Date s_ssts0400_currentDateAndTime; // offset: 8 private static String s_ssts0400_systemName; // offset: 16 private static String s_ssts0400_elapsedTime; // offset: 24 private static int s_ssts0400_mainStorageSize; // offset: 32 private static int s_ssts0400_minimumMachinePoolSize; // offset: 36 private static int s_ssts0400_minimumBasePoolSize; // offset: 40 private static int s_ssts0400_numberOfPools; // offset: 44 private static int s_ssts0400_offsetToPoolInformation; // offset: 48 private static int s_ssts0400_lengthOfPoolInformationEntry; // offset: 52 private static Double s_ssts0400_mainStorageSize_long; // offset: 56 private static Double s_ssts0400_minimumMachinePoolSize_long; // offset: 64 private static Double s_ssts0400_minimumBasePoolSize_long; // offset: 72 private static SSTS0400_PoolEntry[] s_ssts0400_poolEntries; public static void main(String[] args) throws AS400SecurityException, ErrorCompletingRequestException, InterruptedException, IOException, ObjectDoesNotExistException { String strAs400 = System.getenv("AS400HOST"); String strUser = System.getenv("USERID"); String strPass = System.getenv("PASSWD"); AS400 as400 = new AS400(strAs400, strUser, strPass); try { // Run the program then sleep. We run the program twice because // the first set of results are inflated. If we discard the first // set of results and run the command again five seconds later the // number will be more accurate. byte[] as400Data;// = retrieveSystemStatus(as400, "SSTS0400", 2400, true); Thread.sleep(5000); // Run the program as400Data = retrieveSystemStatus(as400, "SSTS0400", 2400, false); if (s_getSystemStatus_Result != true) { // If the program did not run get the list of error messages // from the program object and display the messages. The error // would be something like program-not-found or not-authorized // to the program. if (s_getSystemStatus != null) { AS400Message[] msgList = s_getSystemStatus.getMessageList(); System.err.println("The program did not run. Server messages:"); for (int i=0; i<msgList.length; i++) { System.err.println(msgList[i].getText()); } } else { System.err.println("The program did not run. Unknown reason."); } System.exit(-1); } // Else the program did run. else { getFormat0400_Data(as400, as400Data); } String strJson = getJsonText(); System.out.println(strJson); // This program is done running program so disconnect from // the command server on the server. Program call and command // call use the same server on the server. as400.disconnectService(AS400.COMMAND); } catch (Exception e) { // If any of the above operations failed say the program failed // and output the exception. 
System.err.println("Program call failed"); System.err.println(e); } } private static byte[] retrieveSystemStatus(AS400 as400, String format, int bufferLth, boolean resetStats) throws AS400SecurityException, ErrorCompletingRequestException, InterruptedException, IOException, ObjectDoesNotExistException { try { s_getSystemStatus_Result = false; // Create the path to the program. QSYSObjectPathName programName = new QSYSObjectPathName("QSYS", "QWCRSSTS", "PGM"); // Create the program call object. Associate the object with the // AS400 object that represents the server we get status from. //ProgramCall getSystemStatus = new ProgramCall(as400); s_getSystemStatus = new ProgramCall(as400); // Create the program parameter list. This program has five // parameters that will be added to this list. int numOfParms = 5; if (format.equals("SSTS0400")) { numOfParms = 7; } ProgramParameter[] parmlist = new ProgramParameter[numOfParms]; // The server program returns data in parameter 1. It is an output // parameter. Allocate bufferLth bytes for this parameter. parmlist[0] = new ProgramParameter( bufferLth ); // Parameter 2 is the buffer size of parm 1. It is a numeric input // parameter. Sets its value to bufferLth, convert it to the server format, // then add the parm to the parm list. AS400Bin4 bin4 = new AS400Bin4( ); int iStatusLength = bufferLth; byte[] statusLength = bin4.toBytes( iStatusLength ); parmlist[1] = new ProgramParameter( statusLength ); // Parameter 3 is the status-format parameter. It is a string input // parameter. Set the string value, convert it to the server format, // then add the parameter to the parm list. AS400Text text1 = new AS400Text(8, as400); byte[] statusFormat = text1.toBytes(format); parmlist[2] = new ProgramParameter( statusFormat ); // Parameter 4 is the reset-statistics parameter. It is a string input // parameter. Set the string value, convert it to the server format, // then add the parameter to the parm list. AS400Text text3 = new AS400Text(10, as400); byte[] resetStatsParm; if (resetStats) { resetStatsParm = text3.toBytes("*YES"); } else { resetStatsParm = text3.toBytes("*NO "); } parmlist[3] = new ProgramParameter( resetStatsParm ); // Parameter 5 is the error info parameter. It is an input/output // parameter. Add it to the parm list. byte[] errorInfo = new byte[32]; parmlist[4] = new ProgramParameter( errorInfo, 0 ); if (format.equals("SSTS0400")) { byte[] poolSelectionInfo = new byte[24]; AS400Text text4 = new AS400Text(10, as400); byte[] typeOfPool = text4.toBytes("*SYSTEM"); AS400Text text5 = new AS400Text(10, as400); byte[] sharedPoolName = text5.toBytes(""); bin4 = new AS400Bin4(); int returnAllPools = -1; byte[] systemPoolIdentifier = bin4.toBytes(returnAllPools); System.arraycopy(typeOfPool, 0, poolSelectionInfo, 0, 10); System.arraycopy(sharedPoolName, 0, poolSelectionInfo, 10, 10); System.arraycopy(systemPoolIdentifier, 0, poolSelectionInfo, 20, 4); parmlist[5] = new ProgramParameter( poolSelectionInfo ); bin4 = new AS400Bin4(); byte[] sizeOfPoolSelectionInfo = bin4.toBytes(24); parmlist[6] = new ProgramParameter( sizeOfPoolSelectionInfo ); } // Set the program to call and the parameter list to the program // call object. s_getSystemStatus.setProgram(programName.getPath(), parmlist ); // Run the program s_getSystemStatus_Result = s_getSystemStatus.run(); if (s_getSystemStatus_Result != true) { // If the program did not run get the list of error messages // from the program object and display the messages. 
The error // would be something like program-not-found or not-authorized // to the program. AS400Message[] msgList = s_getSystemStatus.getMessageList(); System.err.println("The program did not run. Server messages:"); for (int i=0; i<msgList.length; i++) { System.err.println(msgList[i].getText()); } return null; } // Else the program did run. else { ///////////////////////////////////////////////////// // Get the results of the program. Output data is in // a byte array in the first parameter. byte[] as400Data = parmlist[0].getOutputData(); return as400Data; } } catch (Exception e) { // If any of the above operations failed say the program failed // and output the exception. System.err.println("Program call failed"); System.err.println(e); return null; } } private static void getFormat0400_Data(AS400 as400, byte[] as400Data) throws AS400SecurityException, ErrorCompletingRequestException, InterruptedException, IOException, ObjectDoesNotExistException { Integer anInteger; AS400Bin4 as400Int = new AS400Bin4(); AS400Text as400Text; byte[] ba8; // int s_ssts0400_bytesAvailable; // offset: 0 anInteger = (Integer)as400Int.toObject(as400Data, 0); s_ssts0400_bytesAvailable = anInteger.intValue(); // int s_ssts0400_bytesReturned; // offset: 4 anInteger = (Integer)as400Int.toObject(as400Data, 4); s_ssts0400_bytesReturned = anInteger.intValue(); // Date s_ssts0400_currentDateAndTime; // offset: 8 byte[] timeStamp = new byte[8]; for (int x=0; x < 8; x++) { timeStamp[x] = as400Data[8+x]; } DateTimeConverter dtConverter = new DateTimeConverter(as400); s_ssts0400_currentDateAndTime = dtConverter.convert(timeStamp, "*DTS"); // String s_ssts0400_systemName; // offset: 16 as400Text = new AS400Text(8, as400 ); s_ssts0400_systemName = ((String)as400Text.toObject(as400Data, 16)).trim(); // String s_ssts0400_elapsedTime; // offset: 24 as400Text = new AS400Text(6, as400 ); s_ssts0400_elapsedTime = (String)as400Text.toObject(as400Data, 24); // int s_ssts0400_mainStorageSize; // offset: 32 anInteger = (Integer)as400Int.toObject(as400Data, 32); s_ssts0400_mainStorageSize = anInteger.intValue(); // int s_ssts0400_minimumMachinePoolSize; // offset: 36 anInteger = (Integer)as400Int.toObject(as400Data, 36); s_ssts0400_minimumMachinePoolSize = anInteger.intValue(); // int s_ssts0400_minimumBasePoolSize; // offset: 40 anInteger = (Integer)as400Int.toObject(as400Data, 40); s_ssts0400_minimumBasePoolSize = anInteger.intValue(); // int s_ssts0400_numberOfPools; // offset: 44 anInteger = (Integer)as400Int.toObject(as400Data, 44); s_ssts0400_numberOfPools = anInteger.intValue(); // int s_ssts0400_offsetToPoolInformation; // offset: 48 anInteger = (Integer)as400Int.toObject(as400Data, 48); s_ssts0400_offsetToPoolInformation = anInteger.intValue(); // int s_ssts0400_lengthOfPoolInformationEntry; // offset: 52 anInteger = (Integer)as400Int.toObject(as400Data, 52); s_ssts0400_lengthOfPoolInformationEntry = anInteger.intValue(); // Double s_ssts0400_mainStorageSize_long; // offset: 56 ba8 = new byte[8]; for (int y=0; y < 8; y++) { ba8[y] = as400Data[56+y]; } s_ssts0400_mainStorageSize_long = toDouble(ba8); // Double s_ssts0400_minimumMachinePoolSize_long; // offset: 64 ba8 = new byte[8]; for (int y=0; y < 8; y++) { ba8[y] = as400Data[64+y]; } s_ssts0400_minimumMachinePoolSize_long = toDouble(ba8); // Double s_ssts0400_minimumBasePoolSize_long; // offset: 72 ba8 = new byte[8]; for (int y=0; y < 8; y++) { ba8[y] = as400Data[72+y]; } s_ssts0400_minimumBasePoolSize_long = toDouble(ba8); int numOfPoolEntries = (s_ssts0400_bytesReturned 
- s_ssts0400_offsetToPoolInformation) / s_ssts0400_lengthOfPoolInformationEntry; //System.err.println("Number Of SSTS0400 Pool Entries:" + numOfPoolEntries); if (numOfPoolEntries > 0) { s_ssts0400_poolEntries = new SSTS0400_PoolEntry[numOfPoolEntries]; int x = s_ssts0400_offsetToPoolInformation; for (int entry = 0; entry < numOfPoolEntries; entry++) { // SSTS0400 Pool Information (repeats) SSTS0400_PoolEntry poolEntry = new SSTS0400_PoolEntry(); // int m_ssts0400_systemPool; // offset: 0 anInteger = (Integer)as400Int.toObject(as400Data, x+0); poolEntry.m_ssts0400_systemPool = anInteger.intValue(); // int m_ssts0400_poolSize; // offset: 4 anInteger = (Integer)as400Int.toObject(as400Data, x+4); poolEntry.m_ssts0400_poolSize = anInteger.intValue(); // int m_ssts0400_reservedSize; // offset: 8 anInteger = (Integer)as400Int.toObject(as400Data, x+8); poolEntry.m_ssts0400_reservedSize = anInteger.intValue(); // int m_ssts0400_maximumActiveThreads; // offset: 12 anInteger = (Integer)as400Int.toObject(as400Data, x+12); poolEntry.m_ssts0400_maximumActiveThreads = anInteger.intValue(); // float m_ssts0400_databaseFaults; // offset: 16 anInteger = (Integer)as400Int.toObject(as400Data, x+16); poolEntry.m_ssts0400_databaseFaults = (float)anInteger / 10; // float m_ssts0400_databasePages; // offset: 20 anInteger = (Integer)as400Int.toObject(as400Data, x+20); poolEntry.m_ssts0400_databasePages = (float)anInteger / 10; // float m_ssts0400_nondatabaseFaults; // offset: 24 anInteger = (Integer)as400Int.toObject(as400Data, x+24); poolEntry.m_ssts0400_nondatabaseFaults = (float)anInteger / 10; // float m_ssts0400_nondatabasePages; // offset: 28 anInteger = (Integer)as400Int.toObject(as400Data, x+28); poolEntry.m_ssts0400_nondatabasePages = (float)anInteger / 10; // float m_ssts0400_activeToWait; // offset: 32 anInteger = (Integer)as400Int.toObject(as400Data, x+32); poolEntry.m_ssts0400_activeToWait = (float)anInteger / 10; // float m_ssts0400_waitToIneligible; // offset: 36 anInteger = (Integer)as400Int.toObject(as400Data, x+36); poolEntry.m_ssts0400_waitToIneligible = (float)anInteger / 10; // int m_ssts0400_activeToIneligible; // offset: 40 anInteger = (Integer)as400Int.toObject(as400Data, x+40); poolEntry.m_ssts0400_activeToIneligible = anInteger.intValue(); // String m_ssts0400_poolName; // offset: 44 as400Text = new AS400Text(10, as400 ); poolEntry.m_ssts0400_poolName = ((String)as400Text.toObject(as400Data, x+44)).trim(); // String m_ssts0400_subsystemName; // offset: 54 as400Text = new AS400Text(10, as400 ); poolEntry.m_ssts0400_subsystemName = ((String)as400Text.toObject(as400Data, x+54)).trim(); // String m_ssts0400_subsystemLibraryName; // offset: 64 as400Text = new AS400Text(10, as400 ); poolEntry.m_ssts0400_subsystemLibraryName = ((String)as400Text.toObject(as400Data, x+64)).trim(); // String m_ssts0400_pagingOption; // offset: 74 as400Text = new AS400Text(10, as400 ); poolEntry.m_ssts0400_pagingOption = ((String)as400Text.toObject(as400Data, x+74)).trim(); // int m_ssts0400_definedSize; // offset: 84 anInteger = (Integer)as400Int.toObject(as400Data, x+84); poolEntry.m_ssts0400_definedSize = anInteger.intValue(); // int m_ssts0400_currentThreads; // offset: 88 anInteger = (Integer)as400Int.toObject(as400Data, x+88); poolEntry.m_ssts0400_currentThreads = anInteger.intValue(); // int m_ssts0400_currentIneligibleThreads; // offset: 92 anInteger = (Integer)as400Int.toObject(as400Data, x+92); poolEntry.m_ssts0400_currentIneligibleThreads = anInteger.intValue(); // int m_ssts0400_tuningPriority; // 
offset: 96 anInteger = (Integer)as400Int.toObject(as400Data, x+96); poolEntry.m_ssts0400_tuningPriority = anInteger.intValue(); // int m_ssts0400_tuningMinimumPoolSizePct; // offset: 100 anInteger = (Integer)as400Int.toObject(as400Data, x+100); poolEntry.m_ssts0400_tuningMinimumPoolSizePct = anInteger.intValue(); // int m_ssts0400_tuningMaximumPoolSizePct; // offset: 104 anInteger = (Integer)as400Int.toObject(as400Data, x+104); poolEntry.m_ssts0400_tuningMaximumPoolSizePct = anInteger.intValue(); // int m_ssts0400_tuningMinimumFaults; // offset: 108 anInteger = (Integer)as400Int.toObject(as400Data, x+108); poolEntry.m_ssts0400_tuningMinimumFaults = anInteger.intValue(); // float m_ssts0400_tuningPerThreadFaults; // offset: 112 anInteger = (Integer)as400Int.toObject(as400Data, x+112); poolEntry.m_ssts0400_tuningPerThreadFaults = (float)anInteger / 100; // float m_ssts0400_tuningNaximumFaults; // offset: 116 anInteger = (Integer)as400Int.toObject(as400Data, x+116); poolEntry.m_ssts0400_tuningMaximumFaults = (float)anInteger / 100; // String m_ssts0400_description; // offset: 120 as400Text = new AS400Text(50, as400 ); poolEntry.m_ssts0400_description = ((String)as400Text.toObject(as400Data, x+120)).trim(); // String m_ssts0400_status; // offset: 170 as400Text = new AS400Text(1, as400 ); poolEntry.m_ssts0400_status = (String)as400Text.toObject(as400Data, x+170); // int m_ssts0400_tuningMinimumActivityLevel; // offset: 172 anInteger = (Integer)as400Int.toObject(as400Data, x+172); poolEntry.m_ssts0400_tuningMinimumActivityLevel = anInteger.intValue(); // int m_ssts0400_tuningMaximumActivityLevel; // offset: 176 anInteger = (Integer)as400Int.toObject(as400Data, x+176); poolEntry.m_ssts0400_tuningMaximumActivityLevel = anInteger.intValue(); // Double m_ssts0400_poolSize_long; // offset: 180 ba8 = new byte[8]; for (int y=0; y < 8; y++) { ba8[y] = as400Data[x+180+y]; } poolEntry.m_ssts0400_poolSize_long = toDouble(ba8); // Double m_ssts0400_definedSize_long; // offset: 188 ba8 = new byte[8]; for (int y=0; y < 8; y++) { ba8[y] = as400Data[x+188+y]; } poolEntry.m_ssts0400_definedSize_long = toDouble(ba8); s_ssts0400_poolEntries[entry] = poolEntry; x += s_ssts0400_lengthOfPoolInformationEntry; } //System.err.println("NumberOfPoolEntries:" + numOfPoolEntries); } } private static double toDouble(byte[] bytes) { return ByteBuffer.wrap(bytes).getDouble(); } private static String getJsonText() { String strNrName = "com.newrelic.as400-memory-status"; String strNrEventType = "AS400:MemoryStatusEvent"; String strNrProtoVersion = "1"; String strNrIntVersion = "0.2.0"; String strJSONMetrics = ""; String strJSONHeader = ("{" + "\"name\":" + '"' + strNrName + '"' + "," + "\"protocol_version\":" + '"' + strNrProtoVersion + '"' + "," + "\"integration_version\":" + '"' + strNrIntVersion + '"' + "," + "\"metrics\":" + "["); String strJSONFooter = ("]," + "\"inventory\":" + "{" + "}," + "\"events\":" + "[" + "]" + "}"); String instanceGUID = java.util.UUID.randomUUID().toString(); for (int x = 0; x < s_ssts0400_poolEntries.length; x++) { if (x > 0 ) { strJSONMetrics = strJSONMetrics + ","; } SSTS0400_PoolEntry poolEntry = s_ssts0400_poolEntries[x]; strJSONMetrics = strJSONMetrics + "{" + "\"event_type\":" + '"' + strNrEventType + '"' + "," + "\"eventInstanceId\":" + '"' + instanceGUID + '"' + "," + "\"systemName\":" + '"' + s_ssts0400_systemName + '"' + "," + "\"dateTimeStatusGathered\":" + '"' + s_ssts0400_currentDateAndTime + '"' + "," + "\"mainStorageSize\":" + s_ssts0400_mainStorageSize + "," + 
"\"minimumMachinePoolSize\":" + s_ssts0400_minimumMachinePoolSize + "," + "\"minimumBasePoolSize\":" + s_ssts0400_minimumBasePoolSize + "," + "\"numberOfPools\":" + s_ssts0400_numberOfPools + "," + "\"poolName\":" + '"' + poolEntry.m_ssts0400_poolName + '"' + "," + "\"subsystemName\":" + '"' + poolEntry.m_ssts0400_subsystemName + '"' + "," + "\"susbsystemLibraryName\":" + '"' + poolEntry.m_ssts0400_subsystemLibraryName + '"' + "," + "\"pagingOption\":" + '"' + poolEntry.m_ssts0400_pagingOption + '"' + "," + "\"description\":" + '"' + poolEntry.m_ssts0400_description + '"' + "," + "\"status\":" + '"' + poolEntry.m_ssts0400_status + '"' + "," + "\"systemPools\":" + poolEntry.m_ssts0400_systemPool + "," + "\"poolSize\":" + poolEntry.m_ssts0400_poolSize + "," + "\"maximumActiveThreads\":" + poolEntry.m_ssts0400_maximumActiveThreads + "," + "\"databaseFaults\":" + poolEntry.m_ssts0400_databaseFaults + "," + "\"databasePages\":" + poolEntry.m_ssts0400_databasePages + "," + "\"nondatabaseFaults\":" + poolEntry.m_ssts0400_nondatabaseFaults + "," + "\"nondatabasePages\":" + poolEntry.m_ssts0400_nondatabasePages + "," + "\"activeToWait\":" + poolEntry.m_ssts0400_activeToWait + "," + "\"waitToIneligible\":" + poolEntry.m_ssts0400_waitToIneligible + "," + "\"activeToIneligible\":" + poolEntry.m_ssts0400_activeToIneligible + "," + "\"definedSize\":" + poolEntry.m_ssts0400_definedSize + "," + "\"currentThreads\":" + poolEntry.m_ssts0400_currentThreads + "," + "\"currentIneligibleThreads\":" + poolEntry.m_ssts0400_currentIneligibleThreads + "}"; } return strJSONHeader + strJSONMetrics + strJSONFooter; } }
[ "\"AS400HOST\"", "\"USERID\"", "\"PASSWD\"" ]
[]
[ "USERID", "PASSWD", "AS400HOST" ]
[]
["USERID", "PASSWD", "AS400HOST"]
java
3
0
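A hedged Python sketch (not the integration's Java) of the payload shape that getJsonText in the row above assembles by string concatenation. The integration name, protocol version, integration version and event type are taken from the record; the metric values are illustrative only. Building the dict and serializing it with json.dumps avoids the quoting and escaping pitfalls of manual concatenation.

import json, uuid

payload = {
    "name": "com.newrelic.as400-memory-status",
    "protocol_version": "1",
    "integration_version": "0.2.0",
    "metrics": [
        {
            "event_type": "AS400:MemoryStatusEvent",
            "eventInstanceId": str(uuid.uuid4()),
            "systemName": "DEVSYS",     # hypothetical
            "poolName": "*MACHINE",     # hypothetical
            "poolSize": 8192,           # hypothetical
        }
    ],
    "inventory": {},
    "events": [],
}
print(json.dumps(payload, indent=2))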
vendor/github.com/mitchellh/packer/provisioner/ansible/provisioner_test.go
// +build !windows package ansible import ( "bytes" "crypto/rand" "fmt" "io" "io/ioutil" "os" "path" "strings" "testing" "github.com/hashicorp/packer/packer" ) // Be sure to remove the Ansible stub file in each test with: // defer os.Remove(config["command"].(string)) func testConfig(t *testing.T) map[string]interface{} { m := make(map[string]interface{}) wd, err := os.Getwd() if err != nil { t.Fatalf("err: %s", err) } ansible_stub := path.Join(wd, "packer-ansible-stub.sh") err = ioutil.WriteFile(ansible_stub, []byte("#!/usr/bin/env bash\necho ansible 1.6.0"), 0777) if err != nil { t.Fatalf("err: %s", err) } m["command"] = ansible_stub return m } func TestProvisioner_Impl(t *testing.T) { var raw interface{} raw = &Provisioner{} if _, ok := raw.(packer.Provisioner); !ok { t.Fatalf("must be a Provisioner") } } func TestProvisionerPrepare_Defaults(t *testing.T) { var p Provisioner config := testConfig(t) defer os.Remove(config["command"].(string)) err := p.Prepare(config) if err == nil { t.Fatalf("should have error") } hostkey_file, err := ioutil.TempFile("", "hostkey") if err != nil { t.Fatalf("err: %s", err) } defer os.Remove(hostkey_file.Name()) publickey_file, err := ioutil.TempFile("", "publickey") if err != nil { t.Fatalf("err: %s", err) } defer os.Remove(publickey_file.Name()) playbook_file, err := ioutil.TempFile("", "playbook") if err != nil { t.Fatalf("err: %s", err) } defer os.Remove(playbook_file.Name()) config["ssh_host_key_file"] = hostkey_file.Name() config["ssh_authorized_key_file"] = publickey_file.Name() config["playbook_file"] = playbook_file.Name() err = p.Prepare(config) if err != nil { t.Fatalf("err: %s", err) } defer os.Remove(playbook_file.Name()) err = os.Unsetenv("USER") if err != nil { t.Fatalf("err: %s", err) } err = p.Prepare(config) if err != nil { t.Fatalf("err: %s", err) } } func TestProvisionerPrepare_PlaybookFile(t *testing.T) { var p Provisioner config := testConfig(t) defer os.Remove(config["command"].(string)) hostkey_file, err := ioutil.TempFile("", "hostkey") if err != nil { t.Fatalf("err: %s", err) } defer os.Remove(hostkey_file.Name()) publickey_file, err := ioutil.TempFile("", "publickey") if err != nil { t.Fatalf("err: %s", err) } defer os.Remove(publickey_file.Name()) config["ssh_host_key_file"] = hostkey_file.Name() config["ssh_authorized_key_file"] = publickey_file.Name() err = p.Prepare(config) if err == nil { t.Fatal("should have error") } playbook_file, err := ioutil.TempFile("", "playbook") if err != nil { t.Fatalf("err: %s", err) } defer os.Remove(playbook_file.Name()) config["playbook_file"] = playbook_file.Name() err = p.Prepare(config) if err != nil { t.Fatalf("err: %s", err) } } func TestProvisionerPrepare_HostKeyFile(t *testing.T) { var p Provisioner config := testConfig(t) defer os.Remove(config["command"].(string)) publickey_file, err := ioutil.TempFile("", "publickey") if err != nil { t.Fatalf("err: %s", err) } defer os.Remove(publickey_file.Name()) playbook_file, err := ioutil.TempFile("", "playbook") if err != nil { t.Fatalf("err: %s", err) } defer os.Remove(playbook_file.Name()) filename := make([]byte, 10) n, err := io.ReadFull(rand.Reader, filename) if n != len(filename) || err != nil { t.Fatal("could not create random file name") } config["ssh_host_key_file"] = fmt.Sprintf("%x", filename) config["ssh_authorized_key_file"] = publickey_file.Name() config["playbook_file"] = playbook_file.Name() err = p.Prepare(config) if err == nil { t.Fatal("should error if ssh_host_key_file does not exist") } hostkey_file, err := 
ioutil.TempFile("", "hostkey") if err != nil { t.Fatalf("err: %s", err) } defer os.Remove(hostkey_file.Name()) config["ssh_host_key_file"] = hostkey_file.Name() err = p.Prepare(config) if err != nil { t.Fatalf("err: %s", err) } } func TestProvisionerPrepare_AuthorizedKeyFile(t *testing.T) { var p Provisioner config := testConfig(t) defer os.Remove(config["command"].(string)) hostkey_file, err := ioutil.TempFile("", "hostkey") if err != nil { t.Fatalf("err: %s", err) } defer os.Remove(hostkey_file.Name()) playbook_file, err := ioutil.TempFile("", "playbook") if err != nil { t.Fatalf("err: %s", err) } defer os.Remove(playbook_file.Name()) filename := make([]byte, 10) n, err := io.ReadFull(rand.Reader, filename) if n != len(filename) || err != nil { t.Fatal("could not create random file name") } config["ssh_host_key_file"] = hostkey_file.Name() config["playbook_file"] = playbook_file.Name() config["ssh_authorized_key_file"] = fmt.Sprintf("%x", filename) err = p.Prepare(config) if err == nil { t.Errorf("should error if ssh_authorized_key_file does not exist") } publickey_file, err := ioutil.TempFile("", "publickey") if err != nil { t.Fatalf("err: %s", err) } defer os.Remove(publickey_file.Name()) config["ssh_authorized_key_file"] = publickey_file.Name() err = p.Prepare(config) if err != nil { t.Errorf("err: %s", err) } } func TestProvisionerPrepare_LocalPort(t *testing.T) { var p Provisioner config := testConfig(t) defer os.Remove(config["command"].(string)) hostkey_file, err := ioutil.TempFile("", "hostkey") if err != nil { t.Fatalf("err: %s", err) } defer os.Remove(hostkey_file.Name()) publickey_file, err := ioutil.TempFile("", "publickey") if err != nil { t.Fatalf("err: %s", err) } defer os.Remove(publickey_file.Name()) playbook_file, err := ioutil.TempFile("", "playbook") if err != nil { t.Fatalf("err: %s", err) } defer os.Remove(playbook_file.Name()) config["ssh_host_key_file"] = hostkey_file.Name() config["ssh_authorized_key_file"] = publickey_file.Name() config["playbook_file"] = playbook_file.Name() config["local_port"] = "65537" err = p.Prepare(config) if err == nil { t.Fatal("should have error") } config["local_port"] = "22222" err = p.Prepare(config) if err != nil { t.Fatalf("err: %s", err) } } func TestProvisionerPrepare_InventoryDirectory(t *testing.T) { var p Provisioner config := testConfig(t) defer os.Remove(config["command"].(string)) hostkey_file, err := ioutil.TempFile("", "hostkey") if err != nil { t.Fatalf("err: %s", err) } defer os.Remove(hostkey_file.Name()) publickey_file, err := ioutil.TempFile("", "publickey") if err != nil { t.Fatalf("err: %s", err) } defer os.Remove(publickey_file.Name()) playbook_file, err := ioutil.TempFile("", "playbook") if err != nil { t.Fatalf("err: %s", err) } defer os.Remove(playbook_file.Name()) config["ssh_host_key_file"] = hostkey_file.Name() config["ssh_authorized_key_file"] = publickey_file.Name() config["playbook_file"] = playbook_file.Name() config["inventory_directory"] = "doesnotexist" err = p.Prepare(config) if err == nil { t.Errorf("should error if inventory_directory does not exist") } inventoryDirectory, err := ioutil.TempDir("", "some_inventory_dir") if err != nil { t.Fatalf("err: %s", err) } defer os.Remove(inventoryDirectory) config["inventory_directory"] = inventoryDirectory err = p.Prepare(config) if err != nil { t.Fatalf("err: %s", err) } } func TestAnsibleGetVersion(t *testing.T) { if os.Getenv("PACKER_ACC") == "" { t.Skip("This test is only run with PACKER_ACC=1 and it requires Ansible to be installed") } var p 
Provisioner p.config.Command = "ansible-playbook" err := p.getVersion() if err != nil { t.Fatalf("err: %s", err) } } func TestAnsibleGetVersionError(t *testing.T) { var p Provisioner p.config.Command = "./test-fixtures/exit1" err := p.getVersion() if err == nil { t.Fatal("Should return error") } if !strings.Contains(err.Error(), "./test-fixtures/exit1 --version") { t.Fatal("Error message should include command name") } } func TestAnsibleLongMessages(t *testing.T) { if os.Getenv("PACKER_ACC") == "" { t.Skip("This test is only run with PACKER_ACC=1 and it requires Ansible to be installed") } var p Provisioner p.config.Command = "ansible-playbook" p.config.PlaybookFile = "./test-fixtures/long-debug-message.yml" err := p.Prepare() if err != nil { t.Fatalf("err: %s", err) } comm := &packer.MockCommunicator{} ui := &packer.BasicUi{ Reader: new(bytes.Buffer), Writer: new(bytes.Buffer), } err = p.Provision(ui, comm) if err != nil { t.Fatalf("err: %s", err) } }
[ "\"PACKER_ACC\"", "\"PACKER_ACC\"" ]
[]
[ "PACKER_ACC" ]
[]
["PACKER_ACC"]
go
1
0
src/azure-cli/azure/cli/command_modules/acs/_params.py
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- # pylint: disable=line-too-long,too-many-statements,no-name-in-module,import-error import os.path import platform from argcomplete.completers import FilesCompleter from azure.cli.core.commands.parameters import ( file_type, get_enum_type, get_resource_name_completion_list, name_type, tags_type, zones_type, edge_zone_type) from azure.cli.core.commands.validators import validate_file_or_dict from azure.cli.core.profiles import ResourceType from knack.arguments import CLIArgumentType from ._completers import ( get_vm_size_completion_list, get_k8s_versions_completion_list, get_k8s_upgrades_completion_list, get_ossku_completion_list) from ._validators import ( validate_create_parameters, validate_kubectl_version, validate_kubelogin_version, validate_k8s_version, validate_linux_host_name, validate_list_of_integers, validate_ssh_key, validate_nodes_count, validate_nodepool_name, validate_vm_set_type, validate_load_balancer_sku, validate_nodepool_id, validate_snapshot_id, validate_load_balancer_outbound_ips, validate_priority, validate_eviction_policy, validate_spot_max_price, validate_load_balancer_outbound_ip_prefixes, validate_taints, validate_ip_ranges, validate_acr, validate_nodepool_tags, validate_load_balancer_outbound_ports, validate_load_balancer_idle_timeout, validate_vnet_subnet_id, validate_nodepool_labels, validate_ppg, validate_assign_identity, validate_max_surge, validate_assign_kubelet_identity) from ._consts import ( CONST_OUTBOUND_TYPE_LOAD_BALANCER, CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING, CONST_SCALE_SET_PRIORITY_REGULAR, CONST_SCALE_SET_PRIORITY_SPOT, CONST_SPOT_EVICTION_POLICY_DELETE, CONST_SPOT_EVICTION_POLICY_DEALLOCATE, CONST_OS_DISK_TYPE_MANAGED, CONST_OS_DISK_TYPE_EPHEMERAL, CONST_RAPID_UPGRADE_CHANNEL, CONST_STABLE_UPGRADE_CHANNEL, CONST_PATCH_UPGRADE_CHANNEL, CONST_NODE_IMAGE_UPGRADE_CHANNEL, CONST_NONE_UPGRADE_CHANNEL, CONST_NODEPOOL_MODE_SYSTEM, CONST_NODEPOOL_MODE_USER, ) # candidates for enumeration, no longer maintained orchestrator_types = ["Custom", "DCOS", "Kubernetes", "Swarm", "DockerCE"] regions_in_preview = [ "canadacentral", "canadaeast", "centralindia", "koreasouth", "koreacentral", "southindia", "uksouth", "ukwest", "westcentralus", "westindia", "westus2", ] regions_in_prod = [ "australiaeast", "australiasoutheast", "brazilsouth", "centralus", "eastasia", "eastus", "eastus2", "japaneast", "japanwest", "northcentralus", "northeurope", "southcentralus", "southeastasia", "westeurope", "westus", ] storage_profile_types = ["StorageAccount", "ManagedDisks"] # candidates for enumeration, under support node_mode_types = [CONST_NODEPOOL_MODE_SYSTEM, CONST_NODEPOOL_MODE_USER] node_priorities = [CONST_SCALE_SET_PRIORITY_REGULAR, CONST_SCALE_SET_PRIORITY_SPOT] node_eviction_policies = [CONST_SPOT_EVICTION_POLICY_DELETE, CONST_SPOT_EVICTION_POLICY_DEALLOCATE] node_os_disk_types = [CONST_OS_DISK_TYPE_MANAGED, CONST_OS_DISK_TYPE_EPHEMERAL] network_plugins = ['azure', 'kubenet'] outbound_types = [CONST_OUTBOUND_TYPE_LOAD_BALANCER, CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING] auto_upgrade_channels = [ CONST_RAPID_UPGRADE_CHANNEL, CONST_STABLE_UPGRADE_CHANNEL, CONST_PATCH_UPGRADE_CHANNEL, CONST_NODE_IMAGE_UPGRADE_CHANNEL, 
CONST_NONE_UPGRADE_CHANNEL, ] dev_space_endpoint_types = ['Public', 'Private', 'None'] def load_arguments(self, _): acr_arg_type = CLIArgumentType(metavar='ACR_NAME_OR_RESOURCE_ID') # ACS command argument configuration with self.argument_context('acs') as c: c.argument('resource_name', name_type, completer=get_resource_name_completion_list( 'Microsoft.ContainerService/ContainerServices'), help='Name of the container service. You can configure the default using `az configure --defaults acs=<name>`') c.argument('name', name_type, completer=get_resource_name_completion_list( 'Microsoft.ContainerService/ContainerServices'), help='Name of the container service. You can configure the default using `az configure --defaults acs=<name>`') c.argument('container_service_name', name_type, help='Name of the container service. You can configure the default using `az configure --defaults acs=<name>`', completer=get_resource_name_completion_list('Microsoft.ContainerService/ContainerServices')) c.argument('admin_username', options_list=[ '--admin-username', '-u'], default='azureuser') c.argument('api_version', help=_get_feature_in_preview_message() + 'Use API version of ACS to perform az acs operations. Available options: 2017-01-31, 2017-07-01. Default: the latest version for the location') c.argument('dns_name_prefix', options_list=['--dns-prefix', '-d']) c.argument('orchestrator_type', get_enum_type( orchestrator_types), options_list=['--orchestrator-type', '-t']) c.argument('ssh_key_value', required=False, type=file_type, default=os.path.join('~', '.ssh', 'id_rsa.pub'), completer=FilesCompleter(), validator=validate_ssh_key) c.argument('tags', tags_type) c.argument('disable_browser', help='Do not open browser after opening a proxy to the cluster web user interface') with self.argument_context('acs create') as c: c.argument('ssh_key_value', required=False, type=file_type, default=os.path.join('~', '.ssh', 'id_rsa.pub'), completer=FilesCompleter(), validator=validate_ssh_key) c.argument('master_profile', options_list=['--master-profile', '-m'], type=validate_file_or_dict, help=_get_feature_in_preview_message() + 'The file or dictionary representation of the master profile. Note it will override any master settings once set') c.argument('master_vm_size', completer=get_vm_size_completion_list, help=_get_feature_in_preview_message()) c.argument('agent_count', type=int) c.argument('generate_ssh_keys', action='store_true', validator=validate_create_parameters, help='Generate SSH public and private key files if missing') c.argument('master_osdisk_size', type=int, help=_get_feature_in_preview_message() + 'The disk size for master pool vms. Unit in GB. Default: corresponding vmsize disk size') c.argument('master_vnet_subnet_id', type=str, help=_get_feature_in_preview_message() + 'The custom vnet subnet id. Note agent need to used the same vnet if master set. Default: ""') c.argument('master_first_consecutive_static_ip', type=str, help=_get_feature_in_preview_message() + 'The first consecutive ip used to specify static ip block.') c.argument('master_storage_profile', get_enum_type(storage_profile_types), help=_get_feature_in_preview_message() + 'Default: varies based on Orchestrator') c.argument('agent_profiles', options_list=['--agent-profiles', '-a'], type=validate_file_or_dict, help=_get_feature_in_preview_message() + 'The file or dictionary representation of the agent profiles. 
Note it will override any agent settings once set') c.argument('agent_vm_size', completer=get_vm_size_completion_list, help='Set the default size for agent pools vms.') c.argument('agent_osdisk_size', type=int, help=_get_feature_in_preview_message() + 'Set the default disk size for agent pools vms. Unit in GB. Default: corresponding vmsize disk size') c.argument('agent_vnet_subnet_id', type=str, help=_get_feature_in_preview_message() + 'Set the default custom vnet subnet id for agent pools. Note agent need to used the same vnet if master set. Default: ""') c.argument('agent_ports', type=validate_list_of_integers, help=_get_feature_in_preview_message() + 'Set the default ports exposed on the agent pools. Only usable for non-Kubernetes. Default: 8080,4000,80') c.argument('agent_storage_profile', get_enum_type(storage_profile_types), help=_get_feature_in_preview_message() + 'Set default storage profile for agent pools. Default: varies based on Orchestrator') c.argument('windows', action='store_true', help='If true, set the default osType of agent pools to be Windows.') c.argument('validate', action='store_true', help='Generate and validate the ARM template without creating any resources') c.argument('orchestrator_version', help=_get_feature_in_preview_message( ) + 'Use Orchestrator Version to specify the semantic version for your choice of orchestrator.') with self.argument_context('acs scale') as c: c.argument('new_agent_count', type=int) for scope in ['dcos', 'kubernetes']: with self.argument_context('acs {} browse'.format(scope)) as c: c.argument('ssh_key_file', required=False, type=file_type, default=os.path.join('~', '.ssh', 'id_rsa'), completer=FilesCompleter(), help='Path to an SSH key file to use.') with self.argument_context('acs dcos install-cli') as c: c.argument('install_location', default=_get_default_install_location('dcos')) with self.argument_context('acs kubernetes get-credentials') as c: c.argument('path', options_list=['--file', '-f']) c.argument('overwrite_existing', action='store_true', help='If specified, overwrite any existing credentials.') with self.argument_context('acs kubernetes install-cli') as c: c.argument('install_location', type=file_type, completer=FilesCompleter(), default=_get_default_install_location('kubectl')) c.argument('ssh_key_file', required=False, type=file_type, default=os.path.join('~', '.ssh', 'id_rsa'), completer=FilesCompleter(), help='Path to an SSH key file to use.') # AKS command argument configuration with self.argument_context('aks', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='managed_clusters') as c: c.argument('resource_name', name_type, help='Name of the managed cluster.', completer=get_resource_name_completion_list('Microsoft.ContainerService/ManagedClusters')) c.argument('name', name_type, help='Name of the managed cluster.', completer=get_resource_name_completion_list('Microsoft.ContainerService/ManagedClusters')) c.argument('kubernetes_version', options_list=[ '--kubernetes-version', '-k'], validator=validate_k8s_version) c.argument('node_count', options_list=['--node-count', '-c'], type=int) c.argument('tags', tags_type) with self.argument_context('aks create', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='managed_clusters') as c: c.argument('name', validator=validate_linux_host_name) c.argument('kubernetes_version', completer=get_k8s_versions_completion_list) c.argument('admin_username', options_list=[ '--admin-username', '-u'], default='azureuser') c.argument('dns_name_prefix', 
options_list=['--dns-name-prefix', '-p']) c.argument('generate_ssh_keys', action='store_true', validator=validate_create_parameters) c.argument('node_vm_size', options_list=[ '--node-vm-size', '-s'], completer=get_vm_size_completion_list) c.argument('nodepool_name', type=str, default='nodepool1', help='Node pool name, up to 12 alphanumeric characters', validator=validate_nodepool_name) c.argument('os_sku', completer=get_ossku_completion_list) c.argument('ssh_key_value', required=False, type=file_type, default=os.path.join('~', '.ssh', 'id_rsa.pub'), completer=FilesCompleter(), validator=validate_ssh_key) c.argument('aad_client_app_id') c.argument('aad_server_app_id') c.argument('aad_server_app_secret') c.argument('aad_tenant_id') c.argument('dns_service_ip') c.argument('docker_bridge_address') c.argument('edge_zone', edge_zone_type) c.argument('load_balancer_sku', type=str, validator=validate_load_balancer_sku) c.argument('load_balancer_managed_outbound_ip_count', type=int) c.argument('load_balancer_outbound_ips', type=str, validator=validate_load_balancer_outbound_ips) c.argument('load_balancer_outbound_ip_prefixes', type=str, validator=validate_load_balancer_outbound_ip_prefixes) c.argument('load_balancer_outbound_ports', type=int, validator=validate_load_balancer_outbound_ports) c.argument('load_balancer_idle_timeout', type=int, validator=validate_load_balancer_idle_timeout) c.argument('outbound_type', arg_type=get_enum_type(outbound_types)) c.argument('auto_upgrade_channel', arg_type=get_enum_type(auto_upgrade_channels)) c.argument('enable_cluster_autoscaler', action='store_true') c.argument('cluster_autoscaler_profile', nargs='+', options_list=["--cluster-autoscaler-profile", "--ca-profile"], help="Space-separated list of key=value pairs for configuring cluster autoscaler. Pass an empty string to clear the profile.") c.argument('min_count', type=int, validator=validate_nodes_count) c.argument('max_count', type=int, validator=validate_nodes_count) c.argument('vm_set_type', type=str, validator=validate_vm_set_type) c.argument('zones', zones_type, options_list=[ '--zones', '-z'], help='Space-separated list of availability zones where agent nodes will be placed.') c.argument('uptime_sla', action='store_true') c.argument('enable_addons', options_list=['--enable-addons', '-a']) c.argument('disable_rbac', action='store_true') c.argument('enable_rbac', action='store_true', options_list=['--enable-rbac', '-r'], deprecate_info=c.deprecate(redirect="--disable-rbac", hide="2.0.45")) c.argument('max_pods', type=int, options_list=['--max-pods', '-m']) c.argument('network_plugin', arg_type=get_enum_type(network_plugins)) c.argument('network_policy') c.argument('no_ssh_key', options_list=['--no-ssh-key', '-x']) c.argument('pod_cidr') c.argument('service_cidr') c.argument('ppg', type=str, validator=validate_ppg) c.argument('vnet_subnet_id', type=str, validator=validate_vnet_subnet_id) c.argument('workspace_resource_id') c.argument('skip_subnet_role_assignment', action='store_true') c.argument('api_server_authorized_ip_ranges', type=str, validator=validate_ip_ranges) c.argument('attach_acr', acr_arg_type) c.argument('enable_private_cluster', action='store_true') c.argument('private_dns_zone') c.argument('fqdn_subdomain') c.argument('disable_public_fqdn', action='store_true') c.argument('nodepool_tags', nargs='*', validator=validate_nodepool_tags, help='space-separated tags: key[=value] [key[=value] ...]. 
Use "" to clear existing tags.') c.argument('enable_managed_identity', action='store_true') c.argument('assign_identity', type=str, validator=validate_assign_identity) c.argument('nodepool_labels', nargs='*', validator=validate_nodepool_labels, help='space-separated labels: key[=value] [key[=value] ...]. See https://aka.ms/node-labels for syntax of labels.') c.argument('enable_node_public_ip', action='store_true') c.argument('node_public_ip_prefix_id', type=str) c.argument('windows_admin_username', options_list=[ '--windows-admin-username']) c.argument('windows_admin_password', options_list=[ '--windows-admin-password']) c.argument('enable_ahub', options_list=['--enable-ahub'], action='store_true') c.argument('node_osdisk_diskencryptionset_id', type=str, options_list=['--node-osdisk-diskencryptionset-id', '-d']) c.argument('aci_subnet_name') c.argument('enable_encryption_at_host', options_list=[ '--enable-encryption-at-host'], action='store_true') c.argument('enable_ultra_ssd', options_list=[ '--enable-ultra-ssd'], action='store_true') c.argument('appgw_name', options_list=[ '--appgw-name'], arg_group='Application Gateway') c.argument('appgw_subnet_cidr', options_list=[ '--appgw-subnet-cidr'], arg_group='Application Gateway') c.argument('appgw_id', options_list=[ '--appgw-id'], arg_group='Application Gateway') c.argument('appgw_subnet_id', options_list=[ '--appgw-subnet-id'], arg_group='Application Gateway') c.argument('appgw_watch_namespace', options_list=[ '--appgw-watch-namespace'], arg_group='Application Gateway') c.argument('assign_kubelet_identity', validator=validate_assign_kubelet_identity) c.argument('disable_local_accounts', action='store_true') c.argument('enable_secret_rotation', action='store_true') c.argument('rotation_poll_interval', type=str) c.argument('yes', options_list=[ '--yes', '-y'], help='Do not prompt for confirmation.', action='store_true') c.argument('enable_sgxquotehelper', action='store_true') c.argument('enable_fips_image', action='store_true') c.argument('snapshot_id', validator=validate_snapshot_id) with self.argument_context('aks update', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='managed_clusters') as c: c.argument('attach_acr', acr_arg_type, validator=validate_acr) c.argument('detach_acr', acr_arg_type, validator=validate_acr) with self.argument_context('aks update') as c: c.argument('enable_cluster_autoscaler', options_list=[ "--enable-cluster-autoscaler", "-e"], action='store_true') c.argument('disable_cluster_autoscaler', options_list=[ "--disable-cluster-autoscaler", "-d"], action='store_true') c.argument('update_cluster_autoscaler', options_list=[ "--update-cluster-autoscaler", "-u"], action='store_true') c.argument('cluster_autoscaler_profile', nargs='+', options_list=["--cluster-autoscaler-profile", "--ca-profile"], help="Space-separated list of key=value pairs for configuring cluster autoscaler. 
Pass an empty string to clear the profile.") c.argument('min_count', type=int, validator=validate_nodes_count) c.argument('max_count', type=int, validator=validate_nodes_count) c.argument('uptime_sla', action='store_true') c.argument('no_uptime_sla', action='store_true') c.argument('load_balancer_managed_outbound_ip_count', type=int) c.argument('load_balancer_outbound_ips', type=str, validator=validate_load_balancer_outbound_ips) c.argument('load_balancer_outbound_ip_prefixes', type=str, validator=validate_load_balancer_outbound_ip_prefixes) c.argument('load_balancer_outbound_ports', type=int, validator=validate_load_balancer_outbound_ports) c.argument('load_balancer_idle_timeout', type=int, validator=validate_load_balancer_idle_timeout) c.argument('auto_upgrade_channel', arg_type=get_enum_type(auto_upgrade_channels)) c.argument('api_server_authorized_ip_ranges', type=str, validator=validate_ip_ranges) c.argument('enable_ahub', options_list=['--enable-ahub'], action='store_true') c.argument('disable_ahub', options_list=['--disable-ahub'], action='store_true') c.argument('enable_public_fqdn', action='store_true') c.argument('disable_public_fqdn', action='store_true') c.argument('windows_admin_password', options_list=[ '--windows-admin-password']) c.argument('enable_managed_identity', action='store_true') c.argument('assign_identity', type=str, validator=validate_assign_identity) c.argument('disable_local_accounts', action='store_true') c.argument('enable_local_accounts', action='store_true') c.argument('enable_secret_rotation', action='store_true') c.argument('disable_secret_rotation', action='store_true') c.argument('rotation_poll_interval', type=str) c.argument('yes', options_list=[ '--yes', '-y'], help='Do not prompt for confirmation.', action='store_true') c.argument('nodepool_labels', nargs='*', validator=validate_nodepool_labels, help='space-separated labels: key[=value] [key[=value] ...]. 
See https://aka.ms/node-labels for syntax of labels.') with self.argument_context('aks disable-addons', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='managed_clusters') as c: c.argument('addons', options_list=['--addons', '-a']) with self.argument_context('aks enable-addons', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='managed_clusters') as c: c.argument('addons', options_list=['--addons', '-a']) c.argument('subnet_name', options_list=[ '--subnet-name', '-s'], help='Name of an existing subnet to use with the virtual-node add-on.') c.argument('appgw_name', options_list=[ '--appgw-name'], arg_group='Application Gateway') c.argument('appgw_subnet_cidr', options_list=[ '--appgw-subnet-cidr'], arg_group='Application Gateway') c.argument('appgw_id', options_list=[ '--appgw-id'], arg_group='Application Gateway') c.argument('appgw_subnet_id', options_list=[ '--appgw-subnet-id'], arg_group='Application Gateway') c.argument('appgw_watch_namespace', options_list=[ '--appgw-watch-namespace'], arg_group='Application Gateway') c.argument('enable_sgxquotehelper', action='store_true') c.argument('enable_secret_rotation', action='store_true') c.argument('rotation_poll_interval', type=str) with self.argument_context('aks get-credentials', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='managed_clusters') as c: c.argument('admin', options_list=['--admin', '-a'], default=False) c.argument('context_name', options_list=['--context'], help='If specified, overwrite the default context name.') c.argument('path', options_list=['--file', '-f'], type=file_type, completer=FilesCompleter(), default=os.path.join(os.path.expanduser('~'), '.kube', 'config')) c.argument('public_fqdn', default=False, action='store_true') for scope in ['aks', 'acs kubernetes', 'acs dcos']: with self.argument_context('{} install-cli'.format(scope)) as c: c.argument('client_version', validator=validate_kubectl_version, help='Version of kubectl to install.') c.argument('install_location', default=_get_default_install_location( 'kubectl'), help='Path at which to install kubectl.') c.argument('base_src_url', help='Base download source URL for kubectl releases.') c.argument('kubelogin_version', validator=validate_kubelogin_version, help='Version of kubelogin to install.') c.argument('kubelogin_install_location', default=_get_default_install_location( 'kubelogin'), help='Path at which to install kubelogin.') c.argument('kubelogin_base_src_url', options_list=[ '--kubelogin-base-src-url', '-l'], help='Base download source URL for kubelogin releases.') with self.argument_context('aks update-credentials', arg_group='Service Principal') as c: c.argument('reset_service_principal', action='store_true') c.argument('service_principal') c.argument('client_secret') with self.argument_context('aks update-credentials', arg_group='AAD') as c: c.argument('reset_aad', action='store_true') c.argument('aad_client_app_id') c.argument('aad_server_app_id') c.argument('aad_server_app_secret') c.argument('aad_tenant_id') with self.argument_context('aks upgrade', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='managed_clusters') as c: c.argument('kubernetes_version', completer=get_k8s_upgrades_completion_list) c.argument('yes', options_list=[ '--yes', '-y'], help='Do not prompt for confirmation.', action='store_true') with self.argument_context('aks scale', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='managed_clusters') as c: c.argument('nodepool_name', type=str, 
help='Node pool name, up to 12 alphanumeric characters', validator=validate_nodepool_name) with self.argument_context('aks nodepool', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='managed_clusters') as c: c.argument('cluster_name', type=str, help='The cluster name.') for scope in ['aks nodepool add']: with self.argument_context(scope) as c: c.argument('nodepool_name', type=str, options_list=[ '--name', '-n'], validator=validate_nodepool_name, help='The node pool name.') c.argument('zones', zones_type, options_list=[ '--zones', '-z'], help='Space-separated list of availability zones where agent nodes will be placed.') c.argument('node_vm_size', options_list=[ '--node-vm-size', '-s'], completer=get_vm_size_completion_list) c.argument('max_pods', type=int, options_list=['--max-pods', '-m']) c.argument('os_type', type=str) c.argument('os_sku', completer=get_ossku_completion_list) c.argument('enable_cluster_autoscaler', options_list=[ "--enable-cluster-autoscaler", "-e"], action='store_true') c.argument('node_taints', type=str, validator=validate_taints) c.argument('priority', arg_type=get_enum_type(node_priorities), validator=validate_priority) c.argument('eviction_policy', arg_type=get_enum_type(node_eviction_policies), validator=validate_eviction_policy) c.argument('spot_max_price', type=float, validator=validate_spot_max_price) c.argument('tags', tags_type) c.argument('labels', nargs='*', validator=validate_nodepool_labels) c.argument('mode', get_enum_type(node_mode_types)) c.argument('enable_node_public_ip', action='store_true') c.argument('node_public_ip_prefix_id', type=str) c.argument('ppg', type=str, validator=validate_ppg) c.argument('max_surge', type=str, validator=validate_max_surge) c.argument('node_os_disk_type', arg_type=get_enum_type(node_os_disk_types)) c.argument('enable_encryption_at_host', options_list=[ '--enable-encryption-at-host'], action='store_true') c.argument('enable_ultra_ssd', options_list=[ '--enable-ultra-ssd'], action='store_true') c.argument('enable_fips_image', action='store_true') c.argument('snapshot_id', validator=validate_snapshot_id) for scope in ['aks nodepool show', 'aks nodepool delete', 'aks nodepool scale', 'aks nodepool upgrade', 'aks nodepool update']: with self.argument_context(scope) as c: c.argument('nodepool_name', type=str, options_list=[ '--name', '-n'], validator=validate_nodepool_name, help='The node pool name.') with self.argument_context('aks nodepool upgrade') as c: c.argument('snapshot_id', validator=validate_snapshot_id) with self.argument_context('aks nodepool update', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='agent_pools') as c: c.argument('enable_cluster_autoscaler', options_list=[ "--enable-cluster-autoscaler", "-e"], action='store_true') c.argument('disable_cluster_autoscaler', options_list=[ "--disable-cluster-autoscaler", "-d"], action='store_true') c.argument('update_cluster_autoscaler', options_list=[ "--update-cluster-autoscaler", "-u"], action='store_true') c.argument('tags', tags_type) c.argument('mode', get_enum_type(node_mode_types)) c.argument('max_surge', type=str, validator=validate_max_surge) c.argument('labels', nargs='*', validator=validate_nodepool_labels) with self.argument_context('aks command invoke') as c: c.argument('command_string', type=str, options_list=[ "--command", "-c"], help='the command to run') c.argument('command_files', options_list=["--file", "-f"], required=False, action="append", help='attach any files the command may use, or use \'.\' to upload the 
current folder.') with self.argument_context('aks command result') as c: c.argument('command_id', type=str, options_list=[ "--command-id", "-i"], help='the command ID from "aks command invoke"') with self.argument_context('aks use-dev-spaces') as c: c.argument('update', options_list=['--update'], action='store_true') c.argument('space_name', options_list=['--space', '-s']) c.argument('endpoint_type', get_enum_type(dev_space_endpoint_types, default='Public'), options_list=['--endpoint', '-e']) c.argument('prompt', options_list=[ '--yes', '-y'], action='store_true', help='Do not prompt for confirmation. Requires --space.') with self.argument_context('aks remove-dev-spaces') as c: c.argument('prompt', options_list=[ '--yes', '-y'], action='store_true', help='Do not prompt for confirmation') # OpenShift command argument configuration with self.argument_context('openshift', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='open_shift_managed_clusters') as c: c.argument('resource_name', name_type, help='Name of the managed OpenShift cluster.', completer=get_resource_name_completion_list('Microsoft.ContainerService/OpenShiftManagedClusters')) c.argument('name', name_type, help='Name of the managed OpenShift cluster.', completer=get_resource_name_completion_list('Microsoft.ContainerService/OpenShiftManagedClusters')) c.argument('compute_count', options_list=[ '--compute-count', '-c'], type=int, default=4) c.argument('tags', tags_type) with self.argument_context('openshift create', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='open_shift_managed_clusters') as c: c.argument('name', validator=validate_linux_host_name) c.argument('compute_vm_size', options_list=['--compute-vm-size', '-s']) c.argument('customer_admin_group_id', options_list=[ '--customer-admin-group-id']) c.argument('workspace_id') with self.argument_context('openshift monitor enable', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='open_shift_managed_clusters') as c: c.argument( 'workspace_id', help='The resource ID of an existing Log Analytics Workspace to use for storing monitoring data.') for scope in ['aks snapshot create']: with self.argument_context(scope) as c: c.argument('snapshot_name', options_list=['--name', '-n'], required=True, validator=validate_linux_host_name, help='The snapshot name.') c.argument('tags', tags_type) c.argument('nodepool_id', required=True, validator=validate_nodepool_id, help='The nodepool id.') c.argument('aks_custom_headers') for scope in ['aks snapshot show', 'aks snapshot delete']: with self.argument_context(scope) as c: c.argument('snapshot_name', options_list=['--name', '-n'], required=True, validator=validate_linux_host_name, help='The snapshot name.') c.argument('yes', options_list=['--yes', '-y'], help='Do not prompt for confirmation.', action='store_true') def _get_default_install_location(exe_name): system = platform.system() if system == 'Windows': home_dir = os.environ.get('USERPROFILE') if not home_dir: return None install_location = os.path.join( home_dir, r'.azure-{0}\{0}.exe'.format(exe_name)) elif system in ('Linux', 'Darwin'): install_location = '/usr/local/bin/{}'.format(exe_name) else: install_location = None return install_location def _get_feature_in_preview_message(): return "Feature in preview, only in " + ", ".join(regions_in_preview) + ". "
[]
[]
[ "USERPROFILE" ]
[]
["USERPROFILE"]
python
1
0
log/log.go
// Package log provides a simple logging service to print to stdout/stderr with timestamp // and log source information. package log import ( "fmt" "os" "runtime" "runtime/debug" "strings" "time" ) type logger struct { } var ( // Logger to be passed around as a LogWriter instance. Logger logger gopath = os.Getenv("GOPATH") + "/src/" ) // Print is a wrapper for the Info log method. // // This is used for passing the Logger around as a LogWriter interface. func (l logger) Print(a ...interface{}) { Info(a...) } // Info outputs to stdout. func Info(a ...interface{}) { fmt.Println(fileAndLineNumber(), dateAndTimeStamp(), a) } // Infof outputs a formatted string to stdout. func Infof(format string, a ...interface{}) { // Note: Cannot call Info() directly because that would ruin the file/line number of the caller fmt.Println(fileAndLineNumber(), dateAndTimeStamp(), fmt.Sprintf(format, a...)) } // Error outputs to stderr. func Error(a ...interface{}) { fmt.Fprintln(os.Stderr, fileAndLineNumber(), dateAndTimeStamp(), a) } // Errorf outputs a formatted error to stderr. func Errorf(format string, a ...interface{}) { // Note: Cannot call Error() directly because that would ruin the file/line number of the caller fmt.Fprintln(os.Stderr, fileAndLineNumber(), dateAndTimeStamp(), fmt.Sprintf(format, a...)) } // PrintStack outputs the current go routine's stack trace. func PrintStack() { debug.PrintStack() } // fileAndLineNumber returns the file and line number of the code that called `log`. func fileAndLineNumber() string { // Use 2 for the Caller because 0 is this function, // 1 is the log.* method that called it, // and 2 is what came before. _, fn, line, _ := runtime.Caller(2) return fmt.Sprintf("%v:%v", strings.Replace(fn, gopath, "", 1), line) } // dateAndTimeStamp returns a formatted date/time stamp for the current time. func dateAndTimeStamp() string { return time.Now().Format(time.StampMilli) }
[ "\"GOPATH\"" ]
[]
[ "GOPATH" ]
[]
["GOPATH"]
go
1
0
contrib/devtools/github-merge.py
#!/usr/bin/env python3 # Copyright (c) 2016-2017 Bitcoin Core Developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # This script will locally construct a merge commit for a pull request on a # github repository, inspect it, sign it and optionally push it. # The following temporary branches are created/overwritten and deleted: # * pull/$PULL/base (the current master we're merging onto) # * pull/$PULL/head (the current state of the remote pull request) # * pull/$PULL/merge (github's merge) # * pull/$PULL/local-merge (our merge) # In case of a clean merge that is accepted by the user, the local branch with # name $BRANCH is overwritten with the merged result, and optionally pushed. from __future__ import division,print_function,unicode_literals import os,sys from sys import stdin,stdout,stderr import argparse import hashlib import subprocess import json,codecs try: from urllib.request import Request,urlopen except: from urllib2 import Request,urlopen # External tools (can be overridden using environment) GIT = os.getenv('GIT','git') BASH = os.getenv('BASH','bash') # OS specific configuration for terminal attributes ATTR_RESET = '' ATTR_PR = '' COMMIT_FORMAT = '%h %s (%an)%d' if os.name == 'posix': # if posix, assume we can use basic terminal escapes ATTR_RESET = '\033[0m' ATTR_PR = '\033[1;36m' COMMIT_FORMAT = '%C(bold blue)%h%Creset %s %C(cyan)(%an)%Creset%C(green)%d%Creset' def git_config_get(option, default=None): ''' Get named configuration option from git repository. ''' try: return subprocess.check_output([GIT,'config','--get',option]).rstrip().decode('utf-8') except subprocess.CalledProcessError as e: return default def retrieve_pr_info(repo,pull): ''' Retrieve pull request information from github. Return None if no title can be found, or an error happens. 
''' try: req = Request("https://api.github.com/repos/"+repo+"/pulls/"+pull) result = urlopen(req) reader = codecs.getreader('utf-8') obj = json.load(reader(result)) return obj except Exception as e: print('Warning: unable to retrieve pull information from github: %s' % e) return None def ask_prompt(text): print(text,end=" ",file=stderr) stderr.flush() reply = stdin.readline().rstrip() print("",file=stderr) return reply def get_symlink_files(): files = sorted(subprocess.check_output([GIT, 'ls-tree', '--full-tree', '-r', 'HEAD']).splitlines()) ret = [] for f in files: if (int(f.decode('utf-8').split(" ")[0], 8) & 0o170000) == 0o120000: ret.append(f.decode('utf-8').split("\t")[1]) return ret def tree_sha512sum(commit='HEAD'): # request metadata for entire tree, recursively files = [] blob_by_name = {} for line in subprocess.check_output([GIT, 'ls-tree', '--full-tree', '-r', commit]).splitlines(): name_sep = line.index(b'\t') metadata = line[:name_sep].split() # perms, 'blob', blobid assert(metadata[1] == b'blob') name = line[name_sep+1:] files.append(name) blob_by_name[name] = metadata[2] files.sort() # open connection to git-cat-file in batch mode to request data for all blobs # this is much faster than launching it per file p = subprocess.Popen([GIT, 'cat-file', '--batch'], stdout=subprocess.PIPE, stdin=subprocess.PIPE) overall = hashlib.sha512() for f in files: blob = blob_by_name[f] # request blob p.stdin.write(blob + b'\n') p.stdin.flush() # read header: blob, "blob", size reply = p.stdout.readline().split() assert(reply[0] == blob and reply[1] == b'blob') size = int(reply[2]) # hash the blob data intern = hashlib.sha512() ptr = 0 while ptr < size: bs = min(65536, size - ptr) piece = p.stdout.read(bs) if len(piece) == bs: intern.update(piece) else: raise IOError('Premature EOF reading git cat-file output') ptr += bs dig = intern.hexdigest() assert(p.stdout.read(1) == b'\n') # ignore LF that follows blob data # update overall hash with file hash overall.update(dig.encode("utf-8")) overall.update(" ".encode("utf-8")) overall.update(f) overall.update("\n".encode("utf-8")) p.stdin.close() if p.wait(): raise IOError('Non-zero return value executing git cat-file') return overall.hexdigest() def print_merge_details(pull, title, branch, base_branch, head_branch): print('%s#%s%s %s %sinto %s%s' % (ATTR_RESET+ATTR_PR,pull,ATTR_RESET,title,ATTR_RESET+ATTR_PR,branch,ATTR_RESET)) subprocess.check_call([GIT,'log','--graph','--topo-order','--pretty=format:'+COMMIT_FORMAT,base_branch+'..'+head_branch]) def parse_arguments(): epilog = ''' In addition, you can set the following git configuration variables: githubmerge.repository (mandatory), user.signingkey (mandatory), githubmerge.host (default: [email protected]), githubmerge.branch (no default), githubmerge.testcmd (default: none). 
''' parser = argparse.ArgumentParser(description='Utility to merge, sign and push github pull requests', epilog=epilog) parser.add_argument('pull', metavar='PULL', type=int, nargs=1, help='Pull request ID to merge') parser.add_argument('branch', metavar='BRANCH', type=str, nargs='?', default=None, help='Branch to merge against (default: githubmerge.branch setting, or base branch for pull, or \'master\')') return parser.parse_args() def main(): # Extract settings from git repo repo = git_config_get('githubmerge.repository') host = git_config_get('githubmerge.host','[email protected]') opt_branch = git_config_get('githubmerge.branch',None) testcmd = git_config_get('githubmerge.testcmd') signingkey = git_config_get('user.signingkey') if repo is None: print("ERROR: No repository configured. Use this command to set:", file=stderr) print("git config githubmerge.repository <owner>/<repo>", file=stderr) exit(1) if signingkey is None: print("ERROR: No GPG signing key set. Set one using:",file=stderr) print("git config --global user.signingkey <key>",file=stderr) exit(1) host_repo = host+":"+repo # shortcut for push/pull target # Extract settings from command line args = parse_arguments() pull = str(args.pull[0]) # Receive pull information from github info = retrieve_pr_info(repo,pull) if info is None: exit(1) title = info['title'].strip() # precedence order for destination branch argument: # - command line argument # - githubmerge.branch setting # - base branch for pull (as retrieved from github) # - 'master' branch = args.branch or opt_branch or info['base']['ref'] or 'master' # Initialize source branches head_branch = 'pull/'+pull+'/head' base_branch = 'pull/'+pull+'/base' merge_branch = 'pull/'+pull+'/merge' local_merge_branch = 'pull/'+pull+'/local-merge' devnull = open(os.devnull,'w') try: subprocess.check_call([GIT,'checkout','-q',branch]) except subprocess.CalledProcessError as e: print("ERROR: Cannot check out branch %s." % (branch), file=stderr) exit(3) try: subprocess.check_call([GIT,'fetch','-q',host_repo,'+refs/pull/'+pull+'/*:refs/heads/pull/'+pull+'/*']) except subprocess.CalledProcessError as e: print("ERROR: Cannot find pull request #%s on %s." % (pull,host_repo), file=stderr) exit(3) try: subprocess.check_call([GIT,'log','-q','-1','refs/heads/'+head_branch], stdout=devnull, stderr=stdout) except subprocess.CalledProcessError as e: print("ERROR: Cannot find head of pull request #%s on %s." % (pull,host_repo), file=stderr) exit(3) try: subprocess.check_call([GIT,'log','-q','-1','refs/heads/'+merge_branch], stdout=devnull, stderr=stdout) except subprocess.CalledProcessError as e: print("ERROR: Cannot find merge of pull request #%s on %s." % (pull,host_repo), file=stderr) exit(3) try: subprocess.check_call([GIT,'fetch','-q',host_repo,'+refs/heads/'+branch+':refs/heads/'+base_branch]) except subprocess.CalledProcessError as e: print("ERROR: Cannot find branch %s on %s." % (branch,host_repo), file=stderr) exit(3) subprocess.check_call([GIT,'checkout','-q',base_branch]) subprocess.call([GIT,'branch','-q','-D',local_merge_branch], stderr=devnull) subprocess.check_call([GIT,'checkout','-q','-b',local_merge_branch]) try: # Go up to the repository's root. toplevel = subprocess.check_output([GIT,'rev-parse','--show-toplevel']).strip() os.chdir(toplevel) # Create unsigned merge commit. 
if title: firstline = 'Merge #%s: %s' % (pull,title) else: firstline = 'Merge #%s' % (pull,) message = firstline + '\n\n' message += subprocess.check_output([GIT,'log','--no-merges','--topo-order','--pretty=format:%h %s (%an)',base_branch+'..'+head_branch]).decode('utf-8') try: subprocess.check_call([GIT,'merge','-q','--commit','--no-edit','--no-ff','-m',message.encode('utf-8'),head_branch]) except subprocess.CalledProcessError as e: print("ERROR: Cannot be merged cleanly.",file=stderr) subprocess.check_call([GIT,'merge','--abort']) exit(4) logmsg = subprocess.check_output([GIT,'log','--pretty=format:%s','-n','1']).decode('utf-8') if logmsg.rstrip() != firstline.rstrip(): print("ERROR: Creating merge failed (already merged?).",file=stderr) exit(4) symlink_files = get_symlink_files() for f in symlink_files: print("ERROR: File %s was a symlink" % f) if len(symlink_files) > 0: exit(4) # Put tree SHA512 into the message try: first_sha512 = tree_sha512sum() message += '\n\nTree-SHA512: ' + first_sha512 except subprocess.CalledProcessError as e: print("ERROR: Unable to compute tree hash") exit(4) try: subprocess.check_call([GIT,'commit','--amend','-m',message.encode('utf-8')]) except subprocess.CalledProcessError as e: print("ERROR: Cannot update message.",file=stderr) exit(4) print_merge_details(pull, title, branch, base_branch, head_branch) print() # Run test command if configured. if testcmd: if subprocess.call(testcmd,shell=True): print("ERROR: Running %s failed." % testcmd,file=stderr) exit(5) # Show the created merge. diff = subprocess.check_output([GIT,'diff',merge_branch+'..'+local_merge_branch]) subprocess.check_call([GIT,'diff',base_branch+'..'+local_merge_branch]) if diff: print("WARNING: merge differs from github!",file=stderr) reply = ask_prompt("Type 'ignore' to continue.") if reply.lower() == 'ignore': print("Difference with github ignored.",file=stderr) else: exit(6) else: # Verify the result manually. print("Dropping you on a shell so you can try building/testing the merged source.",file=stderr) print("Run 'git diff HEAD~' to show the changes being merged.",file=stderr) print("Type 'exit' when done.",file=stderr) if os.path.isfile('/etc/debian_version'): # Show pull number on Debian default prompt os.putenv('debian_chroot',pull) subprocess.call([BASH,'-i']) second_sha512 = tree_sha512sum() if first_sha512 != second_sha512: print("ERROR: Tree hash changed unexpectedly",file=stderr) exit(8) # Sign the merge commit. print_merge_details(pull, title, branch, base_branch, head_branch) while True: reply = ask_prompt("Type 's' to sign off on the above merge, or 'x' to reject and exit.").lower() if reply == 's': try: subprocess.check_call([GIT,'commit','-q','--gpg-sign','--amend','--no-edit']) break except subprocess.CalledProcessError as e: print("Error signing, exiting.",file=stderr) exit(1) elif reply == 'x': print("Not signing off on merge, exiting.",file=stderr) exit(1) # Put the result in branch. subprocess.check_call([GIT,'checkout','-q',branch]) subprocess.check_call([GIT,'reset','-q','--hard',local_merge_branch]) finally: # Clean up temporary branches. subprocess.call([GIT,'checkout','-q',branch]) subprocess.call([GIT,'branch','-q','-D',head_branch],stderr=devnull) subprocess.call([GIT,'branch','-q','-D',base_branch],stderr=devnull) subprocess.call([GIT,'branch','-q','-D',merge_branch],stderr=devnull) subprocess.call([GIT,'branch','-q','-D',local_merge_branch],stderr=devnull) # Push the result. 
while True: reply = ask_prompt("Type 'push' to push the result to %s, branch %s, or 'x' to exit without pushing." % (host_repo,branch)).lower() if reply == 'push': subprocess.check_call([GIT,'push',host_repo,'refs/heads/'+branch]) break elif reply == 'x': exit(1) if __name__ == '__main__': main()
[]
[]
[ "GIT", "BASH" ]
[]
["GIT", "BASH"]
python
2
0
bin/get-github-release.go
// +build ignore // Get the latest release from a github project // // If GITHUB_USER and GITHUB_TOKEN are set then these will be used to // authenticate the request which is useful to avoid rate limits. package main import ( "encoding/json" "flag" "fmt" "io" "io/ioutil" "log" "net/http" "os" "os/exec" "path/filepath" "regexp" "strings" "time" "golang.org/x/sys/unix" ) var ( // Flags install = flag.Bool("install", false, "Install the downloaded package using sudo dpkg -i.") extract = flag.String("extract", "", "Extract the named executable from the .tar.gz and install into bindir.") bindir = flag.String("bindir", defaultBinDir(), "Directory to install files downloaded with -extract.") // Globals matchProject = regexp.MustCompile(`^(\w+)/(\w+)$`) ) // A github release // // Made by pasting the JSON into https://mholt.github.io/json-to-go/ type Release struct { URL string `json:"url"` AssetsURL string `json:"assets_url"` UploadURL string `json:"upload_url"` HTMLURL string `json:"html_url"` ID int `json:"id"` TagName string `json:"tag_name"` TargetCommitish string `json:"target_commitish"` Name string `json:"name"` Draft bool `json:"draft"` Author struct { Login string `json:"login"` ID int `json:"id"` AvatarURL string `json:"avatar_url"` GravatarID string `json:"gravatar_id"` URL string `json:"url"` HTMLURL string `json:"html_url"` FollowersURL string `json:"followers_url"` FollowingURL string `json:"following_url"` GistsURL string `json:"gists_url"` StarredURL string `json:"starred_url"` SubscriptionsURL string `json:"subscriptions_url"` OrganizationsURL string `json:"organizations_url"` ReposURL string `json:"repos_url"` EventsURL string `json:"events_url"` ReceivedEventsURL string `json:"received_events_url"` Type string `json:"type"` SiteAdmin bool `json:"site_admin"` } `json:"author"` Prerelease bool `json:"prerelease"` CreatedAt time.Time `json:"created_at"` PublishedAt time.Time `json:"published_at"` Assets []struct { URL string `json:"url"` ID int `json:"id"` Name string `json:"name"` Label string `json:"label"` Uploader struct { Login string `json:"login"` ID int `json:"id"` AvatarURL string `json:"avatar_url"` GravatarID string `json:"gravatar_id"` URL string `json:"url"` HTMLURL string `json:"html_url"` FollowersURL string `json:"followers_url"` FollowingURL string `json:"following_url"` GistsURL string `json:"gists_url"` StarredURL string `json:"starred_url"` SubscriptionsURL string `json:"subscriptions_url"` OrganizationsURL string `json:"organizations_url"` ReposURL string `json:"repos_url"` EventsURL string `json:"events_url"` ReceivedEventsURL string `json:"received_events_url"` Type string `json:"type"` SiteAdmin bool `json:"site_admin"` } `json:"uploader"` ContentType string `json:"content_type"` State string `json:"state"` Size int `json:"size"` DownloadCount int `json:"download_count"` CreatedAt time.Time `json:"created_at"` UpdatedAt time.Time `json:"updated_at"` BrowserDownloadURL string `json:"browser_download_url"` } `json:"assets"` TarballURL string `json:"tarball_url"` ZipballURL string `json:"zipball_url"` Body string `json:"body"` } // checks if a path has write access func writable(path string) bool { return unix.Access(path, unix.W_OK) == nil } // Directory to install releases in by default // // Find writable directories on $PATH. Use the first writable // directory which is in $HOME or failing that the first writable // directory. 
// // Returns "" if none of the above were found func defaultBinDir() string { home := os.Getenv("HOME") var binDir string for _, dir := range strings.Split(os.Getenv("PATH"), ":") { if writable(dir) { if strings.HasPrefix(dir, home) { return dir } if binDir == "" { binDir = dir } } } return binDir } // read the body or an error message func readBody(in io.Reader) string { data, err := ioutil.ReadAll(in) if err != nil { return fmt.Sprintf("Error reading body: %v", err.Error()) } return string(data) } // Get an asset URL and name func getAsset(project string, matchName *regexp.Regexp) (string, string) { url := "https://api.github.com/repos/" + project + "/releases/latest" log.Printf("Fetching asset info for %q from %q", project, url) user, pass := os.Getenv("GITHUB_USER"), os.Getenv("GITHUB_TOKEN") req, err := http.NewRequest("GET", url, nil) if user != "" && pass != "" { log.Printf("Fetching using GITHUB_USER and GITHUB_TOKEN") req.SetBasicAuth(user, pass) } resp, err := http.DefaultClient.Do(req) if err != nil { log.Fatalf("Failed to fetch release info %q: %v", url, err) } if resp.StatusCode != http.StatusOK { log.Printf("Error: %s", readBody(resp.Body)) log.Fatalf("Bad status %d when fetching %q release info: %s", resp.StatusCode, url, resp.Status) } var release Release err = json.NewDecoder(resp.Body).Decode(&release) if err != nil { log.Fatalf("Failed to decode release info: %v", err) } err = resp.Body.Close() if err != nil { log.Fatalf("Failed to close body: %v", err) } for _, asset := range release.Assets { if matchName.MatchString(asset.Name) { return asset.BrowserDownloadURL, asset.Name } } log.Fatalf("Didn't find asset in info") return "", "" } // get a file for download func getFile(url, fileName string) { log.Printf("Downloading %q from %q", fileName, url) out, err := os.Create(fileName) if err != nil { log.Fatalf("Failed to open %q: %v", fileName, err) } resp, err := http.Get(url) if err != nil { log.Fatalf("Failed to fetch asset %q: %v", url, err) } if resp.StatusCode != http.StatusOK { log.Printf("Error: %s", readBody(resp.Body)) log.Fatalf("Bad status %d when fetching %q asset: %s", resp.StatusCode, url, resp.Status) } n, err := io.Copy(out, resp.Body) if err != nil { log.Fatalf("Error while downloading: %v", err) } err = resp.Body.Close() if err != nil { log.Fatalf("Failed to close body: %v", err) } err = out.Close() if err != nil { log.Fatalf("Failed to close output file: %v", err) } log.Printf("Downloaded %q (%d bytes)", fileName, n) } // run a shell command func run(args ...string) { cmd := exec.Command(args[0], args[1:]...) 
cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr err := cmd.Run() if err != nil { log.Fatalf("Failed to run %v: %v", args, err) } } func main() { flag.Parse() args := flag.Args() if len(args) != 2 { log.Fatalf("Syntax: %s <user/project> <name reg exp>", os.Args[0]) } project, nameRe := args[0], args[1] if !matchProject.MatchString(project) { log.Fatalf("Project %q must be in form user/project", project) } matchName, err := regexp.Compile(nameRe) if err != nil { log.Fatalf("Invalid regexp for name %q: %v", nameRe, err) } assetURL, assetName := getAsset(project, matchName) fileName := filepath.Join(os.TempDir(), assetName) getFile(assetURL, fileName) if *install { log.Printf("Installing %s", fileName) run("sudo", "dpkg", "--force-bad-version", "-i", fileName) log.Printf("Installed %s", fileName) } else if *extract != "" { if *bindir == "" { log.Fatalf("Need to set -bindir") } log.Printf("Unpacking %s from %s and installing into %s", *extract, fileName, *bindir) run("tar", "xf", fileName, *extract) run("chmod", "a+x", *extract) run("mv", "-f", *extract, *bindir+"/") } }
[ "\"HOME\"", "\"PATH\"", "\"GITHUB_USER\"", "\"GITHUB_TOKEN\"" ]
[]
[ "GITHUB_USER", "HOME", "GITHUB_TOKEN", "PATH" ]
[]
["GITHUB_USER", "HOME", "GITHUB_TOKEN", "PATH"]
go
4
0
app.py
from flask import Flask from redis import Redis, RedisError import os import socket # Connect to Redis redis = Redis(host="redis", db=0, socket_connect_timeout=2, socket_timeout=2) app = Flask(__name__) @app.route("/") def hello(): try: visits = redis.incr("counter") except RedisError: visits = "<i>Cannot Connect to Redis, counter disabled.</i>" html = "<h3>Hello {name}!</h3><b>Hostname:</b> {hostname}<br/><b>Visits:</b> {visits}" return html.format(name=os.getenv("NAME", "world"), hostname=socket.gethostname(), visits=visits) if __name__ == "__main__": app.run(host='0.0.0.0', port=80)
[]
[]
[ "NAME" ]
[]
["NAME"]
python
1
0
test_aiobspwm.py
import asyncio import os import stat import tempfile import unittest.mock as mock import aiobspwm import pytest def test_parse_display() -> None: """ Test various display strings """ with pytest.raises(ValueError): aiobspwm._parse_display(':1.1.1') assert aiobspwm._parse_display(':1') == ('', 1, 0) assert aiobspwm._parse_display('abc:0') == ('abc', 0, 0) assert aiobspwm._parse_display(':1.1') == ('', 1, 1) def test_make_bspwm_socket_path() -> None: """ Make various configurations of socket paths """ assert aiobspwm._make_socket_path('', 0, 0) == '/tmp/bspwm_0_0-socket' assert aiobspwm._make_socket_path('box', 1, 2) == '/tmp/bspwmbox_1_2-socket' def test_find_socket_simple(monkeypatch) -> None: """ Test the simple case of getting the bspwm socket in an env variable """ fakeenviron = { 'BSPWM_SOCKET': '/tmp/bspwm_0_0-socket' } monkeypatch.setattr('os.environ', fakeenviron) assert aiobspwm.find_socket() == '/tmp/bspwm_0_0-socket', 'simple case' def test_find_socket(monkeypatch) -> None: oldstat = os.stat def fakestat(path): if path != '/tmp/bspwmtest_1_2-socket': return oldstat(path) return os.stat_result((0,) * 10) monkeypatch.setattr('os.stat', fakestat) fakeenviron = { 'DISPLAY': 'test:1.2' } monkeypatch.setattr('os.environ', fakeenviron) with pytest.raises(RuntimeError): # test non-socket file aiobspwm.find_socket() def fakestat2(path): if path != '/tmp/bspwmtest_1_2-socket': return oldstat(path) res = [stat.S_IFSOCK] res += [0] * (10 - len(res)) return os.stat_result(res) monkeypatch.setattr('os.stat', fakestat2) assert aiobspwm.find_socket() == '/tmp/bspwmtest_1_2-socket', \ 'success case with DISPLAY' @pytest.mark.asyncio async def test_call(event_loop: asyncio.BaseEventLoop) -> None: """ Test calling operations """ requests = [] async def connect_cb(r: asyncio.StreamReader, w: asyncio.StreamWriter) -> None: nonlocal requests requests.append(await r.read(4096)) w.write(b'abc\n') await w.drain() w.write_eof() path = tempfile.mktemp() svr = await asyncio.start_unix_server(connect_cb, path=path, loop=event_loop) assert await aiobspwm.call(path, ['abc', 'def']) == 'abc' svr.close() await svr.wait_closed() assert requests[0] == b'abc\0def\0' # some heavily edited data out of a bspwm dump testdata = { 'focusedMonitorId': 6291457, 'monitors': [ {'borderWidth': 1, 'desktops': [ { 'borderWidth': 1, 'id': 6291459, 'layout': 'monocle', 'name': 'I', 'windowGap': 6 }, { 'borderWidth': 1, 'id': 6291460, 'layout': 'monocle', 'name': 'II', 'windowGap': 6 } ], 'focusedDesktopId': 6291460, 'id': 6291457, 'name': 'LVDS1', 'padding': {'bottom': 0, 'left': 0, 'right': 0, 'top': 20}, 'randrId': 66, 'rectangle': {'height': 768, 'width': 1366, 'x': 0, 'y': 0}, 'stickyCount': 0, 'windowGap': 6, 'wired': True }, {'borderWidth': 1, 'desktops': [ { 'borderWidth': 1, 'id': 1234, 'layout': 'monocle', 'name': 'test1', 'windowGap': 6 } ], 'focusedDesktopId': 1234, 'id': 12345678, 'name': 'monitor2', 'padding': {'bottom': 0, 'left': 0, 'right': 0, 'top': 20}, 'randrId': 67, 'rectangle': {'height': 768, 'width': 1366, 'x': 0, 'y': 0}, 'stickyCount': 0, 'windowGap': 6, 'wired': True } ], 'primaryMonitorId': 6291457, 'stackingList': [37748745, 33554441, 23068673, 29360137, 39845897, 31457289] } def test_initial_load(): """ Test loading a state dump into the WM class """ wm = aiobspwm.WM('/dev/null') wm._apply_initial_state(testdata) assert wm.focused_monitor in wm.monitors.values() for idx, monitor in wm.monitors.items(): assert monitor.id == idx assert monitor.name in ('LVDS1', 'monitor2') assert monitor.focused_desktop 
in monitor.desktops.values() for desk_idx, desk in monitor.desktops.items(): assert desk.id == desk_idx assert desk.layout in ('tiled', 'monocle') assert desk.name in ('I', 'II', 'test1') def test_wm_event(): """ Test incoming window management events TODO: add coverage for unsupported event logging (how is that even possible to do anyway??) """ evt_hook = mock.MagicMock() wm = aiobspwm.WM('/dev/null', evt_hook=evt_hook) wm._apply_initial_state(testdata) wm._on_wm_event('desktop_focus 0x00600001 0x00600003') evt_hook.assert_called_once_with('desktop_focus 0x00600001 0x00600003') mon_id = 0x00600001 desk_id = 0x00600003 assert wm.monitors[mon_id].focused_desktop == \ wm.monitors[mon_id].desktops[desk_id] wm._on_wm_event('desktop_layout 0x00600001 0x00600003 tiled') assert wm.monitors[mon_id].desktops[desk_id].layout == 'tiled'
[]
[]
[]
[]
[]
python
0
0
pkg/controller/mustgatherservice/mustgatherservice.go
// // Copyright 2020 IBM Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package mustgatherservice import ( "context" "os" "reflect" operatorv1alpha1 "github.com/IBM/ibm-healthcheck-operator/pkg/apis/operator/v1alpha1" common "github.com/IBM/ibm-healthcheck-operator/pkg/controller/common" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" extensionsv1 "k8s.io/api/extensions/v1beta1" storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" ) var gracePeriod = int64(60) var trueVar = true var falseVar = false var mustGatherResourceName = "must-gather-service" var commonSecurityContext = corev1.SecurityContext{ AllowPrivilegeEscalation: &falseVar, Privileged: &falseVar, ReadOnlyRootFilesystem: &trueVar, RunAsNonRoot: &trueVar, Capabilities: &corev1.Capabilities{ Drop: []corev1.Capability{ "ALL", }, }, } func (r *ReconcileMustGatherService) createOrUpdateMustGatherServiceStatefulSet(instance *operatorv1alpha1.MustGatherService) error { reqLogger := log.WithValues("MustGatherService.Namespace", instance.Namespace, "MustGatherService.Name", instance.Name) // Define a new StatefulSet desired := r.desiredMustGatherServiceStatefulset(instance) // Check if the StatefulSet already exists, if not create a new one current := &appsv1.StatefulSet{} err := r.client.Get(context.TODO(), types.NamespacedName{Name: mustGatherResourceName, Namespace: instance.Namespace}, current) if err != nil && errors.IsNotFound(err) { reqLogger.Info("Creating a new StatefulSet", "StatefulSet.Namespace", desired.Namespace, "StatefulSet.Name", desired.Name) if err := r.client.Create(context.TODO(), desired); err != nil { reqLogger.Error(err, "Failed to create new StatefulSet", "StatefulSet.Namespace", desired.Namespace, "StatefulSet.Name", desired.Name) return err } } else if err != nil { reqLogger.Error(err, "Failed to get StatefulSet", "StatefulSet.Namespace", current.Namespace, "StatefulSet.Name", current.Name) return err } else if err := r.updateMustGatherServiceStatefulSet(instance, current, desired); err != nil { return err } // Update the MustGatherService status with the pod names podList := &corev1.PodList{} listOpts := []client.ListOption{ client.InNamespace(instance.Namespace), client.MatchingLabels(labelsForMustGatherService(mustGatherResourceName, instance.Name)), } if err = r.client.List(context.TODO(), podList, listOpts...); err != nil { reqLogger.Error(err, "Failed to list pods", "instance.Namespace", instance.Namespace, "instance.Name", instance.Name) return err } podNames := common.GetPodNames(podList.Items) // Update status.MustGatherServiceNodes if needed if !reflect.DeepEqual(podNames, instance.Status.MustGatherServiceNodes) { instance.Status.MustGatherServiceNodes = podNames err := 
r.client.Status().Update(context.TODO(), instance) if err != nil { reqLogger.Error(err, "Failed to update MustGatherService status") return err } } return nil } func (r *ReconcileMustGatherService) updateMustGatherServiceStatefulSet(instance *operatorv1alpha1.MustGatherService, current, desired *appsv1.StatefulSet) error { reqLogger := log.WithValues("StatefulSet.Namespace", current.Namespace, "StatefulSet.Name", current.Name) updated := current.DeepCopy() updated.Spec.Replicas = desired.Spec.Replicas updated.Spec.Template.Spec.Containers = desired.Spec.Template.Spec.Containers updated.Spec.Template.Spec.Volumes = desired.Spec.Template.Spec.Volumes reqLogger.Info("Updating StatefulSet") // Set MustGatherService instance as the owner and controller if err := controllerutil.SetControllerReference(instance, updated, r.scheme); err != nil { reqLogger.Error(err, "SetControllerReference failed", "StatefulSet.Namespace", updated.Namespace, "StatefulSet.Name", updated.Name) } if err := r.client.Update(context.TODO(), updated); err != nil { reqLogger.Error(err, "Failed to update StatefulSet", "StatefulSet.Namespace", updated.Namespace, "StatefulSet.Name", updated.Name) return err } return nil } func (r *ReconcileMustGatherService) desiredMustGatherServiceStatefulset(instance *operatorv1alpha1.MustGatherService) *appsv1.StatefulSet { appName := "must-gather-service" labels := labelsForMustGatherService(appName, instance.Name) annotations := annotationsForMustGatherService() serviceAccountName := "ibm-healthcheck-operator" defaultCommand := []string{"/bin/must-gather-service", "-v", "1"} reqLogger := log.WithValues("MustGatherService.Namespace", instance.Namespace, "MustGatherService.Name", instance.Name) reqLogger.Info("Building MustGatherService StatefulSet", "StatefulSet.Namespace", instance.Namespace, "StatefulSet.Name", appName) appResources := common.GetResources(&instance.Spec.MustGather.Resources) appReplicas := int32(1) if instance.Spec.MustGather.Replicas > 0 { appReplicas = instance.Spec.MustGather.Replicas } dep := &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ Name: appName, Namespace: instance.Namespace, Labels: labels, }, Spec: appsv1.StatefulSetSpec{ Replicas: &appReplicas, Selector: &metav1.LabelSelector{ MatchLabels: labels, }, Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: labels, Annotations: annotations, }, Spec: corev1.PodSpec{ TerminationGracePeriodSeconds: &gracePeriod, HostNetwork: false, HostPID: false, HostIPC: false, ServiceAccountName: serviceAccountName, Containers: []corev1.Container{ { Name: appName, Image: os.Getenv("MUST_GATHER_SERVICE_IMAGE"), ImagePullPolicy: corev1.PullIfNotPresent, Command: defaultCommand, SecurityContext: &commonSecurityContext, Env: []corev1.EnvVar{ { Name: "POD_NAMESPACE", ValueFrom: &corev1.EnvVarSource{ FieldRef: &corev1.ObjectFieldSelector{ FieldPath: "metadata.namespace", }, }, }, { Name: "LOGLEVEL", Value: "1", }, }, Resources: *appResources, VolumeMounts: []corev1.VolumeMount{ { Name: "must-gather", MountPath: "/must-gather", }, }, LivenessProbe: &corev1.Probe{ Handler: corev1.Handler{ HTTPGet: &corev1.HTTPGetAction{ Port: intstr.IntOrString{Type: intstr.Int, IntVal: 6967}, Path: "/v1alpha1/healthz", Scheme: "HTTP", }, }, FailureThreshold: 3, InitialDelaySeconds: 10, PeriodSeconds: 10, SuccessThreshold: 1, TimeoutSeconds: 2, }, ReadinessProbe: &corev1.Probe{ Handler: corev1.Handler{ HTTPGet: &corev1.HTTPGetAction{ Port: intstr.IntOrString{Type: intstr.Int, IntVal: 6967}, Path: "/v1alpha1/healthz", Scheme: 
"HTTP", }, }, FailureThreshold: 1, InitialDelaySeconds: 10, PeriodSeconds: 10, SuccessThreshold: 1, TimeoutSeconds: 2, }, }, }, NodeSelector: map[string]string{ "node-role.kubernetes.io/worker": "", }, Tolerations: []corev1.Toleration{ { Key: "dedicated", Operator: corev1.TolerationOpExists, Effect: corev1.TaintEffectNoSchedule, }, { Key: "CriticalAddonsOnly", Operator: corev1.TolerationOpExists, }, }, Volumes: []corev1.Volume{ { Name: "must-gather", VolumeSource: corev1.VolumeSource{ PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ ClaimName: instance.Spec.PersistentVolumeClaim.Name, }, }, }, }, }, }, }, } // Set MustGatherService instance as the owner and controller if err := controllerutil.SetControllerReference(instance, dep, r.scheme); err != nil { reqLogger.Error(err, "SetControllerReference failed", "StatefulSet.Namespace", instance.Namespace, "StatefulSet.Name", appName) } return dep } func (r *ReconcileMustGatherService) createOrUpdateMustGatherServiceService(instance *operatorv1alpha1.MustGatherService) error { appName := mustGatherResourceName reqLogger := log.WithValues("MustGatherService.Namespace", instance.Namespace, "MustGatherService.Name", instance.Name) // Define a new service desired := r.desiredMustGatherServiceService(instance) // Check if the service already exists, if not create a new one current := &corev1.Service{} err := r.client.Get(context.TODO(), types.NamespacedName{Name: appName, Namespace: instance.Namespace}, current) if err != nil && errors.IsNotFound(err) { reqLogger.Info("Creating a new Service", "Service.Namespace", desired.Namespace, "Service.Name", desired.Name) if err := r.client.Create(context.TODO(), desired); err != nil { reqLogger.Error(err, "Failed to create new Service", "Service.Namespace", desired.Namespace, "Service.Name", desired.Name) return err } } else if err != nil { reqLogger.Error(err, "Failed to get Service", "Service.Namespace", current.Namespace, "Service.Name", current.Name) return err } else if err := r.updateMustGatherServiceService(instance, current, desired); err != nil { return err } return nil } func (r *ReconcileMustGatherService) updateMustGatherServiceService(instance *operatorv1alpha1.MustGatherService, current, desired *corev1.Service) error { reqLogger := log.WithValues("Service.Namespace", current.Namespace, "Service.Name", current.Name) updated := current.DeepCopy() updated.ObjectMeta.Labels = desired.ObjectMeta.Labels updated.Spec.Ports = desired.Spec.Ports updated.Spec.Selector = desired.Spec.Selector updated.Spec.Type = desired.Spec.Type reqLogger.Info("Updating Service") // Set MustGatherService instance as the owner and controller if err := controllerutil.SetControllerReference(instance, updated, r.scheme); err != nil { reqLogger.Error(err, "SetControllerReference failed", "Service.Namespace", updated.Namespace, "Service.Name", updated.Name) } if err := r.client.Update(context.TODO(), updated); err != nil { reqLogger.Error(err, "Failed to update Service", "Service.Namespace", updated.Namespace, "Service.Name", updated.Name) return err } return nil } func (r *ReconcileMustGatherService) desiredMustGatherServiceService(instance *operatorv1alpha1.MustGatherService) *corev1.Service { appName := mustGatherResourceName labels := labelsForMustGatherService(appName, instance.Name) reqLogger := log.WithValues("MustGatherService.Namespace", instance.Namespace, "MustGatherService.Name", instance.Name) reqLogger.Info("Building MustGatherService Service", "Service.Namespace", instance.Namespace, 
"Service.Name", appName) svc := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: appName, Namespace: instance.Namespace, Labels: labels, ResourceVersion: "", }, Spec: corev1.ServiceSpec{ Ports: []corev1.ServicePort{ { Port: 6967, TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: 6967}, }, }, Selector: labels, Type: corev1.ServiceTypeClusterIP, }, } // Set MustGatherService instance as the owner and controller if err := controllerutil.SetControllerReference(instance, svc, r.scheme); err != nil { reqLogger.Error(err, "SetControllerReference failed", "Service.Namespace", instance.Namespace, "Service.Name", appName) } return svc } func (r *ReconcileMustGatherService) createOrUpdateMustGatherServiceIngress(instance *operatorv1alpha1.MustGatherService) error { appName := mustGatherResourceName reqLogger := log.WithValues("MustGatherService.Namespace", instance.Namespace, "MustGatherService.Name", instance.Name) // Define a new ingress desired := r.desiredMustGatherServiceIngress(instance) // Check if the ingress already exists, if not create a new one current := &extensionsv1.Ingress{} err := r.client.Get(context.TODO(), types.NamespacedName{Name: appName, Namespace: instance.Namespace}, current) if err != nil && errors.IsNotFound(err) { reqLogger.Info("Creating a new Ingress", "Ingress.Namespace", desired.Namespace, "Ingress.Name", desired.Name) if err := r.client.Create(context.TODO(), desired); err != nil { reqLogger.Error(err, "Failed to create new Ingress", "Ingress.Namespace", desired.Namespace, "Ingress.Name", desired.Name) return err } } else if err != nil { reqLogger.Error(err, "Failed to get Ingress", "Ingress.Namespace", current.Namespace, "Ingress.Name", current.Name) return err } else if err := r.updateMustGatherServiceIngress(instance, current, desired); err != nil { return err } return nil } func (r *ReconcileMustGatherService) updateMustGatherServiceIngress(instance *operatorv1alpha1.MustGatherService, current, desired *extensionsv1.Ingress) error { reqLogger := log.WithValues("Ingress.Namespace", current.Namespace, "Ingress.Name", current.Name) updated := current.DeepCopy() updated.ObjectMeta.Labels = desired.ObjectMeta.Labels updated.ObjectMeta.Annotations = desired.ObjectMeta.Annotations updated.Spec.Rules = desired.Spec.Rules reqLogger.Info("Updating Ingress") // Set MustGatherService instance as the owner and controller if err := controllerutil.SetControllerReference(instance, updated, r.scheme); err != nil { reqLogger.Error(err, "SetControllerReference failed", "Ingress.Namespace", updated.Namespace, "Ingress.Name", updated.Name) } if err := r.client.Update(context.TODO(), updated); err != nil { reqLogger.Error(err, "Failed to update Ingress", "Ingress.Namespace", updated.Namespace, "Ingress.Name", updated.Name) return err } return nil } func (r *ReconcileMustGatherService) desiredMustGatherServiceIngress(instance *operatorv1alpha1.MustGatherService) *extensionsv1.Ingress { appName := mustGatherResourceName labels := labelsForMustGatherService(appName, instance.Name) annotations := annotationsForMustGatherServiceIngress() reqLogger := log.WithValues("MustGatherService.Namespace", instance.Namespace, "MustGatherService.Name", instance.Name) reqLogger.Info("Building MustGatherService Ingress", "Ingress.Namespace", instance.Namespace, "Ingress.Name", appName) ing := &extensionsv1.Ingress{ ObjectMeta: metav1.ObjectMeta{ Name: appName, Namespace: instance.Namespace, Labels: labels, Annotations: annotations, }, Spec: extensionsv1.IngressSpec{ Rules: 
[]extensionsv1.IngressRule{ { IngressRuleValue: extensionsv1.IngressRuleValue{ HTTP: &extensionsv1.HTTPIngressRuleValue{ Paths: []extensionsv1.HTTPIngressPath{ { Path: "/must-gather/", Backend: extensionsv1.IngressBackend{ ServiceName: appName, ServicePort: intstr.IntOrString{Type: intstr.Int, IntVal: 6967}}, }, }, }, }, }, }, }, } // Set MustGatherService instance as the owner and controller if err := controllerutil.SetControllerReference(instance, ing, r.scheme); err != nil { reqLogger.Error(err, "SetControllerReference failed", "Ingress.Namespace", instance.Namespace, "Ingress.Name", appName) } return ing } func (r *ReconcileMustGatherService) createOrUpdateMustGatherServicePVC(instance *operatorv1alpha1.MustGatherService) error { pvcName := instance.Spec.PersistentVolumeClaim.Name reqLogger := log.WithValues("MustGatherService.Namespace", instance.Namespace, "MustGatherService.Name", instance.Name) // Define must gather persistence storage desired := r.desiredMustGatherServicePVC(instance) // Check if this pvc already exists current := &corev1.PersistentVolumeClaim{} err := r.client.Get(context.TODO(), types.NamespacedName{Name: pvcName, Namespace: instance.Namespace}, current) if err != nil && errors.IsNotFound(err) { reqLogger.Info("Creating a new pvc", "pvc.Namespace", desired.Namespace, "pvc.Name", desired.Name) err = r.client.Create(context.TODO(), desired) if err != nil { return err } // pvc created successfully - don't requeue return nil } else if err != nil { reqLogger.Error(err, "Failed to get pvc", "pvc.Namespace", current.Namespace, "pvc.Name", current.Name) return err } else { // pvc already exists - don't requeue reqLogger.Info("Skip reconcile: pvc already exists", "pvc.Namespace", current.Namespace, "pvc.Name", current.Name) return nil } } // newMustGatherPVC create a pvc for must gather service func (r *ReconcileMustGatherService) desiredMustGatherServicePVC(instance *operatorv1alpha1.MustGatherService) *corev1.PersistentVolumeClaim { var storageClassName string var storageRequest resource.Quantity reqLogger := log.WithValues("MustGatherService.Namespace", instance.Namespace, "MustGatherService.Name", instance.Name) reqLogger.Info("Building MustGatherService PVC", "PVC.Namespace", instance.Namespace, "PVC.Name", instance.Spec.PersistentVolumeClaim.Name) if instance.Spec.PersistentVolumeClaim.StorageClassName != "" { storageClassName = instance.Spec.PersistentVolumeClaim.StorageClassName } else { storageClassName = r.getDefaultStorageClass() } if val, ok := instance.Spec.PersistentVolumeClaim.Resources.Requests[v1.ResourceStorage]; ok { storageRequest = val } else { storageRequest = resource.MustParse("2Gi") } pvc := &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: instance.Spec.PersistentVolumeClaim.Name, Namespace: instance.Namespace, Labels: labelsForMustGatherService(instance.Spec.PersistentVolumeClaim.Name, instance.Name), Annotations: annotationsForMustGatherService(), }, Spec: corev1.PersistentVolumeClaimSpec{ AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, Resources: corev1.ResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceStorage: storageRequest, }, }, StorageClassName: &storageClassName, }, } // Set MustGatherService instance as the owner and controller if err := controllerutil.SetControllerReference(instance, pvc, r.scheme); err != nil { reqLogger.Error(err, "SetControllerReference failed", "pvc.Namespace", instance.Namespace, "pvc.Name", instance.Spec.PersistentVolumeClaim.Name) } return pvc } func 
(r *ReconcileMustGatherService) getDefaultStorageClass() string { scList := &storagev1.StorageClassList{} err := r.reader.List(context.TODO(), scList) if err != nil { return "" } if len(scList.Items) == 0 { return "" } var defaultSC []string var nonDefaultSC []string for _, sc := range scList.Items { if sc.Provisioner == "kubernetes.io/no-provisioner" { continue } if sc.ObjectMeta.GetAnnotations()["storageclass.kubernetes.io/is-default-class"] == "true" { defaultSC = append(defaultSC, sc.GetName()) continue } nonDefaultSC = append(nonDefaultSC, sc.GetName()) } if len(defaultSC) != 0 { return defaultSC[0] } if len(nonDefaultSC) != 0 { return nonDefaultSC[0] } return "" } func labelsForMustGatherService(name string, releaseName string) map[string]string { return map[string]string{ "app": name, "release": releaseName, "app.kubernetes.io/name": name, "app.kubernetes.io/instance": releaseName, "app.kubernetes.io/managed-by": "", } } func annotationsForMustGatherService() map[string]string { return map[string]string{ "productName": "IBM Cloud Platform Common Services", "productID": "068a62892a1e4db39641342e592daa25", "productMetric": "FREE", } } func annotationsForMustGatherServiceIngress() map[string]string { return map[string]string{ "kubernetes.io/ingress.class": "ibm-icp-management", "icp.management.ibm.com/rewrite-target": "/", "icp.management.ibm.com/configuration-snippet": `add_header Cache-Control "no-cache, no-store, must-revalidate"; add_header Pragma no-cache; add_header Expires 0; add_header X-Frame-Options "SAMEORIGIN"; add_header X-Content-Type-Options nosniff; add_header X-XSS-Protection "1; mode=block";`, } }
[ "\"MUST_GATHER_SERVICE_IMAGE\"" ]
[]
[ "MUST_GATHER_SERVICE_IMAGE" ]
[]
["MUST_GATHER_SERVICE_IMAGE"]
go
1
0
examples/service/flex/plugin_release/fetch/plugin_release_fetch_example.go
package main import ( "log" "os" "github.com/RJPearson94/twilio-sdk-go" v1 "github.com/RJPearson94/twilio-sdk-go/service/flex/v1" "github.com/RJPearson94/twilio-sdk-go/session/credentials" ) var flexClient *v1.Flex func init() { creds, err := credentials.New(credentials.Account{ Sid: os.Getenv("TWILIO_ACCOUNT_SID"), AuthToken: os.Getenv("TWILIO_AUTH_TOKEN"), }) if err != nil { log.Panicf("%s", err.Error()) } flexClient = twilio.NewWithCredentials(creds).Flex.V1 } func main() { resp, err := flexClient. PluginRelease("FKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"). Fetch() if err != nil { log.Panicf("%s", err.Error()) } log.Printf("SID: %s", resp.Sid) }
[ "\"TWILIO_ACCOUNT_SID\"", "\"TWILIO_AUTH_TOKEN\"" ]
[]
[ "TWILIO_AUTH_TOKEN", "TWILIO_ACCOUNT_SID" ]
[]
["TWILIO_AUTH_TOKEN", "TWILIO_ACCOUNT_SID"]
go
2
0
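Aside (illustrative only, not a record of this corpus): the bracketed lists above capture the literal environment-variable names that the preceding Go snippet passes to os.Getenv, once as quoted source literals and once as plain strings. Below is a minimal sketch, under stated assumptions, of how such constant arguments could be collected from Go source using only the standard go/ast, go/parser, and go/token packages; the helper name findConstGetenvArgs and the embedded sample source are hypothetical and not taken from the corpus.

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"strings"
)

// findConstGetenvArgs is an illustrative, hypothetical helper: it parses Go
// source and collects the string literals passed to os.Getenv, i.e. the
// constant environment-variable names the code reads.
func findConstGetenvArgs(src string) []string {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "example.go", src, 0)
	if err != nil {
		return nil
	}
	var names []string
	ast.Inspect(file, func(n ast.Node) bool {
		call, ok := n.(*ast.CallExpr)
		if !ok {
			return true
		}
		sel, ok := call.Fun.(*ast.SelectorExpr)
		if !ok || sel.Sel.Name != "Getenv" {
			return true
		}
		pkg, ok := sel.X.(*ast.Ident)
		if !ok || pkg.Name != "os" {
			return true
		}
		for _, arg := range call.Args {
			if lit, ok := arg.(*ast.BasicLit); ok && lit.Kind == token.STRING {
				names = append(names, strings.Trim(lit.Value, `"`))
			}
		}
		return true
	})
	return names
}

func main() {
	// Hypothetical sample source, mirroring the kind of os.Getenv calls
	// seen in the record above.
	src := `package main
import "os"
func main() {
	_ = os.Getenv("TWILIO_ACCOUNT_SID")
	_ = os.Getenv("TWILIO_AUTH_TOKEN")
}`
	fmt.Println(findConstGetenvArgs(src))
	// Prints: [TWILIO_ACCOUNT_SID TWILIO_AUTH_TOKEN]
}

Restricting the walk to os.Getenv calls whose argument is a string literal is what keeps the result limited to constant names; dynamically computed variable names would need separate handling.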
mesonbuild/interpreter/interpreter.py
# Copyright 2012-2021 The Meson development team # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations from .. import mparser from .. import environment from .. import coredata from .. import dependencies from .. import mlog from .. import build from .. import optinterpreter from .. import compilers from .. import envconfig from ..wrap import wrap, WrapMode from .. import mesonlib from ..mesonlib import MesonBugException, HoldableObject, FileMode, MachineChoice, OptionKey, listify, extract_as_list, has_path_sep from ..programs import ExternalProgram, NonExistingExternalProgram from ..dependencies import Dependency from ..depfile import DepFile from ..interpreterbase import ContainerTypeInfo, InterpreterBase, KwargInfo, typed_kwargs, typed_pos_args from ..interpreterbase import noPosargs, noKwargs, permittedKwargs, noArgsFlattening, noSecondLevelHolderResolving, unholder_return from ..interpreterbase import InterpreterException, InvalidArguments, InvalidCode, SubdirDoneRequest from ..interpreterbase import Disabler, disablerIfNotFound from ..interpreterbase import FeatureNew, FeatureDeprecated, FeatureNewKwargs, FeatureDeprecatedKwargs from ..interpreterbase import ObjectHolder from ..modules import ExtensionModule, ModuleObject, MutableModuleObject, NewExtensionModule, NotFoundExtensionModule from ..cmake import CMakeInterpreter from ..backend.backends import ExecutableSerialisation from . import interpreterobjects as OBJ from . import compiler as compilerOBJ from .mesonmain import MesonMain from .dependencyfallbacks import DependencyFallbacksHolder from .interpreterobjects import ( SubprojectHolder, Test, RunProcess, extract_required_kwarg, extract_search_dirs, NullSubprojectInterpreter, ) from .type_checking import ( COMMAND_KW, CT_BUILD_ALWAYS, CT_BUILD_ALWAYS_STALE, CT_BUILD_BY_DEFAULT, CT_INPUT_KW, CT_INSTALL_DIR_KW, CT_OUTPUT_KW, DEFAULT_OPTIONS, DEPENDS_KW, DEPEND_FILES_KW, DEPFILE_KW, DISABLER_KW, ENV_KW, ENV_METHOD_KW, ENV_SEPARATOR_KW, INSTALL_KW, INSTALL_MODE_KW, CT_INSTALL_TAG_KW, INSTALL_TAG_KW, LANGUAGE_KW, NATIVE_KW, OVERRIDE_OPTIONS_KW, REQUIRED_KW, NoneType, in_set_validator, env_convertor_with_method ) from . import primitives as P_OBJ from pathlib import Path import os import shutil import uuid import re import stat import collections import typing as T import textwrap import importlib if T.TYPE_CHECKING: import argparse from typing_extensions import Literal from . 
import kwargs from ..backend.backends import Backend from ..interpreterbase.baseobjects import InterpreterObject, TYPE_var, TYPE_kwargs from ..programs import OverrideProgram # Input source types passed to Targets SourceInputs = T.Union[mesonlib.File, build.GeneratedList, build.BuildTarget, build.BothLibraries, build.CustomTargetIndex, build.CustomTarget, build.GeneratedList, build.ExtractedObjects, str] # Input source types passed to the build.Target classes SourceOutputs = T.Union[mesonlib.File, build.GeneratedList, build.BuildTarget, build.CustomTargetIndex, build.CustomTarget, build.ExtractedObjects, build.GeneratedList, build.StructuredSources] def _project_version_validator(value: T.Union[T.List, str, mesonlib.File, None]) -> T.Optional[str]: if isinstance(value, list): if len(value) != 1: return 'when passed as array must have a length of 1' elif not isinstance(value[0], mesonlib.File): return 'when passed as array must contain a File' return None def stringifyUserArguments(args: T.List[T.Any], quote: bool = False) -> str: if isinstance(args, list): return '[%s]' % ', '.join([stringifyUserArguments(x, True) for x in args]) elif isinstance(args, dict): return '{%s}' % ', '.join(['{} : {}'.format(stringifyUserArguments(k, True), stringifyUserArguments(v, True)) for k, v in args.items()]) elif isinstance(args, bool): return 'true' if args else 'false' elif isinstance(args, int): return str(args) elif isinstance(args, str): return f"'{args}'" if quote else args raise InvalidArguments('Function accepts only strings, integers, bools, lists, dictionaries and lists thereof.') class Summary: def __init__(self, project_name: str, project_version: str): self.project_name = project_name self.project_version = project_version self.sections = collections.defaultdict(dict) self.max_key_len = 0 def add_section(self, section: str, values: T.Dict[str, T.Any], bool_yn: bool, list_sep: T.Optional[str], subproject: str) -> None: for k, v in values.items(): if k in self.sections[section]: raise InterpreterException(f'Summary section {section!r} already have key {k!r}') formatted_values = [] for i in listify(v): if isinstance(i, bool) and bool_yn: formatted_values.append(mlog.green('YES') if i else mlog.red('NO')) elif isinstance(i, (str, int, bool)): formatted_values.append(str(i)) elif isinstance(i, (ExternalProgram, Dependency)): FeatureNew.single_use('dependency or external program in summary', '0.57.0', subproject) formatted_values.append(i.summary_value()) elif isinstance(i, coredata.UserOption): FeatureNew.single_use('feature option in summary', '0.58.0', subproject) formatted_values.append(i.printable_value()) else: m = 'Summary value in section {!r}, key {!r}, must be string, integer, boolean, dependency or external program' raise InterpreterException(m.format(section, k)) self.sections[section][k] = (formatted_values, list_sep) self.max_key_len = max(self.max_key_len, len(k)) def dump(self): mlog.log(self.project_name, mlog.normal_cyan(self.project_version)) for section, values in self.sections.items(): mlog.log('') # newline if section: mlog.log(' ', mlog.bold(section)) for k, v in values.items(): v, list_sep = v padding = self.max_key_len - len(k) end = ' ' if v else '' mlog.log(' ' * 3, k + ' ' * padding + ':', end=end) indent = self.max_key_len + 6 self.dump_value(v, list_sep, indent) mlog.log('') # newline def dump_value(self, arr, list_sep, indent): lines_sep = '\n' + ' ' * indent if list_sep is None: mlog.log(*arr, sep=lines_sep) return max_len = shutil.get_terminal_size().columns line = 
[] line_len = indent lines_sep = list_sep.rstrip() + lines_sep for v in arr: v_len = len(v) + len(list_sep) if line and line_len + v_len > max_len: mlog.log(*line, sep=list_sep, end=lines_sep) line_len = indent line = [] line.append(v) line_len += v_len mlog.log(*line, sep=list_sep) known_library_kwargs = ( build.known_shlib_kwargs | build.known_stlib_kwargs ) known_build_target_kwargs = ( known_library_kwargs | build.known_exe_kwargs | build.known_jar_kwargs | {'target_type'} ) TEST_KWARGS: T.List[KwargInfo] = [ KwargInfo('args', ContainerTypeInfo(list, (str, mesonlib.File, build.BuildTarget, build.CustomTarget, build.CustomTargetIndex)), listify=True, default=[]), KwargInfo('should_fail', bool, default=False), KwargInfo('timeout', int, default=30), KwargInfo('workdir', (str, NoneType), default=None, validator=lambda x: 'must be an absolute path' if not os.path.isabs(x) else None), KwargInfo('protocol', str, default='exitcode', validator=in_set_validator({'exitcode', 'tap', 'gtest', 'rust'}), since_values={'gtest': '0.55.0', 'rust': '0.57.0'}), KwargInfo('priority', int, default=0, since='0.52.0'), # TODO: env needs reworks of the way the environment variable holder itself works probably ENV_KW, DEPENDS_KW.evolve(since='0.46.0'), KwargInfo('suite', ContainerTypeInfo(list, str), listify=True, default=['']), # yes, a list of empty string KwargInfo('verbose', bool, default=False, since='0.62.0'), ] permitted_dependency_kwargs = { 'allow_fallback', 'cmake_args', 'cmake_module_path', 'cmake_package_version', 'components', 'default_options', 'fallback', 'include_type', 'language', 'main', 'method', 'modules', 'native', 'not_found_message', 'optional_modules', 'private_headers', 'required', 'static', 'version', } implicit_check_false_warning = """You should add the boolean check kwarg to the run_command call. It currently defaults to false, but it will default to true in future releases of meson. See also: https://github.com/mesonbuild/meson/issues/9300""" class Interpreter(InterpreterBase, HoldableObject): def __init__( self, _build: build.Build, backend: T.Optional[Backend] = None, subproject: str = '', subdir: str = '', subproject_dir: str = 'subprojects', default_project_options: T.Optional[T.Dict[OptionKey, str]] = None, mock: bool = False, ast: T.Optional[mparser.CodeBlockNode] = None, is_translated: bool = False, user_defined_options: T.Optional['argparse.Namespace'] = None, ) -> None: super().__init__(_build.environment.get_source_dir(), subdir, subproject) self.active_projectname = '' self.build = _build self.environment = self.build.environment self.coredata = self.environment.get_coredata() self.backend = backend self.summary: T.Dict[str, 'Summary'] = {} self.modules: T.Dict[str, NewExtensionModule] = {} # Subproject directory is usually the name of the subproject, but can # be different for dependencies provided by wrap files. 
self.subproject_directory_name = subdir.split(os.path.sep)[-1] self.subproject_dir = subproject_dir self.option_file = os.path.join(self.source_root, self.subdir, 'meson_options.txt') if not mock and ast is None: self.load_root_meson_file() self.sanity_check_ast() elif ast is not None: self.ast = ast self.sanity_check_ast() self.builtin.update({'meson': MesonMain(self.build, self)}) self.generators: T.List[build.Generator] = [] self.processed_buildfiles = set() # type: T.Set[str] self.project_args_frozen = False self.global_args_frozen = False # implies self.project_args_frozen self.subprojects: T.Dict[str, SubprojectHolder] = {} self.subproject_stack: T.List[str] = [] self.configure_file_outputs: T.Dict[str, int] = {} # Passed from the outside, only used in subprojects. if default_project_options: self.default_project_options = default_project_options.copy() else: self.default_project_options = {} self.project_default_options: T.Dict[OptionKey, str] = {} self.build_func_dict() self.build_holder_map() self.user_defined_options = user_defined_options # build_def_files needs to be defined before parse_project is called # # For non-meson subprojects, we'll be using the ast. Even if it does # exist we don't want to add a dependency on it, it's autogenerated # from the actual build files, and is just for reference. self.build_def_files: mesonlib.OrderedSet[str] = mesonlib.OrderedSet() build_filename = os.path.join(self.subdir, environment.build_filename) if not is_translated: self.build_def_files.add(build_filename) if not mock: self.parse_project() self._redetect_machines() def __getnewargs_ex__(self) -> T.Tuple[T.Tuple[object], T.Dict[str, object]]: raise MesonBugException('This class is unpicklable') def _redetect_machines(self) -> None: # Re-initialize machine descriptions. We can do a better job now because we # have the compilers needed to gain more knowledge, so wipe out old # inference and start over. 
machines = self.build.environment.machines.miss_defaulting() machines.build = environment.detect_machine_info(self.coredata.compilers.build) self.build.environment.machines = machines.default_missing() assert self.build.environment.machines.build.cpu is not None assert self.build.environment.machines.host.cpu is not None assert self.build.environment.machines.target.cpu is not None self.builtin['build_machine'] = \ OBJ.MachineHolder(self.build.environment.machines.build, self) self.builtin['host_machine'] = \ OBJ.MachineHolder(self.build.environment.machines.host, self) self.builtin['target_machine'] = \ OBJ.MachineHolder(self.build.environment.machines.target, self) def build_func_dict(self) -> None: self.funcs.update({'add_global_arguments': self.func_add_global_arguments, 'add_global_link_arguments': self.func_add_global_link_arguments, 'add_languages': self.func_add_languages, 'add_project_arguments': self.func_add_project_arguments, 'add_project_link_arguments': self.func_add_project_link_arguments, 'add_test_setup': self.func_add_test_setup, 'alias_target': self.func_alias_target, 'assert': self.func_assert, 'benchmark': self.func_benchmark, 'both_libraries': self.func_both_lib, 'build_target': self.func_build_target, 'configuration_data': self.func_configuration_data, 'configure_file': self.func_configure_file, 'custom_target': self.func_custom_target, 'declare_dependency': self.func_declare_dependency, 'dependency': self.func_dependency, 'disabler': self.func_disabler, 'environment': self.func_environment, 'error': self.func_error, 'executable': self.func_executable, 'files': self.func_files, 'find_library': self.func_find_library, 'find_program': self.func_find_program, 'generator': self.func_generator, 'get_option': self.func_get_option, 'get_variable': self.func_get_variable, 'gettext': self.func_gettext, 'import': self.func_import, 'include_directories': self.func_include_directories, 'install_data': self.func_install_data, 'install_emptydir': self.func_install_emptydir, 'install_headers': self.func_install_headers, 'install_man': self.func_install_man, 'install_subdir': self.func_install_subdir, 'install_symlink': self.func_install_symlink, 'is_disabler': self.func_is_disabler, 'is_variable': self.func_is_variable, 'jar': self.func_jar, 'join_paths': self.func_join_paths, 'library': self.func_library, 'message': self.func_message, 'option': self.func_option, 'project': self.func_project, 'range': self.func_range, 'run_command': self.func_run_command, 'run_target': self.func_run_target, 'set_variable': self.func_set_variable, 'structured_sources': self.func_structured_sources, 'subdir': self.func_subdir, 'shared_library': self.func_shared_lib, 'shared_module': self.func_shared_module, 'static_library': self.func_static_lib, 'subdir_done': self.func_subdir_done, 'subproject': self.func_subproject, 'summary': self.func_summary, 'test': self.func_test, 'unset_variable': self.func_unset_variable, 'vcs_tag': self.func_vcs_tag, 'warning': self.func_warning, }) if 'MESON_UNIT_TEST' in os.environ: self.funcs.update({'exception': self.func_exception}) def build_holder_map(self) -> None: ''' Build a mapping of `HoldableObject` types to their corresponding `ObjectHolder`s. This mapping is used in `InterpreterBase` to automatically holderify all returned values from methods and functions. 
''' self.holder_map.update({ # Primitives list: P_OBJ.ArrayHolder, dict: P_OBJ.DictHolder, int: P_OBJ.IntegerHolder, bool: P_OBJ.BooleanHolder, str: P_OBJ.StringHolder, P_OBJ.MesonVersionString: P_OBJ.MesonVersionStringHolder, # Meson types mesonlib.File: OBJ.FileHolder, build.SharedLibrary: OBJ.SharedLibraryHolder, build.StaticLibrary: OBJ.StaticLibraryHolder, build.BothLibraries: OBJ.BothLibrariesHolder, build.SharedModule: OBJ.SharedModuleHolder, build.Executable: OBJ.ExecutableHolder, build.Jar: OBJ.JarHolder, build.CustomTarget: OBJ.CustomTargetHolder, build.CustomTargetIndex: OBJ.CustomTargetIndexHolder, build.Generator: OBJ.GeneratorHolder, build.GeneratedList: OBJ.GeneratedListHolder, build.ExtractedObjects: OBJ.GeneratedObjectsHolder, build.RunTarget: OBJ.RunTargetHolder, build.AliasTarget: OBJ.AliasTargetHolder, build.Headers: OBJ.HeadersHolder, build.Man: OBJ.ManHolder, build.EmptyDir: OBJ.EmptyDirHolder, build.Data: OBJ.DataHolder, build.SymlinkData: OBJ.SymlinkDataHolder, build.InstallDir: OBJ.InstallDirHolder, build.IncludeDirs: OBJ.IncludeDirsHolder, build.EnvironmentVariables: OBJ.EnvironmentVariablesHolder, build.StructuredSources: OBJ.StructuredSourcesHolder, compilers.RunResult: compilerOBJ.TryRunResultHolder, dependencies.ExternalLibrary: OBJ.ExternalLibraryHolder, coredata.UserFeatureOption: OBJ.FeatureOptionHolder, envconfig.MachineInfo: OBJ.MachineHolder, build.ConfigurationData: OBJ.ConfigurationDataHolder, }) ''' Build a mapping of `HoldableObject` base classes to their corresponding `ObjectHolder`s. The difference to `self.holder_map` is that the keys here define an upper bound instead of requiring an exact match. The mappings defined here are only used when there was no direct hit found in `self.holder_map`. ''' self.bound_holder_map.update({ dependencies.Dependency: OBJ.DependencyHolder, ExternalProgram: OBJ.ExternalProgramHolder, compilers.Compiler: compilerOBJ.CompilerHolder, ModuleObject: OBJ.ModuleObjectHolder, MutableModuleObject: OBJ.MutableModuleObjectHolder, }) def append_holder_map(self, held_type: T.Type[mesonlib.HoldableObject], holder_type: T.Type[ObjectHolder]) -> None: ''' Adds one additional mapping to the `holder_map`. The intended use for this function is in the `initialize` method of modules to register custom object holders. 
''' self.holder_map.update({ held_type: holder_type }) def process_new_values(self, invalues: T.List[T.Union[TYPE_var, ExecutableSerialisation]]) -> None: invalues = listify(invalues) for v in invalues: if isinstance(v, ObjectHolder): raise InterpreterException('Modules must not return ObjectHolders') if isinstance(v, (build.BuildTarget, build.CustomTarget, build.RunTarget)): self.add_target(v.name, v) elif isinstance(v, list): self.process_new_values(v) elif isinstance(v, ExecutableSerialisation): v.subproject = self.subproject self.build.install_scripts.append(v) elif isinstance(v, build.Data): self.build.data.append(v) elif isinstance(v, build.SymlinkData): self.build.symlinks.append(v) elif isinstance(v, dependencies.InternalDependency): # FIXME: This is special cased and not ideal: # The first source is our new VapiTarget, the rest are deps self.process_new_values(v.sources[0]) elif isinstance(v, build.InstallDir): self.build.install_dirs.append(v) elif isinstance(v, Test): self.build.tests.append(v) elif isinstance(v, (int, str, bool, Disabler, ObjectHolder, build.GeneratedList, ExternalProgram, build.ConfigurationData)): pass else: raise InterpreterException(f'Module returned a value of unknown type {v!r}.') def get_build_def_files(self) -> mesonlib.OrderedSet[str]: return self.build_def_files def add_build_def_file(self, f: mesonlib.FileOrString) -> None: # Use relative path for files within source directory, and absolute path # for system files. Skip files within build directory. Also skip not regular # files (e.g. /dev/stdout) Normalize the path to avoid duplicates, this # is especially important to convert '/' to '\' on Windows. if isinstance(f, mesonlib.File): if f.is_built: return f = os.path.normpath(f.relative_name()) elif os.path.isfile(f) and not f.startswith('/dev'): srcdir = Path(self.environment.get_source_dir()) builddir = Path(self.environment.get_build_dir()) try: f_ = Path(f).resolve() except OSError: f_ = Path(f) s = f_.stat() if (hasattr(s, 'st_file_attributes') and s.st_file_attributes & stat.FILE_ATTRIBUTE_REPARSE_POINT != 0 and s.st_reparse_tag == stat.IO_REPARSE_TAG_APPEXECLINK): # This is a Windows Store link which we can't # resolve, so just do our best otherwise. f_ = f_.parent.resolve() / f_.name else: raise if builddir in f_.parents: return if srcdir in f_.parents: f_ = f_.relative_to(srcdir) f = str(f_) else: return if f not in self.build_def_files: self.build_def_files.add(f) def get_variables(self) -> T.Dict[str, InterpreterObject]: return self.variables def check_stdlibs(self) -> None: machine_choices = [MachineChoice.HOST] if self.coredata.is_cross_build(): machine_choices.append(MachineChoice.BUILD) for for_machine in machine_choices: props = self.build.environment.properties[for_machine] for l in self.coredata.compilers[for_machine].keys(): try: di = mesonlib.stringlistify(props.get_stdlib(l)) except KeyError: continue if len(di) == 1: FeatureNew.single_use('stdlib without variable name', '0.56.0', self.subproject, location=self.current_node) kwargs = {'native': for_machine is MachineChoice.BUILD, } name = l + '_stdlib' df = DependencyFallbacksHolder(self, [name]) df.set_fallback(di) dep = df.lookup(kwargs, force_fallback=True) self.build.stdlibs[for_machine][l] = dep def _import_module(self, modname: str, required: bool) -> NewExtensionModule: if modname in self.modules: return self.modules[modname] try: module = importlib.import_module('mesonbuild.modules.' 
+ modname) except ImportError: if required: raise InvalidArguments(f'Module "{modname}" does not exist') ext_module = NotFoundExtensionModule() else: ext_module = module.initialize(self) assert isinstance(ext_module, (ExtensionModule, NewExtensionModule)) self.build.modules.append(modname) self.modules[modname] = ext_module return ext_module @typed_pos_args('import', str) @typed_kwargs( 'import', REQUIRED_KW.evolve(since='0.59.0'), DISABLER_KW.evolve(since='0.59.0'), ) @disablerIfNotFound def func_import(self, node: mparser.BaseNode, args: T.Tuple[str], kwargs: 'kwargs.FuncImportModule') -> T.Union[ExtensionModule, NewExtensionModule, NotFoundExtensionModule]: modname = args[0] disabled, required, _ = extract_required_kwarg(kwargs, self.subproject) if disabled: return NotFoundExtensionModule() if modname.startswith('unstable-'): plainname = modname.split('-', 1)[1] try: # check if stable module exists mod = self._import_module(plainname, required) # XXX: this is actually not helpful, since it doesn't do a version check mlog.warning(f'Module {modname} is now stable, please use the {plainname} module instead.') return mod except InvalidArguments: mlog.warning(f'Module {modname} has no backwards or forwards compatibility and might not exist in future releases.', location=node) modname = 'unstable_' + plainname return self._import_module(modname, required) @typed_pos_args('files', varargs=str) @noKwargs def func_files(self, node: mparser.FunctionNode, args: T.Tuple[T.List[str]], kwargs: 'TYPE_kwargs') -> T.List[mesonlib.File]: return [mesonlib.File.from_source_file(self.environment.source_dir, self.subdir, fname) for fname in args[0]] # Used by declare_dependency() and pkgconfig.generate() def extract_variables(self, kwargs, argname='variables', list_new=False, dict_new=False): variables = kwargs.get(argname, {}) if isinstance(variables, dict): if dict_new and variables: FeatureNew.single_use(f'{argname} as dictionary', '0.56.0', self.subproject, location=self.current_node) else: varlist = mesonlib.stringlistify(variables) if list_new: FeatureNew.single_use(f'{argname} as list of strings', '0.56.0', self.subproject, location=self.current_node) variables = collections.OrderedDict() for v in varlist: try: (key, value) = v.split('=', 1) except ValueError: raise InterpreterException(f'Variable {v!r} must have a value separated by equals sign.') variables[key.strip()] = value.strip() for k, v in variables.items(): if not k or not v: raise InterpreterException('Empty variable name or value') if any(c.isspace() for c in k): raise InterpreterException(f'Invalid whitespace in variable name "{k}"') if not isinstance(v, str): raise InterpreterException('variables values must be strings.') return variables @FeatureNewKwargs('declare_dependency', '0.46.0', ['link_whole']) @FeatureNewKwargs('declare_dependency', '0.54.0', ['variables']) @FeatureNewKwargs('declare_dependency', '0.62.0', ['d_module_versions', 'd_import_dirs']) @permittedKwargs({'include_directories', 'link_with', 'sources', 'dependencies', 'compile_args', 'link_args', 'link_whole', 'version', 'variables', 'd_module_versions', 'd_import_dirs'}) @noPosargs def func_declare_dependency(self, node, args, kwargs): version = kwargs.get('version', self.project_version) if not isinstance(version, str): raise InterpreterException('Version must be a string.') incs = self.extract_incdirs(kwargs) libs = extract_as_list(kwargs, 'link_with') libs_whole = extract_as_list(kwargs, 'link_whole') sources = extract_as_list(kwargs, 'sources') sources = 
listify(self.source_strings_to_files(sources)) deps = extract_as_list(kwargs, 'dependencies') compile_args = mesonlib.stringlistify(kwargs.get('compile_args', [])) link_args = mesonlib.stringlistify(kwargs.get('link_args', [])) variables = self.extract_variables(kwargs, list_new=True) d_module_versions = extract_as_list(kwargs, 'd_module_versions') d_import_dirs = self.extract_incdirs(kwargs, 'd_import_dirs') final_deps = [] for d in deps: if not isinstance(d, (dependencies.Dependency, dependencies.ExternalLibrary, dependencies.InternalDependency)): raise InterpreterException('Dependencies must be external deps') final_deps.append(d) for l in libs: if isinstance(l, dependencies.Dependency): raise InterpreterException('''Entries in "link_with" may only be self-built targets, external dependencies (including libraries) must go to "dependencies".''') dep = dependencies.InternalDependency(version, incs, compile_args, link_args, libs, libs_whole, sources, final_deps, variables, d_module_versions, d_import_dirs) return dep @typed_pos_args('assert', bool, optargs=[str]) @noKwargs def func_assert(self, node: mparser.FunctionNode, args: T.Tuple[bool, T.Optional[str]], kwargs: 'TYPE_kwargs') -> None: value, message = args if message is None: FeatureNew.single_use('assert function without message argument', '0.53.0', self.subproject, location=node) if not value: if message is None: from ..ast import AstPrinter printer = AstPrinter() node.args.arguments[0].accept(printer) message = printer.result raise InterpreterException('Assert failed: ' + message) def validate_arguments(self, args, argcount, arg_types): if argcount is not None: if argcount != len(args): raise InvalidArguments(f'Expected {argcount} arguments, got {len(args)}.') for actual, wanted in zip(args, arg_types): if wanted is not None: if not isinstance(actual, wanted): raise InvalidArguments('Incorrect argument type.') # Executables aren't actually accepted, but we allow them here to allow for # better error messages when overridden @typed_pos_args( 'run_command', (build.Executable, ExternalProgram, compilers.Compiler, mesonlib.File, str), varargs=(build.Executable, ExternalProgram, compilers.Compiler, mesonlib.File, str)) @typed_kwargs( 'run_command', KwargInfo('check', (bool, NoneType), since='0.47.0'), KwargInfo('capture', bool, default=True, since='0.47.0'), ENV_KW.evolve(since='0.50.0'), ) def func_run_command(self, node: mparser.BaseNode, args: T.Tuple[T.Union[build.Executable, ExternalProgram, compilers.Compiler, mesonlib.File, str], T.List[T.Union[build.Executable, ExternalProgram, compilers.Compiler, mesonlib.File, str]]], kwargs: 'kwargs.RunCommand') -> RunProcess: return self.run_command_impl(node, args, kwargs) def run_command_impl(self, node: mparser.BaseNode, args: T.Tuple[T.Union[build.Executable, ExternalProgram, compilers.Compiler, mesonlib.File, str], T.List[T.Union[build.Executable, ExternalProgram, compilers.Compiler, mesonlib.File, str]]], kwargs: 'kwargs.RunCommand', in_builddir: bool = False) -> RunProcess: cmd, cargs = args capture = kwargs['capture'] env = kwargs['env'] srcdir = self.environment.get_source_dir() builddir = self.environment.get_build_dir() check = kwargs['check'] if check is None: mlog.warning(implicit_check_false_warning, once=True) check = False overridden_msg = ('Program {!r} was overridden with the compiled ' 'executable {!r} and therefore cannot be used during ' 'configuration') expanded_args: T.List[str] = [] if isinstance(cmd, build.Executable): progname = node.args.arguments[0].value raise 
InterpreterException(overridden_msg.format(progname, cmd.description())) if isinstance(cmd, ExternalProgram): if not cmd.found(): raise InterpreterException(f'command {cmd.get_name()!r} not found or not executable') elif isinstance(cmd, compilers.Compiler): exelist = cmd.get_exelist() cmd = exelist[0] prog = ExternalProgram(cmd, silent=True) if not prog.found(): raise InterpreterException(f'Program {cmd!r} not found or not executable') cmd = prog expanded_args = exelist[1:] else: if isinstance(cmd, mesonlib.File): cmd = cmd.absolute_path(srcdir, builddir) # Prefer scripts in the current source directory search_dir = os.path.join(srcdir, self.subdir) prog = ExternalProgram(cmd, silent=True, search_dir=search_dir) if not prog.found(): raise InterpreterException(f'Program or command {cmd!r} not found or not executable') cmd = prog for a in cargs: if isinstance(a, str): expanded_args.append(a) elif isinstance(a, mesonlib.File): expanded_args.append(a.absolute_path(srcdir, builddir)) elif isinstance(a, ExternalProgram): expanded_args.append(a.get_path()) elif isinstance(a, compilers.Compiler): FeatureNew.single_use('Compiler object as a variadic argument to `run_command`', '0.61.0', self.subproject, location=node) prog = ExternalProgram(a.exelist[0], silent=True) if not prog.found(): raise InterpreterException(f'Program {cmd!r} not found or not executable') expanded_args.append(prog.get_path()) else: raise InterpreterException(overridden_msg.format(a.name, cmd.description())) # If any file that was used as an argument to the command # changes, we must re-run the configuration step. self.add_build_def_file(cmd.get_path()) for a in expanded_args: if not os.path.isabs(a): a = os.path.join(builddir if in_builddir else srcdir, self.subdir, a) self.add_build_def_file(a) return RunProcess(cmd, expanded_args, env, srcdir, builddir, self.subdir, self.environment.get_build_command() + ['introspect'], in_builddir=in_builddir, check=check, capture=capture) def func_gettext(self, nodes, args, kwargs): raise InterpreterException('Gettext() function has been moved to module i18n. Import it and use i18n.gettext() instead') def func_option(self, nodes, args, kwargs): raise InterpreterException('Tried to call option() in build description file. 
All options must be in the option file.') @typed_pos_args('subproject', str) @typed_kwargs( 'subproject', REQUIRED_KW, DEFAULT_OPTIONS.evolve(since='0.38.0'), KwargInfo('version', ContainerTypeInfo(list, str), default=[], listify=True), ) def func_subproject(self, nodes: mparser.BaseNode, args: T.Tuple[str], kwargs_: kwargs.Subproject) -> SubprojectHolder: kw: kwargs.DoSubproject = { 'required': kwargs_['required'], 'default_options': kwargs_['default_options'], 'version': kwargs_['version'], 'options': None, 'cmake_options': [], } return self.do_subproject(args[0], 'meson', kw) def disabled_subproject(self, subp_name: str, disabled_feature: T.Optional[str] = None, exception: T.Optional[Exception] = None) -> SubprojectHolder: sub = SubprojectHolder(NullSubprojectInterpreter(), os.path.join(self.subproject_dir, subp_name), disabled_feature=disabled_feature, exception=exception) self.subprojects[subp_name] = sub self.coredata.initialized_subprojects.add(subp_name) return sub def do_subproject(self, subp_name: str, method: Literal['meson', 'cmake'], kwargs: kwargs.DoSubproject) -> SubprojectHolder: disabled, required, feature = extract_required_kwarg(kwargs, self.subproject) if disabled: mlog.log('Subproject', mlog.bold(subp_name), ':', 'skipped: feature', mlog.bold(feature), 'disabled') return self.disabled_subproject(subp_name, disabled_feature=feature) default_options = coredata.create_options_dict(kwargs['default_options'], subp_name) if subp_name == '': raise InterpreterException('Subproject name must not be empty.') if subp_name[0] == '.': raise InterpreterException('Subproject name must not start with a period.') if '..' in subp_name: raise InterpreterException('Subproject name must not contain a ".." path segment.') if os.path.isabs(subp_name): raise InterpreterException('Subproject name must not be an absolute path.') if has_path_sep(subp_name): mlog.warning('Subproject name has a path separator. 
This may cause unexpected behaviour.', location=self.current_node) if subp_name in self.subproject_stack: fullstack = self.subproject_stack + [subp_name] incpath = ' => '.join(fullstack) raise InvalidCode(f'Recursive include of subprojects: {incpath}.') if subp_name in self.subprojects: subproject = self.subprojects[subp_name] if required and not subproject.found(): raise InterpreterException(f'Subproject "{subproject.subdir}" required but not found.') if kwargs['version']: pv = self.build.subprojects[subp_name] wanted = kwargs['version'] if pv == 'undefined' or not mesonlib.version_compare_many(pv, wanted)[0]: raise InterpreterException(f'Subproject {subp_name} version is {pv} but {wanted} required.') return subproject r = self.environment.wrap_resolver try: subdir = r.resolve(subp_name, method) except wrap.WrapException as e: if not required: mlog.log(e) mlog.log('Subproject ', mlog.bold(subp_name), 'is buildable:', mlog.red('NO'), '(disabling)') return self.disabled_subproject(subp_name, exception=e) raise e subdir_abs = os.path.join(self.environment.get_source_dir(), subdir) os.makedirs(os.path.join(self.build.environment.get_build_dir(), subdir), exist_ok=True) self.global_args_frozen = True stack = ':'.join(self.subproject_stack + [subp_name]) m = ['\nExecuting subproject', mlog.bold(stack)] if method != 'meson': m += ['method', mlog.bold(method)] mlog.log(*m, '\n', nested=False) try: if method == 'meson': return self._do_subproject_meson(subp_name, subdir, default_options, kwargs) elif method == 'cmake': return self._do_subproject_cmake(subp_name, subdir, subdir_abs, default_options, kwargs) else: raise mesonlib.MesonBugException(f'The method {method} is invalid for the subproject {subp_name}') # Invalid code is always an error except InvalidCode: raise except Exception as e: if not required: with mlog.nested(subp_name): # Suppress the 'ERROR:' prefix because this exception is not # fatal and VS CI treat any logs with "ERROR:" as fatal. mlog.exception(e, prefix=mlog.yellow('Exception:')) mlog.log('\nSubproject', mlog.bold(subdir), 'is buildable:', mlog.red('NO'), '(disabling)') return self.disabled_subproject(subp_name, exception=e) raise e def _do_subproject_meson(self, subp_name: str, subdir: str, default_options: T.Dict[OptionKey, str], kwargs: kwargs.DoSubproject, ast: T.Optional[mparser.CodeBlockNode] = None, build_def_files: T.Optional[T.List[str]] = None, is_translated: bool = False) -> SubprojectHolder: with mlog.nested(subp_name): new_build = self.build.copy() subi = Interpreter(new_build, self.backend, subp_name, subdir, self.subproject_dir, default_options, ast=ast, is_translated=is_translated, user_defined_options=self.user_defined_options) # Those lists are shared by all interpreters. That means that # even if the subproject fails, any modification that the subproject # made to those lists will affect the parent project. 
subi.subprojects = self.subprojects subi.modules = self.modules subi.holder_map = self.holder_map subi.bound_holder_map = self.bound_holder_map subi.summary = self.summary subi.subproject_stack = self.subproject_stack + [subp_name] current_active = self.active_projectname current_warnings_counter = mlog.log_warnings_counter mlog.log_warnings_counter = 0 subi.run() subi_warnings = mlog.log_warnings_counter mlog.log_warnings_counter = current_warnings_counter mlog.log('Subproject', mlog.bold(subp_name), 'finished.') mlog.log() if kwargs['version']: pv = subi.project_version wanted = kwargs['version'] if pv == 'undefined' or not mesonlib.version_compare_many(pv, wanted)[0]: raise InterpreterException(f'Subproject {subp_name} version is {pv} but {wanted} required.') self.active_projectname = current_active self.subprojects.update(subi.subprojects) self.subprojects[subp_name] = SubprojectHolder(subi, subdir, warnings=subi_warnings) # Duplicates are possible when subproject uses files from project root if build_def_files: self.build_def_files.update(build_def_files) # We always need the subi.build_def_files, to propgate sub-sub-projects self.build_def_files.update(subi.build_def_files) self.build.merge(subi.build) self.build.subprojects[subp_name] = subi.project_version self.coredata.initialized_subprojects.add(subp_name) return self.subprojects[subp_name] def _do_subproject_cmake(self, subp_name: str, subdir: str, subdir_abs: str, default_options: T.Dict[OptionKey, str], kwargs: kwargs.DoSubproject) -> SubprojectHolder: with mlog.nested(subp_name): new_build = self.build.copy() prefix = self.coredata.options[OptionKey('prefix')].value from ..modules.cmake import CMakeSubprojectOptions options = kwargs['options'] or CMakeSubprojectOptions() cmake_options = kwargs['cmake_options'] + options.cmake_options cm_int = CMakeInterpreter(new_build, Path(subdir), Path(subdir_abs), Path(prefix), new_build.environment, self.backend) cm_int.initialise(cmake_options) cm_int.analyse() # Generate a meson ast and execute it with the normal do_subproject_meson ast = cm_int.pretend_to_be_meson(options.target_options) mlog.log() with mlog.nested('cmake-ast'): mlog.log('Processing generated meson AST') # Debug print the generated meson file from ..ast import AstIndentationGenerator, AstPrinter printer = AstPrinter() ast.accept(AstIndentationGenerator()) ast.accept(printer) printer.post_process() meson_filename = os.path.join(self.build.environment.get_build_dir(), subdir, 'meson.build') with open(meson_filename, "w", encoding='utf-8') as f: f.write(printer.result) mlog.log('Build file:', meson_filename) mlog.cmd_ci_include(meson_filename) mlog.log() result = self._do_subproject_meson(subp_name, subdir, default_options, kwargs, ast, [str(f) for f in cm_int.bs_files], is_translated=True) result.cm_interpreter = cm_int mlog.log() return result def get_option_internal(self, optname: str) -> coredata.UserOption: key = OptionKey.from_string(optname).evolve(subproject=self.subproject) if not key.is_project(): for opts in [self.coredata.options, compilers.base_options]: v = opts.get(key) if v is None or v.yielding: v = opts.get(key.as_root()) if v is not None: assert isinstance(v, coredata.UserOption), 'for mypy' return v try: opt = self.coredata.options[key] if opt.yielding and key.subproject and key.as_root() in self.coredata.options: popt = self.coredata.options[key.as_root()] if type(opt) is type(popt): opt = popt else: # Get class name, then option type as a string opt_type = opt.__class__.__name__[4:][:-6].lower() 
popt_type = popt.__class__.__name__[4:][:-6].lower() # This is not a hard error to avoid dependency hell, the workaround # when this happens is to simply set the subproject's option directly. mlog.warning('Option {0!r} of type {1!r} in subproject {2!r} cannot yield ' 'to parent option of type {3!r}, ignoring parent value. ' 'Use -D{2}:{0}=value to set the value for this option manually' '.'.format(optname, opt_type, self.subproject, popt_type), location=self.current_node) return opt except KeyError: pass raise InterpreterException(f'Tried to access unknown option {optname!r}.') @typed_pos_args('get_option', str) @noKwargs def func_get_option(self, nodes: mparser.BaseNode, args: T.Tuple[str], kwargs: 'TYPE_kwargs') -> T.Union[coredata.UserOption, 'TYPE_var']: optname = args[0] if ':' in optname: raise InterpreterException('Having a colon in option name is forbidden, ' 'projects are not allowed to directly access ' 'options of other subprojects.') opt = self.get_option_internal(optname) if isinstance(opt, coredata.UserFeatureOption): opt.name = optname return opt elif isinstance(opt, coredata.UserOption): return opt.value return opt @typed_pos_args('configuration_data', optargs=[dict]) @noKwargs def func_configuration_data(self, node: mparser.BaseNode, args: T.Tuple[T.Optional[T.Dict[str, T.Any]]], kwargs: 'TYPE_kwargs') -> build.ConfigurationData: initial_values = args[0] if initial_values is not None: FeatureNew.single_use('configuration_data dictionary', '0.49.0', self.subproject, location=node) for k, v in initial_values.items(): if not isinstance(v, (str, int, bool)): raise InvalidArguments( f'"configuration_data": initial value dictionary key "{k!r}"" must be "str | int | bool", not "{v!r}"') return build.ConfigurationData(initial_values) def set_backend(self) -> None: # The backend is already set when parsing subprojects if self.backend is not None: return backend = self.coredata.get_option(OptionKey('backend')) from ..backend import backends self.backend = backends.get_backend_from_name(backend, self.build, self) if self.backend is None: raise InterpreterException(f'Unknown backend "{backend}".') if backend != self.backend.name: if self.backend.name.startswith('vs'): mlog.log('Auto detected Visual Studio backend:', mlog.bold(self.backend.name)) self.coredata.set_option(OptionKey('backend'), self.backend.name) # Only init backend options on first invocation otherwise it would # override values previously set from command line. if self.environment.first_invocation: self.coredata.init_backend_options(backend) options = {k: v for k, v in self.environment.options.items() if k.is_backend()} self.coredata.set_options(options) @typed_pos_args('project', str, varargs=str) @typed_kwargs( 'project', DEFAULT_OPTIONS, KwargInfo('meson_version', (str, NoneType)), KwargInfo( 'version', (str, mesonlib.File, NoneType, list), default='undefined', validator=_project_version_validator, convertor=lambda x: x[0] if isinstance(x, list) else x, ), KwargInfo('license', ContainerTypeInfo(list, str), default=['unknown'], listify=True), KwargInfo('subproject_dir', str, default='subprojects'), ) def func_project(self, node: mparser.FunctionNode, args: T.Tuple[str, T.List[str]], kwargs: 'kwargs.Project') -> None: proj_name, proj_langs = args if ':' in proj_name: raise InvalidArguments(f"Project name {proj_name!r} must not contain ':'") # This needs to be evaluated as early as possible, as meson uses this # for things like deprecation testing. 
if kwargs['meson_version']: cv = coredata.version pv = kwargs['meson_version'] if not mesonlib.version_compare(cv, pv): raise InterpreterException(f'Meson version is {cv} but project requires {pv}') mesonlib.project_meson_versions[self.subproject] = kwargs['meson_version'] if os.path.exists(self.option_file): oi = optinterpreter.OptionInterpreter(self.subproject) oi.process(self.option_file) self.coredata.update_project_options(oi.options) self.add_build_def_file(self.option_file) # Do not set default_options on reconfigure otherwise it would override # values previously set from command line. That means that changing # default_options in a project will trigger a reconfigure but won't # have any effect. self.project_default_options = coredata.create_options_dict( kwargs['default_options'], self.subproject) # If this is the first invocation we always need to initialize # builtins, if this is a subproject that is new in a re-invocation we # need to initialize builtins for that if self.environment.first_invocation or (self.subproject != '' and self.subproject not in self.coredata.initialized_subprojects): default_options = self.project_default_options.copy() default_options.update(self.default_project_options) self.coredata.init_builtins(self.subproject) else: default_options = {} self.coredata.set_default_options(default_options, self.subproject, self.environment) if not self.is_subproject(): self.build.project_name = proj_name self.active_projectname = proj_name version = kwargs['version'] if isinstance(version, mesonlib.File): FeatureNew.single_use('version from file', '0.57.0', self.subproject, location=node) self.add_build_def_file(version) ifname = version.absolute_path(self.environment.source_dir, self.environment.build_dir) try: ver_data = Path(ifname).read_text(encoding='utf-8').split('\n') except FileNotFoundError: raise InterpreterException('Version file not found.') if len(ver_data) == 2 and ver_data[1] == '': ver_data = ver_data[0:1] if len(ver_data) != 1: raise InterpreterException('Version file must contain exactly one line of text.') self.project_version = ver_data[0] else: self.project_version = version if self.build.project_version is None: self.build.project_version = self.project_version proj_license = kwargs['license'] self.build.dep_manifest[proj_name] = build.DepManifest(self.project_version, proj_license) if self.subproject in self.build.projects: raise InvalidCode('Second call to project().') # spdirname is the subproject_dir for this project, relative to self.subdir. # self.subproject_dir is the subproject_dir for the main project, relative to top source dir. spdirname = kwargs['subproject_dir'] if not isinstance(spdirname, str): raise InterpreterException('Subproject_dir must be a string') if os.path.isabs(spdirname): raise InterpreterException('Subproject_dir must not be an absolute path.') if spdirname.startswith('.'): raise InterpreterException('Subproject_dir must not begin with a period.') if '..' in spdirname: raise InterpreterException('Subproject_dir must not contain a ".." segment.') if not self.is_subproject(): self.subproject_dir = spdirname self.build.subproject_dir = self.subproject_dir # Load wrap files from this (sub)project. 
wrap_mode = self.coredata.get_option(OptionKey('wrap_mode')) if not self.is_subproject() or wrap_mode != WrapMode.nopromote: subdir = os.path.join(self.subdir, spdirname) r = wrap.Resolver(self.environment.get_source_dir(), subdir, self.subproject, wrap_mode) if self.is_subproject(): self.environment.wrap_resolver.merge_wraps(r) else: self.environment.wrap_resolver = r self.build.projects[self.subproject] = proj_name mlog.log('Project name:', mlog.bold(proj_name)) mlog.log('Project version:', mlog.bold(self.project_version)) if not self.is_subproject(): # We have to activate VS before adding languages and before calling # self.set_backend() otherwise it wouldn't be able to detect which # vs backend version we need. But after setting default_options in case # the project sets vs backend by default. backend = self.coredata.get_option(OptionKey('backend')) force_vsenv = self.user_defined_options.vsenv or backend.startswith('vs') if mesonlib.setup_vsenv(force_vsenv): self.build.need_vsenv = True self.add_languages(proj_langs, True, MachineChoice.HOST) self.add_languages(proj_langs, False, MachineChoice.BUILD) self.set_backend() if not self.is_subproject(): self.check_stdlibs() @typed_kwargs('add_languages', KwargInfo('native', (bool, NoneType), since='0.54.0'), REQUIRED_KW) @typed_pos_args('add_languages', varargs=str) def func_add_languages(self, node: mparser.FunctionNode, args: T.Tuple[T.List[str]], kwargs: 'kwargs.FuncAddLanguages') -> bool: langs = args[0] disabled, required, feature = extract_required_kwarg(kwargs, self.subproject) native = kwargs['native'] if disabled: for lang in sorted(langs, key=compilers.sort_clink): mlog.log('Compiler for language', mlog.bold(lang), 'skipped: feature', mlog.bold(feature), 'disabled') return False if native is not None: return self.add_languages(langs, required, self.machine_from_native_kwarg(kwargs)) else: # absent 'native' means 'both' for backwards compatibility tv = FeatureNew.get_target_version(self.subproject) if FeatureNew.check_version(tv, '0.54.0'): mlog.warning('add_languages is missing native:, assuming languages are wanted for both host and build.', location=node) success = self.add_languages(langs, False, MachineChoice.BUILD) success &= self.add_languages(langs, required, MachineChoice.HOST) return success @noArgsFlattening @noKwargs def func_message(self, node, args, kwargs): if len(args) > 1: FeatureNew.single_use('message with more than one argument', '0.54.0', self.subproject, location=node) args_str = [stringifyUserArguments(i) for i in args] self.message_impl(args_str) def message_impl(self, args): mlog.log(mlog.bold('Message:'), *args) @noArgsFlattening @FeatureNew('summary', '0.53.0') @typed_pos_args('summary', (str, dict), optargs=[object]) @typed_kwargs( 'summary', KwargInfo('section', str, default=''), KwargInfo('bool_yn', bool, default=False), KwargInfo('list_sep', (str, NoneType), since='0.54.0') ) def func_summary(self, node: mparser.BaseNode, args: T.Tuple[T.Union[str, T.Dict[str, T.Any]], T.Optional[T.Any]], kwargs: 'kwargs.Summary') -> None: if args[1] is None: if not isinstance(args[0], dict): raise InterpreterException('Summary first argument must be dictionary.') values = args[0] else: if not isinstance(args[0], str): raise InterpreterException('Summary first argument must be string.') values = {args[0]: args[1]} self.summary_impl(kwargs['section'], values, kwargs) def summary_impl(self, section: str, values, kwargs: 'kwargs.Summary') -> None: if self.subproject not in self.summary: self.summary[self.subproject] = 
Summary(self.active_projectname, self.project_version) self.summary[self.subproject].add_section( section, values, kwargs['bool_yn'], kwargs['list_sep'], self.subproject) def _print_summary(self) -> None: # Add automatic 'Supbrojects' section in main project. all_subprojects = collections.OrderedDict() for name, subp in sorted(self.subprojects.items()): value = subp.found() if subp.disabled_feature: value = [value, f'Feature {subp.disabled_feature!r} disabled'] elif subp.exception: value = [value, str(subp.exception)] elif subp.warnings > 0: value = [value, f'{subp.warnings} warnings'] all_subprojects[name] = value if all_subprojects: self.summary_impl('Subprojects', all_subprojects, {'bool_yn': True, 'list_sep': ' ', }) # Add automatic section with all user defined options if self.user_defined_options: values = collections.OrderedDict() if self.user_defined_options.cross_file: values['Cross files'] = self.user_defined_options.cross_file if self.user_defined_options.native_file: values['Native files'] = self.user_defined_options.native_file sorted_options = sorted(self.user_defined_options.cmd_line_options.items()) values.update({str(k): v for k, v in sorted_options}) if values: self.summary_impl('User defined options', values, {'bool_yn': False, 'list_sep': None}) # Print all summaries, main project last. mlog.log('') # newline main_summary = self.summary.pop('', None) for subp_name, summary in sorted(self.summary.items()): if self.subprojects[subp_name].found(): summary.dump() if main_summary: main_summary.dump() @noArgsFlattening @FeatureNew('warning', '0.44.0') @noKwargs def func_warning(self, node, args, kwargs): if len(args) > 1: FeatureNew.single_use('warning with more than one argument', '0.54.0', self.subproject, location=node) args_str = [stringifyUserArguments(i) for i in args] mlog.warning(*args_str, location=node) @noArgsFlattening @noKwargs def func_error(self, node, args, kwargs): if len(args) > 1: FeatureNew.single_use('error with more than one argument', '0.58.0', self.subproject, location=node) args_str = [stringifyUserArguments(i) for i in args] raise InterpreterException('Problem encountered: ' + ' '.join(args_str)) @noKwargs @noPosargs def func_exception(self, node, args, kwargs): raise Exception() def add_languages(self, args: T.Sequence[str], required: bool, for_machine: MachineChoice) -> bool: success = self.add_languages_for(args, required, for_machine) if not self.coredata.is_cross_build(): self.coredata.copy_build_options_from_regular_ones() self._redetect_machines() return success def should_skip_sanity_check(self, for_machine: MachineChoice) -> bool: should = self.environment.properties.host.get('skip_sanity_check', False) if not isinstance(should, bool): raise InterpreterException('Option skip_sanity_check must be a boolean.') if for_machine != MachineChoice.HOST and not should: return False if not self.environment.is_cross_build() and not should: return False return should def add_languages_for(self, args: T.List[str], required: bool, for_machine: MachineChoice) -> bool: args = [a.lower() for a in args] langs = set(self.coredata.compilers[for_machine].keys()) langs.update(args) # We'd really like to add cython's default language here, but it can't # actually be done because the cython compiler hasn't been initialized, # so we can't actually get the option yet. 
Because we can't know what # compiler to add by default, and we don't want to add unnecessary # compilers we don't add anything for cython here, and instead do it # When the first cython target using a particular language is used. if 'vala' in langs and 'c' not in langs: FeatureNew.single_use('Adding Vala language without C', '0.59.0', self.subproject, location=self.current_node) args.append('c') success = True for lang in sorted(args, key=compilers.sort_clink): clist = self.coredata.compilers[for_machine] machine_name = for_machine.get_lower_case_name() if lang in clist: comp = clist[lang] else: try: comp = compilers.detect_compiler_for(self.environment, lang, for_machine) if comp is None: raise InvalidArguments(f'Tried to use unknown language "{lang}".') if self.should_skip_sanity_check(for_machine): mlog.log_once('Cross compiler sanity tests disabled via the cross file.') else: comp.sanity_check(self.environment.get_scratch_dir(), self.environment) except Exception: if not required: mlog.log('Compiler for language', mlog.bold(lang), 'for the', machine_name, 'machine not found.') success = False continue else: raise if for_machine == MachineChoice.HOST or self.environment.is_cross_build(): logger_fun = mlog.log else: logger_fun = mlog.debug logger_fun(comp.get_display_language(), 'compiler for the', machine_name, 'machine:', mlog.bold(' '.join(comp.get_exelist())), comp.get_version_string()) if comp.linker is not None: logger_fun(comp.get_display_language(), 'linker for the', machine_name, 'machine:', mlog.bold(' '.join(comp.linker.get_exelist())), comp.linker.id, comp.linker.version) self.build.ensure_static_linker(comp) return success def program_from_file_for(self, for_machine: MachineChoice, prognames: T.List[mesonlib.FileOrString] ) -> T.Optional[ExternalProgram]: for p in prognames: if isinstance(p, mesonlib.File): continue # Always points to a local (i.e. self generated) file. if not isinstance(p, str): raise InterpreterException('Executable name must be a string') prog = ExternalProgram.from_bin_list(self.environment, for_machine, p) if prog.found(): return prog return None def program_from_system(self, args: T.List[mesonlib.FileOrString], search_dirs: T.List[str], extra_info: T.List[mlog.TV_Loggable]) -> T.Optional[ExternalProgram]: # Search for scripts relative to current subdir. # Do not cache found programs because find_program('foobar') # might give different results when run from different source dirs. 
source_dir = os.path.join(self.environment.get_source_dir(), self.subdir) for exename in args: if isinstance(exename, mesonlib.File): if exename.is_built: search_dir = os.path.join(self.environment.get_build_dir(), exename.subdir) else: search_dir = os.path.join(self.environment.get_source_dir(), exename.subdir) exename = exename.fname extra_search_dirs = [] elif isinstance(exename, str): search_dir = source_dir extra_search_dirs = search_dirs else: raise InvalidArguments(f'find_program only accepts strings and files, not {exename!r}') extprog = ExternalProgram(exename, search_dir=search_dir, extra_search_dirs=extra_search_dirs, silent=True) if extprog.found(): extra_info.append(f"({' '.join(extprog.get_command())})") return extprog return None def program_from_overrides(self, command_names: T.List[mesonlib.FileOrString], extra_info: T.List['mlog.TV_Loggable'] ) -> T.Optional[T.Union[ExternalProgram, OverrideProgram, build.Executable]]: for name in command_names: if not isinstance(name, str): continue if name in self.build.find_overrides: exe = self.build.find_overrides[name] extra_info.append(mlog.blue('(overridden)')) return exe return None def store_name_lookups(self, command_names: T.List[mesonlib.FileOrString]) -> None: for name in command_names: if isinstance(name, str): self.build.searched_programs.add(name) def add_find_program_override(self, name: str, exe: T.Union[build.Executable, ExternalProgram, 'OverrideProgram']) -> None: if name in self.build.searched_programs: raise InterpreterException(f'Tried to override finding of executable "{name}" which has already been found.') if name in self.build.find_overrides: raise InterpreterException(f'Tried to override executable "{name}" which has already been overridden.') self.build.find_overrides[name] = exe def notfound_program(self, args: T.List[mesonlib.FileOrString]) -> ExternalProgram: return NonExistingExternalProgram(' '.join( [a if isinstance(a, str) else a.absolute_path(self.environment.source_dir, self.environment.build_dir) for a in args])) # TODO update modules to always pass `for_machine`. It is bad-form to assume # the host machine. 
def find_program_impl(self, args: T.List[mesonlib.FileOrString], for_machine: MachineChoice = MachineChoice.HOST, required: bool = True, silent: bool = True, wanted: T.Union[str, T.List[str]] = '', search_dirs: T.Optional[T.List[str]] = None, version_func: T.Optional[T.Callable[[T.Union['ExternalProgram', 'build.Executable', 'OverrideProgram']], str]] = None ) -> T.Union['ExternalProgram', 'build.Executable', 'OverrideProgram']: args = mesonlib.listify(args) extra_info: T.List[mlog.TV_Loggable] = [] progobj = self.program_lookup(args, for_machine, required, search_dirs, extra_info) if progobj is None: progobj = self.notfound_program(args) if isinstance(progobj, ExternalProgram) and not progobj.found(): if not silent: mlog.log('Program', mlog.bold(progobj.get_name()), 'found:', mlog.red('NO')) if required: m = 'Program {!r} not found or not executable' raise InterpreterException(m.format(progobj.get_name())) return progobj if wanted: if version_func: version = version_func(progobj) elif isinstance(progobj, build.Executable): if progobj.subproject: interp = self.subprojects[progobj.subproject].held_object else: interp = self assert isinstance(interp, Interpreter) version = interp.project_version else: version = progobj.get_version(self) is_found, not_found, _ = mesonlib.version_compare_many(version, wanted) if not is_found: mlog.log('Program', mlog.bold(progobj.name), 'found:', mlog.red('NO'), 'found', mlog.normal_cyan(version), 'but need:', mlog.bold(', '.join([f"'{e}'" for e in not_found])), *extra_info) if required: m = 'Invalid version of program, need {!r} {!r} found {!r}.' raise InterpreterException(m.format(progobj.name, not_found, version)) return self.notfound_program(args) extra_info.insert(0, mlog.normal_cyan(version)) # Only store successful lookups self.store_name_lookups(args) if not silent: mlog.log('Program', mlog.bold(progobj.name), 'found:', mlog.green('YES'), *extra_info) if isinstance(progobj, build.Executable): progobj.was_returned_by_find_program = True return progobj def program_lookup(self, args: T.List[mesonlib.FileOrString], for_machine: MachineChoice, required: bool, search_dirs: T.List[str], extra_info: T.List[mlog.TV_Loggable] ) -> T.Optional[T.Union[ExternalProgram, build.Executable, OverrideProgram]]: progobj = self.program_from_overrides(args, extra_info) if progobj: return progobj fallback = None wrap_mode = self.coredata.get_option(OptionKey('wrap_mode')) if wrap_mode != WrapMode.nofallback and self.environment.wrap_resolver: fallback = self.environment.wrap_resolver.find_program_provider(args) if fallback and wrap_mode == WrapMode.forcefallback: return self.find_program_fallback(fallback, args, required, extra_info) progobj = self.program_from_file_for(for_machine, args) if progobj is None: progobj = self.program_from_system(args, search_dirs, extra_info) if progobj is None and args[0].endswith('python3'): prog = ExternalProgram('python3', mesonlib.python_command, silent=True) progobj = prog if prog.found() else None if progobj is None and fallback and required: progobj = self.find_program_fallback(fallback, args, required, extra_info) return progobj def find_program_fallback(self, fallback: str, args: T.List[mesonlib.FileOrString], required: bool, extra_info: T.List[mlog.TV_Loggable] ) -> T.Optional[T.Union[ExternalProgram, build.Executable, OverrideProgram]]: mlog.log('Fallback to subproject', mlog.bold(fallback), 'which provides program', mlog.bold(' '.join(args))) sp_kwargs: kwargs.DoSubproject = { 'required': required, 'default_options': [], 'version': 
[], 'cmake_options': [], 'options': None, } self.do_subproject(fallback, 'meson', sp_kwargs) return self.program_from_overrides(args, extra_info) @typed_pos_args('find_program', varargs=(str, mesonlib.File), min_varargs=1) @typed_kwargs( 'find_program', DISABLER_KW.evolve(since='0.49.0'), NATIVE_KW, REQUIRED_KW, KwargInfo('dirs', ContainerTypeInfo(list, str), default=[], listify=True, since='0.53.0'), KwargInfo('version', ContainerTypeInfo(list, str), default=[], listify=True, since='0.52.0'), ) @disablerIfNotFound def func_find_program(self, node: mparser.BaseNode, args: T.Tuple[T.List[mesonlib.FileOrString]], kwargs: 'kwargs.FindProgram', ) -> T.Union['build.Executable', ExternalProgram, 'OverrideProgram']: disabled, required, feature = extract_required_kwarg(kwargs, self.subproject) if disabled: mlog.log('Program', mlog.bold(' '.join(args[0])), 'skipped: feature', mlog.bold(feature), 'disabled') return self.notfound_program(args[0]) search_dirs = extract_search_dirs(kwargs) return self.find_program_impl(args[0], kwargs['native'], required=required, silent=False, wanted=kwargs['version'], search_dirs=search_dirs) def func_find_library(self, node, args, kwargs): raise InvalidCode('find_library() is removed, use meson.get_compiler(\'name\').find_library() instead.\n' 'Look here for documentation: http://mesonbuild.com/Reference-manual.html#compiler-object\n' 'Look here for example: http://mesonbuild.com/howtox.html#add-math-library-lm-portably\n' ) # When adding kwargs, please check if they make sense in dependencies.get_dep_identifier() @FeatureNewKwargs('dependency', '0.57.0', ['cmake_package_version']) @FeatureNewKwargs('dependency', '0.56.0', ['allow_fallback']) @FeatureNewKwargs('dependency', '0.54.0', ['components']) @FeatureNewKwargs('dependency', '0.52.0', ['include_type']) @FeatureNewKwargs('dependency', '0.50.0', ['not_found_message', 'cmake_module_path', 'cmake_args']) @FeatureNewKwargs('dependency', '0.49.0', ['disabler']) @FeatureNewKwargs('dependency', '0.40.0', ['method']) @FeatureNewKwargs('dependency', '0.38.0', ['default_options']) @disablerIfNotFound @permittedKwargs(permitted_dependency_kwargs) @typed_pos_args('dependency', varargs=str, min_varargs=1) def func_dependency(self, node, args, kwargs): # Replace '' by empty list of names names = [n for n in args[0] if n] if len(names) > 1: FeatureNew('dependency with more than one name', '0.60.0').use(self.subproject) allow_fallback = kwargs.get('allow_fallback') if allow_fallback is not None and not isinstance(allow_fallback, bool): raise InvalidArguments('"allow_fallback" argument must be boolean') fallback = kwargs.get('fallback') default_options = kwargs.get('default_options') df = DependencyFallbacksHolder(self, names, allow_fallback, default_options) df.set_fallback(fallback) not_found_message = kwargs.get('not_found_message', '') if not isinstance(not_found_message, str): raise InvalidArguments('The not_found_message must be a string.') try: d = df.lookup(kwargs) except Exception: if not_found_message: self.message_impl([not_found_message]) raise assert isinstance(d, Dependency) if not d.found() and not_found_message: self.message_impl([not_found_message]) self.message_impl([not_found_message]) # Ensure the correct include type if 'include_type' in kwargs: wanted = kwargs['include_type'] if not isinstance(wanted, str): raise InvalidArguments('The `include_type` kwarg must be a string') actual = d.get_include_type() if wanted != actual: mlog.debug(f'Current include type of {args[0]} is {actual}. 
Converting to requested {wanted}') d = d.generate_system_dependency(wanted) if d.feature_since is not None: version, extra_msg = d.feature_since FeatureNew.single_use(f'dep {d.name!r} custom lookup', version, self.subproject, extra_msg, node) for f in d.featurechecks: f.use(self.subproject, node) return d @FeatureNew('disabler', '0.44.0') @noKwargs @noPosargs def func_disabler(self, node, args, kwargs): return Disabler() @FeatureNewKwargs('executable', '0.42.0', ['implib']) @FeatureNewKwargs('executable', '0.56.0', ['win_subsystem']) @FeatureDeprecatedKwargs('executable', '0.56.0', ['gui_app'], extra_message="Use 'win_subsystem' instead.") @permittedKwargs(build.known_exe_kwargs) def func_executable(self, node, args, kwargs): return self.build_target(node, args, kwargs, build.Executable) @permittedKwargs(build.known_stlib_kwargs) def func_static_lib(self, node, args, kwargs): return self.build_target(node, args, kwargs, build.StaticLibrary) @permittedKwargs(build.known_shlib_kwargs) def func_shared_lib(self, node, args, kwargs): holder = self.build_target(node, args, kwargs, build.SharedLibrary) holder.shared_library_only = True return holder @permittedKwargs(known_library_kwargs) def func_both_lib(self, node, args, kwargs): return self.build_both_libraries(node, args, kwargs) @FeatureNew('shared_module', '0.37.0') @permittedKwargs(build.known_shmod_kwargs) def func_shared_module(self, node, args, kwargs): return self.build_target(node, args, kwargs, build.SharedModule) @permittedKwargs(known_library_kwargs) def func_library(self, node, args, kwargs): return self.build_library(node, args, kwargs) @permittedKwargs(build.known_jar_kwargs) def func_jar(self, node, args, kwargs): return self.build_target(node, args, kwargs, build.Jar) @FeatureNewKwargs('build_target', '0.40.0', ['link_whole', 'override_options']) @permittedKwargs(known_build_target_kwargs) def func_build_target(self, node, args, kwargs): if 'target_type' not in kwargs: raise InterpreterException('Missing target_type keyword argument') target_type = kwargs.pop('target_type') if target_type == 'executable': return self.build_target(node, args, kwargs, build.Executable) elif target_type == 'shared_library': return self.build_target(node, args, kwargs, build.SharedLibrary) elif target_type == 'shared_module': FeatureNew('build_target(target_type: \'shared_module\')', '0.51.0').use(self.subproject) return self.build_target(node, args, kwargs, build.SharedModule) elif target_type == 'static_library': return self.build_target(node, args, kwargs, build.StaticLibrary) elif target_type == 'both_libraries': return self.build_both_libraries(node, args, kwargs) elif target_type == 'library': return self.build_library(node, args, kwargs) elif target_type == 'jar': return self.build_target(node, args, kwargs, build.Jar) else: raise InterpreterException('Unknown target_type.') @noPosargs @typed_kwargs( 'vcs_tag', CT_INPUT_KW.evolve(required=True), CT_OUTPUT_KW, # Cannot use the COMMAND_KW because command is allowed to be empty KwargInfo( 'command', ContainerTypeInfo(list, (str, build.BuildTarget, build.CustomTarget, build.CustomTargetIndex, ExternalProgram, mesonlib.File)), listify=True, default=[], ), KwargInfo('fallback', (str, NoneType)), KwargInfo('replace_string', str, default='@VCS_TAG@'), ) def func_vcs_tag(self, node: mparser.BaseNode, args: T.List['TYPE_var'], kwargs: 'kwargs.VcsTag') -> build.CustomTarget: if kwargs['fallback'] is None: FeatureNew.single_use('Optional fallback in vcs_tag', '0.41.0', self.subproject, location=node) 
fallback = kwargs['fallback'] or self.project_version replace_string = kwargs['replace_string'] regex_selector = '(.*)' # default regex selector for custom command: use complete output vcs_cmd = kwargs['command'] source_dir = os.path.normpath(os.path.join(self.environment.get_source_dir(), self.subdir)) if vcs_cmd: if isinstance(vcs_cmd[0], mesonlib.File): FeatureNew.single_use('vcs_tag with file as the first argument', '0.62.0', self.subproject, location=node) maincmd = self.find_program_impl(vcs_cmd[0], required=False) if maincmd.found(): vcs_cmd[0] = maincmd else: vcs = mesonlib.detect_vcs(source_dir) if vcs: mlog.log('Found {} repository at {}'.format(vcs['name'], vcs['wc_dir'])) vcs_cmd = vcs['get_rev'].split() regex_selector = vcs['rev_regex'] else: vcs_cmd = [' '] # executing this cmd will fail in vcstagger.py and force to use the fallback string # vcstagger.py parameters: infile, outfile, fallback, source_dir, replace_string, regex_selector, command... self._validate_custom_target_outputs(len(kwargs['input']) > 1, kwargs['output'], "vcs_tag") tg = build.CustomTarget( kwargs['output'][0], self.subdir, self.subproject, self.environment.get_build_command() + ['--internal', 'vcstagger', '@INPUT0@', '@OUTPUT0@', fallback, source_dir, replace_string, regex_selector] + vcs_cmd, self.source_strings_to_files(kwargs['input']), kwargs['output'], build_by_default=True, build_always_stale=True, ) self.add_target(tg.name, tg) return tg @FeatureNew('subdir_done', '0.46.0') @noPosargs @noKwargs def func_subdir_done(self, node, args, kwargs): raise SubdirDoneRequest() @staticmethod def _validate_custom_target_outputs(has_multi_in: bool, outputs: T.Iterable[str], name: str) -> None: """Checks for additional invalid values in a custom_target output. This cannot be done with typed_kwargs because it requires the number of inputs. 
""" for out in outputs: if has_multi_in and ('@PLAINNAME@' in out or '@BASENAME@' in out): raise InvalidArguments(f'{name}: output cannot containe "@PLAINNAME@" or "@BASENAME@" ' 'when there is more than one input (we can\'t know which to use)') @typed_pos_args('custom_target', optargs=[str]) @typed_kwargs( 'custom_target', COMMAND_KW, CT_BUILD_ALWAYS, CT_BUILD_ALWAYS_STALE, CT_BUILD_BY_DEFAULT, CT_INPUT_KW, CT_INSTALL_DIR_KW, CT_INSTALL_TAG_KW, CT_OUTPUT_KW, DEPENDS_KW, DEPEND_FILES_KW, DEPFILE_KW, ENV_KW.evolve(since='0.57.0'), INSTALL_KW, INSTALL_MODE_KW.evolve(since='0.47.0'), OVERRIDE_OPTIONS_KW, KwargInfo('feed', bool, default=False, since='0.59.0'), KwargInfo('capture', bool, default=False), KwargInfo('console', bool, default=False, since='0.48.0'), ) def func_custom_target(self, node: mparser.FunctionNode, args: T.Tuple[str], kwargs: 'kwargs.CustomTarget') -> build.CustomTarget: if kwargs['depfile'] and ('@BASENAME@' in kwargs['depfile'] or '@PLAINNAME@' in kwargs['depfile']): FeatureNew.single_use('substitutions in custom_target depfile', '0.47.0', self.subproject, location=node) # Don't mutate the kwargs build_by_default = kwargs['build_by_default'] build_always_stale = kwargs['build_always_stale'] # Remap build_always to build_by_default and build_always_stale if kwargs['build_always'] is not None and kwargs['build_always_stale'] is not None: raise InterpreterException('CustomTarget: "build_always" and "build_always_stale" are mutually exclusive') if build_by_default is None and kwargs['install']: build_by_default = True elif kwargs['build_always'] is not None: if build_by_default is None: build_by_default = kwargs['build_always'] build_always_stale = kwargs['build_by_default'] # These are are nullaable so that we can know whether they're explicitly # set or not. If they haven't been overwritten, set them to their true # default if build_by_default is None: build_by_default = False if build_always_stale is None: build_always_stale = False name = args[0] if name is None: # name will default to first output, but we cannot do that yet because # they could need substitutions (e.g. @BASENAME@) first. CustomTarget() # will take care of setting a proper default but name must be an empty # string in the meantime. 
FeatureNew.single_use('custom_target() with no name argument', '0.60.0', self.subproject, location=node) name = '' inputs = self.source_strings_to_files(kwargs['input'], strict=False) command = kwargs['command'] if command and isinstance(command[0], str): command[0] = self.find_program_impl([command[0]]) if len(inputs) > 1 and kwargs['feed']: raise InvalidArguments('custom_target: "feed" keyword argument can only be used with a single input') if len(kwargs['output']) > 1 and kwargs['capture']: raise InvalidArguments('custom_target: "capture" keyword argument can only be used with a single output') if kwargs['capture'] and kwargs['console']: raise InvalidArguments('custom_target: "capture" and "console" keyword arguments are mutually exclusive') for c in command: if kwargs['capture'] and isinstance(c, str) and '@OUTPUT@' in c: raise InvalidArguments('custom_target: "capture" keyword argument cannot be used with "@OUTPUT@"') if kwargs['feed'] and isinstance(c, str) and '@INPUT@' in c: raise InvalidArguments('custom_target: "feed" keyword argument cannot be used with "@INPUT@"') if kwargs['install'] and not kwargs['install_dir']: raise InvalidArguments('custom_target: "install_dir" keyword argument must be set when "install" is true.') if len(kwargs['install_dir']) > 1: FeatureNew.single_use('multiple install_dir for custom_target', '0.40.0', self.subproject, location=node) if len(kwargs['install_tag']) not in {0, 1, len(kwargs['output'])}: raise InvalidArguments('custom_target: install_tag argument must have 0 or 1 outputs, ' 'or the same number of elements as the output keyword argument. ' f'(there are {len(kwargs["install_tag"])} install_tags, ' f'and {len(kwargs["output"])} outputs)') self._validate_custom_target_outputs(len(inputs) > 1, kwargs['output'], "custom_target") tg = build.CustomTarget( name, self.subdir, self.subproject, command, inputs, kwargs['output'], build_always_stale=build_always_stale, build_by_default=build_by_default, capture=kwargs['capture'], console=kwargs['console'], depend_files=kwargs['depend_files'], depfile=kwargs['depfile'], extra_depends=kwargs['depends'], env=kwargs['env'], feed=kwargs['feed'], install=kwargs['install'], install_dir=kwargs['install_dir'], install_mode=kwargs['install_mode'], install_tag=kwargs['install_tag'], override_options=kwargs['override_options'], backend=self.backend) self.add_target(tg.name, tg) return tg @typed_pos_args('run_target', str) @typed_kwargs( 'run_target', COMMAND_KW, DEPENDS_KW, ENV_KW.evolve(since='0.57.0'), ) def func_run_target(self, node: mparser.FunctionNode, args: T.Tuple[str], kwargs: 'kwargs.RunTarget') -> build.RunTarget: all_args = kwargs['command'].copy() for i in listify(all_args): if isinstance(i, ExternalProgram) and not i.found(): raise InterpreterException(f'Tried to use non-existing executable {i.name!r}') if isinstance(all_args[0], str): all_args[0] = self.find_program_impl([all_args[0]]) name = args[0] tg = build.RunTarget(name, all_args, kwargs['depends'], self.subdir, self.subproject, kwargs['env']) self.add_target(name, tg) full_name = (self.subproject, name) assert full_name not in self.build.run_target_names self.build.run_target_names.add(full_name) return tg @FeatureNew('alias_target', '0.52.0') @typed_pos_args('alias_target', str, varargs=build.Target, min_varargs=1) @noKwargs def func_alias_target(self, node: mparser.BaseNode, args: T.Tuple[str, T.List[build.Target]], kwargs: 'TYPE_kwargs') -> build.AliasTarget: name, deps = args tg = build.AliasTarget(name, deps, self.subdir, 
self.subproject) self.add_target(name, tg) return tg @typed_pos_args('generator', (build.Executable, ExternalProgram)) @typed_kwargs( 'generator', KwargInfo('arguments', ContainerTypeInfo(list, str, allow_empty=False), required=True, listify=True), KwargInfo('output', ContainerTypeInfo(list, str, allow_empty=False), required=True, listify=True), DEPFILE_KW, DEPENDS_KW, KwargInfo('capture', bool, default=False, since='0.43.0'), ) def func_generator(self, node: mparser.FunctionNode, args: T.Tuple[T.Union[build.Executable, ExternalProgram]], kwargs: 'kwargs.FuncGenerator') -> build.Generator: for rule in kwargs['output']: if '@BASENAME@' not in rule and '@PLAINNAME@' not in rule: raise InvalidArguments('Every element of "output" must contain @BASENAME@ or @PLAINNAME@.') if has_path_sep(rule): raise InvalidArguments('"output" must not contain a directory separator.') if len(kwargs['output']) > 1: for o in kwargs['output']: if '@OUTPUT@' in o: raise InvalidArguments('Tried to use @OUTPUT@ in a rule with more than one output.') gen = build.Generator(args[0], **kwargs) self.generators.append(gen) return gen @typed_pos_args('benchmark', str, (build.Executable, build.Jar, ExternalProgram, mesonlib.File)) @typed_kwargs('benchmark', *TEST_KWARGS) def func_benchmark(self, node: mparser.BaseNode, args: T.Tuple[str, T.Union[build.Executable, build.Jar, ExternalProgram, mesonlib.File]], kwargs: 'kwargs.FuncBenchmark') -> None: self.add_test(node, args, kwargs, False) @typed_pos_args('test', str, (build.Executable, build.Jar, ExternalProgram, mesonlib.File)) @typed_kwargs('test', *TEST_KWARGS, KwargInfo('is_parallel', bool, default=True)) def func_test(self, node: mparser.BaseNode, args: T.Tuple[str, T.Union[build.Executable, build.Jar, ExternalProgram, mesonlib.File]], kwargs: 'kwargs.FuncTest') -> None: self.add_test(node, args, kwargs, True) def unpack_env_kwarg(self, kwargs: T.Union[build.EnvironmentVariables, T.Dict[str, 'TYPE_var'], T.List['TYPE_var'], str]) -> build.EnvironmentVariables: envlist = kwargs.get('env') if envlist is None: return build.EnvironmentVariables() msg = ENV_KW.validator(envlist) if msg: raise InvalidArguments(f'"env": {msg}') return ENV_KW.convertor(envlist) def make_test(self, node: mparser.BaseNode, args: T.Tuple[str, T.Union[build.Executable, build.Jar, ExternalProgram, mesonlib.File]], kwargs: 'kwargs.BaseTest') -> Test: name = args[0] if ':' in name: mlog.deprecation(f'":" is not allowed in test name "{name}", it has been replaced with "_"', location=node) name = name.replace(':', '_') exe = args[1] if isinstance(exe, ExternalProgram): if not exe.found(): raise InvalidArguments('Tried to use not-found external program as test exe') elif isinstance(exe, mesonlib.File): exe = self.find_program_impl([exe]) env = self.unpack_env_kwarg(kwargs) if kwargs['timeout'] <= 0: FeatureNew.single_use('test() timeout <= 0', '0.57.0', self.subproject, location=node) prj = self.subproject if self.is_subproject() else self.build.project_name suite: T.List[str] = [] for s in kwargs['suite']: if s: s = ':' + s suite.append(prj.replace(' ', '_').replace(':', '_') + s) return Test(name, prj, suite, exe, kwargs['depends'], kwargs.get('is_parallel', False), kwargs['args'], env, kwargs['should_fail'], kwargs['timeout'], kwargs['workdir'], kwargs['protocol'], kwargs['priority'], kwargs['verbose']) def add_test(self, node: mparser.BaseNode, args: T.List, kwargs: T.Dict[str, T.Any], is_base_test: bool): t = self.make_test(node, args, kwargs) if is_base_test: self.build.tests.append(t) 
mlog.debug('Adding test', mlog.bold(t.name, True)) else: self.build.benchmarks.append(t) mlog.debug('Adding benchmark', mlog.bold(t.name, True)) @typed_pos_args('install_headers', varargs=(str, mesonlib.File)) @typed_kwargs( 'install_headers', KwargInfo('install_dir', (str, NoneType)), KwargInfo('subdir', (str, NoneType)), INSTALL_MODE_KW.evolve(since='0.47.0'), ) def func_install_headers(self, node: mparser.BaseNode, args: T.Tuple[T.List['mesonlib.FileOrString']], kwargs: 'kwargs.FuncInstallHeaders') -> build.Headers: source_files = self.source_strings_to_files(args[0]) install_subdir = kwargs['subdir'] if install_subdir is not None: if kwargs['install_dir'] is not None: raise InterpreterException('install_headers: cannot specify both "install_dir" and "subdir". Use only "install_dir".') if os.path.isabs(install_subdir): mlog.deprecation('Subdir keyword must not be an absolute path. This will be a hard error in the next release.') h = build.Headers(source_files, install_subdir, kwargs['install_dir'], kwargs['install_mode'], self.subproject) self.build.headers.append(h) return h @typed_pos_args('install_man', varargs=(str, mesonlib.File)) @typed_kwargs( 'install_man', KwargInfo('install_dir', (str, NoneType)), KwargInfo('locale', (str, NoneType), since='0.58.0'), INSTALL_MODE_KW.evolve(since='0.47.0') ) def func_install_man(self, node: mparser.BaseNode, args: T.Tuple[T.List['mesonlib.FileOrString']], kwargs: 'kwargs.FuncInstallMan') -> build.Man: # We just need to narrow this, because the input is limited to files and # Strings as inputs, so only Files will be returned sources = self.source_strings_to_files(args[0]) for s in sources: try: num = int(s.rsplit('.', 1)[-1]) except (IndexError, ValueError): num = 0 if not 1 <= num <= 9: raise InvalidArguments('Man file must have a file extension of a number between 1 and 9') m = build.Man(sources, kwargs['install_dir'], kwargs['install_mode'], self.subproject, kwargs['locale']) self.build.man.append(m) return m @FeatureNew('install_emptydir', '0.60.0') @typed_kwargs( 'install_emptydir', INSTALL_MODE_KW, KwargInfo('install_tag', (str, NoneType), since='0.62.0') ) def func_install_emptydir(self, node: mparser.BaseNode, args: T.Tuple[str], kwargs) -> None: d = build.EmptyDir(args[0], kwargs['install_mode'], self.subproject, kwargs['install_tag']) self.build.emptydir.append(d) return d @FeatureNew('install_symlink', '0.61.0') @typed_pos_args('symlink_name', str) @typed_kwargs( 'install_symlink', KwargInfo('pointing_to', str, required=True), KwargInfo('install_dir', str, required=True), INSTALL_TAG_KW, ) def func_install_symlink(self, node: mparser.BaseNode, args: T.Tuple[T.List[str]], kwargs) -> build.SymlinkData: name = args[0] # Validation while creating the SymlinkData object target = kwargs['pointing_to'] l = build.SymlinkData(target, name, kwargs['install_dir'], self.subproject, kwargs['install_tag']) self.build.symlinks.append(l) return l @FeatureNew('structured_sources', '0.62.0') @typed_pos_args('structured_sources', object, optargs=[dict]) @noKwargs @noArgsFlattening def func_structured_sources( self, node: mparser.BaseNode, args: T.Tuple[object, T.Optional[T.Dict[str, object]]], kwargs: 'TYPE_kwargs') -> build.StructuredSources: valid_types = (str, mesonlib.File, build.GeneratedList, build.CustomTarget, build.CustomTargetIndex, build.GeneratedList) sources: T.Dict[str, T.List[T.Union[mesonlib.File, 'build.GeneratedTypes']]] = collections.defaultdict(list) for arg in mesonlib.listify(args[0]): if not isinstance(arg, valid_types): raise 
InvalidArguments(f'structured_sources: type "{type(arg)}" is not valid') if isinstance(arg, str): arg = mesonlib.File.from_source_file(self.environment.source_dir, self.subdir, arg) sources[''].append(arg) if args[1]: if '' in args[1]: raise InvalidArguments('structured_sources: keys to dictionary argument may not be an empty string.') for k, v in args[1].items(): for arg in mesonlib.listify(v): if not isinstance(arg, valid_types): raise InvalidArguments(f'structured_sources: type "{type(arg)}" is not valid') if isinstance(arg, str): arg = mesonlib.File.from_source_file(self.environment.source_dir, self.subdir, arg) sources[k].append(arg) return build.StructuredSources(sources) @typed_pos_args('subdir', str) @typed_kwargs( 'subdir', KwargInfo( 'if_found', ContainerTypeInfo(list, object), validator=lambda a: 'Objects must have a found() method' if not all(hasattr(x, 'found') for x in a) else None, since='0.44.0', default=[], listify=True, ), ) def func_subdir(self, node: mparser.BaseNode, args: T.Tuple[str], kwargs: 'kwargs.Subdir') -> None: mesonlib.check_direntry_issues(args) if '..' in args[0]: raise InvalidArguments('Subdir contains ..') if self.subdir == '' and args[0] == self.subproject_dir: raise InvalidArguments('Must not go into subprojects dir with subdir(), use subproject() instead.') if self.subdir == '' and args[0].startswith('meson-'): raise InvalidArguments('The "meson-" prefix is reserved and cannot be used for top-level subdir().') if args[0] == '': raise InvalidArguments("The argument given to subdir() is the empty string ''. This is prohibited.") for i in kwargs['if_found']: if not i.found(): return prev_subdir = self.subdir subdir = os.path.join(prev_subdir, args[0]) if os.path.isabs(subdir): raise InvalidArguments('Subdir argument must be a relative path.') absdir = os.path.join(self.environment.get_source_dir(), subdir) symlinkless_dir = os.path.realpath(absdir) build_file = os.path.join(symlinkless_dir, 'meson.build') if build_file in self.processed_buildfiles: raise InvalidArguments(f'Tried to enter directory "{subdir}", which has already been visited.') self.processed_buildfiles.add(build_file) self.subdir = subdir os.makedirs(os.path.join(self.environment.build_dir, subdir), exist_ok=True) buildfilename = os.path.join(self.subdir, environment.build_filename) self.build_def_files.add(buildfilename) absname = os.path.join(self.environment.get_source_dir(), buildfilename) if not os.path.isfile(absname): self.subdir = prev_subdir raise InterpreterException(f"Non-existent build file '{buildfilename!s}'") with open(absname, encoding='utf-8') as f: code = f.read() assert isinstance(code, str) try: codeblock = mparser.Parser(code, absname).parse() except mesonlib.MesonException as me: me.file = absname raise me try: self.evaluate_codeblock(codeblock) except SubdirDoneRequest: pass self.subdir = prev_subdir def _get_kwarg_install_mode(self, kwargs: T.Dict[str, T.Any]) -> T.Optional[FileMode]: if kwargs.get('install_mode', None) is None: return None if isinstance(kwargs['install_mode'], FileMode): return kwargs['install_mode'] install_mode: T.List[str] = [] mode = mesonlib.typeslistify(kwargs.get('install_mode', []), (str, int)) for m in mode: # We skip any arguments that are set to `false` if m is False: m = None install_mode.append(m) if len(install_mode) > 3: raise InvalidArguments('Keyword argument install_mode takes at ' 'most 3 arguments.') if len(install_mode) > 0 and install_mode[0] is not None and \ not isinstance(install_mode[0], str): raise 
InvalidArguments('Keyword argument install_mode requires the ' 'permissions arg to be a string or false') return FileMode(*install_mode) @typed_pos_args('install_data', varargs=(str, mesonlib.File)) @typed_kwargs( 'install_data', KwargInfo('install_dir', (str, NoneType)), KwargInfo('sources', ContainerTypeInfo(list, (str, mesonlib.File)), listify=True, default=[]), KwargInfo('rename', ContainerTypeInfo(list, str), default=[], listify=True, since='0.46.0'), INSTALL_MODE_KW.evolve(since='0.38.0'), INSTALL_TAG_KW.evolve(since='0.60.0'), ) def func_install_data(self, node: mparser.BaseNode, args: T.Tuple[T.List['mesonlib.FileOrString']], kwargs: 'kwargs.FuncInstallData') -> build.Data: sources = self.source_strings_to_files(args[0] + kwargs['sources']) rename = kwargs['rename'] or None if rename: if len(rename) != len(sources): raise InvalidArguments( '"rename" and "sources" argument lists must be the same length if "rename" is given. ' f'Rename has {len(rename)} elements and sources has {len(sources)}.') install_dir_name = kwargs['install_dir'] if install_dir_name: if not os.path.isabs(install_dir_name): install_dir_name = os.path.join('{datadir}', install_dir_name) else: install_dir_name = '{datadir}' return self.install_data_impl(sources, kwargs['install_dir'], kwargs['install_mode'], rename, kwargs['install_tag'], install_dir_name) def install_data_impl(self, sources: T.List[mesonlib.File], install_dir: str, install_mode: FileMode, rename: T.Optional[str], tag: T.Optional[str], install_dir_name: T.Optional[str] = None, install_data_type: T.Optional[str] = None) -> build.Data: """Just the implementation with no validation.""" data = build.Data(sources, install_dir, install_dir_name or install_dir, install_mode, self.subproject, rename, tag, install_data_type) self.build.data.append(data) return data @typed_pos_args('install_subdir', str) @typed_kwargs( 'install_subdir', KwargInfo('install_dir', str, required=True), KwargInfo('strip_directory', bool, default=False), KwargInfo('exclude_files', ContainerTypeInfo(list, str), default=[], listify=True, since='0.42.0', validator=lambda x: 'cannot be absolute' if any(os.path.isabs(d) for d in x) else None), KwargInfo('exclude_directories', ContainerTypeInfo(list, str), default=[], listify=True, since='0.42.0', validator=lambda x: 'cannot be absolute' if any(os.path.isabs(d) for d in x) else None), INSTALL_MODE_KW.evolve(since='0.38.0'), INSTALL_TAG_KW.evolve(since='0.60.0'), ) def func_install_subdir(self, node: mparser.BaseNode, args: T.Tuple[str], kwargs: 'kwargs.FuncInstallSubdir') -> build.InstallDir: exclude = (set(kwargs['exclude_files']), set(kwargs['exclude_directories'])) idir = build.InstallDir( self.subdir, args[0], kwargs['install_dir'], kwargs['install_mode'], exclude, kwargs['strip_directory'], self.subproject, install_tag=kwargs['install_tag']) self.build.install_dirs.append(idir) return idir @noPosargs @typed_kwargs( 'configure_file', DEPFILE_KW.evolve(since='0.52.0'), INSTALL_MODE_KW.evolve(since='0.47.0,'), INSTALL_TAG_KW.evolve(since='0.60.0'), KwargInfo('capture', bool, default=False, since='0.41.0'), KwargInfo( 'command', (ContainerTypeInfo(list, (build.Executable, ExternalProgram, compilers.Compiler, mesonlib.File, str), allow_empty=False), NoneType), listify=True, ), KwargInfo( 'configuration', (ContainerTypeInfo(dict, (str, int, bool)), build.ConfigurationData, NoneType), ), KwargInfo('copy', bool, default=False, since='0.47.0'), KwargInfo('encoding', str, default='utf-8', since='0.47.0'), KwargInfo('format', str, 
default='meson', since='0.46.0', validator=in_set_validator({'meson', 'cmake', 'cmake@'})), KwargInfo( 'input', ContainerTypeInfo(list, (mesonlib.File, str)), listify=True, default=[], ), # Cannot use shared implementation until None backwards compat is dropped KwargInfo('install', (bool, NoneType), since='0.50.0'), KwargInfo('install_dir', (str, bool), default='', validator=lambda x: 'must be `false` if boolean' if x is True else None), KwargInfo('output', str, required=True), KwargInfo('output_format', str, default='c', since='0.47.0', validator=in_set_validator({'c', 'nasm'})), ) def func_configure_file(self, node: mparser.BaseNode, args: T.List[TYPE_var], kwargs: kwargs.ConfigureFile): actions = sorted(x for x in {'configuration', 'command', 'copy'} if kwargs[x] not in [None, False]) num_actions = len(actions) if num_actions == 0: raise InterpreterException('Must specify an action with one of these ' 'keyword arguments: \'configuration\', ' '\'command\', or \'copy\'.') elif num_actions == 2: raise InterpreterException('Must not specify both {!r} and {!r} ' 'keyword arguments since they are ' 'mutually exclusive.'.format(*actions)) elif num_actions == 3: raise InterpreterException('Must specify one of {!r}, {!r}, and ' '{!r} keyword arguments since they are ' 'mutually exclusive.'.format(*actions)) if kwargs['capture'] and not kwargs['command']: raise InvalidArguments('configure_file: "capture" keyword requires "command" keyword.') fmt = kwargs['format'] output_format = kwargs['output_format'] depfile = kwargs['depfile'] # Validate input inputs = self.source_strings_to_files(kwargs['input']) inputs_abs = [] for f in inputs: if isinstance(f, mesonlib.File): inputs_abs.append(f.absolute_path(self.environment.source_dir, self.environment.build_dir)) self.add_build_def_file(f) else: raise InterpreterException('Inputs can only be strings or file objects') # Validate output output = kwargs['output'] if inputs_abs: values = mesonlib.get_filenames_templates_dict(inputs_abs, None) outputs = mesonlib.substitute_values([output], values) output = outputs[0] if depfile: depfile = mesonlib.substitute_values([depfile], values)[0] ofile_rpath = os.path.join(self.subdir, output) if ofile_rpath in self.configure_file_outputs: mesonbuildfile = os.path.join(self.subdir, 'meson.build') current_call = f"{mesonbuildfile}:{self.current_lineno}" first_call = "{}:{}".format(mesonbuildfile, self.configure_file_outputs[ofile_rpath]) mlog.warning('Output file', mlog.bold(ofile_rpath, True), 'for configure_file() at', current_call, 'overwrites configure_file() output at', first_call) else: self.configure_file_outputs[ofile_rpath] = self.current_lineno if os.path.dirname(output) != '': raise InterpreterException('Output file name must not contain a subdirectory.') (ofile_path, ofile_fname) = os.path.split(os.path.join(self.subdir, output)) ofile_abs = os.path.join(self.environment.build_dir, ofile_path, ofile_fname) # Perform the appropriate action if kwargs['configuration'] is not None: conf = kwargs['configuration'] if isinstance(conf, dict): FeatureNew.single_use('configure_file.configuration dictionary', '0.49.0', self.subproject, location=node) for k, v in conf.items(): if not isinstance(v, (str, int, bool)): raise InvalidArguments( f'"configuration_data": initial value dictionary key "{k!r}"" must be "str | int | bool", not "{v!r}"') conf = build.ConfigurationData(conf) mlog.log('Configuring', mlog.bold(output), 'using configuration') if len(inputs) > 1: raise InterpreterException('At most one input file can 
given in configuration mode') if inputs: os.makedirs(os.path.join(self.environment.build_dir, self.subdir), exist_ok=True) file_encoding = kwargs['encoding'] missing_variables, confdata_useless = \ mesonlib.do_conf_file(inputs_abs[0], ofile_abs, conf, fmt, file_encoding) if missing_variables: var_list = ", ".join(map(repr, sorted(missing_variables))) mlog.warning( f"The variable(s) {var_list} in the input file '{inputs[0]}' are not " "present in the given configuration data.", location=node) if confdata_useless: ifbase = os.path.basename(inputs_abs[0]) tv = FeatureNew.get_target_version(self.subproject) if FeatureNew.check_version(tv, '0.47.0'): mlog.warning('Got an empty configuration_data() object and found no ' f'substitutions in the input file {ifbase!r}. If you want to ' 'copy a file to the build dir, use the \'copy:\' keyword ' 'argument added in 0.47.0', location=node) else: mesonlib.dump_conf_header(ofile_abs, conf, output_format) conf.used = True elif kwargs['command'] is not None: if len(inputs) > 1: FeatureNew.single_use('multiple inputs in configure_file()', '0.52.0', self.subproject, location=node) # We use absolute paths for input and output here because the cwd # that the command is run from is 'unspecified', so it could change. # Currently it's builddir/subdir for in_builddir else srcdir/subdir. values = mesonlib.get_filenames_templates_dict(inputs_abs, [ofile_abs]) if depfile: depfile = os.path.join(self.environment.get_scratch_dir(), depfile) values['@DEPFILE@'] = depfile # Substitute @INPUT@, @OUTPUT@, etc here. _cmd = mesonlib.substitute_values(kwargs['command'], values) mlog.log('Configuring', mlog.bold(output), 'with command') cmd, *args = _cmd res = self.run_command_impl(node, (cmd, args), {'capture': True, 'check': True, 'env': build.EnvironmentVariables()}, True) if kwargs['capture']: dst_tmp = ofile_abs + '~' file_encoding = kwargs['encoding'] with open(dst_tmp, 'w', encoding=file_encoding) as f: f.writelines(res.stdout) if inputs_abs: shutil.copymode(inputs_abs[0], dst_tmp) mesonlib.replace_if_different(ofile_abs, dst_tmp) if depfile: mlog.log('Reading depfile:', mlog.bold(depfile)) with open(depfile, encoding='utf-8') as f: df = DepFile(f.readlines()) deps = df.get_all_dependencies(ofile_fname) for dep in deps: self.add_build_def_file(dep) elif kwargs['copy']: if len(inputs_abs) != 1: raise InterpreterException('Exactly one input file must be given in copy mode') os.makedirs(os.path.join(self.environment.build_dir, self.subdir), exist_ok=True) shutil.copy2(inputs_abs[0], ofile_abs) # Install file if requested, we check for the empty string # for backwards compatibility. That was the behaviour before # 0.45.0 so preserve it. 
idir = kwargs['install_dir'] if idir is False: idir = '' FeatureDeprecated.single_use('configure_file install_dir: false', '0.50.0', self.subproject, 'Use the `install:` kwarg instead', location=node) install = kwargs['install'] if kwargs['install'] is not None else idir != '' if install: if not idir: raise InterpreterException( '"install_dir" must be specified when "install" in a configure_file is true') cfile = mesonlib.File.from_built_file(ofile_path, ofile_fname) install_mode = kwargs['install_mode'] install_tag = kwargs['install_tag'] self.build.data.append(build.Data([cfile], idir, idir, install_mode, self.subproject, install_tag=install_tag, data_type='configure')) return mesonlib.File.from_built_file(self.subdir, output) def extract_incdirs(self, kwargs, key: str = 'include_directories'): prospectives = extract_as_list(kwargs, key) result = [] for p in prospectives: if isinstance(p, build.IncludeDirs): result.append(p) elif isinstance(p, str): result.append(self.build_incdir_object([p])) else: raise InterpreterException('Include directory objects can only be created from strings or include directories.') return result @typed_pos_args('include_directories', varargs=str) @typed_kwargs('include_directories', KwargInfo('is_system', bool, default=False)) def func_include_directories(self, node: mparser.BaseNode, args: T.Tuple[T.List[str]], kwargs: 'kwargs.FuncIncludeDirectories') -> build.IncludeDirs: return self.build_incdir_object(args[0], kwargs['is_system']) def build_incdir_object(self, incdir_strings: T.List[str], is_system: bool = False) -> build.IncludeDirs: if not isinstance(is_system, bool): raise InvalidArguments('Is_system must be boolean.') src_root = self.environment.get_source_dir() build_root = self.environment.get_build_dir() absbase_src = os.path.join(src_root, self.subdir) absbase_build = os.path.join(build_root, self.subdir) for a in incdir_strings: if a.startswith(src_root): raise InvalidArguments(textwrap.dedent('''\ Tried to form an absolute path to a source dir. You should not do that but use relative paths instead. To get include path to any directory relative to the current dir do incdir = include_directories(dirname) After this incdir will contain both the current source dir as well as the corresponding build dir. It can then be used in any subdirectory and Meson will take care of all the busywork to make paths work. Dirname can even be '.' to mark the current directory. Though you should remember that the current source and build directories are always put in the include directories by default so you only need to do include_directories('.') if you intend to use the result in a different subdirectory. ''')) else: try: self.validate_within_subproject(self.subdir, a) except InterpreterException: mlog.warning('include_directories sandbox violation!', location=self.current_node) print(textwrap.dedent(f'''\ The project is trying to access the directory {a!r} which belongs to a different subproject. This is a problem as it hardcodes the relative paths of these two projects. This makes it impossible to compile the project in any other directory layout and also prevents the subproject from changing its own directory layout. Instead of poking directly at the internals the subproject should be executed and it should set a variable that the caller can then use. 
Something like: # In subproject some_dep = declare_dependency(include_directories: include_directories('include')) # In subproject wrap file [provide] some = some_dep # In parent project some_dep = dependency('some') executable(..., dependencies: [some_dep]) This warning will become a hard error in a future Meson release. ''')) absdir_src = os.path.join(absbase_src, a) absdir_build = os.path.join(absbase_build, a) if not os.path.isdir(absdir_src) and not os.path.isdir(absdir_build): raise InvalidArguments(f'Include dir {a} does not exist.') i = build.IncludeDirs(self.subdir, incdir_strings, is_system) return i @typed_pos_args('add_test_setup', str) @typed_kwargs( 'add_test_setup', KwargInfo('exe_wrapper', ContainerTypeInfo(list, (str, ExternalProgram)), listify=True, default=[]), KwargInfo('gdb', bool, default=False), KwargInfo('timeout_multiplier', int, default=1), KwargInfo('exclude_suites', ContainerTypeInfo(list, str), listify=True, default=[], since='0.57.0'), KwargInfo('is_default', bool, default=False, since='0.49.0'), ENV_KW, ) def func_add_test_setup(self, node: mparser.BaseNode, args: T.Tuple[str], kwargs: 'kwargs.AddTestSetup') -> None: setup_name = args[0] if re.fullmatch('([_a-zA-Z][_0-9a-zA-Z]*:)?[_a-zA-Z][_0-9a-zA-Z]*', setup_name) is None: raise InterpreterException('Setup name may only contain alphanumeric characters.') if ":" not in setup_name: setup_name = f'{(self.subproject if self.subproject else self.build.project_name)}:{setup_name}' exe_wrapper: T.List[str] = [] for i in kwargs['exe_wrapper']: if isinstance(i, str): exe_wrapper.append(i) else: if not i.found(): raise InterpreterException('Tried to use non-found executable.') exe_wrapper += i.get_command() timeout_multiplier = kwargs['timeout_multiplier'] if timeout_multiplier <= 0: FeatureNew('add_test_setup() timeout_multiplier <= 0', '0.57.0').use(self.subproject) if kwargs['is_default']: if self.build.test_setup_default_name is not None: raise InterpreterException(f'{self.build.test_setup_default_name!r} is already set as default. 
' 'is_default can be set to true only once') self.build.test_setup_default_name = setup_name self.build.test_setups[setup_name] = build.TestSetup(exe_wrapper, kwargs['gdb'], timeout_multiplier, kwargs['env'], kwargs['exclude_suites']) @typed_pos_args('add_global_arguments', varargs=str) @typed_kwargs('add_global_arguments', NATIVE_KW, LANGUAGE_KW) def func_add_global_arguments(self, node: mparser.FunctionNode, args: T.Tuple[T.List[str]], kwargs: 'kwargs.FuncAddProjectArgs') -> None: self._add_global_arguments(node, self.build.global_args[kwargs['native']], args[0], kwargs) @typed_pos_args('add_global_link_arguments', varargs=str) @typed_kwargs('add_global_arguments', NATIVE_KW, LANGUAGE_KW) def func_add_global_link_arguments(self, node: mparser.FunctionNode, args: T.Tuple[T.List[str]], kwargs: 'kwargs.FuncAddProjectArgs') -> None: self._add_global_arguments(node, self.build.global_link_args[kwargs['native']], args[0], kwargs) @typed_pos_args('add_project_arguments', varargs=str) @typed_kwargs('add_project_arguments', NATIVE_KW, LANGUAGE_KW) def func_add_project_arguments(self, node: mparser.FunctionNode, args: T.Tuple[T.List[str]], kwargs: 'kwargs.FuncAddProjectArgs') -> None: self._add_project_arguments(node, self.build.projects_args[kwargs['native']], args[0], kwargs) @typed_pos_args('add_project_link_arguments', varargs=str) @typed_kwargs('add_global_arguments', NATIVE_KW, LANGUAGE_KW) def func_add_project_link_arguments(self, node: mparser.FunctionNode, args: T.Tuple[T.List[str]], kwargs: 'kwargs.FuncAddProjectArgs') -> None: self._add_project_arguments(node, self.build.projects_link_args[kwargs['native']], args[0], kwargs) def _warn_about_builtin_args(self, args: T.List[str]) -> None: # -Wpedantic is deliberately not included, since some people want to use it but not use -Wextra # see e.g. # https://github.com/mesonbuild/meson/issues/3275#issuecomment-641354956 # https://github.com/mesonbuild/meson/issues/3742 warnargs = ('/W1', '/W2', '/W3', '/W4', '/Wall', '-Wall', '-Wextra') optargs = ('-O0', '-O2', '-O3', '-Os', '-Oz', '/O1', '/O2', '/Os') for arg in args: if arg in warnargs: mlog.warning(f'Consider using the built-in warning_level option instead of using "{arg}".', location=self.current_node) elif arg in optargs: mlog.warning(f'Consider using the built-in optimization level instead of using "{arg}".', location=self.current_node) elif arg == '-Werror': mlog.warning(f'Consider using the built-in werror option instead of using "{arg}".', location=self.current_node) elif arg == '-g': mlog.warning(f'Consider using the built-in debug option instead of using "{arg}".', location=self.current_node) elif arg.startswith('-fsanitize'): mlog.warning(f'Consider using the built-in option for sanitizers instead of using "{arg}".', location=self.current_node) elif arg.startswith('-std=') or arg.startswith('/std:'): mlog.warning(f'Consider using the built-in option for language standard version instead of using "{arg}".', location=self.current_node) def _add_global_arguments(self, node: mparser.FunctionNode, argsdict: T.Dict[str, T.List[str]], args: T.List[str], kwargs: 'kwargs.FuncAddProjectArgs') -> None: if self.is_subproject(): msg = f'Function \'{node.func_name}\' cannot be used in subprojects because ' \ 'there is no way to make that reliable.\nPlease only call ' \ 'this if is_subproject() returns false. Alternatively, ' \ 'define a variable that\ncontains your language-specific ' \ 'arguments and add it to the appropriate *_args kwarg ' \ 'in each target.' 
raise InvalidCode(msg) frozen = self.project_args_frozen or self.global_args_frozen self._add_arguments(node, argsdict, frozen, args, kwargs) def _add_project_arguments(self, node: mparser.FunctionNode, argsdict: T.Dict[str, T.Dict[str, T.List[str]]], args: T.List[str], kwargs: 'kwargs.FuncAddProjectArgs') -> None: if self.subproject not in argsdict: argsdict[self.subproject] = {} self._add_arguments(node, argsdict[self.subproject], self.project_args_frozen, args, kwargs) def _add_arguments(self, node: mparser.FunctionNode, argsdict: T.Dict[str, T.List[str]], args_frozen: bool, args: T.List[str], kwargs: 'kwargs.FuncAddProjectArgs') -> None: if args_frozen: msg = f'Tried to use \'{node.func_name}\' after a build target has been declared.\n' \ 'This is not permitted. Please declare all arguments before your targets.' raise InvalidCode(msg) self._warn_about_builtin_args(args) for lang in kwargs['language']: argsdict[lang] = argsdict.get(lang, []) + args @noArgsFlattening @typed_pos_args('environment', optargs=[(str, list, dict)]) @typed_kwargs('environment', ENV_METHOD_KW, ENV_SEPARATOR_KW.evolve(since='0.62.0')) def func_environment(self, node: mparser.FunctionNode, args: T.Tuple[T.Union[None, str, T.List['TYPE_var'], T.Dict[str, 'TYPE_var']]], kwargs: 'TYPE_kwargs') -> build.EnvironmentVariables: init = args[0] if init is not None: FeatureNew.single_use('environment positional arguments', '0.52.0', self.subproject, location=node) msg = ENV_KW.validator(init) if msg: raise InvalidArguments(f'"environment": {msg}') if isinstance(init, dict) and any(i for i in init.values() if isinstance(i, list)): FeatureNew.single_use('List of string in dictionary value', '0.62.0', self.subproject, location=node) return env_convertor_with_method(init, kwargs['method'], kwargs['separator']) return build.EnvironmentVariables() @typed_pos_args('join_paths', varargs=str, min_varargs=1) @noKwargs def func_join_paths(self, node: mparser.BaseNode, args: T.Tuple[T.List[str]], kwargs: 'TYPE_kwargs') -> str: return os.path.join(*args[0]).replace('\\', '/') def run(self) -> None: super().run() mlog.log('Build targets in project:', mlog.bold(str(len(self.build.targets)))) FeatureNew.report(self.subproject) FeatureDeprecated.report(self.subproject) if not self.is_subproject(): self.print_extra_warnings() if self.subproject == '': self._print_summary() def print_extra_warnings(self) -> None: # TODO cross compilation for c in self.coredata.compilers.host.values(): if c.get_id() == 'clang': self.check_clang_asan_lundef() break def check_clang_asan_lundef(self) -> None: if OptionKey('b_lundef') not in self.coredata.options: return if OptionKey('b_sanitize') not in self.coredata.options: return if (self.coredata.options[OptionKey('b_lundef')].value and self.coredata.options[OptionKey('b_sanitize')].value != 'none'): mlog.warning('''Trying to use {} sanitizer on Clang with b_lundef. This will probably not work. Try setting b_lundef to false instead.'''.format(self.coredata.options[OptionKey('b_sanitize')].value), location=self.current_node) # Check that the indicated file is within the same subproject # as we currently are. This is to stop people doing # nasty things like: # # f = files('../../master_src/file.c') # # Note that this is validated only when the file # object is generated. The result can be used in a different # subproject than it is defined in (due to e.g. a # declare_dependency). 
def validate_within_subproject(self, subdir, fname): srcdir = Path(self.environment.source_dir) norm = Path(srcdir, subdir, fname).resolve() if os.path.isdir(norm): inputtype = 'directory' else: inputtype = 'file' if srcdir not in norm.parents: # Grabbing files outside the source tree is ok. # This is for vendor stuff like: # # /opt/vendorsdk/src/file_with_license_restrictions.c return project_root = Path(srcdir, self.root_subdir) subproject_dir = project_root / self.subproject_dir if norm == project_root: return if project_root not in norm.parents: raise InterpreterException(f'Sandbox violation: Tried to grab {inputtype} {norm.name} outside current (sub)project.') if subproject_dir == norm or subproject_dir in norm.parents: raise InterpreterException(f'Sandbox violation: Tried to grab {inputtype} {norm.name} from a nested subproject.') @T.overload def source_strings_to_files(self, sources: T.List['mesonlib.FileOrString'], strict: bool = True) -> T.List['mesonlib.File']: ... @T.overload def source_strings_to_files(self, sources: T.List['mesonlib.FileOrString'], strict: bool = False) -> T.List['mesonlib.FileOrString']: ... # noqa: F811 @T.overload def source_strings_to_files(self, sources: T.List['SourceInputs'], strict: bool = True) -> T.List['SourceOutputs']: ... # noqa: F811 def source_strings_to_files(self, sources: T.List['SourceInputs'], strict: bool = True) -> T.List['SourceOutputs']: # noqa: F811 """Lower inputs to a list of Targets and Files, replacing any strings. :param sources: A raw (Meson DSL) list of inputs (targets, files, and strings) :raises InterpreterException: if any of the inputs are of an invalid type :return: A list of Targets and Files """ mesonlib.check_direntry_issues(sources) if not isinstance(sources, list): sources = [sources] results: T.List['SourceOutputs'] = [] for s in sources: if isinstance(s, str): if not strict and s.startswith(self.environment.get_build_dir()): results.append(s) mlog.warning(f'Source item {s!r} cannot be converted to File object, because it is a generated file. ' 'This will become a hard error in the future.', location=self.current_node) else: self.validate_within_subproject(self.subdir, s) results.append(mesonlib.File.from_source_file(self.environment.source_dir, self.subdir, s)) elif isinstance(s, mesonlib.File): results.append(s) elif isinstance(s, (build.GeneratedList, build.BuildTarget, build.CustomTargetIndex, build.CustomTarget, build.ExtractedObjects, build.StructuredSources)): results.append(s) else: raise InterpreterException(f'Source item is {s!r} instead of ' 'string or File-type object') return results def add_target(self, name, tobj): if name == '': raise InterpreterException('Target name must not be empty.') if name.strip() == '': raise InterpreterException('Target name must not consist only of whitespace.') if has_path_sep(name): pathseg = os.path.join(self.subdir, os.path.split(name)[0]) if os.path.exists(os.path.join(self.source_root, pathseg)): raise InvalidArguments(textwrap.dedent(f'''\ Target "{name}" has a path segment pointing to directory "{pathseg}". This is an error. To define a target that builds in that directory you must define it in the meson.build file in that directory. ''')) if name.startswith('meson-'): raise InvalidArguments("Target names starting with 'meson-' are reserved " "for Meson's internal use. Please rename.") if name in coredata.FORBIDDEN_TARGET_NAMES: raise InvalidArguments(f"Target name '{name}' is reserved for Meson's " "internal use. 
Please rename.") # To permit an executable and a shared library to have the # same name, such as "foo.exe" and "libfoo.a". idname = tobj.get_id() if idname in self.build.targets: raise InvalidCode(f'Tried to create target "{name}", but a target of that name already exists.') self.build.targets[idname] = tobj if idname not in self.coredata.target_guids: self.coredata.target_guids[idname] = str(uuid.uuid4()).upper() @FeatureNew('both_libraries', '0.46.0') def build_both_libraries(self, node, args, kwargs): shared_lib = self.build_target(node, args, kwargs, build.SharedLibrary) # Check if user forces non-PIC static library. pic = True key = OptionKey('b_staticpic') if 'pic' in kwargs: pic = kwargs['pic'] elif key in self.environment.coredata.options: pic = self.environment.coredata.options[key].value if self.backend.name == 'xcode': # Xcode is a bit special in that you can't (at least for the moment) # form a library only from object file inputs. The simple but inefficient # solution is to use the sources directly. This will lead to them being # built twice. This is unfortunate and slow, but at least it works. # Feel free to submit patches to get this fixed if it is an # issue for you. reuse_object_files = False else: reuse_object_files = pic if reuse_object_files: # Exclude sources from args and kwargs to avoid building them twice static_args = [args[0]] static_kwargs = kwargs.copy() static_kwargs['sources'] = [] static_kwargs['objects'] = shared_lib.extract_all_objects() else: static_args = args static_kwargs = kwargs static_lib = self.build_target(node, static_args, static_kwargs, build.StaticLibrary) return build.BothLibraries(shared_lib, static_lib) def build_library(self, node, args, kwargs): default_library = self.coredata.get_option(OptionKey('default_library', subproject=self.subproject)) if default_library == 'shared': return self.build_target(node, args, kwargs, build.SharedLibrary) elif default_library == 'static': return self.build_target(node, args, kwargs, build.StaticLibrary) elif default_library == 'both': return self.build_both_libraries(node, args, kwargs) else: raise InterpreterException(f'Unknown default_library value: {default_library}.') def build_target(self, node: mparser.BaseNode, args, kwargs, targetclass): @FeatureNewKwargs('build target', '0.42.0', ['rust_crate_type', 'build_rpath', 'implicit_include_directories']) @FeatureNewKwargs('build target', '0.41.0', ['rust_args']) @FeatureNewKwargs('build target', '0.40.0', ['build_by_default']) @FeatureNewKwargs('build target', '0.48.0', ['gnu_symbol_visibility']) def build_target_decorator_caller(self, node, args, kwargs): return True build_target_decorator_caller(self, node, args, kwargs) if not args: raise InterpreterException('Target does not have a name.') name, *sources = args for_machine = self.machine_from_native_kwarg(kwargs) if 'sources' in kwargs: sources += listify(kwargs['sources']) sources = self.source_strings_to_files(sources) objs = extract_as_list(kwargs, 'objects') kwargs['dependencies'] = extract_as_list(kwargs, 'dependencies') kwargs['install_mode'] = self._get_kwarg_install_mode(kwargs) if 'extra_files' in kwargs: ef = extract_as_list(kwargs, 'extra_files') kwargs['extra_files'] = self.source_strings_to_files(ef) self.check_sources_exist(os.path.join(self.source_root, self.subdir), sources) if targetclass not in {build.Executable, build.SharedLibrary, build.SharedModule, build.StaticLibrary, build.Jar}: mlog.debug('Unknown target type:', str(targetclass)) raise RuntimeError('Unreachable code') 
self.kwarg_strings_to_includedirs(kwargs) # Filter out kwargs from other target types. For example 'soversion' # passed to library() when default_library == 'static'. kwargs = {k: v for k, v in kwargs.items() if k in targetclass.known_kwargs} srcs: T.List['SourceInputs'] = [] struct: T.Optional[build.StructuredSources] = build.StructuredSources() for s in sources: if isinstance(s, build.StructuredSources): struct = struct + s else: srcs.append(s) if not struct: struct = None else: # Validate that we won't end up with two outputs with the same name. # i.e, don't allow: # [structured_sources('foo/bar.rs'), structured_sources('bar/bar.rs')] for v in struct.sources.values(): outputs: T.Set[str] = set() for f in v: o: T.List[str] if isinstance(f, str): o = [os.path.basename(f)] elif isinstance(f, mesonlib.File): o = [f.fname] else: o = f.get_outputs() conflicts = outputs.intersection(o) if conflicts: raise InvalidArguments.from_node( f"Conflicting sources in structured sources: {', '.join(sorted(conflicts))}", node=node) outputs.update(o) kwargs['include_directories'] = self.extract_incdirs(kwargs) target = targetclass(name, self.subdir, self.subproject, for_machine, srcs, struct, objs, self.environment, kwargs) target.project_version = self.project_version self.add_stdlib_info(target) self.add_target(name, target) self.project_args_frozen = True return target def kwarg_strings_to_includedirs(self, kwargs): if 'd_import_dirs' in kwargs: items = mesonlib.extract_as_list(kwargs, 'd_import_dirs') cleaned_items = [] for i in items: if isinstance(i, str): # BW compatibility. This was permitted so we must support it # for a few releases so people can transition to "correct" # path declarations. if os.path.normpath(i).startswith(self.environment.get_source_dir()): mlog.warning('''Building a path to the source dir is not supported. Use a relative path instead. This will become a hard error in the future.''', location=self.current_node) i = os.path.relpath(i, os.path.join(self.environment.get_source_dir(), self.subdir)) i = self.build_incdir_object([i]) cleaned_items.append(i) kwargs['d_import_dirs'] = cleaned_items def get_used_languages(self, target): result = set() for i in target.sources: for lang, c in self.coredata.compilers[target.for_machine].items(): if c.can_compile(i): result.add(lang) break return result def add_stdlib_info(self, target): for l in self.get_used_languages(target): dep = self.build.stdlibs[target.for_machine].get(l, None) if dep: target.add_deps(dep) def check_sources_exist(self, subdir, sources): for s in sources: if not isinstance(s, str): continue # This means a generated source and they always exist. 
fname = os.path.join(subdir, s) if not os.path.isfile(fname): raise InterpreterException(f'Tried to add non-existing source file {s}.') # Only permit object extraction from the same subproject def validate_extraction(self, buildtarget: mesonlib.HoldableObject) -> None: if self.subproject != buildtarget.subproject: raise InterpreterException('Tried to extract objects from a different subproject.') def is_subproject(self) -> bool: return self.subproject != '' @typed_pos_args('set_variable', str, object) @noKwargs @noArgsFlattening @noSecondLevelHolderResolving def func_set_variable(self, node: mparser.BaseNode, args: T.Tuple[str, object], kwargs: 'TYPE_kwargs') -> None: varname, value = args self.set_variable(varname, value, holderify=True) @typed_pos_args('get_variable', (str, Disabler), optargs=[object]) @noKwargs @noArgsFlattening @unholder_return def func_get_variable(self, node: mparser.BaseNode, args: T.Tuple[T.Union[str, Disabler], T.Optional[object]], kwargs: 'TYPE_kwargs') -> 'TYPE_var': varname, fallback = args if isinstance(varname, Disabler): return varname try: return self.variables[varname] except KeyError: if fallback is not None: return self._holderify(fallback) raise InterpreterException(f'Tried to get unknown variable "{varname}".') @typed_pos_args('is_variable', str) @noKwargs def func_is_variable(self, node: mparser.BaseNode, args: T.Tuple[str], kwargs: 'TYPE_kwargs') -> bool: return args[0] in self.variables @FeatureNew('unset_variable', '0.60.0') @typed_pos_args('unset_variable', str) @noKwargs def func_unset_variable(self, node: mparser.BaseNode, args: T.Tuple[str], kwargs: 'TYPE_kwargs') -> None: varname = args[0] try: del self.variables[varname] except KeyError: raise InterpreterException(f'Tried to unset unknown variable "{varname}".') @staticmethod def machine_from_native_kwarg(kwargs: T.Dict[str, T.Any]) -> MachineChoice: native = kwargs.get('native', False) if not isinstance(native, bool): raise InvalidArguments('Argument to "native" must be a boolean.') return MachineChoice.BUILD if native else MachineChoice.HOST @FeatureNew('is_disabler', '0.52.0') @typed_pos_args('is_disabler', object) @noKwargs def func_is_disabler(self, node: mparser.BaseNode, args: T.Tuple[object], kwargs: 'TYPE_kwargs') -> bool: return isinstance(args[0], Disabler) @noKwargs @FeatureNew('range', '0.58.0') @typed_pos_args('range', int, optargs=[int, int]) def func_range(self, node, args: T.Tuple[int, T.Optional[int], T.Optional[int]], kwargs: T.Dict[str, T.Any]) -> P_OBJ.RangeHolder: start, stop, step = args # Just like Python's range, we allow range(stop), range(start, stop), or # range(start, stop, step) if stop is None: stop = start start = 0 if step is None: step = 1 # This is more strict than Python's range() if start < 0: raise InterpreterException('start cannot be negative') if stop < start: raise InterpreterException('stop cannot be less than start') if step < 1: raise InterpreterException('step must be >=1') return P_OBJ.RangeHolder(start, stop, step, subproject=self.subproject)
[]
[]
[]
[]
[]
python
0
0
examples/examples.go
package main

import (
	"flag"
	"fmt"
	"net/http"
	"os"
	"path/filepath"
	"strconv"
)

var (
	port = flag.Int("port", 8080, "Port to listen on")
)

func main() {
	flag.Parse()
	mux := http.NewServeMux()
	paths := map[string]string{
		"/examples/cube/cube.js":     "bin/cube.js",
		"/examples/cube/cube.js.map": "bin/cube.js.map",
		"/examples/obj/obj.js":       "bin/obj.js",
		"/examples/obj/obj.js.map":   "bin/obj.js.map",
		"/":                          "src/github.com/noonat/goggles",
	}
	goPath := os.Getenv("GOPATH")
	for route, path := range paths {
		absPath, err := filepath.Abs(filepath.Join(goPath, path))
		if err != nil {
			fmt.Printf("error: %v\n", err)
			os.Exit(1)
		}
		if route == "/" {
			mux.Handle(route, http.FileServer(http.Dir(absPath)))
			continue
		}
		mux.HandleFunc(route, func(w http.ResponseWriter, r *http.Request) {
			http.ServeFile(w, r, absPath)
		})
	}
	fmt.Printf("Listening on http://0.0.0.0:%d. Press Ctrl+C to stop.\n", *port)
	if err := http.ListenAndServe(":"+strconv.Itoa(*port), mux); err != nil {
		fmt.Printf("error: %v\n", err)
		os.Exit(1)
	}
}
[ "\"GOPATH\"" ]
[]
[ "GOPATH" ]
[]
["GOPATH"]
go
1
0
cmd/swarm/upload.go
// Copyright 2016 The go-ethereum Authors // This file is part of go-ethereum. // // go-ethereum is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // go-ethereum is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>. // Command bzzup uploads files to the swarm HTTP API. package main import ( "errors" "fmt" "io" "io/ioutil" "mime" "net/http" "os" "os/user" "path" "path/filepath" "strings" "github.com/ProtonFoundation/Proton/cmd/utils" swarm "github.com/ProtonFoundation/Proton/swarm/api/client" "gopkg.in/urfave/cli.v1" ) func upload(ctx *cli.Context) { args := ctx.Args() var ( bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/") recursive = ctx.GlobalBool(SwarmRecursiveUploadFlag.Name) wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name) defaultPath = ctx.GlobalString(SwarmUploadDefaultPath.Name) fromStdin = ctx.GlobalBool(SwarmUpFromStdinFlag.Name) mimeType = ctx.GlobalString(SwarmUploadMimeType.Name) client = swarm.NewClient(bzzapi) file string ) if len(args) != 1 { if fromStdin { tmp, err := ioutil.TempFile("", "swarm-stdin") if err != nil { utils.Fatalf("error create tempfile: %s", err) } defer os.Remove(tmp.Name()) n, err := io.Copy(tmp, os.Stdin) if err != nil { utils.Fatalf("error copying stdin to tempfile: %s", err) } else if n == 0 { utils.Fatalf("error reading from stdin: zero length") } file = tmp.Name() } else { utils.Fatalf("Need filename as the first and only argument") } } else { file = expandPath(args[0]) } if !wantManifest { f, err := swarm.Open(file) if err != nil { utils.Fatalf("Error opening file: %s", err) } defer f.Close() hash, err := client.UploadRaw(f, f.Size) if err != nil { utils.Fatalf("Upload failed: %s", err) } fmt.Println(hash) return } stat, err := os.Stat(file) if err != nil { utils.Fatalf("Error opening file: %s", err) } // define a function which either uploads a directory or single file // based on the type of the file being uploaded var doUpload func() (hash string, err error) if stat.IsDir() { doUpload = func() (string, error) { if !recursive { return "", errors.New("Argument is a directory and recursive upload is disabled") } return client.UploadDirectory(file, defaultPath, "") } } else { doUpload = func() (string, error) { f, err := swarm.Open(file) if err != nil { return "", fmt.Errorf("error opening file: %s", err) } defer f.Close() if mimeType == "" { mimeType = detectMimeType(file) } f.ContentType = mimeType return client.Upload(f, "") } } hash, err := doUpload() if err != nil { utils.Fatalf("Upload failed: %s", err) } fmt.Println(hash) } // Expands a file path // 1. replace tilde with users home dir // 2. expands embedded environment variables // 3. cleans the path, e.g. /a/b/../c -> /a/c // Note, it has limitations, e.g. 
// ~someuser/tmp will not be expanded
func expandPath(p string) string {
	if strings.HasPrefix(p, "~/") || strings.HasPrefix(p, "~\\") {
		if home := homeDir(); home != "" {
			p = home + p[1:]
		}
	}
	return path.Clean(os.ExpandEnv(p))
}

func homeDir() string {
	if home := os.Getenv("HOME"); home != "" {
		return home
	}
	if usr, err := user.Current(); err == nil {
		return usr.HomeDir
	}
	return ""
}

func detectMimeType(file string) string {
	if ext := filepath.Ext(file); ext != "" {
		return mime.TypeByExtension(ext)
	}
	f, err := os.Open(file)
	if err != nil {
		return ""
	}
	defer f.Close()
	buf := make([]byte, 512)
	if n, _ := f.Read(buf); n > 0 {
		return http.DetectContentType(buf)
	}
	return ""
}
[ "\"HOME\"" ]
[]
[ "HOME" ]
[]
["HOME"]
go
1
0
examples/broadcastUDPHashRandom_test/broadcastUDPHashRandom.go
package main

/* This example program tests Dara's ability to reliably replay a
 * system with multiple nodes communicating via UDP. The number of
 * nodes in the system is set by the environment variable
 * DARATESTPEERS. After beginning, each peer resolves the UDP addresses
 * and saves an MD5 hash of its ID. The algorithm for this program is as follows.
 * 1) Each node broadcasts its current MD5 hash starting at a RANDOM peer address
 * 2) Each node waits to receive a single hash
 * 3) Upon receipt a node's current hash = MD5(hash + receivedHash)
 * 4) Print the current hash + size
 * This algorithm has the advantage that any nondeterminism in the nodes will
 * cause the hashes that they compute to differ immediately, thereby making the
 * output of the program sensitive to every nondeterministic action.
 * This program is nondeterministic in the following ways:
 * 1) The order in which messages are placed on the network by any node.
 * 2) The order in which messages are received from the network by all nodes.
 * 3) The order in which each node sends its messages.
 * 4) The order in which nodes process their messages. This refers to the
 *    global order of all events.
 */

import (
	"fmt"
	"os"
	"net"
	"log"
	"strconv"
	"crypto/md5"
	"time"
	"math/rand"
)

const (
	BROADCASTS = 50
	BUFSIZE    = md5.Size
)

var (
	logger        *log.Logger
	DaraPID       int
	DaraTestPeers int
	conn          *net.UDPConn
	hash          string
	r             *rand.Rand
)

func main() {
	logger = log.New(os.Stdout, "[INITIALIZING]", log.Lshortfile)
	ParseEnviornment()
	SetupUDPNetworkConnections()
	defer conn.Close()
	logger.SetPrefix(fmt.Sprintf("[Peer %d] ", DaraPID))
	logger.Printf("DaraPID: %d\tDaraTestPeers:%d\n", DaraPID, DaraTestPeers)
	rand.Seed(int64(time.Now().Nanosecond()))
	r = rand.New(rand.NewSource(int64(time.Now().Nanosecond())))
	hashf := md5.New()
	hash = string(hashf.Sum([]byte(fmt.Sprintf("%d", DaraPID))))
	logger.Printf("Hash:%x\n", hash)
	time.Sleep(time.Second)
	//Write
	for i := 0; i < BROADCASTS; i++ {
		broadcast(hash)
		newhash := readhashmsg()
		hash = string(hashf.Sum([]byte(hash + newhash)))
	}
}

func broadcast(h string) {
	peerstart := (r.Int() % (DaraTestPeers + 1))
	if peerstart == 0 {
		peerstart = 1
	}
	var counter = 1
	for i := peerstart; counter <= DaraTestPeers; counter++ {
		logger.Printf("BROADCASTING %d Peerstart %d\n", i, peerstart)
		if i == DaraPID {
			i = (i + 1) % (DaraTestPeers + 1)
			if i == 0 {
				i = 1
			}
			continue
		} else {
			peerAddrString := fmt.Sprintf(":666%d", i)
			peerAddr, err := net.ResolveUDPAddr("udp", peerAddrString)
			if err != nil {
				logger.Panicf("Unable to resolve peer %s: %s", peerAddrString, err)
			}
			n, err := conn.WriteToUDP([]byte(h), peerAddr)
			if err != nil {
				logger.Panicf("Unable to write msg to peer %s", peerAddr.String())
			}
			logger.Printf("Writing: %x\t To: %s\t Len: %d\t", h, peerAddr.String(), n)
			i = (i + 1) % (DaraTestPeers + 1)
			if i == 0 {
				i = 1
			}
		}
		time.Sleep(time.Millisecond)
	}
}

func readhashmsg() string {
	buf := make([]byte, BUFSIZE)
	n, addr, err := conn.ReadFromUDP(buf)
	if err != nil {
		logger.Panicf("Error reading from udp %s", err.Error())
	}
	logger.Printf("Received: %x From %s Len %d", buf[:n], addr.String(), n)
	return string(buf)
}

func ParseEnviornment() {
	var err error
	DaraPIDString := os.Getenv("DARAPID")
	if DaraPIDString == "" {
		logger.Fatalf("DARAPID not set!")
	}
	DaraPID, err = strconv.Atoi(DaraPIDString)
	if err != nil {
		logger.Fatalf("DARAPID not a valid integer %s: %s", DaraPIDString, err.Error())
	}
	DaraTESTPEERSString := os.Getenv("DARATESTPEERS")
	if DaraTESTPEERSString == "" {
		logger.Fatalf("DARATESTPEERS not set!")
	}
	DaraTestPeers, err = strconv.Atoi(DaraTESTPEERSString)
	if err != nil {
		logger.Fatalf("DARATESTPEERS not a valid integer %s: %s", DaraTESTPEERSString, err.Error())
	}
	logger.Println("Done Parsing Environment")
	return
}

func SetupUDPNetworkConnections() {
	addrstring := fmt.Sprintf(":666%d", DaraPID)
	addr, err := net.ResolveUDPAddr("udp", addrstring)
	if err != nil {
		logger.Fatal(err)
	}
	conn, err = net.ListenUDP("udp", addr)
	if err != nil {
		logger.Fatal(err)
	}
	logger.Println("Done Setting Up Network Connections")
}
[ "\"DARAPID\"", "\"DARATESTPEERS\"" ]
[]
[ "DARAPID", "DARATESTPEERS" ]
[]
["DARAPID", "DARATESTPEERS"]
go
2
0
tests/integration/integration_nodejs_test.go
// Copyright 2016-2020, Pulumi Corporation. All rights reserved. // +build nodejs all package ints import ( "bytes" "fmt" "os" "path/filepath" "runtime" "strings" "testing" "time" "github.com/pulumi/pulumi/pkg/v3/resource/deploy/providers" "github.com/pulumi/pulumi/pkg/v3/secrets/cloud" "github.com/pulumi/pulumi/pkg/v3/secrets/passphrase" "github.com/pulumi/pulumi/pkg/v3/testing/integration" "github.com/pulumi/pulumi/sdk/v3/go/common/apitype" "github.com/pulumi/pulumi/sdk/v3/go/common/resource" ptesting "github.com/pulumi/pulumi/sdk/v3/go/common/testing" "github.com/pulumi/pulumi/sdk/v3/go/common/util/contract" "github.com/stretchr/testify/assert" ) // TestEmptyNodeJS simply tests that we can run an empty NodeJS project. func TestEmptyNodeJS(t *testing.T) { integration.ProgramTest(t, &integration.ProgramTestOptions{ Dir: filepath.Join("empty", "nodejs"), Dependencies: []string{"@pulumi/pulumi"}, Quick: true, }) } // Tests emitting many engine events doesn't result in a performance problem. func TestEngineEventPerf(t *testing.T) { // Prior to pulumi/pulumi#2303, a preview or update would take ~40s. // Since then, it should now be down to ~4s, with additional padding, // since some Travis machines (especially the macOS ones) seem quite slow // to begin with. benchmarkEnforcer := &assertPerfBenchmark{ T: t, MaxPreviewDuration: 8 * time.Second, MaxUpdateDuration: 8 * time.Second, } integration.ProgramTest(t, &integration.ProgramTestOptions{ Dir: "ee_perf", Dependencies: []string{"@pulumi/pulumi"}, Quick: true, ReportStats: benchmarkEnforcer, // Don't run in parallel since it is sensitive to system resources. NoParallel: true, }) } // TestEngineEvents ensures that the test framework properly records and reads engine events. func TestEngineEvents(t *testing.T) { integration.ProgramTest(t, &integration.ProgramTestOptions{ Dir: "single_resource", Dependencies: []string{"@pulumi/pulumi"}, Quick: true, NoParallel: true, // avoid contention for Dir ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) { // Ensure that we have a non-empty list of events. assert.NotEmpty(t, stackInfo.Events) // Ensure that we have two "ResourcePre" events: one for the stack and one for our resource. preEventResourceTypes := []string{} for _, e := range stackInfo.Events { if e.ResourcePreEvent != nil { preEventResourceTypes = append(preEventResourceTypes, e.ResourcePreEvent.Metadata.Type) } } assert.Equal(t, 2, len(preEventResourceTypes)) assert.Contains(t, preEventResourceTypes, "pulumi:pulumi:Stack") assert.Contains(t, preEventResourceTypes, "pulumi-nodejs:dynamic:Resource") }, }) } // TestProjectMain tests out the ability to override the main entrypoint. func TestProjectMain(t *testing.T) { test := integration.ProgramTestOptions{ Dir: "project_main", Dependencies: []string{"@pulumi/pulumi"}, ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) { // Simple runtime validation that just ensures the checkpoint was written and read. 
assert.NotNil(t, stackInfo.Deployment) }, } integration.ProgramTest(t, &test) t.Run("AbsolutePath", func(t *testing.T) { e := ptesting.NewEnvironment(t) defer func() { if !t.Failed() { e.DeleteEnvironment() } }() e.ImportDirectory("project_main_abs") // write a new Pulumi.yaml using the absolute path of the environment as "main" yamlPath := filepath.Join(e.RootPath, "Pulumi.yaml") absYamlContents := fmt.Sprintf( "name: project_main_abs\ndescription: A program with an absolute entry point\nruntime: nodejs\nmain: %s\n", e.RootPath, ) t.Logf("writing new Pulumi.yaml: \npath: %s\ncontents:%s", yamlPath, absYamlContents) if err := os.WriteFile(yamlPath, []byte(absYamlContents), 0644); err != nil { t.Error(err) return } e.RunCommand("yarn", "link", "@pulumi/pulumi") e.RunCommand("pulumi", "login", "--cloud-url", e.LocalURL()) e.RunCommand("pulumi", "stack", "init", "main-abs") e.RunCommand("pulumi", "preview") e.RunCommand("pulumi", "stack", "rm", "--yes") }) t.Run("ParentFolder", func(t *testing.T) { e := ptesting.NewEnvironment(t) defer func() { if !t.Failed() { e.DeleteEnvironment() } }() e.ImportDirectory("project_main_parent") // yarn link first e.RunCommand("yarn", "link", "@pulumi/pulumi") // then virtually change directory to the location of the nested Pulumi.yaml e.CWD = filepath.Join(e.RootPath, "foo", "bar") e.RunCommand("pulumi", "login", "--cloud-url", e.LocalURL()) e.RunCommand("pulumi", "stack", "init", "main-parent") e.RunCommand("pulumi", "preview") e.RunCommand("pulumi", "stack", "rm", "--yes") }) } // TestStackProjectName ensures we can read the Pulumi stack and project name from within the program. func TestStackProjectName(t *testing.T) { integration.ProgramTest(t, &integration.ProgramTestOptions{ Dir: "stack_project_name", Dependencies: []string{"@pulumi/pulumi"}, Quick: true, }) } func TestRemoveWithResourcesBlocked(t *testing.T) { if os.Getenv("PULUMI_ACCESS_TOKEN") == "" { t.Skipf("Skipping: PULUMI_ACCESS_TOKEN is not set") } e := ptesting.NewEnvironment(t) defer func() { if !t.Failed() { e.DeleteEnvironment() } }() stackName, err := resource.NewUniqueHex("rm-test-", 8, -1) contract.AssertNoErrorf(err, "resource.NewUniqueHex should not fail with no maximum length is set") e.ImportDirectory("single_resource") e.RunCommand("pulumi", "stack", "init", stackName) e.RunCommand("yarn", "link", "@pulumi/pulumi") e.RunCommand("pulumi", "up", "--non-interactive", "--yes", "--skip-preview") _, stderr := e.RunCommandExpectError("pulumi", "stack", "rm", "--yes") assert.Contains(t, stderr, "--force") e.RunCommand("pulumi", "destroy", "--skip-preview", "--non-interactive", "--yes") e.RunCommand("pulumi", "stack", "rm", "--yes") } // TestStackOutputs ensures we can export variables from a stack and have them get recorded as outputs. func TestStackOutputsNodeJS(t *testing.T) { integration.ProgramTest(t, &integration.ProgramTestOptions{ Dir: filepath.Join("stack_outputs", "nodejs"), Dependencies: []string{"@pulumi/pulumi"}, Quick: true, ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) { // Ensure the checkpoint contains a single resource, the Stack, with two outputs. 
fmt.Printf("Deployment: %v", stackInfo.Deployment) assert.NotNil(t, stackInfo.Deployment) if assert.Equal(t, 1, len(stackInfo.Deployment.Resources)) { stackRes := stackInfo.Deployment.Resources[0] assert.NotNil(t, stackRes) assert.Equal(t, resource.RootStackType, stackRes.URN.Type()) assert.Equal(t, 0, len(stackRes.Inputs)) assert.Equal(t, 2, len(stackRes.Outputs)) assert.Equal(t, "ABC", stackRes.Outputs["xyz"]) assert.Equal(t, float64(42), stackRes.Outputs["foo"]) } }, }) } // TestStackOutputsJSON ensures the CLI properly formats stack outputs as JSON when requested. func TestStackOutputsJSON(t *testing.T) { e := ptesting.NewEnvironment(t) defer func() { if !t.Failed() { e.DeleteEnvironment() } }() e.ImportDirectory(filepath.Join("stack_outputs", "nodejs")) e.RunCommand("yarn", "link", "@pulumi/pulumi") e.RunCommand("pulumi", "login", "--cloud-url", e.LocalURL()) e.RunCommand("pulumi", "stack", "init", "stack-outs") e.RunCommand("pulumi", "up", "--non-interactive", "--yes", "--skip-preview") stdout, _ := e.RunCommand("pulumi", "stack", "output", "--json") assert.Equal(t, `{ "foo": 42, "xyz": "ABC" } `, stdout) } // TestStackOutputsDisplayed ensures that outputs are printed at the end of an update func TestStackOutputsDisplayed(t *testing.T) { stdout := &bytes.Buffer{} integration.ProgramTest(t, &integration.ProgramTestOptions{ Dir: filepath.Join("stack_outputs", "nodejs"), Dependencies: []string{"@pulumi/pulumi"}, Quick: false, Verbose: true, Stdout: stdout, ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) { output := stdout.String() // ensure we get the outputs info both for the normal update, and for the no-change update. assert.Contains(t, output, "Outputs:\n foo: 42\n xyz: \"ABC\"\n\nResources:\n + 1 created") assert.Contains(t, output, "Outputs:\n foo: 42\n xyz: \"ABC\"\n\nResources:\n 1 unchanged") }, }) } // TestStackOutputsSuppressed ensures that outputs whose values are intentionally suppresses don't show. func TestStackOutputsSuppressed(t *testing.T) { stdout := &bytes.Buffer{} integration.ProgramTest(t, &integration.ProgramTestOptions{ Dir: filepath.Join("stack_outputs", "nodejs"), Dependencies: []string{"@pulumi/pulumi"}, Quick: false, Verbose: true, Stdout: stdout, UpdateCommandlineFlags: []string{"--suppress-outputs"}, ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) { output := stdout.String() assert.NotContains(t, output, "Outputs:\n foo: 42\n xyz: \"ABC\"\n") assert.NotContains(t, output, "Outputs:\n foo: 42\n xyz: \"ABC\"\n") }, }) } // TestStackParenting tests out that stacks and components are parented correctly. func TestStackParenting(t *testing.T) { integration.ProgramTest(t, &integration.ProgramTestOptions{ Dir: "stack_parenting", Dependencies: []string{"@pulumi/pulumi"}, Quick: true, ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) { // Ensure the checkpoint contains resources parented correctly. This should look like this: // // A F // / \ \ // B C G // / \ // D E // // with the caveat, of course, that A and F will share a common parent, the implicit stack. 
assert.NotNil(t, stackInfo.Deployment) if assert.Equal(t, 9, len(stackInfo.Deployment.Resources)) { stackRes := stackInfo.Deployment.Resources[0] assert.NotNil(t, stackRes) assert.Equal(t, resource.RootStackType, stackRes.Type) assert.Equal(t, "", string(stackRes.Parent)) urns := make(map[string]resource.URN) for _, res := range stackInfo.Deployment.Resources[1:] { assert.NotNil(t, res) urns[string(res.URN.Name())] = res.URN switch res.URN.Name() { case "a", "f": assert.NotEqual(t, "", res.Parent) assert.Equal(t, stackRes.URN, res.Parent) case "b", "c": assert.Equal(t, urns["a"], res.Parent) case "d", "e": assert.Equal(t, urns["c"], res.Parent) case "g": assert.Equal(t, urns["f"], res.Parent) case "default": // Default providers are not parented. assert.Equal(t, "", string(res.Parent)) default: t.Fatalf("unexpected name %s", res.URN.Name()) } } } }, }) } func TestStackBadParenting(t *testing.T) { if runtime.GOOS == WindowsOS { t.Skip("Temporarily skipping test on Windows - pulumi/pulumi#3811") } integration.ProgramTest(t, &integration.ProgramTestOptions{ Dir: "stack_bad_parenting", Dependencies: []string{"@pulumi/pulumi"}, Quick: true, ExpectFailure: true, }) } // TestStackDependencyGraph tests that the dependency graph of a stack is saved // in the checkpoint file. func TestStackDependencyGraph(t *testing.T) { integration.ProgramTest(t, &integration.ProgramTestOptions{ Dir: "stack_dependencies", Dependencies: []string{"@pulumi/pulumi"}, Quick: true, ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) { assert.NotNil(t, stackInfo.Deployment) latest := stackInfo.Deployment assert.True(t, len(latest.Resources) >= 2) sawFirst := false sawSecond := false for _, res := range latest.Resources { urn := string(res.URN) if strings.Contains(urn, "dynamic:Resource::first") { // The first resource doesn't depend on anything. assert.Equal(t, 0, len(res.Dependencies)) sawFirst = true } else if strings.Contains(urn, "dynamic:Resource::second") { // The second resource uses an Output property of the first resource, so it // depends directly on first. assert.Equal(t, 1, len(res.Dependencies)) assert.True(t, strings.Contains(string(res.Dependencies[0]), "dynamic:Resource::first")) sawSecond = true } } assert.True(t, sawFirst && sawSecond) }, }) } // Tests basic configuration from the perspective of a Pulumi program. 
func TestConfigBasicNodeJS(t *testing.T) { integration.ProgramTest(t, &integration.ProgramTestOptions{ Dir: filepath.Join("config_basic", "nodejs"), Dependencies: []string{"@pulumi/pulumi"}, Quick: true, Config: map[string]string{ "aConfigValue": "this value is a value", }, Secrets: map[string]string{ "bEncryptedSecret": "this super secret is encrypted", }, OrderedConfig: []integration.ConfigValue{ {Key: "outer.inner", Value: "value", Path: true}, {Key: "names[0]", Value: "a", Path: true}, {Key: "names[1]", Value: "b", Path: true}, {Key: "names[2]", Value: "c", Path: true}, {Key: "names[3]", Value: "super secret name", Path: true, Secret: true}, {Key: "servers[0].port", Value: "80", Path: true}, {Key: "servers[0].host", Value: "example", Path: true}, {Key: "a.b[0].c", Value: "true", Path: true}, {Key: "a.b[1].c", Value: "false", Path: true}, {Key: "tokens[0]", Value: "shh", Path: true, Secret: true}, {Key: "foo.bar", Value: "don't tell", Path: true, Secret: true}, }, }) } func TestConfigCaptureNodeJS(t *testing.T) { if runtime.GOOS == WindowsOS { t.Skip("Temporarily skipping test on Windows - pulumi/pulumi#3811") } integration.ProgramTest(t, &integration.ProgramTestOptions{ Dir: filepath.Join("config_capture_e2e", "nodejs"), Dependencies: []string{"@pulumi/pulumi"}, Quick: true, Config: map[string]string{ "value": "it works", }, }) } // Tests that accessing config secrets using non-secret APIs results in warnings being logged. func TestConfigSecretsWarnNodeJS(t *testing.T) { // TODO[pulumi/pulumi#7127]: Re-enabled the warning. t.Skip("Temporarily skipping test until we've re-enabled the warning - pulumi/pulumi#7127") integration.ProgramTest(t, &integration.ProgramTestOptions{ Dir: filepath.Join("config_secrets_warn", "nodejs"), Dependencies: []string{"@pulumi/pulumi"}, Quick: true, Config: map[string]string{ "plainstr1": "1", "plainstr2": "2", "plainstr3": "3", "plainstr4": "4", "plainbool1": "true", "plainbool2": "true", "plainbool3": "true", "plainbool4": "true", "plainnum1": "1", "plainnum2": "2", "plainnum3": "3", "plainnum4": "4", "plainobj1": "{}", "plainobj2": "{}", "plainobj3": "{}", "plainobj4": "{}", }, Secrets: map[string]string{ "str1": "1", "str2": "2", "str3": "3", "str4": "4", "bool1": "true", "bool2": "true", "bool3": "true", "bool4": "true", "num1": "1", "num2": "2", "num3": "3", "num4": "4", "obj1": "{}", "obj2": "{}", "obj3": "{}", "obj4": "{}", }, OrderedConfig: []integration.ConfigValue{ {Key: "parent1.foo", Value: "plain1", Path: true}, {Key: "parent1.bar", Value: "secret1", Path: true, Secret: true}, {Key: "parent2.foo", Value: "plain2", Path: true}, {Key: "parent2.bar", Value: "secret2", Path: true, Secret: true}, {Key: "names1[0]", Value: "plain1", Path: true}, {Key: "names1[1]", Value: "secret1", Path: true, Secret: true}, {Key: "names2[0]", Value: "plain2", Path: true}, {Key: "names2[1]", Value: "secret2", Path: true, Secret: true}, }, ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) { assert.NotEmpty(t, stackInfo.Events) //nolint:lll expectedWarnings := []string{ "Configuration 'config_secrets_node:str1' value is a secret; use `getSecret` instead of `get`", "Configuration 'config_secrets_node:str2' value is a secret; use `requireSecret` instead of `require`", "Configuration 'config_secrets_node:bool1' value is a secret; use `getSecretBoolean` instead of `getBoolean`", "Configuration 'config_secrets_node:bool2' value is a secret; use `requireSecretBoolean` instead of `requireBoolean`", "Configuration 
'config_secrets_node:num1' value is a secret; use `getSecretNumber` instead of `getNumber`", "Configuration 'config_secrets_node:num2' value is a secret; use `requireSecretNumber` instead of `requireNumber`", "Configuration 'config_secrets_node:obj1' value is a secret; use `getSecretObject` instead of `getObject`", "Configuration 'config_secrets_node:obj2' value is a secret; use `requireSecretObject` instead of `requireObject`", "Configuration 'config_secrets_node:parent1' value is a secret; use `getSecretObject` instead of `getObject`", "Configuration 'config_secrets_node:parent2' value is a secret; use `requireSecretObject` instead of `requireObject`", "Configuration 'config_secrets_node:names1' value is a secret; use `getSecretObject` instead of `getObject`", "Configuration 'config_secrets_node:names2' value is a secret; use `requireSecretObject` instead of `requireObject`", } for _, warning := range expectedWarnings { var found bool for _, event := range stackInfo.Events { if event.DiagnosticEvent != nil && event.DiagnosticEvent.Severity == "warning" && strings.Contains(event.DiagnosticEvent.Message, warning) { found = true break } } assert.True(t, found, "expected warning %q", warning) } // These keys should not be in any warning messages. unexpectedWarnings := []string{ "plainstr1", "plainstr2", "plainstr3", "plainstr4", "plainbool1", "plainbool2", "plainbool3", "plainbool4", "plainnum1", "plainnum2", "plainnum3", "plainnum4", "plainobj1", "plainobj2", "plainobj3", "plainobj4", "str3", "str4", "bool3", "bool4", "num3", "num4", "obj3", "obj4", } for _, warning := range unexpectedWarnings { for _, event := range stackInfo.Events { if event.DiagnosticEvent != nil { assert.NotContains(t, event.DiagnosticEvent.Message, warning) } } } }, }) } func TestInvalidVersionInPackageJson(t *testing.T) { integration.ProgramTest(t, &integration.ProgramTestOptions{ Dir: filepath.Join("invalid_package_json"), Dependencies: []string{"@pulumi/pulumi"}, Quick: true, Config: map[string]string{}, }) } // Tests an explicit provider instance. func TestExplicitProvider(t *testing.T) { integration.ProgramTest(t, &integration.ProgramTestOptions{ Dir: "explicit_provider", Dependencies: []string{"@pulumi/pulumi"}, Quick: true, ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) { assert.NotNil(t, stackInfo.Deployment) latest := stackInfo.Deployment // Expect one stack resource, two provider resources, and two custom resources. 
assert.True(t, len(latest.Resources) == 5) var defaultProvider *apitype.ResourceV3 var explicitProvider *apitype.ResourceV3 for _, res := range latest.Resources { urn := res.URN switch urn.Name() { case "default": assert.True(t, providers.IsProviderType(res.Type)) assert.Nil(t, defaultProvider) prov := res defaultProvider = &prov case "p": assert.True(t, providers.IsProviderType(res.Type)) assert.Nil(t, explicitProvider) prov := res explicitProvider = &prov case "a": prov, err := providers.ParseReference(res.Provider) assert.NoError(t, err) assert.NotNil(t, defaultProvider) defaultRef, err := providers.NewReference(defaultProvider.URN, defaultProvider.ID) assert.NoError(t, err) assert.Equal(t, defaultRef.String(), prov.String()) case "b": prov, err := providers.ParseReference(res.Provider) assert.NoError(t, err) assert.NotNil(t, explicitProvider) explicitRef, err := providers.NewReference(explicitProvider.URN, explicitProvider.ID) assert.NoError(t, err) assert.Equal(t, explicitRef.String(), prov.String()) } } assert.NotNil(t, defaultProvider) assert.NotNil(t, explicitProvider) }, }) } // Tests that stack references work in Node. func TestStackReferenceNodeJS(t *testing.T) { if runtime.GOOS == WindowsOS { t.Skip("Temporarily skipping test on Windows - pulumi/pulumi#3811") } if owner := os.Getenv("PULUMI_TEST_OWNER"); owner == "" { t.Skipf("Skipping: PULUMI_TEST_OWNER is not set") } opts := &integration.ProgramTestOptions{ Dir: filepath.Join("stack_reference", "nodejs"), Dependencies: []string{"@pulumi/pulumi"}, Quick: true, Config: map[string]string{ "org": os.Getenv("PULUMI_TEST_OWNER"), }, EditDirs: []integration.EditDir{ { Dir: "step1", Additive: true, }, { Dir: "step2", Additive: true, }, }, } integration.ProgramTest(t, opts) } // Tests that reads of unknown IDs do not fail. func TestGetCreated(t *testing.T) { integration.ProgramTest(t, &integration.ProgramTestOptions{ Dir: "get_created", Dependencies: []string{"@pulumi/pulumi"}, Quick: true, }) } // TestProviderSecretConfig that a first class provider can be created when it has secrets as part of its config. func TestProviderSecretConfig(t *testing.T) { integration.ProgramTest(t, &integration.ProgramTestOptions{ Dir: "provider_secret_config", Dependencies: []string{"@pulumi/pulumi"}, Quick: true, }) } func TestResourceWithSecretSerializationNodejs(t *testing.T) { integration.ProgramTest(t, &integration.ProgramTestOptions{ Dir: filepath.Join("secret_outputs", "nodejs"), Dependencies: []string{"@pulumi/pulumi"}, Quick: true, ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) { // The program exports three resources: // 1. One named `withSecret` who's prefix property should be secret, specified via `pulumi.secret()`. // 2. One named `withSecretAdditional` who's prefix property should be a secret, specified via // additionalSecretOutputs. // 3. One named `withoutSecret` which should not be a secret. // We serialize both of the these as POJO objects, so they appear as maps in the output. 
withSecretProps, ok := stackInfo.Outputs["withSecret"].(map[string]interface{}) assert.Truef(t, ok, "POJO output was not serialized as a map") withSecretAdditionalProps, ok := stackInfo.Outputs["withSecretAdditional"].(map[string]interface{}) assert.Truef(t, ok, "POJO output was not serialized as a map") withoutSecretProps, ok := stackInfo.Outputs["withoutSecret"].(map[string]interface{}) assert.Truef(t, ok, "POJO output was not serialized as a map") // The secret prop should have been serialized as a secret secretPropValue, ok := withSecretProps["prefix"].(map[string]interface{}) assert.Truef(t, ok, "secret output was not serialized as a secret") assert.Equal(t, resource.SecretSig, secretPropValue[resource.SigKey].(string)) // The other secret prop should have been serialized as a secret secretAdditionalPropValue, ok := withSecretAdditionalProps["prefix"].(map[string]interface{}) assert.Truef(t, ok, "secret output was not serialized as a secret") assert.Equal(t, resource.SecretSig, secretAdditionalPropValue[resource.SigKey].(string)) // And here, the prop was not set, it should just be a string value _, isString := withoutSecretProps["prefix"].(string) assert.Truef(t, isString, "non-secret output was not a string") }, }) } func TestStackReferenceSecretsNodejs(t *testing.T) { if runtime.GOOS == WindowsOS { t.Skip("Temporarily skipping test on Windows - pulumi/pulumi#3811") } owner := os.Getenv("PULUMI_TEST_OWNER") if owner == "" { t.Skipf("Skipping: PULUMI_TEST_OWNER is not set") } d := "stack_reference_secrets" integration.ProgramTest(t, &integration.ProgramTestOptions{ Dir: filepath.Join(d, "nodejs", "step1"), Dependencies: []string{"@pulumi/pulumi"}, Config: map[string]string{ "org": owner, }, Quick: true, EditDirs: []integration.EditDir{ { Dir: filepath.Join(d, "nodejs", "step2"), Additive: true, ExpectNoChanges: true, ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) { _, isString := stackInfo.Outputs["refNormal"].(string) assert.Truef(t, isString, "referenced non-secret output was not a string") secretPropValue, ok := stackInfo.Outputs["refSecret"].(map[string]interface{}) assert.Truef(t, ok, "secret output was not serialized as a secret") assert.Equal(t, resource.SecretSig, secretPropValue[resource.SigKey].(string)) }, }, }, }) } func TestPasswordlessPassphraseSecretsProvider(t *testing.T) { testOptions := integration.ProgramTestOptions{ Dir: "cloud_secrets_provider", Dependencies: []string{"@pulumi/pulumi"}, SecretsProvider: fmt.Sprintf("passphrase"), Env: []string{"PULUMI_CONFIG_PASSPHRASE=\"\""}, NoParallel: true, Secrets: map[string]string{ "mysecret": "THISISASECRET", }, CloudURL: "file://~", } workingTestOptions := testOptions.With(integration.ProgramTestOptions{ ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) { os.Setenv("PULUMI_CONFIG_PASSPHRASE", "") secretsProvider := stackInfo.Deployment.SecretsProviders assert.NotNil(t, secretsProvider) assert.Equal(t, secretsProvider.Type, "passphrase") _, err := passphrase.NewPassphaseSecretsManagerFromState(secretsProvider.State) assert.NoError(t, err) out, ok := stackInfo.Outputs["out"].(map[string]interface{}) assert.True(t, ok) _, ok = out["ciphertext"] assert.True(t, ok) os.Unsetenv("PULUMI_CONFIG_PASSPHRASE") }, }) brokenTestOptions := testOptions.With(integration.ProgramTestOptions{ ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) { secretsProvider := stackInfo.Deployment.SecretsProviders 
assert.NotNil(t, secretsProvider) assert.Equal(t, secretsProvider.Type, "passphrase") _, err := passphrase.NewPassphaseSecretsManagerFromState(secretsProvider.State) assert.Error(t, err) }, }) t.Run("works-when-passphrase-set", func(t *testing.T) { integration.ProgramTest(t, &workingTestOptions) }) t.Run("error-when-passphrase-not-set", func(t *testing.T) { integration.ProgramTest(t, &brokenTestOptions) }) } func TestCloudSecretProvider(t *testing.T) { awsKmsKeyAlias := os.Getenv("PULUMI_TEST_KMS_KEY_ALIAS") if awsKmsKeyAlias == "" { t.Skipf("Skipping: PULUMI_TEST_KMS_KEY_ALIAS is not set") } azureKeyVault := os.Getenv("PULUMI_TEST_AZURE_KEY") if azureKeyVault == "" { t.Skipf("Skipping: PULUMI_TEST_AZURE_KEY is not set") } gcpKmsKey := os.Getenv("PULUMI_TEST_GCP_KEY") if azureKeyVault == "" { t.Skipf("Skipping: PULUMI_TEST_GCP_KEY is not set") } // Generic test options for all providers testOptions := integration.ProgramTestOptions{ Dir: "cloud_secrets_provider", Dependencies: []string{"@pulumi/pulumi"}, SecretsProvider: fmt.Sprintf("awskms://alias/%s", awsKmsKeyAlias), Secrets: map[string]string{ "mysecret": "THISISASECRET", }, ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) { secretsProvider := stackInfo.Deployment.SecretsProviders assert.NotNil(t, secretsProvider) assert.Equal(t, secretsProvider.Type, "cloud") _, err := cloud.NewCloudSecretsManagerFromState(secretsProvider.State) assert.NoError(t, err) out, ok := stackInfo.Outputs["out"].(map[string]interface{}) assert.True(t, ok) _, ok = out["ciphertext"] assert.True(t, ok) }, } localTestOptions := testOptions.With(integration.ProgramTestOptions{ CloudURL: "file://~", }) azureTestOptions := testOptions.With(integration.ProgramTestOptions{ SecretsProvider: fmt.Sprintf("azurekeyvault://%s", azureKeyVault), }) gcpTestOptions := testOptions.With(integration.ProgramTestOptions{ SecretsProvider: fmt.Sprintf("gcpkms://projects/%s", gcpKmsKey), }) // Run with default Pulumi service backend t.Run("service", func(t *testing.T) { integration.ProgramTest(t, &testOptions) }) // Check Azure secrets provider t.Run("azure", func(t *testing.T) { integration.ProgramTest(t, &azureTestOptions) }) // Check gcloud secrets provider t.Run("gcp", func(t *testing.T) { integration.ProgramTest(t, &gcpTestOptions) }) // Also run with local backend t.Run("local", func(t *testing.T) { integration.ProgramTest(t, &localTestOptions) }) } // Tests a resource with a large (>4mb) string prop in Node.js func TestLargeResourceNode(t *testing.T) { if runtime.GOOS == WindowsOS { t.Skip("Temporarily skipping test on Windows - pulumi/pulumi#3811") } integration.ProgramTest(t, &integration.ProgramTestOptions{ Dir: filepath.Join("large_resource", "nodejs"), Dependencies: []string{"@pulumi/pulumi"}, }) } // Tests enum outputs func TestEnumOutputNode(t *testing.T) { integration.ProgramTest(t, &integration.ProgramTestOptions{ Dir: filepath.Join("enums", "nodejs"), Dependencies: []string{"@pulumi/pulumi"}, ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) { assert.NotNil(t, stack.Outputs) assert.Equal(t, "Burgundy", stack.Outputs["myTreeType"]) assert.Equal(t, "Pulumi Planters Inc.foo", stack.Outputs["myTreeFarmChanged"]) assert.Equal(t, "My Burgundy Rubber tree is from Pulumi Planters Inc.", stack.Outputs["mySentence"]) }, }) } // Test remote component construction in Node. 
func TestConstructNode(t *testing.T) { if runtime.GOOS == WindowsOS { t.Skip("Temporarily skipping test on Windows") } tests := []struct { componentDir string expectedResourceCount int env []string }{ { componentDir: "testcomponent", expectedResourceCount: 9, }, { componentDir: "testcomponent-python", expectedResourceCount: 9, env: []string{pulumiRuntimeVirtualEnv(t, filepath.Join("..", ".."))}, }, { componentDir: "testcomponent-go", expectedResourceCount: 8, // One less because no dynamic provider. }, } for _, test := range tests { t.Run(test.componentDir, func(t *testing.T) { pathEnv := pathEnv(t, filepath.Join("construct_component", test.componentDir)) integration.ProgramTest(t, optsForConstructNode(t, test.expectedResourceCount, append(test.env, pathEnv)...)) }) } } func optsForConstructNode(t *testing.T, expectedResourceCount int, env ...string) *integration.ProgramTestOptions { return &integration.ProgramTestOptions{ Env: env, Dir: filepath.Join("construct_component", "nodejs"), Dependencies: []string{"@pulumi/pulumi"}, Secrets: map[string]string{ "secret": "this super secret is encrypted", }, Quick: true, NoParallel: true, ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) { assert.NotNil(t, stackInfo.Deployment) if assert.Equal(t, expectedResourceCount, len(stackInfo.Deployment.Resources)) { stackRes := stackInfo.Deployment.Resources[0] assert.NotNil(t, stackRes) assert.Equal(t, resource.RootStackType, stackRes.Type) assert.Equal(t, "", string(stackRes.Parent)) // Check that dependencies flow correctly between the originating program and the remote component // plugin. urns := make(map[string]resource.URN) for _, res := range stackInfo.Deployment.Resources[1:] { assert.NotNil(t, res) urns[string(res.URN.Name())] = res.URN switch res.URN.Name() { case "child-a", "child-b": for _, deps := range res.PropertyDependencies { assert.Empty(t, deps) } case "child-c": assert.Equal(t, []resource.URN{urns["child-a"]}, res.PropertyDependencies["echo"]) case "a", "b", "c": secretPropValue, ok := res.Outputs["secret"].(map[string]interface{}) assert.Truef(t, ok, "secret output was not serialized as a secret") assert.Equal(t, resource.SecretSig, secretPropValue[resource.SigKey].(string)) } } } }, } } // Test remote component construction with a child resource that takes a long time to be created, ensuring it's created. func TestConstructSlowNode(t *testing.T) { pathEnv := testComponentSlowPathEnv(t) var opts *integration.ProgramTestOptions opts = &integration.ProgramTestOptions{ Env: []string{pathEnv}, Dir: filepath.Join("construct_component_slow", "nodejs"), Dependencies: []string{"@pulumi/pulumi"}, Quick: true, ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) { assert.NotNil(t, stackInfo.Deployment) if assert.Equal(t, 5, len(stackInfo.Deployment.Resources)) { stackRes := stackInfo.Deployment.Resources[0] assert.NotNil(t, stackRes) assert.Equal(t, resource.RootStackType, stackRes.Type) assert.Equal(t, "", string(stackRes.Parent)) } }, } integration.ProgramTest(t, opts) } // Test remote component construction with prompt inputs. 
func TestConstructPlainNode(t *testing.T) { tests := []struct { componentDir string expectedResourceCount int env []string }{ { componentDir: "testcomponent", expectedResourceCount: 9, }, { componentDir: "testcomponent-python", expectedResourceCount: 9, env: []string{pulumiRuntimeVirtualEnv(t, filepath.Join("..", ".."))}, }, { componentDir: "testcomponent-go", expectedResourceCount: 8, // One less because no dynamic provider. }, } for _, test := range tests { t.Run(test.componentDir, func(t *testing.T) { pathEnv := pathEnv(t, filepath.Join("construct_component_plain", test.componentDir)) integration.ProgramTest(t, optsForConstructPlainNode(t, test.expectedResourceCount, append(test.env, pathEnv)...)) }) } } func optsForConstructPlainNode(t *testing.T, expectedResourceCount int, env ...string) *integration.ProgramTestOptions { return &integration.ProgramTestOptions{ Env: env, Dir: filepath.Join("construct_component_plain", "nodejs"), Dependencies: []string{"@pulumi/pulumi"}, Quick: true, NoParallel: true, ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) { assert.NotNil(t, stackInfo.Deployment) assert.Equal(t, expectedResourceCount, len(stackInfo.Deployment.Resources)) }, } } // Test remote component inputs properly handle unknowns. func TestConstructUnknownNode(t *testing.T) { testConstructUnknown(t, "nodejs", "@pulumi/pulumi") } func TestGetResourceNode(t *testing.T) { integration.ProgramTest(t, &integration.ProgramTestOptions{ Dir: filepath.Join("get_resource", "nodejs"), Dependencies: []string{"@pulumi/pulumi"}, AllowEmptyPreviewChanges: true, ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) { assert.NotNil(t, stack.Outputs) assert.Equal(t, "foo", stack.Outputs["foo"]) }, }) } func TestComponentProviderSchemaNode(t *testing.T) { path := filepath.Join("component_provider_schema", "testcomponent", "pulumi-resource-testcomponent") if runtime.GOOS == WindowsOS { path += ".cmd" } testComponentProviderSchema(t, path) } // Test throwing an error within an apply in a remote component written in nodejs. // The provider should return the error and shutdown gracefully rather than hanging. func TestConstructNodeErrorApply(t *testing.T) { dir := "construct_component_error_apply" componentDir := "testcomponent" stderr := &bytes.Buffer{} expectedError := "intentional error from within an apply" opts := &integration.ProgramTestOptions{ Env: []string{pathEnv(t, filepath.Join(dir, componentDir))}, Dir: filepath.Join(dir, "nodejs"), Dependencies: []string{"@pulumi/pulumi"}, Quick: true, NoParallel: true, Stderr: stderr, ExpectFailure: true, ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) { output := stderr.String() assert.Contains(t, output, expectedError) }, } t.Run(componentDir, func(t *testing.T) { integration.ProgramTest(t, opts) }) }
[ "\"PULUMI_ACCESS_TOKEN\"", "\"PULUMI_TEST_OWNER\"", "\"PULUMI_TEST_OWNER\"", "\"PULUMI_TEST_OWNER\"", "\"PULUMI_TEST_KMS_KEY_ALIAS\"", "\"PULUMI_TEST_AZURE_KEY\"", "\"PULUMI_TEST_GCP_KEY\"" ]
[]
[ "PULUMI_ACCESS_TOKEN", "PULUMI_TEST_KMS_KEY_ALIAS", "PULUMI_TEST_GCP_KEY", "PULUMI_TEST_OWNER", "PULUMI_TEST_AZURE_KEY" ]
[]
["PULUMI_ACCESS_TOKEN", "PULUMI_TEST_KMS_KEY_ALIAS", "PULUMI_TEST_GCP_KEY", "PULUMI_TEST_OWNER", "PULUMI_TEST_AZURE_KEY"]
go
5
0
api/rabbit-listener.go
package main

import (
	"encoding/json"
	"gAPIManagement/api/config"
	"gAPIManagement/api/logs"
	"gAPIManagement/api/rabbit"
	"gAPIManagement/api/utils"
	"os"
	"strconv"

	"github.com/streadway/amqp"
)

var ELASTIC_URL string
var ELASTICPORT string

func main() {
	config.LoadURLConstants()
	workers := 1
	if os.Getenv("RABBIT_LISTENER_WORKERS") != "" {
		workers, _ = strconv.Atoi(os.Getenv("RABBIT_LISTENER_WORKERS"))
		Start(workers)
	} else {
		StartListeningToRabbit(1)
	}
}

func Start(workers int) {
	/*
		if (os.Getenv("ELASTICSEARCH_HOST") != "") {
			ELASTIC_URL = os.Getenv("ELASTICSEARCH_HOST")
		}
	*/
	forever := make(chan bool)
	for i := 0; i < workers; i++ {
		go StartListeningToRabbit(i)
	}
	<-forever
}

func failOnError(err error, msg string) {
	if err != nil {
		utils.LogMessage(msg+err.Error(), utils.ErrorLogType)
	}
}

func PreventCrash() {
	if r := recover(); r != nil {
		utils.LogMessage("Rabbit Listener Crashed", utils.ErrorLogType)
		StartListeningToRabbit(1)
	}
}

func StartListeningToRabbit(workerId int) {
	defer PreventCrash()
	ELASTIC_URL = os.Getenv("ELASTICSEARCH_HOST")
	ELASTICPORT = os.Getenv("ELASTICSEARCH_PORT")

	conn := rabbit.ConnectToRabbit()
	defer conn.Close()

	ch, err := conn.Channel()
	failOnError(err, "Failed to open a channel 2")
	defer ch.Close()

	q, err := ch.QueueDeclare(
		rabbit.Queue(), // name
		true,           // durable
		false,          // delete when unused
		false,          // exclusive
		false,          // no-wait
		nil,            // arguments
	)
	failOnError(err, "Failed to declare a queue 2")

	msgs, err := ch.Consume(
		q.Name, // queue
		"",     // consumer
		true,   // auto-ack
		false,  // exclusive
		false,  // no-local
		false,  // no-wait
		nil,    // args
	)
	failOnError(err, "Failed to register a consumer 2")

	forever := make(chan bool)
	go ReceiveAndPublish(workerId, msgs)
	utils.LogMessage(" [*] Waiting for messages. To exit press CTRL+C", utils.InfoLogType)
	<-forever
}

func ReceiveAndPublish(workerId int, msgs <-chan amqp.Delivery) {
	for d := range msgs {
		var reqLogging logs.RequestLogging
		err := json.Unmarshal(d.Body, &reqLogging)
		if err == nil {
			utils.LogMessage("Publish to elasticsearch from #"+strconv.Itoa(workerId)+" - "+string(d.Body), utils.InfoLogType)
			logs.PublishElastic(&reqLogging)
		} else {
			utils.LogMessage("Error logging message: "+string(d.Body), utils.ErrorLogType)
		}
	}
}
[ "\"RABBIT_LISTENER_WORKERS\"", "\"RABBIT_LISTENER_WORKERS\"", "\"ELASTICSEARCH_HOST\"", "\"ELASTICSEARCH_HOST\"", "\"ELASTICSEARCH_HOST\"", "\"ELASTICSEARCH_PORT\"" ]
[]
[ "RABBIT_LISTENER_WORKERS", "ELASTICSEARCH_PORT", "ELASTICSEARCH_HOST" ]
[]
["RABBIT_LISTENER_WORKERS", "ELASTICSEARCH_PORT", "ELASTICSEARCH_HOST"]
go
3
0
packit_service/models.py
# Copyright Contributors to the Packit project. # SPDX-License-Identifier: MIT """ Data layer on top of PSQL using sqlalch """ import enum import logging import os from contextlib import contextmanager from datetime import datetime, timedelta from typing import ( Dict, Iterable, List, Optional, TYPE_CHECKING, Tuple, Type, Union, ) from urllib.parse import urlparse from sqlalchemy import ( Boolean, Column, DateTime, Enum, ForeignKey, Integer, JSON, String, Text, create_engine, desc, func, null, case, ) from sqlalchemy.dialects.postgresql import array as psql_array from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import Session, relationship, scoped_session, sessionmaker from sqlalchemy.types import ARRAY from packit.config import JobConfigTriggerType from packit.exceptions import PackitException from packit_service.constants import ALLOWLIST_CONSTANTS logger = logging.getLogger(__name__) # SQLAlchemy session, get it with `get_sa_session` session_instance = None def get_pg_url() -> str: """create postgresql connection string""" return ( f"postgresql+psycopg2://{os.getenv('POSTGRESQL_USER')}" f":{os.getenv('POSTGRESQL_PASSWORD')}@{os.getenv('POSTGRESQL_HOST', 'postgres')}" f":{os.getenv('POSTGRESQL_PORT', '5432')}/{os.getenv('POSTGRESQL_DATABASE')}" ) engine = create_engine(get_pg_url()) ScopedSession = scoped_session(sessionmaker(bind=engine)) @contextmanager def get_sa_session() -> Session: """get SQLAlchemy session""" session = ScopedSession() try: yield session session.commit() except Exception as ex: logger.warning(f"Exception while working with database: {ex!r}") session.rollback() raise def optional_time( datetime_object: Union[datetime, None], fmt: str = "%d/%m/%Y %H:%M:%S" ) -> Union[str, None]: """ Returns a formatted date-time string if argument is a datetime object. Args: datetime_object: date-time to be converted to string fmt: format string to be used to produce the string. Defaults to `"%d/%m/%Y %H:%M:%S"`. Returns: Formatted date-time or `None` if no datetime is provided. """ if datetime_object is None: return None return datetime_object.strftime(fmt) def optional_timestamp(datetime_object: Optional[datetime]) -> Optional[int]: """ Returns a UNIX timestamp if argument is a datetime object. Args: datetime_object: Date-time to be converted to timestamp. Returns: UNIX timestamp or `None` if no datetime object is provided. """ if datetime_object is None: return None return int(datetime_object.timestamp()) # https://github.com/python/mypy/issues/2477#issuecomment-313984522 ^_^ if TYPE_CHECKING: Base = object else: Base = declarative_base() class JobTriggerModelType(str, enum.Enum): pull_request = "pull_request" branch_push = "branch_push" release = "release" issue = "issue" class BuildsAndTestsConnector: """ Abstract class that is inherited by trigger models to share methods for accessing build/test models.. """ id: int job_trigger_model_type: JobTriggerModelType def get_runs(self) -> List["PipelineModel"]: with get_sa_session() as session: trigger_list = ( session.query(JobTriggerModel) .filter_by(type=self.job_trigger_model_type, trigger_id=self.id) .all() ) if len(trigger_list) > 1: msg = ( f"There are multiple run models for type {self.job_trigger_model_type}" f"and id={self.id}." 
) logger.error(msg) raise PackitException(msg) return trigger_list[0].runs if trigger_list else [] def _get_run_item( self, model_type: Type["AbstractBuildTestDbType"] ) -> List["AbstractBuildTestDbType"]: runs = self.get_runs() models = [] if model_type == CoprBuildTargetModel: models = [run.copr_build for run in runs] if model_type == KojiBuildTargetModel: models = [run.koji_build for run in runs] if model_type == SRPMBuildModel: models = [run.srpm_build for run in runs] if model_type == TFTTestRunTargetModel: models = [run.test_run for run in runs] return list({model for model in models if model is not None}) def get_copr_builds(self): return self._get_run_item(model_type=CoprBuildTargetModel) def get_koji_builds(self): return self._get_run_item(model_type=KojiBuildTargetModel) def get_srpm_builds(self): return self._get_run_item(model_type=SRPMBuildModel) def get_test_runs(self): return self._get_run_item(model_type=TFTTestRunTargetModel) class ProjectAndTriggersConnector: """ Abstract class that is inherited by build/test models to share methods for accessing project and trigger models. """ runs: Optional[List["PipelineModel"]] def get_job_trigger_model(self) -> Optional["JobTriggerModel"]: if not self.runs: return None return self.runs[0].job_trigger def get_trigger_object(self) -> Optional["AbstractTriggerDbType"]: job_trigger = self.get_job_trigger_model() if not job_trigger: return None return job_trigger.get_trigger_object() def get_project(self) -> Optional["GitProjectModel"]: trigger_object = self.get_trigger_object() if not trigger_object: return None return trigger_object.project def get_pr_id(self) -> Optional[int]: trigger_object = self.get_trigger_object() if isinstance(trigger_object, PullRequestModel): return trigger_object.pr_id return None def get_issue_id(self) -> Optional[int]: trigger_object = self.get_trigger_object() if not isinstance(trigger_object, IssueModel): return None return trigger_object.issue_id def get_branch_name(self) -> Optional[str]: trigger_object = self.get_trigger_object() if isinstance(trigger_object, GitBranchModel): return trigger_object.name return None def get_release_tag(self) -> Optional[str]: trigger_object = self.get_trigger_object() if isinstance(trigger_object, ProjectReleaseModel): return trigger_object.tag_name return None class GitProjectModel(Base): __tablename__ = "git_projects" id = Column(Integer, primary_key=True) # github.com/NAMESPACE/REPO_NAME # git.centos.org/NAMESPACE/REPO_NAME namespace = Column(String, index=True) repo_name = Column(String, index=True) pull_requests = relationship("PullRequestModel", back_populates="project") branches = relationship("GitBranchModel", back_populates="project") releases = relationship("ProjectReleaseModel", back_populates="project") issues = relationship("IssueModel", back_populates="project") project_authentication_issue = relationship( "ProjectAuthenticationIssueModel", back_populates="project" ) # Git URL of the repo # Example: https://github.com/packit/hello-world.git https_url = Column(String) project_url = Column(String) instance_url = Column(String, nullable=False) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.instance_url = urlparse(self.project_url).hostname @classmethod def get_or_create( cls, namespace: str, repo_name: str, project_url: str ) -> "GitProjectModel": with get_sa_session() as session: project = ( session.query(GitProjectModel) .filter_by( namespace=namespace, repo_name=repo_name, project_url=project_url ) .first() ) if not project: 
project = cls( repo_name=repo_name, namespace=namespace, project_url=project_url ) session.add(project) return project @classmethod def get_projects(cls, first: int, last: int) -> Iterable["GitProjectModel"]: with get_sa_session() as session: return ( session.query(GitProjectModel) .order_by(GitProjectModel.namespace) .slice(first, last) ) @classmethod def get_forge( cls, first: int, last: int, forge: str ) -> Iterable["GitProjectModel"]: """Return projects of given forge""" with get_sa_session() as session: return ( session.query(GitProjectModel) .filter_by(instance_url=forge) .order_by(GitProjectModel.namespace) .slice(first, last) ) @classmethod def get_namespace(cls, forge: str, namespace: str) -> Iterable["GitProjectModel"]: """Return projects of given forge and namespace""" with get_sa_session() as session: projects = ( session.query(GitProjectModel).filter_by(namespace=namespace).all() ) matched_projects = [] for project in projects: forge_domain = urlparse(project.project_url).hostname if forge == forge_domain: matched_projects.append(project) return matched_projects @classmethod def get_project( cls, forge: str, namespace: str, repo_name: str ) -> Optional["GitProjectModel"]: """Return one project which matches said criteria""" with get_sa_session() as session: project = ( session.query(cls) .filter_by(instance_url=forge, namespace=namespace, repo_name=repo_name) .one_or_none() ) return project @classmethod def get_project_prs( cls, first: int, last: int, forge: str, namespace: str, repo_name: str ) -> Iterable["PullRequestModel"]: with get_sa_session() as session: return ( session.query(PullRequestModel) .join(GitProjectModel) .filter( PullRequestModel.project_id == GitProjectModel.id, GitProjectModel.instance_url == forge, GitProjectModel.namespace == namespace, GitProjectModel.repo_name == repo_name, ) .order_by(desc(PullRequestModel.pr_id)) .slice(first, last) ) @classmethod def get_project_issues( cls, forge: str, namespace: str, repo_name: str ) -> Optional[Iterable["IssueModel"]]: with get_sa_session() as session: issues = ( session.query(IssueModel) .join(GitProjectModel) .filter( IssueModel.project_id == GitProjectModel.id, GitProjectModel.instance_url == forge, GitProjectModel.namespace == namespace, GitProjectModel.repo_name == repo_name, ) .all() ) return issues @classmethod def get_project_branches( cls, forge: str, namespace: str, repo_name: str ) -> Optional[Iterable["GitBranchModel"]]: with get_sa_session() as session: branches = ( session.query(GitBranchModel) .join(GitProjectModel) .filter( GitBranchModel.project_id == GitProjectModel.id, GitProjectModel.instance_url == forge, GitProjectModel.namespace == namespace, GitProjectModel.repo_name == repo_name, ) .all() ) return branches @classmethod def get_project_releases( cls, forge: str, namespace: str, repo_name: str ) -> Optional[Iterable["ProjectReleaseModel"]]: with get_sa_session() as session: releases = ( session.query(ProjectReleaseModel) .join(GitProjectModel) .filter( ProjectReleaseModel.project_id == GitProjectModel.id, GitProjectModel.instance_url == forge, GitProjectModel.namespace == namespace, GitProjectModel.repo_name == repo_name, ) .all() ) return releases def __repr__(self): return ( f"GitProjectModel(name={self.namespace}/{self.repo_name}, " f"project_url='{self.project_url}')" ) class PullRequestModel(BuildsAndTestsConnector, Base): __tablename__ = "pull_requests" id = Column(Integer, primary_key=True) # our database PK # GitHub PR ID # this is not our PK b/c: # 1) we don't control it # 2) we 
want sensible auto-incremented ID, not random numbers # 3) it's not unique across projects obviously, so why am I even writing this? pr_id = Column(Integer, index=True) project_id = Column(Integer, ForeignKey("git_projects.id")) project = relationship("GitProjectModel", back_populates="pull_requests") # CentOS Pagure only bugzilla = relationship("BugzillaModel", back_populates="pull_request") job_config_trigger_type = JobConfigTriggerType.pull_request job_trigger_model_type = JobTriggerModelType.pull_request @classmethod def get_or_create( cls, pr_id: int, namespace: str, repo_name: str, project_url: str ) -> "PullRequestModel": with get_sa_session() as session: project = GitProjectModel.get_or_create( namespace=namespace, repo_name=repo_name, project_url=project_url ) pr = ( session.query(PullRequestModel) .filter_by(pr_id=pr_id, project_id=project.id) .first() ) if not pr: pr = PullRequestModel() pr.pr_id = pr_id pr.project_id = project.id session.add(pr) return pr @classmethod def get_by_id(cls, id_: int) -> Optional["PullRequestModel"]: with get_sa_session() as session: return session.query(PullRequestModel).filter_by(id=id_).first() def __repr__(self): return f"PullRequestModel(pr_id={self.pr_id}, project={self.project})" class IssueModel(BuildsAndTestsConnector, Base): __tablename__ = "project_issues" id = Column(Integer, primary_key=True) # our database PK issue_id = Column(Integer, index=True) project_id = Column(Integer, ForeignKey("git_projects.id")) project = relationship("GitProjectModel", back_populates="issues") # TODO: Fix this hardcoding! This is only to make propose-downstream work! job_config_trigger_type = JobConfigTriggerType.release job_trigger_model_type = JobTriggerModelType.issue @classmethod def get_or_create( cls, issue_id: int, namespace: str, repo_name: str, project_url: str ) -> "IssueModel": with get_sa_session() as session: project = GitProjectModel.get_or_create( namespace=namespace, repo_name=repo_name, project_url=project_url ) issue = ( session.query(IssueModel) .filter_by(issue_id=issue_id, project_id=project.id) .first() ) if not issue: issue = IssueModel() issue.issue_id = issue_id issue.project_id = project.id session.add(issue) return issue @classmethod def get_by_id(cls, id_: int) -> Optional["IssueModel"]: with get_sa_session() as session: return session.query(IssueModel).filter_by(id=id_).first() def __repr__(self): return f"IssueModel(id={self.issue_id}, project={self.project})" class GitBranchModel(BuildsAndTestsConnector, Base): __tablename__ = "git_branches" id = Column(Integer, primary_key=True) # our database PK name = Column(String) project_id = Column(Integer, ForeignKey("git_projects.id")) project = relationship("GitProjectModel", back_populates="branches") job_config_trigger_type = JobConfigTriggerType.commit job_trigger_model_type = JobTriggerModelType.branch_push @classmethod def get_or_create( cls, branch_name: str, namespace: str, repo_name: str, project_url: str ) -> "GitBranchModel": with get_sa_session() as session: project = GitProjectModel.get_or_create( namespace=namespace, repo_name=repo_name, project_url=project_url ) git_branch = ( session.query(GitBranchModel) .filter_by(name=branch_name, project_id=project.id) .first() ) if not git_branch: git_branch = GitBranchModel() git_branch.name = branch_name git_branch.project_id = project.id session.add(git_branch) return git_branch @classmethod def get_by_id(cls, id_: int) -> Optional["GitBranchModel"]: with get_sa_session() as session: return 
session.query(GitBranchModel).filter_by(id=id_).first() def __repr__(self): return f"GitBranchModel(name={self.name}, project={self.project})" class BugzillaModel(Base): __tablename__ = "bugzillas" id = Column(Integer, primary_key=True) bug_id = Column(Integer, index=True) bug_url = Column(String) pull_request_id = Column(Integer, ForeignKey("pull_requests.id")) pull_request = relationship("PullRequestModel", back_populates="bugzilla") @classmethod def get_or_create( cls, pr_id: int, namespace: str, repo_name: str, project_url: str, bug_id: int = None, bug_url: str = None, ) -> "BugzillaModel": with get_sa_session() as session: pull_request = PullRequestModel.get_or_create( pr_id=pr_id, namespace=namespace, repo_name=repo_name, project_url=project_url, ) bugzilla = ( session.query(BugzillaModel) .filter_by(pull_request_id=pull_request.id) .first() ) if not bugzilla and bug_id and bug_url: bugzilla = BugzillaModel() bugzilla.bug_id = bug_id bugzilla.bug_url = bug_url bugzilla.pull_request_id = pull_request.id session.add(bugzilla) return bugzilla @classmethod def get_by_pr( cls, pr_id: int, namespace: str, repo_name: str, project_url: str, ) -> Optional["BugzillaModel"]: return cls.get_or_create( pr_id=pr_id, namespace=namespace, repo_name=repo_name, project_url=project_url, ) def __repr__(self): return f"BugzillaModel(bug_id={self.bug_id}, bug_url={self.bug_url})" class ProjectReleaseModel(Base): __tablename__ = "project_releases" id = Column(Integer, primary_key=True) # our database PK tag_name = Column(String) commit_hash = Column(String) project_id = Column(Integer, ForeignKey("git_projects.id")) project = relationship("GitProjectModel", back_populates="releases") job_config_trigger_type = JobConfigTriggerType.release job_trigger_model_type = JobTriggerModelType.release @classmethod def get_or_create( cls, tag_name: str, namespace: str, repo_name: str, project_url: str, commit_hash: Optional[str] = None, ) -> "ProjectReleaseModel": with get_sa_session() as session: project = GitProjectModel.get_or_create( namespace=namespace, repo_name=repo_name, project_url=project_url ) project_release = ( session.query(ProjectReleaseModel) .filter_by(tag_name=tag_name, project_id=project.id) .first() ) if not project_release: project_release = ProjectReleaseModel() project_release.tag_name = tag_name project_release.project = project project_release.commit_hash = commit_hash session.add(project_release) return project_release @classmethod def get_by_id(cls, id_: int) -> Optional["ProjectReleaseModel"]: with get_sa_session() as session: return session.query(ProjectReleaseModel).filter_by(id=id_).first() def __repr__(self): return ( f"ProjectReleaseModel(" f"tag_name={self.tag_name}, " f"project={self.project})" ) AbstractTriggerDbType = Union[ PullRequestModel, ProjectReleaseModel, GitBranchModel, IssueModel, ] MODEL_FOR_TRIGGER: Dict[JobTriggerModelType, Type[AbstractTriggerDbType]] = { JobTriggerModelType.pull_request: PullRequestModel, JobTriggerModelType.branch_push: GitBranchModel, JobTriggerModelType.release: ProjectReleaseModel, JobTriggerModelType.issue: IssueModel, } class JobTriggerModel(Base): """ Model representing a trigger of some packit task. It connects PipelineModel (and built/test models via that model) with models like PullRequestModel, GitBranchModel or ProjectReleaseModel. * It contains type and id of the other database_model. * We know table and id that we need to find in that table. * Each PipelineModel has to be connected to exactly one JobTriggerModel. 
* There can be multiple PipelineModels for one JobTriggerModel. (e.g. For each push to PR, there will be new PipelineModel, but same JobTriggerModel.) """ __tablename__ = "job_triggers" id = Column(Integer, primary_key=True) # our database PK type = Column(Enum(JobTriggerModelType)) trigger_id = Column(Integer) runs = relationship("PipelineModel", back_populates="job_trigger") @classmethod def get_or_create( cls, type: JobTriggerModelType, trigger_id: int ) -> "JobTriggerModel": with get_sa_session() as session: trigger = ( session.query(JobTriggerModel) .filter_by(type=type, trigger_id=trigger_id) .first() ) if not trigger: trigger = JobTriggerModel() trigger.type = type trigger.trigger_id = trigger_id session.add(trigger) return trigger @classmethod def get_by_id(cls, id_: int) -> "JobTriggerModel": with get_sa_session() as session: return session.query(JobTriggerModel).filter_by(id=id_).first() def get_trigger_object(self) -> Optional[AbstractTriggerDbType]: with get_sa_session() as session: return ( session.query(MODEL_FOR_TRIGGER[self.type]) .filter_by(id=self.trigger_id) .first() ) def __repr__(self): return f"JobTriggerModel(type={self.type}, trigger_id={self.trigger_id})" class PipelineModel(Base): """ Represents one pipeline. Connects JobTriggerModel (and triggers like PullRequestModel via that model) with build/test models like SRPMBuildModel, CoprBuildTargetModel, KojiBuildTargetModel, and TFTTestRunTargetModel. * One model of each build/test model can be connected. * Each build/test model can be connected to multiple PipelineModels (e.g. on retrigger). * Each PipelineModel has to be connected to exactly one JobTriggerModel. * There can be multiple PipelineModels for one JobTriggerModel. (e.g. For each push to PR, there will be new PipelineModel, but same JobTriggerModel.) 
""" __tablename__ = "pipelines" id = Column(Integer, primary_key=True) # our database PK # datetime.utcnow instead of datetime.utcnow() because its an argument to the function # so it will run when the model is initiated, not when the table is made datetime = Column(DateTime, default=datetime.utcnow) job_trigger_id = Column(Integer, ForeignKey("job_triggers.id")) job_trigger = relationship("JobTriggerModel", back_populates="runs") srpm_build_id = Column(Integer, ForeignKey("srpm_builds.id")) srpm_build = relationship("SRPMBuildModel", back_populates="runs") copr_build_id = Column(Integer, ForeignKey("copr_build_targets.id")) copr_build = relationship("CoprBuildTargetModel", back_populates="runs") koji_build_id = Column(Integer, ForeignKey("koji_build_targets.id")) koji_build = relationship("KojiBuildTargetModel", back_populates="runs") test_run_id = Column(Integer, ForeignKey("tft_test_run_targets.id")) test_run = relationship("TFTTestRunTargetModel", back_populates="runs") propose_downstream_run_id = Column( Integer, ForeignKey("propose_downstream_runs.id") ) propose_downstream_run = relationship( "ProposeDownstreamModel", back_populates="runs" ) @classmethod def create(cls, type: JobTriggerModelType, trigger_id: int) -> "PipelineModel": with get_sa_session() as session: run_model = PipelineModel() run_model.job_trigger = JobTriggerModel.get_or_create( type=type, trigger_id=trigger_id ) session.add(run_model) return run_model def get_trigger_object(self) -> AbstractTriggerDbType: return self.job_trigger.get_trigger_object() def __repr__(self): return f"PipelineModel(id={self.id}, datetime='{datetime}', job_trigger={self.job_trigger})" @classmethod def __query_merged_runs(cls, session): return session.query( func.min(PipelineModel.id).label("merged_id"), PipelineModel.srpm_build_id, func.array_agg(psql_array([PipelineModel.copr_build_id])).label( "copr_build_id" ), func.array_agg(psql_array([PipelineModel.koji_build_id])).label( "koji_build_id" ), func.array_agg(psql_array([PipelineModel.test_run_id])).label( "test_run_id" ), func.array_agg(psql_array([PipelineModel.propose_downstream_run_id])).label( "propose_downstream_run_id", ), ) @classmethod def get_merged_chroots(cls, first: int, last: int) -> Iterable["PipelineModel"]: with get_sa_session() as session: return ( cls.__query_merged_runs(session) .group_by( PipelineModel.srpm_build_id, case( [(PipelineModel.srpm_build_id.isnot(null()), 0)], else_=PipelineModel.id, ), ) .order_by(desc("merged_id")) .slice(first, last) ) @classmethod def get_merged_run(cls, first_id: int) -> Optional[Iterable["PipelineModel"]]: with get_sa_session() as session: return ( cls.__query_merged_runs(session) .filter( PipelineModel.id >= first_id, PipelineModel.id <= first_id + 100 ) .group_by( PipelineModel.srpm_build_id, case( [(PipelineModel.srpm_build_id.isnot(null()), 0)], else_=PipelineModel.id, ), ) .first() ) @classmethod def get_run(cls, id_: int) -> Optional["PipelineModel"]: with get_sa_session() as session: return session.query(PipelineModel).filter_by(id=id_).first() class CoprBuildTargetModel(ProjectAndTriggersConnector, Base): """ Representation of Copr build for one target. """ __tablename__ = "copr_build_targets" id = Column(Integer, primary_key=True) build_id = Column(String, index=True) # copr build id # commit sha of the PR (or a branch, release) we used for a build commit_sha = Column(String) # what's the build status? 
status = Column(String) # chroot, but we use the word target in our docs target = Column(String) # URL to copr web ui for the particular build web_url = Column(String) # url to copr build logs build_logs_url = Column(String) # for monitoring: time when we set the status about accepted task task_accepted_time = Column(DateTime) # datetime.utcnow instead of datetime.utcnow() because its an argument to the function # so it will run when the copr build is initiated, not when the table is made build_submitted_time = Column(DateTime, default=datetime.utcnow) build_start_time = Column(DateTime) build_finished_time = Column(DateTime) # project name as shown in copr project_name = Column(String) owner = Column(String) # metadata for the build which didn't make it to schema yet # metadata is reserved to sqlalch data = Column(JSON) # info about built packages we get from Copr, e.g. # [ # { # "arch": "noarch", # "epoch": 0, # "name": "python3-packit", # "release": "1.20210930124525726166.main.0.g0b7b36b.fc36", # "version": "0.38.0", # } # ] built_packages = Column(JSON) runs = relationship("PipelineModel", back_populates="copr_build") def set_built_packages(self, built_packages): with get_sa_session() as session: self.built_packages = built_packages session.add(self) def set_start_time(self, start_time: datetime): with get_sa_session() as session: self.build_start_time = start_time session.add(self) def set_end_time(self, end_time: datetime): with get_sa_session() as session: self.build_finished_time = end_time session.add(self) def set_status(self, status: str): with get_sa_session() as session: self.status = status session.add(self) def set_build_logs_url(self, build_logs: str): with get_sa_session() as session: self.build_logs_url = build_logs session.add(self) def get_srpm_build(self) -> Optional["SRPMBuildModel"]: if not self.runs: return None # All SRPMBuild models for all the runs have to be same. 
return self.runs[0].srpm_build @classmethod def get_by_id(cls, id_: int) -> Optional["CoprBuildTargetModel"]: with get_sa_session() as session: return session.query(CoprBuildTargetModel).filter_by(id=id_).first() @classmethod def get_all(cls) -> Optional[Iterable["CoprBuildTargetModel"]]: with get_sa_session() as session: return ( session.query(CoprBuildTargetModel) .order_by(desc(CoprBuildTargetModel.id)) .all() ) @classmethod def get_merged_chroots( cls, first: int, last: int ) -> Iterable["CoprBuildTargetModel"]: """Returns a list of unique build ids with merged status, chroots Details: https://github.com/packit/packit-service/pull/674#discussion_r439819852 """ with get_sa_session() as session: return ( session.query( # We need something to order our merged builds by, # so set new_id to be min(ids of to-be-merged rows) func.min(CoprBuildTargetModel.id).label("new_id"), # Select identical element(s) CoprBuildTargetModel.build_id, # Merge chroots and statuses from different rows into one func.array_agg(psql_array([CoprBuildTargetModel.target])).label( "target" ), func.array_agg(psql_array([CoprBuildTargetModel.status])).label( "status" ), func.array_agg(psql_array([CoprBuildTargetModel.id])).label( "packit_id_per_chroot" ), ) .group_by( CoprBuildTargetModel.build_id ) # Group by identical element(s) .order_by(desc("new_id")) .slice(first, last) ) # Returns all builds with that build_id, irrespective of target @classmethod def get_all_by_build_id( cls, build_id: Union[str, int] ) -> Optional[Iterable["CoprBuildTargetModel"]]: if isinstance(build_id, int): # See the comment in get_by_build_id() build_id = str(build_id) with get_sa_session() as session: return session.query(CoprBuildTargetModel).filter_by(build_id=build_id) @classmethod def get_all_by_status( cls, status: str ) -> Optional[Iterable["CoprBuildTargetModel"]]: """Returns all builds which currently have the given status.""" with get_sa_session() as session: return session.query(CoprBuildTargetModel).filter_by(status=status) # returns the build matching the build_id and the target @classmethod def get_by_build_id( cls, build_id: Union[str, int], target: str = None ) -> Optional["CoprBuildTargetModel"]: if isinstance(build_id, int): # PG is pesky about this: # LINE 3: WHERE copr_builds.build_id = 1245767 AND copr_builds.target ... # HINT: No operator matches the given name and argument type(s). # You might need to add explicit type casts. build_id = str(build_id) with get_sa_session() as session: query = session.query(CoprBuildTargetModel).filter_by(build_id=build_id) if target: query = query.filter_by(target=target) return query.first() @staticmethod def get_all_by( project_name: str, commit_sha: str, owner: str = None, target: str = None, ) -> Optional[Iterable["CoprBuildTargetModel"]]: """ All owner/project_name builds sorted from latest to oldest with the given commit_sha and optional target. 
""" non_none_args = { arg: value for arg, value in locals().items() if value is not None } with get_sa_session() as session: query = ( session.query(CoprBuildTargetModel) .filter_by(**non_none_args) .order_by(CoprBuildTargetModel.build_id.desc()) ) return query.all() @classmethod def get_all_by_commit( cls, commit_sha: str ) -> Optional[Iterable["CoprBuildTargetModel"]]: """Returns all builds that match a given commit sha""" with get_sa_session() as session: query = session.query(CoprBuildTargetModel).filter_by(commit_sha=commit_sha) return query.all() @classmethod def create( cls, build_id: str, commit_sha: str, project_name: str, owner: str, web_url: str, target: str, status: str, run_model: "PipelineModel", task_accepted_time: Optional[datetime] = None, ) -> "CoprBuildTargetModel": with get_sa_session() as session: build = cls() build.build_id = build_id build.status = status build.project_name = project_name build.owner = owner build.commit_sha = commit_sha build.web_url = web_url build.target = target build.task_accepted_time = task_accepted_time session.add(build) if run_model.copr_build: # Clone run model new_run_model = PipelineModel.create( type=run_model.job_trigger.type, trigger_id=run_model.job_trigger.trigger_id, ) new_run_model.srpm_build = run_model.srpm_build new_run_model.copr_build = build session.add(new_run_model) else: run_model.copr_build = build session.add(run_model) return build @classmethod def get( cls, build_id: str, target: str, ) -> "CoprBuildTargetModel": return cls.get_by_build_id(build_id, target) def __repr__(self): return f"COPRBuildModel(id={self.id}, build_submitted_time={self.build_submitted_time})" class KojiBuildTargetModel(ProjectAndTriggersConnector, Base): """we create an entry for every target""" __tablename__ = "koji_build_targets" id = Column(Integer, primary_key=True) build_id = Column(String, index=True) # koji build id # commit sha of the PR (or a branch, release) we used for a build commit_sha = Column(String) # what's the build status? status = Column(String) # chroot, but we use the word target in our docs target = Column(String) # URL to koji web ui for the particular build web_url = Column(String) # url to koji build logs build_logs_url = Column(String) # datetime.utcnow instead of datetime.utcnow() because its an argument to the function # so it will run when the koji build is initiated, not when the table is made build_submitted_time = Column(DateTime, default=datetime.utcnow) build_start_time = Column(DateTime) build_finished_time = Column(DateTime) # metadata for the build which didn't make it to schema yet # metadata is reserved to sqlalch data = Column(JSON) # it is a scratch build? 
scratch = Column(Boolean) runs = relationship("PipelineModel", back_populates="koji_build") def set_status(self, status: str): with get_sa_session() as session: self.status = status session.add(self) def set_build_logs_url(self, build_logs: str): with get_sa_session() as session: self.build_logs_url = build_logs session.add(self) def set_web_url(self, web_url: str): with get_sa_session() as session: self.web_url = web_url session.add(self) def set_build_start_time(self, build_start_time: Optional[DateTime]): with get_sa_session() as session: self.build_start_time = build_start_time session.add(self) def set_build_finished_time(self, build_finished_time: Optional[DateTime]): with get_sa_session() as session: self.build_finished_time = build_finished_time session.add(self) def set_build_submitted_time(self, build_submitted_time: Optional[DateTime]): with get_sa_session() as session: self.build_submitted_time = build_submitted_time session.add(self) def set_scratch(self, value: bool): with get_sa_session() as session: self.scratch = value session.add(self) def get_srpm_build(self) -> Optional["SRPMBuildModel"]: if not self.runs: return None # All SRPMBuild models for all the runs have to be same. return self.runs[0].srpm_build @classmethod def get_by_id(cls, id_: int) -> Optional["KojiBuildTargetModel"]: with get_sa_session() as session: return session.query(KojiBuildTargetModel).filter_by(id=id_).first() @classmethod def get_all(cls) -> Optional[Iterable["KojiBuildTargetModel"]]: with get_sa_session() as session: return session.query(KojiBuildTargetModel).all() @classmethod def get_range(cls, first: int, last: int) -> Iterable["KojiBuildTargetModel"]: with get_sa_session() as session: return ( session.query(KojiBuildTargetModel) .order_by(desc(KojiBuildTargetModel.id)) .slice(first, last) ) # Returns all builds with that build_id, irrespective of target @classmethod def get_all_by_build_id( cls, build_id: Union[str, int] ) -> Optional[Iterable["KojiBuildTargetModel"]]: if isinstance(build_id, int): # See the comment in get_by_build_id() build_id = str(build_id) with get_sa_session() as session: return session.query(KojiBuildTargetModel).filter_by(build_id=build_id) @classmethod def get_by_build_id( cls, build_id: Union[str, int], target: Optional[str] = None ) -> Optional["KojiBuildTargetModel"]: """ Returns the build matching the build_id and the target. """ if isinstance(build_id, int): # PG is pesky about this: # LINE 3: WHERE koji_builds.build_id = 1245767 AND koji_builds.target ... # HINT: No operator matches the given name and argument type(s). # You might need to add explicit type casts. 
build_id = str(build_id) with get_sa_session() as session: if target: return ( session.query(KojiBuildTargetModel) .filter_by(build_id=build_id, target=target) .first() ) return ( session.query(KojiBuildTargetModel).filter_by(build_id=build_id).first() ) @classmethod def create( cls, build_id: str, commit_sha: str, web_url: str, target: str, status: str, scratch: bool, run_model: "PipelineModel", ) -> "KojiBuildTargetModel": with get_sa_session() as session: build = cls() build.build_id = build_id build.status = status build.commit_sha = commit_sha build.web_url = web_url build.target = target build.scratch = scratch session.add(build) if run_model.koji_build: # Clone run model new_run_model = PipelineModel.create( type=run_model.job_trigger.type, trigger_id=run_model.job_trigger.trigger_id, ) new_run_model.srpm_build = run_model.srpm_build new_run_model.koji_build = build session.add(new_run_model) else: run_model.koji_build = build session.add(run_model) return build @classmethod def get( cls, build_id: str, target: str, ) -> Optional["KojiBuildTargetModel"]: return cls.get_by_build_id(build_id, target) def __repr__(self): return ( f"KojiBuildTargetModel(id={self.id}, " f"build_submitted_time={self.build_submitted_time})" ) class SRPMBuildModel(ProjectAndTriggersConnector, Base): __tablename__ = "srpm_builds" id = Column(Integer, primary_key=True) status = Column(String) # our logs we want to show to the user logs = Column(Text) build_submitted_time = Column(DateTime, default=datetime.utcnow) build_start_time = Column(DateTime) build_finished_time = Column(DateTime) commit_sha = Column(String) # url for downloading the SRPM url = Column(Text) # attributes for SRPM built by Copr logs_url = Column(Text) copr_build_id = Column(String, index=True) copr_web_url = Column(Text) runs = relationship("PipelineModel", back_populates="srpm_build") @classmethod def create_with_new_run( cls, trigger_model: AbstractTriggerDbType, commit_sha: str, copr_build_id: Optional[str] = None, copr_web_url: Optional[str] = None, ) -> Tuple["SRPMBuildModel", "PipelineModel"]: """ Create a new model for SRPM and connect it to the PipelineModel. * New SRPMBuildModel model will have connection to a new PipelineModel. * The newly created PipelineModel can reuse existing JobTriggerModel (e.g.: one pull-request can have multiple runs). More specifically: * On PR creation: -> SRPMBuildModel is created. -> New PipelineModel is created. -> JobTriggerModel is created. * On `/packit build` comment or new push: -> SRPMBuildModel is created. -> New PipelineModel is created. -> JobTriggerModel is reused. * On `/packit test` comment: -> SRPMBuildModel and CoprBuildTargetModel are reused. -> New TFTTestRunTargetModel is created. -> New PipelineModel is created and collects this new TFTTestRunTargetModel with old SRPMBuildModel and CoprBuildTargetModel. 
""" with get_sa_session() as session: srpm_build = cls() srpm_build.status = "pending" srpm_build.commit_sha = commit_sha srpm_build.copr_build_id = copr_build_id srpm_build.copr_web_url = copr_web_url session.add(srpm_build) # Create a new run model, reuse trigger_model if it exists: new_run_model = PipelineModel.create( type=trigger_model.job_trigger_model_type, trigger_id=trigger_model.id ) new_run_model.srpm_build = srpm_build session.add(new_run_model) return srpm_build, new_run_model @classmethod def get_by_id( cls, id_: int, ) -> Optional["SRPMBuildModel"]: with get_sa_session() as session: return session.query(SRPMBuildModel).filter_by(id=id_).first() @classmethod def get(cls, first: int, last: int) -> Iterable["SRPMBuildModel"]: with get_sa_session() as session: return ( session.query(SRPMBuildModel) .order_by(desc(SRPMBuildModel.id)) .slice(first, last) ) @classmethod def get_by_copr_build_id( cls, copr_build_id: Union[str, int] ) -> Optional["SRPMBuildModel"]: if isinstance(copr_build_id, int): copr_build_id = str(copr_build_id) with get_sa_session() as session: return ( session.query(SRPMBuildModel) .filter_by(copr_build_id=copr_build_id) .first() ) @classmethod def get_older_than(cls, delta: timedelta) -> Iterable["SRPMBuildModel"]: """Return builds older than delta, whose logs/artifacts haven't been discarded yet.""" delta_ago = datetime.utcnow() - delta with get_sa_session() as session: return session.query(SRPMBuildModel).filter( SRPMBuildModel.build_submitted_time < delta_ago, SRPMBuildModel.logs.isnot(None), ) def set_url(self, url: Optional[str]) -> None: with get_sa_session() as session: self.url = null() if url is None else url session.add(self) def set_logs(self, logs: Optional[str]) -> None: with get_sa_session() as session: self.logs = null() if logs is None else logs session.add(self) def set_start_time(self, start_time: datetime) -> None: with get_sa_session() as session: self.build_start_time = start_time session.add(self) def set_end_time(self, end_time: datetime) -> None: with get_sa_session() as session: self.build_finished_time = end_time session.add(self) def set_build_logs_url(self, logs_url: str) -> None: with get_sa_session() as session: self.logs_url = logs_url session.add(self) def set_status(self, status: str) -> None: with get_sa_session() as session: self.status = status session.add(self) def __repr__(self): return f"SRPMBuildModel(id={self.id}, build_submitted_time={self.build_submitted_time})" class AllowlistStatus(str, enum.Enum): approved_automatically = ALLOWLIST_CONSTANTS["approved_automatically"] waiting = ALLOWLIST_CONSTANTS["waiting"] approved_manually = ALLOWLIST_CONSTANTS["approved_manually"] denied = ALLOWLIST_CONSTANTS["denied"] class AllowlistModel(Base): __tablename__ = "allowlist" id = Column(Integer, primary_key=True) namespace = Column(String, index=True) # renamed from account_name status = Column(Enum(AllowlistStatus)) fas_account = Column(String) @classmethod def add_namespace( cls, namespace: str, status: str, fas_account: Optional[str] = None ): """ Adds namespace with specific status to the allowlist. If namespace is present, just changes the status. Args: namespace (str): Namespace to be added. Can be `github.com/namespace` or specific repository `github.com/namespace/repository.git`. status (str): Status to be set. AllowlistStatus enumeration as string. fas_account (Optional[str]): FAS login, in case the namespace was automatically approved through the FAS login of user that installed GitHub App. Defaults to `None`. 
Returns: Newly created entry or entry that represents requested namespace. """ with get_sa_session() as session: namespace_entry = cls.get_namespace(namespace) if not namespace_entry: namespace_entry = cls() namespace_entry.namespace = namespace namespace_entry.status = status if fas_account: namespace_entry.fas_account = fas_account session.add(namespace_entry) return namespace_entry @classmethod def get_namespace(cls, namespace: str) -> Optional["AllowlistModel"]: """ Retrieves namespace from the allowlist. Args: namespace (str): Namespace to be added. Can be `github.com/namespace` or specific repository `github.com/namespace/repository.git`. Returns: Entry that represents namespace or `None` if cannot be found. """ with get_sa_session() as session: return session.query(AllowlistModel).filter_by(namespace=namespace).first() @classmethod def get_namespaces_by_status( cls, status: str ) -> Optional[Iterable["AllowlistModel"]]: """ Get list of namespaces with specific status. Args: status (str): Status of the namespaces. AllowlistStatus enumeration as string. Returns: List of the namespaces with set status. """ with get_sa_session() as session: return session.query(AllowlistModel).filter_by(status=status) @classmethod def remove_namespace(cls, namespace: str) -> Optional["AllowlistModel"]: with get_sa_session() as session: namespace_entry = session.query(AllowlistModel).filter_by( namespace=namespace ) if namespace_entry: namespace_entry.delete() return namespace_entry @classmethod def get_all(cls) -> Optional[Iterable["AllowlistModel"]]: with get_sa_session() as session: return session.query(AllowlistModel).all() def to_dict(self) -> Dict[str, str]: return { "namespace": self.namespace, "status": self.status, "fas_account": self.fas_account, } def __repr__(self): return ( f'<AllowlistModel(namespace="{self.namespace}", ' f'status="{self.status}", ' f'fas_account="{self.fas_account}")>' ) class TestingFarmResult(str, enum.Enum): new = "new" queued = "queued" running = "running" passed = "passed" failed = "failed" skipped = "skipped" error = "error" unknown = "unknown" needs_inspection = "needs_inspection" class TFTTestRunTargetModel(ProjectAndTriggersConnector, Base): __tablename__ = "tft_test_run_targets" id = Column(Integer, primary_key=True) pipeline_id = Column(String, index=True) commit_sha = Column(String) status = Column(Enum(TestingFarmResult)) target = Column(String) web_url = Column(String) # datetime.utcnow instead of datetime.utcnow() because its an argument to the function # so it will run when the model is initiated, not when the table is made submitted_time = Column(DateTime, default=datetime.utcnow) data = Column(JSON) runs = relationship("PipelineModel", back_populates="test_run") def set_status(self, status: TestingFarmResult, created: Optional[DateTime] = None): """ set status of the TF run and optionally set the created datetime as well """ with get_sa_session() as session: self.status = status if created and not self.submitted_time: self.submitted_time = created session.add(self) def set_web_url(self, web_url: str): with get_sa_session() as session: self.web_url = web_url session.add(self) @classmethod def create( cls, pipeline_id: str, commit_sha: str, status: TestingFarmResult, target: str, run_model: "PipelineModel", web_url: Optional[str] = None, data: dict = None, ) -> "TFTTestRunTargetModel": with get_sa_session() as session: test_run = cls() test_run.pipeline_id = pipeline_id test_run.commit_sha = commit_sha test_run.status = status test_run.target = target 
test_run.web_url = web_url test_run.data = data session.add(test_run) if run_model.test_run: # Clone run model new_run_model = PipelineModel.create( type=run_model.job_trigger.type, trigger_id=run_model.job_trigger.trigger_id, ) new_run_model.srpm_build = run_model.srpm_build new_run_model.copr_build = run_model.copr_build new_run_model.test_run = test_run session.add(new_run_model) else: run_model.test_run = test_run session.add(run_model) return test_run @classmethod def get_by_pipeline_id(cls, pipeline_id: str) -> Optional["TFTTestRunTargetModel"]: with get_sa_session() as session: return ( session.query(TFTTestRunTargetModel) .filter_by(pipeline_id=pipeline_id) .first() ) @classmethod def get_all_by_status( cls, *status: TestingFarmResult ) -> Optional[Iterable["TFTTestRunTargetModel"]]: """Returns all runs which currently have their status set to one of the requested statuses.""" with get_sa_session() as session: return session.query(TFTTestRunTargetModel).filter( TFTTestRunTargetModel.status.in_(status) ) @classmethod def get_by_id(cls, id: int) -> Optional["TFTTestRunTargetModel"]: with get_sa_session() as session: return session.query(TFTTestRunTargetModel).filter_by(id=id).first() @staticmethod def get_all_by_commit_target( commit_sha: str, target: str = None, ) -> Optional[Iterable["TFTTestRunTargetModel"]]: """ All tests with the given commit_sha and optional target. """ non_none_args = { arg: value for arg, value in locals().items() if value is not None } with get_sa_session() as session: query = session.query(TFTTestRunTargetModel).filter_by(**non_none_args) return query.all() @classmethod def get_range( cls, first: int, last: int ) -> Optional[Iterable["TFTTestRunTargetModel"]]: with get_sa_session() as session: return ( session.query(TFTTestRunTargetModel) .order_by(desc(TFTTestRunTargetModel.id)) .slice(first, last) ) def __repr__(self): return f"TFTTestRunTargetModel(id={self.id}, pipeline_id={self.pipeline_id})" class ProposeDownstreamTargetStatus(str, enum.Enum): queued = "queued" running = "running" error = "error" retry = "retry" submitted = "submitted" class ProposeDownstreamTargetModel(ProjectAndTriggersConnector, Base): __tablename__ = "propose_downstream_run_targets" id = Column(Integer, primary_key=True) branch = Column(String, default="unknown") downstream_pr_url = Column(String) status = Column(Enum(ProposeDownstreamTargetStatus)) submitted_time = Column(DateTime, default=datetime.utcnow) start_time = Column(DateTime) finished_time = Column(DateTime) logs = Column(Text) propose_downstream_id = Column(Integer, ForeignKey("propose_downstream_runs.id")) propose_downstream = relationship( "ProposeDownstreamModel", back_populates="propose_downstream_targets" ) def __repr__(self) -> str: return f"ProposeDownstreamTargetModel(id={self.id})" @classmethod def create( cls, status: ProposeDownstreamTargetStatus, ) -> "ProposeDownstreamTargetModel": with get_sa_session() as session: downstream_pr = cls() downstream_pr.status = status session.add(downstream_pr) return downstream_pr def set_status(self, status: ProposeDownstreamTargetStatus) -> None: with get_sa_session() as session: self.status = status session.add(self) def set_downstream_pr_url(self, downstream_pr_url: str) -> None: with get_sa_session() as session: self.downstream_pr_url = downstream_pr_url session.add(self) def set_start_time(self, start_time: DateTime) -> None: with get_sa_session() as session: self.start_time = start_time session.add(self) def set_finished_time(self, finished_time: DateTime) -> None: 
with get_sa_session() as session: self.finished_time = finished_time session.add(self) def set_branch(self, branch: str) -> None: with get_sa_session() as session: self.branch = branch session.add(self) def set_logs(self, logs: str) -> None: with get_sa_session() as session: self.logs = logs session.add(self) @classmethod def get_by_id(cls, id_: int) -> Optional["ProposeDownstreamTargetModel"]: with get_sa_session() as session: return session.query(ProposeDownstreamTargetModel).filter_by(id=id_).first() class ProposeDownstreamStatus(str, enum.Enum): running = "running" finished = "finished" error = "error" class ProposeDownstreamModel(ProjectAndTriggersConnector, Base): __tablename__ = "propose_downstream_runs" id = Column(Integer, primary_key=True) status = Column(Enum(ProposeDownstreamStatus)) submitted_time = Column(DateTime, default=datetime.utcnow) runs = relationship("PipelineModel", back_populates="propose_downstream_run") propose_downstream_targets = relationship( "ProposeDownstreamTargetModel", back_populates="propose_downstream" ) def __repr__(self) -> str: return f"ProposeDownstreamModel(id={self.id}, submitted_time={self.submitted_time})" @classmethod def create_with_new_run( cls, status: ProposeDownstreamStatus, trigger_model: AbstractTriggerDbType, ) -> Tuple["ProposeDownstreamModel", "PipelineModel"]: """ Create a new model for ProposeDownstream and connect it to the PipelineModel. * New ProposeDownstreamModel model will have connection to a new PipelineModel. * The newly created PipelineModel can reuse existing JobTriggerModel (e.g.: one IssueModel can have multiple runs). More specifically: * On `/packit propose-downstream` issue comment: -> ProposeDownstreamModel is created. -> New PipelineModel is created. -> JobTriggerModel is created. * Something went wrong, after correction and another `/packit propose-downstream` comment: -> ProposeDownstreamModel is created. -> PipelineModel is created. -> JobTriggerModel is reused. 
* TODO: we will use propose-downstream in commit-checks - fill in once it's implemented """ with get_sa_session() as session: propose_downstream = cls() propose_downstream.status = status session.add(propose_downstream) # Create a pipeline, reuse trigger_model if it exists: pipeline = PipelineModel.create( type=trigger_model.job_trigger_model_type, trigger_id=trigger_model.id ) pipeline.propose_downstream_run = propose_downstream session.add(pipeline) return propose_downstream, pipeline def set_status(self, status: ProposeDownstreamStatus) -> None: with get_sa_session() as session: self.status = status session.add(self) @classmethod def get_by_id(cls, id_: int) -> Optional["ProposeDownstreamModel"]: with get_sa_session() as session: return session.query(ProposeDownstreamModel).filter_by(id=id_).first() @classmethod def get_all_by_status( cls, status: str ) -> Optional[Iterable["ProposeDownstreamModel"]]: with get_sa_session() as session: return session.query(ProposeDownstreamModel).filter_by(status=status) @classmethod def get_range(cls, first: int, last: int) -> Iterable["ProposeDownstreamModel"]: with get_sa_session() as session: return ( session.query(ProposeDownstreamModel) .order_by(desc(ProposeDownstreamModel.id)) .slice(first, last) ) AbstractBuildTestDbType = Union[ CoprBuildTargetModel, KojiBuildTargetModel, SRPMBuildModel, TFTTestRunTargetModel, ProposeDownstreamModel, ] class ProjectAuthenticationIssueModel(Base): __tablename__ = "project_authentication_issue" id = Column(Integer, primary_key=True) project = relationship( "GitProjectModel", back_populates="project_authentication_issue" ) # Check to know if we created a issue for the repo. issue_created = Column(Boolean) project_id = Column(Integer, ForeignKey("git_projects.id")) @classmethod def get_project( cls, namespace: str, repo_name: str, project_url: str ) -> Optional["ProjectAuthenticationIssueModel"]: with get_sa_session() as session: project = GitProjectModel.get_or_create( namespace=namespace, repo_name=repo_name, project_url=project_url ) return ( session.query(ProjectAuthenticationIssueModel) .filter_by(project_id=project.id) .first() ) @classmethod def create( cls, namespace: str, repo_name: str, project_url: str, issue_created: bool ) -> "ProjectAuthenticationIssueModel": with get_sa_session() as session: project = GitProjectModel.get_or_create( namespace=namespace, repo_name=repo_name, project_url=project_url ) project_authentication_issue = cls() project_authentication_issue.issue_created = issue_created project_authentication_issue.project_id = project.id session.add(project_authentication_issue) return project_authentication_issue def __repr__(self): return ( f"ProjectAuthenticationIssueModel(project={self.project}, " f"issue_created={self.issue_created})" ) class GithubInstallationModel(Base): __tablename__ = "github_installations" id = Column(Integer, primary_key=True, autoincrement=True) # information about account (user/organization) into which the app has been installed account_login = Column(String) account_id = Column(Integer) account_url = Column(String) account_type = Column(String) # information about user who installed the app into 'account' sender_id = Column(Integer) sender_login = Column(String) created_at = Column(DateTime, default=datetime.utcnow) repositories = Column(ARRAY(Integer, ForeignKey("git_projects.id"))) @classmethod def get_project(cls, repository: str): namespace, repo_name = repository.split("/") return GitProjectModel.get_or_create( namespace=namespace, repo_name=repo_name, 
project_url=f"https://github.com/{namespace}/{repo_name}", ) @classmethod def get_by_id(cls, id: int) -> Optional["GithubInstallationModel"]: with get_sa_session() as session: return session.query(GithubInstallationModel).filter_by(id=id).first() @classmethod def get_by_account_login( cls, account_login: str ) -> Optional["GithubInstallationModel"]: with get_sa_session() as session: return ( session.query(GithubInstallationModel) .filter_by(account_login=account_login) .first() ) @classmethod def get_all(cls) -> Optional[Iterable["GithubInstallationModel"]]: with get_sa_session() as session: return session.query(GithubInstallationModel).all() @classmethod def create(cls, event): with get_sa_session() as session: installation = cls.get_by_account_login(event.account_login) if not installation: installation = cls() installation.account_login = event.account_login installation.account_id = event.account_id installation.account_url = event.account_url installation.account_type = event.account_type installation.sender_login = event.sender_login installation.sender_id = event.sender_id installation.created_at = event.created_at installation.repositories = [ cls.get_project(repo).id for repo in event.repositories ] session.add(installation) return installation def to_dict(self): return { "account_login": self.account_login, "account_id": self.account_id, "account_type": self.account_type, "account_url": self.account_url, "sender_login": self.sender_login, "sender_id": self.sender_id, # Inconsistent with other API endpoints, kept for readability for # internal use, if necessary "created_at": optional_time(self.created_at), } def __repr__(self): return f"GithubInstallationModel(id={self.id}, account={self.account_login})" class SourceGitPRDistGitPRModel(Base): __tablename__ = "source_git_pr_dist_git_pr" id = Column(Integer, primary_key=True) # our database PK source_git_pull_request_id = Column( Integer, ForeignKey("pull_requests.id"), unique=True, index=True ) dist_git_pull_request_id = Column( Integer, ForeignKey("pull_requests.id"), unique=True, index=True ) source_git_pull_request = relationship( "PullRequestModel", primaryjoin="SourceGitPRDistGitPRModel.source_git_pull_request_id==PullRequestModel.id", uselist=False, ) dist_git_pull_request = relationship( "PullRequestModel", primaryjoin="SourceGitPRDistGitPRModel.dist_git_pull_request_id==PullRequestModel.id", uselist=False, ) @classmethod def get_or_create( cls, source_git_pr_id: int, source_git_namespace: str, source_git_repo_name: str, source_git_project_url: str, dist_git_pr_id: int, dist_git_namespace: str, dist_git_repo_name: str, dist_git_project_url: str, ) -> "SourceGitPRDistGitPRModel": with get_sa_session() as session: source_git_pull_request = PullRequestModel.get_or_create( pr_id=source_git_pr_id, namespace=source_git_namespace, repo_name=source_git_repo_name, project_url=source_git_project_url, ) dist_git_pull_request = PullRequestModel.get_or_create( pr_id=dist_git_pr_id, namespace=dist_git_namespace, repo_name=dist_git_repo_name, project_url=dist_git_project_url, ) rel = ( session.query(SourceGitPRDistGitPRModel) .filter_by(source_git_pull_request_id=source_git_pull_request.id) .filter_by(dist_git_pull_request_id=dist_git_pull_request.id) .one_or_none() ) if not rel: rel = SourceGitPRDistGitPRModel() rel.source_git_pull_request_id = source_git_pull_request.id rel.dist_git_pull_request_id = dist_git_pull_request.id session.add(rel) return rel @classmethod def get_by_id(cls, id_: int) -> Optional["SourceGitPRDistGitPRModel"]: 
with get_sa_session() as session: return ( session.query(SourceGitPRDistGitPRModel).filter_by(id=id_).one_or_none() ) @classmethod def get_by_source_git_id(cls, id_: int) -> Optional["SourceGitPRDistGitPRModel"]: with get_sa_session() as session: return ( session.query(SourceGitPRDistGitPRModel) .filter_by(source_git_pull_request_id=id_) .one_or_none() ) @classmethod def get_by_dist_git_id(cls, id_: int) -> Optional["SourceGitPRDistGitPRModel"]: with get_sa_session() as session: return ( session.query(SourceGitPRDistGitPRModel) .filter_by(dist_git_pull_request_id=id_) .one_or_none() )
[]
[]
[ "POSTGRESQL_HOST", "POSTGRESQL_PORT", "POSTGRESQL_DATABASE", "POSTGRESQL_PASSWORD", "POSTGRESQL_USER" ]
[]
["POSTGRESQL_HOST", "POSTGRESQL_PORT", "POSTGRESQL_DATABASE", "POSTGRESQL_PASSWORD", "POSTGRESQL_USER"]
python
5
0
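A minimal Python sketch (not part of the dataset record above) of how the packit-service models in the preceding file are meant to be chained, based only on the method signatures and docstrings shown there. The import path, the concrete IDs and URLs, and a working database session behind get_sa_session are assumptions made for illustration only.

# Assumed module path; values below are illustrative placeholders.
from packit_service.models import (
    CoprBuildTargetModel,
    PullRequestModel,
    SRPMBuildModel,
)

# 1) The trigger: one row per (project, PR number).
pr_model = PullRequestModel.get_or_create(
    pr_id=42,
    namespace="packit",
    repo_name="hello-world",
    project_url="https://github.com/packit/hello-world",
)

# 2) A new SRPM build opens a new PipelineModel and reuses the
#    JobTriggerModel that belongs to this pull request.
srpm_build, pipeline = SRPMBuildModel.create_with_new_run(
    trigger_model=pr_model,
    commit_sha="abcdef0",
)

# 3) A Copr build for one target is attached to the same pipeline.
copr_build = CoprBuildTargetModel.create(
    build_id="123456",
    commit_sha="abcdef0",
    project_name="packit-hello-world-42",
    owner="packit",
    web_url="https://copr.fedorainfracloud.org/coprs/build/123456/",
    target="fedora-rawhide-x86_64",
    status="pending",
    run_model=pipeline,
)

# 4) Builds can later be read back through the trigger object.
builds_for_pr = pr_model.get_copr_builds()

A second chroot would call CoprBuildTargetModel.create again with the same run_model; as the create() implementation above shows, that clones the pipeline while reusing the existing SRPM build.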
tscreen_linux.go
// +build linux // Copyright 2019 The TCell Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use file except in compliance with the License. // You may obtain a copy of the license at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package tcell import ( "os" "os/signal" "strconv" "syscall" "golang.org/x/sys/unix" ) type termiosPrivate struct { tio *unix.Termios } func (t *tScreen) termioInit() error { var e error var raw *unix.Termios var tio *unix.Termios if t.in, e = os.OpenFile("/dev/tty", os.O_RDONLY, 0); e != nil { goto failed } if t.out, e = os.OpenFile("/dev/tty", os.O_WRONLY, 0); e != nil { goto failed } tio, e = unix.IoctlGetTermios(int(t.out.(*os.File).Fd()), unix.TCGETS) if e != nil { goto failed } t.tiosp = &termiosPrivate{tio: tio} // make a local copy, to make it raw raw = &unix.Termios{ Cflag: tio.Cflag, Oflag: tio.Oflag, Iflag: tio.Iflag, Lflag: tio.Lflag, Cc: tio.Cc, } raw.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON) raw.Oflag &^= unix.OPOST raw.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN) raw.Cflag &^= (unix.CSIZE | unix.PARENB) raw.Cflag |= unix.CS8 // This is setup for blocking reads. In the past we attempted to // use non-blocking reads, but now a separate input loop and timer // copes with the problems we had on some systems (BSD/Darwin) // where close hung forever. raw.Cc[unix.VMIN] = 1 raw.Cc[unix.VTIME] = 0 e = unix.IoctlSetTermios(int(t.out.(*os.File).Fd()), unix.TCSETS, raw) if e != nil { goto failed } signal.Notify(t.sigwinch, syscall.SIGWINCH) if w, h, e := t.getWinSize(); e == nil && w != 0 && h != 0 { t.cells.Resize(w, h) } return nil failed: if t.in != nil { t.in.(*os.File).Close() } if t.out != nil { t.out.(*os.File).Close() } return e } func (t *tScreen) termioFini() { signal.Stop(t.sigwinch) <-t.indoneq if t.out != nil && t.tiosp != nil { unix.IoctlSetTermios(int(t.out.(*os.File).Fd()), unix.TCSETSF, t.tiosp.tio) t.out.(*os.File).Close() } if t.in != nil { t.in.(*os.File).Close() } } func (t *tScreen) getWinSize() (int, int, error) { wsz, err := unix.IoctlGetWinsize(int(t.out.(*os.File).Fd()), unix.TIOCGWINSZ) if err != nil { return -1, -1, err } cols := int(wsz.Col) rows := int(wsz.Row) if cols == 0 { colsEnv := os.Getenv("COLUMNS") if colsEnv != "" { if cols, err = strconv.Atoi(colsEnv); err != nil { return -1, -1, err } } else { cols = t.ti.Columns } } if rows == 0 { rowsEnv := os.Getenv("LINES") if rowsEnv != "" { if rows, err = strconv.Atoi(rowsEnv); err != nil { return -1, -1, err } } else { rows = t.ti.Lines } } return cols, rows, nil }
[ "\"COLUMNS\"", "\"LINES\"" ]
[]
[ "LINES", "COLUMNS" ]
[]
["LINES", "COLUMNS"]
go
2
0
sdk/vcs/git/git.go
package git import ( "fmt" "io" "io/ioutil" "log" "net/url" "os" "os/exec" "path/filepath" "strings" "syscall" "github.com/ovh/cds/sdk" "github.com/ovh/cds/sdk/vcs" ) var ( verbose bool //LogFunc can be overrided LogFunc = log.Printf ) func init() { if os.Getenv("CDS_VERBOSE") == "true" { verbose = true } } // AuthOpts is a optional structs for git command type AuthOpts struct { Username string Password string PrivateKey vcs.SSHKey SignKey vcs.PGPKey } // OutputOpts is a optional structs for git clone command type OutputOpts struct { Stdout io.Writer Stderr io.Writer } type cmds []cmd func (c cmds) Strings() []string { res := make([]string, len(c)) for i := range c { res[i] = c[i].String() } return res } type cmd struct { workdir string cmd string args []string } func (c cmd) String() string { return c.cmd + " " + strings.Join(c.args, " ") } func getRepoURL(repo string, auth *AuthOpts) (string, error) { if strings.HasPrefix(repo, "http://") || strings.HasPrefix(repo, "ftp://") || strings.HasPrefix(repo, "ftps://") { return "", sdk.WithStack(fmt.Errorf("Git protocol not supported")) } if auth != nil && strings.HasPrefix(repo, "https://") { u, err := url.Parse(repo) if err != nil { return "", sdk.WithStack(err) } u.User = url.UserPassword(auth.Username, auth.Password) return u.String(), nil } return repo, nil } func runGitCommands(repo string, commands []cmd, auth *AuthOpts, output *OutputOpts) error { if strings.HasPrefix(repo, "https://") { return runGitCommandRaw(commands, output) } return runGitCommandsOverSSH(commands, auth, output) } func runGitCommandsOverSSH(commands []cmd, auth *AuthOpts, output *OutputOpts) error { if auth == nil { return sdk.WithStack(fmt.Errorf("Authentication is required for git over ssh")) } pkAbsFileName, err := filepath.Abs(auth.PrivateKey.Filename) if err != nil { return sdk.WithStack(err) } keyDir := filepath.Dir(pkAbsFileName) gitSSHCmd := exec.Command("ssh").Path gitSSHCmd += " -F /dev/null -o IdentitiesOnly=yes -o StrictHostKeyChecking=no" gitSSHCmd += " -i " + pkAbsFileName var wrapper string if sdk.GOOS == "windows" { gitSSHCmd += " %*" wrapper = gitSSHCmd } else { gitSSHCmd += ` "$@"` wrapper = `#!/bin/sh ` + gitSSHCmd } wrapperPath := filepath.Join(keyDir, "gitwrapper") if err := ioutil.WriteFile(wrapperPath, []byte(wrapper), os.FileMode(0700)); err != nil { return sdk.WithStack(err) } return runGitCommandRaw(commands, output, "GIT_SSH="+wrapperPath) } func runGitCommandRaw(cmds cmds, output *OutputOpts, envs ...string) error { osEnv := os.Environ() for _, e := range envs { osEnv = append(osEnv, e) } for _, c := range cmds { for i, arg := range c.args { c.args[i] = os.ExpandEnv(arg) } cmd := exec.Command(c.cmd, c.args...) cmd.Dir = c.workdir cmd.Env = osEnv if verbose { LogFunc("Executing Command %s - %v", c, envs) } if output != nil { cmd.Stdout = output.Stdout cmd.Stderr = output.Stderr } if err := cmd.Start(); err != nil { return sdk.WithStack(err) } //close stdin stdin, _ := cmd.StdinPipe() if stdin != nil { stdin.Close() } if err := cmd.Wait(); err != nil { if exiterr, ok := err.(*exec.ExitError); ok { if status, ok := exiterr.Sys().(syscall.WaitStatus); ok { if verbose { LogFunc("Command status code %d", status.ExitStatus()) } return fmt.Errorf("Command fail: %d", status.ExitStatus()) } return sdk.WithStack(exiterr) } return sdk.WithStack(err) } } return nil }
[ "\"CDS_VERBOSE\"" ]
[]
[ "CDS_VERBOSE" ]
[]
["CDS_VERBOSE"]
go
1
0
examples/charge.py
import os

import blockchyp

# initialize a client.
client = blockchyp.Client(
    api_key=os.environ["BC_API_KEY"],
    bearer_token=os.environ["BC_BEARER_TOKEN"],
    signing_key=os.environ["BC_SIGNING_KEY"],
)

# populate request parameters.
request = {
    "test": True,
    "terminalName": "Test Terminal",
    "amount": "55.00",
}

# run the transaction.
response = client.charge(request)

print("Response: %r" % response)
[]
[]
[ "BC_BEARER_TOKEN", "BC_API_KEY", "BC_SIGNING_KEY" ]
[]
["BC_BEARER_TOKEN", "BC_API_KEY", "BC_SIGNING_KEY"]
python
3
0
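The charge example above reads all three BlockChyp credentials straight from environment variables, which is what the BC_* annotations for this record capture. A small, hedged variation in Python (the fail-fast check is illustrative and not part of the BlockChyp SDK) that reports missing configuration before constructing the client:

import os
import sys

import blockchyp

REQUIRED_VARS = ("BC_API_KEY", "BC_BEARER_TOKEN", "BC_SIGNING_KEY")

# Fail fast with a readable message instead of a KeyError from os.environ[...].
missing = [name for name in REQUIRED_VARS if not os.environ.get(name)]
if missing:
    sys.exit("missing BlockChyp credentials: %s" % ", ".join(missing))

client = blockchyp.Client(
    api_key=os.environ["BC_API_KEY"],
    bearer_token=os.environ["BC_BEARER_TOKEN"],
    signing_key=os.environ["BC_SIGNING_KEY"],
)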
comparison_code_and_bloatlibd_results/decompiledLib/org/codehaus/plexus/util/cli/CommandLineUtils.java
package org.codehaus.plexus.util.cli; import java.io.IOException; import java.io.InputStream; import java.io.PrintStream; import java.util.Locale; import java.util.Map; import java.util.Properties; import java.util.StringTokenizer; import java.util.Vector; import org.codehaus.plexus.util.Os; import org.codehaus.plexus.util.StringUtils; public abstract class CommandLineUtils { private static final long MILLIS_PER_SECOND = 1000L; private static final long NANOS_PER_SECOND = 1000000000L; public static class StringStreamConsumer implements StreamConsumer { private StringBuffer string = new StringBuffer(); private String ls = System.getProperty("line.separator"); public void consumeLine(String line) { string.append(line).append(ls); } public String getOutput() { return string.toString(); } } public static int executeCommandLine(Commandline cl, StreamConsumer systemOut, StreamConsumer systemErr) throws CommandLineException { return executeCommandLine(cl, null, systemOut, systemErr, 0); } public static int executeCommandLine(Commandline cl, StreamConsumer systemOut, StreamConsumer systemErr, int timeoutInSeconds) throws CommandLineException { return executeCommandLine(cl, null, systemOut, systemErr, timeoutInSeconds); } public static int executeCommandLine(Commandline cl, InputStream systemIn, StreamConsumer systemOut, StreamConsumer systemErr) throws CommandLineException { return executeCommandLine(cl, systemIn, systemOut, systemErr, 0); } public static int executeCommandLine(Commandline cl, InputStream systemIn, StreamConsumer systemOut, StreamConsumer systemErr, int timeoutInSeconds) throws CommandLineException { CommandLineCallable future = executeCommandLineAsCallable(cl, systemIn, systemOut, systemErr, timeoutInSeconds); return future.call().intValue(); } public static CommandLineCallable executeCommandLineAsCallable(Commandline cl, InputStream systemIn, final StreamConsumer systemOut, final StreamConsumer systemErr, final int timeoutInSeconds) throws CommandLineException { if (cl == null) { throw new IllegalArgumentException("cl cannot be null."); } final Process p = cl.execute(); final Thread processHook = new Thread() { public void run() { val$p.destroy(); } }; ShutdownHookUtils.addShutDownHook(processHook); new CommandLineCallable() { public Integer call() throws CommandLineException { StreamFeeder inputFeeder = null; StreamPumper outputPumper = null; StreamPumper errorPumper = null; boolean success = false; try { if (val$systemIn != null) { inputFeeder = new StreamFeeder(val$systemIn, p.getOutputStream()); inputFeeder.start(); } outputPumper = new StreamPumper(p.getInputStream(), systemOut); outputPumper.start(); errorPumper = new StreamPumper(p.getErrorStream(), systemErr); errorPumper.start(); int returnValue; long now; int returnValue; if (timeoutInSeconds <= 0) { returnValue = p.waitFor(); } else { now = System.nanoTime(); long timeout = now + 1000000000L * timeoutInSeconds; while ((CommandLineUtils.isAlive(p)) && (System.nanoTime() < timeout)) { Thread.sleep(999L); } if (CommandLineUtils.isAlive(p)) { throw new InterruptedException(String.format("Process timed out after %d seconds.", new Object[] { Integer.valueOf(timeoutInSeconds) })); } returnValue = p.exitValue(); } if (inputFeeder != null) { inputFeeder.waitUntilDone(); } outputPumper.waitUntilDone(); errorPumper.waitUntilDone(); if (inputFeeder != null) { inputFeeder.close(); CommandLineUtils.handleException(inputFeeder, "stdin"); } outputPumper.close(); CommandLineUtils.handleException(outputPumper, "stdout"); 
errorPumper.close(); CommandLineUtils.handleException(errorPumper, "stderr"); success = true; return Integer.valueOf(returnValue); } catch (InterruptedException ex) { throw new CommandLineTimeOutException("Error while executing external command, process killed.", ex); } finally { if (inputFeeder != null) { inputFeeder.disable(); } if (outputPumper != null) { outputPumper.disable(); } if (errorPumper != null) { errorPumper.disable(); } try { ShutdownHookUtils.removeShutdownHook(processHook); processHook.run(); } finally { try { if (inputFeeder != null) { inputFeeder.close(); if (success) { success = false; CommandLineUtils.handleException(inputFeeder, "stdin"); success = true; } } } finally { try { if (outputPumper != null) { outputPumper.close(); if (success) { success = false; CommandLineUtils.handleException(outputPumper, "stdout"); success = true; } } } finally { if (errorPumper != null) { errorPumper.close(); if (success) { CommandLineUtils.handleException(errorPumper, "stderr"); } } } } } } } }; } private static void handleException(StreamPumper streamPumper, String streamName) throws CommandLineException { if (streamPumper.getException() != null) { throw new CommandLineException(String.format("Failure processing %s.", new Object[] { streamName }), streamPumper.getException()); } } private static void handleException(StreamFeeder streamFeeder, String streamName) throws CommandLineException { if (streamFeeder.getException() != null) { throw new CommandLineException(String.format("Failure processing %s.", new Object[] { streamName }), streamFeeder.getException()); } } public static Properties getSystemEnvVars() throws IOException { return getSystemEnvVars(!Os.isFamily("windows")); } public static Properties getSystemEnvVars(boolean caseSensitive) throws IOException { Properties envVars = new Properties(); Map<String, String> envs = System.getenv(); for (String key : envs.keySet()) { String value = (String)envs.get(key); if (!caseSensitive) { key = key.toUpperCase(Locale.ENGLISH); } envVars.put(key, value); } return envVars; } public static boolean isAlive(Process p) { if (p == null) { return false; } try { p.exitValue(); return false; } catch (IllegalThreadStateException e) {} return true; } public static String[] translateCommandline(String toProcess) throws Exception { if ((toProcess == null) || (toProcess.length() == 0)) { return new String[0]; } int normal = 0; int inQuote = 1; int inDoubleQuote = 2; int state = 0; StringTokenizer tok = new StringTokenizer(toProcess, "\"' ", true); Vector<String> v = new Vector(); StringBuilder current = new StringBuilder(); while (tok.hasMoreTokens()) { String nextTok = tok.nextToken(); switch (state) { case 1: if ("'".equals(nextTok)) { state = 0; } else { current.append(nextTok); } break; case 2: if ("\"".equals(nextTok)) { state = 0; } else { current.append(nextTok); } break; default: if ("'".equals(nextTok)) { state = 1; } else if ("\"".equals(nextTok)) { state = 2; } else if (" ".equals(nextTok)) { if (current.length() != 0) { v.addElement(current.toString()); current.setLength(0); } } else { current.append(nextTok); } break; } } if (current.length() != 0) { v.addElement(current.toString()); } if ((state == 1) || (state == 2)) { throw new CommandLineException("unbalanced quotes in " + toProcess); } String[] args = new String[v.size()]; v.copyInto(args); return args; } /** * @deprecated */ public static String quote(String argument) throws CommandLineException { return quote(argument, false, false, true); } /** * @deprecated */ public static 
String quote(String argument, boolean wrapExistingQuotes) throws CommandLineException { return quote(argument, false, false, wrapExistingQuotes); } /** * @deprecated */ public static String quote(String argument, boolean escapeSingleQuotes, boolean escapeDoubleQuotes, boolean wrapExistingQuotes) throws CommandLineException { if (argument.contains("\"")) { if (argument.contains("'")) { throw new CommandLineException("Can't handle single and double quotes in same argument"); } if (escapeSingleQuotes) { return "\\'" + argument + "\\'"; } if (wrapExistingQuotes) { return '\'' + argument + '\''; } } else if (argument.contains("'")) { if (escapeDoubleQuotes) { return "\\\"" + argument + "\\\""; } if (wrapExistingQuotes) { return '"' + argument + '"'; } } else if (argument.contains(" ")) { if (escapeDoubleQuotes) { return "\\\"" + argument + "\\\""; } return '"' + argument + '"'; } return argument; } public static String toString(String[] line) { if ((line == null) || (line.length == 0)) { return ""; } StringBuilder result = new StringBuilder(); for (int i = 0; i < line.length; i++) { if (i > 0) { result.append(' '); } try { result.append(StringUtils.quoteAndEscape(line[i], '"')); } catch (Exception e) { System.err.println("Error quoting argument: " + e.getMessage()); } } return result.toString(); } } /* Location: * Qualified Name: org.codehaus.plexus.util.cli.CommandLineUtils * Java Class Version: 6 (50.0) * JD-Core Version: 0.7.1 */
[]
[]
[]
[]
[]
java
0
0
src/bpm/runc/client/client.go
// Copyright (C) 2017-Present CloudFoundry.org Foundation, Inc. All rights reserved. // // This program and the accompanying materials are made available under // the terms of the under the Apache License, Version 2.0 (the "License”); // you may not use this file except in compliance with the License. // // You may obtain a copy of the License at // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, WITHOUT // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the // License for the specific language governing permissions and limitations // under the License. package client import ( "encoding/json" "fmt" "io" "os" "os/exec" "path/filepath" "regexp" "syscall" specs "github.com/opencontainers/runtime-spec/specs-go" ) type Signal int const ( Term Signal = iota Quit ) func (s Signal) String() string { switch s { case Term: return "TERM" case Quit: return "QUIT" default: return "unknown" } } // https://github.com/opencontainers/runc/blob/master/list.go#L24-L45 type ContainerState struct { // ID is the container ID ID string `json:"id"` // InitProcessPid is the init process id in the parent namespace InitProcessPid int `json:"pid"` // Status is the current status of the container, running, paused, ... Status string `json:"status"` } type RuncClient struct { runcPath string runcRoot string inSystemd bool } func NewRuncClient(runcPath, runcRoot string, inSystemd bool) *RuncClient { return &RuncClient{ runcPath: runcPath, runcRoot: runcRoot, inSystemd: inSystemd, } } func (*RuncClient) CreateBundle( bundlePath string, jobSpec specs.Spec, user specs.User, ) error { err := os.MkdirAll(bundlePath, 0700) if err != nil { return err } rootfsPath := filepath.Join(bundlePath, "rootfs") err = os.MkdirAll(rootfsPath, 0755) if err != nil { return err } f, err := os.OpenFile(filepath.Join(bundlePath, "config.json"), os.O_RDWR|os.O_CREATE, 0600) if err != nil { // This is super hard to test as we are root. return err } defer f.Close() enc := json.NewEncoder(f) enc.SetIndent("", "\t") return enc.Encode(&jobSpec) } func (c *RuncClient) RunContainer(pidFilePath, bundlePath, containerID string, detach bool, stdout, stderr io.Writer) (int, error) { args := []string{ "--bundle", bundlePath, } if detach { args = append(args, "--pid-file", pidFilePath) args = append(args, "--detach") } args = append(args, containerID) runcCmd := c.buildCmd("run", args...) runcCmd.Stdout = stdout runcCmd.Stderr = stderr if err := runcCmd.Run(); err != nil { if status, ok := runcCmd.ProcessState.Sys().(syscall.WaitStatus); ok { return status.ExitStatus(), err } // If we can't get the exit status for some reason then make // sure to at least return a generic failure. return 1, err } return 0, nil } // Exec assumes you are launching an interactive shell. // We should improve the interface to mirror `runc exec` more generally. func (c *RuncClient) Exec(containerID, command string, stdin io.Reader, stdout, stderr io.Writer) error { runcCmd := c.buildCmd( "exec", "--tty", "--env", fmt.Sprintf("TERM=%s", os.Getenv("TERM")), containerID, command, ) runcCmd.Stdin = stdin runcCmd.Stdout = stdout runcCmd.Stderr = stderr return runcCmd.Run() } // ContainerState returns the following: // - state, nil if the job is running,and no errors were encountered. 
// - nil,nil if the container state is not running and no other errors were encountered // - nil,error if there is any other error getting the container state // (e.g. the container is running but in an unreachable state) func (c *RuncClient) ContainerState(containerID string) (*specs.State, error) { runcCmd := c.buildCmd( "state", containerID, ) var state specs.State data, err := runcCmd.CombinedOutput() if err != nil { return nil, decodeContainerStateErr(data, err) } err = json.Unmarshal(data, &state) if err != nil { return nil, err } return &state, nil } func decodeContainerStateErr(b []byte, err error) error { r := regexp.MustCompile(`\s*container "[^"]*" does not exist\s*`) if r.MatchString(string(b)) { return nil } return err } func (c *RuncClient) ListContainers() ([]ContainerState, error) { runcCmd := c.buildCmd( "list", "--format", "json", ) data, err := runcCmd.Output() if err != nil { return []ContainerState{}, err } var containerStates []ContainerState err = json.Unmarshal(data, &containerStates) if err != nil { return []ContainerState{}, err } return containerStates, nil } func (c *RuncClient) SignalContainer(containerID string, signal Signal) error { runcCmd := c.buildCmd( "kill", containerID, signal.String(), ) return runcCmd.Run() } func (c *RuncClient) DeleteContainer(containerID string) error { runcCmd := c.buildCmd( "delete", "--force", containerID, ) return runcCmd.Run() } func (*RuncClient) DestroyBundle(bundlePath string) error { return os.RemoveAll(bundlePath) } func (c *RuncClient) buildCmd(command string, extra ...string) *exec.Cmd { args := []string{"--root", c.runcRoot} if c.inSystemd { args = append(args, "--systemd-cgroup") } args = append(args, command) args = append(args, extra...) return exec.Command(c.runcPath, args...) }
[ "\"TERM\"" ]
[]
[ "TERM" ]
[]
["TERM"]
go
1
0
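Below is a minimal usage sketch, not part of the corpus, showing how the RuncClient above might be driven; the import path is inferred from the src/ layout, and the runc binary, root, pid-file, and bundle paths are placeholder assumptions.

// Hypothetical driver for the RuncClient defined in src/bpm/runc/client/client.go.
package main

import (
	"fmt"
	"log"
	"os"

	client "bpm/runc/client" // import path assumed from the src/ tree layout
)

func main() {
	// Assumed runc binary location and state root; the real deployment values may differ.
	rc := client.NewRuncClient("/usr/local/bin/runc", "/run/runc", false)

	// List the containers runc currently knows about.
	states, err := rc.ListContainers()
	if err != nil {
		log.Fatalf("listing containers: %v", err)
	}
	for _, s := range states {
		fmt.Printf("%s (pid %d): %s\n", s.ID, s.InitProcessPid, s.Status)
	}

	// Run an existing bundle detached, writing a pid file next to it (paths are assumptions).
	exitStatus, err := rc.RunContainer(
		"/var/vcap/data/bpm/example.pid",
		"/var/vcap/data/bpm/bundles/example",
		"example",
		true,
		os.Stdout, os.Stderr,
	)
	if err != nil {
		log.Fatalf("run failed with exit status %d: %v", exitStatus, err)
	}
}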
gocrud.go
package main import ( "fmt" "github.com/gorilla/mux" _ "github.com/jinzhu/gorm/dialects/mysql" "go-crud/entity" "go-crud/utils" "log" "net/http" "os" ) func main() { db, _ := utils.DbConnection() db.AutoMigrate(&entity.User{}) defer utils.DbCloseConnection(db) myRouter := mux.NewRouter().StrictSlash(true) port := os.Getenv("port") if port == "" { port = "3000" log.Printf("Defaulting to port %s", port) } Routes(myRouter) log.Printf("Listening on port %s", port) log.Printf("Open http://localhost:%s in the browser", port) log.Fatal(http.ListenAndServe(fmt.Sprintf(":%s", port), myRouter)) }
[ "\"port\"" ]
[]
[ "port" ]
[]
["port"]
go
1
0
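gocrud.go calls Routes(myRouter), which is not defined in the file shown; the sketch below is a hypothetical stand-in for that helper, with endpoint paths and handler names invented for illustration only.

// Hypothetical sketch of the Routes helper used by gocrud.go's main; the real project defines it elsewhere.
package main

import (
	"encoding/json"
	"net/http"

	"github.com/gorilla/mux"
)

// Routes wires HTTP endpoints onto the router created in main.
func Routes(r *mux.Router) {
	r.HandleFunc("/health", func(w http.ResponseWriter, _ *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		_ = json.NewEncoder(w).Encode(map[string]string{"status": "ok"})
	}).Methods(http.MethodGet)

	// Placeholder user endpoints; real handlers would use the gorm DB handle and entity.User.
	r.HandleFunc("/users", notImplemented).Methods(http.MethodGet)
	r.HandleFunc("/users", notImplemented).Methods(http.MethodPost)
}

func notImplemented(w http.ResponseWriter, _ *http.Request) {
	w.WriteHeader(http.StatusNotImplemented)
}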
modules/text2vec-transformers/module.go
// _ _ // __ _____ __ ___ ___ __ _| |_ ___ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ // \ V V / __/ (_| |\ V /| | (_| | || __/ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| // // Copyright © 2016 - 2021 SeMI Technologies B.V. All rights reserved. // // CONTACT: [email protected] // package modtransformers import ( "context" "net/http" "os" "time" "github.com/pkg/errors" "github.com/semi-technologies/weaviate/entities/models" "github.com/semi-technologies/weaviate/entities/modulecapabilities" "github.com/semi-technologies/weaviate/entities/moduletools" "github.com/semi-technologies/weaviate/modules/text2vec-transformers/clients" "github.com/semi-technologies/weaviate/modules/text2vec-transformers/vectorizer" "github.com/sirupsen/logrus" ) func New() *TransformersModule { return &TransformersModule{} } type TransformersModule struct { vectorizer textVectorizer metaProvider metaProvider graphqlProvider modulecapabilities.GraphQLArguments searcher modulecapabilities.Searcher } type textVectorizer interface { Object(ctx context.Context, obj *models.Object, settings vectorizer.ClassSettings) error Texts(ctx context.Context, input []string, settings vectorizer.ClassSettings) ([]float32, error) // TODO all of these should be moved out of here, gh-1470 MoveTo(source, target []float32, weight float32) ([]float32, error) MoveAwayFrom(source, target []float32, weight float32) ([]float32, error) CombineVectors([][]float32) []float32 } type metaProvider interface { MetaInfo() (map[string]interface{}, error) } func (m *TransformersModule) Name() string { return "text2vec-transformers" } func (m *TransformersModule) Init(ctx context.Context, params moduletools.ModuleInitParams) error { if err := m.initVectorizer(ctx, params.GetLogger()); err != nil { return errors.Wrap(err, "init vectorizer") } if err := m.initNearText(); err != nil { return errors.Wrap(err, "init near text") } return nil } func (m *TransformersModule) initVectorizer(ctx context.Context, logger logrus.FieldLogger) error { // TODO: gh-1486 proper config management uri := os.Getenv("TRANSFORMERS_INFERENCE_API") if uri == "" { return errors.Errorf("required variable TRANSFORMERS_INFERENCE_API is not set") } client := clients.New(uri, logger) if err := client.WaitForStartup(ctx, 1*time.Second); err != nil { return errors.Wrap(err, "init remote vectorizer") } m.vectorizer = vectorizer.New(client) m.metaProvider = client return nil } func (m *TransformersModule) RootHandler() http.Handler { // TODO: remove once this is a capability interface return nil } func (m *TransformersModule) VectorizeObject(ctx context.Context, obj *models.Object, cfg moduletools.ClassConfig) error { icheck := vectorizer.NewClassSettings(cfg) return m.vectorizer.Object(ctx, obj, icheck) } func (m *TransformersModule) MetaInfo() (map[string]interface{}, error) { return m.metaProvider.MetaInfo() } // verify we implement the modules.Module interface var ( _ = modulecapabilities.Module(New()) _ = modulecapabilities.Vectorizer(New()) _ = modulecapabilities.MetaProvider(New()) )
[ "\"TRANSFORMERS_INFERENCE_API\"" ]
[]
[ "TRANSFORMERS_INFERENCE_API" ]
[]
["TRANSFORMERS_INFERENCE_API"]
go
1
0
pkg/metricsclient/metricsclient.go
// Copyright Contributors to the Open Cluster Management project package metricsclient import ( "bytes" "context" "crypto/tls" "crypto/x509" "encoding/json" "fmt" "io" "io/ioutil" "net" "net/http" "os" "strconv" "strings" "time" "github.com/cenkalti/backoff" "github.com/go-kit/kit/log" "github.com/gogo/protobuf/proto" "github.com/golang/snappy" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" clientmodel "github.com/prometheus/client_model/go" "github.com/prometheus/common/expfmt" "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/promql" "github.com/stolostron/metrics-collector/pkg/logger" "github.com/stolostron/metrics-collector/pkg/reader" ) const ( nameLabelName = "__name__" maxSeriesLength = 10000 ) var ( gaugeRequestRetrieve = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "metricsclient_request_retrieve", Help: "Tracks the number of metrics retrievals", }, []string{"client", "status_code"}) gaugeRequestSend = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "metricsclient_request_send", Help: "Tracks the number of metrics sends", }, []string{"client", "status_code"}) ) func init() { prometheus.MustRegister( gaugeRequestRetrieve, gaugeRequestSend, ) } type Client struct { client *http.Client maxBytes int64 timeout time.Duration metricsName string logger log.Logger } type PartitionedMetrics struct { Families []*clientmodel.MetricFamily } func New(logger log.Logger, client *http.Client, maxBytes int64, timeout time.Duration, metricsName string) *Client { return &Client{ client: client, maxBytes: maxBytes, timeout: timeout, metricsName: metricsName, logger: log.With(logger, "component", "metricsclient"), } } type MetricsJson struct { Status string `json:"status"` Data MetricsData `json:"data"` } type MetricsData struct { Type string `json:"resultType"` Result []MetricsResult `json:"result"` } type MetricsResult struct { Metric map[string]string `json:"metric"` Value []interface{} `json:"value"` } func (c *Client) RetrievRecordingMetrics(ctx context.Context, req *http.Request, name string) ([]*clientmodel.MetricFamily, error) { ctx, cancel := context.WithTimeout(ctx, c.timeout) req = req.WithContext(ctx) defer cancel() families := make([]*clientmodel.MetricFamily, 0, 100) err := withCancel(ctx, c.client, req, func(resp *http.Response) error { switch resp.StatusCode { case http.StatusOK: gaugeRequestRetrieve.WithLabelValues(c.metricsName, "200").Inc() case http.StatusUnauthorized: gaugeRequestRetrieve.WithLabelValues(c.metricsName, "401").Inc() return fmt.Errorf("Prometheus server requires authentication: %s", resp.Request.URL) case http.StatusForbidden: gaugeRequestRetrieve.WithLabelValues(c.metricsName, "403").Inc() return fmt.Errorf("Prometheus server forbidden: %s", resp.Request.URL) case http.StatusBadRequest: gaugeRequestRetrieve.WithLabelValues(c.metricsName, "400").Inc() return fmt.Errorf("bad request: %s", resp.Request.URL) default: gaugeRequestRetrieve.WithLabelValues(c.metricsName, strconv.Itoa(resp.StatusCode)).Inc() return fmt.Errorf("Prometheus server reported unexpected error code: %d", resp.StatusCode) } decoder := json.NewDecoder(resp.Body) var data MetricsJson err := decoder.Decode(&data) if err != nil { logger.Log(c.logger, logger.Error, "msg", "failed to decode", "err", err) return nil } vec := make(promql.Vector, 0, 100) for _, r := range data.Data.Result { var t int64 var v float64 t = int64(r.Value[0].(float64) * 1000) v, _ = strconv.ParseFloat(r.Value[1].(string), 
64) ls := []labels.Label{} for k, v := range r.Metric { l := &labels.Label{ Name: k, Value: v, } ls = append(ls, *l) } vec = append(vec, promql.Sample{ Metric: ls, Point: promql.Point{T: t, V: v}, }) } for _, s := range vec { protMetric := &clientmodel.Metric{ Untyped: &clientmodel.Untyped{}, } protMetricFam := &clientmodel.MetricFamily{ Type: clientmodel.MetricType_UNTYPED.Enum(), Name: proto.String(name), } for _, l := range s.Metric { if l.Value == "" { // No value means unset. Never consider those labels. // This is also important to protect against nameless metrics. continue } protMetric.Label = append(protMetric.Label, &clientmodel.LabelPair{ Name: proto.String(l.Name), Value: proto.String(l.Value), }) } protMetric.TimestampMs = proto.Int64(s.T) protMetric.Untyped.Value = proto.Float64(s.V) protMetricFam.Metric = append(protMetricFam.Metric, protMetric) families = append(families, protMetricFam) } return nil }) if err != nil { return nil, err } return families, nil } func (c *Client) Retrieve(ctx context.Context, req *http.Request) ([]*clientmodel.MetricFamily, error) { if req.Header == nil { req.Header = make(http.Header) } req.Header.Set("Accept", strings.Join([]string{string(expfmt.FmtProtoDelim), string(expfmt.FmtText)}, " , ")) ctx, cancel := context.WithTimeout(ctx, c.timeout) req = req.WithContext(ctx) defer cancel() families := make([]*clientmodel.MetricFamily, 0, 100) err := withCancel(ctx, c.client, req, func(resp *http.Response) error { switch resp.StatusCode { case http.StatusOK: gaugeRequestRetrieve.WithLabelValues(c.metricsName, "200").Inc() case http.StatusUnauthorized: gaugeRequestRetrieve.WithLabelValues(c.metricsName, "401").Inc() return fmt.Errorf("Prometheus server requires authentication: %s", resp.Request.URL) case http.StatusForbidden: gaugeRequestRetrieve.WithLabelValues(c.metricsName, "403").Inc() return fmt.Errorf("Prometheus server forbidden: %s", resp.Request.URL) case http.StatusBadRequest: gaugeRequestRetrieve.WithLabelValues(c.metricsName, "400").Inc() return fmt.Errorf("bad request: %s", resp.Request.URL) default: gaugeRequestRetrieve.WithLabelValues(c.metricsName, strconv.Itoa(resp.StatusCode)).Inc() return fmt.Errorf("Prometheus server reported unexpected error code: %d", resp.StatusCode) } // read the response into memory format := expfmt.ResponseFormat(resp.Header) r := &reader.LimitedReader{R: resp.Body, N: c.maxBytes} decoder := expfmt.NewDecoder(r, format) for { family := &clientmodel.MetricFamily{} families = append(families, family) if err := decoder.Decode(family); err != nil { if err != io.EOF { logger.Log(c.logger, logger.Error, "msg", "error reading body", "err", err) } break } } return nil }) if err != nil { return nil, err } return families, nil } func (c *Client) Send(ctx context.Context, req *http.Request, families []*clientmodel.MetricFamily) error { buf := &bytes.Buffer{} if err := Write(buf, families); err != nil { return err } if req.Header == nil { req.Header = make(http.Header) } req.Header.Set("Content-Type", string(expfmt.FmtProtoDelim)) req.Header.Set("Content-Encoding", "snappy") req.Body = ioutil.NopCloser(buf) ctx, cancel := context.WithTimeout(ctx, c.timeout) req = req.WithContext(ctx) defer cancel() logger.Log(c.logger, logger.Debug, "msg", "start to send") return withCancel(ctx, c.client, req, func(resp *http.Response) error { defer func() { if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil { logger.Log(c.logger, logger.Error, "msg", "error copying body", "err", err) } if err := resp.Body.Close(); err != nil { 
logger.Log(c.logger, logger.Error, "msg", "error closing body", "err", err) } }() logger.Log(c.logger, logger.Debug, "msg", resp.StatusCode) switch resp.StatusCode { case http.StatusOK: gaugeRequestSend.WithLabelValues(c.metricsName, "200").Inc() case http.StatusUnauthorized: gaugeRequestSend.WithLabelValues(c.metricsName, "401").Inc() return fmt.Errorf("gateway server requires authentication: %s", resp.Request.URL) case http.StatusForbidden: gaugeRequestSend.WithLabelValues(c.metricsName, "403").Inc() return fmt.Errorf("gateway server forbidden: %s", resp.Request.URL) case http.StatusBadRequest: gaugeRequestSend.WithLabelValues(c.metricsName, "400").Inc() logger.Log(c.logger, logger.Debug, "msg", resp.Body) return fmt.Errorf("gateway server bad request: %s", resp.Request.URL) default: gaugeRequestSend.WithLabelValues(c.metricsName, strconv.Itoa(resp.StatusCode)).Inc() body, _ := ioutil.ReadAll(resp.Body) if len(body) > 1024 { body = body[:1024] } return fmt.Errorf("gateway server reported unexpected error code: %d: %s", resp.StatusCode, string(body)) } return nil }) } func Read(r io.Reader) ([]*clientmodel.MetricFamily, error) { decompress := snappy.NewReader(r) decoder := expfmt.NewDecoder(decompress, expfmt.FmtProtoDelim) families := make([]*clientmodel.MetricFamily, 0, 100) for { family := &clientmodel.MetricFamily{} if err := decoder.Decode(family); err != nil { if err == io.EOF { break } return nil, err } families = append(families, family) } return families, nil } func Write(w io.Writer, families []*clientmodel.MetricFamily) error { // output the filtered set compress := snappy.NewBufferedWriter(w) encoder := expfmt.NewEncoder(compress, expfmt.FmtProtoDelim) for _, family := range families { if family == nil { continue } if err := encoder.Encode(family); err != nil { return err } } if err := compress.Flush(); err != nil { return err } return nil } func withCancel(ctx context.Context, client *http.Client, req *http.Request, fn func(*http.Response) error) error { resp, err := client.Do(req) defer func() error { if resp != nil { if err = resp.Body.Close(); err != nil { return err } } return nil }() if err != nil { return err } done := make(chan struct{}) go func() { err = fn(resp) close(done) }() select { case <-ctx.Done(): closeErr := resp.Body.Close() // wait for the goroutine to finish. <-done // err is propagated from the goroutine above // if it is nil, we bubble up the close err, if any. if err == nil { err = closeErr } // if there is no close err, // we propagate the context context error. if err == nil { err = ctx.Err() } case <-done: // propagate the err from the spawned goroutine, if any. 
} return err } func MTLSTransport(logger log.Logger) (*http.Transport, error) { testMode := os.Getenv("UNIT_TEST") != "" caCertFile := "/tlscerts/ca/ca.crt" tlsKeyFile := "/tlscerts/certs/tls.key" tlsCrtFile := "/tlscerts/certs/tls.crt" if testMode { caCertFile = "./testdata/ca.crt" tlsKeyFile = "./testdata/tls.key" tlsCrtFile = "./testdata/tls.crt" } // Load Server CA cert caCert, err := ioutil.ReadFile(caCertFile) if err != nil { return nil, errors.Wrap(err, "failed to load server ca cert file") } // Load client cert signed by Client CA cert, err := tls.LoadX509KeyPair(tlsCrtFile, tlsKeyFile) if err != nil { return nil, errors.Wrap(err, "failed to load client ca cert") } caCertPool := x509.NewCertPool() caCertPool.AppendCertsFromPEM(caCert) // Setup HTTPS client tlsConfig := &tls.Config{ Certificates: []tls.Certificate{cert}, RootCAs: caCertPool, MinVersion: tls.VersionTLS12, } return &http.Transport{ Dial: (&net.Dialer{ Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, }).Dial, TLSHandshakeTimeout: 10 * time.Second, DisableKeepAlives: true, TLSClientConfig: tlsConfig, }, nil } func DefaultTransport(logger log.Logger, isTLS bool) *http.Transport { return &http.Transport{ Dial: (&net.Dialer{ Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, }).Dial, TLSHandshakeTimeout: 10 * time.Second, DisableKeepAlives: true, } } func convertToTimeseries(p *PartitionedMetrics, now time.Time) ([]prompb.TimeSeries, error) { var timeseries []prompb.TimeSeries timestamp := now.UnixNano() / int64(time.Millisecond) for _, f := range p.Families { for _, m := range f.Metric { var ts prompb.TimeSeries labelpairs := []prompb.Label{{ Name: nameLabelName, Value: *f.Name, }} for _, l := range m.Label { labelpairs = append(labelpairs, prompb.Label{ Name: *l.Name, Value: *l.Value, }) } s := prompb.Sample{ Timestamp: *m.TimestampMs, } // If the sample is in the future, overwrite it. if *m.TimestampMs > timestamp { s.Timestamp = timestamp } switch *f.Type { case clientmodel.MetricType_COUNTER: s.Value = *m.Counter.Value case clientmodel.MetricType_GAUGE: s.Value = *m.Gauge.Value case clientmodel.MetricType_UNTYPED: s.Value = *m.Untyped.Value default: return nil, fmt.Errorf("metric type %s not supported", f.Type.String()) } ts.Labels = append(ts.Labels, labelpairs...) 
ts.Samples = append(ts.Samples, s) timeseries = append(timeseries, ts) } } return timeseries, nil } // RemoteWrite is used to push the metrics to remote thanos endpoint func (c *Client) RemoteWrite(ctx context.Context, req *http.Request, families []*clientmodel.MetricFamily, interval time.Duration) error { timeseries, err := convertToTimeseries(&PartitionedMetrics{Families: families}, time.Now()) if err != nil { msg := "failed to convert timeseries" logger.Log(c.logger, logger.Warn, "msg", msg, "err", err) return fmt.Errorf(msg) } if len(timeseries) == 0 { logger.Log(c.logger, logger.Info, "msg", "no time series to forward to receive endpoint") return nil } logger.Log(c.logger, logger.Debug, "timeseries number", len(timeseries)) for i := 0; i < len(timeseries); i += maxSeriesLength { length := len(timeseries) if i+maxSeriesLength < length { length = i + maxSeriesLength } subTimeseries := timeseries[i:length] wreq := &prompb.WriteRequest{Timeseries: subTimeseries} data, err := proto.Marshal(wreq) if err != nil { msg := "failed to marshal proto" logger.Log(c.logger, logger.Warn, "msg", msg, "err", err) return fmt.Errorf(msg) } compressed := snappy.Encode(nil, data) // retry RemoteWrite with exponential back-off b := backoff.NewExponentialBackOff() // Do not set max elapsed time more than half the scrape interval halfInterval := len(timeseries) * 2 / maxSeriesLength if halfInterval < 2 { halfInterval = 2 } b.MaxElapsedTime = interval / time.Duration(halfInterval) retryable := func() error { return c.sendRequest(req.URL.String(), compressed) } notify := func(err error, t time.Duration) { msg := fmt.Sprintf("error: %v happened at time: %v", err, t) logger.Log(c.logger, logger.Warn, "msg", msg) } err = backoff.RetryNotify(retryable, b, notify) if err != nil { return err } } msg := fmt.Sprintf("Metrics pushed successfully") logger.Log(c.logger, logger.Info, "msg", msg) return nil } func (c *Client) sendRequest(serverURL string, body []byte) error { req1, err := http.NewRequest(http.MethodPost, serverURL, bytes.NewBuffer(body)) if err != nil { msg := "failed to create forwarding request" logger.Log(c.logger, logger.Warn, "msg", msg, "err", err) return fmt.Errorf(msg) } //req.Header.Add("THANOS-TENANT", tenantID) ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() req1 = req1.WithContext(ctx) resp, err := c.client.Do(req1) if err != nil { msg := "failed to forward request" logger.Log(c.logger, logger.Warn, "msg", msg, "err", err) return fmt.Errorf(msg) } if resp.StatusCode/100 != 2 { // surfacing upstreams error to our users too bodyBytes, err := ioutil.ReadAll(resp.Body) if err != nil { logger.Log(c.logger, logger.Warn, err) } bodyString := string(bodyBytes) msg := fmt.Sprintf("response status code is %s, response body is %s", resp.Status, bodyString) logger.Log(c.logger, logger.Warn, msg) if resp.StatusCode != http.StatusConflict { return fmt.Errorf(msg) } } return nil }
[ "\"UNIT_TEST\"" ]
[]
[ "UNIT_TEST" ]
[]
["UNIT_TEST"]
go
1
0
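The sketch below, which is assumed rather than taken from the corpus, round-trips one untyped metric family through the Write and Read helpers above (snappy-compressed, protobuf-delimited encoding); the package import path is inferred from the repository's other imports, and the metric name, label, and timestamp are placeholder values.

// Round-trip example for metricsclient.Write / metricsclient.Read.
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/gogo/protobuf/proto"
	clientmodel "github.com/prometheus/client_model/go"

	"github.com/stolostron/metrics-collector/pkg/metricsclient" // path inferred from sibling imports
)

func main() {
	// Build a single untyped family, mirroring how RetrievRecordingMetrics assembles families.
	family := &clientmodel.MetricFamily{
		Name: proto.String("example_metric"),
		Type: clientmodel.MetricType_UNTYPED.Enum(),
		Metric: []*clientmodel.Metric{{
			Label: []*clientmodel.LabelPair{{
				Name:  proto.String("cluster"),
				Value: proto.String("local"),
			}},
			Untyped:     &clientmodel.Untyped{Value: proto.Float64(42)},
			TimestampMs: proto.Int64(1700000000000),
		}},
	}

	// Encode to snappy-compressed, protobuf-delimited bytes.
	buf := &bytes.Buffer{}
	if err := metricsclient.Write(buf, []*clientmodel.MetricFamily{family}); err != nil {
		log.Fatalf("write: %v", err)
	}

	// Decode the same bytes back into metric families.
	families, err := metricsclient.Read(buf)
	if err != nil {
		log.Fatalf("read: %v", err)
	}
	fmt.Printf("decoded %d family(ies); first name: %s\n", len(families), families[0].GetName())
}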
internal/microservices/profile/repository/profile_test.go
package repository import ( "database/sql/driver" "errors" "log" "os" "regexp" "testing" "github.com/DATA-DOG/go-sqlmock" "github.com/stretchr/testify/assert" "golang.org/x/crypto/bcrypt" "2021_2_LostPointer/internal/constants" "2021_2_LostPointer/internal/microservices/profile/proto" ) func TestUserSettingsStorage_GetSettings(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { log.Fatalf("an error '%s' was not expected when opening a stub database connection", err) return } repository := NewUserSettingsStorage(db) const ( avatar = "testAvatar" userID = 1 ) settings := &proto.UserSettings{ Email: "testEmail", Nickname: "testNickname", } expectedSettings := &proto.UserSettings{ Email: "testEmail", Nickname: "testNickname", SmallAvatar: os.Getenv("USERS_ROOT_PREFIX") + avatar + constants.UserAvatarExtension150px, BigAvatar: os.Getenv("USERS_ROOT_PREFIX") + avatar + constants.UserAvatarExtension500px, } tests := []struct { name string mock func() expected *proto.UserSettings expectedError bool }{ { name: "get settings success", mock: func() { row := mock.NewRows([]string{"email", "avatar", "name"}) row.AddRow(settings.Email, avatar, settings.Nickname) mock.ExpectQuery(regexp.QuoteMeta(`SELECT email, avatar, nickname FROM users WHERE id=$1`)).WithArgs(driver.Value(userID)).WillReturnRows(row) }, expected: expectedSettings, }, { name: "can not get settings", mock: func() { row := mock.NewRows([]string{"email", "avatar", "name"}) mock.ExpectQuery(regexp.QuoteMeta(`SELECT email, avatar, nickname FROM users WHERE id=$1`)).WithArgs(driver.Value(userID)).WillReturnRows(row) }, expectedError: true, }, { name: "query returns error", mock: func() { row := mock.NewRows([]string{"email", "avatar", "name"}) row.AddRow(settings.Email, avatar, settings.Nickname) mock.ExpectQuery(regexp.QuoteMeta(`SELECT email, avatar, nickname FROM users WHERE id=$1`)).WithArgs(driver.Value(userID)).WillReturnError(errors.New("error")) }, expectedError: true, }, { name: "scan returns error", mock: func() { const newArg = 1 row := mock.NewRows([]string{"email", "avatar", "name", "newArg"}) row.AddRow(settings.Email, avatar, settings.Nickname, newArg) mock.ExpectQuery(regexp.QuoteMeta(`SELECT email, avatar, nickname FROM users WHERE id=$1`)).WithArgs(driver.Value(userID)).WillReturnRows(row) }, expectedError: true, }, { name: "row.Err() returns error", mock: func() { row := mock.NewRows([]string{"email", "avatar", "name"}).RowError(0, errors.New("error")) row.AddRow(settings.Email, avatar, settings.Nickname) mock.ExpectQuery(regexp.QuoteMeta(`SELECT email, avatar, nickname FROM users WHERE id=$1`)).WithArgs(driver.Value(userID)).WillReturnRows(row) }, expectedError: true, }, } for _, test := range tests { currentTest := test t.Run(currentTest.name, func(t *testing.T) { currentTest.mock() result, err := repository.GetSettings(userID) if currentTest.expectedError { assert.Error(t, err) } else { assert.NoError(t, err) assert.Equal(t, currentTest.expected, result) } }) } } func TestUserSettingsStorage_UpdateEmail(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { log.Fatalf("an error '%s' was not expected when opening a stub database connection", err) return } repository := NewUserSettingsStorage(db) const userID = 1 tests := []struct { name string email string mock func() expectedError bool }{ { name: "update email success", email: "[email protected]", mock: func() { row := mock.NewRows([]string{"success"}).AddRow(1) mock.ExpectQuery(regexp.QuoteMeta(`UPDATE users SET email=$1 WHERE 
id=$2`)).WithArgs(driver.Value("[email protected]"), driver.Value(userID)).WillReturnRows(row) }, }, { name: "update email with xss success", email: "<script>alert()</script>", mock: func() { row := mock.NewRows([]string{"success"}).AddRow(1) mock.ExpectQuery(regexp.QuoteMeta(`UPDATE users SET email=$1 WHERE id=$2`)).WithArgs(driver.Value("alert()"), driver.Value(userID)).WillReturnRows(row) }, }, { name: "query returns error", email: "[email protected]", mock: func() { mock.ExpectQuery(regexp.QuoteMeta(`UPDATE users SET email=$1 WHERE id=$2`)).WithArgs(driver.Value("[email protected]"), driver.Value(userID)).WillReturnError(errors.New("error")) }, expectedError: true, }, } for _, test := range tests { currentTest := test t.Run(currentTest.name, func(t *testing.T) { currentTest.mock() err := repository.UpdateEmail(userID, currentTest.email) if currentTest.expectedError { assert.Error(t, err) } else { assert.NoError(t, err) } }) } } func TestUserSettingsStorage_UpdateNickname(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { log.Fatalf("an error '%s' was not expected when opening a stub database connection", err) return } repository := NewUserSettingsStorage(db) const userID = 1 tests := []struct { name string nickname string mock func() expectedError bool }{ { name: "update nickname success", nickname: "test", mock: func() { row := mock.NewRows([]string{"success"}).AddRow(1) mock.ExpectQuery(regexp.QuoteMeta(`UPDATE users SET nickname=$1 WHERE id=$2`)).WithArgs(driver.Value("test"), driver.Value(userID)).WillReturnRows(row) }, }, { name: "update nickname with xss success", nickname: "<script>alert()</script>", mock: func() { row := mock.NewRows([]string{"success"}).AddRow(1) mock.ExpectQuery(regexp.QuoteMeta(`UPDATE users SET nickname=$1 WHERE id=$2`)).WithArgs(driver.Value("alert()"), driver.Value(userID)).WillReturnRows(row) }, }, { name: "query returns error", nickname: "test", mock: func() { mock.ExpectQuery(regexp.QuoteMeta(`UPDATE users SET nickname=$1 WHERE id=$2`)).WithArgs(driver.Value("test"), driver.Value(userID)).WillReturnError(errors.New("error")) }, expectedError: true, }, } for _, test := range tests { currentTest := test t.Run(currentTest.name, func(t *testing.T) { currentTest.mock() err := repository.UpdateNickname(userID, currentTest.nickname) if currentTest.expectedError { assert.Error(t, err) } else { assert.NoError(t, err) } }) } } func TestUserSettingsStorage_UpdateAvatar(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { log.Fatalf("an error '%s' was not expected when opening a stub database connection", err) return } repository := NewUserSettingsStorage(db) const userID = 1 tests := []struct { name string filename string mock func() expectedError bool }{ { name: "update avatar success", filename: "test", mock: func() { row := mock.NewRows([]string{"success"}).AddRow(1) mock.ExpectQuery(regexp.QuoteMeta(`UPDATE users SET avatar=$1 WHERE id=$2`)).WithArgs(driver.Value("test"), driver.Value(userID)).WillReturnRows(row) }, }, { name: "query returns error", filename: "test", mock: func() { mock.ExpectQuery(regexp.QuoteMeta(`UPDATE users SET avatar=$1 WHERE id=$2`)).WithArgs(driver.Value("test"), driver.Value(userID)).WillReturnError(errors.New("error")) }, expectedError: true, }, } for _, test := range tests { currentTest := test t.Run(currentTest.name, func(t *testing.T) { currentTest.mock() err := repository.UpdateAvatar(userID, currentTest.filename) if currentTest.expectedError { assert.Error(t, err) } else { assert.NoError(t, err) } }) } } 
func TestUserSettingsStorage_UpdatePassword(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { log.Fatalf("an error '%s' was not expected when opening a stub database connection", err) return } repository := NewUserSettingsStorage(db) const ( userID = 1 ) tests := []struct { name string password string mock func() expectedError bool }{ { name: "update password success", password: "test", mock: func() { row := mock.NewRows([]string{"success"}).AddRow(1) mock.ExpectQuery(regexp.QuoteMeta(`UPDATE users SET password=$1, salt=$2 WHERE id=$3`)).WillReturnRows(row) }, }, { name: "query returns error", password: "test", mock: func() { mock.ExpectQuery(regexp.QuoteMeta(`UPDATE users SET password=$1, salt=$2 WHERE id=$3`)).WillReturnError(errors.New("error")) }, expectedError: true, }, } for _, test := range tests { currentTest := test t.Run(currentTest.name, func(t *testing.T) { currentTest.mock() err := repository.UpdatePassword(userID, currentTest.password) if currentTest.expectedError { assert.Error(t, err) } else { assert.NoError(t, err) } }) } } func TestUserSettingsStorage_IsEmailUnique(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { log.Fatalf("an error '%s' was not expected when opening a stub database connection", err) return } repository := NewUserSettingsStorage(db) const ( email = "testEmail" emailArgument = "testemail" ) tests := []struct { name string mock func() expected bool expectedError bool }{ { name: "email is unique", mock: func() { row := mock.NewRows([]string{"id"}) mock.ExpectQuery(regexp.QuoteMeta(`SELECT id FROM users WHERE lower(email)=$1`)).WithArgs(driver.Value(emailArgument)).WillReturnRows(row) }, expected: true, }, { name: "email is not unique", mock: func() { row := mock.NewRows([]string{"id"}).AddRow(1) mock.ExpectQuery(regexp.QuoteMeta(`SELECT id FROM users WHERE lower(email)=$1`)).WithArgs(driver.Value(emailArgument)).WillReturnRows(row) }, }, { name: "query returns error", mock: func() { row := mock.NewRows([]string{"id"}) row.AddRow(1) mock.ExpectQuery(regexp.QuoteMeta(`SELECT id FROM users WHERE lower(email)=$1`)).WithArgs(driver.Value(emailArgument)).WillReturnError(errors.New("error")) }, expectedError: true, }, } for _, test := range tests { currentTest := test t.Run(currentTest.name, func(t *testing.T) { currentTest.mock() result, err := repository.IsEmailUnique(email) if currentTest.expectedError { assert.Error(t, err) } else { assert.NoError(t, err) assert.Equal(t, currentTest.expected, result) } }) } } func TestUserSettingsStorage_IsNicknameUnique(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { log.Fatalf("an error '%s' was not expected when opening a stub database connection", err) return } repository := NewUserSettingsStorage(db) const ( nickname = "testEmail" nicknameArgument = "testemail" ) tests := []struct { name string mock func() expected bool expectedError bool }{ { name: "nickname is unique", mock: func() { row := mock.NewRows([]string{"id"}) mock.ExpectQuery(regexp.QuoteMeta(`SELECT id FROM users WHERE lower(nickname)=$1`)).WithArgs(driver.Value(nicknameArgument)).WillReturnRows(row) }, expected: true, }, { name: "nickname is not unique", mock: func() { row := mock.NewRows([]string{"id"}).AddRow(1) mock.ExpectQuery(regexp.QuoteMeta(`SELECT id FROM users WHERE lower(nickname)=$1`)).WithArgs(driver.Value(nicknameArgument)).WillReturnRows(row) }, }, { name: "query returns error", mock: func() { row := mock.NewRows([]string{"id"}) row.AddRow(1) mock.ExpectQuery(regexp.QuoteMeta(`SELECT id FROM users WHERE 
lower(nickname)=$1`)).WithArgs(driver.Value(nicknameArgument)).WillReturnError(errors.New("error")) }, expectedError: true, }, } for _, test := range tests { currentTest := test t.Run(currentTest.name, func(t *testing.T) { currentTest.mock() result, err := repository.IsNicknameUnique(nickname) if currentTest.expectedError { assert.Error(t, err) } else { assert.NoError(t, err) assert.Equal(t, currentTest.expected, result) } }) } } func TestUserSettingsStorage_CheckPasswordByUserID(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { log.Fatalf("an error '%s' was not expected when opening a stub database connection", err) return } repository := NewUserSettingsStorage(db) const ( userID = 1 oldPassword = "testPassword" salt = "testSalt" ) password, _ := bcrypt.GenerateFromPassword([]byte(oldPassword+salt), bcrypt.DefaultCost) tests := []struct { name string mock func() expected bool expectedError bool }{ { name: "password exists", mock: func() { row := mock.NewRows([]string{"password", "salt"}).AddRow(password, salt) mock.ExpectQuery(regexp.QuoteMeta(`SELECT password, salt FROM users WHERE id=$1`)).WithArgs(driver.Value(userID)).WillReturnRows(row) }, expected: true, }, { name: "can not find password in database", mock: func() { row := mock.NewRows([]string{"password", "salt"}) mock.ExpectQuery(regexp.QuoteMeta(`SELECT password, salt FROM users WHERE id=$1`)).WithArgs(driver.Value(userID)).WillReturnRows(row) }, }, { name: "query returns error", mock: func() { mock.ExpectQuery(regexp.QuoteMeta(`SELECT password, salt FROM users WHERE id=$1`)).WithArgs(driver.Value(userID)).WillReturnError(errors.New("error")) }, expectedError: true, }, { name: "wrong credentials", mock: func() { row := mock.NewRows([]string{"password", "salt"}).AddRow("wrongCredentials", salt) mock.ExpectQuery(regexp.QuoteMeta(`SELECT password, salt FROM users WHERE id=$1`)).WithArgs(driver.Value(userID)).WillReturnRows(row) }, expectedError: true, }, { name: "scan returns error", mock: func() { const newArg = 1 row := mock.NewRows([]string{"password", "salt", "newArg"}).AddRow(password, salt, newArg) mock.ExpectQuery(regexp.QuoteMeta(`SELECT password, salt FROM users WHERE id=$1`)).WithArgs(driver.Value(userID)).WillReturnRows(row) }, expectedError: true, }, { name: "row.Err() returns error", mock: func() { const newArg = 1 row := mock.NewRows([]string{"password", "salt", "newArg"}).AddRow(password, salt, newArg).RowError(0, errors.New("error")) mock.ExpectQuery(regexp.QuoteMeta(`SELECT password, salt FROM users WHERE id=$1`)).WithArgs(driver.Value(userID)).WillReturnRows(row) }, expectedError: true, }, } for _, test := range tests { currentTest := test t.Run(currentTest.name, func(t *testing.T) { currentTest.mock() result, err := repository.CheckPasswordByUserID(userID, oldPassword) if currentTest.expectedError { assert.Error(t, err) } else { assert.NoError(t, err) assert.Equal(t, currentTest.expected, result) } }) } }
[ "\"USERS_ROOT_PREFIX\"", "\"USERS_ROOT_PREFIX\"" ]
[]
[ "USERS_ROOT_PREFIX" ]
[]
["USERS_ROOT_PREFIX"]
go
1
0
django7h/settings.py
""" Django settings for django7h project. Generated by 'django-admin startproject' using Django 3.2.8. For more information on this file, see https://docs.djangoproject.com/en/3.2/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/3.2/ref/settings/ """ from pathlib import Path from django.conf import settings from django.conf.urls.static import static import os # Build paths inside the project like this: BASE_DIR / 'subdir'. #BASE_DIR = Path(__file__).resolve().parent.parent # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = os.getenv('SECRET_KEY') # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = ['127.0.0.1', 'django7h-env.eba-3mmnp6am.eu-central-1.elasticbeanstalk.com'] # '127.0.0.1:8000', # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'base.apps.BaseConfig', 'rest_framework', "corsheaders", 'storages', ] ##### this is needed to use our custom user model for authentication, not django default model AUTH_USER_MODEL = 'base.User' MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', "corsheaders.middleware.CorsMiddleware", "django.middleware.common.CommonMiddleware", ] ROOT_URLCONF = 'django7h.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [ os.path.join(BASE_DIR, 'templates') ], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'django7h.wsgi.application' # Database # https://docs.djangoproject.com/en/3.2/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql', 'NAME': 'aws_django7h_db', 'USER':'aws_rds_user', 'PASSWORD':'postgres', 'HOST':os.getenv('RDS_DATABASE_DJANGO_DISC'), 'PORT':'5432' } } # Password validation # https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/3.2/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/3.2/howto/static-files/ #STATIC_URL = '/static/' MEDIA_URL = '/images/' STATICFILES_DIRS = [ os.path.join(BASE_DIR, 'static') ] 
MEDIA_ROOT = os.path.join(BASE_DIR, 'static/images') # Default primary key field type # https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField' CORS_ALLOW_ALL_ORIGINS = True #S3 Buckets config from storages.backends.s3boto3 import S3Boto3Storage class MediaStorage(S3Boto3Storage): location = 'media' default_acl = 'public-read' file_overwrite = False class StaticStorage(S3Boto3Storage): location = 'static' default_acl = 'public-read' AWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID_1') AWS_SECERET_ACCESS_KEY = os.getenv('AWS_SECERET_ACCESS_KEY_1') AWS_STORAGE_BUCKET_NAME = os.getenv('AWS_STORAGE_BUCKET_NAME_1') AWS_S3_REGION_NAME="eu-central-1" AWS_S3_CUSTOM_DOMAIN = 'test1bucket1pm.s3.eu-central-1.amazonaws.com' AWS_S3_OBJECT_PARAMETERS = {'CacheControl': 'max-age=31536000'} AWS_LOCATION = 'static' STATIC_URL = f'https://{AWS_S3_CUSTOM_DOMAIN}/{AWS_LOCATION}/' STATICFILES_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage' PUBLIC_MEDIA_LOCATION = 'media' MEDIA_URL = f'https://{AWS_S3_CUSTOM_DOMAIN}/{PUBLIC_MEDIA_LOCATION}/' DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
[]
[]
[ "AWS_STORAGE_BUCKET_NAME_1", "RDS_DATABASE_DJANGO_DISC", "SECRET_KEY", "AWS_SECERET_ACCESS_KEY_1", "AWS_ACCESS_KEY_ID_1" ]
[]
["AWS_STORAGE_BUCKET_NAME_1", "RDS_DATABASE_DJANGO_DISC", "SECRET_KEY", "AWS_SECERET_ACCESS_KEY_1", "AWS_ACCESS_KEY_ID_1"]
python
5
0
pkg/provider/gke/gke.go
// Copyright 2019 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package gke import ( "context" "encoding/base64" "encoding/json" "fmt" "io/ioutil" "log" "os" "regexp" "strings" gke "cloud.google.com/go/container/apiv1" "github.com/pkg/errors" k8sProvider "github.com/prometheus/test-infra/pkg/provider/k8s" "github.com/prometheus/test-infra/pkg/provider" containerpb "google.golang.org/genproto/googleapis/container/v1" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "gopkg.in/alecthomas/kingpin.v2" yamlGo "gopkg.in/yaml.v2" "google.golang.org/api/option" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes/scheme" _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" ) // New is the GKE constructor. func New() *GKE { return &GKE{ DeploymentVars: make(map[string]string), } } type Resource = provider.Resource // GKE holds the fields used to generate an API request. type GKE struct { // The auth used to authenticate the cli. // Can be a file path or an env variable that includes the json data. Auth string // The project id for all requests. ProjectID string // The gke client used when performing GKE requests. clientGKE *gke.ClusterManagerClient // The k8s provider used when we work with the manifest files. k8sProvider *k8sProvider.K8s // DeploymentFiles files provided from the cli. DeploymentFiles []string // Variables to substitute in the DeploymentFiles. // These are also used when the command requires some variables that are not provided by the deployment file. DeploymentVars map[string]string // Content bytes after parsing the template variables, grouped by filename. gkeResources []Resource // K8s resource.runtime objects after parsing the template variables, grouped by filename. k8sResources []k8sProvider.Resource ctx context.Context } // NewGKEClient sets the GKE client used when performing GKE requests. func (c *GKE) NewGKEClient(*kingpin.ParseContext) error { // Set the auth env variable needed to the gke client. if c.Auth != "" { } else if c.Auth = os.Getenv("GOOGLE_APPLICATION_CREDENTIALS"); c.Auth == "" { return errors.Errorf("no auth provided! Need to either set the auth flag or the GOOGLE_APPLICATION_CREDENTIALS env variable") } // When the auth variable points to a file // put the file content in the variable. if content, err := ioutil.ReadFile(c.Auth); err == nil { c.Auth = string(content) } // Check if auth data is base64 encoded and decode it. encoded, err := regexp.MatchString("^([A-Za-z0-9+/]{4})*([A-Za-z0-9+/]{3}=|[A-Za-z0-9+/]{2}==)?$", c.Auth) if err != nil { return err } if encoded { auth, err := base64.StdEncoding.DecodeString(c.Auth) if err != nil { return errors.Wrap(err, "could not decode auth data") } c.Auth = string(auth) } // Create temporary file to store the credentials. 
saFile, err := ioutil.TempFile("", "service-account") if err != nil { return errors.Wrap(err, "could not create temp file") } defer saFile.Close() if _, err := saFile.Write([]byte(c.Auth)); err != nil { return errors.Wrap(err, "could not write to temp file") } // Set the auth env variable needed to the k8s client. // The client looks for this special variable name and it is the only way to set the auth for now. // TODO: Remove when the client supports an auth config option in NewDefaultClientConfig. os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", saFile.Name()) opts := option.WithCredentialsJSON([]byte(c.Auth)) cl, err := gke.NewClusterManagerClient(context.Background(), opts) if err != nil { return errors.Wrap(err, "could not create the gke client") } c.clientGKE = cl c.ctx = context.Background() return nil } // GKEDeploymentsParse parses the cluster/nodepool deployment files and saves the result as bytes grouped by the filename. // Any variables passed to the cli will be replaced in the resources files following the golang text template format. func (c *GKE) GKEDeploymentsParse(*kingpin.ParseContext) error { c.setProjectID() deploymentResource, err := provider.DeploymentsParse(c.DeploymentFiles, c.DeploymentVars) if err != nil { log.Fatalf("Couldn't parse deployment files: %v", err) } c.gkeResources = deploymentResource return nil } // K8SDeploymentsParse parses the k8s objects deployment files and saves the result as k8s objects grouped by the filename. // Any variables passed to the cli will be replaced in the resources files following the golang text template format. func (c *GKE) K8SDeploymentsParse(*kingpin.ParseContext) error { c.setProjectID() deploymentResource, err := provider.DeploymentsParse(c.DeploymentFiles, c.DeploymentVars) if err != nil { log.Fatalf("Couldn't parse deployment files: %v", err) } for _, deployment := range deploymentResource { decode := scheme.Codecs.UniversalDeserializer().Decode k8sObjects := make([]runtime.Object, 0) for _, text := range strings.Split(string(deployment.Content), provider.Separator) { text = strings.TrimSpace(text) if len(text) == 0 { continue } resource, _, err := decode([]byte(text), nil, nil) if err != nil { return errors.Wrapf(err, "decoding the resource file:%v, section:%v...", deployment.FileName, text[:100]) } if resource == nil { continue } k8sObjects = append(k8sObjects, resource) } if len(k8sObjects) > 0 { c.k8sResources = append(c.k8sResources, k8sProvider.Resource{FileName: deployment.FileName, Objects: k8sObjects}) } } return nil } // setProjectID either from the cli arg or read it from the auth data. func (c *GKE) setProjectID() { if v, ok := c.DeploymentVars["PROJECT_ID"]; !ok || v == "" { d := make(map[string]interface{}) if err := json.Unmarshal([]byte(c.Auth), &d); err != nil { log.Fatalf("Couldn't parse auth file: %v", err) } v, ok := d["project_id"].(string) if !ok { log.Fatal("Couldn't get project id from the auth file") } c.DeploymentVars["PROJECT_ID"] = v } } // ClusterCreate create a new cluster or applies changes to an existing cluster. 
func (c *GKE) ClusterCreate(*kingpin.ParseContext) error { req := &containerpb.CreateClusterRequest{} for _, deployment := range c.gkeResources { if err := yamlGo.UnmarshalStrict(deployment.Content, req); err != nil { log.Fatalf("Error parsing the cluster deployment file %s:%v", deployment.FileName, err) } log.Printf("Cluster create request: name:'%v', project `%s`,zone `%s`", req.Cluster.Name, req.ProjectId, req.Zone) _, err := c.clientGKE.CreateCluster(c.ctx, req) if err != nil { log.Fatalf("Couldn't create cluster '%v', file:%v ,err: %v", req.Cluster.Name, deployment.FileName, err) } err = provider.RetryUntilTrue( fmt.Sprintf("creating cluster:%v", req.Cluster.Name), provider.GlobalRetryCount, func() (bool, error) { return c.clusterRunning(req.Zone, req.ProjectId, req.Cluster.Name) }) if err != nil { log.Fatalf("creating cluster err:%v", err) } } return nil } // ClusterDelete deletes a k8s cluster. func (c *GKE) ClusterDelete(*kingpin.ParseContext) error { // Use CreateClusterRequest struct to pass the UnmarshalStrict validation and // than use the result to create the DeleteClusterRequest reqC := &containerpb.CreateClusterRequest{} for _, deployment := range c.gkeResources { if err := yamlGo.UnmarshalStrict(deployment.Content, reqC); err != nil { log.Fatalf("Error parsing the cluster deployment file %s:%v", deployment.FileName, err) } reqD := &containerpb.DeleteClusterRequest{ ProjectId: reqC.ProjectId, Zone: reqC.Zone, ClusterId: reqC.Cluster.Name, } log.Printf("Removing cluster '%v', project '%v', zone '%v'", reqD.ClusterId, reqD.ProjectId, reqD.Zone) err := provider.RetryUntilTrue( fmt.Sprintf("deleting cluster:%v", reqD.ClusterId), provider.GlobalRetryCount, func() (bool, error) { return c.clusterDeleted(reqD) }) if err != nil { log.Fatalf("removing cluster err:%v", err) } } return nil } // clusterDeleted checks whether a cluster has been deleted. func (c *GKE) clusterDeleted(req *containerpb.DeleteClusterRequest) (bool, error) { rep, err := c.clientGKE.DeleteCluster(c.ctx, req) if err != nil { st, ok := status.FromError(err) if !ok { return false, fmt.Errorf("unknown reply status error %v", err) } if st.Code() == codes.NotFound { return true, nil } if st.Code() == codes.FailedPrecondition { log.Printf("Cluster in 'FailedPrecondition' state '%s'", err) return false, nil } return false, errors.Wrapf(err, "deleting cluster:%v", req.ClusterId) } log.Printf("cluster status: `%v`", rep.Status) return false, nil } // clusterRunning checks whether a cluster is in a running state. func (c *GKE) clusterRunning(zone, projectID, clusterID string) (bool, error) { req := &containerpb.GetClusterRequest{ ProjectId: projectID, Zone: zone, ClusterId: clusterID, } cluster, err := c.clientGKE.GetCluster(c.ctx, req) if err != nil { // We don't consider none existing cluster error a failure. So don't return an error here. if st, ok := status.FromError(err); ok && st.Code() == codes.NotFound { return false, nil } return false, fmt.Errorf("Couldn't get cluster status:%v", err) } if cluster.Status == containerpb.Cluster_ERROR || cluster.Status == containerpb.Cluster_STATUS_UNSPECIFIED || cluster.Status == containerpb.Cluster_STOPPING { return false, fmt.Errorf("Cluster not in a status to become ready - %s", cluster.Status) } if cluster.Status == containerpb.Cluster_RUNNING { return true, nil } log.Printf("Cluster '%v' status:%v , %v", projectID, cluster.Status, cluster.StatusMessage) return false, nil } // NodePoolCreate creates a new k8s node-pool in an existing cluster. 
func (c *GKE) NodePoolCreate(*kingpin.ParseContext) error { reqC := &containerpb.CreateClusterRequest{} for _, deployment := range c.gkeResources { if err := yamlGo.UnmarshalStrict(deployment.Content, reqC); err != nil { log.Fatalf("Error parsing the cluster deployment file %s:%v", deployment.FileName, err) } for _, node := range reqC.Cluster.NodePools { reqN := &containerpb.CreateNodePoolRequest{ ProjectId: reqC.ProjectId, Zone: reqC.Zone, ClusterId: reqC.Cluster.Name, NodePool: node, } log.Printf("Cluster nodepool create request: cluster '%v', nodepool '%v' , project `%s`,zone `%s`", reqN.ClusterId, reqN.NodePool.Name, reqN.ProjectId, reqN.Zone) err := provider.RetryUntilTrue( fmt.Sprintf("nodepool creation:%v", reqN.NodePool.Name), provider.GlobalRetryCount, func() (bool, error) { return c.nodePoolCreated(reqN) }) if err != nil { log.Fatalf("Couldn't create cluster nodepool '%v', file:%v ,err: %v", node.Name, deployment.FileName, err) } err = provider.RetryUntilTrue( fmt.Sprintf("checking nodepool running status for:%v", reqN.NodePool.Name), provider.GlobalRetryCount, func() (bool, error) { return c.nodePoolRunning(reqN.Zone, reqN.ProjectId, reqN.ClusterId, reqN.NodePool.Name) }) if err != nil { log.Fatalf("Couldn't create cluster nodepool '%v', file:%v ,err: %v", node.Name, deployment.FileName, err) } } } return nil } // nodePoolCreated checks if there is any ongoing NodePool operation on the cluster // when creating a NodePool. func (c *GKE) nodePoolCreated(req *containerpb.CreateNodePoolRequest) (bool, error) { rep, err := c.clientGKE.CreateNodePool(c.ctx, req) if err != nil { st, ok := status.FromError(err) if !ok { return false, fmt.Errorf("unknown reply status error %v", err) } if st.Code() == codes.FailedPrecondition { // GKE cannot have two simultaneous nodepool operations running on it // Waiting for any ongoing operation to complete before starting new one log.Printf("Cluster in 'FailedPrecondition' state '%s'", err) return false, nil } return false, err } log.Printf("cluster node pool status: `%v`", rep.Status) return true, nil } // NodePoolDelete deletes a new k8s node-pool in an existing cluster. func (c *GKE) NodePoolDelete(*kingpin.ParseContext) error { // Use CreateNodePoolRequest struct to pass the UnmarshalStrict validation and // than use the result to create the DeleteNodePoolRequest reqC := &containerpb.CreateClusterRequest{} for _, deployment := range c.gkeResources { if err := yamlGo.UnmarshalStrict(deployment.Content, reqC); err != nil { log.Fatalf("Error parsing the cluster deployment file %s:%v", deployment.FileName, err) } for _, node := range reqC.Cluster.NodePools { reqD := &containerpb.DeleteNodePoolRequest{ ProjectId: reqC.ProjectId, Zone: reqC.Zone, ClusterId: reqC.Cluster.Name, NodePoolId: node.Name, } log.Printf("Removing cluster node pool: `%v`, cluster '%v', project '%v', zone '%v'", reqD.NodePoolId, reqD.ClusterId, reqD.ProjectId, reqD.Zone) err := provider.RetryUntilTrue( fmt.Sprintf("deleting nodepool:%v", reqD.NodePoolId), provider.GlobalRetryCount, func() (bool, error) { return c.nodePoolDeleted(reqD) }) if err != nil { log.Fatalf("Couldn't delete cluster nodepool '%v', file:%v ,err: %v", node.Name, deployment.FileName, err) } } } return nil } // nodePoolDeleted checks whether a nodepool has been deleted. 
func (c *GKE) nodePoolDeleted(req *containerpb.DeleteNodePoolRequest) (bool, error) { rep, err := c.clientGKE.DeleteNodePool(c.ctx, req) if err != nil { st, ok := status.FromError(err) if !ok { return false, fmt.Errorf("unknown reply status error %v", err) } if st.Code() == codes.NotFound { return true, nil } if st.Code() == codes.FailedPrecondition { // GKE cannot have two simultaneous nodepool operations running on it // Waiting for any ongoing operation to complete before starting new one log.Printf("Cluster in 'FailedPrecondition' state '%s'", err) return false, nil } return false, err } log.Printf("cluster node pool status: `%v`", rep.Status) return false, nil } // nodePoolRunning checks whether a nodepool has been created and is running. func (c *GKE) nodePoolRunning(zone, projectID, clusterID, poolName string) (bool, error) { req := &containerpb.GetNodePoolRequest{ ProjectId: projectID, Zone: zone, ClusterId: clusterID, NodePoolId: poolName, } rep, err := c.clientGKE.GetNodePool(c.ctx, req) if err != nil { // We don't consider a non-existing cluster node pool a failure. So don't return an error here. if st, ok := status.FromError(err); ok && st.Code() == codes.NotFound { return false, nil } return false, fmt.Errorf("Couldn't get node pool status:%v", err) } if rep.Status == containerpb.NodePool_RUNNING { return true, nil } if rep.Status == containerpb.NodePool_ERROR || rep.Status == containerpb.NodePool_RUNNING_WITH_ERROR || rep.Status == containerpb.NodePool_STOPPING || rep.Status == containerpb.NodePool_STATUS_UNSPECIFIED { log.Fatalf("NodePool %s not in a status to become ready: %v", rep.Name, rep.StatusMessage) } log.Printf("Current cluster node pool '%v' status:%v , %v", rep.Name, rep.Status, rep.StatusMessage) return false, nil } // AllNodepoolsRunning returns an error if at least one node pool is not running. func (c *GKE) AllNodepoolsRunning(*kingpin.ParseContext) error { reqC := &containerpb.CreateClusterRequest{} for _, deployment := range c.gkeResources { if err := yamlGo.UnmarshalStrict(deployment.Content, reqC); err != nil { return errors.Errorf("error parsing the cluster deployment file %s:%v", deployment.FileName, err) } for _, node := range reqC.Cluster.NodePools { isRunning, err := c.nodePoolRunning(reqC.Zone, reqC.ProjectId, reqC.Cluster.Name, node.Name) if err != nil { log.Fatalf("error fetching nodePool info: %v", err) } if !isRunning { log.Fatalf("nodepool not running name: %v", node.Name) } } } return nil } // AllNodepoolsDeleted returns an error if at least one nodepool is not deleted. func (c *GKE) AllNodepoolsDeleted(*kingpin.ParseContext) error { reqC := &containerpb.CreateClusterRequest{} for _, deployment := range c.gkeResources { if err := yamlGo.UnmarshalStrict(deployment.Content, reqC); err != nil { return errors.Errorf("error parsing the cluster deployment file %s:%v", deployment.FileName, err) } for _, node := range reqC.Cluster.NodePools { isRunning, err := c.nodePoolRunning(reqC.Zone, reqC.ProjectId, reqC.Cluster.Name, node.Name) if err != nil { log.Fatalf("error fetching nodePool info: %v", err) } if isRunning { log.Fatalf("nodepool running name: %v", node.Name) } } } return nil } // NewK8sProvider sets the k8s provider used for deploying k8s manifests.
func (c *GKE) NewK8sProvider(*kingpin.ParseContext) error { projectID, ok := c.DeploymentVars["PROJECT_ID"] if !ok { return fmt.Errorf("missing required PROJECT_ID variable") } zone, ok := c.DeploymentVars["ZONE"] if !ok { return fmt.Errorf("missing required ZONE variable") } clusterID, ok := c.DeploymentVars["CLUSTER_NAME"] if !ok { return fmt.Errorf("missing required CLUSTER_NAME variable") } // Get the authentication certificate for the cluster using the GKE client. req := &containerpb.GetClusterRequest{ ProjectId: projectID, Zone: zone, ClusterId: clusterID, } rep, err := c.clientGKE.GetCluster(c.ctx, req) if err != nil { log.Fatalf("failed to get cluster details: %v", err) } // The master auth retrieved from GCP is base64 encoded, so it must be decoded first. caCert, err := base64.StdEncoding.DecodeString(rep.MasterAuth.GetClusterCaCertificate()) if err != nil { log.Fatalf("failed to decode certificate: %v", err.Error()) } cluster := clientcmdapi.NewCluster() cluster.CertificateAuthorityData = []byte(caCert) cluster.Server = fmt.Sprintf("https://%v", rep.Endpoint) context := clientcmdapi.NewContext() context.Cluster = rep.Name context.AuthInfo = rep.Zone authInfo := clientcmdapi.NewAuthInfo() authInfo.AuthProvider = &clientcmdapi.AuthProviderConfig{ Name: "gcp", Config: map[string]string{ "cmd-args": "config config-helper --format=json", "expiry-key": "{.credential.token_expiry}", "token-key": "{.credential.access_token}", }, } config := clientcmdapi.NewConfig() config.Clusters[rep.Name] = cluster config.Contexts[rep.Zone] = context config.AuthInfos[rep.Zone] = authInfo config.CurrentContext = rep.Zone c.k8sProvider, err = k8sProvider.New(c.ctx, config) if err != nil { log.Fatal("k8s provider error", err) } return nil } // ResourceApply calls k8s.ResourceApply to apply the k8s objects in the manifest files. func (c *GKE) ResourceApply(*kingpin.ParseContext) error { if err := c.k8sProvider.ResourceApply(c.k8sResources); err != nil { log.Fatal("error while applying a resource err:", err) } return nil } // ResourceDelete calls k8s.ResourceDelete to delete the k8s objects in the manifest files. func (c *GKE) ResourceDelete(*kingpin.ParseContext) error { if err := c.k8sProvider.ResourceDelete(c.k8sResources); err != nil { log.Fatal("error while deleting objects from a manifest file err:", err) } return nil }
[ "\"GOOGLE_APPLICATION_CREDENTIALS\"" ]
[]
[ "GOOGLE_APPLICATION_CREDENTIALS" ]
[]
["GOOGLE_APPLICATION_CREDENTIALS"]
go
1
0
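The GKE commands above delegate all completion polling to provider.RetryUntilTrue and provider.GlobalRetryCount, which are not part of this file. Below is a minimal sketch of what such a helper could look like, assuming only the call shape used above (a label, a retry budget, and a func() (bool, error) predicate); the retry count and the 10-second pause between attempts are illustrative assumptions, not the project's actual values.

package provider

import (
	"fmt"
	"log"
	"time"
)

// GlobalRetryCount caps how many times a readiness predicate is polled
// (assumed value, for illustration only).
const GlobalRetryCount = 50

// RetryUntilTrue keeps calling fn until it reports true, fn returns an
// error, or the retry budget runs out. This mirrors how ClusterCreate,
// ClusterDelete and the nodepool commands above use it.
func RetryUntilTrue(name string, retryCount int, fn func() (bool, error)) error {
	for i := 1; i <= retryCount; i++ {
		done, err := fn()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		log.Printf("%s: attempt %d/%d not done yet, retrying", name, i, retryCount)
		time.Sleep(10 * time.Second)
	}
	return fmt.Errorf("%s: retry limit of %d reached", name, retryCount)
}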
mysql_test.go
package idbenchmark_test import ( "database/sql" "log" "os" "testing" _ "github.com/go-sql-driver/mysql" ) const ( m1 = "INSERT INTO m1 VALUES (NULL)" m2 = "REPLACE INTO m2 VALUES (NULL)" m3 = "UPDATE m3 SET id=LAST_INSERT_ID(id+1)" m4 = "UPDATE m3 SET id=LAST_INSERT_ID(id+1) LIMIT 1" i1 = "INSERT INTO i1 VALUES (NULL)" i2 = "REPLACE INTO i2 VALUES (NULL)" i3 = "UPDATE i3 SET id=LAST_INSERT_ID(id+1)" i4 = "UPDATE i3 SET id=LAST_INSERT_ID(id+1) LIMIT 1" ) var mysqlDSN string = "root:@tcp(127.0.0.1:3306)/idbenchmark" func init() { idbenchmarkDSN := os.Getenv("idbenchmark_DSN") if idbenchmarkDSN != "" { mysqlDSN = idbenchmarkDSN } } func mysqlConnect() (db *sql.DB, err error) { db, err = sql.Open("mysql", mysqlDSN) if err != nil { log.Println(err) return nil, err } return db, nil } func runMysql(b *testing.B, db *sql.DB, s string) { stmt, err := db.Prepare(s) if err != nil { log.Printf("Prepare error: %v: %s", err, s) return } defer stmt.Close() runMysqlStmt(b, db, s, stmt) } func runMysqlStmt(b *testing.B, db *sql.DB, s string, stmt *sql.Stmt) { var res sql.Result var err error var id int64 for n := 0; n < b.N; n++ { res, err = stmt.Exec() if err != nil { log.Printf("Exec error: %v: %d: %s", err, n, s) break } id, err = res.LastInsertId() if err != nil { log.Printf("LastInsertId error: %v", err) break } if id == 0 { log.Printf("id=0") break } } } func BenchmarkMysqlInsert(b *testing.B) { db, err := mysqlConnect() if err != nil { return } defer db.Close() b.ResetTimer() runMysql(b, db, m1) b.StopTimer() } func BenchmarkMysqlReplace(b *testing.B) { db, err := mysqlConnect() if err != nil { return } defer db.Close() b.ResetTimer() runMysql(b, db, m2) b.StopTimer() } func BenchmarkMysqlUpdate(b *testing.B) { db, err := mysqlConnect() if err != nil { return } defer db.Close() b.ResetTimer() runMysql(b, db, m3) b.StopTimer() } func BenchmarkMysqlUpdateLimit1(b *testing.B) { db, err := mysqlConnect() if err != nil { return } defer db.Close() b.ResetTimer() runMysql(b, db, m4) b.StopTimer() } func BenchmarkInnoDBInsert(b *testing.B) { db, err := mysqlConnect() if err != nil { return } defer db.Close() b.ResetTimer() runMysql(b, db, i1) b.StopTimer() } func BenchmarkInnoDBReplace(b *testing.B) { db, err := mysqlConnect() if err != nil { return } defer db.Close() b.ResetTimer() runMysql(b, db, i2) b.StopTimer() } func BenchmarkInnoDBUpdate(b *testing.B) { db, err := mysqlConnect() if err != nil { return } defer db.Close() b.ResetTimer() runMysql(b, db, i3) b.StopTimer() } func BenchmarkInnoDBUpdateLimit1(b *testing.B) { db, err := mysqlConnect() if err != nil { return } defer db.Close() b.ResetTimer() runMysql(b, db, i4) b.StopTimer() } func BenchmarkMysqlInsertParallel(b *testing.B) { db, err := mysqlConnect() if err != nil { return } defer db.Close() b.ResetTimer() b.RunParallel(func(pb *testing.PB) { for pb.Next() { runMysql(b, db, m1) } }) b.StopTimer() } func BenchmarkMysqlReplaceParallel(b *testing.B) { db, err := mysqlConnect() if err != nil { return } defer db.Close() b.ResetTimer() b.RunParallel(func(pb *testing.PB) { for pb.Next() { runMysql(b, db, m2) } }) b.StopTimer() } func BenchmarkMysqlUpdateParallel(b *testing.B) { db, err := mysqlConnect() if err != nil { return } defer db.Close() b.ResetTimer() b.RunParallel(func(pb *testing.PB) { for pb.Next() { runMysql(b, db, m3) } }) b.StopTimer() } func BenchmarkMysqlUpdateLimit1Parallel(b *testing.B) { db, err := mysqlConnect() if err != nil { return } defer db.Close() b.ResetTimer() b.RunParallel(func(pb *testing.PB) { for pb.Next() { 
runMysql(b, db, m4) } }) b.StopTimer() } func BenchmarkInnoDBInsertParallel(b *testing.B) { db, err := mysqlConnect() if err != nil { return } defer db.Close() b.ResetTimer() b.RunParallel(func(pb *testing.PB) { for pb.Next() { runMysql(b, db, i1) } }) b.StopTimer() } func BenchmarkInnoDBReplaceParallel(b *testing.B) { db, err := mysqlConnect() if err != nil { return } defer db.Close() b.ResetTimer() b.RunParallel(func(pb *testing.PB) { for pb.Next() { runMysql(b, db, i2) } }) b.StopTimer() } func BenchmarkInnoDBUpdateParallel(b *testing.B) { db, err := mysqlConnect() if err != nil { return } defer db.Close() b.ResetTimer() b.RunParallel(func(pb *testing.PB) { for pb.Next() { runMysql(b, db, i3) } }) b.StopTimer() } func BenchmarkInnoDBUpdateLimit1Parallel(b *testing.B) { db, err := mysqlConnect() if err != nil { return } defer db.Close() b.ResetTimer() b.RunParallel(func(pb *testing.PB) { for pb.Next() { runMysql(b, db, i4) } }) b.StopTimer() }
[ "\"idbenchmark_DSN\"" ]
[]
[ "idbenchmark_DSN" ]
[]
["idbenchmark_DSN"]
go
1
0
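The benchmarks above assume an idbenchmark database that already contains single-column id tables: m1-m3 backed by MyISAM and i1-i3 backed by InnoDB, with m3 and i3 holding one seed row for the UPDATE ... LAST_INSERT_ID(id+1) pattern. Below is a hedged one-off setup sketch in Go; the exact column types and storage engines are assumptions inferred from the statements in mysql_test.go, not taken from the benchmark's own setup scripts.

package main

import (
	"database/sql"
	"log"
	"os"

	_ "github.com/go-sql-driver/mysql"
)

// Assumed schema: an AUTO_INCREMENT id for the INSERT/REPLACE tables and a
// plain id column, seeded with 0, for the LAST_INSERT_ID update tables.
var setupStatements = []string{
	"CREATE TABLE m1 (id BIGINT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY) ENGINE=MyISAM",
	"CREATE TABLE m2 (id BIGINT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY) ENGINE=MyISAM",
	"CREATE TABLE m3 (id BIGINT UNSIGNED NOT NULL) ENGINE=MyISAM",
	"INSERT INTO m3 VALUES (0)",
	"CREATE TABLE i1 (id BIGINT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY) ENGINE=InnoDB",
	"CREATE TABLE i2 (id BIGINT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY) ENGINE=InnoDB",
	"CREATE TABLE i3 (id BIGINT UNSIGNED NOT NULL) ENGINE=InnoDB",
	"INSERT INTO i3 VALUES (0)",
}

func main() {
	// Same default DSN as the benchmark; override it via idbenchmark_DSN if set.
	dsn := "root:@tcp(127.0.0.1:3306)/idbenchmark"
	if env := os.Getenv("idbenchmark_DSN"); env != "" {
		dsn = env
	}
	db, err := sql.Open("mysql", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	for _, stmt := range setupStatements {
		if _, err := db.Exec(stmt); err != nil {
			log.Fatalf("setup failed on %q: %v", stmt, err)
		}
	}
}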
DQM/Integration/scripts/harvesting_tools/cmsHarvester.py
#!/usr/bin/env python ########################################################################### ## File : cmsHarvest.py ## Authors : Jeroen Hegeman ([email protected]) ## Niklas Pietsch ([email protected]) ## Franseco Costanza ([email protected]) ## Last change: 20100308 ## ## Purpose : Main program to run all kinds of harvesting. ## For more information please refer to the CMS Twiki url ## mentioned just below here. ########################################################################### """Main program to run all kinds of harvesting. These are the basic kinds of harvesting implemented (contact me if your favourite is missing): - RelVal : Run for release validation samples. Makes heavy use of MC truth information. - RelValFS: FastSim RelVal. - MC : Run for MC samples. - DQMOffline : Run for real data (could also be run for MC). For the mappings of these harvesting types to sequence names please see the setup_harvesting_info() and option_handler_list_types() methods. """ from __future__ import print_function ########################################################################### from builtins import range __version__ = "3.8.2p1" # (version jump to match release) __author__ = "Jeroen Hegeman ([email protected])," \ "Niklas Pietsch ([email protected])" twiki_url = "https://twiki.cern.ch/twiki/bin/view/CMS/CmsHarvester" ########################################################################### ########################################################################### ## TODO list ## ## !!! Some code refactoring is in order. A lot of the code that loads ## and builds dataset and run lists is duplicated. !!! ## ## - SPECIAL (future): ## After discussing all these harvesting issues yet again with Luca, ## it looks like we'll need something special to handle harvesting ## for (collisions) data in reprocessing. Stuff with a special DBS ## instance in which `rolling' reports of reprocessed datasets is ## publised. In this case we will have to check (w.r.t. the parent ## dataset) how much of a given run is ready, and submit once we're ## satisfied (let's say 90%). ## ## - We could get rid of most of the `and dataset.status = VALID' ## pieces in the DBS queries. ## - Change to a more efficient grid scheduler. ## - Implement incremental harvesting. Requires some changes to the ## book keeping to store the harvested number of events for each ## run. Also requires some changes to the dataset checking to see if ## additional statistics have become available. ## - Emphasize the warnings in case we're running in force ## mode. Otherwise they may get lost a bit in the output. ## - Fix the creation of the CASTOR dirs. The current approach works ## but is a) too complicated and b) too inefficient. ## - Fully implement all harvesting types. ## --> Discuss with Andreas what exactly should be in there. And be ## careful with the run numbers! ## - Add the (second-step) harvesting config file to the (first-step) ## ME extraction job to make sure it does not get lost. ## - Improve sanity checks on harvesting type vs. data type. ## - Implement reference histograms. ## 1) User-specified reference dataset. ## 2) Educated guess based on dataset name. ## 3) References from GlobalTag. ## 4) No reference at all. ## - Is this options.evt_type used anywhere? ## - Combine all these dbs_resolve_xxx into a single call to DBS(?). ## - Implement CRAB server use? ## - Add implementation of email address of user. (Only necessary for ## CRAB server.) 
########################################################################### import os import sys import commands import re import logging import optparse import datetime import copy from inspect import getargspec from random import choice import six # These we need to communicate with DBS global DBSAPI from DBSAPI.dbsApi import DbsApi import DBSAPI.dbsException import DBSAPI.dbsApiException from functools import reduce # and these we need to parse the DBS output. global xml global SAXParseException import xml.sax from xml.sax import SAXParseException import Configuration.PyReleaseValidation from Configuration.PyReleaseValidation.ConfigBuilder import \ ConfigBuilder, defaultOptions # from Configuration.PyReleaseValidation.cmsDriverOptions import options, python_config_filename #import FWCore.ParameterSet.Config as cms # Debugging stuff. import pdb try: import debug_hook except ImportError: pass ########################################################################### ## Helper class: Usage exception. ########################################################################### class Usage(Exception): def __init__(self, msg): self.msg = msg def __str__(self): return repr(self.msg) # End of Usage. ########################################################################### ## Helper class: Error exception. ########################################################################### class Error(Exception): def __init__(self, msg): self.msg = msg def __str__(self): return repr(self.msg) ########################################################################### ## Helper class: CMSHarvesterHelpFormatter. ########################################################################### class CMSHarvesterHelpFormatter(optparse.IndentedHelpFormatter): """Helper class to add some customised help output to cmsHarvester. We want to add some instructions, as well as a pointer to the CMS Twiki. """ def format_usage(self, usage): usage_lines = [] sep_line = "-" * 60 usage_lines.append(sep_line) usage_lines.append("Welcome to the CMS harvester, a (hopefully useful)") usage_lines.append("tool to create harvesting configurations.") usage_lines.append("For more information please have a look at the CMS Twiki:") usage_lines.append(" %s" % twiki_url) usage_lines.append(sep_line) usage_lines.append("") # Since we only add to the output, we now just append the # original output from IndentedHelpFormatter. usage_lines.append(optparse.IndentedHelpFormatter. \ format_usage(self, usage)) formatted_usage = "\n".join(usage_lines) return formatted_usage # End of CMSHarvesterHelpFormatter. ########################################################################### ## Helper class: DBSXMLHandler. ########################################################################### class DBSXMLHandler(xml.sax.handler.ContentHandler): """XML handler class to parse DBS results. The tricky thing here is that older DBS versions (2.0.5 and earlier) return results in a different XML format than newer versions. Previously the result values were returned as attributes to the `result' element. The new approach returns result values as contents of named elements. The old approach is handled directly in startElement(), the new approach in characters(). NOTE: All results are returned in the form of string values of course! """ # This is the required mapping from the name of the variable we # ask for to what we call it ourselves. (Effectively this is the # mapping between the old attribute key name and the new element # name.) 
mapping = { "dataset" : "PATH", "dataset.tag" : "PROCESSEDDATASET_GLOBALTAG", "datatype.type" : "PRIMARYDSTYPE_TYPE", "run" : "RUNS_RUNNUMBER", "run.number" : "RUNS_RUNNUMBER", "file.name" : "FILES_LOGICALFILENAME", "file.numevents" : "FILES_NUMBEROFEVENTS", "algo.version" : "APPVERSION_VERSION", "site" : "STORAGEELEMENT_SENAME", } def __init__(self, tag_names): # This is a list used as stack to keep track of where we are # in the element tree. self.element_position = [] self.tag_names = tag_names self.results = {} def startElement(self, name, attrs): self.element_position.append(name) self.current_value = [] #---------- # This is to catch results from DBS 2.0.5 and earlier. if name == "result": for name in self.tag_names: key = DBSXMLHandler.mapping[name] value = str(attrs[key]) try: self.results[name].append(value) except KeyError: self.results[name] = [value] #---------- def endElement(self, name): assert self.current_element() == name, \ "closing unopened element `%s'" % name if self.current_element() in self.tag_names: contents = "".join(self.current_value) if self.current_element() in self.results: self.results[self.current_element()].append(contents) else: self.results[self.current_element()] = [contents] self.element_position.pop() def characters(self, content): # NOTE: It is possible that the actual contents of the tag # gets split into multiple pieces. This method will be called # for each of the pieces. This means we have to concatenate # everything ourselves. if self.current_element() in self.tag_names: self.current_value.append(content) def current_element(self): return self.element_position[-1] def check_results_validity(self): """Make sure that all results arrays have equal length. We should have received complete rows from DBS. I.e. all results arrays in the handler should be of equal length. """ results_valid = True res_names = self.results.keys() if len(res_names) > 1: for res_name in res_names[1:]: res_tmp = self.results[res_name] if len(res_tmp) != len(self.results[res_names[0]]): results_valid = False return results_valid # End of DBSXMLHandler. ########################################################################### ## CMSHarvester class. ########################################################################### class CMSHarvester(object): """Class to perform CMS harvesting. More documentation `obviously' to follow. """ ########## def __init__(self, cmd_line_opts=None): "Initialize class and process command line options." self.version = __version__ # These are the harvesting types allowed. See the head of this # file for more information. self.harvesting_types = [ "RelVal", "RelValFS", "MC", "DQMOffline", ] # These are the possible harvesting modes: # - Single-step: harvesting takes place on-site in a single # step. For each sample only a single ROOT file containing # the harvesting results is returned. # - Single-step-allow-partial: special hack to allow # harvesting of partial statistics using single-step # harvesting on spread-out samples. # - Two-step: harvesting takes place in two steps. The first # step returns a series of monitoring element summaries for # each sample. The second step then merges these summaries # locally and does the real harvesting. This second step # produces the ROOT file containing the harvesting results. self.harvesting_modes = [ "single-step", "single-step-allow-partial", "two-step" ] # It is possible to specify a GlobalTag that will override any # choices (regarding GlobalTags) made by the cmsHarvester.
# BUG BUG BUG # For the moment, until I figure out a way to obtain the # GlobalTag with which a given data (!) dataset was created, # it is necessary to specify a GlobalTag for harvesting of # data. # BUG BUG BUG end self.globaltag = None # It's also possible to switch off the use of reference # histograms altogether. self.use_ref_hists = True # The database name and account are hard-coded. They are not # likely to change before the end-of-life of this tool. But of # course there is a way to override this from the command # line. One can even override the Frontier connection used for # the GlobalTag and for the reference histograms # independently. Please only use this for testing purposes. self.frontier_connection_name = {} self.frontier_connection_name["globaltag"] = "frontier://" \ "FrontierProd/" self.frontier_connection_name["refhists"] = "frontier://" \ "FrontierProd/" self.frontier_connection_overridden = {} for key in self.frontier_connection_name.keys(): self.frontier_connection_overridden[key] = False # This contains information specific to each of the harvesting # types. Used to create the harvesting configuration. It is # filled by setup_harvesting_info(). self.harvesting_info = None ### # These are default `unused' values that will be filled in # depending on the command line options. # The type of harvesting we're doing. See # self.harvesting_types for allowed types. self.harvesting_type = None # The harvesting mode, popularly known as single-step # vs. two-step. The thing to remember at this point is that # single-step is only possible for samples located completely # at a single site (i.e. SE). self.harvesting_mode = None # BUG BUG BUG # Default temporarily set to two-step until we can get staged # jobs working with CRAB. self.harvesting_mode_default = "single-step" # BUG BUG BUG end # The input method: are we reading a dataset name (or regexp) # directly from the command line or are we reading a file # containing a list of dataset specifications. Actually we # keep one of each for both datasets and runs. self.input_method = {} self.input_method["datasets"] = {} self.input_method["datasets"]["use"] = None self.input_method["datasets"]["ignore"] = None self.input_method["runs"] = {} self.input_method["runs"]["use"] = None self.input_method["runs"]["ignore"] = None # The name of whatever input we're using. self.input_name = {} self.input_name["datasets"] = {} self.input_name["datasets"]["use"] = None self.input_name["datasets"]["ignore"] = None self.input_name["runs"] = {} self.input_name["runs"]["use"] = None self.input_name["runs"]["ignore"] = None self.Jsonlumi = False self.Jsonfilename = "YourJSON.txt" self.Jsonrunfilename = "YourJSON.txt" self.todofile = "YourToDofile.txt" # If this is true, we're running in `force mode'. In this case # the sanity checks are performed but failure will not halt # everything. self.force_running = None # The base path of the output dir in CASTOR. self.castor_base_dir = None self.castor_base_dir_default = "/castor/cern.ch/" \ "cms/store/temp/" \ "dqm/offline/harvesting_output/" # The name of the file to be used for book keeping: which # datasets, runs, etc. we have already processed. self.book_keeping_file_name = None self.book_keeping_file_name_default = "harvesting_accounting.txt" # The dataset name to reference histogram name mapping is read # from a text file. The name of this file is kept in the # following variable. self.ref_hist_mappings_file_name = None # And this is the default value.
self.ref_hist_mappings_file_name_default = "harvesting_ref_hist_mappings.txt" # Hmmm, hard-coded prefix of the CERN CASTOR area. This is the # only supported CASTOR area. # NOTE: Make sure this one starts with a `/'. self.castor_prefix = "/castor/cern.ch" # Normally the central harvesting should be done using the # `t1access' grid role. To be able to run without T1 access # the --no-t1access flag can be used. This variable keeps # track of that special mode. self.non_t1access = False self.caf_access = False self.saveByLumiSection = False self.crab_submission = False self.nr_max_sites = 1 self.preferred_site = "no preference" # This will become the list of datasets and runs to consider self.datasets_to_use = {} # and this will become the list of datasets and runs to skip. self.datasets_to_ignore = {} # This, in turn, will hold all book keeping information. self.book_keeping_information = {} # And this is where the dataset name to reference histogram # name mapping is stored. self.ref_hist_mappings = {} # We're now also allowing run selection. This means we also # have to keep list of runs requested and vetoed by the user. self.runs_to_use = {} self.runs_to_ignore = {} # Cache for CMSSW version availability at different sites. self.sites_and_versions_cache = {} # Cache for checked GlobalTags. self.globaltag_check_cache = [] # Global flag to see if there were any jobs for which we could # not find a matching site. self.all_sites_found = True # Helper string centrally defined. self.no_matching_site_found_str = "no_matching_site_found" # Store command line options for later use. if cmd_line_opts is None: cmd_line_opts = sys.argv[1:] self.cmd_line_opts = cmd_line_opts # Set up the logger. log_handler = logging.StreamHandler() # This is the default log formatter, the debug option switches # on some more information. log_formatter = logging.Formatter("%(message)s") log_handler.setFormatter(log_formatter) logger = logging.getLogger() logger.name = "main" logger.addHandler(log_handler) self.logger = logger # The default output mode is quite verbose. self.set_output_level("NORMAL") #logger.debug("Initialized successfully") # End of __init__. ########## def cleanup(self): "Clean up after ourselves." # NOTE: This is the safe replacement of __del__. #self.logger.debug("All done -> shutting down") logging.shutdown() # End of cleanup. ########## def time_stamp(self): "Create a timestamp to use in the created config files." time_now = datetime.datetime.utcnow() # We don't care about the microseconds. time_now = time_now.replace(microsecond = 0) time_stamp = "%sUTC" % datetime.datetime.isoformat(time_now) # End of time_stamp. return time_stamp ########## def ident_string(self): "Spit out an identification string for cmsHarvester.py." ident_str = "`cmsHarvester.py " \ "version %s': cmsHarvester.py %s" % \ (__version__, reduce(lambda x, y: x+' '+y, sys.argv[1:])) return ident_str ########## def format_conditions_string(self, globaltag): """Create the conditions string needed for `cmsDriver'. Just glueing the FrontierConditions bit in front of it really. """ # Not very robust but okay. The idea is that if the user # specified (since this cannot happen with GlobalTags coming # from DBS) something containing `conditions', they probably # know what they're doing and we should not muck things up. In # all other cases we just assume we only received the # GlobalTag part and we built the usual conditions string from # that . 
if globaltag.lower().find("conditions") > -1: conditions_string = globaltag else: conditions_string = "FrontierConditions_GlobalTag,%s" % \ globaltag # End of format_conditions_string. return conditions_string ########## def db_account_name_cms_cond_globaltag(self): """Return the database account name used to store the GlobalTag. The name of the database account depends (albeit weakly) on the CMSSW release version. """ # This never changed, unlike the cms_cond_31X_DQM_SUMMARY -> # cms_cond_34X_DQM transition. account_name = "CMS_COND_31X_GLOBALTAG" # End of db_account_name_cms_cond_globaltag. return account_name ########## def db_account_name_cms_cond_dqm_summary(self): """See db_account_name_cms_cond_globaltag.""" account_name = None version = self.cmssw_version[6:11] if version < "3_4_0": account_name = "CMS_COND_31X_DQM_SUMMARY" else: account_name = "CMS_COND_34X" # End of db_account_name_cms_cond_dqm_summary. return account_name ########## def config_file_header(self): "Create a nice header to be used to mark the generated files." tmp = [] time_stamp = self.time_stamp() ident_str = self.ident_string() tmp.append("# %s" % time_stamp) tmp.append("# WARNING: This file was created automatically!") tmp.append("") tmp.append("# Created by %s" % ident_str) header = "\n".join(tmp) # End of config_file_header. return header ########## def set_output_level(self, output_level): """Adjust the level of output generated. Choices are: - normal : default level of output - quiet : less output than the default - verbose : some additional information - debug : lots more information, may be overwhelming NOTE: The debug option is a bit special in the sense that it also modifies the output format. """ # NOTE: These levels are hooked up to the ones used in the # logging module. output_levels = { "NORMAL" : logging.INFO, "QUIET" : logging.WARNING, "VERBOSE" : logging.INFO, "DEBUG" : logging.DEBUG } output_level = output_level.upper() try: # Update the logger. self.log_level = output_levels[output_level] self.logger.setLevel(self.log_level) except KeyError: # Show a complaint self.logger.fatal("Unknown output level `%s'" % output_level) # and re-raise an exception. raise Exception # End of set_output_level. ########## def option_handler_debug(self, option, opt_str, value, parser): """Switch to debug mode. This both increases the amount of output generated, as well as changes the format used (more detailed information is given). """ # Switch to a more informative log formatter for debugging. log_formatter_debug = logging.Formatter("[%(levelname)s] " \ # NOTE: funcName was # only implemented # starting with python # 2.5. #"%(funcName)s() " \ #"@%(filename)s:%(lineno)d " \ "%(message)s") # Hmmm, not very nice. This assumes there's only a single # handler associated with the current logger. log_handler = self.logger.handlers[0] log_handler.setFormatter(log_formatter_debug) self.set_output_level("DEBUG") # End of option_handler_debug. ########## def option_handler_quiet(self, option, opt_str, value, parser): "Switch to quiet mode: less verbose." self.set_output_level("QUIET") # End of option_handler_quiet. ########## def option_handler_force(self, option, opt_str, value, parser): """Switch on `force mode' in which case we don't brake for nobody. In so-called `force mode' all sanity checks are performed but we don't halt on failure. Of course this requires some care from the user. """ self.logger.debug("Switching on `force mode'.") self.force_running = True # End of option_handler_force.
########## def option_handler_harvesting_type(self, option, opt_str, value, parser): """Set the harvesting type to be used. This checks that no harvesting type is already set, and sets the harvesting type to be used to the one specified. If a harvesting type is already set an exception is thrown. The same happens when an unknown type is specified. """ # Check for (in)valid harvesting types. # NOTE: The matching is done in a bit of a complicated # way. This allows the specification of the type to be # case-insensitive while still ending up with the properly # `cased' version afterwards. value = value.lower() harvesting_types_lowered = [i.lower() for i in self.harvesting_types] try: type_index = harvesting_types_lowered.index(value) # If this works, we now have the index to the `properly # cased' version of the harvesting type. except ValueError: self.logger.fatal("Unknown harvesting type `%s'" % \ value) self.logger.fatal(" possible types are: %s" % ", ".join(self.harvesting_types)) raise Usage("Unknown harvesting type `%s'" % \ value) # Check if multiple (by definition conflicting) harvesting # types are being specified. if not self.harvesting_type is None: msg = "Only one harvesting type should be specified" self.logger.fatal(msg) raise Usage(msg) self.harvesting_type = self.harvesting_types[type_index] self.logger.info("Harvesting type to be used: `%s'" % \ self.harvesting_type) # End of option_handler_harvesting_type. ########## def option_handler_harvesting_mode(self, option, opt_str, value, parser): """Set the harvesting mode to be used. Single-step harvesting can be used for samples that are located completely at a single site (= SE). Otherwise use two-step mode. """ # Check for valid mode. harvesting_mode = value.lower() if not harvesting_mode in self.harvesting_modes: msg = "Unknown harvesting mode `%s'" % harvesting_mode self.logger.fatal(msg) self.logger.fatal(" possible modes are: %s" % \ ", ".join(self.harvesting_modes)) raise Usage(msg) # Check if we've been given only a single mode, otherwise # complain. if not self.harvesting_mode is None: msg = "Only one harvesting mode should be specified" self.logger.fatal(msg) raise Usage(msg) self.harvesting_mode = harvesting_mode self.logger.info("Harvesting mode to be used: `%s'" % \ self.harvesting_mode) # End of option_handler_harvesting_mode. ########## def option_handler_globaltag(self, option, opt_str, value, parser): """Set the GlobalTag to be used, overriding our own choices. By default the cmsHarvester will use the GlobalTag with which a given dataset was created also for the harvesting. The --globaltag option is the way to override this behaviour. """ # Make sure that this flag only occurred once. if not self.globaltag is None: msg = "Only one GlobalTag should be specified" self.logger.fatal(msg) raise Usage(msg) self.globaltag = value self.logger.info("GlobalTag to be used: `%s'" % \ self.globaltag) # End of option_handler_globaltag. ########## def option_handler_no_ref_hists(self, option, opt_str, value, parser): "Switch use of all reference histograms off." self.use_ref_hists = False self.logger.warning("Switching off all use of reference histograms") # End of option_handler_no_ref_hists. ########## def option_handler_frontier_connection(self, option, opt_str, value, parser): """Override the default Frontier connection string. Please only use this for testing (e.g. when a test payload has been inserted into cms_orc_off instead of cms_orc_on). 
This method gets called for three different command line options: - --frontier-connection, - --frontier-connection-for-globaltag, - --frontier-connection-for-refhists. Appropriate care has to be taken to make sure things are only specified once. """ # Figure out with which command line option we've been called. frontier_type = opt_str.split("-")[-1] if frontier_type == "connection": # Main option: change all connection strings. frontier_types = self.frontier_connection_name.keys() else: frontier_types = [frontier_type] # Make sure that each Frontier connection is specified only # once. (Okay, in a bit of a dodgy way...) for connection_name in frontier_types: if self.frontier_connection_overridden[connection_name] == True: msg = "Please specify either:\n" \ " `--frontier-connection' to change the " \ "Frontier connection used for everything, or\n" \ "either one or both of\n" \ " `--frontier-connection-for-globaltag' to " \ "change the Frontier connection used for the " \ "GlobalTag and\n" \ " `--frontier-connection-for-refhists' to change " \ "the Frontier connection used for the " \ "reference histograms." self.logger.fatal(msg) raise Usage(msg) frontier_prefix = "frontier://" if not value.startswith(frontier_prefix): msg = "Expecting Frontier connections to start with " \ "`%s'. You specified `%s'." % \ (frontier_prefix, value) self.logger.fatal(msg) raise Usage(msg) # We also kind of expect this to be either FrontierPrep or # FrontierProd (but this is just a warning). if value.find("FrontierProd") < 0 and \ value.find("FrontierPrep") < 0: msg = "Expecting Frontier connections to contain either " \ "`FrontierProd' or `FrontierPrep'. You specified " \ "`%s'. Are you sure?" % \ value self.logger.warning(msg) if not value.endswith("/"): value += "/" for connection_name in frontier_types: self.frontier_connection_name[connection_name] = value self.frontier_connection_overridden[connection_name] = True frontier_type_str = "unknown" if connection_name == "globaltag": frontier_type_str = "the GlobalTag" elif connection_name == "refhists": frontier_type_str = "the reference histograms" self.logger.warning("Overriding default Frontier " \ "connection for %s " \ "with `%s'" % \ (frontier_type_str, self.frontier_connection_name[connection_name])) # End of option_handler_frontier_connection ########## def option_handler_input_todofile(self, option, opt_str, value, parser): self.todofile = value # End of option_handler_input_todofile. ########## def option_handler_input_Jsonfile(self, option, opt_str, value, parser): self.Jsonfilename = value # End of option_handler_input_Jsonfile. ########## def option_handler_input_Jsonrunfile(self, option, opt_str, value, parser): self.Jsonrunfilename = value # End of option_handler_input_Jsonrunfile. ########## def option_handler_input_spec(self, option, opt_str, value, parser): """TODO TODO TODO Document this... """ # Figure out if we were called for the `use these' or the # `ignore these' case. if opt_str.lower().find("ignore") > -1: spec_type = "ignore" else: spec_type = "use" # Similar: are we being called for datasets or for runs?
if opt_str.lower().find("dataset") > -1: select_type = "datasets" else: select_type = "runs" if not self.input_method[select_type][spec_type] is None: msg = "Please only specify one input method " \ "(for the `%s' case)" % opt_str self.logger.fatal(msg) raise Usage(msg) input_method = opt_str.replace("-", "").replace("ignore", "") self.input_method[select_type][spec_type] = input_method self.input_name[select_type][spec_type] = value self.logger.debug("Input method for the `%s' case: %s" % \ (spec_type, input_method)) # End of option_handler_input_spec ########## def option_handler_book_keeping_file(self, option, opt_str, value, parser): """Store the name of the file to be used for book keeping. The only check done here is that only a single book keeping file is specified. """ file_name = value if not self.book_keeping_file_name is None: msg = "Only one book keeping file should be specified" self.logger.fatal(msg) raise Usage(msg) self.book_keeping_file_name = file_name self.logger.info("Book keeping file to be used: `%s'" % \ self.book_keeping_file_name) # End of option_handler_book_keeping_file. ########## def option_handler_ref_hist_mapping_file(self, option, opt_str, value, parser): """Store the name of the file for the ref. histogram mapping. """ file_name = value if not self.ref_hist_mappings_file_name is None: msg = "Only one reference histogram mapping file " \ "should be specified" self.logger.fatal(msg) raise Usage(msg) self.ref_hist_mappings_file_name = file_name self.logger.info("Reference histogram mapping file " \ "to be used: `%s'" % \ self.ref_hist_mappings_file_name) # End of option_handler_ref_hist_mapping_file. ########## # OBSOLETE OBSOLETE OBSOLETE ## def option_handler_dataset_name(self, option, opt_str, value, parser): ## """Specify the name(s) of the dataset(s) to be processed. ## It is checked to make sure that no dataset name or listfile ## names are given yet. If all is well (i.e. we still have a ## clean slate) the dataset name is stored for later use, ## otherwise a Usage exception is raised. ## """ ## if not self.input_method is None: ## if self.input_method == "dataset": ## raise Usage("Please only feed me one dataset specification") ## elif self.input_method == "listfile": ## raise Usage("Cannot specify both dataset and input list file") ## else: ## assert False, "Unknown input method `%s'" % self.input_method ## self.input_method = "dataset" ## self.input_name = value ## self.logger.info("Input method used: %s" % self.input_method) ## # End of option_handler_dataset_name. ## ########## ## def option_handler_listfile_name(self, option, opt_str, value, parser): ## """Specify the input list file containing datasets to be processed. ## It is checked to make sure that no dataset name or listfile ## names are given yet. If all is well (i.e. we still have a ## clean slate) the listfile name is stored for later use, ## otherwise a Usage exception is raised. ## """ ## if not self.input_method is None: ## if self.input_method == "listfile": ## raise Usage("Please only feed me one list file") ## elif self.input_method == "dataset": ## raise Usage("Cannot specify both dataset and input list file") ## else: ## assert False, "Unknown input method `%s'" % self.input_method ## self.input_method = "listfile" ## self.input_name = value ## self.logger.info("Input method used: %s" % self.input_method) ## # End of option_handler_listfile_name. 
# OBSOLETE OBSOLETE OBSOLETE end ########## def option_handler_castor_dir(self, option, opt_str, value, parser): """Specify where on CASTOR the output should go. At the moment only output to CERN CASTOR is supported. Eventually the harvested results should go into the central place for DQM on CASTOR anyway. """ # Check format of specified CASTOR area. castor_dir = value #castor_dir = castor_dir.lstrip(os.path.sep) castor_prefix = self.castor_prefix # Add a leading slash if necessary and clean up the path. castor_dir = os.path.join(os.path.sep, castor_dir) self.castor_base_dir = os.path.normpath(castor_dir) self.logger.info("CASTOR (base) area to be used: `%s'" % \ self.castor_base_dir) # End of option_handler_castor_dir. ########## def option_handler_no_t1access(self, option, opt_str, value, parser): """Set the self.no_t1access flag to try and create jobs that run without special `t1access' role. """ self.non_t1access = True self.logger.warning("Running in `non-t1access' mode. " \ "Will try to create jobs that run " \ "without special rights but no " \ "further promises...") # End of option_handler_no_t1access. ########## def option_handler_caf_access(self, option, opt_str, value, parser): """Set the self.caf_access flag to try and create jobs that run on the CAF. """ self.caf_access = True self.logger.warning("Running in `caf_access' mode. " \ "Will try to create jobs that run " \ "on CAF but no " \ "further promises...") # End of option_handler_caf_access. ########## def option_handler_saveByLumiSection(self, option, opt_str, value, parser): """Set process.dqmSaver.saveByLumiSection=1 in cfg harvesting file """ self.saveByLumiSection = True self.logger.warning("warning concerning saveByLumiSection option") # End of option_handler_saveByLumiSection. ########## def option_handler_crab_submission(self, option, opt_str, value, parser): """Crab jobs are not created and "submitted automatically", """ self.crab_submission = True # End of option_handler_crab_submission. ########## def option_handler_sites(self, option, opt_str, value, parser): self.nr_max_sites = value ########## def option_handler_preferred_site(self, option, opt_str, value, parser): self.preferred_site = value ########## def option_handler_list_types(self, option, opt_str, value, parser): """List all harvesting types and their mappings. This lists all implemented harvesting types with their corresponding mappings to sequence names. This had to be separated out from the help since it depends on the CMSSW version and was making things a bit of a mess. NOTE: There is no way (at least not that I could come up with) to code this in a neat generic way that can be read both by this method and by setup_harvesting_info(). Please try hard to keep these two methods in sync!
""" sep_line = "-" * 50 sep_line_short = "-" * 20 print(sep_line) print("The following harvesting types are available:") print(sep_line) print("`RelVal' maps to:") print(" pre-3_3_0 : HARVESTING:validationHarvesting") print(" 3_4_0_pre2 and later: HARVESTING:validationHarvesting+dqmHarvesting") print(" Exceptions:") print(" 3_3_0_pre1-4 : HARVESTING:validationHarvesting") print(" 3_3_0_pre6 : HARVESTING:validationHarvesting") print(" 3_4_0_pre1 : HARVESTING:validationHarvesting") print(sep_line_short) print("`RelValFS' maps to:") print(" always : HARVESTING:validationHarvestingFS") print(sep_line_short) print("`MC' maps to:") print(" always : HARVESTING:validationprodHarvesting") print(sep_line_short) print("`DQMOffline' maps to:") print(" always : HARVESTING:dqmHarvesting") print(sep_line) # We're done, let's quit. (This is the same thing optparse # does after printing the help.) raise SystemExit # End of option_handler_list_types. ########## def setup_harvesting_info(self): """Fill our dictionary with all info needed to understand harvesting. This depends on the CMSSW version since at some point the names and sequences were modified. NOTE: There is no way (at least not that I could come up with) to code this in a neat generic way that can be read both by this method and by option_handler_list_types(). Please try hard to keep these two methods in sync! """ assert not self.cmssw_version is None, \ "ERROR setup_harvesting() requires " \ "self.cmssw_version to be set!!!" harvesting_info = {} # This is the version-independent part. harvesting_info["DQMOffline"] = {} harvesting_info["DQMOffline"]["beamspot"] = None harvesting_info["DQMOffline"]["eventcontent"] = None harvesting_info["DQMOffline"]["harvesting"] = "AtRunEnd" harvesting_info["RelVal"] = {} harvesting_info["RelVal"]["beamspot"] = None harvesting_info["RelVal"]["eventcontent"] = None harvesting_info["RelVal"]["harvesting"] = "AtRunEnd" harvesting_info["RelValFS"] = {} harvesting_info["RelValFS"]["beamspot"] = None harvesting_info["RelValFS"]["eventcontent"] = None harvesting_info["RelValFS"]["harvesting"] = "AtRunEnd" harvesting_info["MC"] = {} harvesting_info["MC"]["beamspot"] = None harvesting_info["MC"]["eventcontent"] = None harvesting_info["MC"]["harvesting"] = "AtRunEnd" # This is the version-dependent part. And I know, strictly # speaking it's not necessary to fill in all three types since # in a single run we'll only use one type anyway. This does # look more readable, however, and required less thought from # my side when I put this together. # DEBUG DEBUG DEBUG # Check that we understand our own version naming. assert self.cmssw_version.startswith("CMSSW_") # DEBUG DEBUG DEBUG end version = self.cmssw_version[6:] #---------- # RelVal step_string = None if version < "3_3_0": step_string = "validationHarvesting" elif version in ["3_3_0_pre1", "3_3_0_pre2", "3_3_0_pre3", "3_3_0_pre4", "3_3_0_pre6", "3_4_0_pre1"]: step_string = "validationHarvesting" else: step_string = "validationHarvesting+dqmHarvesting" harvesting_info["RelVal"]["step_string"] = step_string # DEBUG DEBUG DEBUG # Let's make sure we found something. 
assert not step_string is None, \ "ERROR Could not decide a RelVal harvesting sequence " \ "for CMSSW version %s" % self.cmssw_version # DEBUG DEBUG DEBUG end #---------- # RelVal step_string = "validationHarvestingFS" harvesting_info["RelValFS"]["step_string"] = step_string #---------- # MC step_string = "validationprodHarvesting" harvesting_info["MC"]["step_string"] = step_string # DEBUG DEBUG DEBUG # Let's make sure we found something. assert not step_string is None, \ "ERROR Could not decide a MC harvesting " \ "sequence for CMSSW version %s" % self.cmssw_version # DEBUG DEBUG DEBUG end #---------- # DQMOffline step_string = "dqmHarvesting" harvesting_info["DQMOffline"]["step_string"] = step_string #---------- self.harvesting_info = harvesting_info self.logger.info("Based on the CMSSW version (%s) " \ "I decided to use the `HARVESTING:%s' " \ "sequence for %s harvesting" % \ (self.cmssw_version, self.harvesting_info[self.harvesting_type]["step_string"], self.harvesting_type)) # End of setup_harvesting_info. ########## def create_castor_path_name_common(self, dataset_name): """Build the common part of the output path to be used on CASTOR. This consists of the CASTOR area base path specified by the user and a piece depending on the data type (data vs. MC), the harvesting type and the dataset name followed by a piece containing the run number and event count. (See comments in create_castor_path_name_special for details.) This method creates the common part, without run number and event count. """ castor_path = self.castor_base_dir ### # The data type: data vs. mc. datatype = self.datasets_information[dataset_name]["datatype"] datatype = datatype.lower() castor_path = os.path.join(castor_path, datatype) # The harvesting type. harvesting_type = self.harvesting_type harvesting_type = harvesting_type.lower() castor_path = os.path.join(castor_path, harvesting_type) # The CMSSW release version (only the `digits'). Note that the # CMSSW version used here is the version used for harvesting, # not the one from the dataset. This does make the results # slightly harder to find. On the other hand it solves # problems in case one re-harvests a given dataset with a # different CMSSW version, which would lead to ambiguous path # names. (Of course for many cases the harvesting is done with # the same CMSSW version the dataset was created with.) release_version = self.cmssw_version release_version = release_version.lower(). \ replace("cmssw", ""). \ strip("_") castor_path = os.path.join(castor_path, release_version) # The dataset name. dataset_name_escaped = self.escape_dataset_name(dataset_name) castor_path = os.path.join(castor_path, dataset_name_escaped) ### castor_path = os.path.normpath(castor_path) # End of create_castor_path_name_common. return castor_path ########## def create_castor_path_name_special(self, dataset_name, run_number, castor_path_common): """Create the specialised part of the CASTOR output dir name. NOTE: To avoid clashes with `incremental harvesting' (re-harvesting when a dataset grows) we have to include the event count in the path name. The underlying `problem' is that CRAB does not overwrite existing output files so if the output file already exists CRAB will fail to copy back the output. NOTE: It's not possible to create different kinds of harvesting jobs in a single call to this tool. However, in principle it could be possible to create both data and MC jobs in a single go. 
NOTE: The number of events used in the path name is the _total_ number of events in the dataset/run at the time of harvesting. If we're doing partial harvesting the final results will reflect lower statistics. This is a) the easiest to code and b) the least likely to lead to confusion if someone ever decides to swap/copy around file blocks between sites. """ castor_path = castor_path_common ### # The run number part. castor_path = os.path.join(castor_path, "run_%d" % run_number) ### # The event count (i.e. the number of events we currently see # for this dataset). #nevents = self.datasets_information[dataset_name] \ # ["num_events"][run_number] castor_path = os.path.join(castor_path, "nevents") ### castor_path = os.path.normpath(castor_path) # End of create_castor_path_name_special. return castor_path ########## def create_and_check_castor_dirs(self): """Make sure all required CASTOR output dirs exist. This checks the CASTOR base dir specified by the user as well as all the subdirs required by the current set of jobs. """ self.logger.info("Checking (and if necessary creating) CASTOR " \ "output area(s)...") # Call the real checker method for the base dir. self.create_and_check_castor_dir(self.castor_base_dir) # Now call the checker for all (unique) subdirs. castor_dirs = [] for (dataset_name, runs) in six.iteritems(self.datasets_to_use): for run in runs: castor_dirs.append(self.datasets_information[dataset_name] \ ["castor_path"][run]) castor_dirs_unique = sorted(set(castor_dirs)) # This can take some time. E.g. CRAFT08 has > 300 runs, each # of which will get a new directory. So we show some (rough) # info in between. ndirs = len(castor_dirs_unique) step = max(ndirs / 10, 1) for (i, castor_dir) in enumerate(castor_dirs_unique): if (i + 1) % step == 0 or \ (i + 1) == ndirs: self.logger.info(" %d/%d" % \ (i + 1, ndirs)) self.create_and_check_castor_dir(castor_dir) # Now check if the directory is empty. If (an old version # of) the output file already exists CRAB will run new # jobs but never copy the results back. We assume the user # knows what they are doing and only issue a warning in # case the directory is not empty. self.logger.debug("Checking if path `%s' is empty" % \ castor_dir) cmd = "rfdir %s" % castor_dir (status, output) = commands.getstatusoutput(cmd) if status != 0: msg = "Could not access directory `%s'" \ " !!! This is bad since I should have just" \ " created it !!!" % castor_dir self.logger.fatal(msg) raise Error(msg) if len(output) > 0: self.logger.warning("Output directory `%s' is not empty:" \ " new jobs will fail to" \ " copy back output" % \ castor_dir) # End of create_and_check_castor_dirs. ########## def create_and_check_castor_dir(self, castor_dir): """Check existence of the give CASTOR dir, if necessary create it. Some special care has to be taken with several things like setting the correct permissions such that CRAB can store the output results. Of course this means that things like /castor/cern.ch/ and user/j/ have to be recognised and treated properly. NOTE: Only CERN CASTOR area (/castor/cern.ch/) supported for the moment. NOTE: This method uses some slightly tricky caching to make sure we don't keep over and over checking the same base paths. """ ### # Local helper function to fully split a path into pieces. def split_completely(path): (parent_path, name) = os.path.split(path) if name == "": return (parent_path, ) else: return split_completely(parent_path) + (name, ) ### # Local helper function to check rfio (i.e. CASTOR) # directories. 
def extract_permissions(rfstat_output): """Parse the output from rfstat and return the 5-digit permissions string.""" permissions_line = [i for i in output.split("\n") \ if i.lower().find("protection") > -1] regexp = re.compile(".*\(([0123456789]{5})\).*") match = regexp.search(rfstat_output) if not match or len(match.groups()) != 1: msg = "Could not extract permissions " \ "from output: %s" % rfstat_output self.logger.fatal(msg) raise Error(msg) permissions = match.group(1) # End of extract_permissions. return permissions ### # These are the pieces of CASTOR directories that we do not # want to touch when modifying permissions. # NOTE: This is all a bit involved, basically driven by the # fact that one wants to treat the `j' directory of # `/castor/cern.ch/user/j/jhegeman/' specially. # BUG BUG BUG # This should be simplified, for example by comparing to the # CASTOR prefix or something like that. # BUG BUG BUG end castor_paths_dont_touch = { 0: ["/", "castor", "cern.ch", "cms", "store", "temp", "dqm", "offline", "user"], -1: ["user", "store"] } self.logger.debug("Checking CASTOR path `%s'" % castor_dir) ### # First we take the full CASTOR path apart. castor_path_pieces = split_completely(castor_dir) # Now slowly rebuild the CASTOR path and see if a) all # permissions are set correctly and b) the final destination # exists. path = "" check_sizes = sorted(castor_paths_dont_touch.keys()) len_castor_path_pieces = len(castor_path_pieces) for piece_index in range (len_castor_path_pieces): skip_this_path_piece = False piece = castor_path_pieces[piece_index] ## self.logger.debug("Checking CASTOR path piece `%s'" % \ ## piece) for check_size in check_sizes: # Do we need to do anything with this? if (piece_index + check_size) > -1: ## self.logger.debug("Checking `%s' against `%s'" % \ ## (castor_path_pieces[piece_index + check_size], ## castor_paths_dont_touch[check_size])) if castor_path_pieces[piece_index + check_size] in castor_paths_dont_touch[check_size]: ## self.logger.debug(" skipping") skip_this_path_piece = True ## else: ## # Piece not in the list, fine. ## self.logger.debug(" accepting") # Add piece to the path we're building. ## self.logger.debug("!!! Skip path piece `%s'? %s" % \ ## (piece, str(skip_this_path_piece))) ## self.logger.debug("Adding piece to path...") path = os.path.join(path, piece) ## self.logger.debug("Path is now `%s'" % \ ## path) # Hmmmm, only at this point can we do some caching. Not # ideal, but okay. try: if path in self.castor_path_checks_cache: continue except AttributeError: # This only happens the first time around. self.castor_path_checks_cache = [] self.castor_path_checks_cache.append(path) # Now, unless we're supposed to skip this piece of the # path, let's make sure it exists and set the permissions # correctly for use by CRAB. This means that: # - the final output directory should (at least) have # permissions 775 # - all directories above that should (at least) have # permissions 755. # BUT: Even though the above permissions are the usual # ones to used when setting up CASTOR areas for grid job # output, there is one caveat in case multiple people are # working in the same CASTOR area. If user X creates # /a/b/c/ and user Y wants to create /a/b/d/ he/she does # not have sufficient rights. So: we set all dir # permissions to 775 to avoid this. if not skip_this_path_piece: # Ok, first thing: let's make sure this directory # exists. # NOTE: The nice complication is of course that the # usual os.path.isdir() etc. methods don't work for an # rfio filesystem. 
So we call rfstat and interpret an # error as meaning that the path does not exist. self.logger.debug("Checking if path `%s' exists" % \ path) cmd = "rfstat %s" % path (status, output) = commands.getstatusoutput(cmd) if status != 0: # Path does not exist, let's try and create it. self.logger.debug("Creating path `%s'" % path) cmd = "nsmkdir -m 775 %s" % path (status, output) = commands.getstatusoutput(cmd) if status != 0: msg = "Could not create directory `%s'" % path self.logger.fatal(msg) raise Error(msg) cmd = "rfstat %s" % path (status, output) = commands.getstatusoutput(cmd) # Now check that it looks like a directory. If I'm not # mistaken one can deduce this from the fact that the # (octal) permissions string starts with `40' (instead # of `100'). permissions = extract_permissions(output) if not permissions.startswith("40"): msg = "Path `%s' is not a directory(?)" % path self.logger.fatal(msg) raise Error(msg) # Figure out the current permissions for this # (partial) path. self.logger.debug("Checking permissions for path `%s'" % path) cmd = "rfstat %s" % path (status, output) = commands.getstatusoutput(cmd) if status != 0: msg = "Could not obtain permissions for directory `%s'" % \ path self.logger.fatal(msg) raise Error(msg) # Take the last three digits of the permissions. permissions = extract_permissions(output)[-3:] # Now if necessary fix permissions. # NOTE: Be careful never to `downgrade' permissions. if piece_index == (len_castor_path_pieces - 1): # This means we're looking at the final # destination directory. permissions_target = "775" else: # `Only' an intermediate directory. permissions_target = "775" # Compare permissions. permissions_new = [] for (i, j) in zip(permissions, permissions_target): permissions_new.append(str(max(int(i), int(j)))) permissions_new = "".join(permissions_new) self.logger.debug(" current permissions: %s" % \ permissions) self.logger.debug(" target permissions : %s" % \ permissions_target) if permissions_new != permissions: # We have to modify the permissions. self.logger.debug("Changing permissions of `%s' " \ "to %s (were %s)" % \ (path, permissions_new, permissions)) cmd = "rfchmod %s %s" % (permissions_new, path) (status, output) = commands.getstatusoutput(cmd) if status != 0: msg = "Could not change permissions for path `%s' " \ "to %s" % (path, permissions_new) self.logger.fatal(msg) raise Error(msg) self.logger.debug(" Permissions ok (%s)" % permissions_new) # End of create_and_check_castor_dir. ########## def pick_a_site(self, sites, cmssw_version): # Create list of forbidden sites sites_forbidden = [] if (self.preferred_site == "CAF") or (self.preferred_site == "caf.cern.ch"): self.caf_access = True if self.caf_access == False: sites_forbidden.append("caf.cern.ch") # These are the T1 sites. These are only forbidden if we're # running in non-T1 mode. # Source: # https://cmsweb.cern.ch/sitedb/sitelist/?naming_scheme=ce # Hard-coded, yes. Not nice, no. 
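# NOTE: A minimal, standalone sketch of the "never downgrade" permission merge
# described above: each octal digit of the current mode is raised to at least
# the corresponding digit of the target mode. The helper name is hypothetical
# and not used elsewhere in this script.
def merge_permissions(current, target):
    """Return the digit-wise maximum of two octal permission strings."""
    assert len(current) == len(target)
    return "".join([str(max(int(i), int(j))) for (i, j) in zip(current, target)])
# Example: merge_permissions("755", "775") gives "775", while
# merge_permissions("777", "775") stays at "777" (no downgrade).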
all_t1 = [ "srm-cms.cern.ch", "ccsrm.in2p3.fr", "cmssrm-fzk.gridka.de", "cmssrm.fnal.gov", "gridka-dCache.fzk.de", "srm-cms.gridpp.rl.ac.uk", "srm.grid.sinica.edu.tw", "srm2.grid.sinica.edu.tw", "srmcms.pic.es", "storm-fe-cms.cr.cnaf.infn.it" ] country_codes = { "CAF" : "caf.cern.ch", "CH" : "srm-cms.cern.ch", "FR" : "ccsrm.in2p3.fr", "DE" : "cmssrm-fzk.gridka.de", "GOV" : "cmssrm.fnal.gov", "DE2" : "gridka-dCache.fzk.de", "UK" : "srm-cms.gridpp.rl.ac.uk", "TW" : "srm.grid.sinica.edu.tw", "TW2" : "srm2.grid.sinica.edu.tw", "ES" : "srmcms.pic.es", "IT" : "storm-fe-cms.cr.cnaf.infn.it" } if self.non_t1access: sites_forbidden.extend(all_t1) for site in sites_forbidden: if site in sites: sites.remove(site) if self.preferred_site in country_codes: self.preferred_site = country_codes[self.preferred_site] if self.preferred_site != "no preference": if self.preferred_site in sites: sites = [self.preferred_site] else: sites= [] #print sites # Looks like we have to do some caching here, otherwise things # become waaaay toooo sloooooow. So that's what the # sites_and_versions_cache does. # NOTE: Keep this set to None! site_name = None cmd = None while len(sites) > 0 and \ site_name is None: # Create list of t1_sites t1_sites = [] for site in sites: if site in all_t1: t1_sites.append(site) if site == "caf.cern.ch": t1_sites.append(site) # If avilable pick preferred site #if self.preferred_site in sites: # se_name = self.preferred_site # Else, if available pick t1 site if len(t1_sites) > 0: se_name = choice(t1_sites) # Else pick any site else: se_name = choice(sites) # But check that it hosts the CMSSW version we want. if se_name in self.sites_and_versions_cache and \ cmssw_version in self.sites_and_versions_cache[se_name]: if self.sites_and_versions_cache[se_name][cmssw_version]: site_name = se_name break else: self.logger.debug(" --> rejecting site `%s'" % se_name) sites.remove(se_name) else: self.logger.info("Checking if site `%s' " \ "has CMSSW version `%s'" % \ (se_name, cmssw_version)) self.sites_and_versions_cache[se_name] = {} # TODO TODO TODO # Test for SCRAM architecture removed as per request # from Andreas. # scram_arch = os.getenv("SCRAM_ARCH") # cmd = "lcg-info --list-ce " \ # "--query '" \ # "Tag=VO-cms-%s," \ # "Tag=VO-cms-%s," \ # "CEStatus=Production," \ # "CloseSE=%s'" % \ # (cmssw_version, scram_arch, se_name) # TODO TODO TODO end cmd = "lcg-info --list-ce " \ "--query '" \ "Tag=VO-cms-%s," \ "CEStatus=Production," \ "CloseSE=%s'" % \ (cmssw_version, se_name) (status, output) = commands.getstatusoutput(cmd) if status != 0: self.logger.error("Could not check site information " \ "for site `%s'" % se_name) else: if (len(output) > 0) or (se_name == "caf.cern.ch"): self.sites_and_versions_cache[se_name][cmssw_version] = True site_name = se_name break else: self.sites_and_versions_cache[se_name][cmssw_version] = False self.logger.debug(" --> rejecting site `%s'" % se_name) sites.remove(se_name) if site_name is self.no_matching_site_found_str: self.logger.error(" --> no matching site found") self.logger.error(" --> Your release or SCRAM " \ "architecture may not be available" \ "anywhere on the (LCG) grid.") if not cmd is None: self.logger.debug(" (command used: `%s')" % cmd) else: self.logger.debug(" --> selected site `%s'" % site_name) # Return something more descriptive (than `None') in case we # found nothing. if site_name is None: site_name = self.no_matching_site_found_str # Keep track of our global flag signifying that this # happened. 
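# NOTE: A rough, standalone sketch of the (site, CMSSW version) availability
# caching used in pick_a_site() above. `query_site_has_version' stands in for
# the real `lcg-info' call and is purely hypothetical.
def site_hosts_version(cache, se_name, cmssw_version, query_site_has_version):
    """Return True if `se_name' hosts `cmssw_version', consulting `cache' first."""
    versions = cache.setdefault(se_name, {})
    if cmssw_version not in versions:
        versions[cmssw_version] = query_site_has_version(se_name, cmssw_version)
    return versions[cmssw_version]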
self.all_sites_found = False # End of pick_a_site. return site_name ########## def parse_cmd_line_options(self): # Set up the command line parser. Note that we fix up the help # formatter so that we can add some text pointing people to # the Twiki etc. parser = optparse.OptionParser(version="%s %s" % \ ("%prog", self.version), formatter=CMSHarvesterHelpFormatter()) self.option_parser = parser # The debug switch. parser.add_option("-d", "--debug", help="Switch on debug mode", action="callback", callback=self.option_handler_debug) # The quiet switch. parser.add_option("-q", "--quiet", help="Be less verbose", action="callback", callback=self.option_handler_quiet) # The force switch. If this switch is used sanity checks are # performed but failures do not lead to aborts. Use with care. parser.add_option("", "--force", help="Force mode. Do not abort on sanity check " "failures", action="callback", callback=self.option_handler_force) # Choose between the different kinds of harvesting. parser.add_option("", "--harvesting_type", help="Harvesting type: %s" % \ ", ".join(self.harvesting_types), action="callback", callback=self.option_handler_harvesting_type, type="string", metavar="HARVESTING_TYPE") # Choose between single-step and two-step mode. parser.add_option("", "--harvesting_mode", help="Harvesting mode: %s (default = %s)" % \ (", ".join(self.harvesting_modes), self.harvesting_mode_default), action="callback", callback=self.option_handler_harvesting_mode, type="string", metavar="HARVESTING_MODE") # Override the GlobalTag chosen by the cmsHarvester. parser.add_option("", "--globaltag", help="GlobalTag to use. Default is the ones " \ "the dataset was created with for MC, for data" \ "a GlobalTag has to be specified.", action="callback", callback=self.option_handler_globaltag, type="string", metavar="GLOBALTAG") # Allow switching off of reference histograms. parser.add_option("", "--no-ref-hists", help="Don't use any reference histograms", action="callback", callback=self.option_handler_no_ref_hists) # Allow the default (i.e. the one that should be used) # Frontier connection to be overridden. parser.add_option("", "--frontier-connection", help="Use this Frontier connection to find " \ "GlobalTags and LocalTags (for reference " \ "histograms).\nPlease only use this for " \ "testing.", action="callback", callback=self.option_handler_frontier_connection, type="string", metavar="FRONTIER") # Similar to the above but specific to the Frontier connection # to be used for the GlobalTag. parser.add_option("", "--frontier-connection-for-globaltag", help="Use this Frontier connection to find " \ "GlobalTags.\nPlease only use this for " \ "testing.", action="callback", callback=self.option_handler_frontier_connection, type="string", metavar="FRONTIER") # Similar to the above but specific to the Frontier connection # to be used for the reference histograms. parser.add_option("", "--frontier-connection-for-refhists", help="Use this Frontier connection to find " \ "LocalTags (for reference " \ "histograms).\nPlease only use this for " \ "testing.", action="callback", callback=self.option_handler_frontier_connection, type="string", metavar="FRONTIER") # Option to specify the name (or a regexp) of the dataset(s) # to be used. 
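# NOTE: The options defined in parse_cmd_line_options() are all wired through
# optparse `callback' actions so each handler can react (e.g. change the log
# level) as soon as its option is seen. A minimal standalone illustration of
# that pattern, assuming the optparse import this script already uses:
def _callback_option_sketch():
    """Illustrative only: a single callback-driven --debug flag."""
    def handle_debug(option, opt_str, value, parser):
        # optparse callback signature: (option, opt_str, value, parser).
        parser.values.debug = True
    sketch_parser = optparse.OptionParser()
    sketch_parser.add_option("-d", "--debug",
                             help="Switch on debug mode",
                             action="callback",
                             callback=handle_debug)
    (options, args) = sketch_parser.parse_args(["-d"])
    return options.debug  # True when -d was given.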
parser.add_option("", "--dataset", help="Name (or regexp) of dataset(s) to process", action="callback", #callback=self.option_handler_dataset_name, callback=self.option_handler_input_spec, type="string", #dest="self.input_name", metavar="DATASET") # Option to specify the name (or a regexp) of the dataset(s) # to be ignored. parser.add_option("", "--dataset-ignore", help="Name (or regexp) of dataset(s) to ignore", action="callback", callback=self.option_handler_input_spec, type="string", metavar="DATASET-IGNORE") # Option to specify the name (or a regexp) of the run(s) # to be used. parser.add_option("", "--runs", help="Run number(s) to process", action="callback", callback=self.option_handler_input_spec, type="string", metavar="RUNS") # Option to specify the name (or a regexp) of the run(s) # to be ignored. parser.add_option("", "--runs-ignore", help="Run number(s) to ignore", action="callback", callback=self.option_handler_input_spec, type="string", metavar="RUNS-IGNORE") # Option to specify a file containing a list of dataset names # (or regexps) to be used. parser.add_option("", "--datasetfile", help="File containing list of dataset names " \ "(or regexps) to process", action="callback", #callback=self.option_handler_listfile_name, callback=self.option_handler_input_spec, type="string", #dest="self.input_name", metavar="DATASETFILE") # Option to specify a file containing a list of dataset names # (or regexps) to be ignored. parser.add_option("", "--datasetfile-ignore", help="File containing list of dataset names " \ "(or regexps) to ignore", action="callback", callback=self.option_handler_input_spec, type="string", metavar="DATASETFILE-IGNORE") # Option to specify a file containing a list of runs to be # used. parser.add_option("", "--runslistfile", help="File containing list of run numbers " \ "to process", action="callback", callback=self.option_handler_input_spec, type="string", metavar="RUNSLISTFILE") # Option to specify a file containing a list of runs # to be ignored. parser.add_option("", "--runslistfile-ignore", help="File containing list of run numbers " \ "to ignore", action="callback", callback=self.option_handler_input_spec, type="string", metavar="RUNSLISTFILE-IGNORE") # Option to specify a Jsonfile contaning a list of runs # to be used. parser.add_option("", "--Jsonrunfile", help="Jsonfile containing dictionary of run/lumisections pairs. " \ "All lumisections of runs contained in dictionary are processed.", action="callback", callback=self.option_handler_input_Jsonrunfile, type="string", metavar="JSONRUNFILE") # Option to specify a Jsonfile contaning a dictionary of run/lumisections pairs # to be used. parser.add_option("", "--Jsonfile", help="Jsonfile containing dictionary of run/lumisections pairs. " \ "Only specified lumisections of runs contained in dictionary are processed.", action="callback", callback=self.option_handler_input_Jsonfile, type="string", metavar="JSONFILE") # Option to specify a ToDo file contaning a list of runs # to be used. parser.add_option("", "--todo-file", help="Todo file containing a list of runs to process.", action="callback", callback=self.option_handler_input_todofile, type="string", metavar="TODO-FILE") # Option to specify which file to use for the dataset name to # reference histogram name mappings. parser.add_option("", "--refhistmappingfile", help="File to be use for the reference " \ "histogram mappings. Default: `%s'." 
% \ self.ref_hist_mappings_file_name_default, action="callback", callback=self.option_handler_ref_hist_mapping_file, type="string", metavar="REFHISTMAPPING-FILE") # Specify the place in CASTOR where the output should go. # NOTE: Only output to CASTOR is supported for the moment, # since the central DQM results place is on CASTOR anyway. parser.add_option("", "--castordir", help="Place on CASTOR to store results. " \ "Default: `%s'." % \ self.castor_base_dir_default, action="callback", callback=self.option_handler_castor_dir, type="string", metavar="CASTORDIR") # Use this to try and create jobs that will run without # special `t1access' role. parser.add_option("", "--no-t1access", help="Try to create jobs that will run " \ "without special `t1access' role", action="callback", callback=self.option_handler_no_t1access) # Use this to create jobs that may run on CAF parser.add_option("", "--caf-access", help="Crab jobs may run " \ "on CAF", action="callback", callback=self.option_handler_caf_access) # set process.dqmSaver.saveByLumiSection=1 in harvesting cfg file parser.add_option("", "--saveByLumiSection", help="set saveByLumiSection=1 in harvesting cfg file", action="callback", callback=self.option_handler_saveByLumiSection) # Use this to enable automatic creation and submission of crab jobs parser.add_option("", "--automatic-crab-submission", help="Crab jobs are created and " \ "submitted automatically", action="callback", callback=self.option_handler_crab_submission) # Option to set the max number of sites, each #job is submitted to parser.add_option("", "--max-sites", help="Max. number of sites each job is submitted to", action="callback", callback=self.option_handler_sites, type="int") # Option to set the preferred site parser.add_option("", "--site", help="Crab jobs are submitted to specified site. T1 sites may be shortened by the following (country) codes: \ srm-cms.cern.ch : CH \ ccsrm.in2p3.fr : FR \ cmssrm-fzk.gridka.de : DE \ cmssrm.fnal.gov : GOV \ gridka-dCache.fzk.de : DE2 \ rm-cms.gridpp.rl.ac.uk : UK \ srm.grid.sinica.edu.tw : TW \ srm2.grid.sinica.edu.tw : TW2 \ srmcms.pic.es : ES \ storm-fe-cms.cr.cnaf.infn.it : IT", action="callback", callback=self.option_handler_preferred_site, type="string") # This is the command line flag to list all harvesting # type-to-sequence mappings. parser.add_option("-l", "--list", help="List all harvesting types and their" \ "corresponding sequence names", action="callback", callback=self.option_handler_list_types) # If nothing was specified: tell the user how to do things the # next time and exit. # NOTE: We just use the OptParse standard way of doing this by # acting as if a '--help' was specified. if len(self.cmd_line_opts) < 1: self.cmd_line_opts = ["--help"] # Some trickery with the options. Why? Well, since these # options change the output level immediately from the option # handlers, the results differ depending on where they are on # the command line. Let's just make sure they are at the # front. # NOTE: Not very efficient or sophisticated, but it works and # it only has to be done once anyway. for i in ["-d", "--debug", "-q", "--quiet"]: if i in self.cmd_line_opts: self.cmd_line_opts.remove(i) self.cmd_line_opts.insert(0, i) # Everything is set up, now parse what we were given. parser.set_defaults() (self.options, self.args) = parser.parse_args(self.cmd_line_opts) # End of parse_cmd_line_options. ########## def check_input_status(self): """Check completeness and correctness of input information. 
Check that all required information has been specified and that, at least as far as can be easily checked, it makes sense. NOTE: This is also where any default values are applied. """ self.logger.info("Checking completeness/correctness of input...") # The cmsHarvester does not take (i.e. understand) any # arguments so there should not be any. if len(self.args) > 0: msg = "Sorry but I don't understand `%s'" % \ (" ".join(self.args)) self.logger.fatal(msg) raise Usage(msg) # BUG BUG BUG # While we wait for some bugs left and right to get fixed, we # disable two-step. if self.harvesting_mode == "two-step": msg = "--------------------\n" \ " Sorry, but for the moment (well, till it works)" \ " the two-step mode has been disabled.\n" \ "--------------------\n" self.logger.fatal(msg) raise Error(msg) # BUG BUG BUG end # We need a harvesting method to be specified if self.harvesting_type is None: msg = "Please specify a harvesting type" self.logger.fatal(msg) raise Usage(msg) # as well as a harvesting mode. if self.harvesting_mode is None: self.harvesting_mode = self.harvesting_mode_default msg = "No harvesting mode specified --> using default `%s'" % \ self.harvesting_mode self.logger.warning(msg) #raise Usage(msg) ### # We need an input method so we can find the dataset name(s). if self.input_method["datasets"]["use"] is None: msg = "Please specify an input dataset name " \ "or a list file name" self.logger.fatal(msg) raise Usage(msg) # DEBUG DEBUG DEBUG # If we get here, we should also have an input name. assert not self.input_name["datasets"]["use"] is None # DEBUG DEBUG DEBUG end ### # The same holds for the reference histogram mapping file (if # we're using references). if self.use_ref_hists: if self.ref_hist_mappings_file_name is None: self.ref_hist_mappings_file_name = self.ref_hist_mappings_file_name_default msg = "No reference histogram mapping file specified --> " \ "using default `%s'" % \ self.ref_hist_mappings_file_name self.logger.warning(msg) ### # We need to know where to put the stuff (okay, the results) # on CASTOR. if self.castor_base_dir is None: self.castor_base_dir = self.castor_base_dir_default msg = "No CASTOR area specified -> using default `%s'" % \ self.castor_base_dir self.logger.warning(msg) #raise Usage(msg) # Only the CERN CASTOR area is supported. if not self.castor_base_dir.startswith(self.castor_prefix): msg = "CASTOR area does not start with `%s'" % \ self.castor_prefix self.logger.fatal(msg) if self.castor_base_dir.find("castor") > -1 and \ not self.castor_base_dir.find("cern.ch") > -1: self.logger.fatal("Only CERN CASTOR is supported") raise Usage(msg) ### # TODO TODO TODO # This should be removed in the future, once I find out how to # get the config file used to create a given dataset from DBS. # For data we need to have a GlobalTag. (For MC we can figure # it out by ourselves.) if self.globaltag is None: self.logger.warning("No GlobalTag specified. This means I cannot") self.logger.warning("run on data, only on MC.") self.logger.warning("I will skip all data datasets.") # TODO TODO TODO end # Make sure the GlobalTag ends with `::All'. if not self.globaltag is None: if not self.globaltag.endswith("::All"): self.logger.warning("Specified GlobalTag `%s' does " \ "not end in `::All' --> " \ "appending this missing piece" % \ self.globaltag) self.globaltag = "%s::All" % self.globaltag ### # Dump some info about the Frontier connections used. 
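# NOTE: A minimal standalone sketch of the GlobalTag normalisation rule applied
# in check_input_status() above: a tag that does not already end in `::All'
# gets that suffix appended. The helper name is hypothetical.
def normalize_globaltag(globaltag):
    """Return `globaltag' with the `::All' suffix guaranteed (None passes through)."""
    if globaltag is None:
        return None
    if not globaltag.endswith("::All"):
        globaltag = "%s::All" % globaltag
    return globaltag
# Example: normalize_globaltag("GR09_P_V1") gives "GR09_P_V1::All".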
for (key, value) in six.iteritems(self.frontier_connection_name): frontier_type_str = "unknown" if key == "globaltag": frontier_type_str = "the GlobalTag" elif key == "refhists": frontier_type_str = "the reference histograms" non_str = None if self.frontier_connection_overridden[key] == True: non_str = "non-" else: non_str = "" self.logger.info("Using %sdefault Frontier " \ "connection for %s: `%s'" % \ (non_str, frontier_type_str, value)) ### # End of check_input_status. ########## def check_cmssw(self): """Check if CMSSW is setup. """ # Try to access the CMSSW_VERSION environment variable. If # it's something useful we consider CMSSW to be set up # properly. Otherwise we raise an error. cmssw_version = os.getenv("CMSSW_VERSION") if cmssw_version is None: self.logger.fatal("It seems CMSSW is not setup...") self.logger.fatal("($CMSSW_VERSION is empty)") raise Error("ERROR: CMSSW needs to be setup first!") self.cmssw_version = cmssw_version self.logger.info("Found CMSSW version %s properly set up" % \ self.cmssw_version) # End of check_cmsssw. return True ########## def check_dbs(self): """Check if DBS is setup. """ # Try to access the DBSCMD_HOME environment variable. If this # looks useful we consider DBS to be set up # properly. Otherwise we raise an error. dbs_home = os.getenv("DBSCMD_HOME") if dbs_home is None: self.logger.fatal("It seems DBS is not setup...") self.logger.fatal(" $DBSCMD_HOME is empty") raise Error("ERROR: DBS needs to be setup first!") ## # Now we try to do a very simple DBS search. If that works ## # instead of giving us the `Unsupported API call' crap, we ## # should be good to go. ## # NOTE: Not ideal, I know, but it reduces the amount of ## # complaints I get... ## cmd = "dbs search --query=\"find dataset where dataset = impossible\"" ## (status, output) = commands.getstatusoutput(cmd) ## pdb.set_trace() ## if status != 0 or \ ## output.lower().find("unsupported api call") > -1: ## self.logger.fatal("It seems DBS is not setup...") ## self.logger.fatal(" %s returns crap:" % cmd) ## for line in output.split("\n"): ## self.logger.fatal(" %s" % line) ## raise Error("ERROR: DBS needs to be setup first!") self.logger.debug("Found DBS properly set up") # End of check_dbs. return True ########## def setup_dbs(self): """Setup the Python side of DBS. For more information see the DBS Python API documentation: https://twiki.cern.ch/twiki/bin/view/CMS/DBSApiDocumentation """ try: args={} args["url"]= "http://cmsdbsprod.cern.ch/cms_dbs_prod_global/" \ "servlet/DBSServlet" api = DbsApi(args) self.dbs_api = api except DBSAPI.dbsApiException.DbsApiException as ex: self.logger.fatal("Caught DBS API exception %s: %s " % \ (ex.getClassName(), ex.getErrorMessage())) if ex.getErrorCode() not in (None, ""): logger.debug("DBS exception error code: ", ex.getErrorCode()) raise # End of setup_dbs. ########## def dbs_resolve_dataset_name(self, dataset_name): """Use DBS to resolve a wildcarded dataset name. """ # DEBUG DEBUG DEBUG # If we get here DBS should have been set up already. assert not self.dbs_api is None # DEBUG DEBUG DEBUG end # Some minor checking to make sure that whatever we've been # given as dataset name actually sounds like a dataset name. if not (dataset_name.startswith("/") and \ dataset_name.endswith("RECO")): self.logger.warning("Dataset name `%s' does not sound " \ "like a valid dataset name!" 
% \ dataset_name) #---------- api = self.dbs_api dbs_query = "find dataset where dataset like %s " \ "and dataset.status = VALID" % \ dataset_name try: api_result = api.executeQuery(dbs_query) except DBSAPI.dbsApiException.DbsApiException: msg = "ERROR: Could not execute DBS query" self.logger.fatal(msg) raise Error(msg) # Setup parsing. handler = DBSXMLHandler(["dataset"]) parser = xml.sax.make_parser() parser.setContentHandler(handler) # Parse. try: xml.sax.parseString(api_result, handler) except SAXParseException: msg = "ERROR: Could not parse DBS server output" self.logger.fatal(msg) raise Error(msg) # DEBUG DEBUG DEBUG assert(handler.check_results_validity()), "ERROR The DBSXMLHandler screwed something up!" # DEBUG DEBUG DEBUG end # Extract the results. datasets = handler.results.values()[0] # End of dbs_resolve_dataset_name. return datasets ########## def dbs_resolve_cmssw_version(self, dataset_name): """Ask DBS for the CMSSW version used to create this dataset. """ # DEBUG DEBUG DEBUG # If we get here DBS should have been set up already. assert not self.dbs_api is None # DEBUG DEBUG DEBUG end api = self.dbs_api dbs_query = "find algo.version where dataset = %s " \ "and dataset.status = VALID" % \ dataset_name try: api_result = api.executeQuery(dbs_query) except DBSAPI.dbsApiException.DbsApiException: msg = "ERROR: Could not execute DBS query" self.logger.fatal(msg) raise Error(msg) handler = DBSXMLHandler(["algo.version"]) parser = xml.sax.make_parser() parser.setContentHandler(handler) try: xml.sax.parseString(api_result, handler) except SAXParseException: msg = "ERROR: Could not parse DBS server output" self.logger.fatal(msg) raise Error(msg) # DEBUG DEBUG DEBUG assert(handler.check_results_validity()), "ERROR The DBSXMLHandler screwed something up!" # DEBUG DEBUG DEBUG end cmssw_version = handler.results.values()[0] # DEBUG DEBUG DEBUG assert len(cmssw_version) == 1 # DEBUG DEBUG DEBUG end cmssw_version = cmssw_version[0] # End of dbs_resolve_cmssw_version. return cmssw_version ########## ## def dbs_resolve_dataset_number_of_events(self, dataset_name): ## """Ask DBS across how many events this dataset has been spread ## out. ## This is especially useful to check that we do not submit a job ## supposed to run on a complete sample that is not contained at ## a single site. ## """ ## # DEBUG DEBUG DEBUG ## # If we get here DBS should have been set up already. ## assert not self.dbs_api is None ## # DEBUG DEBUG DEBUG end ## api = self.dbs_api ## dbs_query = "find count(site) where dataset = %s " \ ## "and dataset.status = VALID" % \ ## dataset_name ## try: ## api_result = api.executeQuery(dbs_query) ## except DbsApiException: ## raise Error("ERROR: Could not execute DBS query") ## try: ## num_events = [] ## class Handler(xml.sax.handler.ContentHandler): ## def startElement(self, name, attrs): ## if name == "result": ## num_events.append(str(attrs["COUNT_STORAGEELEMENT"])) ## xml.sax.parseString(api_result, Handler()) ## except SAXParseException: ## raise Error("ERROR: Could not parse DBS server output") ## # DEBUG DEBUG DEBUG ## assert len(num_events) == 1 ## # DEBUG DEBUG DEBUG end ## num_events = int(num_events[0]) ## # End of dbs_resolve_dataset_number_of_events. ## return num_events ########## def dbs_resolve_runs(self, dataset_name): """Ask DBS for the list of runs in a given dataset. # NOTE: This does not (yet?) skip/remove empty runs. There is # a bug in the DBS entry run.numevents (i.e. it always returns # zero) which should be fixed in the `next DBS release'. 
# See also: # https://savannah.cern.ch/bugs/?53452 # https://savannah.cern.ch/bugs/?53711 """ # TODO TODO TODO # We should remove empty runs as soon as the above mentioned # bug is fixed. # TODO TODO TODO end # DEBUG DEBUG DEBUG # If we get here DBS should have been set up already. assert not self.dbs_api is None # DEBUG DEBUG DEBUG end api = self.dbs_api dbs_query = "find run where dataset = %s " \ "and dataset.status = VALID" % \ dataset_name try: api_result = api.executeQuery(dbs_query) except DBSAPI.dbsApiException.DbsApiException: msg = "ERROR: Could not execute DBS query" self.logger.fatal(msg) raise Error(msg) handler = DBSXMLHandler(["run"]) parser = xml.sax.make_parser() parser.setContentHandler(handler) try: xml.sax.parseString(api_result, handler) except SAXParseException: msg = "ERROR: Could not parse DBS server output" self.logger.fatal(msg) raise Error(msg) # DEBUG DEBUG DEBUG assert(handler.check_results_validity()), "ERROR The DBSXMLHandler screwed something up!" # DEBUG DEBUG DEBUG end runs = handler.results.values()[0] # Turn strings into integers. runs = sorted([int(i) for i in runs]) # End of dbs_resolve_runs. return runs ########## def dbs_resolve_globaltag(self, dataset_name): """Ask DBS for the globaltag corresponding to a given dataset. # BUG BUG BUG # This does not seem to work for data datasets? E.g. for # /Cosmics/Commissioning08_CRAFT0831X_V1_311_ReReco_FromSuperPointing_v1/RAW-RECO # Probaly due to the fact that the GlobalTag changed during # datataking... BUG BUG BUG end """ # DEBUG DEBUG DEBUG # If we get here DBS should have been set up already. assert not self.dbs_api is None # DEBUG DEBUG DEBUG end api = self.dbs_api dbs_query = "find dataset.tag where dataset = %s " \ "and dataset.status = VALID" % \ dataset_name try: api_result = api.executeQuery(dbs_query) except DBSAPI.dbsApiException.DbsApiException: msg = "ERROR: Could not execute DBS query" self.logger.fatal(msg) raise Error(msg) handler = DBSXMLHandler(["dataset.tag"]) parser = xml.sax.make_parser() parser.setContentHandler(parser) try: xml.sax.parseString(api_result, handler) except SAXParseException: msg = "ERROR: Could not parse DBS server output" self.logger.fatal(msg) raise Error(msg) # DEBUG DEBUG DEBUG assert(handler.check_results_validity()), "ERROR The DBSXMLHandler screwed something up!" # DEBUG DEBUG DEBUG end globaltag = handler.results.values()[0] # DEBUG DEBUG DEBUG assert len(globaltag) == 1 # DEBUG DEBUG DEBUG end globaltag = globaltag[0] # End of dbs_resolve_globaltag. return globaltag ########## def dbs_resolve_datatype(self, dataset_name): """Ask DBS for the the data type (data or mc) of a given dataset. """ # DEBUG DEBUG DEBUG # If we get here DBS should have been set up already. assert not self.dbs_api is None # DEBUG DEBUG DEBUG end api = self.dbs_api dbs_query = "find datatype.type where dataset = %s " \ "and dataset.status = VALID" % \ dataset_name try: api_result = api.executeQuery(dbs_query) except DBSAPI.dbsApiException.DbsApiException: msg = "ERROR: Could not execute DBS query" self.logger.fatal(msg) raise Error(msg) handler = DBSXMLHandler(["datatype.type"]) parser = xml.sax.make_parser() parser.setContentHandler(handler) try: xml.sax.parseString(api_result, handler) except SAXParseException: msg = "ERROR: Could not parse DBS server output" self.logger.fatal(msg) raise Error(msg) # DEBUG DEBUG DEBUG assert(handler.check_results_validity()), "ERROR The DBSXMLHandler screwed something up!" 
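# NOTE: The dbs_resolve_* methods above and below all follow the same pattern:
# execute a DBS query, parse the XML reply with DBSXMLHandler, and read the
# single expected column out of handler.results. A condensed, standalone sketch
# of that pattern (assuming the DBSXMLHandler class and xml.sax import this
# script already has); the helper name is hypothetical.
def dbs_single_column(api, dbs_query, column):
    """Run `dbs_query' against DBS and return the list of values for `column'."""
    handler = DBSXMLHandler([column])
    parser = xml.sax.make_parser()
    parser.setContentHandler(handler)
    xml.sax.parseString(api.executeQuery(dbs_query), handler)
    assert handler.check_results_validity()
    return list(handler.results.values())[0]
# Note that the handler (not the parser) must be passed to setContentHandler();
# dbs_resolve_globaltag() above accidentally passes the parser itself.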
# DEBUG DEBUG DEBUG end datatype = handler.results.values()[0] # DEBUG DEBUG DEBUG assert len(datatype) == 1 # DEBUG DEBUG DEBUG end datatype = datatype[0] # End of dbs_resolve_datatype. return datatype ########## # OBSOLETE OBSOLETE OBSOLETE # This method is no longer used. def dbs_resolve_number_of_events(self, dataset_name, run_number=None): """Determine the number of events in a given dataset (and run). Ask DBS for the number of events in a dataset. If a run number is specified the number of events returned is that in that run of that dataset. If problems occur we throw an exception. # BUG BUG BUG # Since DBS does not return the number of events correctly, # neither for runs nor for whole datasets, we have to work # around that a bit... # BUG BUG BUG end """ # DEBUG DEBUG DEBUG # If we get here DBS should have been set up already. assert not self.dbs_api is None # DEBUG DEBUG DEBUG end api = self.dbs_api dbs_query = "find file.name, file.numevents where dataset = %s " \ "and dataset.status = VALID" % \ dataset_name if not run_number is None: dbs_query = dbq_query + (" and run = %d" % run_number) try: api_result = api.executeQuery(dbs_query) except DBSAPI.dbsApiException.DbsApiException: msg = "ERROR: Could not execute DBS query" self.logger.fatal(msg) raise Error(msg) handler = DBSXMLHandler(["file.name", "file.numevents"]) parser = xml.sax.make_parser() parser.setContentHandler(handler) try: xml.sax.parseString(api_result, handler) except SAXParseException: msg = "ERROR: Could not parse DBS server output" self.logger.fatal(msg) raise Error(msg) # DEBUG DEBUG DEBUG assert(handler.check_results_validity()), "ERROR The DBSXMLHandler screwed something up!" # DEBUG DEBUG DEBUG end num_events = sum(handler.results["file.numevents"]) # End of dbs_resolve_number_of_events. return num_events # OBSOLETE OBSOLETE OBSOLETE end ########## ## def dbs_resolve_dataset_number_of_sites(self, dataset_name): ## """Ask DBS across how many sites this dataset has been spread ## out. ## This is especially useful to check that we do not submit a job ## supposed to run on a complete sample that is not contained at ## a single site. ## """ ## # DEBUG DEBUG DEBUG ## # If we get here DBS should have been set up already. ## assert not self.dbs_api is None ## # DEBUG DEBUG DEBUG end ## api = self.dbs_api ## dbs_query = "find count(site) where dataset = %s " \ ## "and dataset.status = VALID" % \ ## dataset_name ## try: ## api_result = api.executeQuery(dbs_query) ## except DbsApiException: ## raise Error("ERROR: Could not execute DBS query") ## try: ## num_sites = [] ## class Handler(xml.sax.handler.ContentHandler): ## def startElement(self, name, attrs): ## if name == "result": ## num_sites.append(str(attrs["COUNT_STORAGEELEMENT"])) ## xml.sax.parseString(api_result, Handler()) ## except SAXParseException: ## raise Error("ERROR: Could not parse DBS server output") ## # DEBUG DEBUG DEBUG ## assert len(num_sites) == 1 ## # DEBUG DEBUG DEBUG end ## num_sites = int(num_sites[0]) ## # End of dbs_resolve_dataset_number_of_sites. ## return num_sites ########## ## def dbs_check_dataset_spread(self, dataset_name): ## """Figure out across how many sites this dataset is spread. ## NOTE: This is something we need to figure out per run, since ## we want to submit harvesting jobs per run. 
## Basically three things can happen with a given dataset: ## - the whole dataset is available on a single site, ## - the whole dataset is available (mirrored) at multiple sites, ## - the dataset is spread across multiple sites and there is no ## single site containing the full dataset in one place. ## NOTE: If all goes well, it should not be possible that ## anything but a _full_ dataset is mirrored. So we ignore the ## possibility in which for example one site contains the full ## dataset and two others mirror half of it. ## ANOTHER NOTE: According to some people this last case _could_ ## actually happen. I will not design for it, but make sure it ## ends up as a false negative, in which case we just loose some ## efficiency and treat the dataset (unnecessarily) as ## spread-out. ## We don't really care about the first two possibilities, but in ## the third case we need to make sure to run the harvesting in ## two-step mode. ## This method checks with DBS which of the above cases is true ## for the dataset name given, and returns a 1 for the first two ## cases, and the number of sites across which the dataset is ## spread for the third case. ## The way in which this is done is by asking how many files each ## site has for the dataset. In the first case there is only one ## site, in the second case all sites should have the same number ## of files (i.e. the total number of files in the dataset) and ## in the third case the file counts from all sites should add up ## to the total file count for the dataset. ## """ ## # DEBUG DEBUG DEBUG ## # If we get here DBS should have been set up already. ## assert not self.dbs_api is None ## # DEBUG DEBUG DEBUG end ## api = self.dbs_api ## dbs_query = "find run, run.numevents, site, file.count " \ ## "where dataset = %s " \ ## "and dataset.status = VALID" % \ ## dataset_name ## try: ## api_result = api.executeQuery(dbs_query) ## except DbsApiException: ## msg = "ERROR: Could not execute DBS query" ## self.logger.fatal(msg) ## raise Error(msg) ## # Index things by run number. No cross-check is done to make ## # sure we get results for each and every run in the ## # dataset. I'm not sure this would make sense since we'd be ## # cross-checking DBS info with DBS info anyway. Note that we ## # use the file count per site to see if we're dealing with an ## # incomplete vs. a mirrored dataset. ## sample_info = {} ## try: ## class Handler(xml.sax.handler.ContentHandler): ## def startElement(self, name, attrs): ## if name == "result": ## run_number = int(attrs["RUNS_RUNNUMBER"]) ## site_name = str(attrs["STORAGEELEMENT_SENAME"]) ## file_count = int(attrs["COUNT_FILES"]) ## # BUG BUG BUG ## # Doh! For some reason DBS never returns any other ## # event count than zero. ## event_count = int(attrs["RUNS_NUMBEROFEVENTS"]) ## # BUG BUG BUG end ## info = (site_name, file_count, event_count) ## try: ## sample_info[run_number].append(info) ## except KeyError: ## sample_info[run_number] = [info] ## xml.sax.parseString(api_result, Handler()) ## except SAXParseException: ## msg = "ERROR: Could not parse DBS server output" ## self.logger.fatal(msg) ## raise Error(msg) ## # Now translate this into a slightly more usable mapping. ## sites = {} ## for (run_number, site_info) in six.iteritems(sample_info): ## # Quick-n-dirty trick to see if all file counts are the ## # same. ## unique_file_counts = set([i[1] for i in site_info]) ## if len(unique_file_counts) == 1: ## # Okay, so this must be a mirrored dataset. ## # We have to pick one but we have to be careful. 
We ## # cannot submit to things like a T0, a T1, or CAF. ## site_names = [self.pick_a_site([i[0] for i in site_info])] ## nevents = [site_info[0][2]] ## else: ## # Looks like this is a spread-out sample. ## site_names = [i[0] for i in site_info] ## nevents = [i[2] for i in site_info] ## sites[run_number] = zip(site_names, nevents) ## self.logger.debug("Sample `%s' spread is:" % dataset_name) ## run_numbers = sites.keys() ## run_numbers.sort() ## for run_number in run_numbers: ## self.logger.debug(" run # %6d: %d sites (%s)" % \ ## (run_number, ## len(sites[run_number]), ## ", ".join([i[0] for i in sites[run_number]]))) ## # End of dbs_check_dataset_spread. ## return sites ## # DEBUG DEBUG DEBUG ## # Just kept for debugging now. ## def dbs_check_dataset_spread_old(self, dataset_name): ## """Figure out across how many sites this dataset is spread. ## NOTE: This is something we need to figure out per run, since ## we want to submit harvesting jobs per run. ## Basically three things can happen with a given dataset: ## - the whole dataset is available on a single site, ## - the whole dataset is available (mirrored) at multiple sites, ## - the dataset is spread across multiple sites and there is no ## single site containing the full dataset in one place. ## NOTE: If all goes well, it should not be possible that ## anything but a _full_ dataset is mirrored. So we ignore the ## possibility in which for example one site contains the full ## dataset and two others mirror half of it. ## ANOTHER NOTE: According to some people this last case _could_ ## actually happen. I will not design for it, but make sure it ## ends up as a false negative, in which case we just loose some ## efficiency and treat the dataset (unnecessarily) as ## spread-out. ## We don't really care about the first two possibilities, but in ## the third case we need to make sure to run the harvesting in ## two-step mode. ## This method checks with DBS which of the above cases is true ## for the dataset name given, and returns a 1 for the first two ## cases, and the number of sites across which the dataset is ## spread for the third case. ## The way in which this is done is by asking how many files each ## site has for the dataset. In the first case there is only one ## site, in the second case all sites should have the same number ## of files (i.e. the total number of files in the dataset) and ## in the third case the file counts from all sites should add up ## to the total file count for the dataset. ## """ ## # DEBUG DEBUG DEBUG ## # If we get here DBS should have been set up already. ## assert not self.dbs_api is None ## # DEBUG DEBUG DEBUG end ## api = self.dbs_api ## dbs_query = "find run, run.numevents, site, file.count " \ ## "where dataset = %s " \ ## "and dataset.status = VALID" % \ ## dataset_name ## try: ## api_result = api.executeQuery(dbs_query) ## except DbsApiException: ## msg = "ERROR: Could not execute DBS query" ## self.logger.fatal(msg) ## raise Error(msg) ## # Index things by run number. No cross-check is done to make ## # sure we get results for each and every run in the ## # dataset. I'm not sure this would make sense since we'd be ## # cross-checking DBS info with DBS info anyway. Note that we ## # use the file count per site to see if we're dealing with an ## # incomplete vs. a mirrored dataset. 
## sample_info = {} ## try: ## class Handler(xml.sax.handler.ContentHandler): ## def startElement(self, name, attrs): ## if name == "result": ## run_number = int(attrs["RUNS_RUNNUMBER"]) ## site_name = str(attrs["STORAGEELEMENT_SENAME"]) ## file_count = int(attrs["COUNT_FILES"]) ## # BUG BUG BUG ## # Doh! For some reason DBS never returns any other ## # event count than zero. ## event_count = int(attrs["RUNS_NUMBEROFEVENTS"]) ## # BUG BUG BUG end ## info = (site_name, file_count, event_count) ## try: ## sample_info[run_number].append(info) ## except KeyError: ## sample_info[run_number] = [info] ## xml.sax.parseString(api_result, Handler()) ## except SAXParseException: ## msg = "ERROR: Could not parse DBS server output" ## self.logger.fatal(msg) ## raise Error(msg) ## # Now translate this into a slightly more usable mapping. ## sites = {} ## for (run_number, site_info) in six.iteritems(sample_info): ## # Quick-n-dirty trick to see if all file counts are the ## # same. ## unique_file_counts = set([i[1] for i in site_info]) ## if len(unique_file_counts) == 1: ## # Okay, so this must be a mirrored dataset. ## # We have to pick one but we have to be careful. We ## # cannot submit to things like a T0, a T1, or CAF. ## site_names = [self.pick_a_site([i[0] for i in site_info])] ## nevents = [site_info[0][2]] ## else: ## # Looks like this is a spread-out sample. ## site_names = [i[0] for i in site_info] ## nevents = [i[2] for i in site_info] ## sites[run_number] = zip(site_names, nevents) ## self.logger.debug("Sample `%s' spread is:" % dataset_name) ## run_numbers = sites.keys() ## run_numbers.sort() ## for run_number in run_numbers: ## self.logger.debug(" run # %6d: %d site(s) (%s)" % \ ## (run_number, ## len(sites[run_number]), ## ", ".join([i[0] for i in sites[run_number]]))) ## # End of dbs_check_dataset_spread_old. ## return sites ## # DEBUG DEBUG DEBUG end ########## def dbs_check_dataset_spread(self, dataset_name): """Figure out the number of events in each run of this dataset. This is a more efficient way of doing this than calling dbs_resolve_number_of_events for each run. """ self.logger.debug("Checking spread of dataset `%s'" % dataset_name) # DEBUG DEBUG DEBUG # If we get here DBS should have been set up already. assert not self.dbs_api is None # DEBUG DEBUG DEBUG end api = self.dbs_api dbs_query = "find run.number, site, file.name, file.numevents " \ "where dataset = %s " \ "and dataset.status = VALID" % \ dataset_name try: api_result = api.executeQuery(dbs_query) except DBSAPI.dbsApiException.DbsApiException: msg = "ERROR: Could not execute DBS query" self.logger.fatal(msg) raise Error(msg) handler = DBSXMLHandler(["run.number", "site", "file.name", "file.numevents"]) parser = xml.sax.make_parser() parser.setContentHandler(handler) try: # OBSOLETE OBSOLETE OBSOLETE ## class Handler(xml.sax.handler.ContentHandler): ## def startElement(self, name, attrs): ## if name == "result": ## site_name = str(attrs["STORAGEELEMENT_SENAME"]) ## # TODO TODO TODO ## # Ugly hack to get around cases like this: ## # $ dbs search --query="find dataset, site, file.count where dataset=/RelValQCD_Pt_3000_3500/CMSSW_3_3_0_pre1-STARTUP31X_V4-v1/GEN-SIM-RECO" ## # Using DBS instance at: http://cmsdbsprod.cern.ch/cms_dbs_prod_global/servlet/DBSServlet ## # Processing ... 
\ ## # PATH STORAGEELEMENT_SENAME COUNT_FILES ## # _________________________________________________________________________________ ## # /RelValQCD_Pt_3000_3500/CMSSW_3_3_0_pre1-STARTUP31X_V4-v1/GEN-SIM-RECO 1 ## # /RelValQCD_Pt_3000_3500/CMSSW_3_3_0_pre1-STARTUP31X_V4-v1/GEN-SIM-RECO cmssrm.fnal.gov 12 ## # /RelValQCD_Pt_3000_3500/CMSSW_3_3_0_pre1-STARTUP31X_V4-v1/GEN-SIM-RECO srm-cms.cern.ch 12 ## if len(site_name) < 1: ## return ## # TODO TODO TODO end ## run_number = int(attrs["RUNS_RUNNUMBER"]) ## file_name = str(attrs["FILES_LOGICALFILENAME"]) ## nevents = int(attrs["FILES_NUMBEROFEVENTS"]) ## # I know, this is a bit of a kludge. ## if not files_info.has_key(run_number): ## # New run. ## files_info[run_number] = {} ## files_info[run_number][file_name] = (nevents, ## [site_name]) ## elif not files_info[run_number].has_key(file_name): ## # New file for a known run. ## files_info[run_number][file_name] = (nevents, ## [site_name]) ## else: ## # New entry for a known file for a known run. ## # DEBUG DEBUG DEBUG ## # Each file should have the same number of ## # events independent of the site it's at. ## assert nevents == files_info[run_number][file_name][0] ## # DEBUG DEBUG DEBUG end ## files_info[run_number][file_name][1].append(site_name) # OBSOLETE OBSOLETE OBSOLETE end xml.sax.parseString(api_result, handler) except SAXParseException: msg = "ERROR: Could not parse DBS server output" self.logger.fatal(msg) raise Error(msg) # DEBUG DEBUG DEBUG assert(handler.check_results_validity()), "ERROR The DBSXMLHandler screwed something up!" # DEBUG DEBUG DEBUG end # Now reshuffle all results a bit so we can more easily use # them later on. (Remember that all arrays in the results # should have equal length.) files_info = {} for (index, site_name) in enumerate(handler.results["site"]): # Ugly hack to get around cases like this: # $ dbs search --query="find dataset, site, file.count where dataset=/RelValQCD_Pt_3000_3500/CMSSW_3_3_0_pre1-STARTUP31X_V4-v1/GEN-SIM-RECO" # Using DBS instance at: http://cmsdbsprod.cern.ch/cms_dbs_prod_global/servlet/DBSServlet # Processing ... \ # PATH STORAGEELEMENT_SENAME COUNT_FILES # _________________________________________________________________________________ # /RelValQCD_Pt_3000_3500/CMSSW_3_3_0_pre1-STARTUP31X_V4-v1/GEN-SIM-RECO 1 # /RelValQCD_Pt_3000_3500/CMSSW_3_3_0_pre1-STARTUP31X_V4-v1/GEN-SIM-RECO cmssrm.fnal.gov 12 # /RelValQCD_Pt_3000_3500/CMSSW_3_3_0_pre1-STARTUP31X_V4-v1/GEN-SIM-RECO srm-cms.cern.ch 12 if len(site_name) < 1: continue run_number = int(handler.results["run.number"][index]) file_name = handler.results["file.name"][index] nevents = int(handler.results["file.numevents"][index]) # I know, this is a bit of a kludge. if run_number not in files_info: # New run. files_info[run_number] = {} files_info[run_number][file_name] = (nevents, [site_name]) elif file_name not in files_info[run_number]: # New file for a known run. files_info[run_number][file_name] = (nevents, [site_name]) else: # New entry for a known file for a known run. # DEBUG DEBUG DEBUG # Each file should have the same number of # events independent of the site it's at. assert nevents == files_info[run_number][file_name][0] # DEBUG DEBUG DEBUG end files_info[run_number][file_name][1].append(site_name) # Remove any information for files that are not available # anywhere. NOTE: After introducing the ugly hack above, this # is a bit redundant, but let's keep it for the moment. 
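# NOTE: A standalone sketch of the files_info bookkeeping built above: per run
# number, per logical file name, the event count plus the list of sites holding
# that file. The helper name and the input row format are hypothetical.
def build_files_info(rows):
    """rows: iterable of (run_number, file_name, nevents, site_name) tuples."""
    files_info = {}
    for (run_number, file_name, nevents, site_name) in rows:
        if len(site_name) < 1:
            # Skip the bogus empty-site rows DBS sometimes returns.
            continue
        run_files = files_info.setdefault(run_number, {})
        if file_name not in run_files:
            run_files[file_name] = (nevents, [site_name])
        else:
            # The same file should report the same event count at every site.
            assert nevents == run_files[file_name][0]
            run_files[file_name][1].append(site_name)
    return files_info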
for run_number in files_info.keys(): files_without_sites = [i for (i, j) in \ files_info[run_number].items() \ if len(j[1]) < 1] if len(files_without_sites) > 0: self.logger.warning("Removing %d file(s)" \ " with empty site names" % \ len(files_without_sites)) for file_name in files_without_sites: del files_info[run_number][file_name] # files_info[run_number][file_name] = (files_info \ # [run_number] \ # [file_name][0], []) # And another bit of a kludge. num_events_catalog = {} for run_number in files_info.keys(): site_names = list(set([j for i in files_info[run_number].values() for j in i[1]])) # NOTE: The term `mirrored' does not have the usual # meaning here. It basically means that we can apply # single-step harvesting. mirrored = None if len(site_names) > 1: # Now we somehow need to figure out if we're dealing # with a mirrored or a spread-out dataset. The rule we # use here is that we're dealing with a spread-out # dataset unless we can find at least one site # containing exactly the full list of files for this # dataset that DBS knows about. In that case we just # use only that site. all_file_names = files_info[run_number].keys() all_file_names = set(all_file_names) sites_with_complete_copies = [] for site_name in site_names: files_at_site = [i for (i, (j, k)) \ in files_info[run_number].items() \ if site_name in k] files_at_site = set(files_at_site) if files_at_site == all_file_names: sites_with_complete_copies.append(site_name) if len(sites_with_complete_copies) < 1: # This dataset/run is available at more than one # site, but no one has a complete copy. So this is # a spread-out sample. mirrored = False else: if len(sites_with_complete_copies) > 1: # This sample is available (and complete) at # more than one site. Definitely mirrored. mirrored = True else: # This dataset/run is available at more than # one site and at least one of them has a # complete copy. Even if this is only a single # site, let's call this `mirrored' and run the # single-step harvesting. mirrored = True ## site_names_ref = set(files_info[run_number].values()[0][1]) ## for site_names_tmp in files_info[run_number].values()[1:]: ## if set(site_names_tmp[1]) != site_names_ref: ## mirrored = False ## break if mirrored: self.logger.debug(" -> run appears to be `mirrored'") else: self.logger.debug(" -> run appears to be spread-out") if mirrored and \ len(sites_with_complete_copies) != len(site_names): # Remove any references to incomplete sites if we # have at least one complete site (and if there # are incomplete sites). 
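# NOTE: The `mirrored' decision above boils down to: a run counts as mirrored
# if at least one site holds every file DBS lists for that run. A compact,
# standalone sketch (hypothetical helper, same {file: (nevents, [sites])}
# layout as files_info[run_number]):
def sites_with_complete_copy(run_files):
    """Return the sites that hold every file of this run."""
    all_file_names = set(run_files.keys())
    all_sites = set([site for (nevents, sites) in run_files.values()
                     for site in sites])
    complete_sites = []
    for site_name in all_sites:
        files_at_site = set([name for (name, (nevents, sites)) in run_files.items()
                             if site_name in sites])
        if files_at_site == all_file_names:
            complete_sites.append(site_name)
    return complete_sites
# If this list is non-empty the run is treated as `mirrored' (single-step
# harvesting); otherwise it is treated as spread-out.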
for (file_name, (i, sites)) in files_info[run_number].items(): complete_sites = [site for site in sites \ if site in sites_with_complete_copies] files_info[run_number][file_name] = (i, complete_sites) site_names = sites_with_complete_copies self.logger.debug(" for run #%d:" % run_number) num_events_catalog[run_number] = {} num_events_catalog[run_number]["all_sites"] = sum([i[0] for i in files_info[run_number].values()]) if len(site_names) < 1: self.logger.debug(" run is not available at any site") self.logger.debug(" (but should contain %d events" % \ num_events_catalog[run_number]["all_sites"]) else: self.logger.debug(" at all sites combined there are %d events" % \ num_events_catalog[run_number]["all_sites"]) for site_name in site_names: num_events_catalog[run_number][site_name] = sum([i[0] for i in files_info[run_number].values() if site_name in i[1]]) self.logger.debug(" at site `%s' there are %d events" % \ (site_name, num_events_catalog[run_number][site_name])) num_events_catalog[run_number]["mirrored"] = mirrored # End of dbs_check_dataset_spread. return num_events_catalog # Beginning of old version. ## def dbs_check_dataset_num_events(self, dataset_name): ## """Figure out the number of events in each run of this dataset. ## This is a more efficient way of doing this than calling ## dbs_resolve_number_of_events for each run. ## # BUG BUG BUG ## # This might very well not work at all for spread-out samples. (?) ## # BUG BUG BUG end ## """ ## # DEBUG DEBUG DEBUG ## # If we get here DBS should have been set up already. ## assert not self.dbs_api is None ## # DEBUG DEBUG DEBUG end ## api = self.dbs_api ## dbs_query = "find run.number, file.name, file.numevents where dataset = %s " \ ## "and dataset.status = VALID" % \ ## dataset_name ## try: ## api_result = api.executeQuery(dbs_query) ## except DbsApiException: ## msg = "ERROR: Could not execute DBS query" ## self.logger.fatal(msg) ## raise Error(msg) ## try: ## files_info = {} ## class Handler(xml.sax.handler.ContentHandler): ## def startElement(self, name, attrs): ## if name == "result": ## run_number = int(attrs["RUNS_RUNNUMBER"]) ## file_name = str(attrs["FILES_LOGICALFILENAME"]) ## nevents = int(attrs["FILES_NUMBEROFEVENTS"]) ## try: ## files_info[run_number][file_name] = nevents ## except KeyError: ## files_info[run_number] = {file_name: nevents} ## xml.sax.parseString(api_result, Handler()) ## except SAXParseException: ## msg = "ERROR: Could not parse DBS server output" ## self.logger.fatal(msg) ## raise Error(msg) ## num_events_catalog = {} ## for run_number in files_info.keys(): ## num_events_catalog[run_number] = sum(files_info[run_number].values()) ## # End of dbs_check_dataset_num_events. ## return num_events_catalog # End of old version. ########## def build_dataset_list(self, input_method, input_name): """Build a list of all datasets to be processed. """ dataset_names = [] # It may be, but only for the list of datasets to ignore, that # the input method and name are None because nothing was # specified. In that case just an empty list is returned. if input_method is None: pass elif input_method == "dataset": # Input comes from a dataset name directly on the command # line. But, this can also contain wildcards so we need # DBS to translate it conclusively into a list of explicit # dataset names. self.logger.info("Asking DBS for dataset names") dataset_names = self.dbs_resolve_dataset_name(input_name) elif input_method == "datasetfile": # In this case a file containing a list of dataset names # is specified. 
Still, each line may contain wildcards so # this step also needs help from DBS. # NOTE: Lines starting with a `#' are ignored. self.logger.info("Reading input from list file `%s'" % \ input_name) try: listfile = open("/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/harvesting/bin/%s" %input_name, "r") print("open listfile") for dataset in listfile: # Skip empty lines. dataset_stripped = dataset.strip() if len(dataset_stripped) < 1: continue # Skip lines starting with a `#'. if dataset_stripped[0] != "#": dataset_names.extend(self. \ dbs_resolve_dataset_name(dataset_stripped)) listfile.close() except IOError: msg = "ERROR: Could not open input list file `%s'" % \ input_name self.logger.fatal(msg) raise Error(msg) else: # DEBUG DEBUG DEBUG # We should never get here. assert False, "Unknown input method `%s'" % input_method # DEBUG DEBUG DEBUG end # Remove duplicates from the dataset list. # NOTE: There should not be any duplicates in any list coming # from DBS, but maybe the user provided a list file with less # care. # Store for later use. dataset_names = sorted(set(dataset_names)) # End of build_dataset_list. return dataset_names ########## def build_dataset_use_list(self): """Build a list of datasets to process. """ self.logger.info("Building list of datasets to consider...") input_method = self.input_method["datasets"]["use"] input_name = self.input_name["datasets"]["use"] dataset_names = self.build_dataset_list(input_method, input_name) self.datasets_to_use = dict(list(zip(dataset_names, [None] * len(dataset_names)))) self.logger.info(" found %d dataset(s) to process:" % \ len(dataset_names)) for dataset in dataset_names: self.logger.info(" `%s'" % dataset) # End of build_dataset_use_list. ########## def build_dataset_ignore_list(self): """Build a list of datasets to ignore. NOTE: We should always have a list of datasets to process, but it may be that we don't have a list of datasets to ignore. """ self.logger.info("Building list of datasets to ignore...") input_method = self.input_method["datasets"]["ignore"] input_name = self.input_name["datasets"]["ignore"] dataset_names = self.build_dataset_list(input_method, input_name) self.datasets_to_ignore = dict(list(zip(dataset_names, [None] * len(dataset_names)))) self.logger.info(" found %d dataset(s) to ignore:" % \ len(dataset_names)) for dataset in dataset_names: self.logger.info(" `%s'" % dataset) # End of build_dataset_ignore_list. ########## def build_runs_list(self, input_method, input_name): runs = [] # A list of runs (either to use or to ignore) is not # required. This protects against `empty cases.' if input_method is None: pass elif input_method == "runs": # A list of runs was specified directly from the command # line. self.logger.info("Reading list of runs from the " \ "command line") runs.extend([int(i.strip()) \ for i in input_name.split(",") \ if len(i.strip()) > 0]) elif input_method == "runslistfile": # We were passed a file containing a list of runs. self.logger.info("Reading list of runs from file `%s'" % \ input_name) try: listfile = open(input_name, "r") for run in listfile: # Skip empty lines. run_stripped = run.strip() if len(run_stripped) < 1: continue # Skip lines starting with a `#'. if run_stripped[0] != "#": runs.append(int(run_stripped)) listfile.close() except IOError: msg = "ERROR: Could not open input list file `%s'" % \ input_name self.logger.fatal(msg) raise Error(msg) else: # DEBUG DEBUG DEBUG # We should never get here. 
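# NOTE: Both the dataset list file above and the runs list file use the same
# convention: blank lines are skipped and lines starting with `#' are comments.
# A minimal standalone sketch of that reader (hypothetical helper name):
def read_list_file(filename):
    """Return the non-empty, non-comment lines of `filename', stripped."""
    entries = []
    listfile = open(filename, "r")
    try:
        for line in listfile:
            stripped = line.strip()
            if len(stripped) < 1 or stripped.startswith("#"):
                continue
            entries.append(stripped)
    finally:
        listfile.close()
    return entries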
assert False, "Unknown input method `%s'" % input_method # DEBUG DEBUG DEBUG end # Remove duplicates, sort and done. runs = list(set(runs)) # End of build_runs_list(). return runs ########## def build_runs_use_list(self): """Build a list of runs to process. """ self.logger.info("Building list of runs to consider...") input_method = self.input_method["runs"]["use"] input_name = self.input_name["runs"]["use"] runs = self.build_runs_list(input_method, input_name) self.runs_to_use = dict(list(zip(runs, [None] * len(runs)))) self.logger.info(" found %d run(s) to process:" % \ len(runs)) if len(runs) > 0: self.logger.info(" %s" % ", ".join([str(i) for i in runs])) # End of build_runs_list(). ########## def build_runs_ignore_list(self): """Build a list of runs to ignore. NOTE: We should always have a list of runs to process, but it may be that we don't have a list of runs to ignore. """ self.logger.info("Building list of runs to ignore...") input_method = self.input_method["runs"]["ignore"] input_name = self.input_name["runs"]["ignore"] runs = self.build_runs_list(input_method, input_name) self.runs_to_ignore = dict(list(zip(runs, [None] * len(runs)))) self.logger.info(" found %d run(s) to ignore:" % \ len(runs)) if len(runs) > 0: self.logger.info(" %s" % ", ".join([str(i) for i in runs])) # End of build_runs_ignore_list(). ########## def process_dataset_ignore_list(self): """Update the list of datasets taking into account the ones to ignore. Both lists have been generated before from DBS and both are assumed to be unique. NOTE: The advantage of creating the ignore list from DBS (in case a regexp is given) and matching that instead of directly matching the ignore criterion against the list of datasets (to consider) built from DBS is that in the former case we're sure that all regexps are treated exactly as DBS would have done without the cmsHarvester. NOTE: This only removes complete samples. Exclusion of single runs is done by the book keeping. So the assumption is that a user never wants to harvest just part (i.e. n out of N runs) of a sample. """ self.logger.info("Processing list of datasets to ignore...") self.logger.debug("Before processing ignore list there are %d " \ "datasets in the list to be processed" % \ len(self.datasets_to_use)) # Simple approach: just loop and search. dataset_names_filtered = copy.deepcopy(self.datasets_to_use) for dataset_name in self.datasets_to_use.keys(): if dataset_name in self.datasets_to_ignore.keys(): del dataset_names_filtered[dataset_name] self.logger.info(" --> Removed %d dataset(s)" % \ (len(self.datasets_to_use) - len(dataset_names_filtered))) self.datasets_to_use = dataset_names_filtered self.logger.debug("After processing ignore list there are %d " \ "datasets in the list to be processed" % \ len(self.datasets_to_use)) # End of process_dataset_ignore_list. ########## def process_runs_use_and_ignore_lists(self): self.logger.info("Processing list of runs to use and ignore...") # This basically adds all runs in a dataset to be processed, # except for any runs that are not specified in the `to use' # list and any runs that are specified in the `to ignore' # list. # NOTE: It is assumed that those lists make sense. The input # should be checked against e.g. overlapping `use' and # `ignore' lists. runs_to_use = self.runs_to_use runs_to_ignore = self.runs_to_ignore for dataset_name in self.datasets_to_use: runs_in_dataset = self.datasets_information[dataset_name]["runs"] # First some sanity checks. 
runs_to_use_tmp = [] for run in runs_to_use: if not run in runs_in_dataset: self.logger.warning("Dataset `%s' does not contain " \ "requested run %d " \ "--> ignoring `use' of this run" % \ (dataset_name, run)) else: runs_to_use_tmp.append(run) if len(runs_to_use) > 0: runs = runs_to_use_tmp self.logger.info("Using %d out of %d runs " \ "of dataset `%s'" % \ (len(runs), len(runs_in_dataset), dataset_name)) else: runs = runs_in_dataset if len(runs_to_ignore) > 0: runs_tmp = [] for run in runs: if not run in runs_to_ignore: runs_tmp.append(run) self.logger.info("Ignoring %d out of %d runs " \ "of dataset `%s'" % \ (len(runs)- len(runs_tmp), len(runs_in_dataset), dataset_name)) runs = runs_tmp if self.todofile != "YourToDofile.txt": runs_todo = [] print("Reading runs from file /afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/harvesting/%s" %self.todofile) cmd="grep %s /afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/harvesting/%s | cut -f5 -d' '" %(dataset_name,self.todofile) (status, output)=commands.getstatusoutput(cmd) for run in runs: run_str="%s" %run if run_str in output: runs_todo.append(run) self.logger.info("Using %d runs " \ "of dataset `%s'" % \ (len(runs_todo), dataset_name)) runs=runs_todo Json_runs = [] if self.Jsonfilename != "YourJSON.txt": good_runs = [] self.Jsonlumi = True # We were passed a Jsonfile containing a dictionary of # run/lunisection-pairs self.logger.info("Reading runs and lumisections from file `%s'" % \ self.Jsonfilename) try: Jsonfile = open(self.Jsonfilename, "r") for names in Jsonfile: dictNames= eval(str(names)) for key in dictNames: intkey=int(key) Json_runs.append(intkey) Jsonfile.close() except IOError: msg = "ERROR: Could not open Jsonfile `%s'" % \ input_name self.logger.fatal(msg) raise Error(msg) for run in runs: if run in Json_runs: good_runs.append(run) self.logger.info("Using %d runs " \ "of dataset `%s'" % \ (len(good_runs), dataset_name)) runs=good_runs if (self.Jsonrunfilename != "YourJSON.txt") and (self.Jsonfilename == "YourJSON.txt"): good_runs = [] # We were passed a Jsonfile containing a dictionary of # run/lunisection-pairs self.logger.info("Reading runs from file `%s'" % \ self.Jsonrunfilename) try: Jsonfile = open(self.Jsonrunfilename, "r") for names in Jsonfile: dictNames= eval(str(names)) for key in dictNames: intkey=int(key) Json_runs.append(intkey) Jsonfile.close() except IOError: msg = "ERROR: Could not open Jsonfile `%s'" % \ input_name self.logger.fatal(msg) raise Error(msg) for run in runs: if run in Json_runs: good_runs.append(run) self.logger.info("Using %d runs " \ "of dataset `%s'" % \ (len(good_runs), dataset_name)) runs=good_runs self.datasets_to_use[dataset_name] = runs # End of process_runs_use_and_ignore_lists(). ########## def singlify_datasets(self): """Remove all but the largest part of all datasets. This allows us to harvest at least part of these datasets using single-step harvesting until the two-step approach works. 
""" # DEBUG DEBUG DEBUG assert self.harvesting_mode == "single-step-allow-partial" # DEBUG DEBUG DEBUG end for dataset_name in self.datasets_to_use: for run_number in self.datasets_information[dataset_name]["runs"]: max_events = max(self.datasets_information[dataset_name]["sites"][run_number].values()) sites_with_max_events = [i[0] for i in self.datasets_information[dataset_name]["sites"][run_number].items() if i[1] == max_events] self.logger.warning("Singlifying dataset `%s', " \ "run %d" % \ (dataset_name, run_number)) cmssw_version = self.datasets_information[dataset_name] \ ["cmssw_version"] selected_site = self.pick_a_site(sites_with_max_events, cmssw_version) # Let's tell the user that we're manhandling this dataset. nevents_old = self.datasets_information[dataset_name]["num_events"][run_number] self.logger.warning(" --> " \ "only harvesting partial statistics: " \ "%d out of %d events (5.1%f%%) " \ "at site `%s'" % \ (max_events, nevents_old, 100. * max_events / nevents_old, selected_site)) self.logger.warning("!!! Please note that the number of " \ "events in the output path name will " \ "NOT reflect the actual statistics in " \ "the harvested results !!!") # We found the site with the highest statistics and # the corresponding number of events. (CRAB gets upset # if we ask for more events than there are at a given # site.) Now update this information in our main # datasets_information variable. self.datasets_information[dataset_name]["sites"][run_number] = {selected_site: max_events} self.datasets_information[dataset_name]["num_events"][run_number] = max_events #self.datasets_information[dataset_name]["sites"][run_number] = [selected_site] # End of singlify_datasets. ########## def check_dataset_list(self): """Check list of dataset names for impossible ones. Two kinds of checks are done: - Checks for things that do not make sense. These lead to errors and skipped datasets. - Sanity checks. For these warnings are issued but the user is considered to be the authoritative expert. Checks performed: - The CMSSW version encoded in the dataset name should match self.cmssw_version. This is critical. - There should be some events in the dataset/run. This is critical in the sense that CRAB refuses to create jobs for zero events. And yes, this does happen in practice. E.g. the reprocessed CRAFT08 datasets contain runs with zero events. - A cursory check is performed to see if the harvesting type makes sense for the data type. This should prevent the user from inadvertently running RelVal for data. - It is not possible to run single-step harvesting jobs on samples that are not fully contained at a single site. - Each dataset/run has to be available at at least one site. """ self.logger.info("Performing sanity checks on dataset list...") dataset_names_after_checks = copy.deepcopy(self.datasets_to_use) for dataset_name in self.datasets_to_use.keys(): # Check CMSSW version. version_from_dataset = self.datasets_information[dataset_name] \ ["cmssw_version"] if version_from_dataset != self.cmssw_version: msg = " CMSSW version mismatch for dataset `%s' " \ "(%s vs. %s)" % \ (dataset_name, self.cmssw_version, version_from_dataset) if self.force_running: # Expert mode: just warn, then continue. self.logger.warning("%s " \ "--> `force mode' active: " \ "run anyway" % msg) else: del dataset_names_after_checks[dataset_name] self.logger.warning("%s " \ "--> skipping" % msg) continue ### # Check that the harvesting type makes sense for the # sample. E.g. 
normally one would not run the DQMOffline # harvesting on Monte Carlo. # TODO TODO TODO # This should be further refined. suspicious = False datatype = self.datasets_information[dataset_name]["datatype"] if datatype == "data": # Normally only DQM harvesting is run on data. if self.harvesting_type != "DQMOffline": suspicious = True elif datatype == "mc": if self.harvesting_type == "DQMOffline": suspicious = True else: # Doh! assert False, "ERROR Impossible data type `%s' " \ "for dataset `%s'" % \ (datatype, dataset_name) if suspicious: msg = " Normally one does not run `%s' harvesting " \ "on %s samples, are you sure?" % \ (self.harvesting_type, datatype) if self.force_running: self.logger.warning("%s " \ "--> `force mode' active: " \ "run anyway" % msg) else: del dataset_names_after_checks[dataset_name] self.logger.warning("%s " \ "--> skipping" % msg) continue # TODO TODO TODO end ### # BUG BUG BUG # For the moment, due to a problem with DBS, I cannot # figure out the GlobalTag for data by myself. (For MC # it's no problem.) This means that unless a GlobalTag was # specified from the command line, we will have to skip # any data datasets. if datatype == "data": if self.globaltag is None: msg = "For data datasets (like `%s') " \ "we need a GlobalTag" % \ dataset_name del dataset_names_after_checks[dataset_name] self.logger.warning("%s " \ "--> skipping" % msg) continue # BUG BUG BUG end ### # Check if the GlobalTag exists and (if we're using # reference histograms) if it's ready to be used with # reference histograms. globaltag = self.datasets_information[dataset_name]["globaltag"] if not globaltag in self.globaltag_check_cache: if self.check_globaltag(globaltag): self.globaltag_check_cache.append(globaltag) else: msg = "Something is wrong with GlobalTag `%s' " \ "used by dataset `%s'!" % \ (globaltag, dataset_name) if self.use_ref_hists: msg += "\n(Either it does not exist or it " \ "does not contain the required key to " \ "be used with reference histograms.)" else: msg += "\n(It probably just does not exist.)" self.logger.fatal(msg) raise Usage(msg) ### # Require that each run is available at least somewhere. runs_without_sites = [i for (i, j) in \ self.datasets_information[dataset_name] \ ["sites"].items() \ if len(j) < 1 and \ i in self.datasets_to_use[dataset_name]] if len(runs_without_sites) > 0: for run_without_sites in runs_without_sites: try: dataset_names_after_checks[dataset_name].remove(run_without_sites) except KeyError: pass self.logger.warning(" removed %d unavailable run(s) " \ "from dataset `%s'" % \ (len(runs_without_sites), dataset_name)) self.logger.debug(" (%s)" % \ ", ".join([str(i) for i in \ runs_without_sites])) ### # Unless we're running two-step harvesting: only allow # samples located on a single site. if not self.harvesting_mode == "two-step": for run_number in self.datasets_to_use[dataset_name]: # DEBUG DEBUG DEBUG ## if self.datasets_information[dataset_name]["num_events"][run_number] != 0: ## pdb.set_trace() # DEBUG DEBUG DEBUG end num_sites = len(self.datasets_information[dataset_name] \ ["sites"][run_number]) if num_sites > 1 and \ not self.datasets_information[dataset_name] \ ["mirrored"][run_number]: # Cannot do this with a single-step job, not # even in force mode. It just does not make # sense. 
msg = " Dataset `%s', run %d is spread across more " \ "than one site.\n" \ " Cannot run single-step harvesting on " \ "samples spread across multiple sites" % \ (dataset_name, run_number) try: dataset_names_after_checks[dataset_name].remove(run_number) except KeyError: pass self.logger.warning("%s " \ "--> skipping" % msg) ### # Require that the dataset/run is non-empty. # NOTE: To avoid reconsidering empty runs/datasets next # time around, we do include them in the book keeping. # BUG BUG BUG # This should sum only over the runs that we use! tmp = [j for (i, j) in self.datasets_information \ [dataset_name]["num_events"].items() \ if i in self.datasets_to_use[dataset_name]] num_events_dataset = sum(tmp) # BUG BUG BUG end if num_events_dataset < 1: msg = " dataset `%s' is empty" % dataset_name del dataset_names_after_checks[dataset_name] self.logger.warning("%s " \ "--> skipping" % msg) # Update the book keeping with all the runs in the dataset. # DEBUG DEBUG DEBUG #assert set([j for (i, j) in self.datasets_information \ # [dataset_name]["num_events"].items() \ # if i in self.datasets_to_use[dataset_name]]) == \ # set([0]) # DEBUG DEBUG DEBUG end #self.book_keeping_information[dataset_name] = self.datasets_information \ # [dataset_name]["num_events"] continue tmp = [i for i in \ self.datasets_information[dataset_name] \ ["num_events"].items() if i[1] < 1] tmp = [i for i in tmp if i[0] in self.datasets_to_use[dataset_name]] empty_runs = dict(tmp) if len(empty_runs) > 0: for empty_run in empty_runs: try: dataset_names_after_checks[dataset_name].remove(empty_run) except KeyError: pass self.logger.info(" removed %d empty run(s) from dataset `%s'" % \ (len(empty_runs), dataset_name)) self.logger.debug(" (%s)" % \ ", ".join([str(i) for i in empty_runs])) ### # If we emptied out a complete dataset, remove the whole # thing. dataset_names_after_checks_tmp = copy.deepcopy(dataset_names_after_checks) for (dataset_name, runs) in six.iteritems(dataset_names_after_checks): if len(runs) < 1: self.logger.warning(" Removing dataset without any runs " \ "(left) `%s'" % \ dataset_name) del dataset_names_after_checks_tmp[dataset_name] dataset_names_after_checks = dataset_names_after_checks_tmp ### self.logger.warning(" --> Removed %d dataset(s)" % \ (len(self.datasets_to_use) - len(dataset_names_after_checks))) # Now store the modified version of the dataset list. self.datasets_to_use = dataset_names_after_checks # End of check_dataset_list. ########## def escape_dataset_name(self, dataset_name): """Escape a DBS dataset name. Escape a DBS dataset name such that it does not cause trouble with the file system. This means turning each `/' into `__', except for the first one which is just removed. """ escaped_dataset_name = dataset_name escaped_dataset_name = escaped_dataset_name.strip("/") escaped_dataset_name = escaped_dataset_name.replace("/", "__") return escaped_dataset_name ########## # BUG BUG BUG # This is a bit of a redundant method, isn't it? def create_config_file_name(self, dataset_name, run_number): """Generate the name of the configuration file to be run by CRAB. Depending on the harvesting mode (single-step or two-step) this is the name of the real harvesting configuration or the name of the first-step ME summary extraction configuration. 
""" if self.harvesting_mode == "single-step": config_file_name = self.create_harvesting_config_file_name(dataset_name) elif self.harvesting_mode == "single-step-allow-partial": config_file_name = self.create_harvesting_config_file_name(dataset_name) ## # Only add the alarming piece to the file name if this is ## # a spread-out dataset. ## pdb.set_trace() ## if self.datasets_information[dataset_name] \ ## ["mirrored"][run_number] == False: ## config_file_name = config_file_name.replace(".py", "_partial.py") elif self.harvesting_mode == "two-step": config_file_name = self.create_me_summary_config_file_name(dataset_name) else: assert False, "ERROR Unknown harvesting mode `%s'" % \ self.harvesting_mode # End of create_config_file_name. return config_file_name # BUG BUG BUG end ########## def create_harvesting_config_file_name(self, dataset_name): "Generate the name to be used for the harvesting config file." file_name_base = "harvesting.py" dataset_name_escaped = self.escape_dataset_name(dataset_name) config_file_name = file_name_base.replace(".py", "_%s.py" % \ dataset_name_escaped) # End of create_harvesting_config_file_name. return config_file_name ########## def create_me_summary_config_file_name(self, dataset_name): "Generate the name of the ME summary extraction config file." file_name_base = "me_extraction.py" dataset_name_escaped = self.escape_dataset_name(dataset_name) config_file_name = file_name_base.replace(".py", "_%s.py" % \ dataset_name_escaped) # End of create_me_summary_config_file_name. return config_file_name ########## def create_output_file_name(self, dataset_name, run_number=None): """Create the name of the output file name to be used. This is the name of the output file of the `first step'. In the case of single-step harvesting this is already the final harvesting output ROOT file. In the case of two-step harvesting it is the name of the intermediary ME summary file. """ # BUG BUG BUG # This method has become a bit of a mess. Originally it was # nice to have one entry point for both single- and two-step # output file names. However, now the former needs the run # number, while the latter does not even know about run # numbers. This should be fixed up a bit. # BUG BUG BUG end if self.harvesting_mode == "single-step": # DEBUG DEBUG DEBUG assert not run_number is None # DEBUG DEBUG DEBUG end output_file_name = self.create_harvesting_output_file_name(dataset_name, run_number) elif self.harvesting_mode == "single-step-allow-partial": # DEBUG DEBUG DEBUG assert not run_number is None # DEBUG DEBUG DEBUG end output_file_name = self.create_harvesting_output_file_name(dataset_name, run_number) elif self.harvesting_mode == "two-step": # DEBUG DEBUG DEBUG assert run_number is None # DEBUG DEBUG DEBUG end output_file_name = self.create_me_summary_output_file_name(dataset_name) else: # This should not be possible, but hey... assert False, "ERROR Unknown harvesting mode `%s'" % \ self.harvesting_mode # End of create_harvesting_output_file_name. return output_file_name ########## def create_harvesting_output_file_name(self, dataset_name, run_number): """Generate the name to be used for the harvesting output file. This harvesting output file is the _final_ ROOT output file containing the harvesting results. In case of two-step harvesting there is an intermediate ME output file as well. 
""" dataset_name_escaped = self.escape_dataset_name(dataset_name) # Hmmm, looking at the code for the DQMFileSaver this might # actually be the place where the first part of this file # naming scheme comes from. # NOTE: It looks like the `V0001' comes from the DQM # version. This is something that cannot be looked up from # here, so let's hope it does not change too often. output_file_name = "DQM_V0001_R%09d__%s.root" % \ (run_number, dataset_name_escaped) if self.harvesting_mode.find("partial") > -1: # Only add the alarming piece to the file name if this is # a spread-out dataset. if self.datasets_information[dataset_name] \ ["mirrored"][run_number] == False: output_file_name = output_file_name.replace(".root", \ "_partial.root") # End of create_harvesting_output_file_name. return output_file_name ########## def create_me_summary_output_file_name(self, dataset_name): """Generate the name of the intermediate ME file name to be used in two-step harvesting. """ dataset_name_escaped = self.escape_dataset_name(dataset_name) output_file_name = "me_summary_%s.root" % \ dataset_name_escaped # End of create_me_summary_output_file_name. return output_file_name ########## def create_multicrab_block_name(self, dataset_name, run_number, index): """Create the block name to use for this dataset/run number. This is what appears in the brackets `[]' in multicrab.cfg. It is used as the name of the job and to create output directories. """ dataset_name_escaped = self.escape_dataset_name(dataset_name) block_name = "%s_%09d_%s" % (dataset_name_escaped, run_number, index) # End of create_multicrab_block_name. return block_name ########## def create_crab_config(self): """Create a CRAB configuration for a given job. NOTE: This is _not_ a complete (as in: submittable) CRAB configuration. It is used to store the common settings for the multicrab configuration. NOTE: Only CERN CASTOR area (/castor/cern.ch/) is supported. NOTE: According to CRAB, you `Must define exactly two of total_number_of_events, events_per_job, or number_of_jobs.'. For single-step harvesting we force one job, for the rest we don't really care. # BUG BUG BUG # With the current version of CRAB (2.6.1), in which Daniele # fixed the behaviour of no_block_boundary for me, one _has to # specify_ the total_number_of_events and one single site in # the se_white_list. # BUG BUG BUG end """ tmp = [] # This is the stuff we will need to fill in. castor_prefix = self.castor_prefix tmp.append(self.config_file_header()) tmp.append("") ## CRAB ##------ tmp.append("[CRAB]") tmp.append("jobtype = cmssw") tmp.append("") ## GRID ##------ tmp.append("[GRID]") tmp.append("virtual_organization=cms") tmp.append("") ## USER ##------ tmp.append("[USER]") tmp.append("copy_data = 1") tmp.append("") ## CMSSW ##------- tmp.append("[CMSSW]") tmp.append("# This reveals data hosted on T1 sites,") tmp.append("# which is normally hidden by CRAB.") tmp.append("show_prod = 1") tmp.append("number_of_jobs = 1") if self.Jsonlumi == True: tmp.append("lumi_mask = %s" % self.Jsonfilename) tmp.append("total_number_of_lumis = -1") else: if self.harvesting_type == "DQMOffline": tmp.append("total_number_of_lumis = -1") else: tmp.append("total_number_of_events = -1") if self.harvesting_mode.find("single-step") > -1: tmp.append("# Force everything to run in one job.") tmp.append("no_block_boundary = 1") tmp.append("") ## CAF ##----- tmp.append("[CAF]") crab_config = "\n".join(tmp) # End of create_crab_config. 
return crab_config ########## def create_multicrab_config(self): """Create a multicrab.cfg file for all samples. This creates the contents for a multicrab.cfg file that uses the crab.cfg file (generated elsewhere) for the basic settings and contains blocks for each run of each dataset. # BUG BUG BUG # The fact that it's necessary to specify the se_white_list # and the total_number_of_events is due to our use of CRAB # version 2.6.1. This should no longer be necessary in the # future. # BUG BUG BUG end """ cmd="who i am | cut -f1 -d' '" (status, output)=commands.getstatusoutput(cmd) UserName = output if self.caf_access == True: print("Extracting %s as user name" %UserName) number_max_sites = self.nr_max_sites + 1 multicrab_config_lines = [] multicrab_config_lines.append(self.config_file_header()) multicrab_config_lines.append("") multicrab_config_lines.append("[MULTICRAB]") multicrab_config_lines.append("cfg = crab.cfg") multicrab_config_lines.append("") dataset_names = sorted(self.datasets_to_use.keys()) for dataset_name in dataset_names: runs = self.datasets_to_use[dataset_name] dataset_name_escaped = self.escape_dataset_name(dataset_name) castor_prefix = self.castor_prefix for run in runs: # CASTOR output dir. castor_dir = self.datasets_information[dataset_name] \ ["castor_path"][run] cmd = "rfdir %s" % castor_dir (status, output) = commands.getstatusoutput(cmd) if len(output) <= 0: # DEBUG DEBUG DEBUG # We should only get here if we're treating a # dataset/run that is fully contained at a single # site. assert (len(self.datasets_information[dataset_name] \ ["sites"][run]) == 1) or \ self.datasets_information[dataset_name]["mirrored"] # DEBUG DEBUG DEBUG end site_names = self.datasets_information[dataset_name] \ ["sites"][run].keys() for i in range(1, number_max_sites, 1): if len(site_names) > 0: index = "site_%02d" % (i) config_file_name = self. \ create_config_file_name(dataset_name, run) output_file_name = self. \ create_output_file_name(dataset_name, run) # If we're looking at a mirrored dataset we just pick # one of the sites. Otherwise there is nothing to # choose. # Loop variable loop = 0 if len(site_names) > 1: cmssw_version = self.datasets_information[dataset_name] \ ["cmssw_version"] self.logger.info("Picking site for mirrored dataset " \ "`%s', run %d" % \ (dataset_name, run)) site_name = self.pick_a_site(site_names, cmssw_version) if site_name in site_names: site_names.remove(site_name) else: site_name = site_names[0] site_names.remove(site_name) if site_name is self.no_matching_site_found_str: if loop < 1: break nevents = self.datasets_information[dataset_name]["num_events"][run] # The block name. 
multicrab_block_name = self.create_multicrab_block_name( \ dataset_name, run, index) multicrab_config_lines.append("[%s]" % \ multicrab_block_name) ## CRAB ##------ if site_name == "caf.cern.ch": multicrab_config_lines.append("CRAB.use_server=0") multicrab_config_lines.append("CRAB.scheduler=caf") else: multicrab_config_lines.append("scheduler = glite") ## GRID ##------ if site_name == "caf.cern.ch": pass else: multicrab_config_lines.append("GRID.se_white_list = %s" % \ site_name) multicrab_config_lines.append("# This removes the default blacklisting of T1 sites.") multicrab_config_lines.append("GRID.remove_default_blacklist = 1") multicrab_config_lines.append("GRID.rb = CERN") if not self.non_t1access: multicrab_config_lines.append("GRID.role = t1access") ## USER ##------ castor_dir = castor_dir.replace(castor_prefix, "") multicrab_config_lines.append("USER.storage_element=srm-cms.cern.ch") multicrab_config_lines.append("USER.user_remote_dir = %s" % \ castor_dir) multicrab_config_lines.append("USER.check_user_remote_dir=0") if site_name == "caf.cern.ch": multicrab_config_lines.append("USER.storage_path=%s" % castor_prefix) #multicrab_config_lines.append("USER.storage_element=T2_CH_CAF") #castor_dir = castor_dir.replace("/cms/store/caf/user/%s" %UserName, "") #multicrab_config_lines.append("USER.user_remote_dir = %s" % \ # castor_dir) else: multicrab_config_lines.append("USER.storage_path=/srm/managerv2?SFN=%s" % castor_prefix) #multicrab_config_lines.append("USER.user_remote_dir = %s" % \ # castor_dir) #multicrab_config_lines.append("USER.storage_element=srm-cms.cern.ch") ## CMSSW ##------- multicrab_config_lines.append("CMSSW.pset = %s" % \ config_file_name) multicrab_config_lines.append("CMSSW.datasetpath = %s" % \ dataset_name) multicrab_config_lines.append("CMSSW.runselection = %d" % \ run) if self.Jsonlumi == True: pass else: if self.harvesting_type == "DQMOffline": pass else: multicrab_config_lines.append("CMSSW.total_number_of_events = %d" % \ nevents) # The output file name. multicrab_config_lines.append("CMSSW.output_file = %s" % \ output_file_name) ## CAF ##----- if site_name == "caf.cern.ch": multicrab_config_lines.append("CAF.queue=cmscaf1nd") # End of block. multicrab_config_lines.append("") loop = loop + 1 self.all_sites_found = True multicrab_config = "\n".join(multicrab_config_lines) # End of create_multicrab_config. return multicrab_config ########## def check_globaltag(self, globaltag=None): """Check if globaltag exists. Check if globaltag exists as GlobalTag in the database given by self.frontier_connection_name['globaltag']. If globaltag is None, self.globaltag is used instead. If we're going to use reference histograms this method also checks for the existence of the required key in the GlobalTag. """ if globaltag is None: globaltag = self.globaltag # All GlobalTags should end in `::All', right? if globaltag.endswith("::All"): globaltag = globaltag[:-5] connect_name = self.frontier_connection_name["globaltag"] # BUG BUG BUG # There is a bug in cmscond_tagtree_list: some magic is # missing from the implementation requiring one to specify # explicitly the name of the squid to connect to. Since the # cmsHarvester can only be run from the CERN network anyway, # cmsfrontier:8000 is hard-coded in here. Not nice but it # works. 
connect_name = connect_name.replace("frontier://", "frontier://cmsfrontier:8000/") # BUG BUG BUG end connect_name += self.db_account_name_cms_cond_globaltag() tag_exists = self.check_globaltag_exists(globaltag, connect_name) #---------- tag_contains_ref_hist_key = False if self.use_ref_hists and tag_exists: # Check for the key required to use reference histograms. tag_contains_ref_hist_key = self.check_globaltag_contains_ref_hist_key(globaltag, connect_name) #---------- if self.use_ref_hists: ret_val = tag_exists and tag_contains_ref_hist_key else: ret_val = tag_exists #---------- # End of check_globaltag. return ret_val ########## def check_globaltag_exists(self, globaltag, connect_name): """Check if globaltag exists. """ self.logger.info("Checking existence of GlobalTag `%s'" % \ globaltag) self.logger.debug(" (Using database connection `%s')" % \ connect_name) cmd = "cmscond_tagtree_list -c %s -T %s" % \ (connect_name, globaltag) (status, output) = commands.getstatusoutput(cmd) if status != 0 or \ output.find("error") > -1: msg = "Could not check existence of GlobalTag `%s' in `%s'" % \ (globaltag, connect_name) if output.find(".ALL_TABLES not found") > -1: msg = "%s\n" \ "Missing database account `%s'" % \ (msg, output.split(".ALL_TABLES")[0].split()[-1]) self.logger.fatal(msg) self.logger.debug("Command used:") self.logger.debug(" %s" % cmd) self.logger.debug("Output received:") self.logger.debug(output) raise Error(msg) if output.find("does not exist") > -1: self.logger.debug("GlobalTag `%s' does not exist in `%s':" % \ (globaltag, connect_name)) self.logger.debug("Output received:") self.logger.debug(output) tag_exists = False else: tag_exists = True self.logger.info(" GlobalTag exists? -> %s" % tag_exists) # End of check_globaltag_exists. return tag_exists ########## def check_globaltag_contains_ref_hist_key(self, globaltag, connect_name): """Check if globaltag contains the required RefHistos key. """ # Check for the key required to use reference histograms. tag_contains_key = None ref_hist_key = "RefHistos" self.logger.info("Checking existence of reference " \ "histogram key `%s' in GlobalTag `%s'" % \ (ref_hist_key, globaltag)) self.logger.debug(" (Using database connection `%s')" % \ connect_name) cmd = "cmscond_tagtree_list -c %s -T %s -n %s" % \ (connect_name, globaltag, ref_hist_key) (status, output) = commands.getstatusoutput(cmd) if status != 0 or \ output.find("error") > -1: msg = "Could not check existence of key `%s'" % \ (ref_hist_key, connect_name) self.logger.fatal(msg) self.logger.debug("Command used:") self.logger.debug(" %s" % cmd) self.logger.debug("Output received:") self.logger.debug(" %s" % output) raise Error(msg) if len(output) < 1: self.logger.debug("Required key for use of reference " \ "histograms `%s' does not exist " \ "in GlobalTag `%s':" % \ (ref_hist_key, globaltag)) self.logger.debug("Output received:") self.logger.debug(output) tag_contains_key = False else: tag_contains_key = True self.logger.info(" GlobalTag contains `%s' key? -> %s" % \ (ref_hist_key, tag_contains_key)) # End of check_globaltag_contains_ref_hist_key. return tag_contains_key ########## def check_ref_hist_tag(self, tag_name): """Check the existence of tag_name in database connect_name. Check if tag_name exists as a reference histogram tag in the database given by self.frontier_connection_name['refhists']. 
""" connect_name = self.frontier_connection_name["refhists"] connect_name += self.db_account_name_cms_cond_dqm_summary() self.logger.debug("Checking existence of reference " \ "histogram tag `%s'" % \ tag_name) self.logger.debug(" (Using database connection `%s')" % \ connect_name) cmd = "cmscond_list_iov -c %s" % \ connect_name (status, output) = commands.getstatusoutput(cmd) if status != 0: msg = "Could not check existence of tag `%s' in `%s'" % \ (tag_name, connect_name) self.logger.fatal(msg) self.logger.debug("Command used:") self.logger.debug(" %s" % cmd) self.logger.debug("Output received:") self.logger.debug(output) raise Error(msg) if not tag_name in output.split(): self.logger.debug("Reference histogram tag `%s' " \ "does not exist in `%s'" % \ (tag_name, connect_name)) self.logger.debug(" Existing tags: `%s'" % \ "', `".join(output.split())) tag_exists = False else: tag_exists = True self.logger.debug(" Reference histogram tag exists? " \ "-> %s" % tag_exists) # End of check_ref_hist_tag. return tag_exists ########## def create_es_prefer_snippet(self, dataset_name): """Build the es_prefer snippet for the reference histograms. The building of the snippet is wrapped in some care-taking code that figures out the name of the reference histogram set and makes sure the corresponding tag exists. """ # Figure out the name of the reference histograms tag. # NOTE: The existence of these tags has already been checked. ref_hist_tag_name = self.ref_hist_mappings[dataset_name] connect_name = self.frontier_connection_name["refhists"] connect_name += self.db_account_name_cms_cond_dqm_summary() record_name = "DQMReferenceHistogramRootFileRcd" # Build up the code snippet. code_lines = [] code_lines.append("from CondCore.DBCommon.CondDBSetup_cfi import *") code_lines.append("process.ref_hist_source = cms.ESSource(\"PoolDBESSource\", CondDBSetup,") code_lines.append(" connect = cms.string(\"%s\")," % connect_name) code_lines.append(" toGet = cms.VPSet(cms.PSet(record = cms.string(\"%s\")," % record_name) code_lines.append(" tag = cms.string(\"%s\"))," % ref_hist_tag_name) code_lines.append(" )") code_lines.append(" )") code_lines.append("process.es_prefer_ref_hist_source = cms.ESPrefer(\"PoolDBESSource\", \"ref_hist_source\")") snippet = "\n".join(code_lines) # End of create_es_prefer_snippet. return snippet ########## def create_harvesting_config(self, dataset_name): """Create the Python harvesting configuration for harvesting. The basic configuration is created by Configuration.PyReleaseValidation.ConfigBuilder. (This mimics what cmsDriver.py does.) After that we add some specials ourselves. NOTE: On one hand it may not be nice to circumvent cmsDriver.py, on the other hand cmsDriver.py does not really do anything itself. All the real work is done by the ConfigBuilder so there is not much risk that we miss out on essential developments of cmsDriver in the future. """ # Setup some options needed by the ConfigBuilder. config_options = defaultOptions # These are fixed for all kinds of harvesting jobs. Some of # them are not needed for the harvesting config, but to keep # the ConfigBuilder happy. config_options.name = "harvesting" config_options.scenario = "pp" config_options.number = 1 config_options.arguments = self.ident_string() config_options.evt_type = config_options.name config_options.customisation_file = None config_options.filein = "dummy_value" config_options.filetype = "EDM" # This seems to be new in CMSSW 3.3.X, no clue what it does. 
config_options.gflash = "dummy_value" # This seems to be new in CMSSW 3.3.0.pre6, no clue what it # does. #config_options.himix = "dummy_value" config_options.dbsquery = "" ### # These options depend on the type of harvesting we're doing # and are stored in self.harvesting_info. config_options.step = "HARVESTING:%s" % \ self.harvesting_info[self.harvesting_type] \ ["step_string"] config_options.beamspot = self.harvesting_info[self.harvesting_type] \ ["beamspot"] config_options.eventcontent = self.harvesting_info \ [self.harvesting_type] \ ["eventcontent"] config_options.harvesting = self.harvesting_info \ [self.harvesting_type] \ ["harvesting"] ### # This one is required (see also above) for each dataset. datatype = self.datasets_information[dataset_name]["datatype"] config_options.isMC = (datatype.lower() == "mc") config_options.isData = (datatype.lower() == "data") globaltag = self.datasets_information[dataset_name]["globaltag"] config_options.conditions = self.format_conditions_string(globaltag) ### if "with_input" in getargspec(ConfigBuilder.__init__)[0]: # This is the case for 3.3.X. config_builder = ConfigBuilder(config_options, with_input=True) else: # This is the case in older CMSSW versions. config_builder = ConfigBuilder(config_options) config_builder.prepare(True) config_contents = config_builder.pythonCfgCode ### # Add our signature to the top of the configuration. and add # some markers to the head and the tail of the Python code # generated by the ConfigBuilder. marker_lines = [] sep = "#" * 30 marker_lines.append(sep) marker_lines.append("# Code between these markers was generated by") marker_lines.append("# Configuration.PyReleaseValidation." \ "ConfigBuilder") marker_lines.append(sep) marker = "\n".join(marker_lines) tmp = [self.config_file_header()] tmp.append("") tmp.append(marker) tmp.append("") tmp.append(config_contents) tmp.append("") tmp.append(marker) tmp.append("") config_contents = "\n".join(tmp) ### # Now we add some stuff of our own. customisations = [""] customisations.append("# Now follow some customisations") customisations.append("") connect_name = self.frontier_connection_name["globaltag"] connect_name += self.db_account_name_cms_cond_globaltag() customisations.append("process.GlobalTag.connect = \"%s\"" % \ connect_name) if self.saveByLumiSection == True: customisations.append("process.dqmSaver.saveByLumiSection = 1") ## ## customisations.append("") # About the reference histograms... For data there is only one # set of references and those are picked up automatically # based on the GlobalTag. For MC we have to do some more work # since the reference histograms to be used depend on the MC # sample at hand. In this case we glue in an es_prefer snippet # to pick up the references. We do this only for RelVals since # for MC there are no meaningful references so far. # NOTE: Due to the lack of meaningful references for # MC samples reference histograms are explicitly # switched off in this case. use_es_prefer = (self.harvesting_type == "RelVal") use_refs = use_es_prefer or \ (not self.harvesting_type == "MC") # Allow global override. use_refs = use_refs and self.use_ref_hists if not use_refs: # Disable reference histograms explicitly. The histograms # are loaded by the dqmRefHistoRootFileGetter # EDAnalyzer. This analyzer can be run from several # sequences. Here we remove it from each sequence that # exists. 
customisations.append("print \"Not using reference histograms\"") customisations.append("if hasattr(process, \"dqmRefHistoRootFileGetter\"):") customisations.append(" for (sequence_name, sequence) in six.iteritems(process.sequences):") customisations.append(" if sequence.remove(process.dqmRefHistoRootFileGetter):") customisations.append(" print \"Removed process.dqmRefHistoRootFileGetter from sequence `%s'\" % \\") customisations.append(" sequence_name") customisations.append("process.dqmSaver.referenceHandling = \"skip\"") else: # This makes sure all reference histograms are saved to # the output ROOT file. customisations.append("process.dqmSaver.referenceHandling = \"all\"") if use_es_prefer: es_prefer_snippet = self.create_es_prefer_snippet(dataset_name) customisations.append(es_prefer_snippet) # Make sure we get the `workflow' correct. As far as I can see # this is only important for the output file name. workflow_name = dataset_name if self.harvesting_mode == "single-step-allow-partial": workflow_name += "_partial" customisations.append("process.dqmSaver.workflow = \"%s\"" % \ workflow_name) # BUG BUG BUG # This still does not work. The current two-step harvesting # efforts are on hold waiting for the solution to come from # elsewhere. (In this case the elsewhere is Daniele Spiga.) ## # In case this file is the second step (the real harvesting ## # step) of the two-step harvesting we have to tell it to use ## # our local files. ## if self.harvesting_mode == "two-step": ## castor_dir = self.datasets_information[dataset_name] \ ## ["castor_path"][run] ## customisations.append("") ## customisations.append("# This is the second step (the real") ## customisations.append("# harvesting step) of a two-step") ## customisations.append("# harvesting procedure.") ## # BUG BUG BUG ## # To be removed in production version. ## customisations.append("import pdb") ## # BUG BUG BUG end ## customisations.append("import commands") ## customisations.append("import os") ## customisations.append("castor_dir = \"%s\"" % castor_dir) ## customisations.append("cmd = \"rfdir %s\" % castor_dir") ## customisations.append("(status, output) = commands.getstatusoutput(cmd)") ## customisations.append("if status != 0:") ## customisations.append(" print \"ERROR\"") ## customisations.append(" raise Exception, \"ERROR\"") ## customisations.append("file_names = [os.path.join(\"rfio:%s\" % path, i) for i in output.split() if i.startswith(\"EDM_summary\") and i.endswith(\".root\")]") ## #customisations.append("pdb.set_trace()") ## customisations.append("process.source.fileNames = cms.untracked.vstring(*file_names)") ## customisations.append("") # BUG BUG BUG end config_contents = config_contents + "\n".join(customisations) ### # End of create_harvesting_config. return config_contents ## ########## ## def create_harvesting_config_two_step(self, dataset_name): ## """Create the Python harvesting configuration for two-step ## harvesting. ## """ ## # BUG BUG BUG ## config_contents = self.create_harvesting_config_single_step(dataset_name) ## # BUG BUG BUG end ## # End of create_harvesting_config_two_step. ## return config_contents ########## def create_me_extraction_config(self, dataset_name): """ """ # Big chunk of hard-coded Python. Not such a big deal since # this does not do much and is not likely to break. 
tmp = [] tmp.append(self.config_file_header()) tmp.append("") tmp.append("import FWCore.ParameterSet.Config as cms") tmp.append("") tmp.append("process = cms.Process(\"ME2EDM\")") tmp.append("") tmp.append("# Import of standard configurations") tmp.append("process.load(\"Configuration/EventContent/EventContent_cff\")") tmp.append("") tmp.append("# We don't really process any events, just keep this set to one to") tmp.append("# make sure things work.") tmp.append("process.maxEvents = cms.untracked.PSet(") tmp.append(" input = cms.untracked.int32(1)") tmp.append(" )") tmp.append("") tmp.append("process.options = cms.untracked.PSet(") tmp.append(" Rethrow = cms.untracked.vstring(\"ProductNotFound\")") tmp.append(" )") tmp.append("") tmp.append("process.source = cms.Source(\"PoolSource\",") tmp.append(" processingMode = \\") tmp.append(" cms.untracked.string(\"RunsAndLumis\"),") tmp.append(" fileNames = \\") tmp.append(" cms.untracked.vstring(\"no_file_specified\")") tmp.append(" )") tmp.append("") tmp.append("# Output definition: drop everything except for the monitoring.") tmp.append("process.output = cms.OutputModule(") tmp.append(" \"PoolOutputModule\",") tmp.append(" outputCommands = \\") tmp.append(" cms.untracked.vstring(\"drop *\", \\") tmp.append(" \"keep *_MEtoEDMConverter_*_*\"),") output_file_name = self. \ create_output_file_name(dataset_name) tmp.append(" fileName = \\") tmp.append(" cms.untracked.string(\"%s\")," % output_file_name) tmp.append(" dataset = cms.untracked.PSet(") tmp.append(" dataTier = cms.untracked.string(\"RECO\"),") tmp.append(" filterName = cms.untracked.string(\"\")") tmp.append(" )") tmp.append(" )") tmp.append("") tmp.append("# Additional output definition") tmp.append("process.out_step = cms.EndPath(process.output)") tmp.append("") tmp.append("# Schedule definition") tmp.append("process.schedule = cms.Schedule(process.out_step)") tmp.append("") config_contents = "\n".join(tmp) # End of create_me_extraction_config. return config_contents ########## ## def create_harvesting_config(self, dataset_name): ## """Create the Python harvesting configuration for a given job. ## NOTE: The reason to have a single harvesting configuration per ## sample is to be able to specify the GlobalTag corresponding to ## each sample. Since it has been decided that (apart from the ## prompt reco) datasets cannot contain runs with different ## GlobalTags, we don't need a harvesting config per run. ## NOTE: This is the place where we distinguish between ## single-step and two-step harvesting modes (at least for the ## Python job configuration). ## """ ## ### ## if self.harvesting_mode == "single-step": ## config_contents = self.create_harvesting_config_single_step(dataset_name) ## elif self.harvesting_mode == "two-step": ## config_contents = self.create_harvesting_config_two_step(dataset_name) ## else: ## # Impossible harvesting mode, we should never get here. ## assert False, "ERROR: unknown harvesting mode `%s'" % \ ## self.harvesting_mode ## ### ## # End of create_harvesting_config. ## return config_contents ########## def write_crab_config(self): """Write a CRAB job configuration Python file. """ self.logger.info("Writing CRAB configuration...") file_name_base = "crab.cfg" # Create CRAB configuration. crab_contents = self.create_crab_config() # Write configuration to file. 
crab_file_name = file_name_base try: crab_file = file(crab_file_name, "w") crab_file.write(crab_contents) crab_file.close() except IOError: self.logger.fatal("Could not write " \ "CRAB configuration to file `%s'" % \ crab_file_name) raise Error("ERROR: Could not write to file `%s'!" % \ crab_file_name) # End of write_crab_config. ########## def write_multicrab_config(self): """Write a multi-CRAB job configuration Python file. """ self.logger.info("Writing multi-CRAB configuration...") file_name_base = "multicrab.cfg" # Create multi-CRAB configuration. multicrab_contents = self.create_multicrab_config() # Write configuration to file. multicrab_file_name = file_name_base try: multicrab_file = file(multicrab_file_name, "w") multicrab_file.write(multicrab_contents) multicrab_file.close() except IOError: self.logger.fatal("Could not write " \ "multi-CRAB configuration to file `%s'" % \ multicrab_file_name) raise Error("ERROR: Could not write to file `%s'!" % \ multicrab_file_name) # End of write_multicrab_config. ########## def write_harvesting_config(self, dataset_name): """Write a harvesting job configuration Python file. NOTE: This knows nothing about single-step or two-step harvesting. That's all taken care of by create_harvesting_config. """ self.logger.debug("Writing harvesting configuration for `%s'..." % \ dataset_name) # Create Python configuration. config_contents = self.create_harvesting_config(dataset_name) # Write configuration to file. config_file_name = self. \ create_harvesting_config_file_name(dataset_name) try: config_file = file(config_file_name, "w") config_file.write(config_contents) config_file.close() except IOError: self.logger.fatal("Could not write " \ "harvesting configuration to file `%s'" % \ config_file_name) raise Error("ERROR: Could not write to file `%s'!" % \ config_file_name) # End of write_harvesting_config. ########## def write_me_extraction_config(self, dataset_name): """Write an ME-extraction configuration Python file. This `ME-extraction' (ME = Monitoring Element) is the first step of the two-step harvesting. """ self.logger.debug("Writing ME-extraction configuration for `%s'..." % \ dataset_name) # Create Python configuration. config_contents = self.create_me_extraction_config(dataset_name) # Write configuration to file. config_file_name = self. \ create_me_summary_config_file_name(dataset_name) try: config_file = file(config_file_name, "w") config_file.write(config_contents) config_file.close() except IOError: self.logger.fatal("Could not write " \ "ME-extraction configuration to file `%s'" % \ config_file_name) raise Error("ERROR: Could not write to file `%s'!" % \ config_file_name) # End of write_me_extraction_config. ########## def ref_hist_mappings_needed(self, dataset_name=None): """Check if we need to load and check the reference mappings. For data the reference histograms should be taken automatically from the GlobalTag, so we don't need any mappings. For RelVals we need to know a mapping to be used in the es_prefer code snippet (different references for each of the datasets.) WARNING: This implementation is a bit convoluted. """ # If no dataset name given, do everything, otherwise check # only this one dataset. 
if not dataset_name is None: data_type = self.datasets_information[dataset_name] \ ["datatype"] mappings_needed = (data_type == "mc") # DEBUG DEBUG DEBUG if not mappings_needed: assert data_type == "data" # DEBUG DEBUG DEBUG end else: tmp = [self.ref_hist_mappings_needed(dataset_name) \ for dataset_name in \ self.datasets_information.keys()] mappings_needed = (True in tmp) # End of ref_hist_mappings_needed. return mappings_needed ########## def load_ref_hist_mappings(self): """Load the reference histogram mappings from file. The dataset name to reference histogram name mappings are read from a text file specified in self.ref_hist_mappings_file_name. """ # DEBUG DEBUG DEBUG assert len(self.ref_hist_mappings) < 1, \ "ERROR Should not be RE-loading " \ "reference histogram mappings!" # DEBUG DEBUG DEBUG end self.logger.info("Loading reference histogram mappings " \ "from file `%s'" % \ self.ref_hist_mappings_file_name) mappings_lines = None try: mappings_file = file(self.ref_hist_mappings_file_name, "r") mappings_lines = mappings_file.readlines() mappings_file.close() except IOError: msg = "ERROR: Could not open reference histogram mapping "\ "file `%s'" % self.ref_hist_mappings_file_name self.logger.fatal(msg) raise Error(msg) ########## # The format we expect is: two white-space separated pieces # per line. The first the dataset name for which the reference # should be used, the second one the name of the reference # histogram in the database. for mapping in mappings_lines: # Skip comment lines. if not mapping.startswith("#"): mapping = mapping.strip() if len(mapping) > 0: mapping_pieces = mapping.split() if len(mapping_pieces) != 2: msg = "ERROR: The reference histogram mapping " \ "file contains a line I don't " \ "understand:\n %s" % mapping self.logger.fatal(msg) raise Error(msg) dataset_name = mapping_pieces[0].strip() ref_hist_name = mapping_pieces[1].strip() # We don't want people to accidentally specify # multiple mappings for the same dataset. Just # don't accept those cases. if dataset_name in self.ref_hist_mappings: msg = "ERROR: The reference histogram mapping " \ "file contains multiple mappings for " \ "dataset `%s'." self.logger.fatal(msg) raise Error(msg) # All is well that ends well. self.ref_hist_mappings[dataset_name] = ref_hist_name ########## self.logger.info(" Successfully loaded %d mapping(s)" % \ len(self.ref_hist_mappings)) max_len = max([len(i) for i in self.ref_hist_mappings.keys()]) for (map_from, map_to) in six.iteritems(self.ref_hist_mappings): self.logger.info(" %-*s -> %s" % \ (max_len, map_from, map_to)) # End of load_ref_hist_mappings. ########## def check_ref_hist_mappings(self): """Make sure all necessary reference histograms exist. Check that for each of the datasets to be processed a reference histogram is specified and that that histogram exists in the database. NOTE: There's a little complication here. Since this whole thing was designed to allow (in principle) harvesting of both data and MC datasets in one go, we need to be careful to check the availability fof reference mappings only for those datasets that need it. """ self.logger.info("Checking reference histogram mappings") for dataset_name in self.datasets_to_use: try: ref_hist_name = self.ref_hist_mappings[dataset_name] except KeyError: msg = "ERROR: No reference histogram mapping found " \ "for dataset `%s'" % \ dataset_name self.logger.fatal(msg) raise Error(msg) if not self.check_ref_hist_tag(ref_hist_name): msg = "Reference histogram tag `%s' " \ "(used for dataset `%s') does not exist!" 
% \ (ref_hist_name, dataset_name) self.logger.fatal(msg) raise Usage(msg) self.logger.info(" Done checking reference histogram mappings.") # End of check_ref_hist_mappings. ########## def build_datasets_information(self): """Obtain all information on the datasets that we need to run. Use DBS to figure out all required information on our datasets, like the run numbers and the GlobalTag. All information is stored in the datasets_information member variable. """ # Get a list of runs in the dataset. # NOTE: The harvesting has to be done run-by-run, so we # split up datasets based on the run numbers. Strictly # speaking this is not (yet?) necessary for Monte Carlo # since all those samples use run number 1. Still, this # general approach should work for all samples. # Now loop over all datasets in the list and process them. # NOTE: This processing has been split into several loops # to be easier to follow, sacrificing a bit of efficiency. self.datasets_information = {} self.logger.info("Collecting information for all datasets to process") dataset_names = sorted(self.datasets_to_use.keys()) for dataset_name in dataset_names: # Tell the user which dataset: nice with many datasets. sep_line = "-" * 30 self.logger.info(sep_line) self.logger.info(" `%s'" % dataset_name) self.logger.info(sep_line) runs = self.dbs_resolve_runs(dataset_name) self.logger.info(" found %d run(s)" % len(runs)) if len(runs) > 0: self.logger.debug(" run number(s): %s" % \ ", ".join([str(i) for i in runs])) else: # DEBUG DEBUG DEBUG # This should never happen after the DBS checks. self.logger.warning(" --> skipping dataset " "without any runs") assert False, "Panic: found a dataset without runs " \ "after DBS checks!" # DEBUG DEBUG DEBUG end cmssw_version = self.dbs_resolve_cmssw_version(dataset_name) self.logger.info(" found CMSSW version `%s'" % cmssw_version) # Figure out if this is data or MC. datatype = self.dbs_resolve_datatype(dataset_name) self.logger.info(" sample is data or MC? --> %s" % \ datatype) ### # Try and figure out the GlobalTag to be used. if self.globaltag is None: globaltag = self.dbs_resolve_globaltag(dataset_name) else: globaltag = self.globaltag self.logger.info(" found GlobalTag `%s'" % globaltag) # DEBUG DEBUG DEBUG if globaltag == "": # Actually we should not even reach this point, after # our dataset sanity checks. assert datatype == "data", \ "ERROR Empty GlobalTag for MC dataset!!!" # DEBUG DEBUG DEBUG end ### # DEBUG DEBUG DEBUG #tmp = self.dbs_check_dataset_spread_old(dataset_name) # DEBUG DEBUG DEBUG end sites_catalog = self.dbs_check_dataset_spread(dataset_name) # Extract the total event counts. num_events = {} for run_number in sites_catalog.keys(): num_events[run_number] = sites_catalog \ [run_number]["all_sites"] del sites_catalog[run_number]["all_sites"] # Extract the information about whether or not datasets # are mirrored. mirror_catalog = {} for run_number in sites_catalog.keys(): mirror_catalog[run_number] = sites_catalog \ [run_number]["mirrored"] del sites_catalog[run_number]["mirrored"] # BUG BUG BUG # I think I could now get rid of that and just fill the # "sites" entry with the `inverse' of this # num_events_catalog(?). 
#num_sites = self.dbs_resolve_dataset_number_of_sites(dataset_name) #sites_catalog = self.dbs_check_dataset_spread(dataset_name) #sites_catalog = dict(zip(num_events_catalog.keys(), # [[j for i in num_events_catalog.values() for j in i.keys()]])) # BUG BUG BUG end ## # DEBUG DEBUG DEBUG ## # This is probably only useful to make sure we don't muck ## # things up, right? ## # Figure out across how many sites this sample has been spread. ## if num_sites == 1: ## self.logger.info(" sample is contained at a single site") ## else: ## self.logger.info(" sample is spread across %d sites" % \ ## num_sites) ## if num_sites < 1: ## # NOTE: This _should not_ happen with any valid dataset. ## self.logger.warning(" --> skipping dataset which is not " \ ## "hosted anywhere") ## # DEBUG DEBUG DEBUG end # Now put everything in a place where we can find it again # if we need it. self.datasets_information[dataset_name] = {} self.datasets_information[dataset_name]["runs"] = runs self.datasets_information[dataset_name]["cmssw_version"] = \ cmssw_version self.datasets_information[dataset_name]["globaltag"] = globaltag self.datasets_information[dataset_name]["datatype"] = datatype self.datasets_information[dataset_name]["num_events"] = num_events self.datasets_information[dataset_name]["mirrored"] = mirror_catalog self.datasets_information[dataset_name]["sites"] = sites_catalog # Each run of each dataset has a different CASTOR output # path. castor_path_common = self.create_castor_path_name_common(dataset_name) self.logger.info(" output will go into `%s'" % \ castor_path_common) castor_paths = dict(list(zip(runs, [self.create_castor_path_name_special(dataset_name, i, castor_path_common) \ for i in runs]))) for path_name in castor_paths.values(): self.logger.debug(" %s" % path_name) self.datasets_information[dataset_name]["castor_path"] = \ castor_paths # End of build_datasets_information. ########## def show_exit_message(self): """Tell the user what to do now, after this part is done. This should provide the user with some (preferably copy-pasteable) instructions on what to do now with the setups and files that have been created. """ # TODO TODO TODO # This could be improved a bit. # TODO TODO TODO end sep_line = "-" * 60 self.logger.info("") self.logger.info(sep_line) self.logger.info(" Configuration files have been created.") self.logger.info(" From here on please follow the usual CRAB instructions.") self.logger.info(" Quick copy-paste instructions are shown below.") self.logger.info(sep_line) self.logger.info("") self.logger.info(" Create all CRAB jobs:") self.logger.info(" multicrab -create") self.logger.info("") self.logger.info(" Submit all CRAB jobs:") self.logger.info(" multicrab -submit") self.logger.info("") self.logger.info(" Check CRAB status:") self.logger.info(" multicrab -status") self.logger.info("") self.logger.info("") self.logger.info(" For more information please see the CMS Twiki:") self.logger.info(" %s" % twiki_url) self.logger.info(sep_line) # If there were any jobs for which we could not find a # matching site show a warning message about that. if not self.all_sites_found: self.logger.warning(" For some of the jobs no matching " \ "site could be found") self.logger.warning(" --> please scan your multicrab.cfg" \ "for occurrences of `%s'." % \ self.no_matching_site_found_str) self.logger.warning(" You will have to fix those " \ "by hand, sorry.") # End of show_exit_message. ########## def run(self): "Main entry point of the CMS harvester." # Start with a positive thought. 
exit_code = 0 try: try: # Parse all command line options and arguments self.parse_cmd_line_options() # and check that they make sense. self.check_input_status() # Check if CMSSW is setup. self.check_cmssw() # Check if DBS is setup, self.check_dbs() # and if all is fine setup the Python side. self.setup_dbs() # Fill our dictionary with all the required info we # need to understand harvesting jobs. This needs to be # done after the CMSSW version is known. self.setup_harvesting_info() # Obtain list of dataset names to consider self.build_dataset_use_list() # and the list of dataset names to ignore. self.build_dataset_ignore_list() # The same for the runs lists (if specified). self.build_runs_use_list() self.build_runs_ignore_list() # Process the list of datasets to ignore and fold that # into the list of datasets to consider. # NOTE: The run-based selection is done later since # right now we don't know yet which runs a dataset # contains. self.process_dataset_ignore_list() # Obtain all required information on the datasets, # like run numbers and GlobalTags. self.build_datasets_information() if self.use_ref_hists and \ self.ref_hist_mappings_needed(): # Load the dataset name to reference histogram # name mappings from file. self.load_ref_hist_mappings() # Now make sure that for all datasets we want to # process there is a reference defined. Otherwise # just bomb out before wasting any more time. self.check_ref_hist_mappings() else: self.logger.info("No need to load reference " \ "histogram mappings file") # OBSOLETE OBSOLETE OBSOLETE ## # TODO TODO TODO ## # Need to think about where this should go, but ## # somewhere we have to move over the fact that we want ## # to process all runs for each dataset that we're ## # considering. This basically means copying over the ## # information from self.datasets_information[]["runs"] ## # to self.datasets_to_use[]. ## for dataset_name in self.datasets_to_use.keys(): ## self.datasets_to_use[dataset_name] = self.datasets_information[dataset_name]["runs"] ## # TODO TODO TODO end # OBSOLETE OBSOLETE OBSOLETE end self.process_runs_use_and_ignore_lists() # If we've been asked to sacrifice some parts of # spread-out samples in order to be able to partially # harvest them, we'll do that here. if self.harvesting_mode == "single-step-allow-partial": self.singlify_datasets() # Check dataset name(s) self.check_dataset_list() # and see if there is anything left to do. if len(self.datasets_to_use) < 1: self.logger.info("After all checks etc. " \ "there are no datasets (left?) " \ "to process") else: self.logger.info("After all checks etc. we are left " \ "with %d dataset(s) to process " \ "for a total of %d runs" % \ (len(self.datasets_to_use), sum([len(i) for i in \ self.datasets_to_use.values()]))) # NOTE: The order in which things are done here is # important. At the end of the job, independent on # how it ends (exception, CTRL-C, normal end) the # book keeping is written to file. At that time it # should be clear which jobs are done and can be # submitted. This means we first create the # general files, and then the per-job config # files. # TODO TODO TODO # It would be good to modify the book keeping a # bit. Now we write the crab.cfg (which is the # same for all samples and runs) and the # multicrab.cfg (which contains blocks for all # runs of all samples) without updating our book # keeping. The only place we update the book # keeping is after writing the harvesting config # file for a given dataset. 
Since there is only # one single harvesting configuration for each # dataset, we have no book keeping information on # a per-run basis. # TODO TODO TODO end # Check if the CASTOR output area exists. If # necessary create it. self.create_and_check_castor_dirs() # Create one crab and one multicrab configuration # for all jobs together. self.write_crab_config() self.write_multicrab_config() # Loop over all datasets and create harvesting # config files for all of them. One harvesting # config per dataset is enough. The same file will # be re-used by CRAB for each run. # NOTE: We always need a harvesting # configuration. For the two-step harvesting we # also need a configuration file for the first # step: the monitoring element extraction. for dataset_name in self.datasets_to_use.keys(): try: self.write_harvesting_config(dataset_name) if self.harvesting_mode == "two-step": self.write_me_extraction_config(dataset_name) except: # Doh! Just re-raise the damn thing. raise else: ## tmp = self.datasets_information[dataset_name] \ ## ["num_events"] tmp = {} for run_number in self.datasets_to_use[dataset_name]: tmp[run_number] = self.datasets_information \ [dataset_name]["num_events"][run_number] if dataset_name in self.book_keeping_information: self.book_keeping_information[dataset_name].update(tmp) else: self.book_keeping_information[dataset_name] = tmp # Explain the user what to do now. self.show_exit_message() except Usage as err: # self.logger.fatal(err.msg) # self.option_parser.print_help() pass except Error as err: # self.logger.fatal(err.msg) exit_code = 1 except Exception as err: # Hmmm, ignore keyboard interrupts from the # user. These are not a `serious problem'. We also # skip SystemExit, which is the exception thrown when # one calls sys.exit(). This, for example, is done by # the option parser after calling print_help(). We # also have to catch all `no such option' # complaints. Everything else we catch here is a # `serious problem'. if isinstance(err, SystemExit): self.logger.fatal(err.code) elif not isinstance(err, KeyboardInterrupt): self.logger.fatal("!" * 50) self.logger.fatal(" This looks like a serious problem.") self.logger.fatal(" If you are sure you followed all " \ "instructions") self.logger.fatal(" please copy the below stack trace together") self.logger.fatal(" with a description of what you were doing to") self.logger.fatal(" [email protected].") self.logger.fatal(" %s" % self.ident_string()) self.logger.fatal("!" * 50) self.logger.fatal(str(err)) import traceback traceback_string = traceback.format_exc() for line in traceback_string.split("\n"): self.logger.fatal(line) self.logger.fatal("!" * 50) exit_code = 2 # This is the stuff that we should really do, no matter # what. Of course cleaning up after ourselves is also done # from this place. This alsokeeps track of the book keeping # so far. (This means that if half of the configuration files # were created before e.g. the disk was full, we should still # have a consistent book keeping file. finally: self.cleanup() ### if self.crab_submission == True: os.system("multicrab -create") os.system("multicrab -submit") # End of run. return exit_code # End of CMSHarvester. ########################################################################### ## Main entry point. ########################################################################### if __name__ == "__main__": "Main entry point for harvesting." CMSHarvester().run() # Done. ###########################################################################
[]
[]
[ "SCRAM_ARCH", "DBSCMD_HOME", "CMSSW_VERSION" ]
[]
["SCRAM_ARCH", "DBSCMD_HOME", "CMSSW_VERSION"]
python
3
0
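The row above lists three environment variables read with constant names (SCRAM_ARCH, DBSCMD_HOME, CMSSW_VERSION) in the harvester script. A minimal Python sketch of that lookup pattern follows; the variable names are taken from the row, while the loop and the error message are illustrative assumptions, not part of the original script.

import os

# Sketch only: read the CMSSW-related variables the row above lists and
# fail with a clear message if the environment has not been set up.
for name in ("SCRAM_ARCH", "DBSCMD_HOME", "CMSSW_VERSION"):
    value = os.environ.get(name)
    if value is None:
        raise RuntimeError(f"{name} is not set; source the CMSSW environment first")
    print(f"{name}={value}")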
enterprise/dev/ci/ci/pipeline-steps.go
package ci import ( "fmt" "os" "path/filepath" "strings" bk "github.com/sourcegraph/sourcegraph/internal/buildkite" ) // Verifies the docs formatting and builds the `docsite` command. func addDocs(pipeline *bk.Pipeline) { pipeline.AddStep(":memo: Check and build docsite", bk.Cmd("./dev/ci/yarn-run.sh prettier-check"), bk.Cmd("./dev/check/docsite.sh")) } // Adds the static check test step. func addCheck(pipeline *bk.Pipeline) { pipeline.AddStep(":white_check_mark: Misc Linters", bk.Cmd("./dev/check/all.sh")) } // Adds the lint test step. func addLint(pipeline *bk.Pipeline) { // If we run all lints together it is our slow step (5m). So we split it // into two and try balance the runtime. yarn is a fixed cost so we always // pay it on a step. Aim for around 3m. // // Random sample of timings: // // - yarn 41s // - eslint 137s // - build-ts 60s // - prettier 29s // - stylelint 7s // - graphql-lint 1s pipeline.AddStep(":eslint: Lint all Typescript", bk.Cmd("dev/ci/yarn-run.sh build-ts all:eslint")) // eslint depends on build-ts pipeline.AddStep(":lipstick: :lint-roller: :stylelint: :graphql:", // TODO: Add header - Similar to the previous step bk.Cmd("dev/ci/yarn-run.sh prettier-check all:stylelint graphql-lint all:tsgql")) } // Adds steps for the OSS and Enterprise web app builds. Runs the web app tests. func addWebApp(pipeline *bk.Pipeline) { // Webapp build pipeline.AddStep(":webpack::globe_with_meridians: Build", bk.Cmd("dev/ci/yarn-build.sh client/web"), bk.Env("NODE_ENV", "production"), bk.Env("ENTERPRISE", "0")) // Webapp enterprise build pipeline.AddStep(":webpack::globe_with_meridians::moneybag: Enterprise build", bk.Cmd("dev/ci/yarn-build.sh client/web"), bk.Env("NODE_ENV", "production"), bk.Env("ENTERPRISE", "1")) // Webapp tests pipeline.AddStep(":jest::globe_with_meridians: Test", bk.Cmd("dev/ci/yarn-test.sh client/web"), bk.Cmd("bash <(curl -s https://codecov.io/bash) -c -F typescript -F unit")) } // Builds and tests the browser extension. func addBrowserExt(pipeline *bk.Pipeline) { // Browser extension build pipeline.AddStep(":webpack::chrome: Build browser extension", bk.Cmd("dev/ci/yarn-build.sh client/browser")) // Browser extension tests pipeline.AddStep(":jest::chrome: Test browser extension", bk.Cmd("dev/ci/yarn-test.sh client/browser"), bk.Cmd("bash <(curl -s https://codecov.io/bash) -c -F typescript -F unit")) } // Adds the shared frontend tests (shared between the web app and browser extension). 
func addSharedTests(c Config) func(pipeline *bk.Pipeline) { return func(pipeline *bk.Pipeline) { // Client integration tests pipeline.AddStep(":puppeteer::electric_plug: Puppeteer tests", bk.Env("PUPPETEER_SKIP_CHROMIUM_DOWNLOAD", ""), bk.Env("ENTERPRISE", "1"), bk.Env("PERCY_ON", "true"), bk.Cmd("COVERAGE_INSTRUMENT=true dev/ci/yarn-run.sh build-web"), bk.Cmd("yarn percy exec -- yarn run cover-integration"), bk.Cmd("yarn nyc report -r json"), bk.Cmd("bash <(curl -s https://codecov.io/bash) -c -F typescript -F integration"), bk.ArtifactPaths("./puppeteer/*.png")) // Upload storybook to Chromatic chromaticCommand := "yarn chromatic --exit-zero-on-changes --exit-once-uploaded" if !c.isPR() { chromaticCommand += " --auto-accept-changes" } pipeline.AddStep(":chromatic: Upload storybook to Chromatic", bk.AutomaticRetry(5), bk.Cmd("yarn --mutex network --frozen-lockfile --network-timeout 60000"), bk.Cmd("yarn gulp generate"), bk.Cmd(chromaticCommand)) // Shared tests pipeline.AddStep(":jest: Test shared client code", bk.Cmd("dev/ci/yarn-test.sh client/shared"), bk.Cmd("bash <(curl -s https://codecov.io/bash) -c -F typescript -F unit")) } } // Adds PostgreSQL backcompat tests. func addPostgresBackcompat(pipeline *bk.Pipeline) { // TODO: We do not test Postgres DB backcompat anymore. } // Adds the Go test step. func addGoTests(pipeline *bk.Pipeline) { pipeline.AddStep(":go: Test", bk.Cmd("./dev/ci/go-test.sh"), bk.Cmd("bash <(curl -s https://codecov.io/bash) -c -F go")) } // Builds the OSS and Enterprise Go commands. func addGoBuild(pipeline *bk.Pipeline) { pipeline.AddStep(":go: Build", bk.Cmd("./dev/ci/go-build.sh"), ) } // Lints the Dockerfiles. func addDockerfileLint(pipeline *bk.Pipeline) { pipeline.AddStep(":docker: Lint", bk.Cmd("./dev/ci/docker-lint.sh")) } // Adds backend integration tests step. func addBackendIntegrationTests(c Config) func(*bk.Pipeline) { return func(pipeline *bk.Pipeline) { if !c.isMasterDryRun && c.branch != "master" && c.branch != "main" { return } pipeline.AddStep(":chains: Backend integration tests", bk.Cmd("pushd enterprise"), bk.Cmd("./cmd/server/pre-build.sh"), bk.Cmd("./cmd/server/build.sh"), bk.Cmd("popd"), bk.Cmd("./dev/ci/backend-integration.sh"), bk.Cmd(`docker image rm -f "$IMAGE"`), ) } } func addBrowserExtensionE2ESteps(pipeline *bk.Pipeline) { for _, browser := range []string{"chrome"} { // Run e2e tests pipeline.AddStep(fmt.Sprintf(":%s: E2E for %s extension", browser, browser), bk.Env("PUPPETEER_SKIP_CHROMIUM_DOWNLOAD", ""), bk.Env("EXTENSION_PERMISSIONS_ALL_URLS", "true"), bk.Env("BROWSER", browser), bk.Env("LOG_BROWSER_CONSOLE", "true"), bk.Env("SOURCEGRAPH_BASE_URL", "https://sourcegraph.com"), bk.Cmd("yarn --frozen-lockfile --network-timeout 60000"), bk.Cmd("pushd client/browser"), bk.Cmd("yarn -s run build"), bk.Cmd("yarn -s mocha ./src/end-to-end/github.test.ts ./src/end-to-end/gitlab.test.ts"), bk.Cmd("popd"), bk.ArtifactPaths("./puppeteer/*.png")) } } // Release the browser extension. 
func addBrowserExtensionReleaseSteps(pipeline *bk.Pipeline) { addBrowserExtensionE2ESteps(pipeline) pipeline.AddWait() // Release to the Chrome Webstore pipeline.AddStep(":rocket::chrome: Extension release", bk.Cmd("yarn --frozen-lockfile --network-timeout 60000"), bk.Cmd("pushd client/browser"), bk.Cmd("yarn -s run build"), bk.Cmd("yarn release:chrome"), bk.Cmd("popd")) // Build and self sign the FF add-on and upload it to a storage bucket pipeline.AddStep(":rocket::firefox: Extension release", bk.Cmd("yarn --frozen-lockfile --network-timeout 60000"), bk.Cmd("pushd client/browser"), bk.Cmd("yarn release:ff"), bk.Cmd("popd")) // Release to npm pipeline.AddStep(":rocket::npm: NPM Release", bk.Cmd("yarn --frozen-lockfile --network-timeout 60000"), bk.Cmd("pushd client/browser"), bk.Cmd("yarn -s run build"), bk.Cmd("yarn release:npm"), bk.Cmd("popd")) } // Adds a Buildkite pipeline "Wait". func wait(pipeline *bk.Pipeline) { pipeline.AddWait() } // Trigger the async pipeline to run. func triggerAsync(c Config) func(*bk.Pipeline) { env := copyEnv( "BUILDKITE_PULL_REQUEST", "BUILDKITE_PULL_REQUEST_BASE_BRANCH", "BUILDKITE_PULL_REQUEST_REPO", ) return func(pipeline *bk.Pipeline) { pipeline.AddTrigger(":snail: Trigger Async", bk.Trigger("sourcegraph-async"), bk.Async(true), bk.Build(bk.BuildOptions{ Message: os.Getenv("BUILDKITE_MESSAGE"), Commit: c.commit, Branch: c.branch, Env: env, }), ) } } func triggerE2EandQA(c Config, commonEnv map[string]string) func(*bk.Pipeline) { var async bool if c.branch == "main" { async = true } else { async = false } env := copyEnv( "BUILDKITE_PULL_REQUEST", "BUILDKITE_PULL_REQUEST_BASE_BRANCH", "BUILDKITE_PULL_REQUEST_REPO", ) env["COMMIT_SHA"] = commonEnv["COMMIT_SHA"] env["DATE"] = commonEnv["DATE"] env["VERSION"] = commonEnv["VERSION"] env["CI_DEBUG_PROFILE"] = commonEnv["CI_DEBUG_PROFILE"] // Set variables that indicate the tag for 'us.gcr.io/sourcegraph-dev' images built // from this CI run's commit, and credentials to access them. env["CANDIDATE_VERSION"] = candidateImageTag(c) env["VAGRANT_SERVICE_ACCOUNT"] = "[email protected]" // Test upgrades from mininum upgradeable Sourcegraph version env["MINIMUM_UPGRADEABLE_VERSION"] = "3.20.0" env["DOCKER_IMAGES_TXT"] = strings.Join(SourcegraphDockerImages, "\n") return func(pipeline *bk.Pipeline) { if !c.shouldRunE2EandQA() { return } pipeline.AddTrigger(":chromium: Trigger E2E", bk.Trigger("sourcegraph-e2e"), bk.Async(async), bk.Build(bk.BuildOptions{ Message: os.Getenv("BUILDKITE_MESSAGE"), Commit: c.commit, Branch: c.branch, Env: env, }), ) pipeline.AddTrigger(":chromium: Trigger QA", bk.Trigger("qa"), bk.Async(async), bk.Build(bk.BuildOptions{ Message: os.Getenv("BUILDKITE_MESSAGE"), Commit: c.commit, Branch: c.branch, Env: env, }), ) } } func copyEnv(keys ...string) map[string]string { m := map[string]string{} for _, k := range keys { if v, ok := os.LookupEnv(k); ok { m[k] = v } } return m } // Build all relevant Docker images for Sourcegraph (for example, candidates and final // images), given the current CI case (e.g., "tagged release", "release branch", // "master branch", etc.) 
// // Notes: // // - Publishing of `insiders` implies deployment // - See `images.go` for more details on what images get built and where they get published func addDockerImages(c Config, final bool) func(*bk.Pipeline) { addDockerImage := func(c Config, app string, insiders bool) func(*bk.Pipeline) { if !final { return addCandidateDockerImage(c, app) } return addFinalDockerImage(c, app, insiders) } return func(pipeline *bk.Pipeline) { switch { // build all images for tagged releases case c.taggedRelease: for _, dockerImage := range SourcegraphDockerImages { addDockerImage(c, dockerImage, false)(pipeline) } // replicates `main` build but does not deploy `insiders` images case c.isMasterDryRun: for _, dockerImage := range SourcegraphDockerImages { addDockerImage(c, dockerImage, false)(pipeline) } // deploy `insiders` images for `main` case c.branch == "main": for _, dockerImage := range SourcegraphDockerImages { addDockerImage(c, dockerImage, true)(pipeline) } // ensure candidate images are available for testing case c.shouldRunE2EandQA(): for _, dockerImage := range SourcegraphDockerImages { addDockerImage(c, dockerImage, false)(pipeline) } // only build candidate image for the specified image in the branch name // see https://about.sourcegraph.com/handbook/engineering/deployments/testing#building-docker-images-for-a-specific-branch case strings.HasPrefix(c.branch, "docker-images-patch/"): addDockerImage(c, c.branch[20:], false)(pipeline) } } } // Build a candidate docker image that will re-tagged with the final // tags once the e2e tests pass. func addCandidateDockerImage(c Config, app string) func(*bk.Pipeline) { return func(pipeline *bk.Pipeline) { image := strings.ReplaceAll(app, "/", "-") localImage := "sourcegraph/" + image + ":" + c.version cmds := []bk.StepOpt{ bk.Cmd(fmt.Sprintf(`echo "Building candidate %s image..."`, app)), bk.Env("DOCKER_BUILDKIT", "1"), bk.Env("IMAGE", localImage), bk.Env("VERSION", c.version), bk.Cmd("yes | gcloud auth configure-docker"), } if _, err := os.Stat(filepath.Join("docker-images", app)); err == nil { // Building Docker image located under $REPO_ROOT/docker-images/ cmds = append(cmds, bk.Cmd(filepath.Join("docker-images", app, "build.sh"))) } else { // Building Docker images located under 4REPO_ROOT/cmd/ cmdDir := func() string { if _, err := os.Stat(filepath.Join("enterprise/cmd", app)); err != nil { fmt.Fprintf(os.Stderr, "github.com/sourcegraph/sourcegraph/enterprise/cmd/%s does not exist so building github.com/sourcegraph/sourcegraph/cmd/%s instead\n", app, app) return "cmd/" + app } return "enterprise/cmd/" + app }() preBuildScript := cmdDir + "/pre-build.sh" if _, err := os.Stat(preBuildScript); err == nil { cmds = append(cmds, bk.Cmd(preBuildScript)) } cmds = append(cmds, bk.Cmd(cmdDir+"/build.sh")) } devImage := fmt.Sprintf("%s/%s", SourcegraphDockerDevRegistry, image) devTag := candidateImageTag(c) cmds = append(cmds, // Retag the local image for dev registry bk.Cmd(fmt.Sprintf("docker tag %s %s:%s", localImage, devImage, devTag)), // Publish tagged image bk.Cmd(fmt.Sprintf("docker push %s:%s", devImage, devTag)), ) pipeline.AddStep(fmt.Sprintf(":docker: :construction: %s", app), cmds...) } } // Tag and push final Docker image for the service defined by `app` // after the e2e tests pass. 
func addFinalDockerImage(c Config, app string, insiders bool) func(*bk.Pipeline) { return func(pipeline *bk.Pipeline) { image := strings.ReplaceAll(app, "/", "-") devImage := fmt.Sprintf("%s/%s", SourcegraphDockerDevRegistry, image) publishImage := fmt.Sprintf("%s/%s", SourcegraphDockerPublishRegistry, image) var images []string for _, image := range []string{publishImage, devImage} { if app != "server" || c.taggedRelease || c.patch || c.patchNoTest { images = append(images, fmt.Sprintf("%s:%s", image, c.version)) } if app == "server" && c.releaseBranch { images = append(images, fmt.Sprintf("%s:%s-insiders", image, c.branch)) } if insiders { images = append(images, fmt.Sprintf("%s:insiders", image)) } } candidateImage := fmt.Sprintf("%s:%s", devImage, candidateImageTag(c)) cmd := fmt.Sprintf("./dev/ci/docker-publish.sh %s %s", candidateImage, strings.Join(images, " ")) pipeline.AddStep(fmt.Sprintf(":docker: :white_check_mark: %s", app), bk.Cmd(cmd)) } }
[ "\"BUILDKITE_MESSAGE\"", "\"BUILDKITE_MESSAGE\"", "\"BUILDKITE_MESSAGE\"" ]
[]
[ "BUILDKITE_MESSAGE" ]
[]
["BUILDKITE_MESSAGE"]
go
1
0
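The Go pipeline above reads BUILDKITE_MESSAGE with os.Getenv and copies other Buildkite variables only when they are present (os.LookupEnv in copyEnv). A small Python sketch of the same copy-if-present pattern; the helper name and the trigger_env variable are assumptions, only the BUILDKITE_* names come from the file.

import os

def copy_env(*keys):
    # Copy only the variables that are actually set; this mirrors the intent
    # of the copyEnv helper in the Go pipeline above (sketch, not the original).
    return {k: os.environ[k] for k in keys if k in os.environ}

trigger_env = copy_env(
    "BUILDKITE_PULL_REQUEST",
    "BUILDKITE_PULL_REQUEST_BASE_BRANCH",
    "BUILDKITE_PULL_REQUEST_REPO",
)
message = os.getenv("BUILDKITE_MESSAGE", "")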
examples/high-level/message_event.py
# Example of sending and receiving an event after pressing the Callback button
# Documentation: https://vk.com/dev/bots_docs_5?f=4.4.%20Callback-кнопки

import logging
import os

from vkbottle import Callback, GroupEventType, Keyboard
from vkbottle.bot import Bot, Message, MessageEvent, rules

bot = Bot(os.environ["TOKEN"])
logging.basicConfig(level=logging.INFO)

KEYBOARD = (
    Keyboard(one_time=False, inline=True)
    .add(Callback("Показать текст", payload={"cmd": "snackbar"}))
    .row()
    .add(Callback("Дата регистрации (tool42)", payload={"cmd": "app"}))
    .row()
    .add(Callback("Закрыть", payload={"cmd": "close"}))
    .get_json()
)


@bot.on.private_message(text="/callback")
async def send_callback_button(message: Message):
    await message.answer("Лови!", keyboard=KEYBOARD)


@bot.on.raw_event(
    rules.PayloadRule({"cmd": "snackbar"}),
    event=GroupEventType.MESSAGE_EVENT,
    dataclass=MessageEvent,
)
async def show_snackbar(event: MessageEvent):
    await event.show_snackbar("Сейчас я исчезну")


@bot.on.raw_event(
    rules.PayloadRule({"cmd": "app"}),
    event=GroupEventType.MESSAGE_EVENT,
    dataclass=MessageEvent,
)
async def open_app(event: MessageEvent):
    await event.open_app(6798836, "reg", event.user_id)


@bot.on.raw_event(
    rules.PayloadRule({"cmd": "close"}),
    event=GroupEventType.MESSAGE_EVENT,
    dataclass=MessageEvent,
)
async def edit_message(event: MessageEvent):
    await event.edit_message("Окей")


bot.run_forever()
[]
[]
[ "TOKEN" ]
[]
["TOKEN"]
python
1
0
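The bot example above crashes with a bare KeyError at import time if TOKEN is unset. A hedged sketch of a slightly friendlier startup check; the error message and the use of sys.exit are assumptions added for illustration, not part of the example.

import os
import sys

token = os.environ.get("TOKEN")
if not token:
    # Fail with a readable message instead of a bare KeyError traceback.
    sys.exit("TOKEN is not set; export the group token before starting the bot")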
backend/relimas_29009/wsgi.py
""" WSGI config for relimas_29009 project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "relimas_29009.settings") application = get_wsgi_application()
[]
[]
[]
[]
[]
python
0
0
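The environment columns for this row are empty even though the file touches os.environ; apparently os.environ.setdefault, which supplies a default value rather than reading one, is not counted as an environment-variable read. A minimal sketch of the distinction; the explicit read on the last lines is an illustrative addition, not part of the WSGI file.

import os

# Write path: setdefault only provides a fallback value.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "relimas_29009.settings")

# Read path: an explicit lookup like this is what the columns would record.
settings_module = os.environ["DJANGO_SETTINGS_MODULE"]
print(settings_module)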
client/src/main/java/me/retrodaredevil/solarthing/config/options/AnalyticsOption.java
package me.retrodaredevil.solarthing.config.options;

public interface AnalyticsOption extends ProgramOptions {
    boolean isAnalyticsOptionEnabled();

    default boolean isAnalyticsEnabled() {
        return isAnalyticsOptionEnabled() && System.getenv("ANALYTICS_DISABLED") == null;
    }

    boolean DEFAULT_IS_ANALYTICS_ENABLED = true;
    String PROPERTY_NAME = "analytics_enabled";
}
[ "\"ANALYTICS_DISABLED\"" ]
[]
[ "ANALYTICS_DISABLED" ]
[]
["ANALYTICS_DISABLED"]
java
1
0
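The interface above treats ANALYTICS_DISABLED as a presence flag: analytics stays enabled only while the variable is absent. A minimal Python sketch of the same presence-only check; the function name is an assumption, the variable name comes from the row.

import os

def analytics_enabled(option_enabled: bool) -> bool:
    # Mirrors the Java default method above: any value, even an empty string,
    # counts as "disabled", because only the variable's presence is checked.
    return option_enabled and os.environ.get("ANALYTICS_DISABLED") is None

print(analytics_enabled(True))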
lib_sources/owl/scripts/util.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import os import os.path as path import subprocess import sys import owlpy.defaults as owl_defaults import owlpy.formula as owl_formula import owlpy.tool as owl_tool def _test(args): if len(args) > 2: print("Usage: util.py test <test names> <dataset>?") sys.exit(1) database = owl_defaults.get_test_path() test_names = args[0].split(";") test_set_override = None if len(args) is 2: test_set_override = args[1] test_config = owl_defaults.load_json(database) for test_name in test_names: if test_name not in test_config["tests"]: raise KeyError("Unknown test case {0!s}".format(test_name)) defaults_json = test_config["defaults"] dataset_json = test_config["dataset"] print("Running tests for datasets " + ", ".join(test_names)) print() sys.stdout.flush() for test_name in test_names: test_json = test_config["tests"][test_name] if type(test_json) is str: test_json = {"tools": [test_json]} for key, value in defaults_json.items(): if key not in test_json: test_json[key] = value reference = test_json["reference"] if type(test_json["tools"]) is str: tools = [test_json["tools"]] else: tools = test_json["tools"] test_data_sets = test_json["data"] if type(test_data_sets) is str: if test_data_sets in dataset_json: test_data_sets = dataset_json[test_data_sets] else: test_data_sets = [test_data_sets] test_arguments = [owl_defaults.get_script_path("ltlcross-run.sh"), reference["name"], " ".join(reference["exec"])] loaded_tools = [] def load_tool(tool_name): if type(tool_name) is not str: raise TypeError("Unknown tool type {0!s}".format(type(tool_name))) tool_database = owl_defaults.load_json(owl_defaults.get_tool_path()) return owl_tool.get_tool(tool_database, tool_name) if type(tools) is dict: for name, tool_description in tools.items(): loaded_tools.append((name, load_tool(tool_description))) elif type(tools) is list: for tool_description in tools: tool = load_tool(tool_description) loaded_tools.append(tool) elif type(tools) is str: tool = load_tool(tools) loaded_tools.append(tool) else: raise TypeError("Unknown tools type {0!s}".format(type(tools))) enable_server = True port = 6060 servers = {} for test_tool in loaded_tools: if type(test_tool) is tuple: tool_test_name, loaded_tool = test_tool else: loaded_tool = test_tool tool_test_name = loaded_tool.get_name() if loaded_tool.flags and len(loaded_tools) > 1: tool_test_name = tool_test_name + "#" + ",".join(loaded_tool.flags.keys()) test_arguments.append("-t") test_arguments.append(tool_test_name) if type(loaded_tool) is owl_tool.OwlTool: if enable_server: servers[port] = loaded_tool.get_server_execution(port) test_arguments.append("\"build/bin/owl-client\"" + " localhost " + str(port) + " %f") port += 1 else: test_arguments.append(" ".join(loaded_tool.get_input_execution("%f"))) elif type(loaded_tool) is owl_tool.SpotTool: test_arguments.append(" ".join(loaded_tool.get_input_execution("%f"))) else: raise TypeError("Unknown tool type {0!s}".format(type(loaded_tool))) formulas_json = owl_defaults.load_json(owl_defaults.get_formula_path()) formula_sets = owl_formula.read_formula_sets(formulas_json) test_formula_sets = [] if test_set_override is None: for data_set in test_data_sets: if type(data_set) is dict: if "name" not in data_set: raise KeyError("No dataset name provided") formula_set_name = data_set["name"] formula_set_det = data_set.get("determinize", False) elif type(data_set) is str: formula_set_name = data_set formula_set_det = False else: raise TypeError("Unknown specification format") 
test_formula_sets.append((formula_set_name, formula_set_det)) else: test_formula_sets.append((test_set_override, False)) for (formula_set_name, formula_set_det) in test_formula_sets: if formula_set_name not in formula_sets: raise KeyError("Unknown formula set {0!s}".format(formula_set_name)) if formula_set_det: test_arguments.append("-d") test_arguments.append(formula_set_name) sub_env = os.environ.copy() sub_env["JAVA_OPTS"] = "-enableassertions -Xss64M" server_processes = {} if servers: print("Servers:") for server in servers.values(): print(" ".join(server)) print() print() sys.stdout.flush() for port, server in servers.items(): server_process = subprocess.Popen(server, stdin=subprocess.DEVNULL, stdout=subprocess.DEVNULL, stderr=None, env=sub_env) server_processes[port] = server_process import socket import time from contextlib import closing for port, process in server_processes.items(): while True: with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock: if sock.connect_ex(('localhost', port)) == 0: break if process.poll() is not None: sys.exit(1) time.sleep(0.25) process = subprocess.run(test_arguments, env=sub_env) for server_process in server_processes.values(): server_process.terminate() for server_process in server_processes.values(): try: server_process.wait(timeout=10) except subprocess.TimeoutExpired: server_process.kill() time.sleep(0.5) if process.returncode: sys.exit(process.returncode) sys.exit(0) def _formula(args): if len(args) > 1 and path.exists(args[0]): database = args[0] sets = args[1:] else: database = owl_defaults.get_formula_path() sets = args formulas = owl_formula.read_formula_sets(owl_defaults.load_json(database)) for set_name in sets: if set_name not in formulas: raise KeyError("Unknown set {0!s}".format(set_name)) for set_name in sets: for formula in formulas[set_name]: print(formula) sys.exit(0) def _tool(args): if len(args) > 2: print("Usage: util.py tool <database>? 
<tool name>") sys.exit(1) if len(args) == 2: database = args[0] tool_name = args[1] else: database = owl_defaults.get_tool_path() tool_name = args[0] for line in owl_tool.get_tool(owl_defaults.load_json(database), tool_name).get_execution(): print(line) sys.exit(0) def _benchmark(args): if len(args) > 1: print("Usage: util.py bench <benchmark name>") database = owl_defaults.get_benchmark_path() benchmark_name = args[0] benchmarks = owl_defaults.load_json(database) defaults_json = benchmarks["defaults"] dataset_json = benchmarks["dataset"] if benchmark_name not in benchmarks["benchmark"]: raise KeyError("Unknown benchmark {0!s}".format(benchmark_name)) benchmark_json = benchmarks["benchmark"][benchmark_name] if type(benchmark_json) is str: benchmark_json = {"tool": benchmark_json} for key, value in defaults_json.items(): if key not in benchmark_json: benchmark_json[key] = value tool_description = benchmark_json["tool"] data = benchmark_json.get("data") repeat = benchmark_json.get("repeat") update = benchmark_json.get("update") perf = benchmark_json.get("perf", None) if type(data) is str: if data in dataset_json: data = dataset_json[data] else: data = [data] if type(tool_description) is str: tool_database = owl_defaults.load_json(owl_defaults.get_tool_path()) tool = owl_tool.get_tool(tool_database, tool_description) else: raise TypeError("Unknown tool description type {0!s}".format(type(tool_description))) benchmark_script = [owl_defaults.get_script_path("benchmark.sh"), "--stdin", "--repeat", str(repeat)] if update: benchmark_script += ["--update"] if perf: benchmark_script += ["--perf"] elif perf is not None: benchmark_script += ["--time"] benchmark_script += ["--"] + tool.get_file_execution("%F") formulas = owl_formula.read_formula_sets( owl_defaults.load_json(owl_defaults.get_formula_path())) for formula_set in data: if formula_set not in formulas: raise KeyError("Unknown formula set {0!s}".format(formula_set)) benchmark_input = "\n".join(["\n".join(formulas[formula_set]) for formula_set in data]) benchmark = subprocess.run(benchmark_script, input=benchmark_input.encode("utf-8")) sys.exit(benchmark.returncode) if __name__ == "__main__": if len(sys.argv) is 1: print("Usage: util.py <type> <args>") sys.exit(1) task_type = sys.argv[1] try: if task_type == "test": _test(sys.argv[2:]) elif task_type == "formula": _formula(sys.argv[2:]) elif task_type == "tool": _tool(sys.argv[2:]) elif task_type == "bench": _benchmark(sys.argv[2:]) else: print("<type> must be one of test, formula, tool or bench") sys.exit(1) except KeyboardInterrupt: print("Interrupted") sys.exit(1) except Exception as e: print("Error: {0!s}".format(e), file=sys.stderr) raise e sys.exit(1)
[]
[]
[]
[]
[]
python
0
0
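As with the WSGI row, the environment columns here are empty even though the script copies os.environ and injects JAVA_OPTS into the copy for the spawned server processes; copying the whole mapping and assigning into the copy are apparently not recorded as named reads. A small sketch of that pattern; the JAVA_OPTS value is taken from the script, the command is a placeholder.

import os
import subprocess

sub_env = os.environ.copy()                          # whole-mapping copy
sub_env["JAVA_OPTS"] = "-enableassertions -Xss64M"   # write into the copy

# Placeholder command; the real script launches Owl tool servers and ltlcross here.
subprocess.run(["env"], env=sub_env, check=False)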
config.py
import os

basedir = os.path.abspath(os.path.dirname(__file__))


class Config(object):
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'you-will-never-guess'
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
        'sqlite:///' + os.path.join(basedir, 'app.db')
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    MAIL_SERVER = os.environ.get('MAIL_SERVER')
    MAIL_PORT = int(os.environ.get('MAIL_PORT') or 25)
    MAIL_USE_TLS = os.environ.get('MAIL_USE_TLS') is not None
    MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
    MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
    ADMINS = ['[email protected]']
    POSTS_PER_PAGE = 25
[]
[]
[ "MAIL_SERVER", "MAIL_PASSWORD", "DATABASE_URL", "MAIL_PORT", "SECRET_KEY", "MAIL_USERNAME", "MAIL_USE_TLS" ]
[]
["MAIL_SERVER", "MAIL_PASSWORD", "DATABASE_URL", "MAIL_PORT", "SECRET_KEY", "MAIL_USERNAME", "MAIL_USE_TLS"]
python
7
0
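Every setting in the config above uses os.environ.get so that a missing variable falls back to a default instead of raising. One subtlety worth a short sketch: int(os.environ.get('MAIL_PORT') or 25) tolerates both an unset and an empty MAIL_PORT, whereas int(os.environ.get('MAIL_PORT', 25)) would crash on an empty string. The value assigned below is illustrative only.

import os

os.environ["MAIL_PORT"] = ""  # illustrative: set but empty in the deployment env

port = int(os.environ.get("MAIL_PORT") or 25)        # -> 25, "" is falsy
# int(os.environ.get("MAIL_PORT", 25))               # would raise ValueError on ""
print(port)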
openstack/sharedfilesystems/v2/snapshots/results.go
package snapshots import ( "encoding/json" "net/url" "strconv" "time" "github.com/nexclipper/gophercloud" "github.com/nexclipper/gophercloud/pagination" ) const ( invalidMarker = "-1" ) // Snapshot contains all information associated with an OpenStack Snapshot type Snapshot struct { // The UUID of the snapshot ID string `json:"id"` // The name of the snapshot Name string `json:"name,omitempty"` // A description of the snapshot Description string `json:"description,omitempty"` // UUID of the share from which the snapshot was created ShareID string `json:"share_id"` // The shared file system protocol ShareProto string `json:"share_proto"` // Size of the snapshot share in GB ShareSize int `json:"share_size"` // Size of the snapshot in GB Size int `json:"size"` // The snapshot status Status string `json:"status"` // The UUID of the project in which the snapshot was created ProjectID string `json:"project_id"` // Timestamp when the snapshot was created CreatedAt time.Time `json:"-"` // Snapshot links for pagination Links []map[string]string `json:"links"` } func (r *Snapshot) UnmarshalJSON(b []byte) error { type tmp Snapshot var s struct { tmp CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:"created_at"` } err := json.Unmarshal(b, &s) if err != nil { return err } *r = Snapshot(s.tmp) r.CreatedAt = time.Time(s.CreatedAt) return nil } type commonResult struct { gophercloud.Result } // Extract will get the Snapshot object from the commonResult func (r commonResult) Extract() (*Snapshot, error) { var s struct { Snapshot *Snapshot `json:"snapshot"` } err := r.ExtractInto(&s) return s.Snapshot, err } // CreateResult contains the response body and error from a Create request. type CreateResult struct { commonResult } // SnapshotPage is a pagination.pager that is returned from a call to the List function. type SnapshotPage struct { pagination.MarkerPageBase } // NextPageURL generates the URL for the page of results after this one. func (r SnapshotPage) NextPageURL() (string, error) { currentURL := r.URL mark, err := r.Owner.LastMarker() if err != nil { return "", err } if mark == invalidMarker { return "", nil } q := currentURL.Query() q.Set("offset", mark) currentURL.RawQuery = q.Encode() return currentURL.String(), nil } // LastMarker returns the last offset in a ListResult. func (r SnapshotPage) LastMarker() (string, error) { snapshots, err := ExtractSnapshots(r) if err != nil { return invalidMarker, err } if len(snapshots) == 0 { return invalidMarker, nil } u, err := url.Parse(r.URL.String()) if err != nil { return invalidMarker, err } queryParams := u.Query() offset := queryParams.Get("offset") limit := queryParams.Get("limit") // Limit is not present, only one page required if limit == "" { return invalidMarker, nil } iOffset := 0 if offset != "" { iOffset, err = strconv.Atoi(offset) if err != nil { return invalidMarker, err } } iLimit, err := strconv.Atoi(limit) if err != nil { return invalidMarker, err } iOffset = iOffset + iLimit offset = strconv.Itoa(iOffset) return offset, nil } // IsEmpty satisifies the IsEmpty method of the Page interface func (r SnapshotPage) IsEmpty() (bool, error) { snapshots, err := ExtractSnapshots(r) return len(snapshots) == 0, err } // ExtractSnapshots extracts and returns a Snapshot slice. It is used while // iterating over a snapshots.List call. 
func ExtractSnapshots(r pagination.Page) ([]Snapshot, error) { var s struct { Snapshots []Snapshot `json:"snapshots"` } err := (r.(SnapshotPage)).ExtractInto(&s) return s.Snapshots, err } // DeleteResult contains the response body and error from a Delete request. type DeleteResult struct { gophercloud.ErrResult } // GetResult contains the response body and error from a Get request. type GetResult struct { commonResult } // UpdateResult contains the response body and error from an Update request. type UpdateResult struct { commonResult }
[]
[]
[]
[]
[]
go
null
null
null
run.py
import os

from app.app import create_app

try:
    config = os.environ['APP_SETTINGS']
    # config_name = "development"
    app = create_app(config)
except KeyError:
    app = create_app('development')

if __name__ == '__main__':
    app.debug = True
    app.run()
[]
[]
[ "APP_SETTINGS" ]
[]
["APP_SETTINGS"]
python
1
0
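The run script above selects a Flask configuration through APP_SETTINGS and falls back to 'development' when the variable is missing. The same selection logic in one line, as a hedged sketch; passing the result to the script's create_app factory is implied but omitted here so the snippet stands alone.

import os

# Equivalent to the try/except KeyError fallback in run.py above.
config_name = os.environ.get("APP_SETTINGS", "development")
print(config_name)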
nixos/lib/test-driver/test_driver/machine.py
from contextlib import _GeneratorContextManager from pathlib import Path from queue import Queue from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple import base64 import io import os import queue import re import shlex import shutil import socket import subprocess import sys import tempfile import threading import time from test_driver.logger import rootlog CHAR_TO_KEY = { "A": "shift-a", "N": "shift-n", "-": "0x0C", "_": "shift-0x0C", "B": "shift-b", "O": "shift-o", "=": "0x0D", "+": "shift-0x0D", "C": "shift-c", "P": "shift-p", "[": "0x1A", "{": "shift-0x1A", "D": "shift-d", "Q": "shift-q", "]": "0x1B", "}": "shift-0x1B", "E": "shift-e", "R": "shift-r", ";": "0x27", ":": "shift-0x27", "F": "shift-f", "S": "shift-s", "'": "0x28", '"': "shift-0x28", "G": "shift-g", "T": "shift-t", "`": "0x29", "~": "shift-0x29", "H": "shift-h", "U": "shift-u", "\\": "0x2B", "|": "shift-0x2B", "I": "shift-i", "V": "shift-v", ",": "0x33", "<": "shift-0x33", "J": "shift-j", "W": "shift-w", ".": "0x34", ">": "shift-0x34", "K": "shift-k", "X": "shift-x", "/": "0x35", "?": "shift-0x35", "L": "shift-l", "Y": "shift-y", " ": "spc", "M": "shift-m", "Z": "shift-z", "\n": "ret", "!": "shift-0x02", "@": "shift-0x03", "#": "shift-0x04", "$": "shift-0x05", "%": "shift-0x06", "^": "shift-0x07", "&": "shift-0x08", "*": "shift-0x09", "(": "shift-0x0A", ")": "shift-0x0B", } def make_command(args: list) -> str: return " ".join(map(shlex.quote, (map(str, args)))) def _perform_ocr_on_screenshot( screenshot_path: str, model_ids: Iterable[int] ) -> List[str]: if shutil.which("tesseract") is None: raise Exception("OCR requested but enableOCR is false") magick_args = ( "-filter Catrom -density 72 -resample 300 " + "-contrast -normalize -despeckle -type grayscale " + "-sharpen 1 -posterize 3 -negate -gamma 100 " + "-blur 1x65535" ) tess_args = f"-c debug_file=/dev/null --psm 11" cmd = f"convert {magick_args} {screenshot_path} tiff:{screenshot_path}.tiff" ret = subprocess.run(cmd, shell=True, capture_output=True) if ret.returncode != 0: raise Exception(f"TIFF conversion failed with exit code {ret.returncode}") model_results = [] for model_id in model_ids: cmd = f"tesseract {screenshot_path}.tiff - {tess_args} --oem {model_id}" ret = subprocess.run(cmd, shell=True, capture_output=True) if ret.returncode != 0: raise Exception(f"OCR failed with exit code {ret.returncode}") model_results.append(ret.stdout.decode("utf-8")) return model_results def retry(fn: Callable, timeout: int = 900) -> None: """Call the given function repeatedly, with 1 second intervals, until it returns True or a timeout is reached. """ for _ in range(timeout): if fn(False): return time.sleep(1) if not fn(True): raise Exception(f"action timed out after {timeout} seconds") class StartCommand: """The Base Start Command knows how to append the necesary runtime qemu options as determined by a particular test driver run. Any such start command is expected to happily receive and append additional qemu args. """ _cmd: str def cmd( self, monitor_socket_path: Path, shell_socket_path: Path, allow_reboot: bool = False, # TODO: unused, legacy? ) -> str: display_opts = "" display_available = any(x in os.environ for x in ["DISPLAY", "WAYLAND_DISPLAY"]) if not display_available: display_opts += " -nographic" # qemu options qemu_opts = "" qemu_opts += ( "" if allow_reboot else " -no-reboot" " -device virtio-serial" " -device virtconsole,chardev=shell" " -device virtio-rng-pci" " -serial stdio" ) # TODO: qemu script already catpures this env variable, legacy? 
qemu_opts += " " + os.environ.get("QEMU_OPTS", "") return ( f"{self._cmd}" f" -monitor unix:{monitor_socket_path}" f" -chardev socket,id=shell,path={shell_socket_path}" f"{qemu_opts}" f"{display_opts}" ) @staticmethod def build_environment( state_dir: Path, shared_dir: Path, ) -> dict: # We make a copy to not update the current environment env = dict(os.environ) env.update( { "TMPDIR": str(state_dir), "SHARED_DIR": str(shared_dir), "USE_TMPDIR": "1", } ) return env def run( self, state_dir: Path, shared_dir: Path, monitor_socket_path: Path, shell_socket_path: Path, ) -> subprocess.Popen: return subprocess.Popen( self.cmd(monitor_socket_path, shell_socket_path), stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, cwd=state_dir, env=self.build_environment(state_dir, shared_dir), ) class NixStartScript(StartCommand): """A start script from nixos/modules/virtualiation/qemu-vm.nix that also satisfies the requirement of the BaseStartCommand. These Nix commands have the particular charactersitic that the machine name can be extracted out of them via a regex match. (Admittedly a _very_ implicit contract, evtl. TODO fix) """ def __init__(self, script: str): self._cmd = script @property def machine_name(self) -> str: match = re.search("run-(.+)-vm$", self._cmd) name = "machine" if match: name = match.group(1) return name class LegacyStartCommand(StartCommand): """Used in some places to create an ad-hoc machine instead of using nix test instrumentation + module system for that purpose. Legacy. """ def __init__( self, netBackendArgs: Optional[str] = None, netFrontendArgs: Optional[str] = None, hda: Optional[Tuple[Path, str]] = None, cdrom: Optional[str] = None, usb: Optional[str] = None, bios: Optional[str] = None, qemuBinary: Optional[str] = None, qemuFlags: Optional[str] = None, ): if qemuBinary is not None: self._cmd = qemuBinary else: self._cmd = "qemu-kvm" self._cmd += " -m 384" # networking net_backend = "-netdev user,id=net0" net_frontend = "-device virtio-net-pci,netdev=net0" if netBackendArgs is not None: net_backend += "," + netBackendArgs if netFrontendArgs is not None: net_frontend += "," + netFrontendArgs self._cmd += f" {net_backend} {net_frontend}" # hda hda_cmd = "" if hda is not None: hda_path = hda[0].resolve() hda_interface = hda[1] if hda_interface == "scsi": hda_cmd += ( f" -drive id=hda,file={hda_path},werror=report,if=none" " -device scsi-hd,drive=hda" ) else: hda_cmd += f" -drive file={hda_path},if={hda_interface},werror=report" self._cmd += hda_cmd # cdrom if cdrom is not None: self._cmd += f" -cdrom {cdrom}" # usb usb_cmd = "" if usb is not None: # https://github.com/qemu/qemu/blob/master/docs/usb2.txt usb_cmd += ( " -device usb-ehci" f" -drive id=usbdisk,file={usb},if=none,readonly" " -device usb-storage,drive=usbdisk " ) self._cmd += usb_cmd # bios if bios is not None: self._cmd += f" -bios {bios}" # qemu flags if qemuFlags is not None: self._cmd += f" {qemuFlags}" class Machine: """A handle to the machine with this name, that also knows how to manage the machine lifecycle with the help of a start script / command.""" name: str out_dir: Path tmp_dir: Path shared_dir: Path state_dir: Path monitor_path: Path shell_path: Path start_command: StartCommand keep_vm_state: bool allow_reboot: bool process: Optional[subprocess.Popen] pid: Optional[int] monitor: Optional[socket.socket] shell: Optional[socket.socket] serial_thread: Optional[threading.Thread] booted: bool connected: bool # Store last serial console lines for use # of 
wait_for_console_text last_lines: Queue = Queue() callbacks: List[Callable] def __repr__(self) -> str: return f"<Machine '{self.name}'>" def __init__( self, out_dir: Path, tmp_dir: Path, start_command: StartCommand, name: str = "machine", keep_vm_state: bool = False, allow_reboot: bool = False, callbacks: Optional[List[Callable]] = None, ) -> None: self.out_dir = out_dir self.tmp_dir = tmp_dir self.keep_vm_state = keep_vm_state self.allow_reboot = allow_reboot self.name = name self.start_command = start_command self.callbacks = callbacks if callbacks is not None else [] # set up directories self.shared_dir = self.tmp_dir / "shared-xchg" self.shared_dir.mkdir(mode=0o700, exist_ok=True) self.state_dir = self.tmp_dir / f"vm-state-{self.name}" self.monitor_path = self.state_dir / "monitor" self.shell_path = self.state_dir / "shell" if (not self.keep_vm_state) and self.state_dir.exists(): self.cleanup_statedir() self.state_dir.mkdir(mode=0o700, exist_ok=True) self.process = None self.pid = None self.monitor = None self.shell = None self.serial_thread = None self.booted = False self.connected = False @staticmethod def create_startcommand(args: Dict[str, str]) -> StartCommand: rootlog.warning( "Using legacy create_startcommand()," "please use proper nix test vm instrumentation, instead" "to generate the appropriate nixos test vm qemu startup script" ) hda = None if args.get("hda"): hda_arg: str = args.get("hda", "") hda_arg_path: Path = Path(hda_arg) hda = (hda_arg_path, args.get("hdaInterface", "")) return LegacyStartCommand( netBackendArgs=args.get("netBackendArgs"), netFrontendArgs=args.get("netFrontendArgs"), hda=hda, cdrom=args.get("cdrom"), usb=args.get("usb"), bios=args.get("bios"), qemuBinary=args.get("qemuBinary"), qemuFlags=args.get("qemuFlags"), ) def is_up(self) -> bool: return self.booted and self.connected def log(self, msg: str) -> None: rootlog.log(msg, {"machine": self.name}) def log_serial(self, msg: str) -> None: rootlog.log_serial(msg, self.name) def nested(self, msg: str, attrs: Dict[str, str] = {}) -> _GeneratorContextManager: my_attrs = {"machine": self.name} my_attrs.update(attrs) return rootlog.nested(msg, my_attrs) def wait_for_monitor_prompt(self) -> str: with self.nested("waiting for monitor prompt"): assert self.monitor is not None answer = "" while True: undecoded_answer = self.monitor.recv(1024) if not undecoded_answer: break answer += undecoded_answer.decode() if answer.endswith("(qemu) "): break return answer def send_monitor_command(self, command: str) -> str: self.run_callbacks() with self.nested("sending monitor command: {}".format(command)): message = ("{}\n".format(command)).encode() assert self.monitor is not None self.monitor.send(message) return self.wait_for_monitor_prompt() def wait_for_unit(self, unit: str, user: Optional[str] = None) -> None: """Wait for a systemd unit to get into "active" state. Throws exceptions on "failed" and "inactive" states as well as after timing out. 
""" def check_active(_: Any) -> bool: info = self.get_unit_info(unit, user) state = info["ActiveState"] if state == "failed": raise Exception('unit "{}" reached state "{}"'.format(unit, state)) if state == "inactive": status, jobs = self.systemctl("list-jobs --full 2>&1", user) if "No jobs" in jobs: info = self.get_unit_info(unit, user) if info["ActiveState"] == state: raise Exception( ( 'unit "{}" is inactive and there ' "are no pending jobs" ).format(unit) ) return state == "active" with self.nested( "waiting for unit {}{}".format( unit, f" with user {user}" if user is not None else "" ) ): retry(check_active) def get_unit_info(self, unit: str, user: Optional[str] = None) -> Dict[str, str]: status, lines = self.systemctl('--no-pager show "{}"'.format(unit), user) if status != 0: raise Exception( 'retrieving systemctl info for unit "{}" {} failed with exit code {}'.format( unit, "" if user is None else 'under user "{}"'.format(user), status ) ) line_pattern = re.compile(r"^([^=]+)=(.*)$") def tuple_from_line(line: str) -> Tuple[str, str]: match = line_pattern.match(line) assert match is not None return match[1], match[2] return dict( tuple_from_line(line) for line in lines.split("\n") if line_pattern.match(line) ) def systemctl(self, q: str, user: Optional[str] = None) -> Tuple[int, str]: if user is not None: q = q.replace("'", "\\'") return self.execute( ( "su -l {} --shell /bin/sh -c " "$'XDG_RUNTIME_DIR=/run/user/`id -u` " "systemctl --user {}'" ).format(user, q) ) return self.execute("systemctl {}".format(q)) def require_unit_state(self, unit: str, require_state: str = "active") -> None: with self.nested( "checking if unit ‘{}’ has reached state '{}'".format(unit, require_state) ): info = self.get_unit_info(unit) state = info["ActiveState"] if state != require_state: raise Exception( "Expected unit ‘{}’ to to be in state ".format(unit) + "'{}' but it is in state ‘{}’".format(require_state, state) ) def _next_newline_closed_block_from_shell(self) -> str: assert self.shell output_buffer = [] while True: # This receives up to 4096 bytes from the socket chunk = self.shell.recv(4096) if not chunk: # Probably a broken pipe, return the output we have break decoded = chunk.decode() output_buffer += [decoded] if decoded[-1] == "\n": break return "".join(output_buffer) def execute( self, command: str, check_return: bool = True, timeout: Optional[int] = 900 ) -> Tuple[int, str]: self.run_callbacks() self.connect() # Always run command with shell opts command = f"set -euo pipefail; {command}" timeout_str = "" if timeout is not None: timeout_str = f"timeout {timeout}" out_command = ( f"{timeout_str} sh -c {shlex.quote(command)} | (base64 --wrap 0; echo)\n" ) assert self.shell self.shell.send(out_command.encode()) # Get the output output = base64.b64decode(self._next_newline_closed_block_from_shell()) if not check_return: return (-1, output.decode()) # Get the return code self.shell.send("echo ${PIPESTATUS[0]}\n".encode()) rc = int(self._next_newline_closed_block_from_shell().strip()) return (rc, output.decode()) def shell_interact(self) -> None: """Allows you to interact with the guest shell Should only be used during test development, not in the production test.""" self.connect() self.log("Terminal is ready (there is no initial prompt):") assert self.shell subprocess.run( ["socat", "READLINE,prompt=$ ", f"FD:{self.shell.fileno()}"], pass_fds=[self.shell.fileno()], ) def console_interact(self) -> None: """Allows you to interact with QEMU's stdin The shell can be exited with Ctrl+D. 
Note that Ctrl+C is not allowed to be used. QEMU's stdout is read line-wise. Should only be used during test development, not in the production test.""" self.log("Terminal is ready (there is no prompt):") assert self.process assert self.process.stdin while True: try: char = sys.stdin.buffer.read(1) except KeyboardInterrupt: break if char == b"": # ctrl+d self.log("Closing connection to the console") break self.send_console(char.decode()) def succeed(self, *commands: str, timeout: Optional[int] = None) -> str: """Execute each command and check that it succeeds.""" output = "" for command in commands: with self.nested("must succeed: {}".format(command)): (status, out) = self.execute(command, timeout=timeout) if status != 0: self.log("output: {}".format(out)) raise Exception( "command `{}` failed (exit code {})".format(command, status) ) output += out return output def fail(self, *commands: str, timeout: Optional[int] = None) -> str: """Execute each command and check that it fails.""" output = "" for command in commands: with self.nested("must fail: {}".format(command)): (status, out) = self.execute(command, timeout=timeout) if status == 0: raise Exception( "command `{}` unexpectedly succeeded".format(command) ) output += out return output def wait_until_succeeds(self, command: str, timeout: int = 900) -> str: """Wait until a command returns success and return its output. Throws an exception on timeout. """ output = "" def check_success(_: Any) -> bool: nonlocal output status, output = self.execute(command, timeout=timeout) return status == 0 with self.nested("waiting for success: {}".format(command)): retry(check_success, timeout) return output def wait_until_fails(self, command: str, timeout: int = 900) -> str: """Wait until a command returns failure. Throws an exception on timeout. """ output = "" def check_failure(_: Any) -> bool: nonlocal output status, output = self.execute(command, timeout=timeout) return status != 0 with self.nested("waiting for failure: {}".format(command)): retry(check_failure) return output def wait_for_shutdown(self) -> None: if not self.booted: return with self.nested("waiting for the VM to power off"): sys.stdout.flush() assert self.process self.process.wait() self.pid = None self.booted = False self.connected = False def get_tty_text(self, tty: str) -> str: status, output = self.execute( "fold -w$(stty -F /dev/tty{0} size | " "awk '{{print $2}}') /dev/vcs{0}".format(tty) ) return output def wait_until_tty_matches(self, tty: str, regexp: str) -> None: """Wait until the visible output on the chosen TTY matches regular expression. Throws an exception on timeout. 
""" matcher = re.compile(regexp) def tty_matches(last: bool) -> bool: text = self.get_tty_text(tty) if last: self.log( f"Last chance to match /{regexp}/ on TTY{tty}, " f"which currently contains: {text}" ) return len(matcher.findall(text)) > 0 with self.nested("waiting for {} to appear on tty {}".format(regexp, tty)): retry(tty_matches) def send_chars(self, chars: List[str]) -> None: with self.nested("sending keys ‘{}‘".format(chars)): for char in chars: self.send_key(char) def wait_for_file(self, filename: str) -> None: """Waits until the file exists in machine's file system.""" def check_file(_: Any) -> bool: status, _ = self.execute("test -e {}".format(filename)) return status == 0 with self.nested("waiting for file ‘{}‘".format(filename)): retry(check_file) def wait_for_open_port(self, port: int) -> None: def port_is_open(_: Any) -> bool: status, _ = self.execute("nc -z localhost {}".format(port)) return status == 0 with self.nested("waiting for TCP port {}".format(port)): retry(port_is_open) def wait_for_closed_port(self, port: int) -> None: def port_is_closed(_: Any) -> bool: status, _ = self.execute("nc -z localhost {}".format(port)) return status != 0 with self.nested("waiting for TCP port {} to be closed"): retry(port_is_closed) def start_job(self, jobname: str, user: Optional[str] = None) -> Tuple[int, str]: return self.systemctl("start {}".format(jobname), user) def stop_job(self, jobname: str, user: Optional[str] = None) -> Tuple[int, str]: return self.systemctl("stop {}".format(jobname), user) def wait_for_job(self, jobname: str) -> None: self.wait_for_unit(jobname) def connect(self) -> None: if self.connected: return with self.nested("waiting for the VM to finish booting"): self.start() assert self.shell tic = time.time() self.shell.recv(1024) # TODO: Timeout toc = time.time() self.log("connected to guest root shell") self.log("(connecting took {:.2f} seconds)".format(toc - tic)) self.connected = True def screenshot(self, filename: str) -> None: word_pattern = re.compile(r"^\w+$") if word_pattern.match(filename): filename = os.path.join(self.out_dir, "{}.png".format(filename)) tmp = "{}.ppm".format(filename) with self.nested( "making screenshot {}".format(filename), {"image": os.path.basename(filename)}, ): self.send_monitor_command("screendump {}".format(tmp)) ret = subprocess.run("pnmtopng {} > {}".format(tmp, filename), shell=True) os.unlink(tmp) if ret.returncode != 0: raise Exception("Cannot convert screenshot") def copy_from_host_via_shell(self, source: str, target: str) -> None: """Copy a file from the host into the guest by piping it over the shell into the destination file. Works without host-guest shared folder. Prefer copy_from_host for whenever possible. """ with open(source, "rb") as fh: content_b64 = base64.b64encode(fh.read()).decode() self.succeed( f"mkdir -p $(dirname {target})", f"echo -n {content_b64} | base64 -d > {target}", ) def copy_from_host(self, source: str, target: str) -> None: """Copy a file from the host into the guest via the `shared_dir` shared among all the VMs (using a temporary directory). 
""" host_src = Path(source) vm_target = Path(target) with tempfile.TemporaryDirectory(dir=self.shared_dir) as shared_td: shared_temp = Path(shared_td) host_intermediate = shared_temp / host_src.name vm_shared_temp = Path("/tmp/shared") / shared_temp.name vm_intermediate = vm_shared_temp / host_src.name self.succeed(make_command(["mkdir", "-p", vm_shared_temp])) if host_src.is_dir(): shutil.copytree(host_src, host_intermediate) else: shutil.copy(host_src, host_intermediate) self.succeed(make_command(["mkdir", "-p", vm_target.parent])) self.succeed(make_command(["cp", "-r", vm_intermediate, vm_target])) def copy_from_vm(self, source: str, target_dir: str = "") -> None: """Copy a file from the VM (specified by an in-VM source path) to a path relative to `$out`. The file is copied via the `shared_dir` shared among all the VMs (using a temporary directory). """ # Compute the source, target, and intermediate shared file names vm_src = Path(source) with tempfile.TemporaryDirectory(dir=self.shared_dir) as shared_td: shared_temp = Path(shared_td) vm_shared_temp = Path("/tmp/shared") / shared_temp.name vm_intermediate = vm_shared_temp / vm_src.name intermediate = shared_temp / vm_src.name # Copy the file to the shared directory inside VM self.succeed(make_command(["mkdir", "-p", vm_shared_temp])) self.succeed(make_command(["cp", "-r", vm_src, vm_intermediate])) abs_target = self.out_dir / target_dir / vm_src.name abs_target.parent.mkdir(exist_ok=True, parents=True) # Copy the file from the shared directory outside VM if intermediate.is_dir(): shutil.copytree(intermediate, abs_target) else: shutil.copy(intermediate, abs_target) def dump_tty_contents(self, tty: str) -> None: """Debugging: Dump the contents of the TTY<n>""" self.execute("fold -w 80 /dev/vcs{} | systemd-cat".format(tty)) def _get_screen_text_variants(self, model_ids: Iterable[int]) -> List[str]: with tempfile.TemporaryDirectory() as tmpdir: screenshot_path = os.path.join(tmpdir, "ppm") self.send_monitor_command(f"screendump {screenshot_path}") return _perform_ocr_on_screenshot(screenshot_path, model_ids) def get_screen_text_variants(self) -> List[str]: return self._get_screen_text_variants([0, 1, 2]) def get_screen_text(self) -> str: return self._get_screen_text_variants([2])[0] def wait_for_text(self, regex: str) -> None: def screen_matches(last: bool) -> bool: variants = self.get_screen_text_variants() for text in variants: if re.search(regex, text) is not None: return True if last: self.log("Last OCR attempt failed. Text was: {}".format(variants)) return False with self.nested("waiting for {} to appear on screen".format(regex)): retry(screen_matches) def wait_for_console_text(self, regex: str) -> None: with self.nested("waiting for {} to appear on console".format(regex)): # Buffer the console output, this is needed # to match multiline regexes. 
console = io.StringIO() while True: try: console.write(self.last_lines.get()) except queue.Empty: self.sleep(1) continue console.seek(0) matches = re.search(regex, console.read()) if matches is not None: return def send_key(self, key: str) -> None: key = CHAR_TO_KEY.get(key, key) self.send_monitor_command("sendkey {}".format(key)) time.sleep(0.01) def send_console(self, chars: str) -> None: assert self.process assert self.process.stdin self.process.stdin.write(chars.encode()) self.process.stdin.flush() def start(self) -> None: if self.booted: return self.log("starting vm") def clear(path: Path) -> Path: if path.exists(): path.unlink() return path def create_socket(path: Path) -> socket.socket: s = socket.socket(family=socket.AF_UNIX, type=socket.SOCK_STREAM) s.bind(str(path)) s.listen(1) return s monitor_socket = create_socket(clear(self.monitor_path)) shell_socket = create_socket(clear(self.shell_path)) self.process = self.start_command.run( self.state_dir, self.shared_dir, self.monitor_path, self.shell_path, ) self.monitor, _ = monitor_socket.accept() self.shell, _ = shell_socket.accept() # Store last serial console lines for use # of wait_for_console_text self.last_lines: Queue = Queue() def process_serial_output() -> None: assert self.process assert self.process.stdout for _line in self.process.stdout: # Ignore undecodable bytes that may occur in boot menus line = _line.decode(errors="ignore").replace("\r", "").rstrip() self.last_lines.put(line) self.log_serial(line) self.serial_thread = threading.Thread(target=process_serial_output) self.serial_thread.start() self.wait_for_monitor_prompt() self.pid = self.process.pid self.booted = True self.log("QEMU running (pid {})".format(self.pid)) def cleanup_statedir(self) -> None: shutil.rmtree(self.state_dir) rootlog.log(f"deleting VM state directory {self.state_dir}") rootlog.log("if you want to keep the VM state, pass --keep-vm-state") def shutdown(self) -> None: if not self.booted: return assert self.shell self.shell.send("poweroff\n".encode()) self.wait_for_shutdown() def crash(self) -> None: if not self.booted: return self.log("forced crash") self.send_monitor_command("quit") self.wait_for_shutdown() def wait_for_x(self) -> None: """Wait until it is possible to connect to the X server. Note that testing the existence of /tmp/.X11-unix/X0 is insufficient. """ def check_x(_: Any) -> bool: cmd = ( "journalctl -b SYSLOG_IDENTIFIER=systemd | " + 'grep "Reached target Current graphical"' ) status, _ = self.execute(cmd) if status != 0: return False status, _ = self.execute("[ -e /tmp/.X11-unix/X0 ]") return status == 0 with self.nested("waiting for the X11 server"): retry(check_x) def get_window_names(self) -> List[str]: return self.succeed( r"xwininfo -root -tree | sed 's/.*0x[0-9a-f]* \"\([^\"]*\)\".*/\1/; t; d'" ).splitlines() def wait_for_window(self, regexp: str) -> None: pattern = re.compile(regexp) def window_is_visible(last_try: bool) -> bool: names = self.get_window_names() if last_try: self.log( "Last chance to match {} on the window list,".format(regexp) + " which currently contains: " + ", ".join(names) ) return any(pattern.search(name) for name in names) with self.nested("waiting for a window to appear"): retry(window_is_visible) def sleep(self, secs: int) -> None: # We want to sleep in *guest* time, not *host* time. self.succeed(f"sleep {secs}") def forward_port(self, host_port: int = 8080, guest_port: int = 80) -> None: """Forward a TCP port on the host to a TCP port on the guest. Useful during interactive testing. 
""" self.send_monitor_command( "hostfwd_add tcp::{}-:{}".format(host_port, guest_port) ) def block(self) -> None: """Make the machine unreachable by shutting down eth1 (the multicast interface used to talk to the other VMs). We keep eth0 up so that the test driver can continue to talk to the machine. """ self.send_monitor_command("set_link virtio-net-pci.1 off") def unblock(self) -> None: """Make the machine reachable.""" self.send_monitor_command("set_link virtio-net-pci.1 on") def release(self) -> None: if self.pid is None: return rootlog.info(f"kill machine (pid {self.pid})") assert self.process assert self.shell assert self.monitor assert self.serial_thread self.process.terminate() self.shell.close() self.monitor.close() self.serial_thread.join() def run_callbacks(self) -> None: for callback in self.callbacks: callback()
[]
[]
[ "QEMU_OPTS" ]
[]
["QEMU_OPTS"]
python
1
0
command/util.go
package cmd import ( "fmt" "io/ioutil" "os" "os/exec" "strings" ) func TextEditor(content []byte) ([]byte, error) { editor := os.Getenv("EDITOR") if editor == "" { return []byte{}, fmt.Errorf("Please set the EDITOR environment variable") } f, err := ioutil.TempFile("", "pd_") if err != nil { return []byte{}, err } if err := f.Chmod(0600); err != nil { f.Close() os.Remove(f.Name()) return []byte{}, err } f.Write(content) f.Close() defer os.Remove(f.Name()) cmdParts := strings.Fields(editor) cmd := exec.Command(cmdParts[0], append(cmdParts[1:], f.Name())...) cmd.Stdin = os.Stdin cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr if err := cmd.Run(); err != nil { return []byte{}, err } ct, err := ioutil.ReadFile(f.Name()) if err != nil { return []byte{}, err } return ct, nil }
[ "\"EDITOR\"" ]
[]
[ "EDITOR" ]
[]
["EDITOR"]
go
1
0
irma/cmd/config.go
package cmd import ( "crypto/tls" "net/smtp" "os" "path/filepath" "regexp" "strings" "github.com/go-errors/errors" "github.com/mitchellh/mapstructure" irma "github.com/privacybydesign/irmago" "github.com/privacybydesign/irmago/server" "github.com/privacybydesign/irmago/server/keyshare" "github.com/spf13/cast" "github.com/sietseringers/cobra" "github.com/sietseringers/viper" "github.com/sirupsen/logrus" ) func configureEmail() keyshare.EmailConfiguration { // If username/password are specified for the email server, build an authentication object. var emailAuth smtp.Auth if viper.GetString("email-username") != "" { emailAuth = smtp.PlainAuth( "", viper.GetString("email-username"), viper.GetString("email-password"), viper.GetString("email-hostname"), ) } return keyshare.EmailConfiguration{ EmailServer: viper.GetString("email-server"), EmailAuth: emailAuth, EmailFrom: viper.GetString("email-from"), DefaultLanguage: viper.GetString("default-language"), } } func configureIRMAServer() *server.Configuration { return &server.Configuration{ SchemesPath: viper.GetString("schemes-path"), SchemesAssetsPath: viper.GetString("schemes-assets-path"), SchemesUpdateInterval: viper.GetInt("schemes-update"), DisableSchemesUpdate: viper.GetInt("schemes-update") == 0, IssuerPrivateKeysPath: viper.GetString("privkeys"), RevocationDBType: viper.GetString("revocation-db-type"), RevocationDBConnStr: viper.GetString("revocation-db-str"), RevocationSettings: irma.RevocationSettings{}, URL: viper.GetString("url"), DisableTLS: viper.GetBool("no-tls"), Email: viper.GetString("email"), EnableSSE: viper.GetBool("sse"), Verbose: viper.GetInt("verbose"), Quiet: viper.GetBool("quiet"), LogJSON: viper.GetBool("log-json"), Logger: logger, Production: viper.GetBool("production"), JwtIssuer: viper.GetString("jwt-issuer"), JwtPrivateKey: viper.GetString("jwt-privkey"), JwtPrivateKeyFile: viper.GetString("jwt-privkey-file"), AllowUnsignedCallbacks: viper.GetBool("allow-unsigned-callbacks"), AugmentClientReturnURL: viper.GetBool("augment-client-return-url"), } } func configureTLS() *tls.Config { conf, err := server.TLSConf( viper.GetString("tls-cert"), viper.GetString("tls-cert-file"), viper.GetString("tls-privkey"), viper.GetString("tls-privkey-file")) if err != nil { die("", err) } return conf } func readConfig(cmd *cobra.Command, name, logname string, configpaths []string, productionDefaults map[string]interface{}) { dashReplacer := strings.NewReplacer("-", "_") viper.SetEnvKeyReplacer(dashReplacer) viper.SetFileKeyReplacer(dashReplacer) viper.SetEnvPrefix(strings.ToUpper(name)) viper.AutomaticEnv() if err := viper.BindPFlags(cmd.Flags()); err != nil { die("", err) } // Locate and read configuration file confpath := viper.GetString("config") if confpath != "" { info, err := os.Stat(confpath) if err != nil { if os.IsNotExist(err) { die("specified configuration file does not exist", nil) } else { die("failed to stat configuration file", err) } } if info.IsDir() { die("specified configuration file is a directory", nil) } dir, file := filepath.Dir(confpath), filepath.Base(confpath) viper.SetConfigName(strings.TrimSuffix(file, filepath.Ext(file))) viper.AddConfigPath(dir) } else { viper.SetConfigName(name) for _, path := range configpaths { viper.AddConfigPath(path) } } err := viper.ReadInConfig() // Hold error checking until we know how much of it to log // Create our logger instance logger = server.NewLogger(viper.GetInt("verbose"), viper.GetBool("quiet"), viper.GetBool("log-json")) // First log output: hello, development or 
production mode, log level mode := "development" if viper.GetBool("production") { mode = "production" for key, val := range productionDefaults { viper.SetDefault(key, val) } } logger.WithFields(logrus.Fields{ "version": irma.Version, "mode": mode, "verbosity": server.Verbosity(viper.GetInt("verbose")), }).Info(logname + " running") // Now we finally examine and log any error from viper.ReadInConfig() if err != nil { if _, notfound := err.(viper.ConfigFileNotFoundError); notfound { logger.Info("No configuration file found") } else { die("", errors.WrapPrefix(err, "Failed to unmarshal configuration file at "+viper.ConfigFileUsed(), 0)) } } else { logger.Info("Config file: ", viper.ConfigFileUsed()) } } func handleMapOrString(key string, dest interface{}) error { var m map[string]interface{} var err error if val, flagOrEnv := viper.Get(key).(string); !flagOrEnv || val != "" { if m, err = cast.ToStringMapE(viper.Get(key)); err != nil { return errors.WrapPrefix(err, "Failed to unmarshal "+key+" from flag or env var", 0) } } if len(m) == 0 { return nil } if err := mapstructure.Decode(m, dest); err != nil { return errors.WrapPrefix(err, "Failed to unmarshal "+key+" from config file", 0) } return nil } func handlePermission(typ string) []string { if !viper.IsSet(typ) { if typ == "revoke-perms" || (viper.GetBool("production") && typ == "issue-perms") { return []string{} } else { return []string{"*"} } } perms := viper.GetStringSlice(typ) if perms == nil { return []string{} } return perms } // productionMode examines the arguments passed to the executable to see if --production is enabled. // (This should really be done using viper, but when the help message is printed, viper is not yet // initialized.) func productionMode() bool { r := regexp.MustCompile("^--production(=(.*))?$") for _, arg := range os.Args { matches := r.FindStringSubmatch(arg) if len(matches) != 3 { continue } if matches[1] == "" { return true } return checkConfVal(matches[2]) } return checkConfVal(os.Getenv("IRMASERVER_PRODUCTION")) } func checkConfVal(val string) bool { lc := strings.ToLower(val) return lc == "1" || lc == "true" || lc == "yes" || lc == "t" }
[ "\"IRMASERVER_PRODUCTION\"" ]
[]
[ "IRMASERVER_PRODUCTION" ]
[]
["IRMASERVER_PRODUCTION"]
go
1
0
frontend/main.go
package main import ( "fmt" "html/template" "log" "net/http" "os" "strconv" "strings" ) var stylesheet template.HTML = template.HTML(` <!-- Latest compiled and minified CSS --> <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.6/css/bootstrap.min.css" integrity="sha384-1q8mTJOASx8j1Au+a5WDVnPi2lkFfwwEAa8hDDdjZlpLegxhjVME1fgjWPGmkzs7" crossorigin="anonymous"> <!-- Optional theme --> <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.6/css/bootstrap-theme.min.css" integrity="sha384-fLW2N01lMqjakBkx3l/M9EahuwpSfeNvV63J5ezn3uZzapT0u7EYsXMjQV+0En5r" crossorigin="anonymous"> <!-- Latest compiled and minified JavaScript --> <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.6/js/bootstrap.min.js" integrity="sha384-0mSbJDEHialfmuBBQP6A4Qrprq5OVfW37PRR3j5ELqxss1yVqOtnepnHVP9aJ7xS" crossorigin="anonymous"></script> <style> .jumbotron { text-align: center; } .header h3 { color: white; } </style> `) func main() { systemPortString := os.Getenv("PORT") systemPort, err := strconv.Atoi(systemPortString) if err != nil { log.Fatal("invalid required env var PORT") } systemListenString := os.Getenv("LISTEN") if len(strings.TrimSpace(systemListenString)) == 0 { systemListenString = "0.0.0.0" } mux := http.NewServeMux() mux.Handle("/proxy/", &HttpDemoHandler{}) mux.Handle("/udp-test/", &UDPDemoHandler{}) mux.Handle("/", &HomePageHandler{}) http.ListenAndServe(fmt.Sprintf("%s:%d", systemListenString, systemPort), mux) }
[ "\"PORT\"", "\"LISTEN\"" ]
[]
[ "PORT", "LISTEN" ]
[]
["PORT", "LISTEN"]
go
2
0
hugolib/testhelpers_test.go
package hugolib import ( "io/ioutil" "path/filepath" "runtime" "strconv" "testing" "unicode/utf8" "bytes" "fmt" "regexp" "strings" "text/template" "github.com/gohugoio/hugo/langs" "github.com/sanity-io/litter" "github.com/gohugoio/hugo/config" "github.com/gohugoio/hugo/deps" "github.com/spf13/afero" "github.com/gohugoio/hugo/helpers" "github.com/gohugoio/hugo/tpl" "github.com/spf13/viper" "os" "github.com/gohugoio/hugo/common/loggers" "github.com/gohugoio/hugo/hugofs" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) type sitesBuilder struct { Cfg config.Provider Fs *hugofs.Fs T testing.TB logger *loggers.Logger dumper litter.Options // Aka the Hugo server mode. running bool H *HugoSites theme string // Default toml configFormat string // Default is empty. // TODO(bep) revisit this and consider always setting it to something. // Consider this in relation to using the BaseFs.PublishFs to all publishing. workingDir string // Base data/content contentFilePairs []string templateFilePairs []string i18nFilePairs []string dataFilePairs []string // Additional data/content. // As in "use the base, but add these on top". contentFilePairsAdded []string templateFilePairsAdded []string i18nFilePairsAdded []string dataFilePairsAdded []string } func newTestSitesBuilder(t testing.TB) *sitesBuilder { v := viper.New() fs := hugofs.NewMem(v) litterOptions := litter.Options{ HidePrivateFields: true, StripPackageNames: true, Separator: " ", } return &sitesBuilder{T: t, Fs: fs, configFormat: "toml", dumper: litterOptions} } func createTempDir(prefix string) (string, func(), error) { workDir, err := ioutil.TempDir("", prefix) if err != nil { return "", nil, err } if runtime.GOOS == "darwin" && !strings.HasPrefix(workDir, "/private") { // To get the entry folder in line with the rest. This its a little bit // mysterious, but so be it. 
workDir = "/private" + workDir } return workDir, func() { os.RemoveAll(workDir) }, nil } func (s *sitesBuilder) Running() *sitesBuilder { s.running = true return s } func (s *sitesBuilder) WithLogger(logger *loggers.Logger) *sitesBuilder { s.logger = logger return s } func (s *sitesBuilder) WithWorkingDir(dir string) *sitesBuilder { s.workingDir = dir return s } func (s *sitesBuilder) WithConfigTemplate(data interface{}, format, configTemplate string) *sitesBuilder { if format == "" { format = "toml" } templ, err := template.New("test").Parse(configTemplate) if err != nil { s.Fatalf("Template parse failed: %s", err) } var b bytes.Buffer templ.Execute(&b, data) return s.WithConfigFile(format, b.String()) } func (s *sitesBuilder) WithViper(v *viper.Viper) *sitesBuilder { loadDefaultSettingsFor(v) s.Cfg = v return s } func (s *sitesBuilder) WithConfigFile(format, conf string) *sitesBuilder { writeSource(s.T, s.Fs, "config."+format, conf) s.configFormat = format return s } func (s *sitesBuilder) WithThemeConfigFile(format, conf string) *sitesBuilder { if s.theme == "" { s.theme = "test-theme" } filename := filepath.Join("themes", s.theme, "config."+format) writeSource(s.T, s.Fs, filename, conf) return s } func (s *sitesBuilder) WithSourceFile(filename, content string) *sitesBuilder { writeSource(s.T, s.Fs, filepath.FromSlash(filename), content) return s } const commonConfigSections = ` [services] [services.disqus] shortname = "disqus_shortname" [services.googleAnalytics] id = "ga_id" [privacy] [privacy.disqus] disable = false [privacy.googleAnalytics] respectDoNotTrack = true anonymizeIP = true [privacy.instagram] simple = true [privacy.twitter] enableDNT = true [privacy.vimeo] disable = false [privacy.youtube] disable = false privacyEnhanced = true ` func (s *sitesBuilder) WithSimpleConfigFile() *sitesBuilder { return s.WithSimpleConfigFileAndBaseURL("http://example.com/") } func (s *sitesBuilder) WithSimpleConfigFileAndBaseURL(baseURL string) *sitesBuilder { config := fmt.Sprintf("baseURL = %q", baseURL) config = config + commonConfigSections return s.WithConfigFile("toml", config) } func (s *sitesBuilder) WithDefaultMultiSiteConfig() *sitesBuilder { var defaultMultiSiteConfig = ` baseURL = "http://example.com/blog" paginate = 1 disablePathToLower = true defaultContentLanguage = "en" defaultContentLanguageInSubdir = true [permalinks] other = "/somewhere/else/:filename" [blackfriday] angledQuotes = true [Taxonomies] tag = "tags" [Languages] [Languages.en] weight = 10 title = "In English" languageName = "English" [Languages.en.blackfriday] angledQuotes = false [[Languages.en.menu.main]] url = "/" name = "Home" weight = 0 [Languages.fr] weight = 20 title = "Le Français" languageName = "Français" [Languages.fr.Taxonomies] plaque = "plaques" [Languages.nn] weight = 30 title = "På nynorsk" languageName = "Nynorsk" paginatePath = "side" [Languages.nn.Taxonomies] lag = "lag" [[Languages.nn.menu.main]] url = "/" name = "Heim" weight = 1 [Languages.nb] weight = 40 title = "På bokmål" languageName = "Bokmål" paginatePath = "side" [Languages.nb.Taxonomies] lag = "lag" ` + commonConfigSections return s.WithConfigFile("toml", defaultMultiSiteConfig) } func (s *sitesBuilder) WithContent(filenameContent ...string) *sitesBuilder { s.contentFilePairs = append(s.contentFilePairs, filenameContent...) return s } func (s *sitesBuilder) WithContentAdded(filenameContent ...string) *sitesBuilder { s.contentFilePairsAdded = append(s.contentFilePairsAdded, filenameContent...) 
return s } func (s *sitesBuilder) WithTemplates(filenameContent ...string) *sitesBuilder { s.templateFilePairs = append(s.templateFilePairs, filenameContent...) return s } func (s *sitesBuilder) WithTemplatesAdded(filenameContent ...string) *sitesBuilder { s.templateFilePairsAdded = append(s.templateFilePairsAdded, filenameContent...) return s } func (s *sitesBuilder) WithData(filenameContent ...string) *sitesBuilder { s.dataFilePairs = append(s.dataFilePairs, filenameContent...) return s } func (s *sitesBuilder) WithDataAdded(filenameContent ...string) *sitesBuilder { s.dataFilePairsAdded = append(s.dataFilePairsAdded, filenameContent...) return s } func (s *sitesBuilder) WithI18n(filenameContent ...string) *sitesBuilder { s.i18nFilePairs = append(s.i18nFilePairs, filenameContent...) return s } func (s *sitesBuilder) WithI18nAdded(filenameContent ...string) *sitesBuilder { s.i18nFilePairsAdded = append(s.i18nFilePairsAdded, filenameContent...) return s } func (s *sitesBuilder) writeFilePairs(folder string, filenameContent []string) *sitesBuilder { if len(filenameContent)%2 != 0 { s.Fatalf("expect filenameContent for %q in pairs (%d)", folder, len(filenameContent)) } for i := 0; i < len(filenameContent); i += 2 { filename, content := filenameContent[i], filenameContent[i+1] target := folder // TODO(bep) clean up this magic. if strings.HasPrefix(filename, folder) { target = "" } if s.workingDir != "" { target = filepath.Join(s.workingDir, target) } writeSource(s.T, s.Fs, filepath.Join(target, filename), content) } return s } func (s *sitesBuilder) CreateSites() *sitesBuilder { if err := s.CreateSitesE(); err != nil { s.Fatalf("Failed to create sites: %s", err) } return s } func (s *sitesBuilder) CreateSitesE() error { s.addDefaults() s.writeFilePairs("content", s.contentFilePairs) s.writeFilePairs("content", s.contentFilePairsAdded) s.writeFilePairs("layouts", s.templateFilePairs) s.writeFilePairs("layouts", s.templateFilePairsAdded) s.writeFilePairs("data", s.dataFilePairs) s.writeFilePairs("data", s.dataFilePairsAdded) s.writeFilePairs("i18n", s.i18nFilePairs) s.writeFilePairs("i18n", s.i18nFilePairsAdded) if s.Cfg == nil { cfg, _, err := LoadConfig(ConfigSourceDescriptor{Fs: s.Fs.Source, Filename: "config." 
+ s.configFormat}) if err != nil { return err } // TODO(bep) /* expectedConfigs := 1 if s.theme != "" { expectedConfigs = 2 } require.Equal(s.T, expectedConfigs, len(configFiles), fmt.Sprintf("Configs: %v", configFiles)) */ s.Cfg = cfg } sites, err := NewHugoSites(deps.DepsCfg{Fs: s.Fs, Cfg: s.Cfg, Logger: s.logger, Running: s.running}) if err != nil { return err } s.H = sites return nil } func (s *sitesBuilder) BuildE(cfg BuildCfg) error { if s.H == nil { s.CreateSites() } return s.H.Build(cfg) } func (s *sitesBuilder) Build(cfg BuildCfg) *sitesBuilder { return s.build(cfg, false) } func (s *sitesBuilder) BuildFail(cfg BuildCfg) *sitesBuilder { return s.build(cfg, true) } func (s *sitesBuilder) build(cfg BuildCfg, shouldFail bool) *sitesBuilder { if s.H == nil { s.CreateSites() } err := s.H.Build(cfg) if err == nil { logErrorCount := s.H.NumLogErrors() if logErrorCount > 0 { err = fmt.Errorf("logged %d errors", logErrorCount) } } if err != nil && !shouldFail { s.Fatalf("Build failed: %s", err) } else if err == nil && shouldFail { s.Fatalf("Expected error") } return s } func (s *sitesBuilder) addDefaults() { var ( contentTemplate = `--- title: doc1 weight: 1 tags: - tag1 date: "2018-02-28" --- # doc1 *some "content"* {{< shortcode >}} {{< lingo >}} ` defaultContent = []string{ "content/sect/doc1.en.md", contentTemplate, "content/sect/doc1.fr.md", contentTemplate, "content/sect/doc1.nb.md", contentTemplate, "content/sect/doc1.nn.md", contentTemplate, } defaultTemplates = []string{ "_default/single.html", "Single: {{ .Title }}|{{ i18n \"hello\" }}|{{.Lang}}|{{ .Content }}", "_default/list.html", "{{ $p := .Paginator }}List Page {{ $p.PageNumber }}: {{ .Title }}|{{ i18n \"hello\" }}|{{ .Permalink }}|Pager: {{ template \"_internal/pagination.html\" . }}", "index.html", "{{ $p := .Paginator }}Default Home Page {{ $p.PageNumber }}: {{ .Title }}|{{ .IsHome }}|{{ i18n \"hello\" }}|{{ .Permalink }}|{{ .Site.Data.hugo.slogan }}|String Resource: {{ ( \"Hugo Pipes\" | resources.FromString \"text/pipes.txt\").RelPermalink }}", "index.fr.html", "{{ $p := .Paginator }}French Home Page {{ $p.PageNumber }}: {{ .Title }}|{{ .IsHome }}|{{ i18n \"hello\" }}|{{ .Permalink }}|{{ .Site.Data.hugo.slogan }}|String Resource: {{ ( \"Hugo Pipes\" | resources.FromString \"text/pipes.txt\").RelPermalink }}", // Shortcodes "shortcodes/shortcode.html", "Shortcode: {{ i18n \"hello\" }}", // A shortcode in multiple languages "shortcodes/lingo.html", "LingoDefault", "shortcodes/lingo.fr.html", "LingoFrench", } defaultI18n = []string{ "en.yaml", ` hello: other: "Hello" `, "fr.yaml", ` hello: other: "Bonjour" `, } defaultData = []string{ "hugo.toml", "slogan = \"Hugo Rocks!\"", } ) if len(s.contentFilePairs) == 0 { s.writeFilePairs("content", defaultContent) } if len(s.templateFilePairs) == 0 { s.writeFilePairs("layouts", defaultTemplates) } if len(s.dataFilePairs) == 0 { s.writeFilePairs("data", defaultData) } if len(s.i18nFilePairs) == 0 { s.writeFilePairs("i18n", defaultI18n) } } func (s *sitesBuilder) Fatalf(format string, args ...interface{}) { Fatalf(s.T, format, args...) } func Fatalf(t testing.TB, format string, args ...interface{}) { trace := trace() format = format + "\n%s" args = append(args, trace) t.Fatalf(format, args...) 
} func trace() string { return strings.Join(assert.CallerInfo(), "\n\r\t\t\t") } func (s *sitesBuilder) AssertFileContent(filename string, matches ...string) { content := readDestination(s.T, s.Fs, filename) for _, match := range matches { if !strings.Contains(content, match) { s.Fatalf("No match for %q in content for %s\n%s\n%q", match, filename, content, content) } } } func (s *sitesBuilder) AssertObject(expected string, object interface{}) { got := s.dumper.Sdump(object) expected = strings.TrimSpace(expected) if expected != got { fmt.Println(got) diff := helpers.DiffStrings(expected, got) s.Fatalf("diff:\n%s\nexpected\n%s\ngot\n%s", diff, expected, got) } } func (s *sitesBuilder) AssertFileContentRe(filename string, matches ...string) { content := readDestination(s.T, s.Fs, filename) for _, match := range matches { r := regexp.MustCompile(match) if !r.MatchString(content) { s.Fatalf("No match for %q in content for %s\n%q", match, filename, content) } } } func (s *sitesBuilder) CheckExists(filename string) bool { return destinationExists(s.Fs, filepath.Clean(filename)) } type testHelper struct { Cfg config.Provider Fs *hugofs.Fs T testing.TB } func (th testHelper) assertFileContent(filename string, matches ...string) { filename = th.replaceDefaultContentLanguageValue(filename) content := readDestination(th.T, th.Fs, filename) for _, match := range matches { match = th.replaceDefaultContentLanguageValue(match) require.True(th.T, strings.Contains(content, match), fmt.Sprintf("File no match for\n%q in\n%q:\n%s", strings.Replace(match, "%", "%%", -1), filename, strings.Replace(content, "%", "%%", -1))) } } func (th testHelper) assertFileContentRegexp(filename string, matches ...string) { filename = th.replaceDefaultContentLanguageValue(filename) content := readDestination(th.T, th.Fs, filename) for _, match := range matches { match = th.replaceDefaultContentLanguageValue(match) r := regexp.MustCompile(match) require.True(th.T, r.MatchString(content), fmt.Sprintf("File no match for\n%q in\n%q:\n%s", strings.Replace(match, "%", "%%", -1), filename, strings.Replace(content, "%", "%%", -1))) } } func (th testHelper) assertFileNotExist(filename string) { exists, err := helpers.Exists(filename, th.Fs.Destination) require.NoError(th.T, err) require.False(th.T, exists) } func (th testHelper) replaceDefaultContentLanguageValue(value string) string { defaultInSubDir := th.Cfg.GetBool("defaultContentLanguageInSubDir") replace := th.Cfg.GetString("defaultContentLanguage") + "/" if !defaultInSubDir { value = strings.Replace(value, replace, "", 1) } return value } func newTestPathSpec(fs *hugofs.Fs, v *viper.Viper) *helpers.PathSpec { l := langs.NewDefaultLanguage(v) ps, _ := helpers.NewPathSpec(fs, l) return ps } func newTestDefaultPathSpec(t *testing.T) *helpers.PathSpec { v := viper.New() // Easier to reason about in tests. 
v.Set("disablePathToLower", true) v.Set("contentDir", "content") v.Set("dataDir", "data") v.Set("i18nDir", "i18n") v.Set("layoutDir", "layouts") v.Set("archetypeDir", "archetypes") v.Set("assetDir", "assets") v.Set("resourceDir", "resources") v.Set("publishDir", "public") fs := hugofs.NewDefault(v) ps, err := helpers.NewPathSpec(fs, v) if err != nil { t.Fatal(err) } return ps } func newTestCfg() (*viper.Viper, *hugofs.Fs) { v := viper.New() fs := hugofs.NewMem(v) v.SetFs(fs.Source) loadDefaultSettingsFor(v) // Default is false, but true is easier to use as default in tests v.Set("defaultContentLanguageInSubdir", true) return v, fs } // newTestSite creates a new site in the English language with in-memory Fs. // The site will have a template system loaded and ready to use. // Note: This is only used in single site tests. func newTestSite(t testing.TB, configKeyValues ...interface{}) *Site { cfg, fs := newTestCfg() for i := 0; i < len(configKeyValues); i += 2 { cfg.Set(configKeyValues[i].(string), configKeyValues[i+1]) } d := deps.DepsCfg{Fs: fs, Cfg: cfg} s, err := NewSiteForCfg(d) if err != nil { Fatalf(t, "Failed to create Site: %s", err) } return s } func newTestSitesFromConfig(t testing.TB, afs afero.Fs, tomlConfig string, layoutPathContentPairs ...string) (testHelper, *HugoSites) { if len(layoutPathContentPairs)%2 != 0 { Fatalf(t, "Layouts must be provided in pairs") } writeToFs(t, afs, "config.toml", tomlConfig) cfg, err := LoadConfigDefault(afs) require.NoError(t, err) fs := hugofs.NewFrom(afs, cfg) th := testHelper{cfg, fs, t} for i := 0; i < len(layoutPathContentPairs); i += 2 { writeSource(t, fs, layoutPathContentPairs[i], layoutPathContentPairs[i+1]) } h, err := NewHugoSites(deps.DepsCfg{Fs: fs, Cfg: cfg}) require.NoError(t, err) return th, h } func newTestSitesFromConfigWithDefaultTemplates(t testing.TB, tomlConfig string) (testHelper, *HugoSites) { return newTestSitesFromConfig(t, afero.NewMemMapFs(), tomlConfig, "layouts/_default/single.html", "Single|{{ .Title }}|{{ .Content }}", "layouts/_default/list.html", "List|{{ .Title }}|{{ .Content }}", "layouts/_default/terms.html", "Terms List|{{ .Title }}|{{ .Content }}", ) } func createWithTemplateFromNameValues(additionalTemplates ...string) func(templ tpl.TemplateHandler) error { return func(templ tpl.TemplateHandler) error { for i := 0; i < len(additionalTemplates); i += 2 { err := templ.AddTemplate(additionalTemplates[i], additionalTemplates[i+1]) if err != nil { return err } } return nil } } func buildSingleSite(t testing.TB, depsCfg deps.DepsCfg, buildCfg BuildCfg) *Site { return buildSingleSiteExpected(t, false, false, depsCfg, buildCfg) } func buildSingleSiteExpected(t testing.TB, expectSiteInitEror, expectBuildError bool, depsCfg deps.DepsCfg, buildCfg BuildCfg) *Site { h, err := NewHugoSites(depsCfg) if expectSiteInitEror { require.Error(t, err) return nil } else { require.NoError(t, err) } require.Len(t, h.Sites, 1) if expectBuildError { require.Error(t, h.Build(buildCfg)) return nil } require.NoError(t, h.Build(buildCfg)) return h.Sites[0] } func writeSourcesToSource(t *testing.T, base string, fs *hugofs.Fs, sources ...[2]string) { for _, src := range sources { writeSource(t, fs, filepath.Join(base, src[0]), src[1]) } } func dumpPages(pages ...*Page) { for i, p := range pages { fmt.Printf("%d: Kind: %s Title: %-10s RelPermalink: %-10s Path: %-10s sections: %s Len Sections(): %d\n", i+1, p.Kind, p.title, p.RelPermalink(), p.Path(), p.sections, len(p.Sections())) } } func printStringIndexes(s string) { lines := 
strings.Split(s, "\n") i := 0 for _, line := range lines { for _, r := range line { fmt.Printf("%-3s", strconv.Itoa(i)) i += utf8.RuneLen(r) } i++ fmt.Println() for _, r := range line { fmt.Printf("%-3s", string(r)) } fmt.Println() } } func isCI() bool { return os.Getenv("CI") != "" } func isGo111() bool { return strings.Contains(runtime.Version(), "1.11") }
[ "\"CI\"" ]
[]
[ "CI" ]
[]
["CI"]
go
1
0
test/e2e/mcd_test.go
package e2e_test import ( "context" "fmt" "strings" "testing" "time" ign3types "github.com/coreos/ignition/v2/config/v3_1/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/wait" mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" "github.com/openshift/machine-config-operator/pkg/daemon/constants" "github.com/openshift/machine-config-operator/test/e2e/framework" "github.com/openshift/machine-config-operator/test/helpers" ) // Test case for https://github.com/openshift/machine-config-operator/issues/358 func TestMCDToken(t *testing.T) { cs := framework.NewClientSet("") listOptions := metav1.ListOptions{ LabelSelector: labels.SelectorFromSet(labels.Set{"k8s-app": "machine-config-daemon"}).String(), } mcdList, err := cs.Pods("openshift-machine-config-operator").List(context.TODO(), listOptions) require.Nil(t, err) for _, pod := range mcdList.Items { res, err := cs.Pods(pod.Namespace).GetLogs(pod.Name, &corev1.PodLogOptions{ Container: "machine-config-daemon", }).DoRaw(context.TODO()) require.Nil(t, err) for _, line := range strings.Split(string(res), "\n") { if strings.Contains(line, "Unable to rotate token") { t.Fatalf("found token rotation failure message: %s", line) } } } } func TestMCDeployed(t *testing.T) { cs := framework.NewClientSet("") // TODO: bring this back to 10 for i := 0; i < 3; i++ { startTime := time.Now() mcadd := createMCToAddFile("add-a-file", fmt.Sprintf("/etc/mytestconf%d", i), "test") // create the dummy MC now _, err := cs.MachineConfigs().Create(context.TODO(), mcadd, metav1.CreateOptions{}) if err != nil { t.Errorf("failed to create machine config %v", err) } t.Logf("Created %s", mcadd.Name) renderedConfig, err := waitForRenderedConfig(t, cs, "worker", mcadd.Name) require.Nil(t, err) err = waitForPoolComplete(t, cs, "worker", renderedConfig) require.Nil(t, err) nodes, err := getNodesByRole(cs, "worker") require.Nil(t, err) for _, node := range nodes { assert.Equal(t, renderedConfig, node.Annotations[constants.CurrentMachineConfigAnnotationKey]) assert.Equal(t, constants.MachineConfigDaemonStateDone, node.Annotations[constants.MachineConfigDaemonStateAnnotationKey]) } t.Logf("All nodes updated with %s (%s elapsed)", mcadd.Name, time.Since(startTime)) } } func TestKernelArguments(t *testing.T) { cs := framework.NewClientSet("") // Create infra pool to roll out MC changes unlabelFunc := labelRandomNodeFromPool(t, cs, "worker", "node-role.kubernetes.io/infra") createMCP(t, cs, "infra") // create old mc to have something to verify we successfully rolled back oldInfraConfig := createMC("old-infra", "infra") _, err := cs.MachineConfigs().Create(context.TODO(), oldInfraConfig, metav1.CreateOptions{}) require.Nil(t, err) oldInfraRenderedConfig, err := waitForRenderedConfig(t, cs, "infra", oldInfraConfig.Name) err = waitForPoolComplete(t, cs, "infra", oldInfraRenderedConfig) require.Nil(t, err) // create kargs MC kargsMC := &mcfgv1.MachineConfig{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("kargs-%s", uuid.NewUUID()), Labels: mcLabelForRole("infra"), }, Spec: mcfgv1.MachineConfigSpec{ Config: runtime.RawExtension{ Raw: helpers.MarshalOrDie(ctrlcommon.NewIgnConfig()), }, KernelArguments: []string{"nosmt", 
"foo=bar", "foo=baz", " baz=test bar=hello world"}, }, } _, err = cs.MachineConfigs().Create(context.TODO(), kargsMC, metav1.CreateOptions{}) require.Nil(t, err) t.Logf("Created %s", kargsMC.Name) renderedConfig, err := waitForRenderedConfig(t, cs, "infra", kargsMC.Name) require.Nil(t, err) err = waitForPoolComplete(t, cs, "infra", renderedConfig) require.Nil(t, err) // Re-fetch the infra node for updated annotations infraNode := getSingleNodeByRole(t, cs, "infra") assert.Equal(t, infraNode.Annotations[constants.CurrentMachineConfigAnnotationKey], renderedConfig) assert.Equal(t, infraNode.Annotations[constants.MachineConfigDaemonStateAnnotationKey], constants.MachineConfigDaemonStateDone) kargs := execCmdOnNode(t, cs, infraNode, "cat", "/rootfs/proc/cmdline") expectedKernelArgs := []string{"nosmt", "foo=bar", "foo=baz", "baz=test", "bar=hello world"} for _, v := range expectedKernelArgs { if !strings.Contains(kargs, v) { t.Fatalf("Missing %q in kargs: %q", v, kargs) } } t.Logf("Node %s has expected kargs", infraNode.Name) // cleanup - delete karg mc and rollback if err := cs.MachineConfigs().Delete(context.TODO(), kargsMC.Name, metav1.DeleteOptions{}); err != nil { t.Error(err) } t.Logf("Deleted MachineConfig %s", kargsMC.Name) err = waitForPoolComplete(t, cs, "infra", oldInfraRenderedConfig) require.Nil(t, err) unlabelFunc() workerMCP, err := cs.MachineConfigPools().Get(context.TODO(), "worker", metav1.GetOptions{}) require.Nil(t, err) if err := wait.Poll(2*time.Second, 5*time.Minute, func() (bool, error) { node, err := cs.Nodes().Get(context.TODO(), infraNode.Name, metav1.GetOptions{}) require.Nil(t, err) if node.Annotations[constants.DesiredMachineConfigAnnotationKey] != workerMCP.Spec.Configuration.Name { return false, nil } return true, nil }); err != nil { t.Errorf("infra node hasn't moved back to worker config: %v", err) } err = waitForPoolComplete(t, cs, "infra", oldInfraRenderedConfig) require.Nil(t, err) } func TestKernelType(t *testing.T) { cs := framework.NewClientSet("") unlabelFunc := labelRandomNodeFromPool(t, cs, "worker", "node-role.kubernetes.io/infra") oldInfraRenderedConfig := getMcName(t, cs, "infra") // create kernel type MC and roll out kernelType := &mcfgv1.MachineConfig{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("kerneltype-%s", uuid.NewUUID()), Labels: mcLabelForRole("infra"), }, Spec: mcfgv1.MachineConfigSpec{ Config: runtime.RawExtension{ Raw: helpers.MarshalOrDie(ctrlcommon.NewIgnConfig()), }, KernelType: "realtime", }, } _, err := cs.MachineConfigs().Create(context.TODO(), kernelType, metav1.CreateOptions{}) require.Nil(t, err) t.Logf("Created %s", kernelType.Name) renderedConfig, err := waitForRenderedConfig(t, cs, "infra", kernelType.Name) require.Nil(t, err) if err := waitForPoolComplete(t, cs, "infra", renderedConfig); err != nil { t.Fatal(err) } infraNode := getSingleNodeByRole(t, cs, "infra") assert.Equal(t, infraNode.Annotations[constants.CurrentMachineConfigAnnotationKey], renderedConfig) assert.Equal(t, infraNode.Annotations[constants.MachineConfigDaemonStateAnnotationKey], constants.MachineConfigDaemonStateDone) kernelInfo := execCmdOnNode(t, cs, infraNode, "uname", "-a") if !strings.Contains(kernelInfo, "PREEMPT RT") { t.Fatalf("Node %s doesn't have expected kernel", infraNode.Name) } t.Logf("Node %s has expected kernel", infraNode.Name) // Delete the applied kerneltype MachineConfig to make sure rollback works fine if err := cs.MachineConfigs().Delete(context.TODO(), kernelType.Name, metav1.DeleteOptions{}); err != nil { t.Error(err) } 
t.Logf("Deleted MachineConfig %s", kernelType.Name) // Wait for the mcp to rollback to previous config if err := waitForPoolComplete(t, cs, "infra", oldInfraRenderedConfig); err != nil { t.Fatal(err) } // Re-fetch the infra node for updated annotations infraNode = getSingleNodeByRole(t, cs, "infra") assert.Equal(t, infraNode.Annotations[constants.CurrentMachineConfigAnnotationKey], oldInfraRenderedConfig) assert.Equal(t, infraNode.Annotations[constants.MachineConfigDaemonStateAnnotationKey], constants.MachineConfigDaemonStateDone) kernelInfo = execCmdOnNode(t, cs, infraNode, "uname", "-a") if strings.Contains(kernelInfo, "PREEMPT RT") { t.Fatalf("Node %s did not rollback successfully", infraNode.Name) } t.Logf("Node %s has successfully rolled back", infraNode.Name) unlabelFunc() workerMCP, err := cs.MachineConfigPools().Get(context.TODO(), "worker", metav1.GetOptions{}) require.Nil(t, err) if err := wait.Poll(2*time.Second, 5*time.Minute, func() (bool, error) { node, err := cs.Nodes().Get(context.TODO(), infraNode.Name, metav1.GetOptions{}) require.Nil(t, err) if node.Annotations[constants.DesiredMachineConfigAnnotationKey] != workerMCP.Spec.Configuration.Name { return false, nil } return true, nil }); err != nil { t.Errorf("infra node hasn't moved back to worker config: %v", err) } err = waitForPoolComplete(t, cs, "infra", oldInfraRenderedConfig) require.Nil(t, err) } func TestPoolDegradedOnFailToRender(t *testing.T) { cs := framework.NewClientSet("") mcadd := createMCToAddFile("add-a-file", "/etc/mytestconfs", "test") ignCfg, err := ctrlcommon.ParseAndConvertConfig(mcadd.Spec.Config.Raw) require.Nil(t, err, "failed to parse ignition config") ignCfg.Ignition.Version = "" // invalid, won't render rawIgnCfg := helpers.MarshalOrDie(ignCfg) mcadd.Spec.Config.Raw = rawIgnCfg // create the dummy MC now _, err = cs.MachineConfigs().Create(context.TODO(), mcadd, metav1.CreateOptions{}) require.Nil(t, err, "failed to create machine config") // verify the pool goes degraded if err := wait.PollImmediate(2*time.Second, 5*time.Minute, func() (bool, error) { mcp, err := cs.MachineConfigPools().Get(context.TODO(), "worker", metav1.GetOptions{}) if err != nil { return false, err } if mcfgv1.IsMachineConfigPoolConditionTrue(mcp.Status.Conditions, mcfgv1.MachineConfigPoolDegraded) { return true, nil } return false, nil }); err != nil { t.Errorf("machine config pool never switched to Degraded on failure to render: %v", err) } // now delete the bad MC and watch pool flipping back to not degraded if err := cs.MachineConfigs().Delete(context.TODO(), mcadd.Name, metav1.DeleteOptions{}); err != nil { t.Error(err) } // wait for the mcp to go back to previous config if err := wait.PollImmediate(2*time.Second, 5*time.Minute, func() (bool, error) { mcp, err := cs.MachineConfigPools().Get(context.TODO(), "worker", metav1.GetOptions{}) if err != nil { return false, err } if mcfgv1.IsMachineConfigPoolConditionFalse(mcp.Status.Conditions, mcfgv1.MachineConfigPoolRenderDegraded) { return true, nil } return false, nil }); err != nil { t.Errorf("machine config pool never switched back to Degraded=False: %v", err) } } func TestReconcileAfterBadMC(t *testing.T) { cs := framework.NewClientSet("") // create a MC that contains a valid ignition config but is not reconcilable mcadd := createMCToAddFile("add-a-file", "/etc/mytestconfs", "test") ignCfg, err := ctrlcommon.ParseAndConvertConfig(mcadd.Spec.Config.Raw) require.Nil(t, err, "failed to parse ignition config") ignCfg.Storage.Disks = []ign3types.Disk{ ign3types.Disk{ 
Device: "/one", }, } rawIgnCfg := helpers.MarshalOrDie(ignCfg) mcadd.Spec.Config.Raw = rawIgnCfg workerOldMc := getMcName(t, cs, "worker") // create the dummy MC now _, err = cs.MachineConfigs().Create(context.TODO(), mcadd, metav1.CreateOptions{}) if err != nil { t.Errorf("failed to create machine config %v", err) } renderedConfig, err := waitForRenderedConfig(t, cs, "worker", mcadd.Name) require.Nil(t, err) // verify that one node picked the above up if err := wait.Poll(2*time.Second, 5*time.Minute, func() (bool, error) { nodes, err := getNodesByRole(cs, "worker") if err != nil { return false, err } for _, node := range nodes { if node.Annotations[constants.DesiredMachineConfigAnnotationKey] == renderedConfig && node.Annotations[constants.MachineConfigDaemonStateAnnotationKey] != constants.MachineConfigDaemonStateDone { // just check that we have the annotation here, w/o strings checking anything that can flip fast causing flakes if node.Annotations[constants.MachineConfigDaemonReasonAnnotationKey] != "" { return true, nil } } } return false, nil }); err != nil { t.Errorf("machine config hasn't been picked by any MCD: %v", err) } // verify that we got indeed an unavailable machine in the pool if err := wait.Poll(2*time.Second, 5*time.Minute, func() (bool, error) { mcp, err := cs.MachineConfigPools().Get(context.TODO(), "worker", metav1.GetOptions{}) if err != nil { return false, err } if mcfgv1.IsMachineConfigPoolConditionTrue(mcp.Status.Conditions, mcfgv1.MachineConfigPoolNodeDegraded) && mcp.Status.DegradedMachineCount >= 1 { return true, nil } return false, nil }); err != nil { t.Errorf("worker pool isn't reporting degraded with a bad MC: %v", err) } // now delete the bad MC and watch the nodes reconciling as expected if err := cs.MachineConfigs().Delete(context.TODO(), mcadd.Name, metav1.DeleteOptions{}); err != nil { t.Error(err) } // wait for the mcp to go back to previous config if err := waitForPoolComplete(t, cs, "worker", workerOldMc); err != nil { t.Fatal(err) } visited := make(map[string]bool) if err := wait.Poll(2*time.Second, 30*time.Minute, func() (bool, error) { nodes, err := getNodesByRole(cs, "worker") if err != nil { return false, err } mcp, err := cs.MachineConfigPools().Get(context.TODO(), "worker", metav1.GetOptions{}) if err != nil { return false, err } for _, node := range nodes { if node.Annotations[constants.CurrentMachineConfigAnnotationKey] == workerOldMc && node.Annotations[constants.DesiredMachineConfigAnnotationKey] == workerOldMc && node.Annotations[constants.MachineConfigDaemonStateAnnotationKey] == constants.MachineConfigDaemonStateDone { visited[node.Name] = true if len(visited) == len(nodes) { if mcp.Status.UnavailableMachineCount == 0 && mcp.Status.ReadyMachineCount == int32(len(nodes)) && mcp.Status.UpdatedMachineCount == int32(len(nodes)) { return true, nil } } continue } } return false, nil }); err != nil { t.Errorf("machine config didn't roll back on any worker: %v", err) } } // Test that deleting a MC that changes a file does not completely delete the file // entirely but rather restores it to its original state. 
func TestDontDeleteRPMFiles(t *testing.T) { cs := framework.NewClientSet("") unlabelFunc := labelRandomNodeFromPool(t, cs, "worker", "node-role.kubernetes.io/infra") oldInfraRenderedConfig := getMcName(t, cs, "infra") mcHostFile := createMCToAddFileForRole("modify-host-file", "infra", "/etc/motd", "mco-test") // create the dummy MC now _, err := cs.MachineConfigs().Create(context.TODO(), mcHostFile, metav1.CreateOptions{}) if err != nil { t.Errorf("failed to create machine config %v", err) } renderedConfig, err := waitForRenderedConfig(t, cs, "infra", mcHostFile.Name) require.Nil(t, err) err = waitForPoolComplete(t, cs, "infra", renderedConfig) require.Nil(t, err) // now delete the bad MC and watch the nodes reconciling as expected if err := cs.MachineConfigs().Delete(context.TODO(), mcHostFile.Name, metav1.DeleteOptions{}); err != nil { t.Error(err) } // wait for the mcp to go back to previous config err = waitForPoolComplete(t, cs, "infra", oldInfraRenderedConfig) require.Nil(t, err) infraNode := getSingleNodeByRole(t, cs, "infra") assert.Equal(t, infraNode.Annotations[constants.CurrentMachineConfigAnnotationKey], oldInfraRenderedConfig) assert.Equal(t, infraNode.Annotations[constants.MachineConfigDaemonStateAnnotationKey], constants.MachineConfigDaemonStateDone) found := execCmdOnNode(t, cs, infraNode, "cat", "/rootfs/etc/motd") if strings.Contains(found, "mco-test") { t.Fatalf("updated file doesn't contain expected data, got %s", found) } unlabelFunc() workerMCP, err := cs.MachineConfigPools().Get(context.TODO(), "worker", metav1.GetOptions{}) require.Nil(t, err) if err := wait.Poll(2*time.Second, 5*time.Minute, func() (bool, error) { node, err := cs.Nodes().Get(context.TODO(), infraNode.Name, metav1.GetOptions{}) require.Nil(t, err) if node.Annotations[constants.DesiredMachineConfigAnnotationKey] != workerMCP.Spec.Configuration.Name { return false, nil } return true, nil }); err != nil { t.Errorf("infra node hasn't moved back to worker config: %v", err) } err = waitForPoolComplete(t, cs, "infra", oldInfraRenderedConfig) require.Nil(t, err) } func TestIgn3Cfg(t *testing.T) { cs := framework.NewClientSet("") unlabelFunc := labelRandomNodeFromPool(t, cs, "worker", "node-role.kubernetes.io/infra") // create a dummy MC with an sshKey for user Core mcName := fmt.Sprintf("99-ign3cfg-infra-%s", uuid.NewUUID()) mcadd := &mcfgv1.MachineConfig{} mcadd.ObjectMeta = metav1.ObjectMeta{ Name: mcName, Labels: mcLabelForRole("infra"), } // create a new MC that adds a valid user & ssh key testIgn3Config := ign3types.Config{} tempUser := ign3types.PasswdUser{Name: "core", SSHAuthorizedKeys: []ign3types.SSHAuthorizedKey{"1234_test_ign3"}} testIgn3Config.Passwd.Users = append(testIgn3Config.Passwd.Users, tempUser) testIgn3Config.Ignition.Version = "3.1.0" mode := 420 testfiledata := "data:,test-ign3-stuff" tempFile := ign3types.File{Node: ign3types.Node{Path: "/etc/testfileconfig"}, FileEmbedded1: ign3types.FileEmbedded1{Contents: ign3types.Resource{Source: &testfiledata}, Mode: &mode}} testIgn3Config.Storage.Files = append(testIgn3Config.Storage.Files, tempFile) rawIgnConfig := helpers.MarshalOrDie(testIgn3Config) mcadd.Spec.Config.Raw = rawIgnConfig _, err := cs.MachineConfigs().Create(context.TODO(), mcadd, metav1.CreateOptions{}) require.Nil(t, err, "failed to create MC") t.Logf("Created %s", mcadd.Name) // grab the latest worker- MC renderedConfig, err := waitForRenderedConfig(t, cs, "infra", mcadd.Name) require.Nil(t, err) err = waitForPoolComplete(t, cs, "infra", renderedConfig) require.Nil(t, err) 
infraNode := getSingleNodeByRole(t, cs, "infra") assert.Equal(t, infraNode.Annotations[constants.CurrentMachineConfigAnnotationKey], renderedConfig) assert.Equal(t, infraNode.Annotations[constants.MachineConfigDaemonStateAnnotationKey], constants.MachineConfigDaemonStateDone) foundSSH := execCmdOnNode(t, cs, infraNode, "grep", "1234_test_ign3", "/rootfs/home/core/.ssh/authorized_keys") if !strings.Contains(foundSSH, "1234_test_ign3") { t.Fatalf("updated ssh keys not found in authorized_keys, got %s", foundSSH) } t.Logf("Node %s has SSH key", infraNode.Name) foundFile := execCmdOnNode(t, cs, infraNode, "cat", "/rootfs/etc/testfileconfig") if !strings.Contains(foundFile, "test-ign3-stuff") { t.Fatalf("updated file doesn't contain expected data, got %s", foundFile) } t.Logf("Node %s has file", infraNode.Name) unlabelFunc() workerMCP, err := cs.MachineConfigPools().Get(context.TODO(), "worker", metav1.GetOptions{}) require.Nil(t, err) if err := wait.Poll(2*time.Second, 5*time.Minute, func() (bool, error) { node, err := cs.Nodes().Get(context.TODO(), infraNode.Name, metav1.GetOptions{}) require.Nil(t, err) if node.Annotations[constants.DesiredMachineConfigAnnotationKey] != workerMCP.Spec.Configuration.Name { return false, nil } return true, nil }); err != nil { t.Errorf("infra node hasn't moved back to worker config: %v", err) } err = waitForPoolComplete(t, cs, "infra", renderedConfig) require.Nil(t, err) }
[]
[]
[]
[]
[]
go
null
null
null
airmon/app.py
import asyncio import os import time from copy import copy import aiotg import matplotlib matplotlib.use('Agg') # noqa from airmon import const, forecast, storage, chart, date from airmon.storage.models import bind_db BOT_TOKEN = os.environ.get('TG_BOT_TOKEN') BOT_NAME = os.environ.get('TG_BOT_NAME') assert all([BOT_TOKEN, BOT_NAME]) _LAST_ALERT = 0 bot = aiotg.Bot(api_token=BOT_TOKEN, name=BOT_NAME) async def help_msg(chat): msg = ( '/help - display help message\n' '/subscribe - be alerted if forecast goes bad\n' '/unsubscribe - stop receiving alerts\n' '/fire - emit a test alert\n' '/stats3 - renders chart for last 3 hours\n' '/stats6 - renders chart for last 6 hours\n' '/stats12 - renders chart for last 12 hours\n' '/stats24 - renders chart for last 24 hours\n' ) return await chat.send_text(msg) @bot.command(r'/help') async def help_(chat, match): return await help_msg(chat) @bot.command(r'/start') async def start(chat, match): return await help_msg(chat) @bot.command(r'/subscribe') async def subscribe(chat, match): storage.get_or_create_channel(chat.id) return await chat.reply('You\'ve been added to the notifications list') async def unsubscribe(chat): storage.remove_channel(chat.id) return await chat.reply('You\'ve been removed from the notifications list') @bot.command(r'/unsubscribe') async def unsubscribe_(chat, match): return await unsubscribe(chat) @bot.command(r'/stop') async def stop(chat, match): return await unsubscribe(chat) def render_message(data, predictions=None, severity=None): msg = '' if severity is not None: msg += '[%s] CO2 Alert!\n' % severity msg += 'Current level: %dppm\n' % data[-1] if predictions is not None: msg += 'Upcoming level: %dppm' % predictions[-1] return msg @bot.command(r'/stats(\d+)') async def stats(chat, match): hours = int(match.groups()[0]) lookback = date.past(hours=hours) data = storage.get_co2_levels_series(lookback) img = chart.draw_png(data) msg = render_message(data) return await chat.send_photo(photo=img, caption=msg) @bot.command(r'/fire') async def fire(chat, match): lookback = date.past(hours=const.alert_lookback_hours) data = storage.get_co2_levels_series(lookback) predictions = forecast.predict() img = chart.draw_png(data, predictions) msg = render_message(data, predictions, 'TEST') return await chat.send_photo(photo=img, caption=msg) async def fire_alerts(predictions, severity): global _LAST_ALERT since_last_alert = time.time() - _LAST_ALERT if since_last_alert < const.alert_cooldown_secs: return _LAST_ALERT = time.time() lookback = date.past(hours=const.alert_lookback_hours) data = storage.get_co2_levels_series(lookback) img = chart.draw_png(data, predictions) msg = render_message(data, predictions, severity) for chid in storage.get_channels_id(): chat = bot.channel(chid) await chat.send_photo(photo=img, caption=msg) img.close() async def monitor(): print('Starting monitoring') while True: predictions = forecast.predict() val = predictions[-1] if val > const.co2_level_critical: await fire_alerts(predictions, 'CRITICAL') elif val > const.co2_level_warning: await fire_alerts(predictions, 'WARNING') await asyncio.sleep(const.monitor_interval_secs) if __name__ == '__main__': print('Starting app') bind_db() asyncio.ensure_future(monitor()) loop = asyncio.get_event_loop() loop.run_until_complete(bot.loop())
[]
[]
[ "TG_BOT_TOKEN", "TG_BOT_NAME" ]
[]
["TG_BOT_TOKEN", "TG_BOT_NAME"]
python
2
0
cmd/root.go
/* Copyright © 2019 NAME HERE <EMAIL ADDRESS> Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cmd import ( "errors" "fmt" "os" "path/filepath" "github.com/Optum/dce-cli/configs" "github.com/Optum/dce-cli/internal/constants" observ "github.com/Optum/dce-cli/internal/observation" utl "github.com/Optum/dce-cli/internal/util" svc "github.com/Optum/dce-cli/pkg/service" "github.com/sirupsen/logrus" "github.com/spf13/cobra" ) var cfgFile string var Config = &configs.Root{} var Service *svc.ServiceContainer var Util *utl.UtilContainer var Observation *observ.ObservationContainer // Expose logger as global for ease of use var log observ.Logger var Log observ.Logger func init() { // Global Flags // --------------- // --config flag, to specify path to dce.yml config // default to ~/.dce/config.yaml RootCmd.PersistentFlags().StringVar( &cfgFile, "config", "", "config file (default is \"$HOME/.dce/config.yaml\")", ) } // RootCmd represents the base command when called without any subcommands var RootCmd = &cobra.Command{ Use: "dce", Short: "Disposable Cloud Environment (DCE)", Long: `Disposable Cloud Environment (DCE) The DCE cli allows: - Admins to provision DCE to a master account and administer said account - Users to lease accounts and execute commands against them`, PersistentPreRunE: preRun, } func preRun(cmd *cobra.Command, args []string) error { err := onInit(cmd, args) if err != nil { return err } // Check if the requested command is for a version check // If it is, return here, as no creds are needed if cmd.Name() == versionCmd.Name() { return nil } // Check if the user has valid creds, // otherwise require authentication creds := Util.AWSSession.Config.Credentials _, _ = creds.Get() hasValidCreds := !creds.IsExpired() isAuthCommand := cmd.Name() == authCmd.Name() isInitCommand := cmd.Name() == initCmd.Name() if !hasValidCreds && !isAuthCommand && !isInitCommand { log.Print("No valid DCE credentials found") err := Service.Authenticate() if err != nil { return err } } return nil } // Execute adds all child commands to the root command and sets flags appropriately. 
func Execute() { if err := RootCmd.Execute(); err != nil { os.Exit(1) } // Print an extra newline when we're done, // so users terminal prompt shows up on a new line fmt.Println("") } type FmtOutputFormatter struct { } func (f *FmtOutputFormatter) Format(entry *logrus.Entry) ([]byte, error) { var serialized []byte serialized = []byte(fmt.Sprintf("%s\n", entry.Message)) return serialized, nil } func onInit(cmd *cobra.Command, args []string) error { // Configure observation / logging initObservation() // Expose global `log` object for ease of use log = Observation.Logger Log = log if len(cfgFile) == 0 { homeDir, err := os.UserHomeDir() if err != nil { log.Fatalf("error: %v", err) } cfgFile = filepath.Join(homeDir, ".dce", constants.DefaultConfigFileName) } fsUtil := &utl.FileSystemUtil{Config: Config, ConfigFile: cfgFile} // Initialize config // If config file does not exist, // run the `dce init` command if !fsUtil.IsExistingFile(cfgFile) { if cmd.Name() == versionCmd.Name() { return nil } else if cmd.Name() != initCmd.Name() { return errors.New("Config file not found. Please type 'dce init' to generate one.") } } else { // Load config from the configuration file err := fsUtil.ReadInConfig() if err != nil { return fmt.Errorf("Failed to parse dce.yml: %s", err) } } // initialize utilities and interfaces to external things Util = utl.New(Config, cfgFile, Observation) // initialize business logic services Service = svc.New(Config, Observation, Util) return nil } // initialize anything related to logging, metrics, or tracing func initObservation() { logrusInstance := logrus.New() logrusInstance.SetOutput(os.Stderr) //TODO: Make configurable var logLevel logrus.Level switch os.Getenv("DCE_LOG_LEVEL") { case "TRACE": logLevel = logrus.TraceLevel case "DEBUG": logLevel = logrus.DebugLevel case "INFO": logLevel = logrus.InfoLevel case "WARN": logLevel = logrus.WarnLevel case "ERROR": logLevel = logrus.ErrorLevel case "FATAL": logLevel = logrus.FatalLevel case "PANIC": logLevel = logrus.PanicLevel default: logLevel = logrus.InfoLevel } logrusInstance.SetLevel(logLevel) if logLevel == logrus.InfoLevel { logrusInstance.SetFormatter(&FmtOutputFormatter{}) } else { logrusInstance.SetFormatter(&logrus.TextFormatter{}) } Observation = observ.New(logrusInstance) }
[ "\"DCE_LOG_LEVEL\"" ]
[]
[ "DCE_LOG_LEVEL" ]
[]
["DCE_LOG_LEVEL"]
go
1
0
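The record above annotates the DCE CLI root command: its only environment access is a single constant-argument lookup of DCE_LOG_LEVEL (lang go, constargcount 1, variableargcount 0), which the code switches on to pick a logrus level. Below is a minimal Python sketch of the two lookup styles the columns appear to distinguish, assuming from the column names that constarg counts literal variable names while variablearg counts names only known at runtime; the lookup helper is purely illustrative.

import os

# Constant-argument lookup: the variable name is a string literal, which is what
# this record's environment/constarg fields capture ("DCE_LOG_LEVEL").
log_level = os.environ.get("DCE_LOG_LEVEL", "INFO")

# Variable-argument lookup: the name is only known at runtime; this record has
# none of these (variableargcount == 0). The "key" parameter is illustrative only.
def lookup(key):
    return os.environ.get(key)

print(log_level, lookup("DCE_LOG_LEVEL"))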
botocore/configprovider.py
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. """This module contains the inteface for controlling how configuration is loaded. """ import logging import os from botocore import utils logger = logging.getLogger(__name__) #: A default dictionary that maps the logical names for session variables #: to the specific environment variables and configuration file names #: that contain the values for these variables. #: When creating a new Session object, you can pass in your own dictionary #: to remap the logical names or to add new logical names. You can then #: get the current value for these variables by using the #: ``get_config_variable`` method of the :class:`botocore.session.Session` #: class. #: These form the keys of the dictionary. The values in the dictionary #: are tuples of (<config_name>, <environment variable>, <default value>, #: <conversion func>). #: The conversion func is a function that takes the configuration value #: as an argument and returns the converted value. If this value is #: None, then the configuration value is returned unmodified. This #: conversion function can be used to type convert config values to #: values other than the default values of strings. #: The ``profile`` and ``config_file`` variables should always have a #: None value for the first entry in the tuple because it doesn't make #: sense to look inside the config file for the location of the config #: file or for the default profile to use. #: The ``config_name`` is the name to look for in the configuration file, #: the ``env var`` is the OS environment variable (``os.environ``) to #: use, and ``default_value`` is the value to use if no value is otherwise #: found. BOTOCORE_DEFAUT_SESSION_VARIABLES = { # logical: config_file, env_var, default_value, conversion_func 'profile': (None, ['AWS_DEFAULT_PROFILE', 'AWS_PROFILE'], None, None), 'region': ('region', 'AWS_DEFAULT_REGION', None, None), 'data_path': ('data_path', 'AWS_DATA_PATH', None, None), 'config_file': (None, 'AWS_CONFIG_FILE', '~/.aws/config', None), 'ca_bundle': ('ca_bundle', 'AWS_CA_BUNDLE', None, None), 'api_versions': ('api_versions', None, {}, None), # This is the shared credentials file amongst sdks. 'credentials_file': (None, 'AWS_SHARED_CREDENTIALS_FILE', '~/.aws/credentials', None), # These variables only exist in the config file. # This is the number of seconds until we time out a request to # the instance metadata service. 'metadata_service_timeout': ( 'metadata_service_timeout', 'AWS_METADATA_SERVICE_TIMEOUT', 1, int), # This is the number of request attempts we make until we give # up trying to retrieve data from the instance metadata service. 'metadata_service_num_attempts': ( 'metadata_service_num_attempts', 'AWS_METADATA_SERVICE_NUM_ATTEMPTS', 1, int), 'parameter_validation': ('parameter_validation', None, True, None), # Client side monitoring configurations. # Note: These configurations are considered internal to botocore. # Do not use them until publicly documented. 
'csm_enabled': ( 'csm_enabled', 'AWS_CSM_ENABLED', False, utils.ensure_boolean), 'csm_host': ('csm_host', 'AWS_CSM_HOST', '127.0.0.1', None), 'csm_port': ('csm_port', 'AWS_CSM_PORT', 31000, int), 'csm_client_id': ('csm_client_id', 'AWS_CSM_CLIENT_ID', '', None), # Endpoint discovery configuration 'endpoint_discovery_enabled': ( 'endpoint_discovery_enabled', 'AWS_ENDPOINT_DISCOVERY_ENABLED', False, utils.ensure_boolean), 'sts_regional_endpoints': ( 'sts_regional_endpoints', 'AWS_STS_REGIONAL_ENDPOINTS', 'legacy', None ), 'retry_mode': ('retry_mode', 'AWS_RETRY_MODE', 'legacy', None), # We can't have a default here for v1 because we need to defer to # whatever the defaults are in _retry.json. 'max_attempts': ('max_attempts', 'AWS_MAX_ATTEMPTS', None, int), } # A mapping for the s3 specific configuration vars. These are the configuration # vars that typically go in the s3 section of the config file. This mapping # follows the same schema as the previous session variable mapping. DEFAULT_S3_CONFIG_VARS = { 'addressing_style': ( ('s3', 'addressing_style'), None, None, None), 'use_accelerate_endpoint': ( ('s3', 'use_accelerate_endpoint'), None, None, utils.ensure_boolean ), 'use_dualstack_endpoint': ( ('s3', 'use_dualstack_endpoint'), None, None, utils.ensure_boolean ), 'payload_signing_enabled': ( ('s3', 'payload_signing_enabled'), None, None, utils.ensure_boolean ), 'use_arn_region': ( ['s3_use_arn_region', ('s3', 'use_arn_region')], 'AWS_S3_USE_ARN_REGION', None, utils.ensure_boolean ), 'us_east_1_regional_endpoint': ( ['s3_us_east_1_regional_endpoint', ('s3', 'us_east_1_regional_endpoint')], 'AWS_S3_US_EAST_1_REGIONAL_ENDPOINT', None, None ) } def create_botocore_default_config_mapping(session): chain_builder = ConfigChainFactory(session=session) config_mapping = _create_config_chain_mapping( chain_builder, BOTOCORE_DEFAUT_SESSION_VARIABLES) config_mapping['s3'] = SectionConfigProvider( 's3', session, _create_config_chain_mapping( chain_builder, DEFAULT_S3_CONFIG_VARS) ) return config_mapping def _create_config_chain_mapping(chain_builder, config_variables): mapping = {} for logical_name, config in config_variables.items(): mapping[logical_name] = chain_builder.create_config_chain( instance_name=logical_name, env_var_names=config[1], config_property_names=config[0], default=config[2], conversion_func=config[3] ) return mapping class ConfigChainFactory(object): """Factory class to create our most common configuration chain case. This is a convenience class to construct configuration chains that follow our most common pattern. This is to prevent ordering them incorrectly, and to make the config chain construction more readable. """ def __init__(self, session, environ=None): """Initialize a ConfigChainFactory. :type session: :class:`botocore.session.Session` :param session: This is the session that should be used to look up values from the config file. :type environ: dict :param environ: A mapping to use for environment variables. If this is not provided it will default to use os.environ. """ self._session = session if environ is None: environ = os.environ self._environ = environ def create_config_chain(self, instance_name=None, env_var_names=None, config_property_names=None, default=None, conversion_func=None): """Build a config chain following the standard botocore pattern. In botocore most of our config chains follow the the precendence: session_instance_variables, environment, config_file, default_value. This is a convenience function for creating a chain that follow that precendence. 
:type instance_name: str :param instance_name: This indicates what session instance variable corresponds to this config value. If it is None it will not be added to the chain. :type env_var_names: str or list of str or None :param env_var_names: One or more environment variable names to search for this value. They are searched in order. If it is None it will not be added to the chain. :type config_property_names: str/tuple or list of str/tuple or None :param config_property_names: One of more strings or tuples representing the name of the key in the config file for this config option. They are searched in order. If it is None it will not be added to the chain. :type default: Any :param default: Any constant value to be returned. :type conversion_func: None or callable :param conversion_func: If this value is None then it has no effect on the return type. Otherwise, it is treated as a function that will conversion_func our provided type. :rvalue: ConfigChain :returns: A ConfigChain that resolves in the order env_var_names -> config_property_name -> default. Any values that were none are omitted form the chain. """ providers = [] if instance_name is not None: providers.append( InstanceVarProvider( instance_var=instance_name, session=self._session ) ) if env_var_names is not None: providers.extend(self._get_env_providers(env_var_names)) if config_property_names is not None: providers.extend( self._get_scoped_config_providers(config_property_names) ) if default is not None: providers.append(ConstantProvider(value=default)) return ChainProvider( providers=providers, conversion_func=conversion_func, ) def _get_env_providers(self, env_var_names): env_var_providers = [] if not isinstance(env_var_names, list): env_var_names = [env_var_names] for env_var_name in env_var_names: env_var_providers.append( EnvironmentProvider(name=env_var_name, env=self._environ) ) return env_var_providers def _get_scoped_config_providers(self, config_property_names): scoped_config_providers = [] if not isinstance(config_property_names, list): config_property_names = [config_property_names] for config_property_name in config_property_names: scoped_config_providers.append( ScopedConfigProvider( config_var_name=config_property_name, session=self._session, ) ) return scoped_config_providers class ConfigValueStore(object): """The ConfigValueStore object stores configuration values.""" def __init__(self, mapping=None): """Initialize a ConfigValueStore. :type mapping: dict :param mapping: The mapping parameter is a map of string to a subclass of BaseProvider. When a config variable is asked for via the get_config_variable method, the corresponding provider will be invoked to load the value. """ self._overrides = {} self._mapping = {} if mapping is not None: for logical_name, provider in mapping.items(): self.set_config_provider(logical_name, provider) def get_config_variable(self, logical_name): """ Retrieve the value associeated with the specified logical_name from the corresponding provider. If no value is found None will be returned. :type logical_name: str :param logical_name: The logical name of the session variable you want to retrieve. This name will be mapped to the appropriate environment variable name for this session as well as the appropriate config file entry. :returns: value of variable or None if not defined. 
""" if logical_name in self._overrides: return self._overrides[logical_name] if logical_name not in self._mapping: return None provider = self._mapping[logical_name] return provider.provide() def set_config_variable(self, logical_name, value): """Set a configuration variable to a specific value. By using this method, you can override the normal lookup process used in ``get_config_variable`` by explicitly setting a value. Subsequent calls to ``get_config_variable`` will use the ``value``. This gives you per-session specific configuration values. :: >>> # Assume logical name 'foo' maps to env var 'FOO' >>> os.environ['FOO'] = 'myvalue' >>> s.get_config_variable('foo') 'myvalue' >>> s.set_config_variable('foo', 'othervalue') >>> s.get_config_variable('foo') 'othervalue' :type logical_name: str :param logical_name: The logical name of the session variable you want to set. These are the keys in ``SESSION_VARIABLES``. :param value: The value to associate with the config variable. """ self._overrides[logical_name] = value def clear_config_variable(self, logical_name): """Remove an override config variable from the session. :type logical_name: str :param logical_name: The name of the parameter to clear the override value from. """ self._overrides.pop(logical_name, None) def set_config_provider(self, logical_name, provider): """Set the provider for a config value. This provides control over how a particular configuration value is loaded. This replaces the provider for ``logical_name`` with the new ``provider``. :type logical_name: str :param logical_name: The name of the config value to change the config provider for. :type provider: :class:`botocore.configprovider.BaseProvider` :param provider: The new provider that should be responsible for providing a value for the config named ``logical_name``. """ self._mapping[logical_name] = provider class BaseProvider(object): """Base class for configuration value providers. A configuration provider has some method of providing a configuration value. """ def provide(self): """Provide a config value.""" raise NotImplementedError('provide') class ChainProvider(BaseProvider): """This provider wraps one or more other providers. Each provider in the chain is called, the first one returning a non-None value is then returned. """ def __init__(self, providers=None, conversion_func=None): """Initalize a ChainProvider. :type providers: list :param providers: The initial list of providers to check for values when invoked. :type conversion_func: None or callable :param conversion_func: If this value is None then it has no affect on the return type. Otherwise, it is treated as a function that will transform provided value. """ if providers is None: providers = [] self._providers = providers self._conversion_func = conversion_func def provide(self): """Provide the value from the first provider to return non-None. Each provider in the chain has its provide method called. The first one in the chain to return a non-None value is the returned from the ChainProvider. When no non-None value is found, None is returned. 
""" for provider in self._providers: value = provider.provide() if value is not None: return self._convert_type(value) return None def _convert_type(self, value): if self._conversion_func is not None: return self._conversion_func(value) return value def __repr__(self): return '[%s]' % ', '.join([str(p) for p in self._providers]) class InstanceVarProvider(BaseProvider): """This class loads config values from the session instance vars.""" def __init__(self, instance_var, session): """Initialize InstanceVarProvider. :type instance_var: str :param instance_var: The instance variable to load from the session. :type session: :class:`botocore.session.Session` :param session: The botocore session to get the loaded configuration file variables from. """ self._instance_var = instance_var self._session = session def provide(self): """Provide a config value from the session instance vars.""" instance_vars = self._session.instance_variables() value = instance_vars.get(self._instance_var) return value def __repr__(self): return 'InstanceVarProvider(instance_var=%s, session=%s)' % ( self._instance_var, self._session, ) class ScopedConfigProvider(BaseProvider): def __init__(self, config_var_name, session): """Initialize ScopedConfigProvider. :type config_var_name: str or tuple :param config_var_name: The name of the config variable to load from the configuration file. If the value is a tuple, it must only consist of two items, where the first item represents the section and the second item represents the config var name in the section. :type session: :class:`botocore.session.Session` :param session: The botocore session to get the loaded configuration file variables from. """ self._config_var_name = config_var_name self._session = session def provide(self): """Provide a value from a config file property.""" scoped_config = self._session.get_scoped_config() if isinstance(self._config_var_name, tuple): section_config = scoped_config.get(self._config_var_name[0]) if not isinstance(section_config, dict): return None return section_config.get(self._config_var_name[1]) return scoped_config.get(self._config_var_name) def __repr__(self): return 'ScopedConfigProvider(config_var_name=%s, session=%s)' % ( self._config_var_name, self._session, ) class EnvironmentProvider(BaseProvider): """This class loads config values from environment variables.""" def __init__(self, name, env): """Initialize with the keys in the dictionary to check. :type name: str :param name: The key with that name will be loaded and returned. :type env: dict :param env: Environment variables dictionary to get variables from. """ self._name = name self._env = env def provide(self): """Provide a config value from a source dictionary.""" if self._name in self._env: return self._env[self._name] return None def __repr__(self): return 'EnvironmentProvider(name=%s, env=%s)' % (self._name, self._env) class SectionConfigProvider(BaseProvider): """Provides a dictionary from a section in the scoped config This is useful for retrieving scoped config variables (i.e. s3) that have their own set of config variables and resolving logic. 
""" def __init__(self, section_name, session, override_providers=None): self._section_name = section_name self._session = session self._scoped_config_provider = ScopedConfigProvider( self._section_name, self._session) self._override_providers = override_providers if self._override_providers is None: self._override_providers = {} def provide(self): section_config = self._scoped_config_provider.provide() if section_config and not isinstance(section_config, dict): logger.debug("The %s config key is not a dictionary type, " "ignoring its value of: %s", self._section_name, section_config) return None for section_config_var, provider in self._override_providers.items(): provider_val = provider.provide() if provider_val is not None: if section_config is None: section_config = {} section_config[section_config_var] = provider_val return section_config def __repr__(self): return ( 'SectionConfigProvider(section_name=%s, ' 'session=%s, override_providers=%s)' % ( self._section_name, self._session, self._override_providers, ) ) class ConstantProvider(BaseProvider): """This provider provides a constant value.""" def __init__(self, value): self._value = value def provide(self): """Provide the constant value given during initialization.""" return self._value def __repr__(self): return 'ConstantProvider(value=%s)' % self._value
[]
[]
[ "FOO" ]
[]
["FOO"]
python
1
0
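The botocore record above implements the provider-chain pattern its docstrings describe: ConfigChainFactory assembles a ChainProvider whose providers are consulted in precedence order (session instance variable, environment variable, config-file property, constant default), and an optional conversion function is applied to the first non-None value. A small sketch using only the classes defined in that file; the AWS_MAX_ATTEMPTS name comes from the file's session-variable table, while the fallback of 3 is illustrative rather than botocore's actual default.

import os
from botocore.configprovider import (
    ChainProvider,
    ConstantProvider,
    EnvironmentProvider,
)

# The environment provider wins over the constant fallback; conversion_func is
# applied to whichever provider first returns a non-None value.
chain = ChainProvider(
    providers=[
        EnvironmentProvider(name="AWS_MAX_ATTEMPTS", env=os.environ),
        ConstantProvider(3),  # illustrative fallback, not the library's default
    ],
    conversion_func=int,
)
print(chain.provide())  # 3 unless AWS_MAX_ATTEMPTS is set in the environment

A chain like this is exactly what ConfigValueStore.set_config_provider accepts, so a caller can swap in a custom resolution order for any logical name.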
tests/unit/gapic/dialogflow_v2/test_versions.py
# -*- coding: utf-8 -*- # Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os # try/except added for compatibility with python < 3.8 try: from unittest import mock from unittest.mock import AsyncMock except ImportError: import mock import grpc from grpc.experimental import aio import math import pytest from proto.marshal.rules.dates import DurationRule, TimestampRule from google.api_core import client_options from google.api_core import exceptions as core_exceptions from google.api_core import gapic_v1 from google.api_core import grpc_helpers from google.api_core import grpc_helpers_async from google.api_core import path_template from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.dialogflow_v2.services.versions import VersionsAsyncClient from google.cloud.dialogflow_v2.services.versions import VersionsClient from google.cloud.dialogflow_v2.services.versions import pagers from google.cloud.dialogflow_v2.services.versions import transports from google.cloud.dialogflow_v2.types import version from google.cloud.dialogflow_v2.types import version as gcd_version from google.oauth2 import service_account from google.protobuf import field_mask_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore import google.auth def client_cert_source_callback(): return b"cert bytes", b"key bytes" # If default endpoint is localhost, then default mtls endpoint will be the same. # This method modifies the default endpoint so the client can produce a different # mtls endpoint for endpoint testing purposes. 
def modify_default_endpoint(client): return ( "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT ) def test__get_default_mtls_endpoint(): api_endpoint = "example.googleapis.com" api_mtls_endpoint = "example.mtls.googleapis.com" sandbox_endpoint = "example.sandbox.googleapis.com" sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" non_googleapi = "api.example.com" assert VersionsClient._get_default_mtls_endpoint(None) is None assert VersionsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint assert ( VersionsClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint ) assert ( VersionsClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint ) assert ( VersionsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint ) assert VersionsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi @pytest.mark.parametrize( "client_class,transport_name", [ (VersionsClient, "grpc"), (VersionsAsyncClient, "grpc_asyncio"), ], ) def test_versions_client_from_service_account_info(client_class, transport_name): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} client = client_class.from_service_account_info(info, transport=transport_name) assert client.transport._credentials == creds assert isinstance(client, client_class) assert client.transport._host == ("dialogflow.googleapis.com:443") @pytest.mark.parametrize( "transport_class,transport_name", [ (transports.VersionsGrpcTransport, "grpc"), (transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio"), ], ) def test_versions_client_service_account_always_use_jwt( transport_class, transport_name ): with mock.patch.object( service_account.Credentials, "with_always_use_jwt_access", create=True ) as use_jwt: creds = service_account.Credentials(None, None, None) transport = transport_class(credentials=creds, always_use_jwt_access=True) use_jwt.assert_called_once_with(True) with mock.patch.object( service_account.Credentials, "with_always_use_jwt_access", create=True ) as use_jwt: creds = service_account.Credentials(None, None, None) transport = transport_class(credentials=creds, always_use_jwt_access=False) use_jwt.assert_not_called() @pytest.mark.parametrize( "client_class,transport_name", [ (VersionsClient, "grpc"), (VersionsAsyncClient, "grpc_asyncio"), ], ) def test_versions_client_from_service_account_file(client_class, transport_name): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: factory.return_value = creds client = client_class.from_service_account_file( "dummy/file/path.json", transport=transport_name ) assert client.transport._credentials == creds assert isinstance(client, client_class) client = client_class.from_service_account_json( "dummy/file/path.json", transport=transport_name ) assert client.transport._credentials == creds assert isinstance(client, client_class) assert client.transport._host == ("dialogflow.googleapis.com:443") def test_versions_client_get_transport_class(): transport = VersionsClient.get_transport_class() available_transports = [ transports.VersionsGrpcTransport, ] assert transport in available_transports transport = VersionsClient.get_transport_class("grpc") assert transport == transports.VersionsGrpcTransport @pytest.mark.parametrize( 
"client_class,transport_class,transport_name", [ (VersionsClient, transports.VersionsGrpcTransport, "grpc"), (VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio"), ], ) @mock.patch.object( VersionsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VersionsClient) ) @mock.patch.object( VersionsAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VersionsAsyncClient), ) def test_versions_client_client_options(client_class, transport_class, transport_name): # Check that if channel is provided we won't create a new one. with mock.patch.object(VersionsClient, "get_transport_class") as gtc: transport = transport_class(credentials=ga_credentials.AnonymousCredentials()) client = client_class(transport=transport) gtc.assert_not_called() # Check that if channel is provided via str we will create a new one. with mock.patch.object(VersionsClient, "get_transport_class") as gtc: client = client_class(transport=transport_name) gtc.assert_called() # Check the case api_endpoint is provided. options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(transport=transport_name, client_options=options) patched.assert_called_once_with( credentials=None, credentials_file=None, host="squid.clam.whelk", scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is # "never". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is # "always". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_MTLS_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has # unsupported value. with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): with pytest.raises(MutualTLSChannelError): client = client_class(transport=transport_name) # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} ): with pytest.raises(ValueError): client = client_class(transport=transport_name) # Check the case quota_project_id is provided options = client_options.ClientOptions(quota_project_id="octopus") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id="octopus", client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) @pytest.mark.parametrize( "client_class,transport_class,transport_name,use_client_cert_env", [ (VersionsClient, transports.VersionsGrpcTransport, "grpc", "true"), ( VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio", "true", ), (VersionsClient, transports.VersionsGrpcTransport, "grpc", "false"), ( VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio", "false", ), ], ) @mock.patch.object( VersionsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VersionsClient) ) @mock.patch.object( VersionsAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VersionsAsyncClient), ) @mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) def test_versions_client_mtls_env_auto( client_class, transport_class, transport_name, use_client_cert_env ): # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. # Check the case client_cert_source is provided. Whether client cert is used depends on # GOOGLE_API_USE_CLIENT_CERTIFICATE value. with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} ): options = client_options.ClientOptions( client_cert_source=client_cert_source_callback ) with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) if use_client_cert_env == "false": expected_client_cert_source = None expected_host = client.DEFAULT_ENDPOINT else: expected_client_cert_source = client_cert_source_callback expected_host = client.DEFAULT_MTLS_ENDPOINT patched.assert_called_once_with( credentials=None, credentials_file=None, host=expected_host, scopes=None, client_cert_source_for_mtls=expected_client_cert_source, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case ADC client cert is provided. Whether client cert is used depends on # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} ): with mock.patch.object(transport_class, "__init__") as patched: with mock.patch( "google.auth.transport.mtls.has_default_client_cert_source", return_value=True, ): with mock.patch( "google.auth.transport.mtls.default_client_cert_source", return_value=client_cert_source_callback, ): if use_client_cert_env == "false": expected_host = client.DEFAULT_ENDPOINT expected_client_cert_source = None else: expected_host = client.DEFAULT_MTLS_ENDPOINT expected_client_cert_source = client_cert_source_callback patched.return_value = None client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=expected_host, scopes=None, client_cert_source_for_mtls=expected_client_cert_source, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case client_cert_source and ADC client cert are not provided. with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} ): with mock.patch.object(transport_class, "__init__") as patched: with mock.patch( "google.auth.transport.mtls.has_default_client_cert_source", return_value=False, ): patched.return_value = None client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) @pytest.mark.parametrize("client_class", [VersionsClient, VersionsAsyncClient]) @mock.patch.object( VersionsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VersionsClient) ) @mock.patch.object( VersionsAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VersionsAsyncClient), ) def test_versions_client_get_mtls_endpoint_and_cert_source(client_class): mock_client_cert_source = mock.Mock() # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): mock_api_endpoint = "foo" options = client_options.ClientOptions( client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint ) api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( options ) assert api_endpoint == mock_api_endpoint assert cert_source == mock_client_cert_source # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): mock_client_cert_source = mock.Mock() mock_api_endpoint = "foo" options = client_options.ClientOptions( client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint ) api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( options ) assert api_endpoint == mock_api_endpoint assert cert_source is None # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() assert api_endpoint == client_class.DEFAULT_ENDPOINT assert cert_source is None # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". 
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source is None # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): with mock.patch( "google.auth.transport.mtls.has_default_client_cert_source", return_value=False, ): api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() assert api_endpoint == client_class.DEFAULT_ENDPOINT assert cert_source is None # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): with mock.patch( "google.auth.transport.mtls.has_default_client_cert_source", return_value=True, ): with mock.patch( "google.auth.transport.mtls.default_client_cert_source", return_value=mock_client_cert_source, ): ( api_endpoint, cert_source, ) = client_class.get_mtls_endpoint_and_cert_source() assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source @pytest.mark.parametrize( "client_class,transport_class,transport_name", [ (VersionsClient, transports.VersionsGrpcTransport, "grpc"), (VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio"), ], ) def test_versions_client_client_options_scopes( client_class, transport_class, transport_name ): # Check the case scopes are provided. options = client_options.ClientOptions( scopes=["1", "2"], ) with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=["1", "2"], client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) @pytest.mark.parametrize( "client_class,transport_class,transport_name,grpc_helpers", [ (VersionsClient, transports.VersionsGrpcTransport, "grpc", grpc_helpers), ( VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async, ), ], ) def test_versions_client_client_options_credentials_file( client_class, transport_class, transport_name, grpc_helpers ): # Check the case credentials file is provided. 
options = client_options.ClientOptions(credentials_file="credentials.json") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file="credentials.json", host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) def test_versions_client_client_options_from_dict(): with mock.patch( "google.cloud.dialogflow_v2.services.versions.transports.VersionsGrpcTransport.__init__" ) as grpc_transport: grpc_transport.return_value = None client = VersionsClient(client_options={"api_endpoint": "squid.clam.whelk"}) grpc_transport.assert_called_once_with( credentials=None, credentials_file=None, host="squid.clam.whelk", scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) @pytest.mark.parametrize( "client_class,transport_class,transport_name,grpc_helpers", [ (VersionsClient, transports.VersionsGrpcTransport, "grpc", grpc_helpers), ( VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async, ), ], ) def test_versions_client_create_channel_credentials_file( client_class, transport_class, transport_name, grpc_helpers ): # Check the case credentials file is provided. options = client_options.ClientOptions(credentials_file="credentials.json") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file="credentials.json", host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # test that the credentials from file are saved and used as the credentials. with mock.patch.object( google.auth, "load_credentials_from_file", autospec=True ) as load_creds, mock.patch.object( google.auth, "default", autospec=True ) as adc, mock.patch.object( grpc_helpers, "create_channel" ) as create_channel: creds = ga_credentials.AnonymousCredentials() file_creds = ga_credentials.AnonymousCredentials() load_creds.return_value = (file_creds, None) adc.return_value = (creds, None) client = client_class(client_options=options, transport=transport_name) create_channel.assert_called_with( "dialogflow.googleapis.com:443", credentials=file_creds, credentials_file=None, quota_project_id=None, default_scopes=( "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/dialogflow", ), scopes=None, default_host="dialogflow.googleapis.com", ssl_credentials=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) @pytest.mark.parametrize( "request_type", [ version.ListVersionsRequest, dict, ], ) def test_list_versions(request_type, transport: str = "grpc"): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object(type(client.transport.list_versions), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = version.ListVersionsResponse( next_page_token="next_page_token_value", ) response = client.list_versions(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == version.ListVersionsRequest() # Establish that the response is the type that we expect. assert isinstance(response, pagers.ListVersionsPager) assert response.next_page_token == "next_page_token_value" def test_list_versions_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_versions), "__call__") as call: client.list_versions() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == version.ListVersionsRequest() @pytest.mark.asyncio async def test_list_versions_async( transport: str = "grpc_asyncio", request_type=version.ListVersionsRequest ): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_versions), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( version.ListVersionsResponse( next_page_token="next_page_token_value", ) ) response = await client.list_versions(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == version.ListVersionsRequest() # Establish that the response is the type that we expect. assert isinstance(response, pagers.ListVersionsAsyncPager) assert response.next_page_token == "next_page_token_value" @pytest.mark.asyncio async def test_list_versions_async_from_dict(): await test_list_versions_async(request_type=dict) def test_list_versions_field_headers(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = version.ListVersionsRequest() request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_versions), "__call__") as call: call.return_value = version.ListVersionsResponse() client.list_versions(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", "parent=parent_value", ) in kw["metadata"] @pytest.mark.asyncio async def test_list_versions_field_headers_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. 
request = version.ListVersionsRequest() request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_versions), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( version.ListVersionsResponse() ) await client.list_versions(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", "parent=parent_value", ) in kw["metadata"] def test_list_versions_flattened(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_versions), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = version.ListVersionsResponse() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.list_versions( parent="parent_value", ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] arg = args[0].parent mock_val = "parent_value" assert arg == mock_val def test_list_versions_flattened_error(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.list_versions( version.ListVersionsRequest(), parent="parent_value", ) @pytest.mark.asyncio async def test_list_versions_flattened_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_versions), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = version.ListVersionsResponse() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( version.ListVersionsResponse() ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.list_versions( parent="parent_value", ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] arg = args[0].parent mock_val = "parent_value" assert arg == mock_val @pytest.mark.asyncio async def test_list_versions_flattened_error_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.list_versions( version.ListVersionsRequest(), parent="parent_value", ) def test_list_versions_pager(transport_name: str = "grpc"): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials, transport=transport_name, ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_versions), "__call__") as call: # Set the response to a series of pages. 
call.side_effect = ( version.ListVersionsResponse( versions=[ version.Version(), version.Version(), version.Version(), ], next_page_token="abc", ), version.ListVersionsResponse( versions=[], next_page_token="def", ), version.ListVersionsResponse( versions=[ version.Version(), ], next_page_token="ghi", ), version.ListVersionsResponse( versions=[ version.Version(), version.Version(), ], ), RuntimeError, ) metadata = () metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_versions(request={}) assert pager._metadata == metadata results = list(pager) assert len(results) == 6 assert all(isinstance(i, version.Version) for i in results) def test_list_versions_pages(transport_name: str = "grpc"): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials, transport=transport_name, ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_versions), "__call__") as call: # Set the response to a series of pages. call.side_effect = ( version.ListVersionsResponse( versions=[ version.Version(), version.Version(), version.Version(), ], next_page_token="abc", ), version.ListVersionsResponse( versions=[], next_page_token="def", ), version.ListVersionsResponse( versions=[ version.Version(), ], next_page_token="ghi", ), version.ListVersionsResponse( versions=[ version.Version(), version.Version(), ], ), RuntimeError, ) pages = list(client.list_versions(request={}).pages) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @pytest.mark.asyncio async def test_list_versions_async_pager(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials, ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_versions), "__call__", new_callable=mock.AsyncMock ) as call: # Set the response to a series of pages. call.side_effect = ( version.ListVersionsResponse( versions=[ version.Version(), version.Version(), version.Version(), ], next_page_token="abc", ), version.ListVersionsResponse( versions=[], next_page_token="def", ), version.ListVersionsResponse( versions=[ version.Version(), ], next_page_token="ghi", ), version.ListVersionsResponse( versions=[ version.Version(), version.Version(), ], ), RuntimeError, ) async_pager = await client.list_versions( request={}, ) assert async_pager.next_page_token == "abc" responses = [] async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 assert all(isinstance(i, version.Version) for i in responses) @pytest.mark.asyncio async def test_list_versions_async_pages(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials, ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_versions), "__call__", new_callable=mock.AsyncMock ) as call: # Set the response to a series of pages. 
call.side_effect = ( version.ListVersionsResponse( versions=[ version.Version(), version.Version(), version.Version(), ], next_page_token="abc", ), version.ListVersionsResponse( versions=[], next_page_token="def", ), version.ListVersionsResponse( versions=[ version.Version(), ], next_page_token="ghi", ), version.ListVersionsResponse( versions=[ version.Version(), version.Version(), ], ), RuntimeError, ) pages = [] async for page_ in ( await client.list_versions(request={}) ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @pytest.mark.parametrize( "request_type", [ version.GetVersionRequest, dict, ], ) def test_get_version(request_type, transport: str = "grpc"): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_version), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = version.Version( name="name_value", description="description_value", version_number=1518, status=version.Version.VersionStatus.IN_PROGRESS, ) response = client.get_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == version.GetVersionRequest() # Establish that the response is the type that we expect. assert isinstance(response, version.Version) assert response.name == "name_value" assert response.description == "description_value" assert response.version_number == 1518 assert response.status == version.Version.VersionStatus.IN_PROGRESS def test_get_version_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_version), "__call__") as call: client.get_version() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == version.GetVersionRequest() @pytest.mark.asyncio async def test_get_version_async( transport: str = "grpc_asyncio", request_type=version.GetVersionRequest ): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_version), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( version.Version( name="name_value", description="description_value", version_number=1518, status=version.Version.VersionStatus.IN_PROGRESS, ) ) response = await client.get_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == version.GetVersionRequest() # Establish that the response is the type that we expect. 
assert isinstance(response, version.Version) assert response.name == "name_value" assert response.description == "description_value" assert response.version_number == 1518 assert response.status == version.Version.VersionStatus.IN_PROGRESS @pytest.mark.asyncio async def test_get_version_async_from_dict(): await test_get_version_async(request_type=dict) def test_get_version_field_headers(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = version.GetVersionRequest() request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_version), "__call__") as call: call.return_value = version.Version() client.get_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", "name=name_value", ) in kw["metadata"] @pytest.mark.asyncio async def test_get_version_field_headers_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = version.GetVersionRequest() request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_version), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(version.Version()) await client.get_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", "name=name_value", ) in kw["metadata"] def test_get_version_flattened(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_version), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = version.Version() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.get_version( name="name_value", ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] arg = args[0].name mock_val = "name_value" assert arg == mock_val def test_get_version_flattened_error(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.get_version( version.GetVersionRequest(), name="name_value", ) @pytest.mark.asyncio async def test_get_version_flattened_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_version), "__call__") as call: # Designate an appropriate return value for the call. 
call.return_value = version.Version() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(version.Version()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.get_version( name="name_value", ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] arg = args[0].name mock_val = "name_value" assert arg == mock_val @pytest.mark.asyncio async def test_get_version_flattened_error_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.get_version( version.GetVersionRequest(), name="name_value", ) @pytest.mark.parametrize( "request_type", [ gcd_version.CreateVersionRequest, dict, ], ) def test_create_version(request_type, transport: str = "grpc"): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_version), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = gcd_version.Version( name="name_value", description="description_value", version_number=1518, status=gcd_version.Version.VersionStatus.IN_PROGRESS, ) response = client.create_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == gcd_version.CreateVersionRequest() # Establish that the response is the type that we expect. assert isinstance(response, gcd_version.Version) assert response.name == "name_value" assert response.description == "description_value" assert response.version_number == 1518 assert response.status == gcd_version.Version.VersionStatus.IN_PROGRESS def test_create_version_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_version), "__call__") as call: client.create_version() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == gcd_version.CreateVersionRequest() @pytest.mark.asyncio async def test_create_version_async( transport: str = "grpc_asyncio", request_type=gcd_version.CreateVersionRequest ): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_version), "__call__") as call: # Designate an appropriate return value for the call. 
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( gcd_version.Version( name="name_value", description="description_value", version_number=1518, status=gcd_version.Version.VersionStatus.IN_PROGRESS, ) ) response = await client.create_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == gcd_version.CreateVersionRequest() # Establish that the response is the type that we expect. assert isinstance(response, gcd_version.Version) assert response.name == "name_value" assert response.description == "description_value" assert response.version_number == 1518 assert response.status == gcd_version.Version.VersionStatus.IN_PROGRESS @pytest.mark.asyncio async def test_create_version_async_from_dict(): await test_create_version_async(request_type=dict) def test_create_version_field_headers(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = gcd_version.CreateVersionRequest() request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_version), "__call__") as call: call.return_value = gcd_version.Version() client.create_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", "parent=parent_value", ) in kw["metadata"] @pytest.mark.asyncio async def test_create_version_field_headers_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = gcd_version.CreateVersionRequest() request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_version), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_version.Version()) await client.create_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", "parent=parent_value", ) in kw["metadata"] def test_create_version_flattened(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_version), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = gcd_version.Version() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.create_version( parent="parent_value", version=gcd_version.Version(name="name_value"), ) # Establish that the underlying call was made with the expected # request object values. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] arg = args[0].parent mock_val = "parent_value" assert arg == mock_val arg = args[0].version mock_val = gcd_version.Version(name="name_value") assert arg == mock_val def test_create_version_flattened_error(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.create_version( gcd_version.CreateVersionRequest(), parent="parent_value", version=gcd_version.Version(name="name_value"), ) @pytest.mark.asyncio async def test_create_version_flattened_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_version), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = gcd_version.Version() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_version.Version()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.create_version( parent="parent_value", version=gcd_version.Version(name="name_value"), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] arg = args[0].parent mock_val = "parent_value" assert arg == mock_val arg = args[0].version mock_val = gcd_version.Version(name="name_value") assert arg == mock_val @pytest.mark.asyncio async def test_create_version_flattened_error_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.create_version( gcd_version.CreateVersionRequest(), parent="parent_value", version=gcd_version.Version(name="name_value"), ) @pytest.mark.parametrize( "request_type", [ gcd_version.UpdateVersionRequest, dict, ], ) def test_update_version(request_type, transport: str = "grpc"): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_version), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = gcd_version.Version( name="name_value", description="description_value", version_number=1518, status=gcd_version.Version.VersionStatus.IN_PROGRESS, ) response = client.update_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == gcd_version.UpdateVersionRequest() # Establish that the response is the type that we expect. assert isinstance(response, gcd_version.Version) assert response.name == "name_value" assert response.description == "description_value" assert response.version_number == 1518 assert response.status == gcd_version.Version.VersionStatus.IN_PROGRESS def test_update_version_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_version), "__call__") as call: client.update_version() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == gcd_version.UpdateVersionRequest() @pytest.mark.asyncio async def test_update_version_async( transport: str = "grpc_asyncio", request_type=gcd_version.UpdateVersionRequest ): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_version), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( gcd_version.Version( name="name_value", description="description_value", version_number=1518, status=gcd_version.Version.VersionStatus.IN_PROGRESS, ) ) response = await client.update_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == gcd_version.UpdateVersionRequest() # Establish that the response is the type that we expect. assert isinstance(response, gcd_version.Version) assert response.name == "name_value" assert response.description == "description_value" assert response.version_number == 1518 assert response.status == gcd_version.Version.VersionStatus.IN_PROGRESS @pytest.mark.asyncio async def test_update_version_async_from_dict(): await test_update_version_async(request_type=dict) def test_update_version_field_headers(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = gcd_version.UpdateVersionRequest() request.version.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_version), "__call__") as call: call.return_value = gcd_version.Version() client.update_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", "version.name=name_value", ) in kw["metadata"] @pytest.mark.asyncio async def test_update_version_field_headers_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = gcd_version.UpdateVersionRequest() request.version.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_version), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_version.Version()) await client.update_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. 
_, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", "version.name=name_value", ) in kw["metadata"] def test_update_version_flattened(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_version), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = gcd_version.Version() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.update_version( version=gcd_version.Version(name="name_value"), update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] arg = args[0].version mock_val = gcd_version.Version(name="name_value") assert arg == mock_val arg = args[0].update_mask mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) assert arg == mock_val def test_update_version_flattened_error(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.update_version( gcd_version.UpdateVersionRequest(), version=gcd_version.Version(name="name_value"), update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) @pytest.mark.asyncio async def test_update_version_flattened_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_version), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = gcd_version.Version() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_version.Version()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.update_version( version=gcd_version.Version(name="name_value"), update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] arg = args[0].version mock_val = gcd_version.Version(name="name_value") assert arg == mock_val arg = args[0].update_mask mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) assert arg == mock_val @pytest.mark.asyncio async def test_update_version_flattened_error_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.update_version( gcd_version.UpdateVersionRequest(), version=gcd_version.Version(name="name_value"), update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) @pytest.mark.parametrize( "request_type", [ version.DeleteVersionRequest, dict, ], ) def test_delete_version(request_type, transport: str = "grpc"): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object(type(client.transport.delete_version), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = None response = client.delete_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == version.DeleteVersionRequest() # Establish that the response is the type that we expect. assert response is None def test_delete_version_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_version), "__call__") as call: client.delete_version() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == version.DeleteVersionRequest() @pytest.mark.asyncio async def test_delete_version_async( transport: str = "grpc_asyncio", request_type=version.DeleteVersionRequest ): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_version), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) response = await client.delete_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == version.DeleteVersionRequest() # Establish that the response is the type that we expect. assert response is None @pytest.mark.asyncio async def test_delete_version_async_from_dict(): await test_delete_version_async(request_type=dict) def test_delete_version_field_headers(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = version.DeleteVersionRequest() request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_version), "__call__") as call: call.return_value = None client.delete_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", "name=name_value", ) in kw["metadata"] @pytest.mark.asyncio async def test_delete_version_field_headers_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = version.DeleteVersionRequest() request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object(type(client.transport.delete_version), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) await client.delete_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", "name=name_value", ) in kw["metadata"] def test_delete_version_flattened(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_version), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = None # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.delete_version( name="name_value", ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] arg = args[0].name mock_val = "name_value" assert arg == mock_val def test_delete_version_flattened_error(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.delete_version( version.DeleteVersionRequest(), name="name_value", ) @pytest.mark.asyncio async def test_delete_version_flattened_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_version), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = None call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.delete_version( name="name_value", ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] arg = args[0].name mock_val = "name_value" assert arg == mock_val @pytest.mark.asyncio async def test_delete_version_flattened_error_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.delete_version( version.DeleteVersionRequest(), name="name_value", ) def test_credentials_transport_error(): # It is an error to provide credentials and a transport instance. transport = transports.VersionsGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # It is an error to provide a credentials file and a transport instance. transport = transports.VersionsGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = VersionsClient( client_options={"credentials_file": "credentials.json"}, transport=transport, ) # It is an error to provide an api_key and a transport instance. 
transport = transports.VersionsGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) options = client_options.ClientOptions() options.api_key = "api_key" with pytest.raises(ValueError): client = VersionsClient( client_options=options, transport=transport, ) # It is an error to provide an api_key and a credential. options = mock.Mock() options.api_key = "api_key" with pytest.raises(ValueError): client = VersionsClient( client_options=options, credentials=ga_credentials.AnonymousCredentials() ) # It is an error to provide scopes and a transport instance. transport = transports.VersionsGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = VersionsClient( client_options={"scopes": ["1", "2"]}, transport=transport, ) def test_transport_instance(): # A client may be instantiated with a custom transport instance. transport = transports.VersionsGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) client = VersionsClient(transport=transport) assert client.transport is transport def test_transport_get_channel(): # A client may be instantiated with a custom transport instance. transport = transports.VersionsGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) channel = transport.grpc_channel assert channel transport = transports.VersionsGrpcAsyncIOTransport( credentials=ga_credentials.AnonymousCredentials(), ) channel = transport.grpc_channel assert channel @pytest.mark.parametrize( "transport_class", [ transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport, ], ) def test_transport_adc(transport_class): # Test default credentials are used if not provided. with mock.patch.object(google.auth, "default") as adc: adc.return_value = (ga_credentials.AnonymousCredentials(), None) transport_class() adc.assert_called_once() @pytest.mark.parametrize( "transport_name", [ "grpc", ], ) def test_transport_kind(transport_name): transport = VersionsClient.get_transport_class(transport_name)( credentials=ga_credentials.AnonymousCredentials(), ) assert transport.kind == transport_name def test_transport_grpc_default(): # A client should use the gRPC transport by default. client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) assert isinstance( client.transport, transports.VersionsGrpcTransport, ) def test_versions_base_transport_error(): # Passing both a credentials object and credentials_file should raise an error with pytest.raises(core_exceptions.DuplicateCredentialArgs): transport = transports.VersionsTransport( credentials=ga_credentials.AnonymousCredentials(), credentials_file="credentials.json", ) def test_versions_base_transport(): # Instantiate the base transport. with mock.patch( "google.cloud.dialogflow_v2.services.versions.transports.VersionsTransport.__init__" ) as Transport: Transport.return_value = None transport = transports.VersionsTransport( credentials=ga_credentials.AnonymousCredentials(), ) # Every method on the transport should just blindly # raise NotImplementedError. 
methods = ( "list_versions", "get_version", "create_version", "update_version", "delete_version", ) for method in methods: with pytest.raises(NotImplementedError): getattr(transport, method)(request=object()) with pytest.raises(NotImplementedError): transport.close() # Catch all for all remaining methods and properties remainder = [ "kind", ] for r in remainder: with pytest.raises(NotImplementedError): getattr(transport, r)() def test_versions_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file with mock.patch.object( google.auth, "load_credentials_from_file", autospec=True ) as load_creds, mock.patch( "google.cloud.dialogflow_v2.services.versions.transports.VersionsTransport._prep_wrapped_messages" ) as Transport: Transport.return_value = None load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) transport = transports.VersionsTransport( credentials_file="credentials.json", quota_project_id="octopus", ) load_creds.assert_called_once_with( "credentials.json", scopes=None, default_scopes=( "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/dialogflow", ), quota_project_id="octopus", ) def test_versions_base_transport_with_adc(): # Test the default credentials are used if credentials and credentials_file are None. with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch( "google.cloud.dialogflow_v2.services.versions.transports.VersionsTransport._prep_wrapped_messages" ) as Transport: Transport.return_value = None adc.return_value = (ga_credentials.AnonymousCredentials(), None) transport = transports.VersionsTransport() adc.assert_called_once() def test_versions_auth_adc(): # If no credentials are provided, we should use ADC credentials. with mock.patch.object(google.auth, "default", autospec=True) as adc: adc.return_value = (ga_credentials.AnonymousCredentials(), None) VersionsClient() adc.assert_called_once_with( scopes=None, default_scopes=( "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/dialogflow", ), quota_project_id=None, ) @pytest.mark.parametrize( "transport_class", [ transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport, ], ) def test_versions_transport_auth_adc(transport_class): # If credentials and host are not provided, the transport class should use # ADC credentials. with mock.patch.object(google.auth, "default", autospec=True) as adc: adc.return_value = (ga_credentials.AnonymousCredentials(), None) transport_class(quota_project_id="octopus", scopes=["1", "2"]) adc.assert_called_once_with( scopes=["1", "2"], default_scopes=( "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/dialogflow", ), quota_project_id="octopus", ) @pytest.mark.parametrize( "transport_class,grpc_helpers", [ (transports.VersionsGrpcTransport, grpc_helpers), (transports.VersionsGrpcAsyncIOTransport, grpc_helpers_async), ], ) def test_versions_transport_create_channel(transport_class, grpc_helpers): # If credentials and host are not provided, the transport class should use # ADC credentials. 
with mock.patch.object( google.auth, "default", autospec=True ) as adc, mock.patch.object( grpc_helpers, "create_channel", autospec=True ) as create_channel: creds = ga_credentials.AnonymousCredentials() adc.return_value = (creds, None) transport_class(quota_project_id="octopus", scopes=["1", "2"]) create_channel.assert_called_with( "dialogflow.googleapis.com:443", credentials=creds, credentials_file=None, quota_project_id="octopus", default_scopes=( "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/dialogflow", ), scopes=["1", "2"], default_host="dialogflow.googleapis.com", ssl_credentials=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) @pytest.mark.parametrize( "transport_class", [transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport], ) def test_versions_grpc_transport_client_cert_source_for_mtls(transport_class): cred = ga_credentials.AnonymousCredentials() # Check ssl_channel_credentials is used if provided. with mock.patch.object(transport_class, "create_channel") as mock_create_channel: mock_ssl_channel_creds = mock.Mock() transport_class( host="squid.clam.whelk", credentials=cred, ssl_channel_credentials=mock_ssl_channel_creds, ) mock_create_channel.assert_called_once_with( "squid.clam.whelk:443", credentials=cred, credentials_file=None, scopes=None, ssl_credentials=mock_ssl_channel_creds, quota_project_id=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls # is used. with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: transport_class( credentials=cred, client_cert_source_for_mtls=client_cert_source_callback, ) expected_cert, expected_key = client_cert_source_callback() mock_ssl_cred.assert_called_once_with( certificate_chain=expected_cert, private_key=expected_key ) @pytest.mark.parametrize( "transport_name", [ "grpc", "grpc_asyncio", ], ) def test_versions_host_no_port(transport_name): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="dialogflow.googleapis.com" ), transport=transport_name, ) assert client.transport._host == ("dialogflow.googleapis.com:443") @pytest.mark.parametrize( "transport_name", [ "grpc", "grpc_asyncio", ], ) def test_versions_host_with_port(transport_name): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="dialogflow.googleapis.com:8000" ), transport=transport_name, ) assert client.transport._host == ("dialogflow.googleapis.com:8000") def test_versions_grpc_transport_channel(): channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) # Check that channel is used if provided. transport = transports.VersionsGrpcTransport( host="squid.clam.whelk", channel=channel, ) assert transport.grpc_channel == channel assert transport._host == "squid.clam.whelk:443" assert transport._ssl_channel_credentials == None def test_versions_grpc_asyncio_transport_channel(): channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) # Check that channel is used if provided. 
transport = transports.VersionsGrpcAsyncIOTransport( host="squid.clam.whelk", channel=channel, ) assert transport.grpc_channel == channel assert transport._host == "squid.clam.whelk:443" assert transport._ssl_channel_credentials == None # Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are # removed from grpc/grpc_asyncio transport constructor. @pytest.mark.parametrize( "transport_class", [transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport], ) def test_versions_transport_channel_mtls_with_client_cert_source(transport_class): with mock.patch( "grpc.ssl_channel_credentials", autospec=True ) as grpc_ssl_channel_cred: with mock.patch.object( transport_class, "create_channel" ) as grpc_create_channel: mock_ssl_cred = mock.Mock() grpc_ssl_channel_cred.return_value = mock_ssl_cred mock_grpc_channel = mock.Mock() grpc_create_channel.return_value = mock_grpc_channel cred = ga_credentials.AnonymousCredentials() with pytest.warns(DeprecationWarning): with mock.patch.object(google.auth, "default") as adc: adc.return_value = (cred, None) transport = transport_class( host="squid.clam.whelk", api_mtls_endpoint="mtls.squid.clam.whelk", client_cert_source=client_cert_source_callback, ) adc.assert_called_once() grpc_ssl_channel_cred.assert_called_once_with( certificate_chain=b"cert bytes", private_key=b"key bytes" ) grpc_create_channel.assert_called_once_with( "mtls.squid.clam.whelk:443", credentials=cred, credentials_file=None, scopes=None, ssl_credentials=mock_ssl_cred, quota_project_id=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) assert transport.grpc_channel == mock_grpc_channel assert transport._ssl_channel_credentials == mock_ssl_cred # Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are # removed from grpc/grpc_asyncio transport constructor. @pytest.mark.parametrize( "transport_class", [transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport], ) def test_versions_transport_channel_mtls_with_adc(transport_class): mock_ssl_cred = mock.Mock() with mock.patch.multiple( "google.auth.transport.grpc.SslCredentials", __init__=mock.Mock(return_value=None), ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), ): with mock.patch.object( transport_class, "create_channel" ) as grpc_create_channel: mock_grpc_channel = mock.Mock() grpc_create_channel.return_value = mock_grpc_channel mock_cred = mock.Mock() with pytest.warns(DeprecationWarning): transport = transport_class( host="squid.clam.whelk", credentials=mock_cred, api_mtls_endpoint="mtls.squid.clam.whelk", client_cert_source=None, ) grpc_create_channel.assert_called_once_with( "mtls.squid.clam.whelk:443", credentials=mock_cred, credentials_file=None, scopes=None, ssl_credentials=mock_ssl_cred, quota_project_id=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) assert transport.grpc_channel == mock_grpc_channel def test_version_path(): project = "squid" version = "clam" expected = "projects/{project}/agent/versions/{version}".format( project=project, version=version, ) actual = VersionsClient.version_path(project, version) assert expected == actual def test_parse_version_path(): expected = { "project": "whelk", "version": "octopus", } path = VersionsClient.version_path(**expected) # Check that the path construction is reversible. 
actual = VersionsClient.parse_version_path(path) assert expected == actual def test_common_billing_account_path(): billing_account = "oyster" expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, ) actual = VersionsClient.common_billing_account_path(billing_account) assert expected == actual def test_parse_common_billing_account_path(): expected = { "billing_account": "nudibranch", } path = VersionsClient.common_billing_account_path(**expected) # Check that the path construction is reversible. actual = VersionsClient.parse_common_billing_account_path(path) assert expected == actual def test_common_folder_path(): folder = "cuttlefish" expected = "folders/{folder}".format( folder=folder, ) actual = VersionsClient.common_folder_path(folder) assert expected == actual def test_parse_common_folder_path(): expected = { "folder": "mussel", } path = VersionsClient.common_folder_path(**expected) # Check that the path construction is reversible. actual = VersionsClient.parse_common_folder_path(path) assert expected == actual def test_common_organization_path(): organization = "winkle" expected = "organizations/{organization}".format( organization=organization, ) actual = VersionsClient.common_organization_path(organization) assert expected == actual def test_parse_common_organization_path(): expected = { "organization": "nautilus", } path = VersionsClient.common_organization_path(**expected) # Check that the path construction is reversible. actual = VersionsClient.parse_common_organization_path(path) assert expected == actual def test_common_project_path(): project = "scallop" expected = "projects/{project}".format( project=project, ) actual = VersionsClient.common_project_path(project) assert expected == actual def test_parse_common_project_path(): expected = { "project": "abalone", } path = VersionsClient.common_project_path(**expected) # Check that the path construction is reversible. actual = VersionsClient.parse_common_project_path(path) assert expected == actual def test_common_location_path(): project = "squid" location = "clam" expected = "projects/{project}/locations/{location}".format( project=project, location=location, ) actual = VersionsClient.common_location_path(project, location) assert expected == actual def test_parse_common_location_path(): expected = { "project": "whelk", "location": "octopus", } path = VersionsClient.common_location_path(**expected) # Check that the path construction is reversible. 
actual = VersionsClient.parse_common_location_path(path) assert expected == actual def test_client_with_default_client_info(): client_info = gapic_v1.client_info.ClientInfo() with mock.patch.object( transports.VersionsTransport, "_prep_wrapped_messages" ) as prep: client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) with mock.patch.object( transports.VersionsTransport, "_prep_wrapped_messages" ) as prep: transport_class = VersionsClient.get_transport_class() transport = transport_class( credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) @pytest.mark.asyncio async def test_transport_close_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", ) with mock.patch.object( type(getattr(client.transport, "grpc_channel")), "close" ) as close: async with client: close.assert_not_called() close.assert_called_once() def test_transport_close(): transports = { "grpc": "_grpc_channel", } for transport, close_name in transports.items(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport ) with mock.patch.object( type(getattr(client.transport, close_name)), "close" ) as close: with client: close.assert_not_called() close.assert_called_once() def test_client_ctx(): transports = [ "grpc", ] for transport in transports: client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport ) # Test client calls underlying transport. with mock.patch.object(type(client.transport), "close") as close: close.assert_not_called() with client: pass close.assert_called() @pytest.mark.parametrize( "client_class,transport_class", [ (VersionsClient, transports.VersionsGrpcTransport), (VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport), ], ) def test_api_key_credentials(client_class, transport_class): with mock.patch.object( google.auth._default, "get_api_key_credentials", create=True ) as get_api_key_credentials: mock_cred = mock.Mock() get_api_key_credentials.return_value = mock_cred options = client_options.ClientOptions() options.api_key = "api_key" with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options) patched.assert_called_once_with( credentials=mock_cred, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, )
[]
[]
[]
[]
[]
python
0
0
sdk/servicebus/azure-servicebus/samples/async_samples/receive_subscription_async.py
#!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------

"""
Example to show receiving batch messages from a Service Bus Subscription under specific Topic asynchronously.
"""

# pylint: disable=C0111

import os
import asyncio
from azure.servicebus.aio import ServiceBusClient

CONNECTION_STR = os.environ['SERVICE_BUS_CONNECTION_STR']
TOPIC_NAME = os.environ["SERVICE_BUS_TOPIC_NAME"]
SUBSCRIPTION_NAME = os.environ["SERVICE_BUS_SUBSCRIPTION_NAME"]


async def main():
    servicebus_client = ServiceBusClient.from_connection_string(conn_str=CONNECTION_STR)

    async with servicebus_client:
        receiver = servicebus_client.get_subscription_receiver(
            topic_name=TOPIC_NAME,
            subscription_name=SUBSCRIPTION_NAME
        )
        async with receiver:
            received_msgs = await receiver.receive_messages(max_message_count=10, max_wait_time=5)
            for msg in received_msgs:
                print(str(msg))
                await msg.complete()


loop = asyncio.get_event_loop()
loop.run_until_complete(main())
[]
[]
[ "SERVICE_BUS_CONNECTION_STR", "SERVICE_BUS_SUBSCRIPTION_NAME", "SERVICE_BUS_TOPIC_NAME" ]
[]
["SERVICE_BUS_CONNECTION_STR", "SERVICE_BUS_SUBSCRIPTION_NAME", "SERVICE_BUS_TOPIC_NAME"]
python
3
0
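The receive_subscription_async.py record above reads its Service Bus settings from the three environment variables named in its constant-argument fields (SERVICE_BUS_CONNECTION_STR, SERVICE_BUS_TOPIC_NAME, SERVICE_BUS_SUBSCRIPTION_NAME). A minimal pre-run sketch, with placeholder values standing in for real connection details:

import os

# The sample resolves these via os.environ at import time, so they must be set first.
os.environ.setdefault("SERVICE_BUS_CONNECTION_STR", "<namespace-connection-string>")
os.environ.setdefault("SERVICE_BUS_TOPIC_NAME", "<topic-name>")
os.environ.setdefault("SERVICE_BUS_SUBSCRIPTION_NAME", "<subscription-name>")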
app/sample-go/app.go
package main

import (
	"fmt"
	"html/template"
	"log"
	"net/http"
	"os"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// PageVars : These values are used by consuming web pages.
type PageVars struct {
	Message  string
	Language string
}

var requestsCounter = prometheus.NewCounter(
	prometheus.CounterOpts{
		Name: "requests_counter_total",
		Help: "Total Requests Made.",
	},
)

func init() {
	// Metrics have to be registered to be exposed:
	prometheus.MustRegister(requestsCounter)
}

func main() {
	//client := appinsights.NewTelemetryClient(os.Getenv("APPINSIGHTS_INSTRUMENTATIONKEY"))
	//request := appinsights.NewRequestTelemetry("GET", "https://myapp.azurewebsites.net/", 1 , "Success")
	//client.Track(request)

	http.Handle("/css/", http.StripPrefix("/css/", http.FileServer(http.Dir("css"))))
	http.Handle("/img/", http.StripPrefix("/img/", http.FileServer(http.Dir("img"))))
	http.Handle("/fonts/", http.StripPrefix("/fonts/", http.FileServer(http.Dir("fonts"))))
	http.HandleFunc("/", home)
	http.Handle("/metrics", promhttp.Handler())

	port := getPort()
	log.Printf("listening on port %s", port)
	log.Fatal(http.ListenAndServe(port, nil))
}

func getPort() string {
	p := os.Getenv("HTTP_PLATFORM_PORT")
	if p != "" {
		return ":" + p
	}
	return ":8080"
}

func render(w http.ResponseWriter, tmpl string, pageVars PageVars) {
	tmpl = fmt.Sprintf("views/%s", tmpl)
	t, err := template.ParseFiles(tmpl)
	if err != nil { // if there is an error
		log.Print("template parsing error: ", err) // log it
		return                                     // stop here: executing a nil template would panic
	}
	err = t.Execute(w, pageVars) // execute the template and pass in the variables to fill the gaps
	if err != nil {              // if there is an error
		log.Print("template executing error: ", err) // log it
	}
}

func home(w http.ResponseWriter, req *http.Request) {
	requestsCounter.Inc()
	pageVars := PageVars{
		Message:  "Success!",
		Language: "Go Lang",
	}
	render(w, "index.html", pageVars)
	log.Print("page rendering complete")
}
[ "\"APPINSIGHTS_INSTRUMENTATIONKEY\"", "\"HTTP_PLATFORM_PORT\"" ]
[]
[ "APPINSIGHTS_INSTRUMENTATIONKEY", "HTTP_PLATFORM_PORT" ]
[]
["APPINSIGHTS_INSTRUMENTATIONKEY", "HTTP_PLATFORM_PORT"]
go
2
0
controllers/mongodb_controller.go
/* # Copyright 2021 IBM Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. */ package controllers import ( "bytes" "context" "fmt" "math" "math/rand" "os" "text/template" "time" "github.com/ghodss/yaml" "github.com/go-logr/logr" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/errors" resource "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/reconcile" mongodbv1alpha1 "github.com/IBM/ibm-mongodb-operator/api/v1alpha1" ) // MongoDBReconciler reconciles a MongoDB object type MongoDBReconciler struct { Client client.Client Reader client.Reader Log logr.Logger Scheme *runtime.Scheme } // MongoDB StatefulSet Data type mongoDBStatefulSetData struct { Replicas int ImageRepo string StorageClass string InitImage string BootstrapImage string MetricsImage string CPULimit string CPURequest string MemoryLimit string MemoryRequest string NamespaceName string } // +kubebuilder:rbac:groups=mongodb.operator.ibm.com,namespace=ibm-common-services,resources=mongodbs,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=mongodb.operator.ibm.com,namespace=ibm-common-services,resources=mongodbs/status,verbs=get;update;patch // +kubebuilder:rbac:groups=core,namespace=ibm-common-services,resources=services;services/finalizers;serviceaccounts;endpoints;persistentvolumeclaims;events;configmaps;secrets,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=apps,namespace=ibm-common-services,resources=deployments;daemonsets;replicasets;statefulsets,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=monitoring.coreos.com,namespace=ibm-common-services,resources=servicemonitors,verbs=get;create // +kubebuilder:rbac:groups=apps,namespace=ibm-common-services,resourceNames=ibm-mongodb-operator,resources=deployments/finalizers,verbs=update // +kubebuilder:rbac:groups=operator.ibm.com,namespace=ibm-common-services,resources=mongodbs;mongodbs/finalizers;mongodbs/status,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=certmanager.k8s.io,namespace=ibm-common-services,resources=certificates;certificaterequests;orders;challenges;issuers,verbs=get;list;watch;create;update;patch;delete func (r *MongoDBReconciler) Reconcile(request ctrl.Request) (ctrl.Result, error) { _ = context.Background() _ = r.Log.WithValues("mongodb", request.NamespacedName) // Fetch the MongoDB instance instance := &mongodbv1alpha1.MongoDB{} err := r.Client.Get(context.TODO(), request.NamespacedName, instance) if err != nil { if errors.IsNotFound(err) { // Request object not found, could have been deleted after reconcile request. 
// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers. // Return and don't requeue return reconcile.Result{}, nil } // Error reading the object - requeue the request. return reconcile.Result{}, err } r.Log.Info("creating mongodb service account") if err := r.createFromYaml(instance, []byte(mongoSA)); err != nil { return reconcile.Result{}, err } r.Log.Info("creating mongodb service") if err := r.createFromYaml(instance, []byte(service)); err != nil { return reconcile.Result{}, err } r.Log.Info("creating mongodb icp service") if err := r.createFromYaml(instance, []byte(icpService)); err != nil { return reconcile.Result{}, err } metadatalabel := map[string]string{"app.kubernetes.io/name": "icp-mongodb", "app.kubernetes.io/component": "database", "app.kubernetes.io/managed-by": "operator", "app.kubernetes.io/instance": "icp-mongodb", "release": "mongodb"} r.Log.Info("creating icp mongodb config map") //Calculate MongoDB cache Size var cacheSize float64 var cacheSizeGB float64 if instance.Spec.Resources.Limits.Memory().String() != "0" { ramMB := instance.Spec.Resources.Limits.Memory().ScaledValue(resource.Mega) // Cache Size is 40 percent of RAM cacheSize = float64(ramMB) * 0.4 // Convert to gig cacheSizeGB = cacheSize / 1000.0 // Round to fit config cacheSizeGB = math.Floor(cacheSizeGB*100) / 100 } else { //default value is 5Gi cacheSizeGB = 2.0 } monogdbConfigmapData := struct { CacheSize float64 }{ CacheSize: cacheSizeGB, } // TO DO -- convert configmap to take option. var mongodbConfigYaml bytes.Buffer tc := template.Must(template.New("mongodbconfigmap").Parse(mongodbConfigMap)) if err := tc.Execute(&mongodbConfigYaml, monogdbConfigmapData); err != nil { return reconcile.Result{}, err } r.Log.Info("creating or updating mongodb configmap") if err := r.createUpdateFromYaml(instance, mongodbConfigYaml.Bytes()); err != nil { return reconcile.Result{}, err } if err := r.createFromYaml(instance, []byte(mongodbConfigMap)); err != nil { return reconcile.Result{}, err } r.Log.Info("creating icp mongodb init config map") if err := r.createFromYaml(instance, []byte(initConfigMap)); err != nil { return reconcile.Result{}, err } r.Log.Info("creating icp mongodb install config map") if err := r.createFromYaml(instance, []byte(installConfigMap)); err != nil { return reconcile.Result{}, err } // Create admin user and password as random string // TODO: allow user to give a Secret var pass, user string user = createRandomAlphaNumeric(8) pass = createRandomAlphaNumeric(13) mongodbAdmin := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ "app": "icp-mongodb", }, Name: "icp-mongodb-admin", Namespace: instance.GetNamespace(), }, Type: corev1.SecretTypeOpaque, StringData: map[string]string{ "user": user, "password": pass, }, } // Set CommonServiceConfig instance as the owner and controller // if err := controllerutil.SetControllerReference(instance, mongodbAdmin, r.scheme); err != nil { // return reconcile.Result{}, err // } r.Log.Info("creating icp mongodb admin secret") if err = r.Client.Create(context.TODO(), mongodbAdmin); err != nil && !errors.IsAlreadyExists(err) { return reconcile.Result{}, err } mongodbMetric := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Labels: metadatalabel, Name: "icp-mongodb-metrics", Namespace: instance.GetNamespace(), }, Type: corev1.SecretTypeOpaque, StringData: map[string]string{ "user": "metrics", "password": "icpmetrics", }, } // Set CommonServiceConfig instance as the owner and controller if err := 
controllerutil.SetControllerReference(instance, mongodbMetric, r.Scheme); err != nil { return reconcile.Result{}, err } r.Log.Info("creating icp mongodb metric secret") if err = r.Client.Create(context.TODO(), mongodbMetric); err != nil && !errors.IsAlreadyExists(err) { return reconcile.Result{}, err } keyfileSecret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Labels: metadatalabel, Name: "icp-mongodb-keyfile", Namespace: instance.GetNamespace(), }, Type: corev1.SecretTypeOpaque, StringData: map[string]string{ "key.txt": "icptest", }, } // Set CommonServiceConfig instance as the owner and controller if err := controllerutil.SetControllerReference(instance, keyfileSecret, r.Scheme); err != nil { return reconcile.Result{}, err } r.Log.Info("creating icp mongodb keyfile secret") if err = r.Client.Create(context.TODO(), keyfileSecret); err != nil && !errors.IsAlreadyExists(err) { return reconcile.Result{}, err } var storageclass string if instance.Status.StorageClass == "" { if instance.Spec.StorageClass == "" { // TODO: weird because the storage class on OCP is opened for all // Need to deploy an OCP cluster on AWS to verify storageclass, err = r.getstorageclass() if err != nil { return reconcile.Result{}, err } } else { storageclass = instance.Spec.StorageClass } } else { if instance.Spec.StorageClass != "" && instance.Spec.StorageClass != instance.Status.StorageClass { r.Log.Info("You need to delete the monogodb cr before switch the storage class. Please note that this will lose all your datamake") } storageclass = instance.Status.StorageClass } // Default values cpuRequest := "2000m" memoryRequest := "5Gi" cpuLimit := "2000m" memoryLimit := "5Gi" // Check cpu request values and default if not there if instance.Spec.Resources.Requests.Cpu().String() != "0" { cpuRequest = instance.Spec.Resources.Requests.Cpu().String() } // Check memory request values and default if not there if instance.Spec.Resources.Requests.Memory().String() != "0" { memoryRequest = instance.Spec.Resources.Requests.Memory().String() } // Check cpu limit values and default if not there if instance.Spec.Resources.Limits.Cpu().String() != "0" { cpuLimit = instance.Spec.Resources.Limits.Cpu().String() } // Check memory limit values and default if not there if instance.Spec.Resources.Limits.Memory().String() != "0" { memoryLimit = instance.Spec.Resources.Limits.Memory().String() } stsData := mongoDBStatefulSetData{ Replicas: instance.Spec.Replicas, ImageRepo: instance.Spec.ImageRegistry, StorageClass: storageclass, InitImage: os.Getenv("IBM_MONGODB_INSTALL_IMAGE"), BootstrapImage: os.Getenv("IBM_MONGODB_IMAGE"), MetricsImage: os.Getenv("IBM_MONGODB_EXPORTER_IMAGE"), CPULimit: cpuLimit, CPURequest: cpuRequest, MemoryLimit: memoryLimit, MemoryRequest: memoryRequest, NamespaceName: instance.Namespace, } var stsYaml bytes.Buffer t := template.Must(template.New("statefulset").Parse(statefulset)) if err := t.Execute(&stsYaml, stsData); err != nil { return reconcile.Result{}, err } r.Log.Info("creating mongodb statefulset") if err := r.createFromYaml(instance, stsYaml.Bytes()); err != nil { return reconcile.Result{}, err } instance.Status.StorageClass = storageclass if err := r.Client.Status().Update(context.TODO(), instance); err != nil { return reconcile.Result{}, err } // sign certificate r.Log.Info("creating root-ca-cert") if err := r.createFromYaml(instance, []byte(godIssuerYaml)); err != nil { r.Log.Error(err, "create god-issuer fail") return reconcile.Result{}, err } r.Log.Info("creating root-ca-cert") if err := 
r.createFromYaml(instance, []byte(rootCertYaml)); err != nil { r.Log.Error(err, "create root-ca-cert fail") return reconcile.Result{}, err } r.Log.Info("creating root-issuer") if err := r.createFromYaml(instance, []byte(rootIssuerYaml)); err != nil { r.Log.Error(err, "create root-issuer fail") return reconcile.Result{}, err } r.Log.Info("creating icp-mongodb-client-cert") if err := r.createFromYaml(instance, []byte(clientCertYaml)); err != nil { r.Log.Error(err, "create icp-mongodb-client-cert fail") return reconcile.Result{}, err } // Get the StatefulSet sts := &appsv1.StatefulSet{} if err = r.Client.Get(context.TODO(), types.NamespacedName{Name: "icp-mongodb", Namespace: instance.Namespace}, sts); err != nil { return reconcile.Result{}, err } // Add controller on PVC if err = r.addControlleronPVC(instance, sts); err != nil { return reconcile.Result{}, err } // Need to check for statefulset update before waiting for Mongo to be ready if err = r.updateStatefulset(&stsData); err != nil { r.Log.Error(err, "failed to call update StatefulSet") return reconcile.Result{}, err } if sts.Status.UpdatedReplicas != sts.Status.Replicas || sts.Status.UpdatedReplicas != sts.Status.ReadyReplicas { r.Log.Info("Waiting Mongodb to be ready ...") return reconcile.Result{Requeue: true, RequeueAfter: time.Minute}, nil } r.Log.Info("Mongodb is ready") return ctrl.Result{}, nil } // Move to separate file begin func (r *MongoDBReconciler) updateStatefulset(stsData *mongoDBStatefulSetData) error { // Update Needed Boolean needUpdate := false // Get Current Statefulset sts := &appsv1.StatefulSet{} err := r.Client.Get(context.TODO(), types.NamespacedName{Name: "icp-mongodb", Namespace: stsData.NamespaceName}, sts) if err != nil { r.Log.Error(err, "failed to get statefulset for update check") return err } // create a desired statefulset desiredSts := sts // Check and change configurable variables in statefulset // Check Replicas if stsData.Replicas != int(*sts.Spec.Replicas) { r.Log.Info("need to update replica count") rep := int32(stsData.Replicas) desiredSts.Spec.Replicas = &rep needUpdate = true } // Check InitImage if stsData.InitImage != sts.Spec.Template.Spec.InitContainers[0].Image { r.Log.Info("need to update Install Container image") desiredSts.Spec.Template.Spec.InitContainers[0].Image = stsData.InitImage needUpdate = true } // Check BootstrapImage if stsData.BootstrapImage != sts.Spec.Template.Spec.InitContainers[1].Image { r.Log.Info("need to update Bootstrap Container image") desiredSts.Spec.Template.Spec.InitContainers[1].Image = stsData.BootstrapImage needUpdate = true } if stsData.BootstrapImage != sts.Spec.Template.Spec.Containers[0].Image { r.Log.Info("need to update ICP MongoDB Container image") desiredSts.Spec.Template.Spec.Containers[0].Image = stsData.BootstrapImage needUpdate = true } // Check MetricsImage if stsData.MetricsImage != sts.Spec.Template.Spec.Containers[1].Image { r.Log.Info("need to update Metrics Container image") desiredSts.Spec.Template.Spec.Containers[1].Image = stsData.MetricsImage needUpdate = true } // Check CPULimit cpuLimit, err := resource.ParseQuantity(stsData.CPULimit) if err != nil { r.Log.Error(err, "failed to get cpu Limit in updateStatefulset") return err } if !(cpuLimit.Equal(sts.Spec.Template.Spec.InitContainers[0].Resources.Limits["cpu"])) { r.Log.Info("need to update CPU Limit for Install container") desiredSts.Spec.Template.Spec.InitContainers[0].Resources.Limits["cpu"] = cpuLimit needUpdate = true } if 
!(cpuLimit.Equal(sts.Spec.Template.Spec.InitContainers[1].Resources.Limits["cpu"])) { r.Log.Info("need to update CPU Limit for Bootstrap container") desiredSts.Spec.Template.Spec.InitContainers[1].Resources.Limits["cpu"] = cpuLimit needUpdate = true } if !(cpuLimit.Equal(sts.Spec.Template.Spec.Containers[0].Resources.Limits["cpu"])) { r.Log.Info("need to update CPU Limit for ICP MongoDB container") desiredSts.Spec.Template.Spec.Containers[0].Resources.Limits["cpu"] = cpuLimit needUpdate = true } // Check CPURequest cpuRequest, err := resource.ParseQuantity(stsData.CPURequest) if err != nil { r.Log.Error(err, "failed to get cpu request in updateStatefulset") return err } if !(cpuRequest.Equal(sts.Spec.Template.Spec.InitContainers[0].Resources.Requests["cpu"])) { r.Log.Info("need to update CPU Request for Install container") _, found := desiredSts.Spec.Template.Spec.InitContainers[0].Resources.Requests["cpu"] if found { desiredSts.Spec.Template.Spec.InitContainers[0].Resources.Requests["cpu"] = cpuRequest } else { desiredSts.Spec.Template.Spec.InitContainers[0].Resources.Requests = corev1.ResourceList{} desiredSts.Spec.Template.Spec.InitContainers[0].Resources.Requests["cpu"] = cpuRequest } desiredSts.Spec.Template.Spec.InitContainers[0].Resources.Requests["cpu"] = cpuRequest needUpdate = true } if !(cpuRequest.Equal(sts.Spec.Template.Spec.InitContainers[1].Resources.Requests["cpu"])) { r.Log.Info("need to update CPU Request for Bootstrap container") _, found := desiredSts.Spec.Template.Spec.InitContainers[1].Resources.Requests["cpu"] if found { desiredSts.Spec.Template.Spec.InitContainers[1].Resources.Requests["cpu"] = cpuRequest } else { desiredSts.Spec.Template.Spec.InitContainers[1].Resources.Requests = corev1.ResourceList{} desiredSts.Spec.Template.Spec.InitContainers[1].Resources.Requests["cpu"] = cpuRequest } desiredSts.Spec.Template.Spec.InitContainers[1].Resources.Requests["cpu"] = cpuRequest needUpdate = true } if !(cpuRequest.Equal(sts.Spec.Template.Spec.Containers[0].Resources.Requests["cpu"])) { r.Log.Info("need to update CPU Request for ICP MongoDB container") _, found := desiredSts.Spec.Template.Spec.Containers[0].Resources.Requests["cpu"] if found { desiredSts.Spec.Template.Spec.Containers[0].Resources.Requests["cpu"] = cpuRequest } else { desiredSts.Spec.Template.Spec.Containers[0].Resources.Requests = corev1.ResourceList{} desiredSts.Spec.Template.Spec.Containers[0].Resources.Requests["cpu"] = cpuRequest } desiredSts.Spec.Template.Spec.Containers[0].Resources.Requests["cpu"] = cpuRequest needUpdate = true } // Check MemoryLimit memoryLimit, err := resource.ParseQuantity(stsData.MemoryLimit) if err != nil { r.Log.Error(err, "failed to get memory Limit in updateStatefulset") return err } if !(memoryLimit.Equal(sts.Spec.Template.Spec.InitContainers[0].Resources.Limits["memory"])) { r.Log.Info("need to update Memory Limit for Install container") desiredSts.Spec.Template.Spec.InitContainers[0].Resources.Limits["memory"] = memoryLimit needUpdate = true } if !(memoryLimit.Equal(sts.Spec.Template.Spec.InitContainers[1].Resources.Limits["memory"])) { r.Log.Info("need to update Memory Limit for Bootstrap container") desiredSts.Spec.Template.Spec.InitContainers[1].Resources.Limits["memory"] = memoryLimit needUpdate = true } if !(memoryLimit.Equal(sts.Spec.Template.Spec.Containers[0].Resources.Limits["memory"])) { r.Log.Info("need to update Memory Limit for ICP MongoDB container") desiredSts.Spec.Template.Spec.Containers[0].Resources.Limits["memory"] = memoryLimit needUpdate = true } 
// Check MemoryRequest memoryRequest, err := resource.ParseQuantity(stsData.MemoryRequest) if err != nil { r.Log.Error(err, "failed to get memory Request in updateStatefulset") return err } if !(memoryRequest.Equal(sts.Spec.Template.Spec.InitContainers[0].Resources.Requests["memory"])) { r.Log.Info("need to update Memory Request for Install container") desiredSts.Spec.Template.Spec.InitContainers[0].Resources.Requests["memory"] = memoryRequest needUpdate = true } if !(memoryRequest.Equal(sts.Spec.Template.Spec.InitContainers[1].Resources.Requests["memory"])) { r.Log.Info("need to update Memory Request for Bootstrap container") desiredSts.Spec.Template.Spec.InitContainers[1].Resources.Requests["memory"] = memoryRequest needUpdate = true } if !(memoryRequest.Equal(sts.Spec.Template.Spec.Containers[0].Resources.Requests["memory"])) { r.Log.Info("need to update Memory Request for ICP MongoDB container") desiredSts.Spec.Template.Spec.Containers[0].Resources.Requests["memory"] = memoryRequest needUpdate = true } // Update Statefulset if needed if needUpdate { r.Log.Info("updating statefulset") if err := r.Client.Update(context.TODO(), desiredSts); err != nil { return fmt.Errorf("could not Update resource: %v", err) } return nil } return nil } func (r *MongoDBReconciler) createFromYaml(instance *mongodbv1alpha1.MongoDB, yamlContent []byte) error { obj := &unstructured.Unstructured{} jsonSpec, err := yaml.YAMLToJSON(yamlContent) if err != nil { return fmt.Errorf("could not convert yaml to json: %v", err) } if err := obj.UnmarshalJSON(jsonSpec); err != nil { return fmt.Errorf("could not unmarshal resource: %v", err) } obj.SetNamespace(instance.Namespace) // Set CommonServiceConfig instance as the owner and controller if err := controllerutil.SetControllerReference(instance, obj, r.Scheme); err != nil { return err } err = r.Client.Create(context.TODO(), obj) if err != nil && !errors.IsAlreadyExists(err) { return fmt.Errorf("could not Create resource: %v", err) } return nil } func (r *MongoDBReconciler) createUpdateFromYaml(instance *mongodbv1alpha1.MongoDB, yamlContent []byte) error { obj := &unstructured.Unstructured{} jsonSpec, err := yaml.YAMLToJSON(yamlContent) if err != nil { return fmt.Errorf("could not convert yaml to json: %v", err) } if err := obj.UnmarshalJSON(jsonSpec); err != nil { return fmt.Errorf("could not unmarshal resource: %v", err) } obj.SetNamespace(instance.Namespace) // Set CommonServiceConfig instance as the owner and controller if err := controllerutil.SetControllerReference(instance, obj, r.Scheme); err != nil { return err } err = r.Client.Create(context.TODO(), obj) if err != nil { if errors.IsAlreadyExists(err) { if err := r.Client.Update(context.TODO(), obj); err != nil { return fmt.Errorf("could not Update resource: %v", err) } return nil } return fmt.Errorf("could not Create resource: %v", err) } return nil } func (r *MongoDBReconciler) getstorageclass() (string, error) { scList := &storagev1.StorageClassList{} err := r.Reader.List(context.TODO(), scList) if err != nil { return "", err } if len(scList.Items) == 0 { return "", fmt.Errorf("could not find storage class in the cluster") } var defaultSC []string var nonDefaultSC []string for _, sc := range scList.Items { if sc.ObjectMeta.GetAnnotations()["storageclass.kubernetes.io/is-default-class"] == "true" { defaultSC = append(defaultSC, sc.GetName()) continue } if sc.Provisioner == "kubernetes.io/no-provisioner" { continue } nonDefaultSC = append(nonDefaultSC, sc.GetName()) } if len(defaultSC) != 0 { return 
defaultSC[0], nil } if len(nonDefaultSC) != 0 { return nonDefaultSC[0], nil } return "", fmt.Errorf("could not find dynamic provisioner storage class in the cluster nor is there a default storage class") } func (r *MongoDBReconciler) addControlleronPVC(instance *mongodbv1alpha1.MongoDB, sts *appsv1.StatefulSet) error { // Fetch the list of the PersistentVolumeClaim generated by the StatefulSet pvcList := &corev1.PersistentVolumeClaimList{} err := r.Client.List(context.TODO(), pvcList, &client.ListOptions{ Namespace: instance.Namespace, LabelSelector: labels.SelectorFromSet(sts.ObjectMeta.Labels), }) if err != nil { return err } for _, pvc := range pvcList.Items { if pvc.ObjectMeta.OwnerReferences == nil { if err := controllerutil.SetControllerReference(instance, &pvc, r.Scheme); err != nil { return err } if err = r.Client.Update(context.TODO(), &pvc); err != nil { return err } } } return nil } // Create Random String func createRandomAlphaNumeric(length int) string { const charset = "abcdefghijklmnopqrstuvwxyz" + "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" var seededRand = rand.New( rand.NewSource(time.Now().UnixNano())) byteString := make([]byte, length) for i := range byteString { byteString[i] = charset[seededRand.Intn(len(charset))] } return string(byteString) } // Move to separate file? func (r *MongoDBReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&mongodbv1alpha1.MongoDB{}). Owns(&appsv1.StatefulSet{}).Owns(&corev1.ConfigMap{}).Owns(&corev1.ServiceAccount{}). Owns(&corev1.Service{}). Complete(r) }
[ "\"IBM_MONGODB_INSTALL_IMAGE\"", "\"IBM_MONGODB_IMAGE\"", "\"IBM_MONGODB_EXPORTER_IMAGE\"" ]
[]
[ "IBM_MONGODB_IMAGE", "IBM_MONGODB_INSTALL_IMAGE", "IBM_MONGODB_EXPORTER_IMAGE" ]
[]
["IBM_MONGODB_IMAGE", "IBM_MONGODB_INSTALL_IMAGE", "IBM_MONGODB_EXPORTER_IMAGE"]
go
3
0
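The three environment variables recorded for the MongoDB operator file above (IBM_MONGODB_INSTALL_IMAGE, IBM_MONGODB_IMAGE, IBM_MONGODB_EXPORTER_IMAGE) line up with the init, bootstrap/server, and metrics container images that updateStatefulset compares against mongoDBStatefulSetData. Below is a minimal sketch of how those variables could be read into the image fields; the env-var-to-field mapping, the trimmed statefulSetImages stand-in struct, and the imagesFromEnv helper are illustrative assumptions, not the operator's actual wiring.

// Illustrative only: a trimmed stand-in for the image fields of
// mongoDBStatefulSetData, populated from the env vars listed in this record.
package main

import (
	"fmt"
	"os"
)

type statefulSetImages struct {
	InitImage      string // install init container image
	BootstrapImage string // bootstrap init container and icp-mongodb container image
	MetricsImage   string // metrics sidecar image
}

func imagesFromEnv() statefulSetImages {
	return statefulSetImages{
		InitImage:      os.Getenv("IBM_MONGODB_INSTALL_IMAGE"),  // assumed mapping
		BootstrapImage: os.Getenv("IBM_MONGODB_IMAGE"),          // assumed mapping
		MetricsImage:   os.Getenv("IBM_MONGODB_EXPORTER_IMAGE"), // assumed mapping
	}
}

func main() {
	// Print whatever images are currently configured in the environment.
	fmt.Printf("%+v\n", imagesFromEnv())
}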
google/cloud/bigquery/storage/v1beta2/bigquery-storage-v1beta2-py/google/cloud/bigquery_storage_v1beta2/services/big_query_read/client.py
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from collections import OrderedDict from distutils import util import os import re from typing import Dict, Optional, Iterable, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib # type: ignore from google.api_core import exceptions as core_exceptions # type: ignore from google.api_core import gapic_v1 # type: ignore from google.api_core import retry as retries # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport import mtls # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore from google.cloud.bigquery_storage_v1beta2.types import arrow from google.cloud.bigquery_storage_v1beta2.types import avro from google.cloud.bigquery_storage_v1beta2.types import storage from google.cloud.bigquery_storage_v1beta2.types import stream from google.protobuf import timestamp_pb2 # type: ignore from .transports.base import BigQueryReadTransport, DEFAULT_CLIENT_INFO from .transports.grpc import BigQueryReadGrpcTransport from .transports.grpc_asyncio import BigQueryReadGrpcAsyncIOTransport class BigQueryReadClientMeta(type): """Metaclass for the BigQueryRead client. This provides class-level methods for building and retrieving support objects (e.g. transport) without polluting the client instance objects. """ _transport_registry = OrderedDict() # type: Dict[str, Type[BigQueryReadTransport]] _transport_registry["grpc"] = BigQueryReadGrpcTransport _transport_registry["grpc_asyncio"] = BigQueryReadGrpcAsyncIOTransport def get_transport_class(cls, label: str = None, ) -> Type[BigQueryReadTransport]: """Returns an appropriate transport class. Args: label: The name of the desired transport. If none is provided, then the first transport in the registry is used. Returns: The transport class to use. """ # If a specific transport is requested, return that one. if label: return cls._transport_registry[label] # No transport is requested; return the default (that is, the first one # in the dictionary). return next(iter(cls._transport_registry.values())) class BigQueryReadClient(metaclass=BigQueryReadClientMeta): """BigQuery Read API. The Read API can be used to read data from BigQuery. New code should use the v1 Read API going forward, if they don't use Write API at the same time. """ @staticmethod def _get_default_mtls_endpoint(api_endpoint): """Converts api endpoint to mTLS endpoint. Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: api_endpoint (Optional[str]): the api endpoint to convert. Returns: str: converted mTLS api endpoint. 
""" if not api_endpoint: return api_endpoint mtls_endpoint_re = re.compile( r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" ) m = mtls_endpoint_re.match(api_endpoint) name, mtls, sandbox, googledomain = m.groups() if mtls or not googledomain: return api_endpoint if sandbox: return api_endpoint.replace( "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" ) return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") DEFAULT_ENDPOINT = "bigquerystorage.googleapis.com" DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore DEFAULT_ENDPOINT ) @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials info. Args: info (dict): The service account private key info. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: BigQueryReadClient: The constructed client. """ credentials = service_account.Credentials.from_service_account_info(info) kwargs["credentials"] = credentials return cls(*args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials file. Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: BigQueryReadClient: The constructed client. """ credentials = service_account.Credentials.from_service_account_file( filename) kwargs["credentials"] = credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file @property def transport(self) -> BigQueryReadTransport: """Returns the transport used by the client instance. Returns: BigQueryReadTransport: The transport used by the client instance. 
""" return self._transport @staticmethod def read_session_path(project: str,location: str,session: str,) -> str: """Returns a fully-qualified read_session string.""" return "projects/{project}/locations/{location}/sessions/{session}".format(project=project, location=location, session=session, ) @staticmethod def parse_read_session_path(path: str) -> Dict[str,str]: """Parses a read_session path into its component segments.""" m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/sessions/(?P<session>.+?)$", path) return m.groupdict() if m else {} @staticmethod def read_stream_path(project: str,location: str,session: str,stream: str,) -> str: """Returns a fully-qualified read_stream string.""" return "projects/{project}/locations/{location}/sessions/{session}/streams/{stream}".format(project=project, location=location, session=session, stream=stream, ) @staticmethod def parse_read_stream_path(path: str) -> Dict[str,str]: """Parses a read_stream path into its component segments.""" m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/sessions/(?P<session>.+?)/streams/(?P<stream>.+?)$", path) return m.groupdict() if m else {} @staticmethod def table_path(project: str,dataset: str,table: str,) -> str: """Returns a fully-qualified table string.""" return "projects/{project}/datasets/{dataset}/tables/{table}".format(project=project, dataset=dataset, table=table, ) @staticmethod def parse_table_path(path: str) -> Dict[str,str]: """Parses a table path into its component segments.""" m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)/tables/(?P<table>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_billing_account_path(billing_account: str, ) -> str: """Returns a fully-qualified billing_account string.""" return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) @staticmethod def parse_common_billing_account_path(path: str) -> Dict[str,str]: """Parse a billing_account path into its component segments.""" m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_folder_path(folder: str, ) -> str: """Returns a fully-qualified folder string.""" return "folders/{folder}".format(folder=folder, ) @staticmethod def parse_common_folder_path(path: str) -> Dict[str,str]: """Parse a folder path into its component segments.""" m = re.match(r"^folders/(?P<folder>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_organization_path(organization: str, ) -> str: """Returns a fully-qualified organization string.""" return "organizations/{organization}".format(organization=organization, ) @staticmethod def parse_common_organization_path(path: str) -> Dict[str,str]: """Parse a organization path into its component segments.""" m = re.match(r"^organizations/(?P<organization>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_project_path(project: str, ) -> str: """Returns a fully-qualified project string.""" return "projects/{project}".format(project=project, ) @staticmethod def parse_common_project_path(path: str) -> Dict[str,str]: """Parse a project path into its component segments.""" m = re.match(r"^projects/(?P<project>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_location_path(project: str, location: str, ) -> str: """Returns a fully-qualified location string.""" return "projects/{project}/locations/{location}".format(project=project, location=location, ) @staticmethod def 
parse_common_location_path(path: str) -> Dict[str,str]: """Parse a location path into its component segments.""" m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path) return m.groupdict() if m else {} def __init__(self, *, credentials: Optional[ga_credentials.Credentials] = None, transport: Union[str, BigQueryReadTransport, None] = None, client_options: Optional[client_options_lib.ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiates the big query read client. Args: credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. transport (Union[str, BigQueryReadTransport]): The transport to use. If set to None, a transport is chosen automatically. client_options (google.api_core.client_options.ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT environment variable can also be used to override the endpoint: "always" (always use the default mTLS endpoint), "never" (always use the default regular endpoint) and "auto" (auto switch to the default mTLS endpoint if client certificate is present, this is the default value). However, the ``api_endpoint`` property takes precedence if provided. (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable is "true", then the ``client_cert_source`` property can be used to provide client certificate for mutual TLS transport. If not provided, the default SSL client certificate will be used if present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not set, no client certificate will be used. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. """ if isinstance(client_options, dict): client_options = client_options_lib.from_dict(client_options) if client_options is None: client_options = client_options_lib.ClientOptions() # Create SSL credentials for mutual TLS if needed. use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) client_cert_source_func = None is_mtls = False if use_client_cert: if client_options.client_cert_source: is_mtls = True client_cert_source_func = client_options.client_cert_source else: is_mtls = mtls.has_default_client_cert_source() if is_mtls: client_cert_source_func = mtls.default_client_cert_source() else: client_cert_source_func = None # Figure out which api endpoint to use. if client_options.api_endpoint is not None: api_endpoint = client_options.api_endpoint else: use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") if use_mtls_env == "never": api_endpoint = self.DEFAULT_ENDPOINT elif use_mtls_env == "always": api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": if is_mtls: api_endpoint = self.DEFAULT_MTLS_ENDPOINT else: api_endpoint = self.DEFAULT_ENDPOINT else: raise MutualTLSChannelError( "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. 
Accepted " "values: never, auto, always" ) # Save or instantiate the transport. # Ordinarily, we provide the transport, but allowing a custom transport # instance provides an extensibility point for unusual situations. if isinstance(transport, BigQueryReadTransport): # transport is a BigQueryReadTransport instance. if credentials or client_options.credentials_file: raise ValueError("When providing a transport instance, " "provide its credentials directly.") if client_options.scopes: raise ValueError( "When providing a transport instance, provide its scopes " "directly." ) self._transport = transport else: Transport = type(self).get_transport_class(transport) self._transport = Transport( credentials=credentials, credentials_file=client_options.credentials_file, host=api_endpoint, scopes=client_options.scopes, client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, always_use_jwt_access=True, ) def create_read_session(self, request: Union[storage.CreateReadSessionRequest, dict] = None, *, parent: str = None, read_session: stream.ReadSession = None, max_stream_count: int = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> stream.ReadSession: r"""Creates a new read session. A read session divides the contents of a BigQuery table into one or more streams, which can then be used to read data from the table. The read session also specifies properties of the data to be read, such as a list of columns or a push- down filter describing the rows to be returned. A particular row can be read by at most one stream. When the caller has reached the end of each stream in the session, then all the data in the table has been read. Data is assigned to each stream such that roughly the same number of rows can be read from each stream. Because the server-side unit for assigning data is collections of rows, the API does not guarantee that each stream will return the same number or rows. Additionally, the limits are enforced based on the number of pre-filtered rows, so some filters can lead to lopsided assignments. Read sessions automatically expire 6 hours after they are created and do not require manual clean-up by the caller. Args: request (Union[google.cloud.bigquery_storage_v1beta2.types.CreateReadSessionRequest, dict]): The request object. Request message for `CreateReadSession`. parent (str): Required. The request project that owns the session, in the form of ``projects/{project_id}``. This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. read_session (google.cloud.bigquery_storage_v1beta2.types.ReadSession): Required. Session to be created. This corresponds to the ``read_session`` field on the ``request`` instance; if ``request`` is provided, this should not be set. max_stream_count (int): Max initial number of streams. If unset or zero, the server will provide a value of streams so as to produce reasonable throughput. Must be non- negative. The number of streams may be lower than the requested number, depending on the amount parallelism that is reasonable for the table. Error will be returned if the max count is greater than the current system max limit of 1,000. Streams must be read starting from offset 0. This corresponds to the ``max_stream_count`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.bigquery_storage_v1beta2.types.ReadSession: Information about the ReadSession. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, read_session, max_stream_count]) if request is not None and has_flattened_params: raise ValueError('If the `request` argument is set, then none of ' 'the individual field arguments should be set.') # Minor optimization to avoid making a copy if the user passes # in a storage.CreateReadSessionRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, storage.CreateReadSessionRequest): request = storage.CreateReadSessionRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if read_session is not None: request.read_session = read_session if max_stream_count is not None: request.max_stream_count = max_stream_count # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.create_read_session] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata(( ("read_session.table", request.read_session.table), )), ) # Send the request. response = rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) # Done; return the response. return response def read_rows(self, request: Union[storage.ReadRowsRequest, dict] = None, *, read_stream: str = None, offset: int = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> Iterable[storage.ReadRowsResponse]: r"""Reads rows from the stream in the format prescribed by the ReadSession. Each response contains one or more table rows, up to a maximum of 100 MiB per response; read requests which attempt to read individual rows larger than 100 MiB will fail. Each request also returns a set of stream statistics reflecting the current state of the stream. Args: request (Union[google.cloud.bigquery_storage_v1beta2.types.ReadRowsRequest, dict]): The request object. Request message for `ReadRows`. read_stream (str): Required. Stream to read rows from. This corresponds to the ``read_stream`` field on the ``request`` instance; if ``request`` is provided, this should not be set. offset (int): The offset requested must be less than the last row read from Read. Requesting a larger offset is undefined. If not specified, start reading from offset zero. This corresponds to the ``offset`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: Iterable[google.cloud.bigquery_storage_v1beta2.types.ReadRowsResponse]: Response from calling ReadRows may include row data, progress and throttling information. 
""" # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([read_stream, offset]) if request is not None and has_flattened_params: raise ValueError('If the `request` argument is set, then none of ' 'the individual field arguments should be set.') # Minor optimization to avoid making a copy if the user passes # in a storage.ReadRowsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, storage.ReadRowsRequest): request = storage.ReadRowsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if read_stream is not None: request.read_stream = read_stream if offset is not None: request.offset = offset # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.read_rows] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata(( ("read_stream", request.read_stream), )), ) # Send the request. response = rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) # Done; return the response. return response def split_read_stream(self, request: Union[storage.SplitReadStreamRequest, dict] = None, *, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> storage.SplitReadStreamResponse: r"""Splits a given ``ReadStream`` into two ``ReadStream`` objects. These ``ReadStream`` objects are referred to as the primary and the residual streams of the split. The original ``ReadStream`` can still be read from in the same manner as before. Both of the returned ``ReadStream`` objects can also be read from, and the rows returned by both child streams will be the same as the rows read from the original stream. Moreover, the two child streams will be allocated back-to-back in the original ``ReadStream``. Concretely, it is guaranteed that for streams original, primary, and residual, that original[0-j] = primary[0-j] and original[j-n] = residual[0-m] once the streams have been read to completion. Args: request (Union[google.cloud.bigquery_storage_v1beta2.types.SplitReadStreamRequest, dict]): The request object. Request message for `SplitReadStream`. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.bigquery_storage_v1beta2.types.SplitReadStreamResponse: """ # Create or coerce a protobuf request object. # Minor optimization to avoid making a copy if the user passes # in a storage.SplitReadStreamRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, storage.SplitReadStreamRequest): request = storage.SplitReadStreamRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.split_read_stream] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata(( ("name", request.name), )), ) # Send the request. 
response = rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) # Done; return the response. return response def __enter__(self): return self def __exit__(self, type, value, traceback): """Releases underlying transport's resources. .. warning:: ONLY use as a context manager if the transport is NOT shared with other clients! Exiting the with block will CLOSE the transport and may cause errors in other clients! """ self.transport.close() try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=pkg_resources.get_distribution( "google-cloud-bigquery-storage", ).version, ) except pkg_resources.DistributionNotFound: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() __all__ = ( "BigQueryReadClient", )
[]
[]
[ "GOOGLE_API_USE_MTLS_ENDPOINT", "GOOGLE_API_USE_CLIENT_CERTIFICATE" ]
[]
["GOOGLE_API_USE_MTLS_ENDPOINT", "GOOGLE_API_USE_CLIENT_CERTIFICATE"]
python
2
0
pkg/controller/cassandracluster/pod_operation.go
// Copyright 2019 Orange // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package cassandracluster import ( "context" "errors" "fmt" "math/rand" "net" "os" "strings" "time" api "github.com/wahed-tech/cassandra-k8s-operator/pkg/apis/db/v1alpha1" "github.com/wahed-tech/cassandra-k8s-operator/pkg/k8s" "github.com/sirupsen/logrus" v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" ) type finalizedOp struct { err error cc *api.CassandraCluster dcRackName string pod v1.Pod status *api.CassandraClusterStatus operationName string } type op struct { Action func(*ReconcileCassandraCluster, string, *api.CassandraCluster, string, v1.Pod, *api.CassandraClusterStatus) error Monitor func(*JolokiaClient) (bool, error) PostAction func(*ReconcileCassandraCluster, *api.CassandraCluster, string, v1.Pod, *api.CassandraClusterStatus) error } var podOperationMap = map[string]op{ api.OperationCleanup: op{(*ReconcileCassandraCluster).runCleanup, (*JolokiaClient).hasCleanupCompactions, nil}, api.OperationRebuild: op{(*ReconcileCassandraCluster).runRebuild, (*JolokiaClient).hasStreamingSessions, nil}, api.OperationUpgradeSSTables: op{(*ReconcileCassandraCluster).runUpgradeSSTables, (*JolokiaClient).hasUpgradeSSTablesCompactions, nil}, api.OperationRemove: op{(*ReconcileCassandraCluster).runRemove, (*JolokiaClient).hasLeavingNodes, (*ReconcileCassandraCluster).postRunRemove}} const breakResyncLoop bool = true const continueResyncLoop bool = false const monitorSleepDelay = 10 * time.Second const deletedPvcTimeout = 30 * time.Second var chanRunningOp = make(chan finalizedOp) var runningFinalizedRoutine bool func randomPodOperationKey() string { r := rand.Intn(len(podOperationMap)) for k := range podOperationMap { if r == 0 { return k } r-- } return "" // will never happen but make the compiler happy ¯\_(ツ)_/¯ } //executePodOperation will ensure that all Pod Operations which needed to be performed are done accordingly. //It may return a breakResyncloop order meaning that the Operator won't update the statefulset until //PodOperations are finishing gracefully. 
func (rcc *ReconcileCassandraCluster) executePodOperation(cc *api.CassandraCluster, dcName, rackName string, status *api.CassandraClusterStatus) (bool, error) { dcRackName := cc.GetDCRackName(dcName, rackName) dcRackStatus := status.CassandraRackStatus[dcRackName] var breakResyncloop = false var err error // If we ask a ScaleDown, We can't update the Statefulset before the nodetool decommission has finished if rcc.weAreScalingDown(dcRackStatus){ //If a Decommission is Ongoing, we want to break the Resyncloop until the Decommission is succeed breakResyncloop, err = rcc.ensureDecommission(cc, dcName, rackName, status) if err != nil { logrus.WithFields(logrus.Fields{"cluster": cc.Name, "dc": dcName, "rack": rackName, "err": err}).Error("Error with decommission") } return breakResyncloop, err } // If LastClusterAction was a ScaleUp and It is Done then // Execute Cleanup On labeled Pods if status.LastClusterActionStatus == api.StatusDone { // If I enable test on ScaleUp then it may be too restrictive : // we won't be able to label pods to execute an action outside of a scaleup // && status.LastClusterAction == api.ActionScaleUp { // We run approximately a different operation each time rcc.ensureOperation(cc, dcName, rackName, status, randomPodOperationKey()) } return breakResyncloop, err } //addPodOperationLabels will add Pod Labels labels on all Pod in the Current dcRackName func (rcc *ReconcileCassandraCluster) addPodOperationLabels(cc *api.CassandraCluster, dcName string, rackName string, labels map[string]string) { dcRackName := cc.GetDCRackName(dcName, rackName) //Select all Pods in the Rack selector := k8s.MergeLabels(k8s.LabelsForCassandraDCRack(cc, dcName, rackName)) podsList, err := rcc.ListPods(cc.Namespace, selector) if err != nil || len(podsList.Items) < 1 { return } for _, pod := range podsList.Items { if pod.Status.Phase != v1.PodRunning || pod.DeletionTimestamp != nil { continue } newlabels := k8s.MergeLabels(pod.GetLabels(), labels) pod.SetLabels(newlabels) err = rcc.UpdatePod(&pod) if err != nil { logrus.Errorf("[%s][%s]:[%s] UpdatePod Error: %v", cc.Name, dcRackName, pod.Name, err) } logrus.Infof("[%s][%s]:[%s] UpdatePod Labels: %v", cc.Name, dcRackName, pod.Name, labels) } } // initOperation finds pods waiting for operation to run func (rcc *ReconcileCassandraCluster) initOperation(cc *api.CassandraCluster, status *api.CassandraClusterStatus, dcName, rackName, operationName string) []v1.Pod { dcRackName := cc.GetDCRackName(dcName, rackName) selector := k8s.MergeLabels(k8s.LabelsForCassandraDCRack(cc, dcName, rackName), map[string]string{"operation-name": operationName, "operation-status": api.StatusToDo}) podsList, err := rcc.ListPods(cc.Namespace, selector) now := metav1.Now() podLastOperation := &status.CassandraRackStatus[dcRackName].PodLastOperation if err != nil || len(podsList.Items) < 1 { if podLastOperation.Name == operationName && podLastOperation.Status == api.StatusOngoing && len(podLastOperation.Pods) < 1 { logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "operation": strings.Title(operationName)}).Debug("Set podLastOperation to Done as there is no more Pod to work on") podLastOperation.Status = api.StatusDone podLastOperation.EndTime = &now //We want dynamic view of status on CassandraCluster rcc.updateCassandraStatus(cc, status) } return nil } if podLastOperation.Status != api.StatusOngoing { logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "operation": strings.Title(operationName)}).Debug("Reset podLastOperation 
attributes") podLastOperation.Name = operationName podLastOperation.Status = api.StatusOngoing podLastOperation.StartTime = &now podLastOperation.EndTime = nil podLastOperation.PodsOK = []string{} podLastOperation.PodsKO = []string{} podLastOperation.Pods = []string{} //We want dynamic view of status on CassandraCluster rcc.updateCassandraStatus(cc, status) } return func(podsList *v1.PodList) []v1.Pod { podsSlice := make([]v1.Pod, 0) for _, pod := range podsList.Items { if pod.Status.Phase != v1.PodRunning || pod.DeletionTimestamp != nil { continue } podsSlice = append(podsSlice, pod) } return podsSlice }(podsList) } func (rcc *ReconcileCassandraCluster) startOperation(cc *api.CassandraCluster, status *api.CassandraClusterStatus, pod v1.Pod, dcRackName, operationName string) error { logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "pod": pod.Name, "operation": strings.Title(operationName)}).Info("Start operation") labels := map[string]string{"operation-status": api.StatusOngoing, "operation-start": k8s.LabelTime(), "operation-end": ""} err := rcc.UpdatePodLabel(&pod, labels) if err != nil { logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "pod": pod.Name, "err": err.Error(), "labels": labels}).Debug("Failed to add labels to pod") return err } podLastOperation := &status.CassandraRackStatus[dcRackName].PodLastOperation podLastOperation.Pods = append(podLastOperation.Pods, pod.Name) podLastOperation.PodsOK = k8s.RemoveString(podLastOperation.PodsOK, pod.Name) podLastOperation.PodsKO = k8s.RemoveString(podLastOperation.PodsKO, pod.Name) rcc.updateCassandraStatus(cc, status) logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "pod": pod.Name, "operation": strings.Title(operationName), "podLastOperation.OperatorName": podLastOperation.OperatorName, "podLastOperation.Pods": podLastOperation.Pods}).Debug("Display information about pods") return nil } // ensureOperation goal is to find pods with Labels : // - operation-name=xxxx and operation-status=To-Do // This method is asynchronous func (rcc *ReconcileCassandraCluster) ensureOperation(cc *api.CassandraCluster, dcName, rackName string, status *api.CassandraClusterStatus, operationName string) { dcRackName := cc.GetDCRackName(dcName, rackName) podsSlice, checkOnly := rcc.getPodsToWorkOn(cc, dcName, rackName, status, operationName) if !runningFinalizedRoutine { go rcc.finalizeOperations() runningFinalizedRoutine = true } // For each pod where we need to run the operation on for _, pod := range podsSlice { hostName := fmt.Sprintf("%s.%s", pod.Spec.Hostname, pod.Spec.Subdomain) // We check if an operation is running if checkOnly { go rcc.monitorOperation(hostName, cc, dcRackName, pod, status, operationName) continue } err := rcc.startOperation(cc, status, pod, dcRackName, operationName) if err != nil { logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "pod": pod.Name, "err": err}).Debug("Failed to start operation on pod") continue } go rcc.runOperation(operationName, hostName, cc, dcRackName, pod, status) } } func (rcc *ReconcileCassandraCluster) finalizeOperations() { for { select { case op := <-chanRunningOp: rcc.finalizeOperation(op.err, op.cc, op.dcRackName, op.pod, op.status, strings.Title(op.operationName)) } } } func (rcc *ReconcileCassandraCluster) runOperation(operationName, hostName string, cc *api.CassandraCluster, dcRackName string, pod v1.Pod, status *api.CassandraClusterStatus) { err := podOperationMap[operationName].Action(rcc, hostName, cc, 
dcRackName, pod, status) // If there is an error we finalize the operation but skip any existing post action if err != nil { chanRunningOp <- finalizedOp{err, cc, dcRackName, pod, status, operationName} return } postAction := podOperationMap[operationName].PostAction if postAction != nil { err = postAction(rcc, cc, dcRackName, pod, status) } chanRunningOp <- finalizedOp{err, cc, dcRackName, pod, status, operationName} } /* ensureDecommission will ensure that the Last Pod of the StatefulSet will be decommissionned - If pod.status=To-DO then executeDecommission in the Pod and flag pod.status as **Ongoing** - If pod.status=Ongoing then if pod is not running then flag its status as **Done** - If pod.status=Done then delete Pod PVC and ChangeActionStatus to **Continue** it return breakResyncloop=true is we need to bypass update of the Statefulset. it return breakResyncloop=false if we want to call the ensureStatefulset method. */ func (rcc *ReconcileCassandraCluster) ensureDecommission(cc *api.CassandraCluster, dcName, rackName string, status *api.CassandraClusterStatus) (bool, error) { dcRackName := cc.GetDCRackName(dcName, rackName) podLastOperation := &status.CassandraRackStatus[dcRackName].PodLastOperation if podLastOperation.Name != api.OperationDecommission { logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "lastOperation": podLastOperation.Name}).Warnf("We should decommission only if pod.Operation == decommission, not the case here") return continueResyncLoop, nil } switch podLastOperation.Status { case api.StatusToDo: return rcc.ensureDecommissionToDo(cc, dcName, rackName, status) case api.StatusOngoing, api.StatusFinalizing: if podLastOperation.Pods == nil || podLastOperation.Pods[0] == "" { return breakResyncLoop, fmt.Errorf("For Status Ongoing we should have a PodLastOperation Pods item") } lastPod, err := rcc.GetPod(cc.Namespace, podLastOperation.Pods[0]) if err != nil { if !apierrors.IsNotFound(err) { return breakResyncLoop, fmt.Errorf("failed to get last cassandra's pods '%s': %v", podLastOperation.Pods[0], err) } } //If Node is already Gone, We Delete PVC if apierrors.IsNotFound(err) { return rcc.ensureDecommissionFinalizing(cc, dcName, rackName, status, lastPod) } //LastPod Still Exists if !PodContainersReady(lastPod) && lastPod.DeletionTimestamp != nil { logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "lastPod": lastPod.Name}).Infof("We already asked Statefulset to scaleDown, waiting..") return breakResyncLoop, nil } hostName := fmt.Sprintf("%s.%s", lastPod.Spec.Hostname, lastPod.Spec.Subdomain) jolokiaClient, err := NewJolokiaClient(hostName, JolokiaPort, rcc, cc.Spec.ImageJolokiaSecret, cc.Namespace) if err != nil { return breakResyncLoop, err } operationMode, err := jolokiaClient.NodeOperationMode() if err != nil { logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "hostName": hostName, "err": err}).Error("Jolokia call failed") return breakResyncLoop, err } if operationMode == "NORMAL" { t, err := k8s.LabelTime2Time(lastPod.Labels["operation-start"]) if err != nil { logrus.WithFields(logrus.Fields{"operation-start": lastPod.Labels["operation-start"]}).Debugf("Can't parse time") } now, _ := k8s.LabelTime2Time(k8s.LabelTime()) if t.Add(api.DefaultDelayWaitForDecommission * time.Second).After(now) { logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "pod": lastPod.Name, "operationMode": operationMode, "DefaultDelayWaitForDecommission": 
api.DefaultDelayWaitForDecommission}).Info("Decommission was applied less " + "than DefaultDelayWaitForDecommission seconds, waiting") } else { logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "pod": lastPod.Name, "operationMode": operationMode}).Info("Seems that decommission has not correctly been applied, trying again..") status.CassandraRackStatus[dcRackName].PodLastOperation.Status = api.StatusToDo } return breakResyncLoop, nil } if operationMode == "DECOMMISSIONED" || operationMode == "" { logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "lastPod": lastPod.Name, "operationMode": operationMode}).Infof("Node has left the ring, " + "waiting for statefulset Scaledown") podLastOperation.Status = api.StatusFinalizing return continueResyncLoop, nil } logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "pod": lastPod.Name, "operationMode": operationMode}).Info("Cassandra Node is decommissioning, we need to wait") return breakResyncLoop, nil //In case of PodLastOperation Done we set LastAction to Continue to see if we need to decommission more case api.StatusDone: if podLastOperation.PodsOK == nil || podLastOperation.PodsOK[0] == "" { return breakResyncLoop, fmt.Errorf("For Status Done we should have a PodLastOperation.PodsOK item") } status.CassandraRackStatus[dcRackName].CassandraLastAction.Status = api.StatusContinue return breakResyncLoop, nil default: logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "status": podLastOperation.Status}).Errorf("Error this should not happened: unknown status") } return continueResyncLoop, nil } //ensureDecommissionToDo // State To-DO -> Ongoing // set podLastOperation.Pods and label targeted pod (lastPod) func (rcc *ReconcileCassandraCluster) ensureDecommissionToDo(cc *api.CassandraCluster, dcName, rackName string, status *api.CassandraClusterStatus) (bool, error) { dcRackName := cc.GetDCRackName(dcName, rackName) var list []string podLastOperation := &status.CassandraRackStatus[dcRackName].PodLastOperation // We Get LastPod From StatefulSet lastPod, err := rcc.GetLastPod(cc.Namespace, k8s.LabelsForCassandraDCRack(cc, dcName, rackName)) if err != nil { return breakResyncLoop, fmt.Errorf("Failed to get last cassandra's pods: %v", err) } //If Pod is unschedulable, we bypass decommission (cassandra is not running) if lastPod.Status.Phase == v1.PodPending && lastPod.Status.Conditions != nil && lastPod.Status.Conditions[0].Reason == "Unschedulable"{ logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "pod": lastPod.Name}).Warn("ScaleDown detected on a pending Pod. 
we don't launch decommission") podLastOperation.Status = api.StatusFinalizing podLastOperation.PodsOK = []string{} podLastOperation.Pods = append(list, lastPod.Name) podLastOperation.PodsKO = []string{} status.CassandraRackStatus[dcRackName].CassandraLastAction.Status = api.StatusContinue return continueResyncLoop, nil } if lastPod.Status.Phase != v1.PodRunning || lastPod.DeletionTimestamp != nil { return breakResyncLoop, fmt.Errorf("Pod is not running") } logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "pod": lastPod.Name}).Info("ScaleDown detected, we launch decommission") //Ensure node is not leaving or absent from the ring hostName := fmt.Sprintf("%s.%s", lastPod.Spec.Hostname, lastPod.Spec.Subdomain) jolokiaClient, err := NewJolokiaClient(hostName, JolokiaPort, rcc, cc.Spec.ImageJolokiaSecret, cc.Namespace) if err != nil { return breakResyncLoop, err } operationMode, err := jolokiaClient.NodeOperationMode() if err != nil { logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "hostName": hostName, "err": err}).Error("Jolokia call failed") return breakResyncLoop, err } if operationMode == "DECOMMISSIONED" || operationMode == "" || operationMode == "LEAVING" { logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "pod": lastPod.Name}).Info("Node is leaving or has already been decommissioned") return breakResyncLoop, nil } err = rcc.UpdatePodLabel(lastPod, map[string]string{ "operation-status": api.StatusOngoing, "operation-start": k8s.LabelTime(), "operation-name": api.OperationDecommission}) if err != nil { logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "pod": lastPod.Name, "err": err}).Debug("Error updating pod") } podLastOperation.Status = api.StatusOngoing podLastOperation.Pods = append(list, lastPod.Name) podLastOperation.PodsOK = []string{} podLastOperation.PodsKO = []string{} logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "pod": lastPod.Name}).Debug("Decommissioning cassandra node") go func() { logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "pod": lastPod.Name}).Debug("Node decommission starts") err = jolokiaClient.NodeDecommission() logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "pod": lastPod.Name}).Debug("Node decommission ended") if err != nil { logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "pod": lastPod.Name, "err": err}).Debug("Node decommission failed") } }() return breakResyncLoop, nil } //ensureDecommissionFinalizing // State To-DO -> Ongoing func (rcc *ReconcileCassandraCluster) ensureDecommissionFinalizing(cc *api.CassandraCluster, dcName, rackName string, status *api.CassandraClusterStatus, lastPod *v1.Pod) (bool, error) { dcRackName := cc.GetDCRackName(dcName, rackName) podLastOperation := &status.CassandraRackStatus[dcRackName].PodLastOperation pvcName := "data-" + podLastOperation.Pods[0] logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "pvc": pvcName}).Info("Decommission done -> we delete PVC") pvc, err := rcc.GetPVC(cc.Namespace, pvcName) if err != nil { logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "pvc": pvcName}).Error("Cannot get PVC") } if err == nil { err = rcc.deletePVC(pvc) if err != nil { logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "pvc": pvcName}).Error("Error deleting PVC, Please make manual Actions..") } else { logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "pvc": 
pvcName}).Info("PVC deleted") } } podLastOperation.Status = api.StatusDone podLastOperation.PodsOK = []string{lastPod.Name} now := metav1.Now() podLastOperation.EndTime = &now podLastOperation.Pods = []string{} //Important, We must break loop if multipleScaleDown has been asked return breakResyncLoop, nil } // Get pods that need an operation to run on // Returns if checking is needed (can happen if the operator has been killed during an operation) func (rcc *ReconcileCassandraCluster) getPodsToWorkOn(cc *api.CassandraCluster, dcName, rackName string, status *api.CassandraClusterStatus, operationName string) ([]v1.Pod, bool) { dcRackName := cc.GetDCRackName(dcName, rackName) var checkOnly bool podsSlice := make([]v1.Pod, 0) operatorName := os.Getenv("POD_NAME") if len(operatorName) == 0 { logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName}).Info("POD_NAME is not defined and is mandatory") return podsSlice, checkOnly } // Every time we update this variable we have to run updateCassandraStatus podLastOperation := &status.CassandraRackStatus[dcRackName].PodLastOperation logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "podLastOperation.OperatorName": podLastOperation.OperatorName, "podLastOperation.Pods": podLastOperation.Pods}).Debug("Display information about pods") // Operator is different from when the previous operation was started // Set checkOnly to restart the monitoring function to wait until the operation is done if podLastOperation.Name == operationName && podLastOperation.OperatorName != operatorName && podLastOperation.Status == api.StatusOngoing { checkOnly = true podLastOperation.OperatorName = operatorName logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "operation": strings.Title(operationName)}).Debug("Operator's name is different, we enable checking routines") for _, podName := range podLastOperation.Pods { p, err := rcc.GetPod(cc.Namespace, podName) if err != nil || p.Status.Phase != v1.PodRunning || p.DeletionTimestamp != nil { continue } podsSlice = append(podsSlice, *p) } } else { podsSlice = rcc.initOperation(cc, status, dcName, rackName, operationName) } if checkOnly { if len(podsSlice) == 0 { // If previous running pods are done or cannot be found, we update the operator status podLastOperation.Status = api.StatusDone now := metav1.Now() podLastOperation.EndTime = &now } rcc.updateCassandraStatus(cc, status) } return podsSlice, checkOnly } func (rcc *ReconcileCassandraCluster) updatePodLastOperation(clusterName, dcRackName, podName, operation string, status *api.CassandraClusterStatus, err error) { podLastOperation := &status.CassandraRackStatus[dcRackName].PodLastOperation if err != nil { // We set the operation-status to Error on failing pods logrus.WithFields(logrus.Fields{"cluster": clusterName, "rack": dcRackName, "pod": podName, "operation": operation, "err": err.Error()}).Error("Error in updatePodLastOperation") podLastOperation.PodsKO = append(podLastOperation.PodsKO, podName) } else { podLastOperation.PodsOK = append(podLastOperation.PodsOK, podName) } // We remove the pod from the list of pods running the operation podLastOperation.Pods = k8s.RemoveString(podLastOperation.Pods, podName) } /* finalizeOperation sets the labels on the pod where ran an operation depending on the error status */ func (rcc *ReconcileCassandraCluster) finalizeOperation(err error, cc *api.CassandraCluster, dcRackName string, pod v1.Pod, status *api.CassandraClusterStatus, operationName string) { labels := 
map[string]string{"operation-status": api.StatusDone, "operation-end": k8s.LabelTime()} if err != nil { labels["operation-status"] = api.StatusError } ccRefreshed := cc.DeepCopy() rcc.updatePodLastOperation(cc.Name, dcRackName, pod.Name, strings.Title(operationName), status, err) for { err = rcc.UpdatePodLabel(&pod, labels) if err != nil { logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "pod": pod.Name, "labels": labels}).Error("Can't update labels") continue } if rcc.updateCassandraStatus(ccRefreshed, status) == nil { break } logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "pod": pod.Name, "status": status}).Debug("Got an error. Get new version of Cassandra Cluster.") for rcc.client.Get(context.TODO(), types.NamespacedName{Name: cc.Name, Namespace: cc.Namespace}, ccRefreshed) != nil { logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "pod": pod.Name, "status": status}).Debug("Can't get new version of Cassandra Cluster. Try again") time.Sleep(retryInterval) } } } func (rcc *ReconcileCassandraCluster) monitorOperation(hostName string, cc *api.CassandraCluster, dcRackName string, pod v1.Pod, status *api.CassandraClusterStatus, operationName string) { // Wait until there are no more cleanup compactions for { logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "pod": pod.Name, "host": hostName, "operation": operationName}).Info("Checking if operation is still running on node") jolokiaClient, err := NewJolokiaClient(hostName, JolokiaPort, rcc, cc.Spec.ImageJolokiaSecret, cc.Namespace) if err == nil { operationIsRunning, err := podOperationMap[operationName].Monitor(jolokiaClient) // When there is an error it returns true to try again during the next loop if err != nil { logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "pod": pod.Name, "host": hostName, "operation": operationName, "err": err}).Error("Got an error from Jolokia") operationIsRunning = true } if operationIsRunning != true { break } } time.Sleep(monitorSleepDelay) } postAction := podOperationMap[operationName].PostAction var err error if postAction != nil { err = postAction(rcc, cc, dcRackName, pod, status) } chanRunningOp <- finalizedOp{err, cc, dcRackName, pod, status, operationName} } func (rcc *ReconcileCassandraCluster) runUpgradeSSTables(hostName string, cc *api.CassandraCluster, dcRackName string, pod v1.Pod, status *api.CassandraClusterStatus) error { var err error operation := strings.Title(api.OperationUpgradeSSTables) logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "pod": pod.Name, "hostName": hostName, "operation": operation}).Info("Operation start") // Add the operatorName to the last pod operation in case the operator pod is replaced status.CassandraRackStatus[dcRackName].PodLastOperation.OperatorName = os.Getenv("POD_NAME") rcc.updateCassandraStatus(cc, status) jolokiaClient, err := NewJolokiaClient(hostName, JolokiaPort, rcc, cc.Spec.ImageJolokiaSecret, cc.Namespace) if err == nil { err = jolokiaClient.NodeUpgradeSSTables(0) } return err } func (rcc *ReconcileCassandraCluster) runRebuild(hostName string, cc *api.CassandraCluster, dcRackName string, pod v1.Pod, status *api.CassandraClusterStatus) error { var err error var rebuildFrom, labelSet = pod.GetLabels()["operation-argument"] operation := strings.Title(api.OperationRebuild) logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "pod": pod.Name, "hostName": hostName, "operation": operation}).Info("Operation 
start") if labelSet != true { err = errors.New("operation-argument is needed to get the datacenter name to rebuild from") } else if cc.IsValidDC(rebuildFrom) == false { err = fmt.Errorf("%s is not an existing datacenter", rebuildFrom) } // In case of an error set the status on the pod and skip it if err != nil { return err } logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "pod": pod.Name, "datacenter": rebuildFrom, "operation": operation}).Info("Execute the Jolokia Operation") // Add the operatorName to the last pod operation in case the operator pod is replaced status.CassandraRackStatus[dcRackName].PodLastOperation.OperatorName = os.Getenv("POD_NAME") rcc.updateCassandraStatus(cc, status) jolokiaClient, err := NewJolokiaClient(hostName, JolokiaPort, rcc, cc.Spec.ImageJolokiaSecret, cc.Namespace) if err == nil { err = jolokiaClient.NodeRebuild(rebuildFrom) } return err } func (rcc *ReconcileCassandraCluster) runRemove(hostName string, cc *api.CassandraCluster, dcRackName string, pod v1.Pod, status *api.CassandraClusterStatus) error { operation := strings.Title(api.OperationRemove) logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "pod": pod.Name, "hostName": hostName, "operation": operation}).Info("Operation start") var label, labelSet = pod.GetLabels()["operation-argument"] if labelSet != true { return errors.New("operation-argument is needed to get the pod name to remove from the cluster") } val := strings.Split(label, "_") podToRemove := val[0] var podIPToRemove string if len(val) == 2 { podIPToRemove = val[1] } if podToRemove == "" && podIPToRemove == "" { return fmt.Errorf("Expected format is `[Name][_IP]` with at least one value but none was found") } // Name can be omitted in case the pod has already been deleted but then IP must be provided // When an IP is provided it will be used by the removeNode operation if podIPToRemove != "" && net.ParseIP(podIPToRemove) == nil { return fmt.Errorf("%s is not an IP address", podIPToRemove) } logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "pod": pod.Name, "nodeToRemove": podToRemove, "operation": operation}).Info("Execute the Jolokia Operation") // Add the operatorName to the last pod operation in case the operator pod is replaced status.CassandraRackStatus[dcRackName].PodLastOperation.OperatorName = os.Getenv("POD_NAME") rcc.updateCassandraStatus(cc, status) var lostPod *v1.Pod var err error if podToRemove != "" { // We delete the pod that is no longer part of the cluster lostPod, err = rcc.GetPod(cc.Namespace, podToRemove) if err != nil { if !apierrors.IsNotFound(err) { return fmt.Errorf("Failed to get pod '%s': %v", podToRemove, err) } // If we can't find it, it means it has already been deleted somehow. That's okay as long as we got its IP if podIPToRemove == "" { return fmt.Errorf("Pod %s not found. You need to provide its old IP to remove it from the cluster", podToRemove) } } } // If no IP is not provided, we grab it from the existing pod if podIPToRemove == "" { podIPToRemove = lostPod.Status.PodIP if podIPToRemove == "" { return fmt.Errorf("Can't find an IP assigned to pod %s. 
You need to provide its old IP to remove it from the cluster", podToRemove) } } jolokiaClient, err := NewJolokiaClient(hostName, JolokiaPort, rcc, cc.Spec.ImageJolokiaSecret, cc.Namespace) if err == nil { var hostIDMap map[string]string // Get hostID from internal map and pass it to removeNode function if hostIDMap, err = jolokiaClient.hostIDMap(); err == nil { if hostID, keyFound := hostIDMap[podIPToRemove]; keyFound != true { err = fmt.Errorf("Host with IP '%s' not found in hostIdMap", podIPToRemove) } else { logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "pod": pod.Name, "nodeToRemove": podToRemove, "operation": operation}).Info("Jolokia Remove node operation") err = jolokiaClient.NodeRemove(hostID) } } } return err } func (rcc *ReconcileCassandraCluster) waitUntilPvcIsDeleted(namespace, pvcName string) error { err := wait.Poll(retryInterval, deletedPvcTimeout, func() (done bool, err error) { _, err = rcc.GetPVC(namespace, pvcName) if err != nil && apierrors.IsNotFound(err) { logrus.WithFields(logrus.Fields{"namespace": namespace, "pvc": pvcName}).Info("PVC no longer exists") return true, nil } logrus.WithFields(logrus.Fields{"namespace": namespace, "pvc": pvcName}).Info("Waiting for PVC to be deleted") return false, nil }) if err != nil { return err } return nil } func (rcc *ReconcileCassandraCluster) postRunRemove(cc *api.CassandraCluster, dcRackName string, pod v1.Pod, status *api.CassandraClusterStatus) error { logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "pod": pod.Name}).Info("Post operation start") var label, labelSet = pod.GetLabels()["operation-argument"] if labelSet != true { return errors.New("operation-argument is needed to get the pod name to remove from the cluster") } podToRemove := strings.Split(label, "_")[0] if podToRemove == "" { logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName}).Info("RemoveNode done. No pod was provided so we're done") return nil } // We delete the attached PVC pvcName := "data-" + podToRemove logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "pvc": pvcName}).Info("RemoveNode done. 
We now delete its PVC") pvc, err := rcc.GetPVC(cc.Namespace, pvcName) if err != nil { logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "pvc": pvcName}).Error("Cannot get PVC") } else { err = rcc.deletePVC(pvc) if err != nil && !apierrors.IsNotFound(err) { logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "pvc": pvcName}).Error("Error deleting PVC, manual actions required...") return err } _ = rcc.waitUntilPvcIsDeleted(cc.Namespace, pvcName) logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "pvc": pvcName}).Info("PVC deleted") } // We delete the pod that is no longer part of the cluster lostPod, err := rcc.GetPod(cc.Namespace, podToRemove) if err != nil { if !apierrors.IsNotFound(err) { return fmt.Errorf("Failed to get pod '%s': %v", podToRemove, err) } } err = rcc.ForceDeletePod(lostPod) if err != nil { logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "pod": podToRemove}).Error("Error deleting Pod, manual actions required...") } else { logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "pod": podToRemove}).Info("Pod deleted") } return err } func (rcc *ReconcileCassandraCluster) runCleanup(hostName string, cc *api.CassandraCluster, dcRackName string, pod v1.Pod, status *api.CassandraClusterStatus) error { var err error operation := strings.Title(api.OperationCleanup) logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "pod": pod.Name, "hostName": hostName, "operation": operation}).Info("Operation start") // In case of an error set the status on the pod and skip it if err != nil { return err } logrus.WithFields(logrus.Fields{"cluster": cc.Name, "rack": dcRackName, "pod": pod.Name, "operation": operation}).Info("Execute the Jolokia Operation") // Add the operatorName to the last pod operation in case the operator pod is replaced status.CassandraRackStatus[dcRackName].PodLastOperation.OperatorName = os.Getenv("POD_NAME") rcc.updateCassandraStatus(cc, status) jolokiaClient, err := NewJolokiaClient(hostName, JolokiaPort, rcc, cc.Spec.ImageJolokiaSecret, cc.Namespace) if err == nil { err = jolokiaClient.NodeCleanup() } return err }
[ "\"POD_NAME\"", "\"POD_NAME\"", "\"POD_NAME\"", "\"POD_NAME\"", "\"POD_NAME\"" ]
[]
[ "POD_NAME" ]
[]
["POD_NAME"]
go
1
0
config/settings/local.py
""" Local settings - Run in Debug mode - Use console backend for emails - Add Django Debug Toolbar - Add django-extensions as app """ from .base import * # noqa import socket import os # APP CONFIGURATION # ------------------------------------------------------------------------------ # INSTALLED_APPS += ['gunicorn', ] # DEBUG # ------------------------------------------------------------------------------ DEBUG = env.bool("DJANGO_DEBUG", default=True) TEMPLATES[0]["OPTIONS"]["debug"] = DEBUG # SECRET CONFIGURATION # ------------------------------------------------------------------------------ # Note: This key only used for development and testing SECRET_KEY = env("DJANGO_SECRET_KEY", default="CHANGEME!!!") # Mail settings # ------------------------------------------------------------------------------ EMAIL_PORT = 1025 EMAIL_HOST = "localhost" EMAIL_BACKEND = env( "DJANGO_EMAIL_BACKEND", default="django.core.mail.backends.console.EmailBackend" ) # CACHING # ------------------------------------------------------------------------------ CACHES = {"default": {"BACKEND": "django.core.cache.backends.locmem.LocMemCache", "LOCATION": ""}} # django-debug-toolbar # ------------------------------------------------------------------------------ MIDDLEWARE += ["debug_toolbar.middleware.DebugToolbarMiddleware"] INSTALLED_APPS += ["debug_toolbar"] INTERNAL_IPS = ["127.0.0.1", "10.0.2.2"] # tricks to have debug toolbar when developing with docker if os.environ.get("USE_DOCKER") == "yes": ip = socket.gethostbyname(socket.gethostname()) INTERNAL_IPS += [ip[:-1] + "1"] DEBUG_TOOLBAR_CONFIG = { # "SHOW_TOOLBAR_CALLBACK": lambda r: False, "DISABLE_PANELS": ["debug_toolbar.panels.redirects.RedirectsPanel"], "SHOW_TEMPLATE_CONTEXT": True, } # django-extensions # ------------------------------------------------------------------------------ INSTALLED_APPS += ["django_extensions"] GRAPH_MODELS = {"all_applications": False, "group_models": True} # TESTING # ------------------------------------------------------------------------------ TEST_RUNNER = "django.test.runner.DiscoverRunner" # Local App Settings # ------------------------------------------------------------------------------ # Plugin settings ENABLED_BACKEND_PLUGINS = ["timeline_backend", "example_backend_app"]
[]
[]
[ "USE_DOCKER" ]
[]
["USE_DOCKER"]
python
1
0
orderer/common/server/main_test.go
// Copyright IBM Corp. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package server import ( "io/ioutil" "net" "net/http" "os" "path/filepath" "strconv" "strings" "testing" "time" "github.com/hyperledger/udo/bccsp/factory" "github.com/hyperledger/udo/common/channelconfig" "github.com/hyperledger/udo/common/flogging" "github.com/hyperledger/udo/common/flogging/floggingtest" "github.com/hyperledger/udo/common/localmsp" "github.com/hyperledger/udo/common/metrics/disabled" "github.com/hyperledger/udo/common/metrics/prometheus" "github.com/hyperledger/udo/common/tools/configtxgen/encoder" genesisconfig "github.com/hyperledger/udo/common/tools/configtxgen/localconfig" "github.com/hyperledger/udo/core/comm" "github.com/hyperledger/udo/core/config/configtest" "github.com/hyperledger/udo/orderer/common/cluster" "github.com/hyperledger/udo/orderer/common/localconfig" "github.com/stretchr/testify/assert" ) func TestInitializeLogging(t *testing.T) { origEnvValue := os.Getenv("UDO_LOGGING_SPEC") os.Setenv("UDO_LOGGING_SPEC", "foo=debug") initializeLogging() assert.Equal(t, "debug", flogging.Global.Level("foo").String()) os.Setenv("UDO_LOGGING_SPEC", origEnvValue) } func TestInitializeProfilingService(t *testing.T) { origEnvValue := os.Getenv("UDO_LOGGING_SPEC") defer os.Setenv("UDO_LOGGING_SPEC", origEnvValue) os.Setenv("UDO_LOGGING_SPEC", "debug") // get a free random port listenAddr := func() string { l, _ := net.Listen("tcp", "localhost:0") l.Close() return l.Addr().String() }() initializeProfilingService( &localconfig.TopLevel{ General: localconfig.General{ Profile: localconfig.Profile{ Enabled: true, Address: listenAddr, }}, Kafka: localconfig.Kafka{Verbose: true}, }, ) time.Sleep(500 * time.Millisecond) if _, err := http.Get("http://" + listenAddr + "/" + "/debug/"); err != nil { t.Logf("Expected pprof to be up (will retry again in 3 seconds): %s", err) time.Sleep(3 * time.Second) if _, err := http.Get("http://" + listenAddr + "/" + "/debug/"); err != nil { t.Fatalf("Expected pprof to be up: %s", err) } } } func TestInitializeServerConfig(t *testing.T) { conf := &localconfig.TopLevel{ General: localconfig.General{ TLS: localconfig.TLS{ Enabled: true, ClientAuthRequired: true, Certificate: "main.go", PrivateKey: "main.go", RootCAs: []string{"main.go"}, ClientRootCAs: []string{"main.go"}, }, }, } sc := initializeServerConfig(conf, nil) defaultOpts := comm.DefaultKeepaliveOptions assert.Equal(t, defaultOpts.ServerMinInterval, sc.KaOpts.ServerMinInterval) assert.Equal(t, time.Duration(0), sc.KaOpts.ServerInterval) assert.Equal(t, time.Duration(0), sc.KaOpts.ServerTimeout) testDuration := 10 * time.Second conf.General.Keepalive = localconfig.Keepalive{ ServerMinInterval: testDuration, ServerInterval: testDuration, ServerTimeout: testDuration, } sc = initializeServerConfig(conf, nil) assert.Equal(t, testDuration, sc.KaOpts.ServerMinInterval) assert.Equal(t, testDuration, sc.KaOpts.ServerInterval) assert.Equal(t, testDuration, sc.KaOpts.ServerTimeout) sc = initializeServerConfig(conf, nil) assert.NotNil(t, sc.Logger) assert.Equal(t, &disabled.Provider{}, sc.MetricsProvider) assert.Len(t, sc.UnaryInterceptors, 2) assert.Len(t, sc.StreamInterceptors, 2) sc = initializeServerConfig(conf, &prometheus.Provider{}) assert.Equal(t, &prometheus.Provider{}, sc.MetricsProvider) goodFile := "main.go" badFile := "does_not_exist" oldLogger := logger defer func() { logger = oldLogger }() logger, _ = floggingtest.NewTestLogger(t) testCases := []struct { name string certificate string privateKey string 
rootCA string clientRootCert string clusterCert string clusterKey string clusterCA string }{ {"BadCertificate", badFile, goodFile, goodFile, goodFile, "", "", ""}, {"BadPrivateKey", goodFile, badFile, goodFile, goodFile, "", "", ""}, {"BadRootCA", goodFile, goodFile, badFile, goodFile, "", "", ""}, {"BadClientRootCertificate", goodFile, goodFile, goodFile, badFile, "", "", ""}, {"ClusterBadCertificate", goodFile, goodFile, goodFile, goodFile, badFile, goodFile, goodFile}, {"ClusterBadPrivateKey", goodFile, goodFile, goodFile, goodFile, goodFile, badFile, goodFile}, {"ClusterBadRootCA", goodFile, goodFile, goodFile, goodFile, goodFile, goodFile, badFile}, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { conf := &localconfig.TopLevel{ General: localconfig.General{ TLS: localconfig.TLS{ Enabled: true, ClientAuthRequired: true, Certificate: tc.certificate, PrivateKey: tc.privateKey, RootCAs: []string{tc.rootCA}, ClientRootCAs: []string{tc.clientRootCert}, }, Cluster: localconfig.Cluster{ ClientCertificate: tc.clusterCert, ClientPrivateKey: tc.clusterKey, RootCAs: []string{tc.clusterCA}, }, }, } assert.Panics(t, func() { if tc.clusterCert == "" { initializeServerConfig(conf, nil) } else { initializeClusterConfig(conf) } }, ) }) } } func TestInitializeBootstrapChannel(t *testing.T) { cleanup := configtest.SetDevUDOConfigPath(t) defer cleanup() testCases := []struct { genesisMethod string ledgerType string panics bool }{ {"provisional", "ram", false}, {"provisional", "file", false}, {"provisional", "json", false}, {"invalid", "ram", true}, {"file", "ram", true}, } for _, tc := range testCases { t.Run(tc.genesisMethod+"/"+tc.ledgerType, func(t *testing.T) { fileLedgerLocation, _ := ioutil.TempDir("", "test-ledger") ledgerFactory, _ := createLedgerFactory( &localconfig.TopLevel{ General: localconfig.General{LedgerType: tc.ledgerType}, FileLedger: localconfig.FileLedger{ Location: fileLedgerLocation, }, }, ) bootstrapConfig := &localconfig.TopLevel{ General: localconfig.General{ GenesisMethod: tc.genesisMethod, GenesisProfile: "SampleSingleMSPSolo", GenesisFile: "genesisblock", SystemChannel: genesisconfig.TestChainID, }, } if tc.panics { assert.Panics(t, func() { genesisBlock := extractBootstrapBlock(bootstrapConfig) initializeBootstrapChannel(genesisBlock, ledgerFactory) }) } else { assert.NotPanics(t, func() { genesisBlock := extractBootstrapBlock(bootstrapConfig) initializeBootstrapChannel(genesisBlock, ledgerFactory) }) } }) } } func TestInitializeLocalMsp(t *testing.T) { t.Run("Happy", func(t *testing.T) { assert.NotPanics(t, func() { localMSPDir, _ := configtest.GetDevMspDir() initializeLocalMsp( &localconfig.TopLevel{ General: localconfig.General{ LocalMSPDir: localMSPDir, LocalMSPID: "SampleOrg", BCCSP: &factory.FactoryOpts{ ProviderName: "SW", SwOpts: &factory.SwOpts{ HashFamily: "SHA2", SecLevel: 256, Ephemeral: true, }, }, }, }) }) }) t.Run("Error", func(t *testing.T) { oldLogger := logger defer func() { logger = oldLogger }() logger, _ = floggingtest.NewTestLogger(t) assert.Panics(t, func() { initializeLocalMsp( &localconfig.TopLevel{ General: localconfig.General{ LocalMSPDir: "", LocalMSPID: "", }, }) }) }) } func TestInitializeMultiChainManager(t *testing.T) { cleanup := configtest.SetDevUDOConfigPath(t) defer cleanup() conf := genesisConfig(t) assert.NotPanics(t, func() { initializeLocalMsp(conf) lf, _ := createLedgerFactory(conf) bootBlock := encoder.New(genesisconfig.Load(genesisconfig.SampleDevModeSoloProfile)).GenesisBlockForChannel("system") 
initializeMultichannelRegistrar(bootBlock, &cluster.PredicateDialer{}, comm.ServerConfig{}, nil, conf, localmsp.NewSigner(), &disabled.Provider{}, lf) }) } func TestInitializeGrpcServer(t *testing.T) { // get a free random port listenAddr := func() string { l, _ := net.Listen("tcp", "localhost:0") l.Close() return l.Addr().String() }() host := strings.Split(listenAddr, ":")[0] port, _ := strconv.ParseUint(strings.Split(listenAddr, ":")[1], 10, 16) conf := &localconfig.TopLevel{ General: localconfig.General{ ListenAddress: host, ListenPort: uint16(port), TLS: localconfig.TLS{ Enabled: false, ClientAuthRequired: false, }, }, } assert.NotPanics(t, func() { grpcServer := initializeGrpcServer(conf, initializeServerConfig(conf, nil)) grpcServer.Listener().Close() }) } func TestUpdateTrustedRoots(t *testing.T) { cleanup := configtest.SetDevUDOConfigPath(t) defer cleanup() initializeLocalMsp(genesisConfig(t)) // get a free random port listenAddr := func() string { l, _ := net.Listen("tcp", "localhost:0") l.Close() return l.Addr().String() }() port, _ := strconv.ParseUint(strings.Split(listenAddr, ":")[1], 10, 16) conf := &localconfig.TopLevel{ General: localconfig.General{ ListenAddress: "localhost", ListenPort: uint16(port), TLS: localconfig.TLS{ Enabled: false, ClientAuthRequired: false, }, }, } grpcServer := initializeGrpcServer(conf, initializeServerConfig(conf, nil)) caSupport := &comm.CASupport{ AppRootCAsByChain: make(map[string][][]byte), OrdererRootCAsByChain: make(map[string][][]byte), } callback := func(bundle *channelconfig.Bundle) { if grpcServer.MutualTLSRequired() { t.Log("callback called") updateTrustedRoots(grpcServer, caSupport, bundle) } } lf, _ := createLedgerFactory(conf) bootBlock := encoder.New(genesisconfig.Load(genesisconfig.SampleDevModeSoloProfile)).GenesisBlockForChannel("system") initializeMultichannelRegistrar(bootBlock, &cluster.PredicateDialer{}, comm.ServerConfig{}, nil, genesisConfig(t), localmsp.NewSigner(), &disabled.Provider{}, lf, callback) t.Logf("# app CAs: %d", len(caSupport.AppRootCAsByChain[genesisconfig.TestChainID])) t.Logf("# orderer CAs: %d", len(caSupport.OrdererRootCAsByChain[genesisconfig.TestChainID])) // mutual TLS not required so no updates should have occurred assert.Equal(t, 0, len(caSupport.AppRootCAsByChain[genesisconfig.TestChainID])) assert.Equal(t, 0, len(caSupport.OrdererRootCAsByChain[genesisconfig.TestChainID])) grpcServer.Listener().Close() conf = &localconfig.TopLevel{ General: localconfig.General{ ListenAddress: "localhost", ListenPort: uint16(port), TLS: localconfig.TLS{ Enabled: true, ClientAuthRequired: true, PrivateKey: filepath.Join(".", "testdata", "tls", "server.key"), Certificate: filepath.Join(".", "testdata", "tls", "server.crt"), }, }, } grpcServer = initializeGrpcServer(conf, initializeServerConfig(conf, nil)) caSupport = &comm.CASupport{ AppRootCAsByChain: make(map[string][][]byte), OrdererRootCAsByChain: make(map[string][][]byte), } predDialer := &cluster.PredicateDialer{} clusterConf := initializeClusterConfig(conf) predDialer.SetConfig(clusterConf) callback = func(bundle *channelconfig.Bundle) { if grpcServer.MutualTLSRequired() { t.Log("callback called") updateTrustedRoots(grpcServer, caSupport, bundle) updateClusterDialer(caSupport, predDialer, clusterConf.SecOpts.ServerRootCAs) } } initializeMultichannelRegistrar(bootBlock, &cluster.PredicateDialer{}, comm.ServerConfig{}, nil, genesisConfig(t), localmsp.NewSigner(), &disabled.Provider{}, lf, callback) t.Logf("# app CAs: %d", 
len(caSupport.AppRootCAsByChain[genesisconfig.TestChainID])) t.Logf("# orderer CAs: %d", len(caSupport.OrdererRootCAsByChain[genesisconfig.TestChainID])) // mutual TLS is required so updates should have occurred // we expect an intermediate and root CA for apps and orderers assert.Equal(t, 2, len(caSupport.AppRootCAsByChain[genesisconfig.TestChainID])) assert.Equal(t, 2, len(caSupport.OrdererRootCAsByChain[genesisconfig.TestChainID])) assert.Len(t, predDialer.Config.Load().(comm.ClientConfig).SecOpts.ServerRootCAs, 2) grpcServer.Listener().Close() } func genesisConfig(t *testing.T) *localconfig.TopLevel { t.Helper() localMSPDir, _ := configtest.GetDevMspDir() return &localconfig.TopLevel{ General: localconfig.General{ LedgerType: "ram", GenesisMethod: "provisional", GenesisProfile: "SampleDevModeSolo", SystemChannel: genesisconfig.TestChainID, LocalMSPDir: localMSPDir, LocalMSPID: "SampleOrg", BCCSP: &factory.FactoryOpts{ ProviderName: "SW", SwOpts: &factory.SwOpts{ HashFamily: "SHA2", SecLevel: 256, Ephemeral: true, }, }, }, } }
[ "\"UDO_LOGGING_SPEC\"", "\"UDO_LOGGING_SPEC\"" ]
[]
[ "UDO_LOGGING_SPEC" ]
[]
["UDO_LOGGING_SPEC"]
go
1
0
content/code/go/ig-topsearch/topsearch_test.go
package instago import ( "os" "testing" ) func TestTopsearch(t *testing.T) { tr, err := Topsearch("instagram", os.Getenv("IG_DS_USER_ID"), os.Getenv("IG_SESSIONID"), os.Getenv("IG_CSRFTOKEN")) if err != nil { t.Error(err) return } t.Log(tr) }
[ "\"IG_DS_USER_ID\"", "\"IG_SESSIONID\"", "\"IG_CSRFTOKEN\"" ]
[]
[ "IG_SESSIONID", "IG_DS_USER_ID", "IG_CSRFTOKEN" ]
[]
["IG_SESSIONID", "IG_DS_USER_ID", "IG_CSRFTOKEN"]
go
3
0