Dataset columns (name, type, value range):

  filename          stringlengths   4 to 198
  content           stringlengths   25 to 939k
  environment       list
  variablearg       list
  constarg          list
  variableargjson   stringclasses   1 value
  constargjson      stringlengths   2 to 3.9k
  lang              stringclasses   3 values
  constargcount     float64         0 to 129
  variableargcount  float64         0 to 0
  sentence          stringclasses   1 value
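Each record below is one source file plus the environment-variable names referenced in it. As a minimal, purely illustrative sketch (the dict literal and the json.loads call are framing for this excerpt, not part of the dataset tooling), a row such as the first one below can be handled like this in Python; note that constarg is already a list, while constargjson is its JSON-encoded string form:

import json

# One row, written out as a plain mapping (values copied from the first
# example row below, examples/create_cluster.go; content shortened here).
row = {
    "filename": "examples/create_cluster.go",
    "content": "...full file text...",
    "environment": ['"UHC_TOKEN"'],      # token as it appears in the source
    "variablearg": [],
    "constarg": ["UHC_TOKEN"],           # constant env-var names, unquoted
    "variableargjson": "[]",
    "constargjson": '["UHC_TOKEN"]',     # JSON-encoded copy of constarg
    "lang": "go",
    "constargcount": 1.0,
    "variableargcount": 0.0,
}

# constargjson is a string, so decode it before comparing or counting.
assert json.loads(row["constargjson"]) == row["constarg"]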
examples/create_cluster.go
/*
Copyright (c) 2018 Red Hat, Inc.

Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy of
the License at

  http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations under
the License.
*/

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/openshift-online/uhc-sdk-go/pkg/client"
)

func main() {
	// Create a logger that has the debug level enabled:
	logger, err := client.NewGoLoggerBuilder().
		Debug(true).
		Build()
	if err != nil {
		log.Fatalf("Can't build logger: %v", err)
	}

	// Create the connection, and remember to close it:
	token := os.Getenv("UHC_TOKEN")
	connection, err := client.NewConnectionBuilder().
		Logger(logger).
		Tokens(token).
		Build()
	if err != nil {
		log.Fatalf("Can't build connection: %v", err)
	}
	defer connection.Close()

	// Send a request to create a cluster:
	response, err := connection.Post().
		Path("/api/clusters_mgmt/v1/clusters").
		Parameter("provision", true).
		String(`{
			"name": "mycluster",
			"flavour": {
				"id": "4"
			},
			"region": {
				"id": "us-east-1"
			},
			"aws": {
				"access_key_id": "...",
				"secret_access_key": "..."
			}
		}`).
		Send()
	if err != nil {
		log.Fatalf("Can't create cluster: %s", err)
	}

	// Print the result:
	fmt.Printf("%d\n", response.Status())
	fmt.Printf("%s\n", response.String())
}
[ "\"UHC_TOKEN\"" ]
[]
[ "UHC_TOKEN" ]
[]
["UHC_TOKEN"]
go
1
0
app/snyk/config.py
import os
from json import loads

snyk_config = loads("""{
    "OrginisationMap": [
        { "Name": "FirstChecked-Org", "Matcher": "1-*" },
        { "Name": "SecondChecked-Org", "Matcher": "2-*" },
        { "Name": "CatchAll-Org", "Matcher": "*" }
    ]
}""")


def get_snyk_token():
    return os.environ.get('SECRET_SNYK_API_TOKEN')


def get_config_settings():
    return snyk_config
[]
[]
[ "SECRET_SNYK_API_TOKEN" ]
[]
["SECRET_SNYK_API_TOKEN"]
python
1
0
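The constarg list lines up with the literal names passed to os.environ.get or os.getenv in the file above. How the dataset itself extracted them is not stated in this excerpt; the regex sketch below is only an illustration of that mapping, not the actual extraction tool:

import re

# Illustrative only: pull string-literal names passed to os.environ.get or
# os.getenv out of Python source text.
ENV_CALL = re.compile(r"""os\.(?:environ\.get|getenv)\(\s*['"]([^'"]+)['"]""")

def extract_const_env_names(source: str) -> list:
    return ENV_CALL.findall(source)

print(extract_const_env_names("os.environ.get('SECRET_SNYK_API_TOKEN')"))
# -> ['SECRET_SNYK_API_TOKEN']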
video_playlist/views.py
from django.http.response import HttpResponseRedirect
from django.shortcuts import render
from django.contrib.auth.decorators import login_required

from .models import Video
from .forms import VideoForm


# Create your views here.
def index(request):
    video_playlist = Video.objects.all()
    response = {"video_playlist": video_playlist}
    return render(request, "video_playlist.html", response)


@login_required(login_url='/login')
def add_video(request):
    response = {}
    form = VideoForm(request.POST)
    if request.method == 'POST':
        title = request.POST.get('title')
        link = request.POST.get('link')
        new_video = Video(Title=title, Link=link)
        new_video.save()
        return HttpResponseRedirect('')
    response['form'] = form
    return render(request, "video_playlist_form.html", response)
[]
[]
[]
[]
[]
python
null
null
null
main.go
package main

import (
	"log"
	"net/http"
	"os"
	"strconv"

	"github.com/moonrhythm/parapet"
	"github.com/skip2/go-qrcode"
)

func main() {
	port := os.Getenv("PORT")
	if port == "" {
		port = "8080"
	}

	srv := parapet.NewBackend()
	srv.Addr = ":" + port
	srv.Handler = http.HandlerFunc(generator)
	err := srv.ListenAndServe()
	if err != nil {
		log.Fatal(err)
	}
}

const defaultSize = 256

func generator(w http.ResponseWriter, r *http.Request) {
	c := r.FormValue("c")
	if c == "" {
		http.Error(w, "empty content", http.StatusBadRequest)
		return
	}

	l := qrcode.Medium
	switch r.FormValue("l") {
	case "0":
		l = qrcode.Low
	case "2":
		l = qrcode.High
	case "3":
		l = qrcode.Highest
	}

	s, _ := strconv.Atoi(r.FormValue("s"))
	switch {
	case s == 0:
		s = defaultSize
	case s < -10:
		s = -10
	case s > 1000:
		s = 1000
	}

	qr, err := qrcode.New(c, l)
	if err != nil {
		http.Error(w, "error", http.StatusInternalServerError)
		return
	}

	w.Header().Set("Content-Type", "image/png")
	w.Header().Set("Cache-Control", "public, max-age=31536000, immutable")
	_ = qr.Write(s, w)
}
[ "\"PORT\"" ]
[]
[ "PORT" ]
[]
["PORT"]
go
1
0
test/com/facebook/buck/cli/CommandRunnerParamsForTesting.java
/* * Copyright 2013-present Facebook, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. You may obtain * a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. */ package com.facebook.buck.cli; import com.facebook.buck.android.AndroidBuckConfig; import com.facebook.buck.android.AndroidDirectoryResolver; import com.facebook.buck.android.FakeAndroidDirectoryResolver; import com.facebook.buck.event.BuckEventBus; import com.facebook.buck.event.BuckEventBusFactory; import com.facebook.buck.httpserver.WebServer; import com.facebook.buck.io.ExecutableFinder; import com.facebook.buck.java.FakeJavaPackageFinder; import com.facebook.buck.java.JavaPackageFinder; import com.facebook.buck.parser.Parser; import com.facebook.buck.parser.ParserConfig; import com.facebook.buck.python.PythonBuckConfig; import com.facebook.buck.rules.NoopArtifactCache; import com.facebook.buck.rules.Repository; import com.facebook.buck.rules.TestRepositoryBuilder; import com.facebook.buck.testutil.TestConsole; import com.facebook.buck.timing.DefaultClock; import com.facebook.buck.util.Console; import com.facebook.buck.util.NullFileHashCache; import com.facebook.buck.util.ProcessManager; import com.facebook.buck.util.environment.Platform; import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.base.Optional; import com.google.common.collect.ImmutableMap; import java.io.IOException; public class CommandRunnerParamsForTesting { /** Utility class: do not instantiate. 
*/ private CommandRunnerParamsForTesting() {} public static CommandRunnerParams createCommandRunnerParamsForTesting( Console console, Repository repository, AndroidDirectoryResolver androidDirectoryResolver, ArtifactCacheFactory artifactCacheFactory, BuckEventBus eventBus, BuckConfig config, Platform platform, ImmutableMap<String, String> environment, JavaPackageFinder javaPackageFinder, ObjectMapper objectMapper, Optional<WebServer> webServer) throws IOException, InterruptedException { ParserConfig parserConfig = new ParserConfig(config); PythonBuckConfig pythonBuckConfig = new PythonBuckConfig( config, new ExecutableFinder()); return new CommandRunnerParams( console, repository, Main.createAndroidPlatformTargetSupplier( androidDirectoryResolver, new AndroidBuckConfig(new FakeBuckConfig(), platform), eventBus), artifactCacheFactory, eventBus, Parser.createParser( repository, pythonBuckConfig.getPythonInterpreter(), parserConfig.getAllowEmptyGlobs(), parserConfig.getEnforceBuckPackageBoundary(), parserConfig.getTempFilePatterns(), parserConfig.getBuildFileName(), parserConfig.getDefaultIncludes()), platform, environment, javaPackageFinder, objectMapper, new DefaultClock(), Optional.<ProcessManager>absent(), webServer, config, new NullFileHashCache()); } public static Builder builder() { return new Builder(); } public static class Builder { private AndroidDirectoryResolver androidDirectoryResolver = new FakeAndroidDirectoryResolver(); private ArtifactCacheFactory artifactCacheFactory = new InstanceArtifactCacheFactory( new NoopArtifactCache()); private Console console = new TestConsole(); private BuckConfig config = new FakeBuckConfig(); private BuckEventBus eventBus = BuckEventBusFactory.newInstance(); private Platform platform = Platform.detect(); private ImmutableMap<String, String> environment = ImmutableMap.copyOf(System.getenv()); private JavaPackageFinder javaPackageFinder = new FakeJavaPackageFinder(); private ObjectMapper objectMapper = new ObjectMapper(); private Optional<WebServer> webServer = Optional.absent(); public CommandRunnerParams build() throws IOException, InterruptedException{ return createCommandRunnerParamsForTesting( console, new TestRepositoryBuilder().build(), androidDirectoryResolver, artifactCacheFactory, eventBus, config, platform, environment, javaPackageFinder, objectMapper, webServer); } public Builder setConsole(Console console) { this.console = console; return this; } public Builder setWebserver(Optional<WebServer> webServer) { this.webServer = webServer; return this; } public Builder setArtifactCacheFactory(ArtifactCacheFactory factory) { this.artifactCacheFactory = factory; return this; } public Builder setBuckConfig(BuckConfig buckConfig) { this.config = buckConfig; return this; } } }
[]
[]
[]
[]
[]
java
0
0
Algorithms/Java/com/strings/LoveLetterMystery.java
// Author: Sagar Malik // https://github.com/SagarMalik package com.strings; import java.io.BufferedWriter; import java.io.FileWriter; import java.io.IOException; import java.util.Scanner; public class LoveLetterMystery { static class Solution { // Complete the theLoveLetterMystery function below. static int theLoveLetterMystery(String s) { int count = 0, l = 0, r = s.length() - 1; while (l < r) count += Math.abs(s.charAt(l++) - s.charAt(r--)); return count; } private static final Scanner scanner = new Scanner(System.in); public static void main(String[] args) throws IOException { BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(System.getenv("OUTPUT_PATH"))); int q = scanner.nextInt(); scanner.skip("(\r\n|[\n\r\u2028\u2029\u0085])?"); for (int qItr = 0; qItr < q; qItr++) { String s = scanner.nextLine(); int result = theLoveLetterMystery(s); bufferedWriter.write(String.valueOf(result)); bufferedWriter.newLine(); } bufferedWriter.close(); scanner.close(); } } public static void main(String[] args) throws IOException { Solution.main(args); } }
[ "\"OUTPUT_PATH\"" ]
[]
[ "OUTPUT_PATH" ]
[]
["OUTPUT_PATH"]
java
1
0
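Note the difference between the environment and constarg columns in this row: environment keeps the quote characters exactly as they appear in the source call System.getenv("OUTPUT_PATH"), while constarg holds the bare name. A one-line normalisation (illustrative only, not dataset tooling) makes the two comparable:

# environment keeps source-level quotes; constarg strips them.
environment = ['"OUTPUT_PATH"']
constarg = ["OUTPUT_PATH"]

assert [token.strip('"') for token in environment] == constarg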
dvc/tree/azure.py
import logging import os import threading from datetime import datetime, timedelta from funcy import cached_property, wrap_prop from dvc.path_info import CloudURLInfo from dvc.progress import Tqdm from dvc.scheme import Schemes from .base import BaseTree logger = logging.getLogger(__name__) class AzureTree(BaseTree): scheme = Schemes.AZURE PATH_CLS = CloudURLInfo REQUIRES = { "azure-storage-blob": "azure.storage.blob", "knack": "knack", } PARAM_CHECKSUM = "etag" COPY_POLL_SECONDS = 5 LIST_OBJECT_PAGE_SIZE = 5000 def __init__(self, repo, config): super().__init__(repo, config) url = config.get("url", "azure://") self.path_info = self.PATH_CLS(url) if not self.path_info.bucket: container = self._az_config.get("storage", "container_name", None) self.path_info = self.PATH_CLS(f"azure://{container}") self._conn_kwargs = { opt: config.get(opt) or self._az_config.get("storage", opt, None) for opt in ["connection_string", "sas_token"] } self._conn_kwargs["account_name"] = self._az_config.get( "storage", "account", None ) self._conn_kwargs["account_key"] = self._az_config.get( "storage", "key", None ) @cached_property def _az_config(self): # NOTE: ideally we would've used get_default_cli().config from # azure.cli.core, but azure-cli-core has a lot of conflicts with other # dependencies. So instead we are just use knack directly from knack.config import CLIConfig config_dir = os.getenv( "AZURE_CONFIG_DIR", os.path.expanduser(os.path.join("~", ".azure")) ) return CLIConfig(config_dir=config_dir, config_env_var_prefix="AZURE") @wrap_prop(threading.Lock()) @cached_property def blob_service(self): # pylint: disable=no-name-in-module from azure.storage.blob import BlockBlobService from azure.common import AzureMissingResourceHttpError logger.debug(f"URL {self.path_info}") logger.debug(f"Connection options {self._conn_kwargs}") blob_service = BlockBlobService(**self._conn_kwargs) logger.debug(f"Container name {self.path_info.bucket}") try: # verify that container exists blob_service.list_blobs( self.path_info.bucket, delimiter="/", num_results=1 ) except AzureMissingResourceHttpError: blob_service.create_container(self.path_info.bucket) return blob_service def get_etag(self, path_info): etag = self.blob_service.get_blob_properties( path_info.bucket, path_info.path ).properties.etag return etag.strip('"') def _generate_download_url(self, path_info, expires=3600): from azure.storage.blob import ( # pylint:disable=no-name-in-module BlobPermissions, ) expires_at = datetime.utcnow() + timedelta(seconds=expires) sas_token = self.blob_service.generate_blob_shared_access_signature( path_info.bucket, path_info.path, permission=BlobPermissions.READ, expiry=expires_at, ) download_url = self.blob_service.make_blob_url( path_info.bucket, path_info.path, sas_token=sas_token ) return download_url def exists(self, path_info): paths = self._list_paths(path_info.bucket, path_info.path) return any(path_info.path == path for path in paths) def _list_paths(self, bucket, prefix): blob_service = self.blob_service next_marker = None while True: blobs = blob_service.list_blobs( bucket, prefix=prefix, marker=next_marker ) for blob in blobs: yield blob.name if not blobs.next_marker: break next_marker = blobs.next_marker def walk_files(self, path_info, **kwargs): if not kwargs.pop("prefix", False): path_info = path_info / "" for fname in self._list_paths( path_info.bucket, path_info.path, **kwargs ): if fname.endswith("/"): continue yield path_info.replace(path=fname) def remove(self, path_info): if path_info.scheme != self.scheme: 
raise NotImplementedError logger.debug(f"Removing {path_info}") self.blob_service.delete_blob(path_info.bucket, path_info.path) def get_file_hash(self, path_info): return self.get_etag(path_info) def _upload( self, from_file, to_info, name=None, no_progress_bar=False, **_kwargs ): with Tqdm(desc=name, disable=no_progress_bar, bytes=True) as pbar: self.blob_service.create_blob_from_path( to_info.bucket, to_info.path, from_file, progress_callback=pbar.update_to, ) def _download( self, from_info, to_file, name=None, no_progress_bar=False, **_kwargs ): with Tqdm(desc=name, disable=no_progress_bar, bytes=True) as pbar: self.blob_service.get_blob_to_path( from_info.bucket, from_info.path, to_file, progress_callback=pbar.update_to, )
[]
[]
[ "AZURE_CONFIG_DIR" ]
[]
["AZURE_CONFIG_DIR"]
python
1
0
relay-java/src/main/java/com/genymobile/gnirehtet/Main.java
/* * Copyright (C) 2017 Genymobile * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.genymobile.gnirehtet; import com.genymobile.gnirehtet.relay.CommandExecutionException; import com.genymobile.gnirehtet.relay.Log; import com.genymobile.gnirehtet.relay.Relay; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Scanner; import java.util.regex.Matcher; import java.util.regex.Pattern; public final class Main { private static final String TAG = "Gnirehtet"; private static final String NL = System.lineSeparator(); private static final String REQUIRED_APK_VERSION_CODE = "8"; private Main() { // not instantiable } private static String getAdbPath() { String adb = System.getenv("ADB"); return adb != null ? adb : "adb"; } private static String getApkPath() { String apk = System.getenv("GNIREHTET_APK"); return apk != null ? apk : "gnirehtet.apk"; } enum Command { INSTALL("install", CommandLineArguments.PARAM_SERIAL) { @Override String getDescription() { return "Install the client on the Android device and exit.\n" + "If several devices are connected via adb, then serial must be\n" + "specified."; } @Override void execute(CommandLineArguments args) throws Exception { cmdInstall(args.getSerial()); } }, UNINSTALL("uninstall", CommandLineArguments.PARAM_SERIAL) { @Override String getDescription() { return "Uninstall the client from the Android device and exit.\n" + "If several devices are connected via adb, then serial must be\n" + "specified."; } @Override void execute(CommandLineArguments args) throws Exception { cmdUninstall(args.getSerial()); } }, REINSTALL("reinstall", CommandLineArguments.PARAM_SERIAL) { @Override String getDescription() { return "Uninstall then install."; } @Override void execute(CommandLineArguments args) throws Exception { cmdReinstall(args.getSerial()); } }, RUN("run", CommandLineArguments.PARAM_SERIAL | CommandLineArguments.PARAM_DNS_SERVER | CommandLineArguments.PARAM_ROUTES | CommandLineArguments.PARAM_PORT) { @Override String getDescription() { return "Enable reverse tethering for exactly one device:\n" + " - install the client if necessary;\n" + " - start the client;\n" + " - start the relay server;\n" + " - on Ctrl+C, stop both the relay server and the client."; } @Override void execute(CommandLineArguments args) throws Exception { cmdRun(args.getSerial(), args.getDnsServers(), args.getRoutes(), args.getPort()); } }, AUTORUN("autorun", CommandLineArguments.PARAM_DNS_SERVER | CommandLineArguments.PARAM_ROUTES | CommandLineArguments.PARAM_PORT) { @Override String getDescription() { return "Enable reverse tethering for all devices:\n" + " - monitor devices and start clients (autostart);\n" + " - start the relay server."; } @Override void execute(CommandLineArguments args) throws Exception { cmdAutorun(args.getDnsServers(), args.getRoutes(), args.getPort()); } }, START("start", CommandLineArguments.PARAM_SERIAL | CommandLineArguments.PARAM_DNS_SERVER | 
CommandLineArguments.PARAM_ROUTES | CommandLineArguments.PARAM_PORT) { @Override String getDescription() { return "Start a client on the Android device and exit.\n" + "If several devices are connected via adb, then serial must be\n" + "specified.\n" + "If -d is given, then make the Android device use the specified\n" + "DNS server(s). Otherwise, use 8.8.8.8 (Google public DNS).\n" + "If -r is given, then only reverse tether the specified routes.\n" + "If -p is given, then make the relay server listen on the specified\n" + "port. Otherwise, use port 31416.\n" + "Otherwise, use 0.0.0.0/0 (redirect the whole traffic).\n" + "If the client is already started, then do nothing, and ignore\n" + "the other parameters.\n" + "10.0.2.2 is mapped to the host 'localhost'."; } @Override void execute(CommandLineArguments args) throws Exception { cmdStart(args.getSerial(), args.getDnsServers(), args.getRoutes(), args.getPort()); } }, AUTOSTART("autostart", CommandLineArguments.PARAM_DNS_SERVER | CommandLineArguments.PARAM_ROUTES | CommandLineArguments.PARAM_PORT) { @Override String getDescription() { return "Listen for device connexions and start a client on every detected\n" + "device.\n" + "Accept the same parameters as the start command (excluding the\n" + "serial, which will be taken from the detected device)."; } @Override void execute(CommandLineArguments args) throws Exception { cmdAutostart(args.getDnsServers(), args.getRoutes(), args.getPort()); } }, STOP("stop", CommandLineArguments.PARAM_SERIAL) { @Override String getDescription() { return "Stop the client on the Android device and exit.\n" + "If several devices are connected via adb, then serial must be\n" + "specified."; } @Override void execute(CommandLineArguments args) throws Exception { cmdStop(args.getSerial()); } }, RESTART("restart", CommandLineArguments.PARAM_SERIAL | CommandLineArguments.PARAM_DNS_SERVER | CommandLineArguments.PARAM_ROUTES | CommandLineArguments.PARAM_PORT) { @Override String getDescription() { return "Stop then start."; } @Override void execute(CommandLineArguments args) throws Exception { cmdRestart(args.getSerial(), args.getDnsServers(), args.getRoutes(), args.getPort()); } }, TUNNEL("tunnel", CommandLineArguments.PARAM_SERIAL | CommandLineArguments.PARAM_PORT) { @Override String getDescription() { return "Set up the 'adb reverse' tunnel.\n" + "If a device is unplugged then plugged back while gnirehtet is\n" + "active, resetting the tunnel is sufficient to get the\n" + "connection back."; } @Override void execute(CommandLineArguments args) throws Exception { cmdTunnel(args.getSerial(), args.getPort()); } }, RELAY("relay", CommandLineArguments.PARAM_PORT) { @Override String getDescription() { return "Start the relay server in the current terminal."; } @Override void execute(CommandLineArguments args) throws Exception { cmdRelay(args.getPort()); } }; private String command; private int acceptedParameters; Command(String command, int acceptedParameters) { this.command = command; this.acceptedParameters = acceptedParameters; } abstract String getDescription(); abstract void execute(CommandLineArguments args) throws Exception; } private static void cmdInstall(String serial) throws InterruptedException, IOException, CommandExecutionException { Log.i(TAG, "Installing gnirehtet client..."); execAdb(serial, "install", "-r", getApkPath()); } private static void cmdUninstall(String serial) throws InterruptedException, IOException, CommandExecutionException { Log.i(TAG, "Uninstalling gnirehtet client..."); execAdb(serial, 
"uninstall", "com.genymobile.gnirehtet"); } private static void cmdReinstall(String serial) throws InterruptedException, IOException, CommandExecutionException { cmdUninstall(serial); cmdInstall(serial); } private static void cmdRun(String serial, String dnsServers, String routes, int port) throws IOException { // start in parallel so that the relay server is ready when the client connects asyncStart(serial, dnsServers, routes, port); Runtime.getRuntime().addShutdownHook(new Thread(() -> { // executed on Ctrl+C try { cmdStop(serial); } catch (Exception e) { Log.e(TAG, "Cannot stop client", e); } })); cmdRelay(port); } private static void cmdAutorun(final String dnsServers, final String routes, int port) throws IOException { new Thread(() -> { try { cmdAutostart(dnsServers, routes, port); } catch (Exception e) { Log.e(TAG, "Cannot auto start clients", e); } }).start(); cmdRelay(port); } @SuppressWarnings("checkstyle:MagicNumber") private static void cmdStart(String serial, String dnsServers, String routes, int port) throws InterruptedException, IOException, CommandExecutionException { if (mustInstallClient(serial)) { cmdInstall(serial); // wait a bit after the app is installed so that intent actions are correctly registered Thread.sleep(500); // ms } Log.i(TAG, "Starting client..."); cmdTunnel(serial, port); List<String> cmd = new ArrayList<>(); Collections.addAll(cmd, "shell", "am", "start", "-a", "com.genymobile.gnirehtet.START", "-n", "com.genymobile.gnirehtet/.GnirehtetActivity"); if (dnsServers != null) { Collections.addAll(cmd, "--esa", "dnsServers", dnsServers); } if (routes != null) { Collections.addAll(cmd, "--esa", "routes", routes); } execAdb(serial, cmd); } private static void cmdAutostart(final String dnsServers, final String routes, int port) { AdbMonitor adbMonitor = new AdbMonitor((serial) -> { asyncStart(serial, dnsServers, routes, port); }); adbMonitor.monitor(); } private static void cmdStop(String serial) throws InterruptedException, IOException, CommandExecutionException { Log.i(TAG, "Stopping client..."); execAdb(serial, "shell", "am", "start", "-a", "com.genymobile.gnirehtet.STOP", "-n", "com.genymobile.gnirehtet/.GnirehtetActivity"); } private static void cmdRestart(String serial, String dnsServers, String routes, int port) throws InterruptedException, IOException, CommandExecutionException { cmdStop(serial); cmdStart(serial, dnsServers, routes, port); } private static void cmdTunnel(String serial, int port) throws InterruptedException, IOException, CommandExecutionException { execAdb(serial, "reverse", "localabstract:gnirehtet", "tcp:" + port); } private static void cmdRelay(int port) throws IOException { Log.i(TAG, "Starting relay server on port " + port + "..."); new Relay(port).run(); } private static void asyncStart(String serial, String dnsServers, String routes, int port) { new Thread(() -> { try { cmdStart(serial, dnsServers, routes, port); } catch (Exception e) { Log.e(TAG, "Cannot start client", e); } }).start(); } private static void execAdb(String serial, String... adbArgs) throws InterruptedException, IOException, CommandExecutionException { execSync(createAdbCommand(serial, adbArgs)); } private static List<String> createAdbCommand(String serial, String... 
adbArgs) { List<String> command = new ArrayList<>(); command.add(getAdbPath()); if (serial != null) { command.add("-s"); command.add(serial); } Collections.addAll(command, adbArgs); return command; } private static void execAdb(String serial, List<String> adbArgList) throws InterruptedException, IOException, CommandExecutionException { String[] adbArgs = adbArgList.toArray(new String[adbArgList.size()]); execAdb(serial, adbArgs); } private static void execSync(List<String> command) throws InterruptedException, IOException, CommandExecutionException { Log.d(TAG, "Execute: " + command); ProcessBuilder processBuilder = new ProcessBuilder(command); processBuilder.redirectOutput(ProcessBuilder.Redirect.INHERIT).redirectError(ProcessBuilder.Redirect.INHERIT); Process process = processBuilder.start(); int exitCode = process.waitFor(); if (exitCode != 0) { throw new CommandExecutionException(command, exitCode); } } private static boolean mustInstallClient(String serial) throws InterruptedException, IOException, CommandExecutionException { Log.i(TAG, "Checking gnirehtet client..."); List<String> command = createAdbCommand(serial, "shell", "dumpsys", "package", "com.genymobile.gnirehtet"); Log.d(TAG, "Execute: " + command); Process process = new ProcessBuilder(command).start(); int exitCode = process.waitFor(); if (exitCode != 0) { throw new CommandExecutionException(command, exitCode); } Scanner scanner = new Scanner(process.getInputStream()); // read the versionCode of the installed package Pattern pattern = Pattern.compile("^ versionCode=(\\p{Digit}+).*"); while (scanner.hasNextLine()) { Matcher matcher = pattern.matcher(scanner.nextLine()); if (matcher.matches()) { String installedVersionCode = matcher.group(1); return !REQUIRED_APK_VERSION_CODE.equals(installedVersionCode); } } return true; } private static void printUsage() { StringBuilder builder = new StringBuilder("Syntax: gnirehtet ("); Command[] commands = Command.values(); for (int i = 0; i < commands.length; ++i) { if (i != 0) { builder.append('|'); } builder.append(commands[i].command); } builder.append(") ...").append(NL); for (Command command : commands) { builder.append(NL); appendCommandUsage(builder, command); } System.err.print(builder.toString()); } private static void appendCommandUsage(StringBuilder builder, Command command) { builder.append(" gnirehtet ").append(command.command); if ((command.acceptedParameters & CommandLineArguments.PARAM_SERIAL) != 0) { builder.append(" [serial]"); } if ((command.acceptedParameters & CommandLineArguments.PARAM_DNS_SERVER) != 0) { builder.append(" [-d DNS[,DNS2,...]]"); } if ((command.acceptedParameters & CommandLineArguments.PARAM_PORT) != 0) { builder.append(" [-p PORT]"); } if ((command.acceptedParameters & CommandLineArguments.PARAM_ROUTES) != 0) { builder.append(" [-r ROUTE[,ROUTE2,...]]"); } builder.append(NL); String[] descLines = command.getDescription().split("\n"); for (String descLine : descLines) { builder.append(" ").append(descLine).append(NL); } } private static void printCommandUsage(Command command) { StringBuilder builder = new StringBuilder(); appendCommandUsage(builder, command); System.err.print(builder.toString()); } public static void main(String... 
args) throws Exception { if (args.length == 0) { printUsage(); return; } String cmd = args[0]; for (Command command : Command.values()) { if (cmd.equals(command.command)) { // forget args[0] containing the command name String[] commandArgs = Arrays.copyOfRange(args, 1, args.length); CommandLineArguments arguments; try { arguments = CommandLineArguments.parse(command.acceptedParameters, commandArgs); } catch (IllegalArgumentException e) { Log.e(TAG, e.getMessage()); printCommandUsage(command); return; } command.execute(arguments); return; } } if ("rt".equals(cmd)) { Log.e(TAG, "The 'rt' command has been renamed to 'run'. Try 'gnirehtet run' instead."); printCommandUsage(Command.RUN); } else { Log.e(TAG, "Unknown command: " + cmd); printUsage(); } } }
[ "\"ADB\"", "\"GNIREHTET_APK\"" ]
[]
[ "GNIREHTET_APK", "ADB" ]
[]
["GNIREHTET_APK", "ADB"]
java
2
0
src/rl.py
import pygcurse import pygame import sys import math import random import map import character import player_character import dialog import inventoryDialog import messagesDialog import skillsDialog import item import xml2object import code import enemy import animation import Message import scores import difficulty import mainmenu import os import messageBox import sys import logging import traceback from tutorial import * from pygame.locals import * # Get chance to hit # Input is To Hit - To Defend # Output between 0 and 1 def getToHitChance (toHit): return math.atan(0.6577 * toHit - 3.5 + math.pi / 2)/math.pi # Get critical chance # Input is To Hit def getToCritChance (toHit): return math.atan(0.2124 * toHit - 3.5 + math.pi / 2)/math.pi def LoseGame (win): win.setscreencolors('lime', 'black', clear=True) win.putchars('For you, the dream ends here.', 6, 5, 'white', 'black') win.putchars('You have died.', 14, 10, 'white', 'black') win.putchars('Well. That sucks.', 13, 15, 'white', 'black') score = scores.CalculateScore(Maps, PC, difficulty, 0) win.putchars('Score: ' + str(score), 2, 17, 'red', 'black') win.update() screen.blit(surface,(0,0)) pygame.display.update() pygame.display.flip() pygcurse.waitforkeypress() pygcurse.waitforkeypress() # Spoiler warning! # # # # # # # # # # # # # def WinGame (victoryCondition, win): if victoryCondition == 1: win.setscreencolors('lime', 'black', clear=True) win.putchars('Congratulations!', 1, 2, 'white', 'black') win.putchars('The mighty warlord has been slain', 1, 3, 'white', 'black') win.putchars('at last!', 1, 4, 'white', 'black') win.putchars('It may not last, but peace is upon', 1, 5, 'white', 'black') win.putchars('this part of Atlas.', 1, 6, 'white', 'black') elif victoryCondition == 2: win.setscreencolors('lime', 'black', clear=True) win.putchars('Congratulations!', 1, 2, 'white', 'black') win.putchars('The mighty warlord has been slain', 1, 3, 'white', 'black') win.putchars('It may not have been by your hand, but', 1, 4, 'white', 'black') win.putchars('this part of Atlas is at least for now,', 1, 5, 'white', 'black') win.putchars('in peace.', 1, 6, 'white', 'black') elif victoryCondition == 3: win.setscreencolors('lime', 'black', clear=True) win.putchars('The mighty warlord has been slain', 1, 3, 'white', 'black') win.putchars('by the horrible demonic goliath.', 1, 4, 'white', 'black') win.putchars('Absorbing his vast power, the ', 1, 5, 'white', 'black') win.putchars('unholy beast unleashes hell upon', 1, 6, 'white', 'black') win.putchars('Atlas.', 1, 7, 'white', 'black') win.putchars('Doom awaits those who survive.', 1, 7, 'white', 'black') elif victoryCondition == 4: win.setscreencolors('lime', 'black', clear=True) win.putchars('The mighty warlord has been slain', 1, 3, 'white', 'black') win.putchars('by the apocalyptic goliath.', 1, 4, 'white', 'black') win.putchars('Absorbing his vast power, the ', 1, 5, 'white', 'black') win.putchars('unholy beast unleashes a wave', 1, 6, 'white', 'black') win.putchars('of destruction that clouds', 1, 7, 'white', 'black') win.putchars('Atlas in fire and desolation', 1, 8, 'white', 'black') win.putchars('leaving nothing.', 1, 9, 'white', 'black') win.putchars('You achieved your goal of', 1, 10, 'white', 'black') win.putchars('bringing peace to Atlas.', 1, 11, 'white', 'black') win.putchars('You have done so eternally.', 1, 12, 'white', 'black') elif victoryCondition == 5: win.setscreencolors('lime', 'black', clear=True) win.putchars('The mighty warlord has been slain', 1, 3, 'white', 'black') 
win.putchars('by the Mortreon, the True Dragon.', 1, 4, 'white', 'black') win.putchars('Mortreon absorbs the explosion of', 1, 5, 'white', 'black') win.putchars('power in its entirity and' , 1, 6, 'white', 'black') win.putchars('unleashes an almighty roar', 1, 7, 'white', 'black') win.putchars('resurrecting Eon, the God King of', 1, 8, 'white', 'black') win.putchars('Dragons.', 1, 9, 'white', 'black') win.putchars('The second age of Eon will', 1, 10, 'white', 'black') win.putchars('come.', 1, 11, 'white', 'black') win.putchars('The sons of Eon will reign.', 1, 12, 'white', 'black') elif victoryCondition == 6: win.setscreencolors('lime', 'black', clear=True) win.putchars('The mighty warlord has been slain', 1, 3, 'white', 'black') win.putchars('by the blackest of the black', 1, 4, 'white', 'black') win.putchars('The Necromancer grins as his ', 1, 5, 'white', 'black') win.putchars('flesh rots away under the', 1, 6, 'white', 'black') win.putchars('force of his absorbed power.', 1, 7, 'white', 'black') win.putchars('With the power of the warlord', 1, 8, 'white', 'black') win.putchars('the Necromancer enslaves', 1, 9, 'white', 'black') win.putchars('Atlas. And what of his body?', 1, 10, 'white', 'black') win.putchars('Bodies are for mortal men.', 1, 11, 'white', 'black') score = scores.CalculateScore(Maps, PC, difficulty, victoryCondition) win.putchars('Score: ' + str(score), 2, 17, 'red', 'black') win.update(); screen.blit(surface,(0,0)) pygame.display.update() pygame.display.flip() pygcurse.waitforkeypress() pygcurse.waitforkeypress() def DrawMap(): for x in range(40): for y in range(20): DrawChar(x, y) def DrawChar(x, y): vis = PC.currentMap.VisibilityStatus(x,y) darkColor = pygame.Color(48,48,48) if (ShowMapCheat == True): vis = 2 if vis == 0 : win.putchar(' ', x, y, 'black', pygame.Color(0,0,0,0)); elif vis == 1: win.putchar(PC.currentMap.Map[x][y].character, x, y, darkColor, pygame.Color(0,0,0,0)); elif vis == 2: win.putchar(PC.currentMap.Map[x][y].character, x, y, PC.currentMap.Map[x][y].forecolor, PC.currentMap.Map[x][y].backcolor); # This is here for dialogs that show during times the game loop isn't running # to be able to be used. # # There is a bug that it can't be closed in this state def DialogOnlyLoop(dialog, surface): while (len(dialog) > 0): for event in pygame.event.get(): dialog[0].process(event) dialog[0].draw(surface) screen.blit(surface,(0,0)) pygame.display.update() pygame.display.flip() if dialog[0].toClose: dialog.remove(dialog[0]) def log_uncaught_exceptions(ex_cls, ex, tb): logging.critical(''.join(traceback.format_tb(tb))) logging.critical('{0}: {1}'.format(ex_cls, ex)) print(''.join(traceback.format_tb(tb))) print('{0}: {1}'.format(ex_cls, ex)) # This needs to be a list so it can be immutable and be passed by reference. # This has the side effect of allowing multiple dialogs. 
dialog = [] lastDialog = None # Main function # Init # Store log of errors sys.excepthook = log_uncaught_exceptions logging.basicConfig( level=logging.DEBUG, filename='error.log', filemode='w') screen = pygame.display.set_mode((520,648)) pygame.display.set_caption('Atlas Warriors') surface = pygame.Surface((520, 648)) win = pygcurse.PygcurseSurface(width=40, height=27, windowsurface=surface) win.font = pygame.font.Font("DejaVuSansMono.ttf", 20) descriptFont = pygame.font.Font("DejaVuSansMono.ttf", 10) descriptTitleFont = pygame.font.Font("DejaVuSansMono.ttf", 12) messageFont = pygame.font.Font("DejaVuSerif.ttf", 12) hpFont = pygame.font.Font("DejaVuSerif.ttf", 20) clock = pygame.time.Clock() messageLog = Message.MessageLog(None) tutorial = Tutorial(messageBox.MessageBox, dialog); background = False # This should be states or something. Add to the refactor list! # If a difficulty is in the command line arguments, start if len(sys.argv) > 1: action = int(sys.argv[1]) else: action = mainmenu.MainMenu(win, screen, surface) win = pygcurse.PygcurseSurface(width=40, height=27, windowsurface=surface, shadow=True) win.font = pygame.font.Font("DejaVuSansMono.ttf", 20) # Can enable cheat mode by setting difficulty to 6, 7, 8 or 9 in the arguments if action > 6: action = action % 7 cheatMode = True else: cheatMode = False if action == 0: difficulty = difficulty.Easiest() elif action == 1: difficulty = difficulty.Normal() elif action == 2: difficulty = difficulty.Hard() elif action == 3: difficulty = difficulty.Hardest() elif action == 6: sys.exit() win.colors = ('red', 'gray') cellx = 0 celly = 0 ShowMapCheat = False mouseX = 0 mouseY = 0 # To prevent weird startup mouse bugs mousePos = (1,1) mouseCellX = 0 mouseCellY = 0 ssframe = 0 # Load default items DefaultItems = xml2object.parse('items.xml', item.Item) # Create maps Maps = [] for i in range(10): Maps.append(map.Map(i, messageLog, DefaultItems, difficulty)); for i in range(10): if (i != 9): Maps[i].nextMap = Maps[i + 1] if (i != 0): Maps[i].lastMap = Maps[i - 1] Maps[i].background = pygame.image.load(os.path.join('assets','back_level_'+str(i % 4)+'.png')) PC = player_character.PlayerCharacter(messageLog, Maps[0], DefaultItems, difficulty, tutorial) PC.x = Maps[0].startX PC.y = Maps[0].startY Maps[0].UpdateVisibility(PC, PC.x, PC.y) messageLog.PC = PC currentTint = (0,0,0,0) PC.currentMap.characters.append(PC) lastMap = None # This is not great AllMonsters = [] for i in Maps: AllMonsters.extend(i.characters) EndBoss = next(i for i in AllMonsters if i.chartype == "endboss") win.autoupdate = False # THIS DISABLES THE AUTOUPDATE FEATURE win._autodisplayupdate = False pygame.key.set_repeat(300,30); ForceDraw = False Animations = [] # Pathfinding benchmarks: # import timeit # print (' Get Route: ') # times = [] # for x in range(2, 39): # for y in range(2, 19): # #print(x,y) # times.append(timeit.timeit('PC.GetRoute((x, y))', 'gc.enable(); from __main__ import PC, x, y', number = 10)) # # print ('Average ', sum(times)/float(len(times)), ' Min ', min(times), ' Max ', max(times)) # print (' Get Nearest: ') # times = [] # for x in range(2, 39): # for y in range(2, 19): # #print(x,y) # times.append(timeit.timeit('PC.GetNearest((lambda i: i[0] == x and i[1] == y))', 'gc.enable(); from __main__ import PC, x, y', number = 10)) # # print ('Average ', sum(times)/float(len(times)), ' Min ', min(times), ' Max ', max(times)) # PC.ChangeMap(Maps[0]) # Show introduction tutorial message if first run tutorial.TriggerMessage(TUTORIAL_FIRSTRUN) running = True while 
running or len(Animations) > 0: if len(dialog) != 0 and dialog[0].toClose == True: dialog.remove(dialog[0]) ForceDraw = True for event in pygame.event.get(): if event.type == QUIT: running = False if len(dialog) == 0: if event.type == MOUSEMOTION: mouseCellX, mouseCellY = win.getcoordinatesatpixel(event.pos) mousePos = event.pos if event.type == KEYDOWN and PC.nextMove == 'none': if event.key == pygame.K_LEFT or event.key == pygame.K_4 or event.key == pygame.K_KP4: PC.nextMove = 'move_e' if event.key == pygame.K_UP or event.key == pygame.K_8 or event.key == pygame.K_KP8: PC.nextMove = 'move_n' if event.key == pygame.K_DOWN or event.key == pygame.K_2 or event.key == pygame.K_KP2: PC.nextMove = 'move_s' if event.key == pygame.K_RIGHT or event.key == pygame.K_6 or event.key == pygame.K_KP6: PC.nextMove = 'move_w' if event.key == pygame.K_7 or event.key == pygame.K_KP7: PC.nextMove = 'move_ne' if event.key == pygame.K_9 or event.key == pygame.K_KP9: PC.nextMove = 'move_nw' if event.key == pygame.K_1 or event.key == pygame.K_KP1: PC.nextMove = 'move_se' if event.key == pygame.K_3 or event.key == pygame.K_KP3: PC.nextMove = 'move_sw' if event.key == pygame.K_5 or event.key == pygame.K_KP5: PC.nextMove = 'wait' if event.key == pygame.K_i: dialog.insert(0, inventoryDialog.InventoryDialog(PC)) if event.key == pygame.K_s: dialog.insert(0, skillsDialog.SkillsDialog(PC)) if event.key == pygame.K_m: dialog.insert(0, messagesDialog.MessagesDialog(messageLog)) if event.key == pygame.K_a: PC.autopickup = not(PC.autopickup) if PC.autopickup: messageLog.append(Message.Message(\ "Autopickup has been enabled")) else: messageLog.append(Message.Message(\ "Autopickup has been disabled.")) if event.key == pygame.K_b: background = not(background) ForceDraw = True if cheatMode == True: if event.key == pygame.K_F1: ShowMapCheat = not ShowMapCheat ForceDraw = True if event.key == pygame.K_F2: code.interact(local=locals()) if event.key == pygame.K_F3: PC.ChangeMap(Maps[PC.currentMap.level-1]) PC.currentMap.UpdateVisibility(PC, PC.x, PC.y) ForceDraw = True if event.key == pygame.K_F4: PC.ChangeMap(Maps[PC.currentMap.level+1]) PC.currentMap.UpdateVisibility(PC, PC.x, PC.y) ForceDraw = True else: dialog[0].process(event) if event.type == MOUSEBUTTONDOWN: mouseCellX, mouseCellY = win.getcoordinatesatpixel(event.pos) #print(win.getcoordinatesatpixel(event.pos)) if mouseCellY == 26: if mouseCellX < 9: dialog.insert(0, messagesDialog.MessagesDialog(messageLog)) if mouseCellX >= 9 and mouseCellX < 18: dialog.insert(0, inventoryDialog.InventoryDialog(PC)) if mouseCellX >= 18 and mouseCellX < 27: dialog.insert(0, skillsDialog.SkillsDialog(PC)) # if mouseCellX > 0 and mouseCellY > 0 and mouseCellX < 39 and mouseCellY < 19: #Animations.append(animation.DrawNecromancerSpell(PC.currentMap.characters[2], PC, 'red')) # This slightly odd line means that animations will run more simulateously # if not entirely. That will make the last level a lot less of a pain # in the butt. if not(len(Animations) > 0 and (PC.ticksUntilTurn == 0)): #Update characters if no current animations if (len(dialog) == 0) and not(PC.nextMove == 'none' and PC.ticksUntilTurn <= 0): # Update Characters # This seems hacky, but it's to prevent the monsters # moving at the same time (actually, same turn but # beforehand) meaning that attacks didn't hit if PC.ticksUntilTurn <= 0: for i in messageLog: i.seen = True PC.update() # This needs investigation to see if this goes here. 
It is the # part that will add more monsters after an uncertain amount # of time PC.currentMap.Tick() # This is to prevent PC getting one tick advantage PC.ticksUntilTurn += 1 for character in PC.currentMap.characters: if character.ticksUntilTurn <= 0: DrawChar(character.x, character.y) character.update() else: character.ticksUntilTurn -= 1 Animations.extend(character.animations) character.animations.clear() else: clock.tick(50) #pygame.image.save(win._windowsurface, "ss\\%05d" % ssframe + ".png") #ssframe += 1 #Draw Screen # Draw Background if background: surface.blit(PC.currentMap.background, (0,0)) else: surface.fill((0,0,0,255)) #Draw Map if (lastMap != PC.currentMap or PC.currentMap.visibilityUpdated == True or ForceDraw == True): DrawMap() PC.currentMap.visibilityUpdated = False ForceDraw = False lastMap = PC.currentMap #Draw map over characters for character in PC.currentMap.characters: DrawChar(character.x, character.y) #Draw Items for item in PC.currentMap.Items: if (PC.currentMap.VisibilityStatus(item.x, item.y)) == 2 or ShowMapCheat: win.putchar(item.Character, item.x, item.y, item.Color, None) #Draw Characters for character in PC.currentMap.characters: if not character.dead(): if (PC.currentMap.VisibilityStatus(character.x,character.y)) == 2 or ShowMapCheat: win.putchar(character.character,character.x, character.y, character.Color()[0], character.Color()[1]) else: #Probably not where this should be PC.currentMap.characters.remove(character) # Draw animations if there are any. if (PC.ticksUntilTurn == 0 or EndBoss.Victory() != 0): for i in Animations: if i.frame >= i.frames: Animations.remove(i) ForceDraw = True else: i.tick(win, PC.currentMap) #pygame.image.save(win._windowsurface, "c:\\ss\\%05d" % ssframe + ".bmp") #ssframe += 1 # Draw redenning if in Second Wind if PC.secondWind: newTint = ((difficulty.secondWindTime + 1 - PC.secondWindTimeLeft) * 255//5,0,0) else: newTint = (0,0,0) if newTint != currentTint: win.settint(newTint[0], newTint[1], newTint[2],(0,0,40,20)) currentTint = newTint #win.putchars('Score: ' + str(scores.CalculateScore(Maps, PC, 1, 0) ), 2, 17, 'red') if len(dialog) != 0: dialog[0].draw(surface) screen.blit(surface,(0,0)) else: win.update() # THIS IS THE NEW CALL TO THE UPDATE() METHOD # Draw HUD # Draw Messages toHit = PC.ToHit() lines = [ hpFont.render('HP ' + str(PC.hp) + ' (' + str(PC.maxhp) + ') Level ' + str(PC.level) +\ ' XP ' + str(PC.xp) + ' (' + str(int(PC.nextLevel)) + ')' \ ' Hit ' + str(toHit[0][0]) +\ (', ' + str(toHit[1][0]) if len(toHit) > 1 else '') + ' Def ' +\ str(PC.ToDefend()), True, (255,255 if PC.ZombieMod() == 0 else 0,255 if PC.ZombieMod() == 0 else 0, 255))] messageLogToShow = [i for i in messageLog if not(i.seen) and not(i.text[:18]=="KILL OR BE KILLED!") ] # Ensure second wind is always shown on top if necessary if (PC.secondWind): messageLogToShow.append(Message.Message("KILL OR BE KILLED! 
- " + str(PC.secondWindTimeLeft) + " turns left")) if len(messageLogToShow) > 0: lines.append(messageFont.render(messageLogToShow[len(messageLogToShow)-1].text, True, (255, 0, 0, 255))) else: if len(messageLogToShow) > 0: lines.append(messageFont.render(messageLogToShow[len(messageLogToShow)-1].text, True, (255, 255, 255, 255))) if len(messageLogToShow) > 1: lines.append(messageFont.render(messageLogToShow[len(messageLogToShow)-2].text, True, (225, 225, 225, 255))) if len(messageLogToShow) > 2: lines.append(messageFont.render(messageLogToShow[len(messageLogToShow)-3].text, True, (195, 195, 195, 255))) if len(messageLogToShow) > 3: lines.append(messageFont.render(messageLogToShow[len(messageLogToShow)-4].text, True, (165, 165, 165, 255))) if len(messageLogToShow) > 4: lines.append(messageFont.render(messageLogToShow[len(messageLogToShow)-5].text, True, (135, 135, 135, 255))) curY = 5 + win._cellheight * 20 spacing = 3 for i in lines: surface.blit(i, (3, curY)) curY += i.get_height() + spacing # Draw Bottom Screen Operations pygame.draw.rect(surface, pygcurse.colornames['black'], pygame.Rect(0, win._cellheight * 26, win._cellwidth * 9, win._pixelheight - win._cellheight * 1), 0) pygame.draw.rect(surface, pygcurse.colornames['blue'], pygame.Rect(0, win._cellheight * 26, win._cellwidth * 9, win._pixelheight - win._cellheight * 1), 1) surface.blit(win.font.render(' Messages ', True, (255, 255, 255, 255)), (0, win._cellheight * 26)) pygame.draw.rect(surface, pygcurse.colornames['black'], pygame.Rect(win._cellwidth * 9, win._cellheight * 26, win._cellwidth * 9, win._pixelheight - win._cellheight * 1), 0) pygame.draw.rect(surface, pygcurse.colornames['blue'], pygame.Rect(win._cellwidth * 9, win._cellheight * 26, win._cellwidth * 9, win._pixelheight - win._cellheight * 1), 1) surface.blit(win.font.render(' Load Out ', True, (255, 255, 255, 255)), (win._cellwidth * 9, win._cellheight * 26)) pygame.draw.rect(surface, pygcurse.colornames['black'], pygame.Rect(win._cellwidth * 18, win._cellheight * 26, win._cellwidth * 9, win._pixelheight - win._cellheight * 1), 0) pygame.draw.rect(surface, pygcurse.colornames['blue'], pygame.Rect(win._cellwidth * 18, win._cellheight * 26, win._cellwidth * 9, win._pixelheight - win._cellheight * 1), 1) surface.blit(win.font.render(' Skills ', True, (255, 255, 255, 255)), (win._cellwidth * 18, win._cellheight * 26)) if len(dialog) == 0: top = mousePos[1] #Draw descriptions if mouse over monster for i in PC.currentMap.characters: if mouseCellX == i.x and mouseCellY == i.y and (PC.currentMap.VisibilityStatus(i.x,i.y)) == 2: lines = [descriptTitleFont.render(' ' + i.name, True, (255,0,0,255)), descriptTitleFont.render(str(id(i)), True, (0,0,0,255)), descriptTitleFont.render('Level ' + str(i.level), True, (0,0,0,255)), descriptTitleFont.render('HP ' + str(i.hp) + '/' + str(i.maxhp), True, (0,0,0,255))] for j in i.ToHit(): lines.append(descriptFont.render("To hit: " + str(j[0]), True, (0,0,0,255))) lines.append(descriptFont.render("To defend: " + str(i.ToDefend()), True, (0,0,0,255))) for j in PC.ToHit(): lines.append(descriptFont.render(str(round(PC.ChanceToHit(j[0], i.ToDefend()) * 100)) + '% chance to hit', True, (255,0,0,255))) spacing = 3 widthNeeded = max(l.get_width() for l in lines ) + 6 heightNeeded = 3 * len(lines) + sum(l.get_height() for l in lines) + 12 top = top + heightNeeded pygame.draw.rect(surface, pygcurse.colornames['yellow'], pygame.Rect(min(mousePos[0], surface.get_width()-widthNeeded), mousePos[1], (widthNeeded), (heightNeeded))) curY = 3 for i in 
lines: surface.blit(i, (min(mousePos[0], surface.get_width() - widthNeeded) + 3, mousePos[1] + 3 + curY)) curY += i.get_height() + spacing itemLines = [] for i in PC.currentMap.Items: if mouseCellX == i.x and mouseCellY == i.y and (PC.currentMap.VisibilityStatus(i.x,i.y)) == 2: itemLines.append(descriptFont.render(i.Description(), True, (255,255,255,255))) if (len(itemLines) > 0): widthNeeded = max(l.get_width() for l in itemLines) + 6 heightNeeded = 3 * len(itemLines) + sum(l.get_height() for l in itemLines) pygame.draw.rect(surface, pygcurse.colornames['green'], pygame.Rect(min(mousePos[0], surface.get_width()-widthNeeded), top, (widthNeeded), (heightNeeded))) curY = 0 for i in itemLines: surface.blit(i, (min(mousePos[0], surface.get_width() - widthNeeded) + 3, top + curY)) curY += i.get_height() + spacing # Draw Screen screen.blit(surface,(0,0)) pygame.display.update() pygame.display.flip() #If Lachlan is dead, you win if (EndBoss.Victory() != 0): running = False #If Character is dead, you lose if (PC.dead()): tutorial.TriggerMessage(TUTORIAL_DEATH) DialogOnlyLoop(dialog, surface) running = False if EndBoss.Victory() != 0: WinGame(EndBoss.Victory(), win) elif PC.dead(): LoseGame(win) tutorial.close() pygame.quit() sys.exit()
[]
[]
[]
[]
[]
python
null
null
null
p2p/host/hostv2/hostv2.go
package hostv2 import ( "context" "fmt" "os" "sync" nodeconfig "github.com/harmony-one/harmony/internal/configs/node" "github.com/harmony-one/harmony/internal/utils" "github.com/harmony-one/harmony/p2p" libp2p "github.com/libp2p/go-libp2p" libp2p_crypto "github.com/libp2p/go-libp2p-crypto" libp2p_host "github.com/libp2p/go-libp2p-host" libp2p_peer "github.com/libp2p/go-libp2p-peer" libp2p_peerstore "github.com/libp2p/go-libp2p-peerstore" libp2p_pubsub "github.com/libp2p/go-libp2p-pubsub" ma "github.com/multiformats/go-multiaddr" "github.com/pkg/errors" "github.com/rs/zerolog" ) const ( // BatchSizeInByte The batch size in byte (64MB) in which we return data BatchSizeInByte = 1 << 16 // ProtocolID The ID of protocol used in stream handling. ProtocolID = "/harmony/0.0.1" // Constants for discovery service. //numIncoming = 128 //numOutgoing = 16 ) // topicHandle is a pubsub topic handle. type topicHandle interface { Publish(ctx context.Context, data []byte) error Subscribe() (subscription, error) } type topicHandleImpl struct { t *libp2p_pubsub.Topic } func (th topicHandleImpl) Publish(ctx context.Context, data []byte) error { return th.t.Publish(ctx, data) } func (th topicHandleImpl) Subscribe() (subscription, error) { return th.t.Subscribe() } type topicJoiner interface { JoinTopic(topic string) (topicHandle, error) } type topicJoinerImpl struct { pubsub *libp2p_pubsub.PubSub } func (tj topicJoinerImpl) JoinTopic(topic string) (topicHandle, error) { th, err := tj.pubsub.Join(topic) if err != nil { return nil, err } return topicHandleImpl{th}, nil } // HostV2 is the version 2 p2p host type HostV2 struct { h libp2p_host.Host joiner topicJoiner joined map[string]topicHandle self p2p.Peer priKey libp2p_crypto.PrivKey lock sync.Mutex //incomingPeers []p2p.Peer // list of incoming Peers. TODO: fixed number incoming //outgoingPeers []p2p.Peer // list of outgoing Peers. TODO: fixed number of outgoing // logger logger *zerolog.Logger } func (host *HostV2) getTopic(topic string) (topicHandle, error) { host.lock.Lock() defer host.lock.Unlock() if t, ok := host.joined[topic]; ok { return t, nil } else if t, err := host.joiner.JoinTopic(topic); err != nil { return nil, errors.Wrapf(err, "cannot join pubsub topic %x", topic) } else { host.joined[topic] = t return t, nil } } // SendMessageToGroups sends a message to one or more multicast groups. // It returns a nil error if and only if it has succeeded to schedule the given // message for sending. func (host *HostV2) SendMessageToGroups(groups []nodeconfig.GroupID, msg []byte) (err error) { for _, group := range groups { t, e := host.getTopic(string(group)) if e != nil { err = e continue } e = t.Publish(context.Background(), msg) if e != nil { err = e continue } } return err } // subscription captures the subscription interface we expect from libp2p. type subscription interface { Next(ctx context.Context) (*libp2p_pubsub.Message, error) Cancel() } // GroupReceiverImpl is a multicast group receiver implementation. type GroupReceiverImpl struct { sub subscription } // Close closes this receiver. func (r *GroupReceiverImpl) Close() error { r.sub.Cancel() r.sub = nil return nil } // Receive receives a message. 
func (r *GroupReceiverImpl) Receive(ctx context.Context) ( msg []byte, sender libp2p_peer.ID, err error, ) { if r.sub == nil { return nil, libp2p_peer.ID(""), fmt.Errorf("GroupReceiver has been closed") } m, err := r.sub.Next(ctx) if err == nil { msg = m.Data sender = libp2p_peer.ID(m.From) } return msg, sender, err } // GroupReceiver returns a receiver of messages sent to a multicast group. // See the GroupReceiver interface for details. func (host *HostV2) GroupReceiver(group nodeconfig.GroupID) ( receiver p2p.GroupReceiver, err error, ) { top := string(group) t, err := host.getTopic(top) if err != nil { return nil, err } sub, err := t.Subscribe() if err != nil { return nil, errors.Wrapf(err, "cannot subscribe to topic %x", group) } return &GroupReceiverImpl{sub: sub}, nil } // AddPeer add p2p.Peer into Peerstore func (host *HostV2) AddPeer(p *p2p.Peer) error { if p.PeerID != "" && len(p.Addrs) != 0 { host.Peerstore().AddAddrs(p.PeerID, p.Addrs, libp2p_peerstore.PermanentAddrTTL) return nil } if p.PeerID == "" { host.logger.Error().Msg("AddPeer PeerID is EMPTY") return fmt.Errorf("AddPeer error: peerID is empty") } // reconstruct the multiaddress based on ip/port // PeerID has to be known for the ip/port addr := fmt.Sprintf("/ip4/%s/tcp/%s", p.IP, p.Port) targetAddr, err := ma.NewMultiaddr(addr) if err != nil { host.logger.Error().Err(err).Msg("AddPeer NewMultiaddr error") return err } p.Addrs = append(p.Addrs, targetAddr) host.Peerstore().AddAddrs(p.PeerID, p.Addrs, libp2p_peerstore.PermanentAddrTTL) host.logger.Info().Interface("peer", *p).Msg("AddPeer add to libp2p_peerstore") return nil } //// AddIncomingPeer add peer to incoming peer list //func (host *HostV2) AddIncomingPeer(peer p2p.Peer) { // host.incomingPeers = append(host.incomingPeers, peer) //} // //// AddOutgoingPeer add peer to outgoing peer list //func (host *HostV2) AddOutgoingPeer(peer p2p.Peer) { // host.outgoingPeers = append(host.outgoingPeers, peer) //} // Peerstore returns the peer store func (host *HostV2) Peerstore() libp2p_peerstore.Peerstore { return host.h.Peerstore() } // New creates a host for p2p communication func New(self *p2p.Peer, priKey libp2p_crypto.PrivKey) (*HostV2, error) { // TODO: Convert to zerolog or internal logger interface listenAddr, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/0.0.0.0/tcp/%s", self.Port)) if err != nil { return nil, errors.Wrapf(err, "cannot create listen multiaddr from port %#v", self.Port) } // TODO – use WithCancel for orderly host teardown (which we don't have yet) ctx := context.Background() p2pHost, err := libp2p.New(ctx, libp2p.ListenAddrs(listenAddr), libp2p.Identity(priKey), ) if err != nil { return nil, errors.Wrapf(err, "cannot initialize libp2p host") } traceFile := os.Getenv("P2P_TRACEFILE") // TODO first starting with some huge number to see update of libp2p // and also to dump some values about the p2p message sizes // 3MB const MaxSize = 3_145_728 options := []libp2p_pubsub.Option{ libp2p_pubsub.WithPeerOutboundQueueSize(64), libp2p_pubsub.WithMaxMessageSize(MaxSize), } if len(traceFile) > 0 { tracer, _ := libp2p_pubsub.NewJSONTracer(traceFile) options = append(options, libp2p_pubsub.WithEventTracer(tracer)) } pubsub, err := libp2p_pubsub.NewGossipSub(ctx, p2pHost, options...) 
if err != nil { return nil, errors.Wrapf(err, "cannot initialize libp2p pubsub") } self.PeerID = p2pHost.ID() subLogger := utils.Logger().With().Str("hostID", p2pHost.ID().Pretty()).Logger() // has to save the private key for host h := &HostV2{ h: p2pHost, joiner: topicJoinerImpl{pubsub}, joined: map[string]topicHandle{}, self: *self, priKey: priKey, logger: &subLogger, } h.logger.Debug(). Str("port", self.Port). Str("id", p2pHost.ID().Pretty()). Str("addr", listenAddr.String()). Str("PubKey", self.ConsensusPubKey.SerializeToHexStr()). Msg("HostV2 is up!") return h, nil } // GetID returns ID.Pretty func (host *HostV2) GetID() libp2p_peer.ID { return host.h.ID() } // GetSelfPeer gets self peer func (host *HostV2) GetSelfPeer() p2p.Peer { return host.self } // Close closes the host func (host *HostV2) Close() error { return host.h.Close() } // GetP2PHost returns the p2p.Host func (host *HostV2) GetP2PHost() libp2p_host.Host { return host.h } // GetPeerCount ... func (host *HostV2) GetPeerCount() int { return host.h.Peerstore().Peers().Len() } // ConnectHostPeer connects to peer host func (host *HostV2) ConnectHostPeer(peer p2p.Peer) { ctx := context.Background() addr := fmt.Sprintf("/ip4/%s/tcp/%s/ipfs/%s", peer.IP, peer.Port, peer.PeerID.Pretty()) peerAddr, err := ma.NewMultiaddr(addr) if err != nil { host.logger.Error().Err(err).Interface("peer", peer).Msg("ConnectHostPeer") return } peerInfo, err := libp2p_peerstore.InfoFromP2pAddr(peerAddr) if err != nil { host.logger.Error().Err(err).Interface("peer", peer).Msg("ConnectHostPeer") return } if err := host.h.Connect(ctx, *peerInfo); err != nil { host.logger.Warn().Err(err).Interface("peer", peer).Msg("can't connect to peer") } else { host.logger.Info().Interface("node", *peerInfo).Msg("connected to peer host") } }
[ "\"P2P_TRACEFILE\"" ]
[]
[ "P2P_TRACEFILE" ]
[]
["P2P_TRACEFILE"]
go
1
0
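The HostV2 constructor above only attaches a pubsub event tracer when P2P_TRACEFILE is set. A minimal sketch of that optional-tracing gate, with the libp2p wiring stubbed out — only the environment-variable handling mirrors the source; the function name and messages are illustrative:

package main

import (
	"fmt"
	"os"
)

// tracingStatus reports whether pubsub tracing would be enabled, based on
// the P2P_TRACEFILE environment variable, without touching libp2p itself.
func tracingStatus() string {
	traceFile := os.Getenv("P2P_TRACEFILE")
	if len(traceFile) == 0 {
		return "tracing disabled (P2P_TRACEFILE not set)"
	}
	return fmt.Sprintf("tracing enabled, JSON trace written to %s", traceFile)
}

func main() {
	fmt.Println(tracingStatus())
}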
web_app.py
#!/usr/bin/python from apscheduler.schedulers.background import BackgroundScheduler from datetime import datetime, timedelta import ConfigParser import ibmiotf.application import json import os import requests import time class MonitorApplication: DEFAULT_TAP_SIZE = 5.0 DEFAULT_ORDER_AMOUNT = 31.0 DEFAULT_MAX_STORAGE = 310.0 DEFAULT_DAYS_TO_ORDER = 1 UPDATE_CONFIG_PATH = 'config/data/current.cfg' def __init__(self, monitor_config, iot_config): self.iot_config = iot_config self.monitor_config = monitor_config self.configure_monitor(monitor_config) self.configure_iot(iot_config) self.configure_cloudant() self.configure_scheduler() def __exit__(self, exc_type, exc_value, traceback): self.disconnect() def configure_monitor(self, config): parser = ConfigParser.ConfigParser() with open(config) as f: parser.readfp(f) tap_size = parser.get('monitor', 'tap_size') order_amount = parser.get('monitor', 'order_amount') max_storage = parser.get('monitor', 'max_storage') days_to_order = parser.get('monitor', 'days_to_order') if tap_size is None or len(tap_size) == 0: tap_size = MonitorApplication.DEFAULT_TAP_SIZE if order_amount is None or len(order_amount) == 0: order_amount = MonitorApplication.DEFAULT_ORDER_AMOUNT if max_storage is None or len(max_storage) == 0: max_storage = MonitorApplication.DEFAULT_MAX_STORAGE if days_to_order is None or len(days_to_order) == 0: days_to_order = MonitorApplication.DEFAULT_DAYS_TO_ORDER self.monitor = Monitor(float(tap_size), float(order_amount), float(max_storage), float(days_to_order)) while parser.has_section('beverage' + str(len(self.monitor.beverages) + 1)): section = 'beverage' + str(len(self.monitor.beverages) + 1) name = parser.get(section, 'name') tap = parser.get(section, 'tap') storage = parser.get(section, 'storage') total_dispensed = parser.get(section, 'total_dispensed') days_dispensed = parser.get(section, 'days_dispensed') last_order = parser.get(section, 'last_order') auto_update = parser.get(section, 'auto_update') if name is None or len(name) == 0: name = 'Beverage ' + str(len(self.monitor.beverages) + 1) if tap is None or len(tap) == 0: tap = 0.0 if storage is None or len(storage) == 0: storage = 0.0 if total_dispensed is None or len(total_dispensed) == 0: total_dispensed = tap_size if days_dispensed is None or len(days_dispensed) == 0: days_dispensed = 1 if last_order is None or len(last_order) == 0: last_order = 0 if auto_update is None or len(auto_update) == 0: auto_update = False self.monitor.add_beverage(Beverage(name, float(tap), float(storage), float(total_dispensed), int(days_dispensed), long(last_order), auto_update == 'True')) def configure_iot(self, config): options = ibmiotf.application.ParseConfigFile(config) self.client = ibmiotf.application.Client(options) self.client.connect() self.client.deviceEventCallback = self.event_callback self.client.subscribeToDeviceEvents(event='dispensed') self.client.subscribeToDeviceEvents(event='refill') self.client.subscribeToDeviceEvents(event='online') self.client.subscribeToDeviceEvents(event='pouring') self.client.subscribeToDeviceEvents(event='startup') def configure_cloudant(self): self.cloudant = CloudantConnector('beverage_dispense') def configure_scheduler(self): self.sched = BackgroundScheduler() next_date = datetime.now() + timedelta(days=1) run_time = datetime(next_date.year, next_date.month, next_date.day, 0, 0, 0, 0) self.sched.add_job(self.update_event, 'date', run_date=run_time) self.sched.start() def update_event(self): for index in range(len(self.monitor.beverages)): beverage = 
self.monitor.get_beverage(index) if beverage.auto_update: self.update_beverage_analysis(index) self.sched.shutdown(wait=False) self.configure_scheduler() def event_callback(self, command): if command.event == 'startup': data = {'beverages': self.get_all_beverages()} self.send_command('status', 'info', data) else: data = json.loads(command.payload) index = int(data['beverage']) if command.event == 'dispensed': dispensed_amount = float(data['amount']) self.monitor.dispense_beverage(index, dispensed_amount) if self.monitor.order_status(index): self.publish_order(index) elif command.event == 'refill': self.monitor.refill_beverage(index) elif command.event == 'online': status = data['state'] self.monitor.toggle_online(index, status) info = {'beverages': self.get_all_beverages()} self.send_command('status', 'info', info) elif command.event == 'pouring': status = data['state'] self.monitor.toggle_pouring(index, status) self.publish_beverage(index) def publish_order(self, index): beverage = self.monitor.get_beverage(index) data = {'order_time': beverage.last_order} self.publish(index, 'order', data) def publish_beverage(self, index): data = self.get_beverage_data(index) self.publish(index, 'log', data) self.update_config(index) def publish(self, index, event, data): data['beverage'] = index self.client.publishEvent('Webpage', 'web', event, 'json', data) def send_command(self, deviceId, command, data): self.client.publishCommand('RaspberryPi', deviceId, command, 'json', data) def toggle_device_connection(self, index, command): device = 'bev' + str(index + 1) data = {'beverage': index} self.send_command(device, command, data) def update_config(self, index): beverage = self.monitor.get_beverage(index) parser = ConfigParser.ConfigParser() if os.path.exists(MonitorApplication.UPDATE_CONFIG_PATH): with open(MonitorApplication.UPDATE_CONFIG_PATH) as f: parser.readfp(f) os.remove(MonitorApplication.UPDATE_CONFIG_PATH) else: with open(self.monitor_config) as f: parser.readfp(f) config = open(MonitorApplication.UPDATE_CONFIG_PATH, 'w') section = 'beverage' + str(index + 1) parser.set(section, 'name', beverage.name) parser.set(section, 'tap', beverage.tap) parser.set(section, 'storage', beverage.storage) parser.set(section, 'total_dispensed', beverage.total_dispensed) parser.set(section, 'days_dispensed', beverage.days_dispensed) parser.set(section, 'auto_update', beverage.auto_update) if beverage.last_order != 0: parser.set(section, 'last_order', beverage.last_order) parser.write(config) config.close() def post_daily_total(self, index): beverage = self.monitor.get_beverage(index) data = { 'beverage': index + 1, 'date': int(time.time() * 1000), 'amount_dispensed': beverage.daily_total } self.cloudant.post_json(data) def get_weekly_totals(self, index): beverage = self.monitor.get_beverage(index) week_info = self.cloudant.get_data('by-bev' + str(index + 1), True, 7) data = { 'total_dispensed': beverage.total_dispensed, 'days_dispensed': beverage.days_dispensed, 'auto_update': beverage.auto_update, 'day': beverage.daily_total, 'week': week_info } return data def get_all_beverages(self): beverages = [] for index in range(len(self.monitor.beverages)): beverages.append(self.get_beverage_data(index)) return beverages def get_beverage_data(self, index): beverage = self.monitor.get_beverage(index) data = { 'name': beverage.name, 'tap': beverage.tap, 'storage': beverage.storage, 'days_left': beverage.storage * beverage.days_dispensed / beverage.total_dispensed, 'last_order': beverage.last_order, 'online': 
beverage.online, 'pouring': beverage.pouring, 'auto_update': beverage.auto_update } return data def get_system_info(self): data = { 'min_tap_size': self.monitor.MIN_TAP_SIZE, 'min_storage_size': self.monitor.MIN_STORAGE_SIZE, 'tap_size': self.monitor.tap_size, 'max_storage': self.monitor.max_storage, 'order_amount': self.monitor.order_amount, 'days_to_order': self.monitor.days_to_order } return data def update_beverage(self, index, data): if data['name'] is not None and len(data['name']) != 0: name = data['name'] self.monitor.update_name(index, name) if data['tap'] is not None and len(data['tap']) != 0: tap = float(data['tap']) self.monitor.update_tap(index, tap) if data['storage'] is not None and len(data['storage']) != 0: storage = float(data['storage']) tap = self.monitor.get_beverage(index).tap self.monitor.update_storage(index, max(tap, storage)) if self.monitor.order_status(index): self.publish_order(index) if data['average_dispensed'] is not None and len(data['average_dispensed']) != 0: average_dispensed = float(data['average_dispensed']) self.monitor.update_average_dispensed(index, average_dispensed) self.publish_beverage(index) def update_system(self, data): if data['tap_size'] is not None and len(data['tap_size']) != 0: tap_size = float(data['tap_size']) self.monitor.update_tap_size(tap_size) if data['order_amount'] is not None and len(data['order_amount']) != 0: order_amount = float(data['order_amount']) self.monitor.update_order_amount(order_amount) if data['max_storage'] is not None and len(data['max_storage']) != 0: max_storage = float(data['max_storage']) self.monitor.update_max_storage(max_storage) if data['days_to_order'] is not None and len(data['days_to_order']) != 0: days_to_order = float(data['days_to_order']) self.monitor.update_days_to_order(days_to_order) for index in range(len(self.monitor.beverages)): if self.monitor.order_status(index): self.publish_order(index) def update_order_analysis(self): for index in range(len(self.monitor.beverages)): self.update_beverage_analysis(index) time.sleep(0.2) def update_beverage_analysis(self, index): self.post_daily_total(index) self.monitor.reset_total_dispensed(index) self.publish_beverage(index) def switch_auto_update(self, index, status): self.monitor.toggle_auto_update(index, status) self.publish_beverage(index) def disconnect(self): if hasattr(self, 'client'): self.client.disconnect() if hasattr(self, 'sched'): self.sched.shutdown() class Monitor: MIN_TAP_SIZE = 1.0 MIN_STORAGE_SIZE = 1.0 def __init__(self, tap_size, order_amount, max_storage, days_to_order): self.tap_size = tap_size self.order_amount = order_amount self.max_storage = max_storage self.days_to_order = days_to_order self.beverages = [] def add_beverage(self, beverage): self.beverages.append(beverage) def get_beverage(self, index): return self.beverages[index] def update_name(self, index, name): beverage = self.get_beverage(index) beverage.name = name def update_tap(self, index, tap): beverage = self.get_beverage(index) beverage.tap = tap def update_storage(self, index, storage): beverage = self.get_beverage(index) beverage.storage = storage def update_average_dispensed(self, index, average_dispensed): beverage = self.get_beverage(index) beverage.total_dispensed = average_dispensed beverage.days_dispensed = 1 def update_tap_size(self, tap_size): self.tap_size = tap_size for beverage in self.beverages: if beverage.tap > tap_size: beverage.tap = tap_size def update_order_amount(self, order_amount): self.order_amount = order_amount def update_max_storage(self, 
max_storage): self.max_storage = max_storage def update_days_to_order(self, days_to_order): self.days_to_order = days_to_order def reset_total_dispensed(self, index): beverage = self.get_beverage(index) beverage.total_dispensed += beverage.daily_total beverage.days_dispensed += 1 beverage.daily_total = 0.0 def refill_beverage(self, index): beverage = self.get_beverage(index) if beverage.storage < self.tap_size: beverage.tap = beverage.storage else: beverage.tap = self.tap_size def dispense_beverage(self, index, dispensed_amount): beverage = self.get_beverage(index) beverage.tap -= dispensed_amount beverage.storage -= dispensed_amount if beverage.tap < 0: beverage.tap = 0.0 if beverage.storage < 0: beverage.storage = 0.0 beverage.daily_total += dispensed_amount def order_status(self, index): beverage = self.get_beverage(index) days_left = beverage.storage * beverage.days_dispensed / beverage.total_dispensed if days_left <= self.days_to_order: self.make_order(index) return True else: return False def make_order(self, index): beverage = self.get_beverage(index) beverage.storage += self.order_amount beverage.last_order = int(time.time() * 1000) def toggle_online(self, index, status): beverage = self.get_beverage(index) if status: beverage.online = True else: beverage.online = False def toggle_pouring(self, index, status): beverage = self.get_beverage(index) if status: beverage.pouring = True else: beverage.pouring = False def toggle_auto_update(self, index, status): beverage = self.get_beverage(index) beverage.auto_update = status class Beverage: def __init__(self, name, tap, storage, total_dispensed, days_dispensed, last_order, auto_update): self.name = name self.tap = tap self.storage = storage self.total_dispensed = total_dispensed self.days_dispensed = days_dispensed self.last_order = last_order self.auto_update = auto_update self.daily_total = 0.0 self.online = False self.pouring = False class CloudantConnector: def __init__(self, database): # get credentials for cloudant database if 'VCAP_SERVICES' in os.environ: # credentials are given by bluemix environment vcap = json.loads(os.getenv('VCAP_SERVICES')) if 'cloudantNoSQLDB' in vcap: creds = vcap['cloudantNoSQLDB'][0]['credentials'] self.username = creds['username'] self.password = creds['password'] host = creds['host'] elif os.path.isfile('config/bluemix/vcap-local.json'): # credentials are found locally # vcap/vcap-local.json with open('config/bluemix/vcap-local.json') as f: vcap = json.load(f) creds = vcap['cloudantNoSQLDB'][0]['credentials'] self.username = creds['username'] self.password = creds['password'] host = creds['host'] self.url = 'https://' + host + '/' + database def get_data(self, view, descending, limit): view_url = self.url + '/_design/data/_view/' + view args = {'descending': descending, 'limit': limit} response = requests.get(view_url, params=args, auth=(self.username, self.password)) if 'rows' in response.json(): return response.json()['rows'] else: return '' def post_json(self, data): return requests.post(self.url, json=data, auth=(self.username, self.password))
[]
[]
[ "VCAP_SERVICES" ]
[]
["VCAP_SERVICES"]
python
1
0
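web_app.py pulls Cloudant credentials out of the VCAP_SERVICES JSON that Cloud Foundry injects, falling back to a local vcap-local.json file. Below is a rough Go sketch of the environment-variable half of that lookup; the "cloudantNoSQLDB" key and the credential fields follow the Python code above, while the type and function names are illustrative:

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// cloudantCredentials mirrors the fields the Python code reads from the
// first bound cloudantNoSQLDB instance in VCAP_SERVICES.
type cloudantCredentials struct {
	Username string `json:"username"`
	Password string `json:"password"`
	Host     string `json:"host"`
}

func credentialsFromEnv() (*cloudantCredentials, error) {
	raw, ok := os.LookupEnv("VCAP_SERVICES")
	if !ok {
		return nil, fmt.Errorf("VCAP_SERVICES is not set")
	}
	// VCAP_SERVICES maps service labels to lists of bound instances.
	var services map[string][]struct {
		Credentials cloudantCredentials `json:"credentials"`
	}
	if err := json.Unmarshal([]byte(raw), &services); err != nil {
		return nil, err
	}
	instances := services["cloudantNoSQLDB"]
	if len(instances) == 0 {
		return nil, fmt.Errorf("no cloudantNoSQLDB service bound")
	}
	return &instances[0].Credentials, nil
}

func main() {
	creds, err := credentialsFromEnv()
	if err != nil {
		fmt.Println("could not load credentials:", err)
		return
	}
	fmt.Println("Cloudant host:", creds.Host)
}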
vkubelet/vkubelet.go
package vkubelet import ( "fmt" "log" "os" "os/signal" "strconv" "strings" "syscall" "time" "github.com/virtual-kubelet/virtual-kubelet/manager" "github.com/virtual-kubelet/virtual-kubelet/providers/azure" "github.com/virtual-kubelet/virtual-kubelet/providers/hypersh" "github.com/virtual-kubelet/virtual-kubelet/providers/web" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" ) // Server masquarades itself as a kubelet and allows for the virtual node to be backed by non-vm/node providers. type Server struct { nodeName string namespace string k8sClient *kubernetes.Clientset taint string provider Provider podWatcher watch.Interface resourceManager *manager.ResourceManager } // New creates a new virtual-kubelet server. func New(nodeName, operatingSystem, namespace, kubeConfig, taint, provider, providerConfig string) (*Server, error) { var ( config *rest.Config err error ) // Check if the kubeConfig file exists. if _, err := os.Stat(kubeConfig); !os.IsNotExist(err) { // Get the kubeconfig from the filepath. config, err = clientcmd.BuildConfigFromFlags("", kubeConfig) if err != nil { return nil, err } } else { // Set to in-cluster config. config, err = rest.InClusterConfig() if err != nil { return nil, err } } clientset, err := kubernetes.NewForConfig(config) if err != nil { return nil, err } rm := manager.NewResourceManager(clientset) daemonEndpointPortEnv := os.Getenv("KUBELET_PORT") if daemonEndpointPortEnv == "" { daemonEndpointPortEnv = "10250" } i64value, err := strconv.ParseInt(daemonEndpointPortEnv, 10, 32) daemonEndpointPort := int32(i64value) var p Provider switch provider { case "azure": internalIP := os.Getenv("VKUBELET_POD_IP") if err != nil { return nil, err } p, err = azure.NewACIProvider(providerConfig, rm, nodeName, operatingSystem, internalIP, daemonEndpointPort) if err != nil { return nil, err } case "hyper": p, err = hypersh.NewHyperProvider(providerConfig, rm, nodeName, operatingSystem) if err != nil { return nil, err } case "web": p, err = web.NewBrokerProvider(nodeName, operatingSystem, daemonEndpointPort) if err != nil { return nil, err } default: fmt.Printf("Provider '%s' is not supported\n", provider) } s := &Server{ namespace: namespace, nodeName: nodeName, taint: taint, k8sClient: clientset, resourceManager: rm, provider: p, } if err = s.registerNode(); err != nil { return s, err } go ApiserverStart(p) tick := time.Tick(5 * time.Second) go func() { for range tick { s.updateNode() s.updatePodStatuses() } }() return s, nil } // registerNode registers this virtual node with the Kubernetes API. 
func (s *Server) registerNode() error { taints := make([]corev1.Taint, 0) if s.taint != "" { taints = append(taints, corev1.Taint{ Key: s.taint, Effect: corev1.TaintEffectNoSchedule, }) } node := &corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: s.nodeName, Labels: map[string]string{ "type": "virtual-kubelet", "kubernetes.io/role": "agent", "beta.kubernetes.io/os": strings.ToLower(s.provider.OperatingSystem()), }, Annotations: map[string]string{ "alpha.service-controller.kubernetes.io/exclude-balancer": "true", }, }, Spec: corev1.NodeSpec{ Taints: taints, }, Status: corev1.NodeStatus{ NodeInfo: corev1.NodeSystemInfo{ OperatingSystem: s.provider.OperatingSystem(), Architecture: "amd64", KubeletVersion: "v1.8.3", }, Capacity: s.provider.Capacity(), Allocatable: s.provider.Capacity(), Conditions: s.provider.NodeConditions(), Addresses: s.provider.NodeAddresses(), DaemonEndpoints: *s.provider.NodeDaemonEndpoints(), }, } if _, err := s.k8sClient.CoreV1().Nodes().Create(node); err != nil && !errors.IsAlreadyExists(err) { return err } log.Printf("Node '%s' with OS type '%s' registered\n", node.Name, node.Status.NodeInfo.OperatingSystem) return nil } // Run starts the server, registers it with Kubernetes and begins watching/reconciling the cluster. // Run will block until Stop is called or a SIGINT or SIGTERM signal is received. func (s *Server) Run() error { sig := make(chan os.Signal, 1) signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM) go func() { <-sig s.Stop() }() opts := metav1.ListOptions{ FieldSelector: fields.OneTermEqualSelector("spec.nodeName", s.nodeName).String(), } pods, err := s.k8sClient.CoreV1().Pods(s.namespace).List(opts) if err != nil { log.Fatal(err) } s.resourceManager.SetPods(pods) s.reconcile() opts.ResourceVersion = pods.ResourceVersion s.podWatcher, err = s.k8sClient.CoreV1().Pods(s.namespace).Watch(opts) if err != nil { log.Fatal(err) } for { select { case ev, ok := <-s.podWatcher.ResultChan(): if !ok { return nil } switch ev.Type { case watch.Added: s.resourceManager.AddPod(ev.Object.(*corev1.Pod)) case watch.Modified: s.resourceManager.UpdatePod(ev.Object.(*corev1.Pod)) case watch.Deleted: s.resourceManager.DeletePod(ev.Object.(*corev1.Pod)) } s.reconcile() } } } // Stop shutsdown the server. // It does not shutdown pods assigned to the virtual node. func (s *Server) Stop() { if s.podWatcher != nil { s.podWatcher.Stop() } } // updateNode updates the node status within Kubernetes with updated NodeConditions. func (s *Server) updateNode() { opts := metav1.GetOptions{} n, err := s.k8sClient.CoreV1().Nodes().Get(s.nodeName, opts) if err != nil { log.Println("Failed to retrieve node:", err) return } n.ResourceVersion = "" // Blank out resource version to prevent object has been modified error n.Status.Conditions = s.provider.NodeConditions() n, err = s.k8sClient.CoreV1().Nodes().UpdateStatus(n) if err != nil { log.Println("Failed to update node:", err) return } } // reconcile is the main reconiliation loop that compares differences between Kubernetes and the active provider and reconciles the differences. 
func (s *Server) reconcile() { providerPods, err := s.provider.GetPods() if err != nil { log.Println(err) return } for _, pod := range providerPods { // Delete pods that don't exist in Kubernetes if p := s.resourceManager.GetPod(pod.Name); p == nil { if err := s.deletePod(pod); err != nil { log.Printf("Error deleting pod '%s': %s\n", pod.Name, err) continue } } } // Create any pods for k8s pods that don't exist in the provider pods := s.resourceManager.GetPods() for _, pod := range pods { p, err := s.provider.GetPod(pod.Namespace, pod.Name) if err != nil { log.Printf("Error retrieving pod '%s' from provider: %s\n", pod.Name, err) } if pod.DeletionTimestamp == nil && pod.Status.Phase != corev1.PodFailed && p == nil { if err := s.createPod(pod); err != nil { log.Printf("Error creating pod '%s': %s\n", pod.Name, err) continue } log.Printf("Pod '%s' created.\n", pod.Name) } // Delete pod if DeletionTimestamp set if pod.DeletionTimestamp != nil { var err error if err = s.deletePod(pod); err != nil { log.Printf("Error deleting pod '%s': %s\n", pod.Name, err) continue } } } } func (s *Server) createPod(pod *corev1.Pod) error { if err := s.populateSecretsAndConfigMapsInEnv(pod); err != nil { return err } if origErr := s.provider.CreatePod(pod); origErr != nil { pod.ResourceVersion = "" // Blank out resource version to prevent object has been modified error pod.Status.Phase = corev1.PodFailed pod.Status.Reason = "ProviderFailed" pod.Status.Message = origErr.Error() _, err := s.k8sClient.CoreV1().Pods(pod.Namespace).UpdateStatus(pod) if err != nil { log.Println("Failed to update pod status:", err) return origErr } return origErr } return nil } func (s *Server) deletePod(pod *corev1.Pod) error { var delErr error if delErr = s.provider.DeletePod(pod); delErr != nil && errors.IsNotFound(delErr) { return fmt.Errorf("Error deleting pod '%s': %s", pod.Name, delErr) } if !errors.IsNotFound(delErr) { var grace int64 = 0 if err := s.k8sClient.CoreV1().Pods(pod.Namespace).Delete(pod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &grace}); err != nil && errors.IsNotFound(err) { if errors.IsNotFound(err) { return nil } return fmt.Errorf("Failed to delete kubernetes pod: %s", err) } log.Printf("Pod '%s' deleted.\n", pod.Name) } return nil } // updatePodStatuses syncs the providers pod status with the kubernetes pod status. func (s *Server) updatePodStatuses() { // Update all the pods with the provider status. 
pods := s.resourceManager.GetPods() for _, pod := range pods { if pod.DeletionTimestamp != nil && pod.Status.Phase == corev1.PodSucceeded { continue } status, err := s.provider.GetPodStatus(pod.Namespace, pod.Name) if err != nil { log.Printf("Error retrieving pod '%s' status from provider: %s\n", pod.Name, err) return } // Update the pod's status if status != nil { pod.Status = *status s.k8sClient.CoreV1().Pods(pod.Namespace).UpdateStatus(pod) } } } // populateSecretsAndConfigMapsInEnv populates Secrets and ConfigMap into environment variables func (s *Server) populateSecretsAndConfigMapsInEnv(pod *corev1.Pod) error { for _, c := range pod.Spec.Containers { for i, e := range c.Env { if e.ValueFrom != nil { // Populate ConfigMaps to Env if e.ValueFrom.ConfigMapKeyRef != nil { vf := e.ValueFrom.ConfigMapKeyRef cm, err := s.resourceManager.GetConfigMap(vf.Name, pod.Namespace) if vf.Optional != nil && !*vf.Optional && errors.IsNotFound(err) { return fmt.Errorf("ConfigMap %s is required by Pod %s and does not exist", vf.Name, pod.Name) } if err != nil { return fmt.Errorf("Error retrieving ConfigMap %s required by Pod %s: %s", vf.Name, pod.Name, err) } var ok bool if c.Env[i].Value, ok = cm.Data[vf.Key]; !ok { return fmt.Errorf("ConfigMap %s key %s is required by Pod %s and does not exist", vf.Name, vf.Key, pod.Name) } continue } // Populate Secrets to Env if e.ValueFrom.SecretKeyRef != nil { vf := e.ValueFrom.SecretKeyRef sec, err := s.resourceManager.GetSecret(vf.Name, pod.Namespace) if vf.Optional != nil && !*vf.Optional && errors.IsNotFound(err) { return fmt.Errorf("Secret %s is required by Pod %s and does not exist", vf.Name, pod.Name) } v, ok := sec.Data[vf.Key] if !ok { return fmt.Errorf("Secret %s key %s is required by Pod %s and does not exist", vf.Name, vf.Key, pod.Name) } c.Env[i].Value = string(v) continue } // TODO: Populate Downward API to Env if e.ValueFrom.FieldRef != nil { continue } // TODO: Populate resource requests if e.ValueFrom.ResourceFieldRef != nil { continue } } } } return nil }
[ "\"KUBELET_PORT\"", "\"VKUBELET_POD_IP\"" ]
[]
[ "VKUBELET_POD_IP", "KUBELET_PORT" ]
[]
["VKUBELET_POD_IP", "KUBELET_PORT"]
go
2
0
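The virtual-kubelet server above falls back to port 10250 when KUBELET_PORT is unset and reads VKUBELET_POD_IP for the Azure provider. A small sketch of that defaulting-and-parsing pattern follows; unlike the source, the parse error is checked here, and the helper name is illustrative:

package main

import (
	"fmt"
	"os"
	"strconv"
)

// daemonEndpointPort reads KUBELET_PORT, defaulting to 10250, and parses it
// into an int32 the way the server above does.
func daemonEndpointPort() (int32, error) {
	portEnv := os.Getenv("KUBELET_PORT")
	if portEnv == "" {
		portEnv = "10250"
	}
	v, err := strconv.ParseInt(portEnv, 10, 32)
	if err != nil {
		return 0, fmt.Errorf("invalid KUBELET_PORT %q: %w", portEnv, err)
	}
	return int32(v), nil
}

func main() {
	port, err := daemonEndpointPort()
	if err != nil {
		fmt.Println(err)
		return
	}
	internalIP := os.Getenv("VKUBELET_POD_IP") // consumed only by the Azure provider above
	fmt.Printf("daemon endpoint port: %d, internal IP: %q\n", port, internalIP)
}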
src/main/java/eisenwave/nbtpad/Main.java
package eisenwave.nbtpad; import eisenwave.nbt.*; import eisenwave.nbt.io.*; import joptsimple.*; import java.io.File; import java.io.IOException; import java.util.List; public class Main { private static final int EXIT_IOERR = 74; private static boolean compress = true; public static void main(String... args) throws IOException { OptionParser parser = new OptionParser(); parser.nonOptions("the file to print or edit").ofType(File.class).isRequired(); //parser.accepts("a", "the action to perform (print|edit)").withRequiredArg(); parser.accepts("p", "print the NBT file"); parser.accepts("e", "edit the NBT file").withRequiredArg().describedAs("editor"); parser.accepts("r", "read Mojangson file and save as NBT").withRequiredArg().ofType(File.class); parser.accepts("u", "uncompressed mode"); OptionSet options = parser.parse(args); List<?> nonOpt = options.nonOptionArguments(); if (nonOpt.isEmpty()) { parser.printHelpOn(System.out); return; } String path = String.valueOf(nonOpt.get(0)); if (options.has("u")) compress = false; if (options.has("p")) { nbtPrint(path); } else if (options.has("e")) { //String e = options.hasArgument("e")? String.valueOf(options.valueOf("e")) : System.getenv("EDITOR"); String e = (String) options.valueOf("e"); /* if (e == null) { System.err.println("Must either provide editor with '-e' option or set 'EDITOR' in environment"); return; } */ nbtEdit(path, e); } else if (options.has("r")) { File r = (File) options.valueOf("r"); nbtRead(r, path); } else { parser.printHelpOn(System.err); } } private static void nbtPrint(String path) { File file = new File(path); if (!file.exists()) { System.err.println("File doesn't exist: "+file.getPath()); return; } NBTNamedTag rootTag = readNBT(file); if (rootTag == null) { System.err.println("Reading NBT file failed"); return; } try { new MojangsonSerializer(true).toStream(rootTag, System.out); } catch (IOException e) { e.printStackTrace(); } } private static void nbtEdit(String path, String editor) { File file = new File(path); if (!file.exists()) { System.err.println("File doesn't exist: "+file.getPath()); return; } NBTNamedTag sourceNBT = readNBT(file); if (sourceNBT == null) { System.err.println("Reading source-NBT file failed: " + file); return; } File editFile; try { editFile = File.createTempFile("nbtpad", ".mson", null); } catch (IOException e) { e.printStackTrace(); return; } editFile.deleteOnExit(); //System.out.println("Created temporary editing file "+editFile); try { new MojangsonSerializer(true).toFile(sourceNBT, editFile); } catch (IOException ex) { ex.printStackTrace(); System.err.println("Writing temporary Mojangson file failed: " + editFile); return; } if (!openEditor(editor, editFile.getPath())) { System.err.println("Editing temporary Mojangson file failed: " + editFile); return; } NBTNamedTag targetNBT = readMSON(editFile); if (targetNBT == null) { System.err.println("Reading temporary Mojangson file failed: " + editFile); return; } writeNBT(targetNBT, file); } private static void nbtRead(File readFile, String path) { if (!readFile.exists()) { System.err.println("File doesn't exist: "+readFile.getPath()); return; } NBTNamedTag rootTag = readMSON(readFile); if (rootTag == null) { System.err.println("Reading NBT file failed"); return; } writeNBT(rootTag, new File(path)); } private static boolean openEditor(String editor, String path) { ProcessBuilder builder = new ProcessBuilder() .inheritIO() .command(editor, path); try { Process process = builder.start(); process.waitFor(); } catch (IOException | InterruptedException 
ex) { ex.printStackTrace(); return false; } return true; } private static NBTNamedTag readNBT(File file) { try { return new NBTDeserializer(compress).fromFile(file); } catch (IOException ex) { ex.printStackTrace(); System.exit(EXIT_IOERR); return null; } } private static void writeNBT(NBTNamedTag nbt, File file) { try { new NBTSerializer(compress).toFile(nbt, file); } catch (IOException ex) { ex.printStackTrace(); System.exit(EXIT_IOERR); } } private static NBTNamedTag readMSON(File file) { try { return new MojangsonDeserializer().fromFile(file); } catch (IOException ex) { ex.printStackTrace(); System.exit(EXIT_IOERR); return null; } } }
[ "\"EDITOR\"" ]
[]
[ "EDITOR" ]
[]
["EDITOR"]
java
1
0
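Main.java writes the NBT data to a temporary Mojangson file, launches an external editor on it, and waits for the editor to exit. The Go sketch below shows the same spawn-and-wait flow; note that the fallback to the EDITOR environment variable is only commented-out behaviour in the Java source, and the path used here is a placeholder:

package main

import (
	"fmt"
	"os"
	"os/exec"
)

// openEditor runs the given editor on path, wired to the current terminal,
// and blocks until it exits -- the same flow as openEditor() in Main.java.
func openEditor(editor, path string) error {
	if editor == "" {
		editor = os.Getenv("EDITOR")
	}
	if editor == "" {
		return fmt.Errorf("no editor given and EDITOR is not set")
	}
	cmd := exec.Command(editor, path)
	cmd.Stdin = os.Stdin
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd.Run()
}

func main() {
	if err := openEditor("", "/tmp/example.mson"); err != nil {
		fmt.Println("editing failed:", err)
	}
}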
share/qt/extract_strings_qt.py
#!/usr/bin/env python # Copyright (c) 2012-2016 The Bitcoin Core developers # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. ''' Extract _("...") strings for translation and convert to Qt stringdefs so that they can be picked up by Qt linguist. ''' from __future__ import division,print_function,unicode_literals from subprocess import Popen, PIPE import operator import os import sys OUT_CPP="qt/mulecoinstrings.cpp" EMPTY=['""'] def parse_po(text): """ Parse 'po' format produced by xgettext. Return a list of (msgid,msgstr) tuples. """ messages = [] msgid = [] msgstr = [] in_msgid = False in_msgstr = False for line in text.split('\n'): line = line.rstrip('\r') if line.startswith('msgid '): if in_msgstr: messages.append((msgid, msgstr)) in_msgstr = False # message start in_msgid = True msgid = [line[6:]] elif line.startswith('msgstr '): in_msgid = False in_msgstr = True msgstr = [line[7:]] elif line.startswith('"'): if in_msgid: msgid.append(line) if in_msgstr: msgstr.append(line) if in_msgstr: messages.append((msgid, msgstr)) return messages files = sys.argv[1:] # xgettext -n --keyword=_ $FILES XGETTEXT=os.getenv('XGETTEXT', 'xgettext') if not XGETTEXT: print('Cannot extract strings: xgettext utility is not installed or not configured.',file=sys.stderr) print('Please install package "gettext" and re-run \'./configure\'.',file=sys.stderr) sys.exit(1) child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE) (out, err) = child.communicate() messages = parse_po(out.decode('utf-8')) f = open(OUT_CPP, 'w', encoding="utf8") f.write(""" #include <QtGlobal> // Automatically generated by extract_strings_qt.py #ifdef __GNUC__ #define UNUSED __attribute__((unused)) #else #define UNUSED #endif """) f.write('static const char UNUSED *mulecoin_strings[] = {\n') f.write('QT_TRANSLATE_NOOP("mulecoin-core", "%s"),\n' % (os.getenv('PACKAGE_NAME'),)) f.write('QT_TRANSLATE_NOOP("mulecoin-core", "%s"),\n' % (os.getenv('COPYRIGHT_HOLDERS'),)) if os.getenv('COPYRIGHT_HOLDERS_SUBSTITUTION') != os.getenv('PACKAGE_NAME'): f.write('QT_TRANSLATE_NOOP("mulecoin-core", "%s"),\n' % (os.getenv('COPYRIGHT_HOLDERS_SUBSTITUTION'),)) messages.sort(key=operator.itemgetter(0)) for (msgid, msgstr) in messages: if msgid != EMPTY: f.write('QT_TRANSLATE_NOOP("mulecoin-core", %s),\n' % ('\n'.join(msgid))) f.write('};\n') f.close()
[]
[]
[ "COPYRIGHT_HOLDERS", "PACKAGE_NAME", "XGETTEXT", "COPYRIGHT_HOLDERS_SUBSTITUTION" ]
[]
["COPYRIGHT_HOLDERS", "PACKAGE_NAME", "XGETTEXT", "COPYRIGHT_HOLDERS_SUBSTITUTION"]
python
4
0
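extract_strings_qt.py resolves the xgettext binary and the package metadata from environment variables that the build system exports, then shells out to xgettext. A hypothetical Go sketch of that env-with-default plus subprocess pattern; the defaults and the "main.cpp" argument are placeholders, not values from the source:

package main

import (
	"fmt"
	"os"
	"os/exec"
)

// getenvDefault mirrors Python's os.getenv(name, default) for non-empty values.
func getenvDefault(name, def string) string {
	if v := os.Getenv(name); v != "" {
		return v
	}
	return def
}

func main() {
	xgettext := getenvDefault("XGETTEXT", "xgettext")
	pkg := getenvDefault("PACKAGE_NAME", "mulecoin")

	// Equivalent of: xgettext --output=- -n --keyword=_ <files>
	cmd := exec.Command(xgettext, "--output=-", "-n", "--keyword=_", "main.cpp")
	out, err := cmd.Output()
	if err != nil {
		fmt.Println("xgettext failed:", err)
		return
	}
	fmt.Printf("extracted %d bytes of PO output for package %s\n", len(out), pkg)
}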
test/base.go
package test

import (
	fs "github.com/crawlab-team/crawlab-fs"
	"os"
	"testing"
	"time"
)

func init() {
	var err error
	T, err = NewTest()
	if err != nil {
		panic(err)
	}
}

var T *Test

type Test struct {
	m fs.Manager
}

func (t *Test) Setup(t2 *testing.T) {
	t.Cleanup()
	t2.Cleanup(t.Cleanup)
}

func (t *Test) Cleanup() {
	_ = T.m.DeleteDir("/test")
	// wait to avoid caching
	time.Sleep(200 * time.Millisecond)
}

func NewTest() (res *Test, err error) {
	// test
	t := &Test{}

	// filer url
	filerUrl := os.Getenv("CRAWLAB_FILER_URL")
	if filerUrl == "" {
		filerUrl = "http://localhost:8888"
	}

	// manager
	t.m, err = fs.NewSeaweedFsManager(
		fs.WithFilerUrl(filerUrl),
		fs.WithTimeout(10*time.Second),
	)
	if err != nil {
		return nil, err
	}

	return t, nil
}
[ "\"CRAWLAB_FILER_URL\"" ]
[]
[ "CRAWLAB_FILER_URL" ]
[]
["CRAWLAB_FILER_URL"]
go
1
0
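The harness above only consults CRAWLAB_FILER_URL, so pointing the tests at a non-default SeaweedFS filer is just a matter of exporting that variable before the tests construct the manager. A short usage sketch (the URL is illustrative):

package main

import (
	"fmt"
	"os"
)

// Setting CRAWLAB_FILER_URL before NewTest() runs (for example from a
// TestMain) is enough to redirect the whole suite.
func main() {
	if err := os.Setenv("CRAWLAB_FILER_URL", "http://filer.internal:8888"); err != nil {
		fmt.Println("setenv failed:", err)
		return
	}
	fmt.Println("tests would now target", os.Getenv("CRAWLAB_FILER_URL"))
}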
registry/server/tag_cleanup.go
package server import ( "context" "fmt" "log" "regexp" "strconv" "time" ) var TagMarkTimeKey = "tagMarkedForDeletionTime" var topicRegex = regexp.MustCompile(".*") type TagCleaner struct { running bool } // RunTagCleanup is regularly checks for tags that are stale and need clean up. func (tc *TagCleaner) RunTagCleanup(s *Server, ctx context.Context, c Config) { tc.running = true // Interval timer. t := time.NewTicker(time.Duration(c.TagCleanupFrequencyMinutes) * time.Second) defer t.Stop() for tc.running { <-t.C err := s.MarkForDeletion(time.Now) if err != nil { log.Println(err) continue } s.DeleteStaleTags(time.Now, c) } } // MarkForDeletion marks stored tags that have been stranded without an associated kafka resource. func (s *Server) MarkForDeletion(now func() time.Time) error { markTimeMinutes := fmt.Sprint(now().Unix()) // Get all brokers from ZK. brokers, errs := s.ZK.GetAllBrokerMeta(false) if errs != nil { return ErrFetchingBrokers } // Get all topics from ZK topics, err := s.ZK.GetTopics([]*regexp.Regexp{topicRegex}) topicSet := TopicSetFromSlice(topics) if err != nil { return ErrFetchingTopics } allTags, err := s.Tags.Store.GetAllTags() if err != nil { return err } // Add a marker tag with timestamp to any dangling tagset whose associated kafka resource no longer exists. for kafkaObject, tagSet := range allTags { switch kafkaObject.Type { case "broker": brokerId, err := strconv.Atoi(kafkaObject.ID) if err != nil { log.Println(fmt.Printf("Found non int broker ID %s in tag cleanup", kafkaObject.ID)) } if _, exists := brokers[brokerId]; !exists { tagSet[TagMarkTimeKey] = markTimeMinutes } else { delete(tagSet, TagMarkTimeKey) } case "topic": if _, exists := topicSet[kafkaObject.ID]; !exists { tagSet[TagMarkTimeKey] = markTimeMinutes } else { delete(tagSet, TagMarkTimeKey) } } err := s.Tags.Store.SetTags(kafkaObject, tagSet) // Persist any changes if err != nil { return err } } return nil } // DeleteStaleTags deletes any tags that have not had a kafka resource associated with them. func (s *Server) DeleteStaleTags(now func() time.Time, c Config) { sweepTime := now().Unix() allTags, _ := s.Tags.Store.GetAllTags() for kafkaObject, tags := range allTags { markTag, exists := tags[TagMarkTimeKey] if !exists { continue } markTime, err := strconv.Atoi(markTag) if err != nil { log.Printf("Found non timestamp tag %s in stale tag marker\n", markTag) } if sweepTime-int64(markTime) > int64(c.TagAllowedStalenessMinutes*60) { keys := make([]string, len(tags)) i := 0 for k := range tags { keys[i] = k i++ } s.Tags.Store.DeleteTags(kafkaObject, keys) } } } // TopicSetFromSlice converts a slice into a TopicSet for convenience func TopicSetFromSlice(s []string) TopicSet { var ts = TopicSet{} for _, t := range s { ts[t] = nil } return ts }
[]
[]
[]
[]
[]
go
null
null
null
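tag_cleanup.go marks stranded tags with a timestamp and later sweeps any tag whose mark has outlived the allowed staleness window. The core comparison made by DeleteStaleTags can be isolated as below; the durations used in main are made up for illustration:

package main

import (
	"fmt"
	"time"
)

// isStale reports whether a tag marked at markTime has exceeded the allowed
// staleness window at sweepTime -- the same arithmetic DeleteStaleTags uses.
func isStale(markTime, sweepTime time.Time, allowedStalenessMinutes int) bool {
	return sweepTime.Unix()-markTime.Unix() > int64(allowedStalenessMinutes*60)
}

func main() {
	mark := time.Now().Add(-90 * time.Minute)
	fmt.Println("stale with 60 min allowed:", isStale(mark, time.Now(), 60))
	fmt.Println("stale with 120 min allowed:", isStale(mark, time.Now(), 120))
}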
vendor/k8s.io/kubernetes/test/e2e/load.go
/* Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package e2e import ( "fmt" "math" "math/rand" "net" "net/http" "os" "strconv" "sync" "time" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/pkg/apis/extensions" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/client/restclient" "k8s.io/kubernetes/pkg/client/transport" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/runtime/schema" "k8s.io/kubernetes/pkg/util/intstr" utilnet "k8s.io/kubernetes/pkg/util/net" "k8s.io/kubernetes/test/e2e/framework" testutils "k8s.io/kubernetes/test/utils" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) const ( smallGroupSize = 5 mediumGroupSize = 30 bigGroupSize = 250 smallGroupName = "load-small" mediumGroupName = "load-medium" bigGroupName = "load-big" smallGroupBatchSize = 30 mediumGroupBatchSize = 5 bigGroupBatchSize = 1 // We start RCs/Services/pods/... in different namespace in this test. // nodeCountPerNamespace determines how many namespaces we will be using // depending on the number of nodes in the underlying cluster. nodeCountPerNamespace = 100 ) // This test suite can take a long time to run, so by default it is added to // the ginkgo.skip list (see driver.go). // To run this suite you must explicitly ask for it by setting the // -t/--test flag or ginkgo.focus flag. var _ = framework.KubeDescribe("Load capacity", func() { var clientset clientset.Interface var nodeCount int var ns string var configs []testutils.RunObjectConfig var secretConfigs []*testutils.SecretConfig // Gathers metrics before teardown // TODO add flag that allows to skip cleanup on failure AfterEach(func() { // Verify latency metrics highLatencyRequests, err := framework.HighLatencyRequests(clientset) framework.ExpectNoError(err, "Too many instances metrics above the threshold") Expect(highLatencyRequests).NotTo(BeNumerically(">", 0)) }) // We assume a default throughput of 10 pods/second throughput. // We may want to revisit it in the future. // However, this can be overriden by LOAD_TEST_THROUGHPUT env var. throughput := 10 if throughputEnv := os.Getenv("LOAD_TEST_THROUGHPUT"); throughputEnv != "" { if newThroughput, err := strconv.Atoi(throughputEnv); err == nil { throughput = newThroughput } } // Explicitly put here, to delete namespace at the end of the test // (after measuring latency metrics, etc.). 
options := framework.FrameworkOptions{ ClientQPS: float32(math.Max(50.0, float64(2*throughput))), ClientBurst: int(math.Max(100.0, float64(4*throughput))), } f := framework.NewFramework("load", options, nil) f.NamespaceDeletionTimeout = time.Hour BeforeEach(func() { clientset = f.ClientSet ns = f.Namespace.Name nodes := framework.GetReadySchedulableNodesOrDie(clientset) nodeCount = len(nodes.Items) Expect(nodeCount).NotTo(BeZero()) // Terminating a namespace (deleting the remaining objects from it - which // generally means events) can affect the current run. Thus we wait for all // terminating namespace to be finally deleted before starting this test. err := framework.CheckTestingNSDeletedExcept(clientset, ns) framework.ExpectNoError(err) framework.ExpectNoError(framework.ResetMetrics(clientset)) }) type Load struct { podsPerNode int image string command []string // What kind of resource we want to create kind schema.GroupKind services bool secretsPerPod int daemonsPerNode int } loadTests := []Load{ // The container will consume 1 cpu and 512mb of memory. {podsPerNode: 3, image: "jess/stress", command: []string{"stress", "-c", "1", "-m", "2"}, kind: api.Kind("ReplicationController")}, {podsPerNode: 30, image: "gcr.io/google_containers/serve_hostname:v1.4", kind: api.Kind("ReplicationController")}, } for _, testArg := range loadTests { feature := "ManualPerformance" if testArg.podsPerNode == 30 && testArg.kind == api.Kind("ReplicationController") { feature = "Performance" } name := fmt.Sprintf("[Feature:%s] should be able to handle %v pods per node %v with %v secrets", feature, testArg.podsPerNode, testArg.kind, testArg.secretsPerPod) itArg := testArg itArg.services = os.Getenv("CREATE_SERVICES") == "true" It(name, func() { // Create a number of namespaces. 
namespaceCount := (nodeCount + nodeCountPerNamespace - 1) / nodeCountPerNamespace namespaces, err := CreateNamespaces(f, namespaceCount, fmt.Sprintf("load-%v-nodepods", itArg.podsPerNode)) framework.ExpectNoError(err) totalPods := (itArg.podsPerNode - itArg.daemonsPerNode) * nodeCount configs, secretConfigs = generateConfigs(totalPods, itArg.image, itArg.command, namespaces, itArg.kind, itArg.secretsPerPod) if itArg.services { framework.Logf("Creating services") services := generateServicesForConfigs(configs) for _, service := range services { _, err := clientset.Core().Services(service.Namespace).Create(service) framework.ExpectNoError(err) } framework.Logf("%v Services created.", len(services)) defer func(services []*v1.Service) { framework.Logf("Starting to delete services...") for _, service := range services { err := clientset.Core().Services(service.Namespace).Delete(service.Name, nil) framework.ExpectNoError(err) } framework.Logf("Services deleted") }(services) } else { framework.Logf("Skipping service creation") } // Create all secrets for i := range secretConfigs { secretConfigs[i].Run() defer secretConfigs[i].Stop() } // StartDeamon if needed for i := 0; i < itArg.daemonsPerNode; i++ { daemonName := fmt.Sprintf("load-daemon-%v", i) daemonConfig := &testutils.DaemonConfig{ Client: f.ClientSet, Name: daemonName, Namespace: f.Namespace.Name, LogFunc: framework.Logf, } daemonConfig.Run() defer func(config *testutils.DaemonConfig) { framework.ExpectNoError(framework.DeleteResourceAndPods( f.ClientSet, f.InternalClientset, extensions.Kind("DaemonSet"), config.Namespace, config.Name, )) }(daemonConfig) } // Simulate lifetime of RC: // * create with initial size // * scale RC to a random size and list all pods // * scale RC to a random size and list all pods // * delete it // // This will generate ~5 creations/deletions per second assuming: // - X small RCs each 5 pods [ 5 * X = totalPods / 2 ] // - Y medium RCs each 30 pods [ 30 * Y = totalPods / 4 ] // - Z big RCs each 250 pods [ 250 * Z = totalPods / 4] // We would like to spread creating replication controllers over time // to make it possible to create/schedule them in the meantime. // Currently we assume <throughput> pods/second average throughput. // We may want to revisit it in the future. framework.Logf("Starting to create ReplicationControllers...") creatingTime := time.Duration(totalPods/throughput) * time.Second createAllResources(configs, creatingTime) By("============================================================================") // We would like to spread scaling replication controllers over time // to make it possible to create/schedule & delete them in the meantime. // Currently we assume that <throughput> pods/second average throughput. // The expected number of created/deleted pods is less than totalPods/3. scalingTime := time.Duration(totalPods/(3*throughput)) * time.Second framework.Logf("Starting to scale ReplicationControllers first time...") scaleAllResources(configs, scalingTime) By("============================================================================") framework.Logf("Starting to scale ReplicationControllers second time...") scaleAllResources(configs, scalingTime) By("============================================================================") // Cleanup all created replication controllers. // Currently we assume <throughput> pods/second average deletion throughput. // We may want to revisit it in the future. 
deletingTime := time.Duration(totalPods/throughput) * time.Second framework.Logf("Starting to delete ReplicationControllers...") deleteAllResources(configs, deletingTime) }) } }) func createClients(numberOfClients int) ([]*clientset.Clientset, []*internalclientset.Clientset, error) { clients := make([]*clientset.Clientset, numberOfClients) internalClients := make([]*internalclientset.Clientset, numberOfClients) for i := 0; i < numberOfClients; i++ { config, err := framework.LoadConfig() Expect(err).NotTo(HaveOccurred()) config.QPS = 100 config.Burst = 200 if framework.TestContext.KubeAPIContentType != "" { config.ContentType = framework.TestContext.KubeAPIContentType } // For the purpose of this test, we want to force that clients // do not share underlying transport (which is a default behavior // in Kubernetes). Thus, we are explicitly creating transport for // each client here. transportConfig, err := config.TransportConfig() if err != nil { return nil, nil, err } tlsConfig, err := transport.TLSConfigFor(transportConfig) if err != nil { return nil, nil, err } config.Transport = utilnet.SetTransportDefaults(&http.Transport{ Proxy: http.ProxyFromEnvironment, TLSHandshakeTimeout: 10 * time.Second, TLSClientConfig: tlsConfig, MaxIdleConnsPerHost: 100, Dial: (&net.Dialer{ Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, }).Dial, }) // Overwrite TLS-related fields from config to avoid collision with // Transport field. config.TLSClientConfig = restclient.TLSClientConfig{} c, err := clientset.NewForConfig(config) if err != nil { return nil, nil, err } clients[i] = c internalClient, err := internalclientset.NewForConfig(config) if err != nil { return nil, nil, err } internalClients[i] = internalClient } return clients, internalClients, nil } func computePodCounts(total int) (int, int, int) { // Small RCs owns ~0.5 of total number of pods, medium and big RCs ~0.25 each. // For example for 3000 pods (100 nodes, 30 pods per node) there are: // - 300 small RCs each 5 pods // - 25 medium RCs each 30 pods // - 3 big RCs each 250 pods bigGroupCount := total / 4 / bigGroupSize total -= bigGroupCount * bigGroupSize mediumGroupCount := total / 3 / mediumGroupSize total -= mediumGroupCount * mediumGroupSize smallGroupCount := total / smallGroupSize return smallGroupCount, mediumGroupCount, bigGroupCount } func generateConfigs( totalPods int, image string, command []string, nss []*v1.Namespace, kind schema.GroupKind, secretsPerPod int, ) ([]testutils.RunObjectConfig, []*testutils.SecretConfig) { configs := make([]testutils.RunObjectConfig, 0) secretConfigs := make([]*testutils.SecretConfig, 0) smallGroupCount, mediumGroupCount, bigGroupCount := computePodCounts(totalPods) newConfigs, newSecretConfigs := generateConfigsForGroup(nss, smallGroupName, smallGroupSize, smallGroupCount, image, command, kind, secretsPerPod) configs = append(configs, newConfigs...) secretConfigs = append(secretConfigs, newSecretConfigs...) newConfigs, newSecretConfigs = generateConfigsForGroup(nss, mediumGroupName, mediumGroupSize, mediumGroupCount, image, command, kind, secretsPerPod) configs = append(configs, newConfigs...) secretConfigs = append(secretConfigs, newSecretConfigs...) newConfigs, newSecretConfigs = generateConfigsForGroup(nss, bigGroupName, bigGroupSize, bigGroupCount, image, command, kind, secretsPerPod) configs = append(configs, newConfigs...) secretConfigs = append(secretConfigs, newSecretConfigs...) 
// Create a number of clients to better simulate real usecase // where not everyone is using exactly the same client. rcsPerClient := 20 clients, internalClients, err := createClients((len(configs) + rcsPerClient - 1) / rcsPerClient) framework.ExpectNoError(err) for i := 0; i < len(configs); i++ { configs[i].SetClient(clients[i%len(clients)]) configs[i].SetInternalClient(internalClients[i%len(internalClients)]) } for i := 0; i < len(secretConfigs); i++ { secretConfigs[i].Client = clients[i%len(clients)] } return configs, secretConfigs } func generateConfigsForGroup( nss []*v1.Namespace, groupName string, size, count int, image string, command []string, kind schema.GroupKind, secretsPerPod int, ) ([]testutils.RunObjectConfig, []*testutils.SecretConfig) { configs := make([]testutils.RunObjectConfig, 0, count) secretConfigs := make([]*testutils.SecretConfig, 0, count*secretsPerPod) for i := 1; i <= count; i++ { namespace := nss[i%len(nss)].Name secretNames := make([]string, 0, secretsPerPod) for j := 0; j < secretsPerPod; j++ { secretName := fmt.Sprintf("%v-%v-secret-%v", groupName, i, j) secretConfigs = append(secretConfigs, &testutils.SecretConfig{ Content: map[string]string{"foo": "bar"}, Client: nil, // this will be overwritten later Name: secretName, Namespace: namespace, LogFunc: framework.Logf, }) secretNames = append(secretNames, secretName) } baseConfig := &testutils.RCConfig{ Client: nil, // this will be overwritten later InternalClient: nil, // this will be overwritten later Name: groupName + "-" + strconv.Itoa(i), Namespace: namespace, Timeout: 10 * time.Minute, Image: image, Command: command, Replicas: size, CpuRequest: 10, // 0.01 core MemRequest: 26214400, // 25MB SecretNames: secretNames, } var config testutils.RunObjectConfig switch kind { case api.Kind("ReplicationController"): config = baseConfig case extensions.Kind("ReplicaSet"): config = &testutils.ReplicaSetConfig{RCConfig: *baseConfig} case extensions.Kind("Deployment"): config = &testutils.DeploymentConfig{RCConfig: *baseConfig} case batch.Kind("Job"): config = &testutils.JobConfig{RCConfig: *baseConfig} default: framework.Failf("Unsupported kind for config creation: %v", kind) } configs = append(configs, config) } return configs, secretConfigs } func generateServicesForConfigs(configs []testutils.RunObjectConfig) []*v1.Service { services := make([]*v1.Service, 0, len(configs)) for _, config := range configs { serviceName := config.GetName() + "-svc" labels := map[string]string{"name": config.GetName()} service := &v1.Service{ ObjectMeta: v1.ObjectMeta{ Name: serviceName, Namespace: config.GetNamespace(), }, Spec: v1.ServiceSpec{ Selector: labels, Ports: []v1.ServicePort{{ Port: 80, TargetPort: intstr.FromInt(80), }}, }, } services = append(services, service) } return services } func sleepUpTo(d time.Duration) { time.Sleep(time.Duration(rand.Int63n(d.Nanoseconds()))) } func createAllResources(configs []testutils.RunObjectConfig, creatingTime time.Duration) { var wg sync.WaitGroup wg.Add(len(configs)) for _, config := range configs { go createResource(&wg, config, creatingTime) } wg.Wait() } func createResource(wg *sync.WaitGroup, config testutils.RunObjectConfig, creatingTime time.Duration) { defer GinkgoRecover() defer wg.Done() sleepUpTo(creatingTime) framework.ExpectNoError(config.Run(), fmt.Sprintf("creating %v %s", config.GetKind(), config.GetName())) } func scaleAllResources(configs []testutils.RunObjectConfig, scalingTime time.Duration) { var wg sync.WaitGroup wg.Add(len(configs)) for _, config := range configs 
{ go scaleResource(&wg, config, scalingTime) } wg.Wait() } // Scales RC to a random size within [0.5*size, 1.5*size] and lists all the pods afterwards. // Scaling happens always based on original size, not the current size. func scaleResource(wg *sync.WaitGroup, config testutils.RunObjectConfig, scalingTime time.Duration) { defer GinkgoRecover() defer wg.Done() sleepUpTo(scalingTime) newSize := uint(rand.Intn(config.GetReplicas()) + config.GetReplicas()/2) framework.ExpectNoError(framework.ScaleResource( config.GetClient(), config.GetInternalClient(), config.GetNamespace(), config.GetName(), newSize, true, config.GetKind()), fmt.Sprintf("scaling rc %s for the first time", config.GetName())) selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": config.GetName()})) options := v1.ListOptions{ LabelSelector: selector.String(), ResourceVersion: "0", } _, err := config.GetClient().Core().Pods(config.GetNamespace()).List(options) framework.ExpectNoError(err, fmt.Sprintf("listing pods from rc %v", config.GetName())) } func deleteAllResources(configs []testutils.RunObjectConfig, deletingTime time.Duration) { var wg sync.WaitGroup wg.Add(len(configs)) for _, config := range configs { go deleteResource(&wg, config, deletingTime) } wg.Wait() } func deleteResource(wg *sync.WaitGroup, config testutils.RunObjectConfig, deletingTime time.Duration) { defer GinkgoRecover() defer wg.Done() sleepUpTo(deletingTime) if framework.TestContext.GarbageCollectorEnabled && config.GetKind() != extensions.Kind("Deployment") { framework.ExpectNoError(framework.DeleteResourceAndWaitForGC( config.GetClient(), config.GetKind(), config.GetNamespace(), config.GetName()), fmt.Sprintf("deleting %v %s", config.GetKind(), config.GetName())) } else { framework.ExpectNoError(framework.DeleteResourceAndPods( config.GetClient(), config.GetInternalClient(), config.GetKind(), config.GetNamespace(), config.GetName()), fmt.Sprintf("deleting %v %s", config.GetKind(), config.GetName())) } } func CreateNamespaces(f *framework.Framework, namespaceCount int, namePrefix string) ([]*v1.Namespace, error) { namespaces := []*v1.Namespace{} for i := 1; i <= namespaceCount; i++ { namespace, err := f.CreateNamespace(fmt.Sprintf("%v-%d", namePrefix, i), nil) if err != nil { return []*v1.Namespace{}, err } namespaces = append(namespaces, namespace) } return namespaces, nil }
[ "\"LOAD_TEST_THROUGHPUT\"", "\"CREATE_SERVICES\"" ]
[]
[ "CREATE_SERVICES", "LOAD_TEST_THROUGHPUT" ]
[]
["CREATE_SERVICES", "LOAD_TEST_THROUGHPUT"]
go
2
0
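The e2e load test above defaults to 10 pods/second, lets LOAD_TEST_THROUGHPUT override that, and enables service creation only when CREATE_SERVICES equals the string "true". A compact sketch of those two overrides (the function name is illustrative):

package main

import (
	"fmt"
	"os"
	"strconv"
)

// loadTestSettings reproduces the two environment overrides used by the
// load test: an integer throughput defaulting to 10, and a CREATE_SERVICES
// flag compared against "true".
func loadTestSettings() (throughput int, createServices bool) {
	throughput = 10
	if v := os.Getenv("LOAD_TEST_THROUGHPUT"); v != "" {
		if n, err := strconv.Atoi(v); err == nil {
			throughput = n
		}
	}
	createServices = os.Getenv("CREATE_SERVICES") == "true"
	return throughput, createServices
}

func main() {
	t, svc := loadTestSettings()
	fmt.Printf("throughput=%d pods/s, create services=%v\n", t, svc)
}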
aws-es-proxy.go
package main import ( "bytes" "crypto/subtle" "encoding/json" "flag" "fmt" "io" "io/ioutil" "log" "net/http" "net/http/cookiejar" "net/http/httputil" "net/url" "os" "path" "regexp" "runtime" "strings" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" "github.com/aws/aws-sdk-go/aws/credentials/stscreds" "github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/aws/session" v4 "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/sirupsen/logrus" "go.mongodb.org/mongo-driver/bson/primitive" "golang.org/x/net/publicsuffix" ) func logger(debug bool) { formatFilePath := func(path string) string { arr := strings.Split(path, "/") return arr[len(arr)-1] } if debug { logrus.SetLevel(logrus.DebugLevel) // logrus.SetReportCaller(true) } formatter := &logrus.TextFormatter{ TimestampFormat: "2006-02-01 15:04:05", FullTimestamp: true, DisableLevelTruncation: false, CallerPrettyfier: func(f *runtime.Frame) (string, string) { return "", fmt.Sprintf("%s:%d", formatFilePath(f.File), f.Line) }, } logrus.SetFormatter(formatter) } type requestStruct struct { Requestid string Datetime string Remoteaddr string Requesturi string Method string Statuscode int Elapsed float64 Body string } type responseStruct struct { Requestid string Body string } type proxy struct { scheme string host string region string service string endpoint string verbose bool prettify bool logtofile bool nosignreq bool fileRequest *os.File fileResponse *os.File credentials *credentials.Credentials httpClient *http.Client auth bool username string password string realm string remoteTerminate bool assumeRole string } func newProxy(args ...interface{}) *proxy { noRedirect := func(req *http.Request, via []*http.Request) error { return http.ErrUseLastResponse } jar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List}) if err != nil { log.Fatal(err) } client := http.Client{ Timeout: time.Duration(args[5].(int)) * time.Second, CheckRedirect: noRedirect, Jar: jar, } return &proxy{ endpoint: args[0].(string), verbose: args[1].(bool), prettify: args[2].(bool), logtofile: args[3].(bool), nosignreq: args[4].(bool), httpClient: &client, auth: args[6].(bool), username: args[7].(string), password: args[8].(string), realm: args[9].(string), remoteTerminate: args[10].(bool), assumeRole: args[11].(string), } } func (p *proxy) parseEndpoint() error { var ( link *url.URL err error isAWSEndpoint bool ) if link, err = url.Parse(p.endpoint); err != nil { return fmt.Errorf("error: failure while parsing endpoint: %s. Error: %s", p.endpoint, err.Error()) } // Only http/https are supported schemes. 
// AWS Elasticsearch uses https by default, but now aws-es-proxy // allows non-aws ES clusters as endpoints, therefore we have to fallback // to http instead of https switch link.Scheme { case "http", "https": default: link.Scheme = "http" } // Unknown schemes sometimes result in empty host value if link.Host == "" { return fmt.Errorf("error: empty host or protocol information in submitted endpoint (%s)", p.endpoint) } // Update proxy struct p.scheme = link.Scheme p.host = link.Host // AWS SignV4 enabled, extract required parts for signing process if !p.nosignreq { split := strings.SplitAfterN(link.Hostname(), ".", 2) if len(split) < 2 { logrus.Debugln("Endpoint split is less than 2") } awsEndpoints := []string{} for _, partition := range endpoints.DefaultPartitions() { for region := range partition.Regions() { awsEndpoints = append(awsEndpoints, fmt.Sprintf("%s.es.%s", region, partition.DNSSuffix())) } } isAWSEndpoint = false for _, v := range awsEndpoints { if split[1] == v { logrus.Debugln("Provided endpoint is a valid AWS Elasticsearch endpoint") isAWSEndpoint = true break } } if isAWSEndpoint { // Extract region and service from link. This should be save now parts := strings.Split(link.Host, ".") p.region, p.service = parts[1], "es" logrus.Debugln("AWS Region", p.region) } } return nil } func (p *proxy) getSigner() (*v4.Signer, error) { // Refresh credentials after expiration. Required for STS if p.credentials == nil { sess, err := session.NewSession( &aws.Config{ Region: aws.String(p.region), CredentialsChainVerboseErrors: aws.Bool(true), }, ) if err != nil { return nil, err } awsRoleARN := os.Getenv("AWS_ROLE_ARN") awsWebIdentityTokenFile := os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE") var creds *credentials.Credentials if awsRoleARN != "" && awsWebIdentityTokenFile != "" { logrus.Infof("Using web identity credentials with role %s", awsRoleARN) creds = stscreds.NewWebIdentityCredentials(sess, awsRoleARN, "", awsWebIdentityTokenFile) } else if p.assumeRole != "" { logrus.Infof("Assuming credentials from %s", p.assumeRole) creds = stscreds.NewCredentials(sess, p.assumeRole, func(provider *stscreds.AssumeRoleProvider) { provider.Duration = 17 * time.Minute provider.ExpiryWindow = 13 * time.Minute provider.MaxJitterFrac = 0.1 }) } else { logrus.Infoln("Using default credentials") // creds = sess.Config.Credentials creds = credentials.NewChainCredentials( []credentials.Provider{ &credentials.EnvProvider{}, &ec2rolecreds.EC2RoleProvider{ Client: ec2metadata.New(sess), ExpiryWindow: 5 * time.Minute, }, }, ) } p.credentials = creds logrus.Infoln("Generated fresh AWS Credentials object") } return v4.NewSigner(p.credentials), nil } func (p *proxy) ServeHTTP(w http.ResponseWriter, r *http.Request) { if p.remoteTerminate && r.URL.Path == "/terminate-proxy" && r.Method == http.MethodPost { logrus.Infoln("Terminate Signal") os.Exit(0) } if p.auth { user, pass, ok := r.BasicAuth() if !ok || subtle.ConstantTimeCompare([]byte(user), []byte(p.username)) != 1 || subtle.ConstantTimeCompare([]byte(pass), []byte(p.password)) != 1 { w.Header().Set("WWW-Authenticate", fmt.Sprintf("Basic realm=\"%s\"", p.realm)) w.WriteHeader(401) _, _ = w.Write([]byte("Unauthorised.\n")) return } } requestStarted := time.Now() var ( err error dump []byte req *http.Request ) if dump, err = httputil.DumpRequest(r, true); err != nil { logrus.WithError(err).Errorln("Failed to dump request.") http.Error(w, err.Error(), http.StatusInternalServerError) return } defer r.Body.Close() proxied := *r.URL proxied.Host = p.host 
proxied.Scheme = p.scheme proxied.Path = path.Clean(proxied.Path) if req, err = http.NewRequest(r.Method, proxied.String(), r.Body); err != nil { logrus.WithError(err).Errorln("Failed creating new request.") http.Error(w, err.Error(), http.StatusBadRequest) return } addHeaders(r.Header, req.Header) // Make signV4 optional if !p.nosignreq { // Start AWS session from ENV, Shared Creds or EC2Role signer, err := p.getSigner() if err == nil { // Sign the request with AWSv4 payload := bytes.NewReader(replaceBody(req)) _, err = signer.Sign(req, payload, p.service, p.region, time.Now()) } if err != nil { p.credentials = nil logrus.Errorln("Failed to sign", err) http.Error(w, "Failed to sign", http.StatusForbidden) return } } resp, err := p.httpClient.Do(req) if err != nil { logrus.Errorln(err) http.Error(w, err.Error(), http.StatusBadRequest) return } if !p.nosignreq { // AWS credentials expired, need to generate fresh ones if resp.StatusCode == 403 { logrus.Errorln("Received 403 from AWSAuth, invalidating credentials for retrial") p.credentials = nil logrus.Debugln("Received Status code from AWS:", resp.StatusCode) b := bytes.Buffer{} if _, err := io.Copy(&b, resp.Body); err != nil { logrus.WithError(err).Errorln("Failed to decode body") http.Error(w, err.Error(), http.StatusInternalServerError) return } logrus.Debugln("Received headers from AWS:", resp.Header) logrus.Debugln("Received body from AWS:", string(b.Bytes())) } } defer resp.Body.Close() // Write back headers to requesting client copyHeaders(w.Header(), resp.Header) // Send response back to requesting client body := bytes.Buffer{} if _, err := io.Copy(&body, resp.Body); err != nil { logrus.Errorln(err) http.Error(w, err.Error(), http.StatusInternalServerError) return } w.WriteHeader(resp.StatusCode) w.Write(body.Bytes()) requestEnded := time.Since(requestStarted) /*############################ ## Logging ############################*/ rawQuery := string(dump) rawQuery = strings.Replace(rawQuery, "\n", " ", -1) regex, _ := regexp.Compile("{.*}") regEx, _ := regexp.Compile("_msearch|_bulk") queryEx := regEx.FindString(rawQuery) var query string if len(queryEx) == 0 { query = regex.FindString(rawQuery) } else { query = "" } if p.verbose { if p.prettify { var prettyBody bytes.Buffer json.Indent(&prettyBody, []byte(query), "", " ") t := time.Now() fmt.Println() fmt.Println("========================") fmt.Println(t.Format("2006/01/02 15:04:05")) fmt.Println("Remote Address: ", r.RemoteAddr) fmt.Println("Request URI: ", proxied.RequestURI()) fmt.Println("Method: ", r.Method) fmt.Println("Status: ", resp.StatusCode) fmt.Printf("Took: %.3fs\n", requestEnded.Seconds()) fmt.Println("Body: ") fmt.Println(string(prettyBody.Bytes())) } else { log.Printf(" -> %s; %s; %s; %s; %d; %.3fs\n", r.Method, r.RemoteAddr, proxied.RequestURI(), query, resp.StatusCode, requestEnded.Seconds()) } } if p.logtofile { requestID := primitive.NewObjectID().Hex() reqStruct := &requestStruct{ Requestid: requestID, Datetime: time.Now().Format("2006/01/02 15:04:05"), Remoteaddr: r.RemoteAddr, Requesturi: proxied.RequestURI(), Method: r.Method, Statuscode: resp.StatusCode, Elapsed: requestEnded.Seconds(), Body: query, } respStruct := &responseStruct{ Requestid: requestID, Body: string(body.Bytes()), } y, _ := json.Marshal(reqStruct) z, _ := json.Marshal(respStruct) p.fileRequest.Write(y) p.fileRequest.WriteString("\n") p.fileResponse.Write(z) p.fileResponse.WriteString("\n") } } // Recent versions of ES/Kibana require // "content-type: application/json" and // either 
"kbn-version" or "kbn-xsrf" // headers to exist in the request. // If missing requests fails. func addHeaders(src, dest http.Header) { if val, ok := src["Kbn-Version"]; ok { dest.Add("Kbn-Version", val[0]) } if val, ok := src["Content-Type"]; ok { dest.Add("Content-Type", val[0]) } if val, ok := src["Kbn-Xsrf"]; ok { dest.Add("Kbn-Xsrf", val[0]) } if val, ok := src["Authorization"]; ok { dest.Add("Authorization", val[0]) } } // Signer.Sign requires a "seekable" body to sum body's sha256 func replaceBody(req *http.Request) []byte { if req.Body == nil { return []byte{} } payload, _ := ioutil.ReadAll(req.Body) req.Body = ioutil.NopCloser(bytes.NewReader(payload)) return payload } func copyHeaders(dst, src http.Header) { for k, vals := range src { if k != "Authorization" { for _, v := range vals { dst.Add(k, v) } } } } func main() { var ( debug bool auth bool username string password string realm string verbose bool prettify bool logtofile bool nosignreq bool ver bool endpoint string listenAddress string fileRequest *os.File fileResponse *os.File err error timeout int remoteTerminate bool assumeRole string ) flag.StringVar(&endpoint, "endpoint", "", "Amazon ElasticSearch Endpoint (e.g: https://dummy-host.eu-west-1.es.amazonaws.com)") flag.StringVar(&listenAddress, "listen", "127.0.0.1:9200", "Local TCP port to listen on") flag.BoolVar(&verbose, "verbose", false, "Print user requests") flag.BoolVar(&logtofile, "log-to-file", false, "Log user requests and ElasticSearch responses to files") flag.BoolVar(&prettify, "pretty", false, "Prettify verbose and file output") flag.BoolVar(&nosignreq, "no-sign-reqs", false, "Disable AWS Signature v4") flag.BoolVar(&debug, "debug", false, "Print debug messages") flag.BoolVar(&ver, "version", false, "Print aws-es-proxy version") flag.IntVar(&timeout, "timeout", 15, "Set a request timeout to ES. Specify in seconds, defaults to 15") flag.BoolVar(&auth, "auth", false, "Require HTTP Basic Auth") flag.StringVar(&username, "username", "", "HTTP Basic Auth Username") flag.StringVar(&password, "password", "", "HTTP Basic Auth Password") flag.StringVar(&realm, "realm", "", "Authentication Required") flag.BoolVar(&remoteTerminate, "remote-terminate", false, "Allow HTTP remote termination") flag.StringVar(&assumeRole, "assume", "", "Optionally specify role to assume") flag.Parse() if endpoint == "" { if v, ok := os.LookupEnv(strings.ToUpper("endpoint")); ok { endpoint = v } else { text := "You need to specify Amazon ElasticSearch endpoint.\n" + "You can use either argument '-endpoint' OR environment variable 'ENDPOINT'.\n" + "Please run with '-h' for a list of available arguments." 
fmt.Println(text) os.Exit(1) } } if debug { logger(true) } else { logger(false) } if ver { version := 1.1 logrus.Infof("Current version is: v%.1f", version) os.Exit(0) } if auth { if len(username) == 0 || len(password) == 0 { fmt.Println("You need to specify username and password when using authentication.") fmt.Println("Please run with '-h' for a list of available arguments.") os.Exit(1) } } p := newProxy( endpoint, verbose, prettify, logtofile, nosignreq, timeout, auth, username, password, realm, remoteTerminate, assumeRole, ) if err = p.parseEndpoint(); err != nil { logrus.Fatalln(err) os.Exit(1) } if p.logtofile { requestFname := fmt.Sprintf("request-%s.log", primitive.NewObjectID().Hex()) if fileRequest, err = os.Create(requestFname); err != nil { log.Fatalln(err.Error()) } defer fileRequest.Close() responseFname := fmt.Sprintf("response-%s.log", primitive.NewObjectID().Hex()) if fileResponse, err = os.Create(responseFname); err != nil { log.Fatalln(err.Error()) } defer fileResponse.Close() p.fileRequest = fileRequest p.fileResponse = fileResponse } logrus.Infof("Listening on %s...\n", listenAddress) logrus.Fatalln(http.ListenAndServe(listenAddress, p)) }
[ "\"AWS_ROLE_ARN\"", "\"AWS_WEB_IDENTITY_TOKEN_FILE\"" ]
[]
[ "AWS_ROLE_ARN", "AWS_WEB_IDENTITY_TOKEN_FILE" ]
[]
["AWS_ROLE_ARN", "AWS_WEB_IDENTITY_TOKEN_FILE"]
go
2
0
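The row above records two constant environment lookups, AWS_ROLE_ARN and AWS_WEB_IDENTITY_TOKEN_FILE, which the proxy checks before falling back to its default credential chain. A minimal, self-contained sketch of that lookup pattern follows; the helper name and printed messages are illustrative assumptions, not part of the original file.

package main

import (
	"fmt"
	"os"
)

// webIdentityConfigured reads the two variables recorded for this row and
// reports whether both are set, which is the condition under which
// web-identity credentials are preferred. The helper name is hypothetical,
// added only for this sketch.
func webIdentityConfigured() (roleARN, tokenFile string, ok bool) {
	roleARN = os.Getenv("AWS_ROLE_ARN")
	tokenFile = os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE")
	return roleARN, tokenFile, roleARN != "" && tokenFile != ""
}

func main() {
	if role, token, ok := webIdentityConfigured(); ok {
		fmt.Printf("using web identity role %s with token file %s\n", role, token)
	} else {
		fmt.Println("falling back to the default credential chain")
	}
}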
src/lib/log/logger.go
// Copyright Project Harbor Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package log import ( "fmt" "io" "os" "runtime" "sort" "strings" "sync" "time" ) // NOTE: the default depth for the logger is 3 so that we can get the correct file and line when use the logger to log message var logger = New(os.Stdout, NewTextFormatter(), WarningLevel, 3) const srcSeparator = "harbor" + string(os.PathSeparator) + "src" func init() { lvl := os.Getenv("LOG_LEVEL") if len(lvl) == 0 { logger.setLevel(InfoLevel) return } level, err := parseLevel(lvl) if err != nil { logger.setLevel(InfoLevel) return } logger.setLevel(level) } // Fields type alias to map[string]interface{} type Fields = map[string]interface{} // Logger provides a struct with fields that describe the details of logger. type Logger struct { out io.Writer fmtter Formatter lvl Level callDepth int skipLine bool fields map[string]interface{} fieldsStr string mu *sync.Mutex // ptr here to share one sync.Mutex for clone method } // New returns a customized Logger func New(out io.Writer, fmtter Formatter, lvl Level, options ...interface{}) *Logger { // Default set to be 3 depth := 3 // If passed in as option, then reset depth // Use index 0 if len(options) > 0 { d, ok := options[0].(int) if ok && d > 0 { depth = d } } return &Logger{ out: out, fmtter: fmtter, lvl: lvl, callDepth: depth, fields: map[string]interface{}{}, mu: &sync.Mutex{}, } } // DefaultLogger returns the default logger within the pkg, i.e. the one used in log.Infof.... 
func DefaultLogger() *Logger { return logger } func (l *Logger) clone() *Logger { return &Logger{ out: l.out, fmtter: l.fmtter, lvl: l.lvl, callDepth: l.callDepth, skipLine: l.skipLine, fields: l.fields, fieldsStr: l.fieldsStr, mu: l.mu, } } // WithDepth returns cloned logger with new depth func (l *Logger) WithDepth(depth int) *Logger { r := l.clone() r.callDepth = depth return r } // WithFields returns cloned logger which fields merged with the new fields func (l *Logger) WithFields(fields Fields) *Logger { r := l.clone() if len(fields) > 0 { copyFields := make(map[string]interface{}, len(l.fields)+len(fields)) for key, value := range l.fields { copyFields[key] = value } for key, value := range fields { copyFields[key] = value } sortedKeys := make([]string, 0, len(copyFields)) for key := range copyFields { sortedKeys = append(sortedKeys, key) } sort.Strings(sortedKeys) parts := make([]string, 0, len(copyFields)) for _, key := range sortedKeys { parts = append(parts, fmt.Sprintf(`%v="%v"`, key, copyFields[key])) } r.fields = copyFields r.fieldsStr = "[" + strings.Join(parts, " ") + "]" } return r } // setOutput sets the output of Logger l func (l *Logger) setOutput(out io.Writer) { l.mu.Lock() defer l.mu.Unlock() l.out = out } // setFormatter sets the formatter of Logger l func (l *Logger) setFormatter(fmtter Formatter) { l.mu.Lock() defer l.mu.Unlock() l.fmtter = fmtter } // setLevel sets the level of Logger l func (l *Logger) setLevel(lvl Level) { l.mu.Lock() defer l.mu.Unlock() l.lvl = lvl } func (l *Logger) output(record *Record) (err error) { b, err := l.fmtter.Format(record) if err != nil { return } l.mu.Lock() defer l.mu.Unlock() _, err = l.out.Write(b) return } // Debug ... func (l *Logger) Debug(v ...interface{}) { if l.lvl <= DebugLevel { record := NewRecord(time.Now(), fmt.Sprint(v...), l.getLine(), DebugLevel) l.output(record) } } // Debugf ... func (l *Logger) Debugf(format string, v ...interface{}) { if l.lvl <= DebugLevel { record := NewRecord(time.Now(), fmt.Sprintf(format, v...), l.getLine(), DebugLevel) l.output(record) } } // Info ... func (l *Logger) Info(v ...interface{}) { if l.lvl <= InfoLevel { record := NewRecord(time.Now(), fmt.Sprint(v...), l.getLine(), InfoLevel) l.output(record) } } // Infof ... func (l *Logger) Infof(format string, v ...interface{}) { if l.lvl <= InfoLevel { record := NewRecord(time.Now(), fmt.Sprintf(format, v...), l.getLine(), InfoLevel) l.output(record) } } // Warning ... func (l *Logger) Warning(v ...interface{}) { if l.lvl <= WarningLevel { record := NewRecord(time.Now(), fmt.Sprint(v...), l.getLine(), WarningLevel) l.output(record) } } // Warningf ... func (l *Logger) Warningf(format string, v ...interface{}) { if l.lvl <= WarningLevel { record := NewRecord(time.Now(), fmt.Sprintf(format, v...), l.getLine(), WarningLevel) l.output(record) } } // Error ... func (l *Logger) Error(v ...interface{}) { if l.lvl <= ErrorLevel { record := NewRecord(time.Now(), fmt.Sprint(v...), l.getLine(), ErrorLevel) l.output(record) } } // Errorf ... func (l *Logger) Errorf(format string, v ...interface{}) { if l.lvl <= ErrorLevel { record := NewRecord(time.Now(), fmt.Sprintf(format, v...), l.getLine(), ErrorLevel) l.output(record) } } // Fatal ... func (l *Logger) Fatal(v ...interface{}) { if l.lvl <= FatalLevel { record := NewRecord(time.Now(), fmt.Sprint(v...), l.getLine(), FatalLevel) l.output(record) } os.Exit(1) } // Fatalf ... 
func (l *Logger) Fatalf(format string, v ...interface{}) { if l.lvl <= FatalLevel { record := NewRecord(time.Now(), fmt.Sprintf(format, v...), l.getLine(), FatalLevel) l.output(record) } os.Exit(1) } func (l *Logger) getLine() string { var str string if !l.skipLine { str = line(l.callDepth) } str = str + l.fieldsStr if str != "" { str = str + ":" } return str } // Debug ... func Debug(v ...interface{}) { logger.WithDepth(4).Debug(v...) } // Debugf ... func Debugf(format string, v ...interface{}) { logger.WithDepth(4).Debugf(format, v...) } // Info ... func Info(v ...interface{}) { logger.WithDepth(4).Info(v...) } // Infof ... func Infof(format string, v ...interface{}) { logger.WithDepth(4).Infof(format, v...) } // Warning ... func Warning(v ...interface{}) { logger.WithDepth(4).Warning(v...) } // Warningf ... func Warningf(format string, v ...interface{}) { logger.WithDepth(4).Warningf(format, v...) } // Error ... func Error(v ...interface{}) { logger.WithDepth(4).Error(v...) } // Errorf ... func Errorf(format string, v ...interface{}) { logger.WithDepth(4).Errorf(format, v...) } // Fatal ... func Fatal(v ...interface{}) { logger.WithDepth(4).Fatal(v...) } // Fatalf ... func Fatalf(format string, v ...interface{}) { logger.WithDepth(4).Fatalf(format, v...) } func line(callDepth int) string { _, file, line, ok := runtime.Caller(callDepth) if !ok { file = "???" line = 0 } l := strings.SplitN(file, srcSeparator, 2) if len(l) > 1 { file = l[1] } return fmt.Sprintf("[%s:%d]", file, line) }
[ "\"LOG_LEVEL\"" ]
[]
[ "LOG_LEVEL" ]
[]
["LOG_LEVEL"]
go
1
0
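The logger row above records a single LOG_LEVEL lookup performed at package init, with the info level used as the fallback when the variable is unset or cannot be parsed. A small stand-alone sketch of that fallback behaviour; the set of accepted level names is assumed here for illustration and is not taken from the original package.

package main

import (
	"fmt"
	"os"
	"strings"
)

// logLevelFromEnv mirrors the init-time fallback recorded above: an unset or
// unrecognised LOG_LEVEL leaves the default level in effect.
func logLevelFromEnv(defaultLevel string) string {
	switch lvl := strings.ToLower(os.Getenv("LOG_LEVEL")); lvl {
	case "debug", "info", "warning", "error", "fatal":
		return lvl
	default:
		return defaultLevel
	}
}

func main() {
	fmt.Println("effective log level:", logLevelFromEnv("info"))
}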
server/common_models/category.py
# coding=utf-8 from __future__ import absolute_import from document import BaseDocument, ObjectId, INDEX_ASC from utils.misc import now class Term(BaseDocument): MAX_STORAGE = 120 structure = { 'key': unicode, 'parent': unicode, 'priority': int, 'meta': dict, 'creation': int, 'updated': int } sensitive_fields = ['meta'] required_fields = ['key'] default_values = { 'meta': {}, 'parent': u'', 'priority': 0, 'creation': now, 'updated': now, } indexes = [ { 'fields': ['key'], 'unique': True, }, { 'fields': ['priority', 'creation'], } ] def find_one_by_key(self, key): return self.find_one({ 'key': key, }) def find_one_by_id(self, term_id): return self.find_one({ '_id': ObjectId(term_id), }) def find_all(self, ): _sort = [('priority', INDEX_ASC), ('creation', INDEX_ASC)] return self.find().sort(_sort).limit(self.MAX_STORAGE) def eject_subset(self, parent): return self.collection.update({ 'parent': parent }, {'$set': {'parent': u''}}, multi=True) def count_used(self, cat_id): return self.find().count()
[]
[]
[]
[]
[]
python
null
null
null
pkg/oc/cli/admin/verifyimagesignature/verify-signature.go
package verifyimagesignature import ( "context" "errors" "fmt" "io/ioutil" "net/url" "os" "path/filepath" "strings" "github.com/containers/image/docker/policyconfiguration" "github.com/containers/image/docker/reference" "github.com/containers/image/signature" sigtypes "github.com/containers/image/types" "github.com/spf13/cobra" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/kubectl/cmd/templates" kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/genericclioptions" imagev1 "github.com/openshift/api/image/v1" imagev1typedclient "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1" userv1typedclient "github.com/openshift/client-go/user/clientset/versioned/typed/user/v1" imageref "github.com/openshift/library-go/pkg/image/reference" imageapi "github.com/openshift/origin/pkg/image/apis/image" ) var ( verifyImageSignatureLongDesc = templates.LongDesc(` Verifies the image signature of an image imported to internal registry using the local public GPG key. This command verifies if the image identity contained in the image signature can be trusted by using the public GPG key to verify the signature itself and matching the provided expected identity with the identity (pull spec) of the given image. By default, this command will use the public GPG keyring located in "$GNUPGHOME/.gnupg/pubring.gpg" By default, this command will not save the result of the verification back to the image object, to do so user have to specify the "--save" flag. Note that to modify the image signature verification status, user have to have permissions to edit an image object (usually an "image-auditor" role). Note that using the "--save" flag on already verified image together with invalid GPG key or invalid expected identity will cause the saved verification status to be removed and the image will become "unverified". If this command is outside the cluster, users have to specify the "--registry-url" parameter with the public URL of image registry. To remove all verifications, users can use the "--remove-all" flag. 
`) verifyImageSignatureExample = templates.Examples(` # Verify the image signature and identity using the local GPG keychain %[1]s sha256:c841e9b64e4579bd56c794bdd7c36e1c257110fd2404bebbb8b613e4935228c4 \ --expected-identity=registry.local:5000/foo/bar:v1 # Verify the image signature and identity using the local GPG keychain and save the status %[1]s sha256:c841e9b64e4579bd56c794bdd7c36e1c257110fd2404bebbb8b613e4935228c4 \ --expected-identity=registry.local:5000/foo/bar:v1 --save # Verify the image signature and identity via exposed registry route %[1]s sha256:c841e9b64e4579bd56c794bdd7c36e1c257110fd2404bebbb8b613e4935228c4 \ --expected-identity=registry.local:5000/foo/bar:v1 \ --registry-url=docker-registry.foo.com # Remove all signature verifications from the image %[1]s sha256:c841e9b64e4579bd56c794bdd7c36e1c257110fd2404bebbb8b613e4935228c4 --remove-all `) ) const ( VerifyRecommendedName = "verify-image-signature" ) type VerifyImageSignatureOptions struct { InputImage string ExpectedIdentity string PublicKeyFilename string PublicKey []byte Save bool RemoveAll bool CurrentUser string CurrentUserToken string RegistryURL string Insecure bool ImageClient imagev1typedclient.ImageV1Interface genericclioptions.IOStreams } func NewVerifyImageSignatureOptions(streams genericclioptions.IOStreams) *VerifyImageSignatureOptions { return &VerifyImageSignatureOptions{ // TODO: This improves the error message users get when containers/image is not able // to locate the pubring.gpg file (which is default). // This should be improved/fixed in containers/image. PublicKeyFilename: filepath.Join(os.Getenv("GNUPGHOME"), "pubring.gpg"), IOStreams: streams, } } func NewCmdVerifyImageSignature(name, fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { o := NewVerifyImageSignatureOptions(streams) cmd := &cobra.Command{ Use: fmt.Sprintf("%s IMAGE --expected-identity=EXPECTED_IDENTITY [--save]", VerifyRecommendedName), Short: "Verify the image identity contained in the image signature", Long: verifyImageSignatureLongDesc, Example: fmt.Sprintf(verifyImageSignatureExample, fullName), Run: func(cmd *cobra.Command, args []string) { kcmdutil.CheckErr(o.Validate()) kcmdutil.CheckErr(o.Complete(f, cmd, args)) kcmdutil.CheckErr(o.Run()) }, } cmd.Flags().StringVar(&o.ExpectedIdentity, "expected-identity", o.ExpectedIdentity, "An expected image docker reference to verify (required).") cmd.Flags().BoolVar(&o.Save, "save", o.Save, "If true, the result of the verification will be saved to an image object.") cmd.Flags().BoolVar(&o.RemoveAll, "remove-all", o.RemoveAll, "If set, all signature verifications will be removed from the given image.") cmd.Flags().StringVar(&o.PublicKeyFilename, "public-key", o.PublicKeyFilename, fmt.Sprintf("A path to a public GPG key to be used for verification. (defaults to %q)", o.PublicKeyFilename)) cmd.Flags().StringVar(&o.RegistryURL, "registry-url", o.RegistryURL, "The address to use when contacting the registry, instead of using the internal cluster address. 
This is useful if you can't resolve or reach the internal registry address.") cmd.Flags().BoolVar(&o.Insecure, "insecure", o.Insecure, "If set, use the insecure protocol for registry communication.") return cmd } func (o *VerifyImageSignatureOptions) Validate() error { if !o.RemoveAll { if len(o.ExpectedIdentity) == 0 { return errors.New("the --expected-identity is required") } if _, err := imageref.Parse(o.ExpectedIdentity); err != nil { return errors.New("the --expected-identity must be valid image reference") } } if o.RemoveAll && len(o.ExpectedIdentity) > 0 { return errors.New("the --expected-identity cannot be used when removing all verifications") } return nil } func (o *VerifyImageSignatureOptions) Complete(f kcmdutil.Factory, cmd *cobra.Command, args []string) error { if len(args) != 1 { return kcmdutil.UsageErrorf(cmd, "exactly one image must be specified") } o.InputImage = args[0] var err error if len(o.PublicKeyFilename) > 0 { if o.PublicKey, err = ioutil.ReadFile(o.PublicKeyFilename); err != nil { return fmt.Errorf("unable to read --public-key: %v", err) } } clientConfig, err := f.ToRESTConfig() if err != nil { return err } o.ImageClient, err = imagev1typedclient.NewForConfig(clientConfig) if err != nil { return err } userClient, err := userv1typedclient.NewForConfig(clientConfig) if err != nil { return err } // We need the current user name so we can record it into an verification condition and // we need a bearer token so we can fetch the manifest from the registry. // TODO: Add support for external registries (currently only integrated registry will if me, err := userClient.Users().Get("~", metav1.GetOptions{}); err != nil { return err } else { o.CurrentUser = me.Name if config, err := f.ToRESTConfig(); err != nil { return err } else { if o.CurrentUserToken = config.BearerToken; len(o.CurrentUserToken) == 0 { return fmt.Errorf("no token is currently in use for this session") } } } return nil } func (o VerifyImageSignatureOptions) Run() error { img, err := o.ImageClient.Images().Get(o.InputImage, metav1.GetOptions{}) if err != nil { return err } if len(img.Signatures) == 0 { return fmt.Errorf("%s does not have any signature", img.Name) } pr, err := signature.NewPRSignedByKeyPath(signature.SBKeyTypeGPGKeys, o.PublicKeyFilename, signature.NewPRMMatchRepoDigestOrExact()) if err != nil { return fmt.Errorf("unable to prepare verification policy requirements: %v", err) } policy := signature.Policy{Default: []signature.PolicyRequirement{pr}} pc, err := signature.NewPolicyContext(&policy) if err != nil { return fmt.Errorf("unable to setup policy: %v", err) } defer pc.Destroy() if o.RemoveAll { img.Signatures = []imagev1.ImageSignature{} } for i, s := range img.Signatures { // Verify the signature against the policy signedBy, err := o.verifySignature(pc, img, s.Content) if err != nil { fmt.Fprintf(o.ErrOut, "error verifying signature %s for image %s (verification status will be removed): %v\n", img.Signatures[i].Name, o.InputImage, err) img.Signatures[i] = imagev1.ImageSignature{} continue } fmt.Fprintf(o.Out, "image %q identity is now confirmed (signed by GPG key %q)\n", o.InputImage, signedBy) now := metav1.Now() newConditions := []imagev1.SignatureCondition{ { Type: imageapi.SignatureTrusted, Status: corev1.ConditionTrue, LastProbeTime: now, LastTransitionTime: now, Reason: "manually verified", Message: fmt.Sprintf("verified by user %q", o.CurrentUser), }, // TODO: This should be not needed (need to relax validation). 
{ Type: imageapi.SignatureForImage, Status: corev1.ConditionTrue, LastProbeTime: now, LastTransitionTime: now, }, } img.Signatures[i].Conditions = newConditions img.Signatures[i].IssuedBy = &imagev1.SignatureIssuer{} // TODO: This should not be just a key id but a human-readable identity. img.Signatures[i].IssuedBy.CommonName = signedBy } if o.Save || o.RemoveAll { _, err := o.ImageClient.Images().Update(img) return err } else { fmt.Fprintf(o.Out, "Neither --save nor --remove-all were passed, image %q not updated to %v\n", o.InputImage, img) } return nil } // getImageManifest fetches the manifest for provided image from the integrated registry. func (o *VerifyImageSignatureOptions) getImageManifest(img *imagev1.Image) ([]byte, error) { parsed, err := imageapi.ParseDockerImageReference(img.DockerImageReference) if err != nil { return nil, err } // TODO(juanvallejo): Add missing methods to DockerImageReference object in library-go helper registryURL := parsed.RegistryURL() if len(o.RegistryURL) > 0 { registryURL = &url.URL{Host: o.RegistryURL, Scheme: "https"} if o.Insecure { registryURL.Scheme = "" } } return getImageManifestByIDFromRegistry(registryURL, parsed.RepositoryName(), img.Name, o.CurrentUser, o.CurrentUserToken, o.Insecure) } // verifySignature takes policy, image and the image signature blob and verifies that the // signature was signed by a trusted key, the expected identity matches the one in the // signature message and the manifest matches as well. // In case the image identity is confirmed, this function returns the matching GPG key in // short form, otherwise it returns rejection reason. func (o *VerifyImageSignatureOptions) verifySignature(pc *signature.PolicyContext, img *imagev1.Image, sigBlob []byte) (string, error) { manifest, err := o.getImageManifest(img) if err != nil { return "", fmt.Errorf("failed to get image %q manifest: %v", img.Name, err) } allowed, err := pc.IsRunningImageAllowed(newUnparsedImage(o.ExpectedIdentity, sigBlob, manifest)) if !allowed && err == nil { return "", errors.New("signature rejected but no error set") } if err != nil { return "", fmt.Errorf("signature rejected: %v", err) } if untrustedInfo, err := signature.GetUntrustedSignatureInformationWithoutVerifying(sigBlob); err != nil { // Tis is treated as an unverified signature. It really shouldn’t happen anyway. return "", fmt.Errorf("error getting signing key identity: %v", err) } else { return untrustedInfo.UntrustedShortKeyIdentifier, nil } } // dummyDockerTransport is containers/image/docker.Transport, except that it only provides identity information. var dummyDockerTransport = dockerTransport{} type dockerTransport struct{} func (t dockerTransport) Name() string { return "docker" } // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. func (t dockerTransport) ParseReference(reference string) (sigtypes.ImageReference, error) { return parseDockerReference(reference) } // ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys // (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). // It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. // scope passed to this function will not be "", that value is always allowed. func (t dockerTransport) ValidatePolicyConfigurationScope(scope string) error { // FIXME? 
We could be verifying the various character set and length restrictions // from docker/distribution/reference.regexp.go, but other than that there // are few semantically invalid strings. return nil } // dummyDockerReference is containers/image/docker.Reference, except that only provides identity information. type dummyDockerReference struct{ ref reference.Named } // parseDockerReference converts a string, which should not start with the ImageTransport.Name prefix, into an Docker ImageReference. func parseDockerReference(refString string) (sigtypes.ImageReference, error) { if !strings.HasPrefix(refString, "//") { return nil, fmt.Errorf("docker: image reference %s does not start with //", refString) } ref, err := reference.ParseNormalizedNamed(strings.TrimPrefix(refString, "//")) if err != nil { return nil, err } ref = reference.TagNameOnly(ref) if reference.IsNameOnly(ref) { return nil, fmt.Errorf("Docker reference %s has neither a tag nor a digest", reference.FamiliarString(ref)) } // A github.com/distribution/reference value can have a tag and a digest at the same time! // The docker/distribution API does not really support that (we can’t ask for an image with a specific // tag and digest), so fail. This MAY be accepted in the future. // (Even if it were supported, the semantics of policy namespaces are unclear - should we drop // the tag or the digest first?) _, isTagged := ref.(reference.NamedTagged) _, isDigested := ref.(reference.Canonical) if isTagged && isDigested { return nil, fmt.Errorf("Docker references with both a tag and digest are currently not supported") } return dummyDockerReference{ ref: ref, }, nil } func (ref dummyDockerReference) Transport() sigtypes.ImageTransport { return dummyDockerTransport } // StringWithinTransport returns a string representation of the reference, which MUST be such that // reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. // NOTE: The returned string is not promised to be equal to the original input to ParseReference; // e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. func (ref dummyDockerReference) StringWithinTransport() string { return "//" + reference.FamiliarString(ref.ref) } // DockerReference returns a Docker reference associated with this reference // (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, // not e.g. after redirect or alias processing), or nil if unknown/not applicable. func (ref dummyDockerReference) DockerReference() reference.Named { return ref.ref } // PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. // This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; // The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical // (i.e. various references with exactly the same semantics should return the same configuration identity) // It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but // not required/guaranteed that it will be a valid input to Transport().ParseReference(). // Returns "" if configuration identities for these references are not supported. 
func (ref dummyDockerReference) PolicyConfigurationIdentity() string { res, err := policyconfiguration.DockerReferenceIdentity(ref.ref) if res == "" || err != nil { // Coverage: Should never happen, NewReference above should refuse values which could cause a failure. panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err)) } return res } // PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search // for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed // in order, terminating on first match, and an implicit "" is always checked at the end. // It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), // and each following element to be a prefix of the element preceding it. func (ref dummyDockerReference) PolicyConfigurationNamespaces() []string { return policyconfiguration.DockerReferenceNamespaces(ref.ref) } func (ref dummyDockerReference) NewImage(ctx *sigtypes.SystemContext) (sigtypes.Image, error) { panic("Unimplemented") } func (ref dummyDockerReference) NewImageSource(ctx *sigtypes.SystemContext, requestedManifestMIMETypes []string) (sigtypes.ImageSource, error) { panic("Unimplemented") } func (ref dummyDockerReference) NewImageDestination(ctx *sigtypes.SystemContext) (sigtypes.ImageDestination, error) { panic("Unimplemented") } func (ref dummyDockerReference) DeleteImage(ctx *sigtypes.SystemContext) error { panic("Unimplemented") } // unparsedImage implements sigtypes.UnparsedImage, to allow evaluating the signature policy // against an image without having to make it pullable by containers/image type unparsedImage struct { ref sigtypes.ImageReference manifest []byte signature []byte } func newUnparsedImage(expectedIdentity string, signature, manifest []byte) sigtypes.UnparsedImage { // We check the error in Validate() ref, _ := parseDockerReference("//" + expectedIdentity) return &unparsedImage{ref: ref, manifest: manifest, signature: signature} } // Reference returns the reference used to set up this source, _as specified by the user_ // (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. func (ui *unparsedImage) Reference() sigtypes.ImageReference { return ui.ref } // Close removes resources associated with an initialized UnparsedImage, if any. func (ui *unparsedImage) Close() error { return nil } // Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need. func (ui *unparsedImage) Manifest() ([]byte, string, error) { return ui.manifest, "", nil } // Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need. func (ui *unparsedImage) Signatures(context.Context) ([][]byte, error) { return [][]byte{ui.signature}, nil }
[ "\"GNUPGHOME\"" ]
[]
[ "GNUPGHOME" ]
[]
["GNUPGHOME"]
go
1
0
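The row above records one GNUPGHOME lookup, used to build the default path of the public GPG keyring passed via --public-key. A minimal sketch of that path construction; the fallback to $HOME/.gnupg is an assumption added for illustration rather than the command's documented behaviour.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// defaultPubringPath resolves the default keyring location relative to
// GNUPGHOME, as recorded for this row. The HOME-based fallback is an
// assumption introduced only for this sketch.
func defaultPubringPath() string {
	home := os.Getenv("GNUPGHOME")
	if home == "" {
		home = filepath.Join(os.Getenv("HOME"), ".gnupg")
	}
	return filepath.Join(home, "pubring.gpg")
}

func main() {
	fmt.Println("default public keyring:", defaultPubringPath())
}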
external/github.com/marten-seemann/qtls/common.go
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package qtls import ( "container/list" "crypto" "crypto/rand" "crypto/sha512" "crypto/tls" "crypto/x509" "errors" "fmt" "io" "math/big" "os" "strings" "sync" "time" "golang.org/x/sys/cpu" ) const ( VersionTLS10 = 0x0301 VersionTLS11 = 0x0302 VersionTLS12 = 0x0303 VersionTLS13 = 0x0304 // Deprecated: SSLv3 is cryptographically broken, and will be // removed in Go 1.14. See golang.org/issue/32716. VersionSSL30 = 0x0300 ) const ( maxPlaintext = 16384 // maximum plaintext payload length maxCiphertext = 16384 + 2048 // maximum ciphertext payload length maxCiphertextTLS13 = 16384 + 256 // maximum ciphertext length in TLS 1.3 recordHeaderLen = 5 // record header length maxHandshake = 65536 // maximum handshake we support (protocol max is 16 MB) maxUselessRecords = 16 // maximum number of consecutive non-advancing records ) // TLS record types. type recordType uint8 const ( recordTypeChangeCipherSpec recordType = 20 recordTypeAlert recordType = 21 recordTypeHandshake recordType = 22 recordTypeApplicationData recordType = 23 ) // TLS handshake message types. const ( typeHelloRequest uint8 = 0 typeClientHello uint8 = 1 typeServerHello uint8 = 2 typeNewSessionTicket uint8 = 4 typeEndOfEarlyData uint8 = 5 typeEncryptedExtensions uint8 = 8 typeCertificate uint8 = 11 typeServerKeyExchange uint8 = 12 typeCertificateRequest uint8 = 13 typeServerHelloDone uint8 = 14 typeCertificateVerify uint8 = 15 typeClientKeyExchange uint8 = 16 typeFinished uint8 = 20 typeCertificateStatus uint8 = 22 typeKeyUpdate uint8 = 24 typeNextProtocol uint8 = 67 // Not IANA assigned typeMessageHash uint8 = 254 // synthetic message ) // TLS compression types. const ( compressionNone uint8 = 0 ) type Extension struct { Type uint16 Data []byte } // TLS extension numbers const ( extensionServerName uint16 = 0 extensionStatusRequest uint16 = 5 extensionSupportedCurves uint16 = 10 // supported_groups in TLS 1.3, see RFC 8446, Section 4.2.7 extensionSupportedPoints uint16 = 11 extensionSignatureAlgorithms uint16 = 13 extensionALPN uint16 = 16 extensionSCT uint16 = 18 extensionSessionTicket uint16 = 35 extensionPreSharedKey uint16 = 41 extensionEarlyData uint16 = 42 extensionSupportedVersions uint16 = 43 extensionCookie uint16 = 44 extensionPSKModes uint16 = 45 extensionCertificateAuthorities uint16 = 47 extensionSignatureAlgorithmsCert uint16 = 50 extensionKeyShare uint16 = 51 extensionNextProtoNeg uint16 = 13172 // not IANA assigned extensionRenegotiationInfo uint16 = 0xff01 ) // TLS signaling cipher suite values const ( scsvRenegotiation uint16 = 0x00ff ) type EncryptionLevel uint8 const ( EncryptionHandshake EncryptionLevel = iota Encryption0RTT EncryptionApplication ) // CurveID is a tls.CurveID type CurveID = tls.CurveID const ( CurveP256 CurveID = 23 CurveP384 CurveID = 24 CurveP521 CurveID = 25 X25519 CurveID = 29 ) // TLS 1.3 Key Share. See RFC 8446, Section 4.2.8. type keyShare struct { group CurveID data []byte } // TLS 1.3 PSK Key Exchange Modes. See RFC 8446, Section 4.2.9. const ( pskModePlain uint8 = 0 pskModeDHE uint8 = 1 ) // TLS 1.3 PSK Identity. Can be a Session Ticket, or a reference to a saved // session. See RFC 8446, Section 4.2.11. 
type pskIdentity struct { label []byte obfuscatedTicketAge uint32 } // TLS Elliptic Curve Point Formats // https://www.iana.org/assignments/tls-parameters/tls-parameters.xml#tls-parameters-9 const ( pointFormatUncompressed uint8 = 0 ) // TLS CertificateStatusType (RFC 3546) const ( statusTypeOCSP uint8 = 1 ) // Certificate types (for certificateRequestMsg) const ( certTypeRSASign = 1 certTypeECDSASign = 64 // ECDSA or EdDSA keys, see RFC 8422, Section 3. ) // Signature algorithms (for internal signaling use). Starting at 225 to avoid overlap with // TLS 1.2 codepoints (RFC 5246, Appendix A.4.1), with which these have nothing to do. const ( signaturePKCS1v15 uint8 = iota + 225 signatureRSAPSS signatureECDSA signatureEd25519 ) // directSigning is a standard Hash value that signals that no pre-hashing // should be performed, and that the input should be signed directly. It is the // hash function associated with the Ed25519 signature scheme. var directSigning crypto.Hash = 0 // supportedSignatureAlgorithms contains the signature and hash algorithms that // the code advertises as supported in a TLS 1.2+ ClientHello and in a TLS 1.2+ // CertificateRequest. The two fields are merged to match with TLS 1.3. // Note that in TLS 1.2, the ECDSA algorithms are not constrained to P-256, etc. var supportedSignatureAlgorithms = []SignatureScheme{ PSSWithSHA256, ECDSAWithP256AndSHA256, Ed25519, PSSWithSHA384, PSSWithSHA512, PKCS1WithSHA256, PKCS1WithSHA384, PKCS1WithSHA512, ECDSAWithP384AndSHA384, ECDSAWithP521AndSHA512, PKCS1WithSHA1, ECDSAWithSHA1, } // supportedSignatureAlgorithmsTLS12 contains the signature and hash algorithms // that are supported in TLS 1.2, where it is possible to distinguish the // protocol version. This is temporary, see Issue 32425. var supportedSignatureAlgorithmsTLS12 = []SignatureScheme{ PKCS1WithSHA256, ECDSAWithP256AndSHA256, Ed25519, PKCS1WithSHA384, PKCS1WithSHA512, ECDSAWithP384AndSHA384, ECDSAWithP521AndSHA512, PKCS1WithSHA1, ECDSAWithSHA1, } // helloRetryRequestRandom is set as the Random value of a ServerHello // to signal that the message is actually a HelloRetryRequest. var helloRetryRequestRandom = []byte{ // See RFC 8446, Section 4.1.3. 0xCF, 0x21, 0xAD, 0x74, 0xE5, 0x9A, 0x61, 0x11, 0xBE, 0x1D, 0x8C, 0x02, 0x1E, 0x65, 0xB8, 0x91, 0xC2, 0xA2, 0x11, 0x16, 0x7A, 0xBB, 0x8C, 0x5E, 0x07, 0x9E, 0x09, 0xE2, 0xC8, 0xA8, 0x33, 0x9C, } const ( // downgradeCanaryTLS12 or downgradeCanaryTLS11 is embedded in the server // random as a downgrade protection if the server would be capable of // negotiating a higher version. See RFC 8446, Section 4.1.3. downgradeCanaryTLS12 = "DOWNGRD\x01" downgradeCanaryTLS11 = "DOWNGRD\x00" ) // ConnectionState records basic TLS details about the connection. type ConnectionState struct { Version uint16 // TLS version used by the connection (e.g. VersionTLS12) HandshakeComplete bool // TLS handshake is complete DidResume bool // connection resumes a previous TLS connection CipherSuite uint16 // cipher suite in use (TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, ...) 
NegotiatedProtocol string // negotiated next protocol (not guaranteed to be from Config.NextProtos) NegotiatedProtocolIsMutual bool // negotiated protocol was advertised by server (client side only) ServerName string // server name requested by client, if any (server side only) PeerCertificates []*x509.Certificate // certificate chain presented by remote peer VerifiedChains [][]*x509.Certificate // verified chains built from PeerCertificates SignedCertificateTimestamps [][]byte // SCTs from the peer, if any OCSPResponse []byte // stapled OCSP response from peer, if any Used0RTT bool // true if 0-RTT was both offered and accepted // ekm is a closure exposed via ExportKeyingMaterial. ekm func(label string, context []byte, length int) ([]byte, error) // TLSUnique contains the "tls-unique" channel binding value (see RFC // 5929, section 3). For resumed sessions this value will be nil // because resumption does not include enough context (see // https://mitls.org/pages/attacks/3SHAKE#channelbindings). This will // change in future versions of Go once the TLS master-secret fix has // been standardized and implemented. It is not defined in TLS 1.3. TLSUnique []byte } // ExportKeyingMaterial returns length bytes of exported key material in a new // slice as defined in RFC 5705. If context is nil, it is not used as part of // the seed. If the connection was set to allow renegotiation via // Config.Renegotiation, this function will return an error. func (cs *ConnectionState) ExportKeyingMaterial(label string, context []byte, length int) ([]byte, error) { return cs.ekm(label, context, length) } // ClientAuthType is tls.ClientAuthType type ClientAuthType = tls.ClientAuthType const ( NoClientCert ClientAuthType = iota RequestClientCert RequireAnyClientCert VerifyClientCertIfGiven RequireAndVerifyClientCert ) // requiresClientCert reports whether the ClientAuthType requires a client // certificate to be provided. func requiresClientCert(c ClientAuthType) bool { switch c { case RequireAnyClientCert, RequireAndVerifyClientCert: return true default: return false } } // ClientSessionState contains the state needed by clients to resume TLS // sessions. type ClientSessionState struct { sessionTicket []uint8 // Encrypted ticket used for session resumption with server vers uint16 // SSL/TLS version negotiated for the session cipherSuite uint16 // Ciphersuite negotiated for the session masterSecret []byte // Full handshake MasterSecret, or TLS 1.3 resumption_master_secret serverCertificates []*x509.Certificate // Certificate chain presented by the server verifiedChains [][]*x509.Certificate // Certificate chains we built for verification receivedAt time.Time // When the session ticket was received from the server // TLS 1.3 fields. nonce []byte // Ticket nonce sent by the server, to derive PSK useBy time.Time // Expiration of the ticket lifetime as set by the server ageAdd uint32 // Random obfuscation factor for sending the ticket age } // ClientSessionCache is a cache of ClientSessionState objects that can be used // by a client to resume a TLS session with a given server. ClientSessionCache // implementations should expect to be called concurrently from different // goroutines. Up to TLS 1.2, only ticket-based resumption is supported, not // SessionID-based resumption. In TLS 1.3 they were merged into PSK modes, which // are supported via this interface. type ClientSessionCache interface { // Get searches for a ClientSessionState associated with the given key. // On return, ok is true if one was found. 
Get(sessionKey string) (session *ClientSessionState, ok bool) // Put adds the ClientSessionState to the cache with the given key. It might // get called multiple times in a connection if a TLS 1.3 server provides // more than one session ticket. If called with a nil *ClientSessionState, // it should remove the cache entry. Put(sessionKey string, cs *ClientSessionState) } // SignatureScheme is a tls.SignatureScheme type SignatureScheme = tls.SignatureScheme const ( // RSASSA-PKCS1-v1_5 algorithms. PKCS1WithSHA256 SignatureScheme = 0x0401 PKCS1WithSHA384 SignatureScheme = 0x0501 PKCS1WithSHA512 SignatureScheme = 0x0601 // RSASSA-PSS algorithms with public key OID rsaEncryption. PSSWithSHA256 SignatureScheme = 0x0804 PSSWithSHA384 SignatureScheme = 0x0805 PSSWithSHA512 SignatureScheme = 0x0806 // ECDSA algorithms. Only constrained to a specific curve in TLS 1.3. ECDSAWithP256AndSHA256 SignatureScheme = 0x0403 ECDSAWithP384AndSHA384 SignatureScheme = 0x0503 ECDSAWithP521AndSHA512 SignatureScheme = 0x0603 // EdDSA algorithms. Ed25519 SignatureScheme = 0x0807 // Legacy signature and hash algorithms for TLS 1.2. PKCS1WithSHA1 SignatureScheme = 0x0201 ECDSAWithSHA1 SignatureScheme = 0x0203 ) // A ClientHelloInfo is a tls.ClientHelloInfo type ClientHelloInfo = tls.ClientHelloInfo // The CertificateRequestInfo is a tls.CertificateRequestInfo type CertificateRequestInfo = tls.CertificateRequestInfo // RenegotiationSupport enumerates the different levels of support for TLS // renegotiation. TLS renegotiation is the act of performing subsequent // handshakes on a connection after the first. This significantly complicates // the state machine and has been the source of numerous, subtle security // issues. Initiating a renegotiation is not supported, but support for // accepting renegotiation requests may be enabled. // // Even when enabled, the server may not change its identity between handshakes // (i.e. the leaf certificate must be the same). Additionally, concurrent // handshake and application data flow is not permitted so renegotiation can // only be used with protocols that synchronise with the renegotiation, such as // HTTPS. // // Renegotiation is not defined in TLS 1.3. type RenegotiationSupport int const ( // RenegotiateNever disables renegotiation. RenegotiateNever RenegotiationSupport = iota // RenegotiateOnceAsClient allows a remote server to request // renegotiation once per connection. RenegotiateOnceAsClient // RenegotiateFreelyAsClient allows a remote server to repeatedly // request renegotiation. RenegotiateFreelyAsClient ) // A Config structure is used to configure a TLS client or server. // After one has been passed to a TLS function it must not be // modified. A Config may be reused; the tls package will also not // modify it. type Config struct { // Rand provides the source of entropy for nonces and RSA blinding. // If Rand is nil, TLS uses the cryptographic random reader in package // crypto/rand. // The Reader must be safe for use by multiple goroutines. Rand io.Reader // Time returns the current time as the number of seconds since the epoch. // If Time is nil, TLS uses time.Now. Time func() time.Time // Certificates contains one or more certificate chains to present to // the other side of the connection. Server configurations must include // at least one certificate or else set GetCertificate. Clients doing // client-authentication may set either Certificates or // GetClientCertificate. 
Certificates []Certificate // NameToCertificate maps from a certificate name to an element of // Certificates. Note that a certificate name can be of the form // '*.example.com' and so doesn't have to be a domain name as such. // See Config.BuildNameToCertificate // The nil value causes the first element of Certificates to be used // for all connections. NameToCertificate map[string]*Certificate // GetCertificate returns a Certificate based on the given // ClientHelloInfo. It will only be called if the client supplies SNI // information or if Certificates is empty. // // If GetCertificate is nil or returns nil, then the certificate is // retrieved from NameToCertificate. If NameToCertificate is nil, the // first element of Certificates will be used. GetCertificate func(*ClientHelloInfo) (*Certificate, error) // GetClientCertificate, if not nil, is called when a server requests a // certificate from a client. If set, the contents of Certificates will // be ignored. // // If GetClientCertificate returns an error, the handshake will be // aborted and that error will be returned. Otherwise // GetClientCertificate must return a non-nil Certificate. If // Certificate.Certificate is empty then no certificate will be sent to // the server. If this is unacceptable to the server then it may abort // the handshake. // // GetClientCertificate may be called multiple times for the same // connection if renegotiation occurs or if TLS 1.3 is in use. GetClientCertificate func(*CertificateRequestInfo) (*Certificate, error) // GetConfigForClient, if not nil, is called after a ClientHello is // received from a client. It may return a non-nil Config in order to // change the Config that will be used to handle this connection. If // the returned Config is nil, the original Config will be used. The // Config returned by this callback may not be subsequently modified. // // If GetConfigForClient is nil, the Config passed to Server() will be // used for all connections. // // Uniquely for the fields in the returned Config, session ticket keys // will be duplicated from the original Config if not set. // Specifically, if SetSessionTicketKeys was called on the original // config but not on the returned config then the ticket keys from the // original config will be copied into the new config before use. // Otherwise, if SessionTicketKey was set in the original config but // not in the returned config then it will be copied into the returned // config before use. If neither of those cases applies then the key // material from the returned config will be used for session tickets. GetConfigForClient func(*ClientHelloInfo) (*Config, error) // VerifyPeerCertificate, if not nil, is called after normal // certificate verification by either a TLS client or server. It // receives the raw ASN.1 certificates provided by the peer and also // any verified chains that normal processing found. If it returns a // non-nil error, the handshake is aborted and that error results. // // If normal verification fails then the handshake will abort before // considering this callback. If normal verification is disabled by // setting InsecureSkipVerify, or (for a server) when ClientAuth is // RequestClientCert or RequireAnyClientCert, then this callback will // be considered but the verifiedChains argument will always be nil. VerifyPeerCertificate func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error // RootCAs defines the set of root certificate authorities // that clients use when verifying server certificates. 
// If RootCAs is nil, TLS uses the host's root CA set. RootCAs *x509.CertPool // NextProtos is a list of supported application level protocols, in // order of preference. NextProtos []string // ServerName is used to verify the hostname on the returned // certificates unless InsecureSkipVerify is given. It is also included // in the client's handshake to support virtual hosting unless it is // an IP address. ServerName string // ClientAuth determines the server's policy for // TLS Client Authentication. The default is NoClientCert. ClientAuth ClientAuthType // ClientCAs defines the set of root certificate authorities // that servers use if required to verify a client certificate // by the policy in ClientAuth. ClientCAs *x509.CertPool // InsecureSkipVerify controls whether a client verifies the // server's certificate chain and host name. // If InsecureSkipVerify is true, TLS accepts any certificate // presented by the server and any host name in that certificate. // In this mode, TLS is susceptible to man-in-the-middle attacks. // This should be used only for testing. InsecureSkipVerify bool // CipherSuites is a list of supported cipher suites for TLS versions up to // TLS 1.2. If CipherSuites is nil, a default list of secure cipher suites // is used, with a preference order based on hardware performance. The // default cipher suites might change over Go versions. Note that TLS 1.3 // ciphersuites are not configurable. CipherSuites []uint16 // PreferServerCipherSuites controls whether the server selects the // client's most preferred ciphersuite, or the server's most preferred // ciphersuite. If true then the server's preference, as expressed in // the order of elements in CipherSuites, is used. PreferServerCipherSuites bool // SessionTicketsDisabled may be set to true to disable session ticket and // PSK (resumption) support. Note that on clients, session ticket support is // also disabled if ClientSessionCache is nil. SessionTicketsDisabled bool // SessionTicketKey is used by TLS servers to provide session resumption. // See RFC 5077 and the PSK mode of RFC 8446. If zero, it will be filled // with random data before the first server handshake. // // If multiple servers are terminating connections for the same host // they should all have the same SessionTicketKey. If the // SessionTicketKey leaks, previously recorded and future TLS // connections using that key might be compromised. SessionTicketKey [32]byte // ClientSessionCache is a cache of ClientSessionState entries for TLS // session resumption. It is only used by clients. ClientSessionCache ClientSessionCache // MinVersion contains the minimum SSL/TLS version that is acceptable. // If zero, then TLS 1.0 is taken as the minimum. MinVersion uint16 // MaxVersion contains the maximum SSL/TLS version that is acceptable. // If zero, then the maximum version supported by this package is used, // which is currently TLS 1.3. MaxVersion uint16 // CurvePreferences contains the elliptic curves that will be used in // an ECDHE handshake, in preference order. If empty, the default will // be used. The client will use the first preference as the type for // its key share in TLS 1.3. This may change in the future. CurvePreferences []CurveID // DynamicRecordSizingDisabled disables adaptive sizing of TLS records. // When true, the largest possible TLS record size is always used. When // false, the size of TLS records may be adjusted in an attempt to // improve latency. 
DynamicRecordSizingDisabled bool // Renegotiation controls what types of renegotiation are supported. // The default, none, is correct for the vast majority of applications. Renegotiation RenegotiationSupport // KeyLogWriter optionally specifies a destination for TLS master secrets // in NSS key log format that can be used to allow external programs // such as Wireshark to decrypt TLS connections. // See https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/Key_Log_Format. // Use of KeyLogWriter compromises security and should only be // used for debugging. KeyLogWriter io.Writer // GetExtensions, if not nil, is called before a message that allows // sending of extensions is sent. // Currently only implemented for the ClientHello message (for the client) // and for the EncryptedExtensions message (for the server). // Only valid for TLS 1.3. GetExtensions func(handshakeMessageType uint8) []Extension // ReceivedExtensions, if not nil, is called when a message that allows the // inclusion of extensions is received. // It is called with an empty slice of extensions, if the message didn't // contain any extensions. // Currently only implemented for the ClientHello message (sent by the // client) and for the EncryptedExtensions message (sent by the server). // Only valid for TLS 1.3. ReceivedExtensions func(handshakeMessageType uint8, exts []Extension) serverInitOnce sync.Once // guards calling (*Config).serverInit // mutex protects sessionTicketKeys. mutex sync.RWMutex // sessionTicketKeys contains zero or more ticket keys. If the length // is zero, SessionTicketsDisabled must be true. The first key is used // for new tickets and any subsequent keys can be used to decrypt old // tickets. sessionTicketKeys []ticketKey // AlternativeRecordLayer is used by QUIC AlternativeRecordLayer RecordLayer // Enforce the selection of a supported application protocol. // Only works for TLS 1.3. // If enabled, client and server have to agree on an application protocol. // Otherwise, connection establishment fails. EnforceNextProtoSelection bool // If MaxEarlyData is greater than 0, the client will be allowed to send early // data when resuming a session. // Requires the AlternativeRecordLayer to be set. // // It has no meaning on the client. MaxEarlyData uint32 // The Accept0RTT callback is called when the client offers 0-RTT. // The server then has to decide if it wants to accept or reject 0-RTT. // It is only used for servers. Accept0RTT func(appData []byte) bool // 0RTTRejected is called when the server rejectes 0-RTT. // It is only used for clients. Rejected0RTT func() // If set, the client will export the 0-RTT key when resuming a session that // allows sending of early data. // Requires the AlternativeRecordLayer to be set. // // It has no meaning to the server. Enable0RTT bool } // A RecordLayer handles encrypting and decrypting of TLS messages. type RecordLayer interface { SetReadKey(encLevel EncryptionLevel, suite *CipherSuiteTLS13, trafficSecret []byte) SetWriteKey(encLevel EncryptionLevel, suite *CipherSuiteTLS13, trafficSecret []byte) ReadHandshakeMessage() ([]byte, error) WriteRecord([]byte) (int, error) SendAlert(uint8) } // ticketKeyNameLen is the number of bytes of identifier that is prepended to // an encrypted session ticket in order to identify the key used to encrypt it. const ticketKeyNameLen = 16 // ticketKey is the internal representation of a session ticket key. type ticketKey struct { // keyName is an opaque byte string that serves to identify the session // ticket key. 
It's exposed as plaintext in every session ticket. keyName [ticketKeyNameLen]byte aesKey [16]byte hmacKey [16]byte } // ticketKeyFromBytes converts from the external representation of a session // ticket key to a ticketKey. Externally, session ticket keys are 32 random // bytes and this function expands that into sufficient name and key material. func ticketKeyFromBytes(b [32]byte) (key ticketKey) { hashed := sha512.Sum512(b[:]) copy(key.keyName[:], hashed[:ticketKeyNameLen]) copy(key.aesKey[:], hashed[ticketKeyNameLen:ticketKeyNameLen+16]) copy(key.hmacKey[:], hashed[ticketKeyNameLen+16:ticketKeyNameLen+32]) return key } // maxSessionTicketLifetime is the maximum allowed lifetime of a TLS 1.3 session // ticket, and the lifetime we set for tickets we send. const maxSessionTicketLifetime = 7 * 24 * time.Hour // Clone returns a shallow clone of c. It is safe to clone a Config that is // being used concurrently by a TLS client or server. func (c *Config) Clone() *Config { // Running serverInit ensures that it's safe to read // SessionTicketsDisabled. c.serverInitOnce.Do(func() { c.serverInit(nil) }) var sessionTicketKeys []ticketKey c.mutex.RLock() sessionTicketKeys = c.sessionTicketKeys c.mutex.RUnlock() return &Config{ Rand: c.Rand, Time: c.Time, Certificates: c.Certificates, NameToCertificate: c.NameToCertificate, GetCertificate: c.GetCertificate, GetClientCertificate: c.GetClientCertificate, GetConfigForClient: c.GetConfigForClient, VerifyPeerCertificate: c.VerifyPeerCertificate, RootCAs: c.RootCAs, NextProtos: c.NextProtos, ServerName: c.ServerName, ClientAuth: c.ClientAuth, ClientCAs: c.ClientCAs, InsecureSkipVerify: c.InsecureSkipVerify, CipherSuites: c.CipherSuites, PreferServerCipherSuites: c.PreferServerCipherSuites, SessionTicketsDisabled: c.SessionTicketsDisabled, SessionTicketKey: c.SessionTicketKey, ClientSessionCache: c.ClientSessionCache, MinVersion: c.MinVersion, MaxVersion: c.MaxVersion, CurvePreferences: c.CurvePreferences, DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled, Renegotiation: c.Renegotiation, KeyLogWriter: c.KeyLogWriter, GetExtensions: c.GetExtensions, ReceivedExtensions: c.ReceivedExtensions, sessionTicketKeys: sessionTicketKeys, EnforceNextProtoSelection: c.EnforceNextProtoSelection, MaxEarlyData: c.MaxEarlyData, Enable0RTT: c.Enable0RTT, Accept0RTT: c.Accept0RTT, Rejected0RTT: c.Rejected0RTT, } } // serverInit is run under c.serverInitOnce to do initialization of c. If c was // returned by a GetConfigForClient callback then the argument should be the // Config that was passed to Server, otherwise it should be nil. func (c *Config) serverInit(originalConfig *Config) { if c.SessionTicketsDisabled || len(c.ticketKeys()) != 0 { return } alreadySet := false for _, b := range c.SessionTicketKey { if b != 0 { alreadySet = true break } } if !alreadySet { if originalConfig != nil { copy(c.SessionTicketKey[:], originalConfig.SessionTicketKey[:]) } else if _, err := io.ReadFull(c.rand(), c.SessionTicketKey[:]); err != nil { c.SessionTicketsDisabled = true return } } if originalConfig != nil { originalConfig.mutex.RLock() c.sessionTicketKeys = originalConfig.sessionTicketKeys originalConfig.mutex.RUnlock() } else { c.sessionTicketKeys = []ticketKey{ticketKeyFromBytes(c.SessionTicketKey)} } } func (c *Config) ticketKeys() []ticketKey { c.mutex.RLock() // c.sessionTicketKeys is constant once created. SetSessionTicketKeys // will only update it by replacing it with a new value. 
ret := c.sessionTicketKeys c.mutex.RUnlock() return ret } // SetSessionTicketKeys updates the session ticket keys for a server. The first // key will be used when creating new tickets, while all keys can be used for // decrypting tickets. It is safe to call this function while the server is // running in order to rotate the session ticket keys. The function will panic // if keys is empty. func (c *Config) SetSessionTicketKeys(keys [][32]byte) { if len(keys) == 0 { panic("tls: keys must have at least one key") } newKeys := make([]ticketKey, len(keys)) for i, bytes := range keys { newKeys[i] = ticketKeyFromBytes(bytes) } c.mutex.Lock() c.sessionTicketKeys = newKeys c.mutex.Unlock() } func (c *Config) rand() io.Reader { r := c.Rand if r == nil { return rand.Reader } return r } func (c *Config) time() time.Time { t := c.Time if t == nil { t = time.Now } return t() } func (c *Config) cipherSuites() []uint16 { s := c.CipherSuites if s == nil { s = defaultCipherSuites() } return s } var supportedVersions = []uint16{ VersionTLS13, VersionTLS12, VersionTLS11, VersionTLS10, VersionSSL30, } func (c *Config) supportedVersions(isClient bool) []uint16 { versions := make([]uint16, 0, len(supportedVersions)) for _, v := range supportedVersions { // TLS 1.0 is the default minimum version. if (c == nil || c.MinVersion == 0) && v < VersionTLS10 { continue } if c != nil && c.MinVersion != 0 && v < c.MinVersion { continue } if c != nil && c.MaxVersion != 0 && v > c.MaxVersion { continue } // TLS 1.0 is the minimum version supported as a client. if isClient && v < VersionTLS10 { continue } // TLS 1.3 is opt-out in Go 1.13. if v == VersionTLS13 && !isTLS13Supported() { continue } versions = append(versions, v) } return versions } // tls13Support caches the result for isTLS13Supported. var tls13Support struct { sync.Once cached bool } // isTLS13Supported returns whether the program enabled TLS 1.3 by not opting // out with GODEBUG=tls13=0. It's cached after the first execution. func isTLS13Supported() bool { return true tls13Support.Do(func() { tls13Support.cached = goDebugString("tls13") != "0" }) return tls13Support.cached } // goDebugString returns the value of the named GODEBUG key. // GODEBUG is of the form "key=val,key2=val2". func goDebugString(key string) string { s := os.Getenv("GODEBUG") for i := 0; i < len(s)-len(key)-1; i++ { if i > 0 && s[i-1] != ',' { continue } afterKey := s[i+len(key):] if afterKey[0] != '=' || s[i:i+len(key)] != key { continue } val := afterKey[1:] for i, b := range val { if b == ',' { return val[:i] } } return val } return "" } func (c *Config) maxSupportedVersion(isClient bool) uint16 { supportedVersions := c.supportedVersions(isClient) if len(supportedVersions) == 0 { return 0 } return supportedVersions[0] } // supportedVersionsFromMax returns a list of supported versions derived from a // legacy maximum version value. Note that only versions supported by this // library are returned. Any newer peer will use supportedVersions anyway. 
func supportedVersionsFromMax(maxVersion uint16) []uint16 { versions := make([]uint16, 0, len(supportedVersions)) for _, v := range supportedVersions { if v > maxVersion { continue } versions = append(versions, v) } return versions } var defaultCurvePreferences = []CurveID{X25519, CurveP256, CurveP384, CurveP521} func (c *Config) curvePreferences() []CurveID { if c == nil || len(c.CurvePreferences) == 0 { return defaultCurvePreferences } return c.CurvePreferences } // mutualVersion returns the protocol version to use given the advertised // versions of the peer. Priority is given to the peer preference order. func (c *Config) mutualVersion(isClient bool, peerVersions []uint16) (uint16, bool) { supportedVersions := c.supportedVersions(isClient) for _, peerVersion := range peerVersions { for _, v := range supportedVersions { if v == peerVersion { return v, true } } } return 0, false } // getCertificate returns the best certificate for the given ClientHelloInfo, // defaulting to the first element of c.Certificates. func (c *Config) getCertificate(clientHello *ClientHelloInfo) (*Certificate, error) { if c.GetCertificate != nil && (len(c.Certificates) == 0 || len(clientHello.ServerName) > 0) { cert, err := c.GetCertificate(clientHello) if cert != nil || err != nil { return cert, err } } if len(c.Certificates) == 0 { return nil, errors.New("tls: no certificates configured") } if len(c.Certificates) == 1 || c.NameToCertificate == nil { // There's only one choice, so no point doing any work. return &c.Certificates[0], nil } name := strings.ToLower(clientHello.ServerName) for len(name) > 0 && name[len(name)-1] == '.' { name = name[:len(name)-1] } if cert, ok := c.NameToCertificate[name]; ok { return cert, nil } // try replacing labels in the name with wildcards until we get a // match. labels := strings.Split(name, ".") for i := range labels { labels[i] = "*" candidate := strings.Join(labels, ".") if cert, ok := c.NameToCertificate[candidate]; ok { return cert, nil } } // If nothing matches, return the first certificate. return &c.Certificates[0], nil } // BuildNameToCertificate parses c.Certificates and builds c.NameToCertificate // from the CommonName and SubjectAlternateName fields of each of the leaf // certificates. func (c *Config) BuildNameToCertificate() { c.NameToCertificate = make(map[string]*Certificate) for i := range c.Certificates { cert := &c.Certificates[i] x509Cert := cert.Leaf if x509Cert == nil { var err error x509Cert, err = x509.ParseCertificate(cert.Certificate[0]) if err != nil { continue } } if len(x509Cert.Subject.CommonName) > 0 { c.NameToCertificate[x509Cert.Subject.CommonName] = cert } for _, san := range x509Cert.DNSNames { c.NameToCertificate[san] = cert } } } const ( keyLogLabelTLS12 = "CLIENT_RANDOM" keyLogLabelEarlyTraffic = "CLIENT_EARLY_TRAFFIC_SECRET" keyLogLabelClientHandshake = "CLIENT_HANDSHAKE_TRAFFIC_SECRET" keyLogLabelServerHandshake = "SERVER_HANDSHAKE_TRAFFIC_SECRET" keyLogLabelClientTraffic = "CLIENT_TRAFFIC_SECRET_0" keyLogLabelServerTraffic = "SERVER_TRAFFIC_SECRET_0" ) func (c *Config) writeKeyLog(label string, clientRandom, secret []byte) error { if c.KeyLogWriter == nil { return nil } logLine := []byte(fmt.Sprintf("%s %x %x\n", label, clientRandom, secret)) writerMutex.Lock() _, err := c.KeyLogWriter.Write(logLine) writerMutex.Unlock() return err } // writerMutex protects all KeyLogWriters globally. It is rarely enabled, // and is only for debugging, so a global mutex saves space. 
var writerMutex sync.Mutex // A Certificate is a tls.Certificate type Certificate = tls.Certificate type handshakeMessage interface { marshal() []byte unmarshal([]byte) bool } // lruSessionCache is a ClientSessionCache implementation that uses an LRU // caching strategy. type lruSessionCache struct { sync.Mutex m map[string]*list.Element q *list.List capacity int } type lruSessionCacheEntry struct { sessionKey string state *ClientSessionState } // NewLRUClientSessionCache returns a ClientSessionCache with the given // capacity that uses an LRU strategy. If capacity is < 1, a default capacity // is used instead. func NewLRUClientSessionCache(capacity int) ClientSessionCache { const defaultSessionCacheCapacity = 64 if capacity < 1 { capacity = defaultSessionCacheCapacity } return &lruSessionCache{ m: make(map[string]*list.Element), q: list.New(), capacity: capacity, } } // Put adds the provided (sessionKey, cs) pair to the cache. If cs is nil, the entry // corresponding to sessionKey is removed from the cache instead. func (c *lruSessionCache) Put(sessionKey string, cs *ClientSessionState) { c.Lock() defer c.Unlock() if elem, ok := c.m[sessionKey]; ok { if cs == nil { c.q.Remove(elem) delete(c.m, sessionKey) } else { entry := elem.Value.(*lruSessionCacheEntry) entry.state = cs c.q.MoveToFront(elem) } return } if c.q.Len() < c.capacity { entry := &lruSessionCacheEntry{sessionKey, cs} c.m[sessionKey] = c.q.PushFront(entry) return } elem := c.q.Back() entry := elem.Value.(*lruSessionCacheEntry) delete(c.m, entry.sessionKey) entry.sessionKey = sessionKey entry.state = cs c.q.MoveToFront(elem) c.m[sessionKey] = elem } // Get returns the ClientSessionState value associated with a given key. It // returns (nil, false) if no value is found. func (c *lruSessionCache) Get(sessionKey string) (*ClientSessionState, bool) { c.Lock() defer c.Unlock() if elem, ok := c.m[sessionKey]; ok { c.q.MoveToFront(elem) return elem.Value.(*lruSessionCacheEntry).state, true } return nil, false } // TODO(jsing): Make these available to both crypto/x509 and crypto/tls. type dsaSignature struct { R, S *big.Int } type ecdsaSignature dsaSignature var emptyConfig Config func defaultConfig() *Config { return &emptyConfig } var ( once sync.Once varDefaultCipherSuites []uint16 varDefaultCipherSuitesTLS13 []uint16 ) func defaultCipherSuites() []uint16 { once.Do(initDefaultCipherSuites) return varDefaultCipherSuites } func defaultCipherSuitesTLS13() []uint16 { once.Do(initDefaultCipherSuites) return varDefaultCipherSuitesTLS13 } func initDefaultCipherSuites() { var topCipherSuites []uint16 // Check the cpu flags for each platform that has optimized GCM implementations. // Worst case, these variables will just all be false. var ( hasGCMAsmAMD64 = cpu.X86.HasAES && cpu.X86.HasPCLMULQDQ hasGCMAsmARM64 = cpu.ARM64.HasAES && cpu.ARM64.HasPMULL // Keep in sync with crypto/aes/cipher_s390x.go. // TODO: check for s390 // hasGCMAsmS390X = cpu.S390X.HasAES && cpu.S390X.HasAESCBC && cpu.S390X.HasAESCTR && (cpu.S390X.HasGHASH || cpu.S390X.HasAESGCM) hasGCMAsmS390X = false hasGCMAsm = hasGCMAsmAMD64 || hasGCMAsmARM64 || hasGCMAsmS390X ) // x/sys/cpu does not respect GODEBUG=cpu.all=off. As a workaround, // check it here. See https://github.com/golang/go/issues/33963 if strings.Contains(os.Getenv("GODEBUG"), "cpu.all=off") { hasGCMAsm = false } if hasGCMAsm { // If AES-GCM hardware is provided then prioritise AES-GCM // cipher suites. 
topCipherSuites = []uint16{ TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, } varDefaultCipherSuitesTLS13 = []uint16{ TLS_AES_128_GCM_SHA256, TLS_CHACHA20_POLY1305_SHA256, TLS_AES_256_GCM_SHA384, } } else { // Without AES-GCM hardware, we put the ChaCha20-Poly1305 // cipher suites first. topCipherSuites = []uint16{ TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, } varDefaultCipherSuitesTLS13 = []uint16{ TLS_CHACHA20_POLY1305_SHA256, TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, } } varDefaultCipherSuites = make([]uint16, 0, len(cipherSuites)) varDefaultCipherSuites = append(varDefaultCipherSuites, topCipherSuites...) NextCipherSuite: for _, suite := range cipherSuites { if suite.flags&suiteDefaultOff != 0 { continue } for _, existing := range varDefaultCipherSuites { if existing == suite.id { continue NextCipherSuite } } varDefaultCipherSuites = append(varDefaultCipherSuites, suite.id) } } func unexpectedMessageError(wanted, got interface{}) error { return fmt.Errorf("tls: received unexpected handshake message of type %T when waiting for %T", got, wanted) } func isSupportedSignatureAlgorithm(sigAlg SignatureScheme, supportedSignatureAlgorithms []SignatureScheme) bool { for _, s := range supportedSignatureAlgorithms { if s == sigAlg { return true } } return false } // signatureFromSignatureScheme maps a signature algorithm to the underlying // signature method (without hash function). func signatureFromSignatureScheme(signatureAlgorithm SignatureScheme) uint8 { switch signatureAlgorithm { case PKCS1WithSHA1, PKCS1WithSHA256, PKCS1WithSHA384, PKCS1WithSHA512: return signaturePKCS1v15 case PSSWithSHA256, PSSWithSHA384, PSSWithSHA512: return signatureRSAPSS case ECDSAWithSHA1, ECDSAWithP256AndSHA256, ECDSAWithP384AndSHA384, ECDSAWithP521AndSHA512: return signatureECDSA case Ed25519: return signatureEd25519 default: return 0 } }
[ "\"GODEBUG\"", "\"GODEBUG\"" ]
[]
[ "GODEBUG" ]
[]
["GODEBUG"]
go
1
0
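The row above reads GODEBUG twice: goDebugString parses it as comma-separated key=value pairs (to honor GODEBUG=tls13=0), and initDefaultCipherSuites checks it for cpu.all=off. A minimal, self-contained Go sketch of that lookup pattern; the helper name and package layout are illustrative and not taken from the file:

package main

import (
	"fmt"
	"os"
	"strings"
)

// lookupGoDebug returns the value for one key in GODEBUG, which is
// formatted as "key=val,key2=val2", or "" if the key is absent.
func lookupGoDebug(key string) string {
	for _, kv := range strings.Split(os.Getenv("GODEBUG"), ",") {
		parts := strings.SplitN(kv, "=", 2)
		if len(parts) == 2 && parts[0] == key {
			return parts[1]
		}
	}
	return ""
}

func main() {
	// With GODEBUG=tls13=0 in the environment this prints "0"; otherwise "".
	fmt.Printf("%q\n", lookupGoDebug("tls13"))
}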
upup/pkg/fi/cloudup/template_functions.go
/* Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /****************************************************************************** Template Functions are what map functions in the models, to internal logic in kops. This is the point where we connect static YAML configuration to dynamic runtime values in memory. When defining a new function: - Build the new function here - Define the new function in AddTo() dest["MyNewFunction"] = MyNewFunction // <-- Function Pointer ******************************************************************************/ package cloudup import ( "encoding/base64" "encoding/json" "fmt" "os" "path" "sort" "strconv" "strings" "text/template" "github.com/Masterminds/sprig/v3" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" kopscontrollerconfig "k8s.io/kops/cmd/kops-controller/pkg/config" "k8s.io/kops/pkg/apis/kops" apiModel "k8s.io/kops/pkg/apis/kops/model" "k8s.io/kops/pkg/apis/kops/util" "k8s.io/kops/pkg/dns" "k8s.io/kops/pkg/featureflag" "k8s.io/kops/pkg/model" "k8s.io/kops/pkg/resources/spotinst" "k8s.io/kops/pkg/wellknownports" "k8s.io/kops/upup/pkg/fi" "k8s.io/kops/upup/pkg/fi/cloudup/awsup" "k8s.io/kops/upup/pkg/fi/cloudup/gce" gcetpm "k8s.io/kops/upup/pkg/fi/cloudup/gce/tpm" "k8s.io/kops/util/pkg/env" "sigs.k8s.io/yaml" ) // TemplateFunctions provides a collection of methods used throughout the templates type TemplateFunctions struct { model.KopsModelContext cloud fi.Cloud } // AddTo defines the available functions we can use in our YAML models. // If we are trying to get a new function implemented it MUST // be defined here. func (tf *TemplateFunctions) AddTo(dest template.FuncMap, secretStore fi.SecretStore) (err error) { cluster := tf.Cluster dest["SharedVPC"] = tf.SharedVPC dest["ToJSON"] = tf.ToJSON dest["ToYAML"] = tf.ToYAML dest["UseBootstrapTokens"] = tf.UseBootstrapTokens // Remember that we may be on a different arch from the target. Hard-code for now. 
dest["replace"] = func(s, find, replace string) string { return strings.Replace(s, find, replace, -1) } dest["join"] = func(a []string, sep string) string { return strings.Join(a, sep) } sprigTxtFuncMap := sprig.TxtFuncMap() dest["nindent"] = sprigTxtFuncMap["nindent"] dest["indent"] = sprigTxtFuncMap["indent"] dest["contains"] = sprigTxtFuncMap["contains"] dest["trimPrefix"] = sprigTxtFuncMap["trimPrefix"] dest["semverCompare"] = sprigTxtFuncMap["semverCompare"] dest["ClusterName"] = tf.ClusterName dest["WithDefaultBool"] = func(v *bool, defaultValue bool) bool { if v != nil { return *v } return defaultValue } dest["GetInstanceGroup"] = tf.GetInstanceGroup dest["GetNodeInstanceGroups"] = tf.GetNodeInstanceGroups dest["HasHighlyAvailableControlPlane"] = tf.HasHighlyAvailableControlPlane dest["ControlPlaneControllerReplicas"] = tf.ControlPlaneControllerReplicas dest["APIServerNodeRole"] = tf.APIServerNodeRole dest["CloudTags"] = tf.CloudTagsForInstanceGroup dest["KubeDNS"] = func() *kops.KubeDNSConfig { return cluster.Spec.KubeDNS } dest["NodeLocalDNSClusterIP"] = func() string { if cluster.Spec.KubeProxy.ProxyMode == "ipvs" { return cluster.Spec.KubeDNS.ServerIP } return "__PILLAR__CLUSTER__DNS__" } dest["NodeLocalDNSHealthCheck"] = func() string { return fmt.Sprintf("%d", wellknownports.NodeLocalDNSHealthCheck) } dest["KopsControllerArgv"] = tf.KopsControllerArgv dest["KopsControllerConfig"] = tf.KopsControllerConfig dest["DnsControllerArgv"] = tf.DNSControllerArgv dest["ExternalDnsArgv"] = tf.ExternalDNSArgv dest["CloudControllerConfigArgv"] = tf.CloudControllerConfigArgv // TODO: Only for GCE? dest["EncodeGCELabel"] = gce.EncodeGCELabel dest["Region"] = func() string { return tf.Region } // will return openstack external ccm image location for current kubernetes version dest["OpenStackCCMTag"] = tf.OpenStackCCMTag dest["ProxyEnv"] = tf.ProxyEnv dest["KopsSystemEnv"] = tf.KopsSystemEnv dest["UseKopsControllerForNodeBootstrap"] = func() bool { return tf.UseKopsControllerForNodeBootstrap() } dest["DO_TOKEN"] = func() string { return os.Getenv("DIGITALOCEAN_ACCESS_TOKEN") } if featureflag.Spotinst.Enabled() { if creds, err := spotinst.LoadCredentials(); err == nil { dest["SpotinstToken"] = func() string { return creds.Token } dest["SpotinstAccount"] = func() string { return creds.Account } dest["SpotinstTokenBase64"] = func() string { return base64.StdEncoding.EncodeToString([]byte(creds.Token)) } dest["SpotinstAccountBase64"] = func() string { return base64.StdEncoding.EncodeToString([]byte(creds.Account)) } } } if cluster.Spec.Networking != nil && cluster.Spec.Networking.AmazonVPC != nil { c := cluster.Spec.Networking.AmazonVPC dest["AmazonVpcEnvVars"] = func() map[string]string { envVars := map[string]string{ "AWS_VPC_K8S_CNI_CONFIGURE_RPFILTER": "false", } for _, e := range c.Env { envVars[e.Name] = e.Value } return envVars } } if cluster.Spec.Networking != nil && cluster.Spec.Networking.Calico != nil { c := cluster.Spec.Networking.Calico dest["CalicoIPv4PoolIPIPMode"] = func() string { if c.EncapsulationMode != "ipip" { return "Never" } if c.IPIPMode != "" { return c.IPIPMode } if kops.CloudProviderID(cluster.Spec.CloudProvider) == kops.CloudProviderOpenstack { return "Always" } return "CrossSubnet" } dest["CalicoIPv4PoolVXLANMode"] = func() string { if c.EncapsulationMode != "vxlan" { return "Never" } if c.VXLANMode != "" { return c.VXLANMode } return "CrossSubnet" } } if cluster.Spec.Networking != nil && cluster.Spec.Networking.Cilium != nil { ciliumsecretString := "" ciliumsecret, _ 
:= secretStore.Secret("ciliumpassword") if ciliumsecret != nil { ciliumsecretString, err = ciliumsecret.AsString() if err != nil { return err } klog.V(4).Info("Cilium secret function successfully registered") } dest["CiliumSecret"] = func() string { return ciliumsecretString } } if cluster.Spec.Networking != nil && cluster.Spec.Networking.Flannel != nil { flannelBackendType := cluster.Spec.Networking.Flannel.Backend if flannelBackendType == "" { klog.Warningf("Defaulting flannel backend to udp (not a recommended configuration)") flannelBackendType = "udp" } dest["FlannelBackendType"] = func() string { return flannelBackendType } } if cluster.Spec.Networking != nil && cluster.Spec.Networking.Weave != nil { weavesecretString := "" weavesecret, _ := secretStore.Secret("weavepassword") if weavesecret != nil { weavesecretString, err = weavesecret.AsString() if err != nil { return err } klog.V(4).Info("Weave secret function successfully registered") } dest["WeaveSecret"] = func() string { return weavesecretString } } dest["CloudLabels"] = func() string { labels := []string{ fmt.Sprintf("KubernetesCluster=%s", cluster.ObjectMeta.Name), } for n, v := range cluster.Spec.CloudLabels { labels = append(labels, fmt.Sprintf("%s=%s", n, v)) } // ensure stable sorting of tags sort.Strings(labels) return strings.Join(labels, ",") } dest["IsIPv6Only"] = tf.IsIPv6Only dest["UseServiceAccountExternalPermissions"] = tf.UseServiceAccountExternalPermissions if cluster.Spec.NodeTerminationHandler != nil { dest["DefaultQueueName"] = func() string { s := strings.Replace(tf.ClusterName(), ".", "-", -1) domain := ".amazonaws.com/" if strings.Contains(tf.Region, "cn-") { domain = ".amazonaws.com.cn/" } url := "https://sqs." + tf.Region + domain + tf.AWSAccountID + "/" + s + "-nth" return url } dest["EnableSQSTerminationDraining"] = func() bool { return *cluster.Spec.NodeTerminationHandler.EnableSQSTerminationDraining } } return nil } // ToJSON returns a json representation of the struct or on error an empty string func (tf *TemplateFunctions) ToJSON(data interface{}) string { encoded, err := json.Marshal(data) if err != nil { return "" } return string(encoded) } // ToYAML returns a yaml representation of the struct or on error an empty string func (tf *TemplateFunctions) ToYAML(data interface{}) string { encoded, err := yaml.Marshal(data) if err != nil { return "" } return string(encoded) } // SharedVPC is a simple helper function which makes the templates for a shared VPC clearer func (tf *TemplateFunctions) SharedVPC() bool { return tf.Cluster.SharedVPC() } // GetInstanceGroup returns the instance group with the specified name func (tf *TemplateFunctions) GetInstanceGroup(name string) (*kops.InstanceGroup, error) { ig := tf.KopsModelContext.FindInstanceGroup(name) if ig == nil { return nil, fmt.Errorf("InstanceGroup %q not found", name) } return ig, nil } // ControlPlaneControllerReplicas returns the amount of replicas for a controllers that should run in the cluster // If the cluster has a highly available control plane, this function will return 2, if it has 1 control plane node, it will return 1 func (tf *TemplateFunctions) ControlPlaneControllerReplicas() int { if tf.HasHighlyAvailableControlPlane() { return 2 } return 1 } func (tf *TemplateFunctions) APIServerNodeRole() string { if featureflag.APIServerNodes.Enabled() { return "node-role.kubernetes.io/api-server" } return "node-role.kubernetes.io/master" } // HasHighlyAvailableControlPlane returns true of the cluster has more than one control plane node. 
False otherwise. func (tf *TemplateFunctions) HasHighlyAvailableControlPlane() bool { cp := 0 for _, ig := range tf.InstanceGroups { if ig.Spec.Role == kops.InstanceGroupRoleMaster { cp++ if cp > 1 { return true } } } return false } // CloudControllerConfigArgv returns the args to external cloud controller func (tf *TemplateFunctions) CloudControllerConfigArgv() ([]string, error) { cluster := tf.Cluster if cluster.Spec.ExternalCloudControllerManager == nil { return nil, fmt.Errorf("ExternalCloudControllerManager is nil") } var argv []string if cluster.Spec.ExternalCloudControllerManager.Master != "" { argv = append(argv, fmt.Sprintf("--master=%s", cluster.Spec.ExternalCloudControllerManager.Master)) } if cluster.Spec.ExternalCloudControllerManager.LogLevel != 0 { argv = append(argv, fmt.Sprintf("--v=%d", cluster.Spec.ExternalCloudControllerManager.LogLevel)) } else { argv = append(argv, "--v=2") } if cluster.Spec.ExternalCloudControllerManager.CloudProvider != "" { argv = append(argv, fmt.Sprintf("--cloud-provider=%s", cluster.Spec.ExternalCloudControllerManager.CloudProvider)) } else if cluster.Spec.CloudProvider != "" { argv = append(argv, fmt.Sprintf("--cloud-provider=%s", cluster.Spec.CloudProvider)) } else { return nil, fmt.Errorf("Cloud Provider is not set") } if cluster.Spec.ExternalCloudControllerManager.ClusterName != "" { argv = append(argv, fmt.Sprintf("--cluster-name=%s", cluster.Spec.ExternalCloudControllerManager.ClusterName)) } if cluster.Spec.ExternalCloudControllerManager.ClusterCIDR != "" { argv = append(argv, fmt.Sprintf("--cluster-cidr=%s", cluster.Spec.ExternalCloudControllerManager.ClusterCIDR)) } if cluster.Spec.ExternalCloudControllerManager.AllocateNodeCIDRs != nil { argv = append(argv, fmt.Sprintf("--allocate-node-cidrs=%t", *cluster.Spec.ExternalCloudControllerManager.AllocateNodeCIDRs)) } if cluster.Spec.ExternalCloudControllerManager.ConfigureCloudRoutes != nil { argv = append(argv, fmt.Sprintf("--configure-cloud-routes=%t", *cluster.Spec.ExternalCloudControllerManager.ConfigureCloudRoutes)) } if cluster.Spec.ExternalCloudControllerManager.CIDRAllocatorType != nil && *cluster.Spec.ExternalCloudControllerManager.CIDRAllocatorType != "" { argv = append(argv, fmt.Sprintf("--cidr-allocator-type=%s", *cluster.Spec.ExternalCloudControllerManager.CIDRAllocatorType)) } if cluster.Spec.ExternalCloudControllerManager.UseServiceAccountCredentials != nil { argv = append(argv, fmt.Sprintf("--use-service-account-credentials=%t", *cluster.Spec.ExternalCloudControllerManager.UseServiceAccountCredentials)) } else { argv = append(argv, fmt.Sprintf("--use-service-account-credentials=%t", true)) } argv = append(argv, "--cloud-config=/etc/kubernetes/cloud.config") return argv, nil } // DNSControllerArgv returns the args to the DNS controller func (tf *TemplateFunctions) DNSControllerArgv() ([]string, error) { cluster := tf.Cluster var argv []string argv = append(argv, "/dns-controller") // @check if the dns controller has custom configuration if cluster.Spec.ExternalDNS == nil { argv = append(argv, []string{"--watch-ingress=false"}...) 
klog.V(4).Infof("watch-ingress=false set on dns-controller") } else { // @check if the watch ingress is set var watchIngress bool if cluster.Spec.ExternalDNS.WatchIngress != nil { watchIngress = fi.BoolValue(cluster.Spec.ExternalDNS.WatchIngress) } if watchIngress { klog.Warningln("--watch-ingress=true set on dns-controller") klog.Warningln("this may cause problems with previously defined services: https://github.com/kubernetes/kops/issues/2496") } argv = append(argv, fmt.Sprintf("--watch-ingress=%t", watchIngress)) if cluster.Spec.ExternalDNS.WatchNamespace != "" { argv = append(argv, fmt.Sprintf("--watch-namespace=%s", cluster.Spec.ExternalDNS.WatchNamespace)) } } if dns.IsGossipHostname(cluster.Spec.MasterInternalName) { argv = append(argv, "--dns=gossip") // Configuration specifically for the DNS controller gossip if cluster.Spec.DNSControllerGossipConfig != nil { if cluster.Spec.DNSControllerGossipConfig.Protocol != nil { argv = append(argv, "--gossip-protocol="+*cluster.Spec.DNSControllerGossipConfig.Protocol) } if cluster.Spec.DNSControllerGossipConfig.Listen != nil { argv = append(argv, "--gossip-listen="+*cluster.Spec.DNSControllerGossipConfig.Listen) } if cluster.Spec.DNSControllerGossipConfig.Secret != nil { argv = append(argv, "--gossip-secret="+*cluster.Spec.DNSControllerGossipConfig.Secret) } if cluster.Spec.DNSControllerGossipConfig.Seed != nil { argv = append(argv, "--gossip-seed="+*cluster.Spec.DNSControllerGossipConfig.Seed) } else { argv = append(argv, fmt.Sprintf("--gossip-seed=127.0.0.1:%d", wellknownports.ProtokubeGossipWeaveMesh)) } if cluster.Spec.DNSControllerGossipConfig.Secondary != nil { if cluster.Spec.DNSControllerGossipConfig.Secondary.Protocol != nil { argv = append(argv, "--gossip-protocol-secondary="+*cluster.Spec.DNSControllerGossipConfig.Secondary.Protocol) } if cluster.Spec.DNSControllerGossipConfig.Secondary.Listen != nil { argv = append(argv, "--gossip-listen-secondary="+*cluster.Spec.DNSControllerGossipConfig.Secondary.Listen) } if cluster.Spec.DNSControllerGossipConfig.Secondary.Secret != nil { argv = append(argv, "--gossip-secret-secondary="+*cluster.Spec.DNSControllerGossipConfig.Secondary.Secret) } if cluster.Spec.DNSControllerGossipConfig.Secondary.Seed != nil { argv = append(argv, "--gossip-seed-secondary="+*cluster.Spec.DNSControllerGossipConfig.Secondary.Seed) } else { argv = append(argv, fmt.Sprintf("--gossip-seed-secondary=127.0.0.1:%d", wellknownports.ProtokubeGossipMemberlist)) } } } else { // Default to primary mesh and secondary memberlist argv = append(argv, fmt.Sprintf("--gossip-seed=127.0.0.1:%d", wellknownports.ProtokubeGossipWeaveMesh)) argv = append(argv, "--gossip-protocol-secondary=memberlist") argv = append(argv, fmt.Sprintf("--gossip-listen-secondary=0.0.0.0:%d", wellknownports.DNSControllerGossipMemberlist)) argv = append(argv, fmt.Sprintf("--gossip-seed-secondary=127.0.0.1:%d", wellknownports.ProtokubeGossipMemberlist)) } } else { switch kops.CloudProviderID(cluster.Spec.CloudProvider) { case kops.CloudProviderAWS: if strings.HasPrefix(os.Getenv("AWS_REGION"), "cn-") { argv = append(argv, "--dns=gossip") } else { argv = append(argv, "--dns=aws-route53") } case kops.CloudProviderGCE: argv = append(argv, "--dns=google-clouddns") case kops.CloudProviderDO: argv = append(argv, "--dns=digitalocean") default: return nil, fmt.Errorf("unhandled cloudprovider %q", cluster.Spec.CloudProvider) } } zone := cluster.Spec.DNSZone if zone != "" { if strings.Contains(zone, ".") { // match by name argv = append(argv, "--zone="+zone) } else { 
// match by id argv = append(argv, "--zone=*/"+zone) } } if cluster.Spec.IsIPv6Only() { argv = append(argv, "--internal-ipv6") } else { argv = append(argv, "--internal-ipv4") } // permit wildcard updates argv = append(argv, "--zone=*/*") // Verbose, but not crazy logging argv = append(argv, "-v=2") return argv, nil } // KopsControllerConfig returns the yaml configuration for kops-controller func (tf *TemplateFunctions) KopsControllerConfig() (string, error) { cluster := tf.Cluster config := &kopscontrollerconfig.Options{ Cloud: cluster.Spec.CloudProvider, ConfigBase: cluster.Spec.ConfigBase, } if featureflag.CacheNodeidentityInfo.Enabled() { config.CacheNodeidentityInfo = true } if tf.UseKopsControllerForNodeBootstrap() { certNames := []string{"kubelet", "kubelet-server"} signingCAs := []string{fi.CertificateIDCA} if apiModel.UseCiliumEtcd(cluster) { certNames = append(certNames, "etcd-client-cilium") signingCAs = append(signingCAs, "etcd-clients-ca-cilium") } if cluster.Spec.KubeProxy.Enabled == nil || *cluster.Spec.KubeProxy.Enabled { certNames = append(certNames, "kube-proxy") } if cluster.Spec.Networking.Kuberouter != nil { certNames = append(certNames, "kube-router") } pkiDir := "/etc/kubernetes/kops-controller/pki" config.Server = &kopscontrollerconfig.ServerOptions{ Listen: fmt.Sprintf(":%d", wellknownports.KopsControllerPort), ServerCertificatePath: path.Join(pkiDir, "kops-controller.crt"), ServerKeyPath: path.Join(pkiDir, "kops-controller.key"), CABasePath: pkiDir, SigningCAs: signingCAs, CertNames: certNames, } switch kops.CloudProviderID(cluster.Spec.CloudProvider) { case kops.CloudProviderAWS: nodesRoles := sets.String{} for _, ig := range tf.InstanceGroups { if ig.Spec.Role == kops.InstanceGroupRoleNode || ig.Spec.Role == kops.InstanceGroupRoleAPIServer { profile, err := tf.LinkToIAMInstanceProfile(ig) if err != nil { return "", fmt.Errorf("getting profile for ig %s: %v", ig.Name, err) } // The IAM Instance Profile has not been created at this point if it is not specified. // Because the IAM Instance Profile and the IAM Role are created in IAMModelBuilder tasks. // Therefore, the IAM Role associated with IAM Instance Profile is acquired only when it is not specified. if ig.Spec.IAM != nil && ig.Spec.IAM.Profile != nil { c := tf.cloud.(awsup.AWSCloud) roles, err := awsup.GetRolesInInstanceProfile(c, *profile.Name) if err != nil { return "", fmt.Errorf("getting role from profile %s: %v", *profile.Name, err) } nodesRoles.Insert(roles...) } else { // When the IAM Instance Profile is not specified, IAM Instance Profile is created by kOps. // In this case, the IAM Instance Profile name and IAM Role name are same. // So there is no problem even if IAM Instance Profile name is inserted as role name in nodesRoles. nodesRoles.Insert(*profile.Name) } } } config.Server.Provider.AWS = &awsup.AWSVerifierOptions{ NodesRoles: nodesRoles.List(), Region: tf.Region, } case kops.CloudProviderGCE: c := tf.cloud.(gce.GCECloud) config.Server.Provider.GCE = &gcetpm.TPMVerifierOptions{ ProjectID: c.Project(), ClusterName: tf.ClusterName(), Region: tf.Region, MaxTimeSkew: 300, } default: return "", fmt.Errorf("unsupported cloud provider %s", cluster.Spec.CloudProvider) } } if tf.Cluster.Spec.IsKopsControllerIPAM() { config.EnableCloudIPAM = true } // To avoid indentation problems, we marshal as json. 
json is a subset of yaml b, err := json.Marshal(config) if err != nil { return "", fmt.Errorf("failed to serialize kops-controller config: %v", err) } return string(b), nil } // KopsControllerArgv returns the args to kops-controller func (tf *TemplateFunctions) KopsControllerArgv() ([]string, error) { var argv []string argv = append(argv, "/kops-controller") // Verbose, but not excessive logging argv = append(argv, "--v=2") argv = append(argv, "--conf=/etc/kubernetes/kops-controller/config/config.yaml") return argv, nil } func (tf *TemplateFunctions) ExternalDNSArgv() ([]string, error) { cluster := tf.Cluster externalDNS := tf.Cluster.Spec.ExternalDNS var argv []string cloudProvider := cluster.Spec.CloudProvider switch kops.CloudProviderID(cloudProvider) { case kops.CloudProviderAWS: argv = append(argv, "--provider=aws") case kops.CloudProviderGCE: project := cluster.Spec.Project argv = append(argv, "--provider=google") argv = append(argv, "--google-project="+project) default: return nil, fmt.Errorf("unhandled cloudprovider %q", cluster.Spec.CloudProvider) } argv = append(argv, "--events") if fi.BoolValue(externalDNS.WatchIngress) { argv = append(argv, "--source=ingress") } argv = append(argv, "--source=pod") argv = append(argv, "--source=service") argv = append(argv, "--compatibility=kops-dns-controller") argv = append(argv, "--registry=txt") argv = append(argv, "--txt-owner-id=kops-"+tf.ClusterName()) argv = append(argv, "--zone-id-filter="+tf.Cluster.Spec.DNSZone) if externalDNS.WatchNamespace != "" { argv = append(argv, "--namespace="+externalDNS.WatchNamespace) } return argv, nil } func (tf *TemplateFunctions) ProxyEnv() map[string]string { cluster := tf.Cluster envs := map[string]string{} proxies := cluster.Spec.EgressProxy if proxies == nil { return envs } httpProxy := proxies.HTTPProxy if httpProxy.Host != "" { var portSuffix string if httpProxy.Port != 0 { portSuffix = ":" + strconv.Itoa(httpProxy.Port) } else { portSuffix = "" } url := "http://" + httpProxy.Host + portSuffix envs["http_proxy"] = url envs["https_proxy"] = url } if proxies.ProxyExcludes != "" { envs["no_proxy"] = proxies.ProxyExcludes envs["NO_PROXY"] = proxies.ProxyExcludes } return envs } // KopsSystemEnv builds the env vars for a system component func (tf *TemplateFunctions) KopsSystemEnv() []corev1.EnvVar { envMap := env.BuildSystemComponentEnvVars(&tf.Cluster.Spec) return envMap.ToEnvVars() } // OpenStackCCM returns OpenStack external cloud controller manager current image // with tag specified to k8s version func (tf *TemplateFunctions) OpenStackCCMTag() string { var tag string parsed, err := util.ParseKubernetesVersion(tf.Cluster.Spec.KubernetesVersion) if err != nil { tag = "latest" } else { if parsed.Minor == 13 { // The bugfix release tag = "1.13.1" } else { // otherwise we use always .0 ccm image, if needed that can be overrided using clusterspec tag = fmt.Sprintf("v%d.%d.0", parsed.Major, parsed.Minor) } } return tag } // GetNodeInstanceGroups returns a map containing the defined instance groups of role "Node". func (tf *TemplateFunctions) GetNodeInstanceGroups() map[string]kops.InstanceGroupSpec { nodegroups := make(map[string]kops.InstanceGroupSpec) for _, ig := range tf.KopsModelContext.InstanceGroups { if ig.Spec.Role == kops.InstanceGroupRoleNode { nodegroups[ig.ObjectMeta.Name] = ig.Spec } } return nodegroups }
[ "\"DIGITALOCEAN_ACCESS_TOKEN\"", "\"AWS_REGION\"" ]
[]
[ "DIGITALOCEAN_ACCESS_TOKEN", "AWS_REGION" ]
[]
["DIGITALOCEAN_ACCESS_TOKEN", "AWS_REGION"]
go
2
0
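This row's two environment reads are easy to lose in the size of template_functions.go: DIGITALOCEAN_ACCESS_TOKEN feeds the DO_TOKEN template function, and AWS_REGION is prefix-checked so dns-controller uses gossip DNS in the AWS China partition instead of Route 53. A distilled Go sketch of just those two lookups; variable and flag handling below is illustrative:

package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	// The DO_TOKEN template function simply exposes this variable as-is.
	doToken := os.Getenv("DIGITALOCEAN_ACCESS_TOKEN")

	// China-partition regions (prefix "cn-") force gossip DNS; Route 53
	// is used everywhere else, mirroring the switch in DNSControllerArgv.
	dnsFlag := "--dns=aws-route53"
	if strings.HasPrefix(os.Getenv("AWS_REGION"), "cn-") {
		dnsFlag = "--dns=gossip"
	}

	fmt.Println("have DO token:", doToken != "", "dns flag:", dnsFlag)
}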
setup.py
#!/usr/bin/env python import os import sys from setuptools import setup, Extension from setuptools.command.test import test as TestCommand # setuptools DWIM monkey-patch madness # http://mail.python.org/pipermail/distutils-sig/2007-September/thread.html#8204 if 'setuptools.extension' in sys.modules: m = sys.modules['setuptools.extension'] m.Extension.__dict__ = m._Extension.__dict__ options = {} if os.environ.get('LIBRARY_DIRS'): options['library_dirs'] = [os.environ['LIBRARY_DIRS']] if os.environ.get('INCLUDE_DIRS'): options['include_dirs'] = [os.environ['INCLUDE_DIRS']] # support python setup.py test # http://pytest.org/latest/goodpractises.html#integration-with-setuptools-test-commands class PyTest(TestCommand): user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")] def initialize_options(self): TestCommand.initialize_options(self) self.pytest_args = ['tests'] def finalize_options(self): TestCommand.finalize_options(self) self.test_args = [] self.test_suite = True def run_tests(self): # import here, cause outside the eggs aren't loaded import pytest errno = pytest.main(self.pytest_args or []) sys.exit(errno) setup(name="python-libmemcached", version="1.0", description="python memcached client wrapped on libmemcached", maintainer="Qiangning Hong", maintainer_email="[email protected]", setup_requires=['setuptools_cython'], install_requires=['Cython>=0.18'], ext_modules=[Extension('cmemcached_imp', ['cmemcached_imp.pyx', 'split_mc.c'], libraries=['memcached'], **options)], py_modules=['cmemcached'], cmdclass={'test': PyTest}, tests_require=['pytest', 'mock'], )
[]
[]
[ "INCLUDE_DIRS", "LIBRARY_DIRS" ]
[]
["INCLUDE_DIRS", "LIBRARY_DIRS"]
python
2
0
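setup.py treats both variables as optional build-path overrides, adding each to the Extension options only when it is set. A small sketch of the same optional-override pattern, written in Go; the option keys mirror the distutils names, everything else is illustrative:

package main

import (
	"fmt"
	"os"
)

func main() {
	// Each variable is optional: only add the corresponding build option
	// when the variable is non-empty, as the setup.py above does.
	options := map[string][]string{}
	if v := os.Getenv("LIBRARY_DIRS"); v != "" {
		options["library_dirs"] = []string{v}
	}
	if v := os.Getenv("INCLUDE_DIRS"); v != "" {
		options["include_dirs"] = []string{v}
	}
	fmt.Println(options)
}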
lib/metaphlan2/metaphlan2Server.py
#!/usr/bin/env python # -*- coding: utf-8 -*- import datetime import json import os import random as _random import sys import traceback from getopt import getopt, GetoptError from multiprocessing import Process from os import environ from wsgiref.simple_server import make_server import requests as _requests from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \ JSONRPCError, InvalidRequestError from jsonrpcbase import ServerError as JSONServerError from biokbase import log from metaphlan2.authclient import KBaseAuth as _KBaseAuth try: from ConfigParser import ConfigParser except ImportError: from configparser import ConfigParser DEPLOY = 'KB_DEPLOYMENT_CONFIG' SERVICE = 'KB_SERVICE_NAME' AUTH = 'auth-service-url' # Note that the error fields do not match the 2.0 JSONRPC spec def get_config_file(): return environ.get(DEPLOY, None) def get_service_name(): return environ.get(SERVICE, None) def get_config(): if not get_config_file(): return None retconfig = {} config = ConfigParser() config.read(get_config_file()) for nameval in config.items(get_service_name() or 'metaphlan2'): retconfig[nameval[0]] = nameval[1] return retconfig config = get_config() from metaphlan2.metaphlan2Impl import metaphlan2 # noqa @IgnorePep8 impl_metaphlan2 = metaphlan2(config) class JSONObjectEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, set): return list(obj) if isinstance(obj, frozenset): return list(obj) if hasattr(obj, 'toJSONable'): return obj.toJSONable() return json.JSONEncoder.default(self, obj) class JSONRPCServiceCustom(JSONRPCService): def call(self, ctx, jsondata): """ Calls jsonrpc service's method and returns its return value in a JSON string or None if there is none. Arguments: jsondata -- remote method call in jsonrpc format """ result = self.call_py(ctx, jsondata) if result is not None: return json.dumps(result, cls=JSONObjectEncoder) return None def _call_method(self, ctx, request): """Calls given method with given params and returns it value.""" method = self.method_data[request['method']]['method'] params = request['params'] result = None try: if isinstance(params, list): # Does it have enough arguments? if len(params) < self._man_args(method) - 1: raise InvalidParamsError('not enough arguments') # Does it have too many arguments? if(not self._vargs(method) and len(params) > self._max_args(method) - 1): raise InvalidParamsError('too many arguments') result = method(ctx, *params) elif isinstance(params, dict): # Do not accept keyword arguments if the jsonrpc version is # not >=1.1. if request['jsonrpc'] < 11: raise KeywordError result = method(ctx, **params) else: # No params result = method(ctx) except JSONRPCError: raise except Exception as e: # log.exception('method %s threw an exception' % request['method']) # Exception was raised inside the method. newerr = JSONServerError() newerr.trace = traceback.format_exc() if len(e.args) == 1: newerr.data = repr(e.args[0]) else: newerr.data = repr(e.args) raise newerr return result def call_py(self, ctx, jsondata): """ Calls jsonrpc service's method and returns its return value in python object format or None if there is none. This method is same as call() except the return value is a python object instead of JSON string. This method is mainly only useful for debugging purposes. 
""" rdata = jsondata # we already deserialize the json string earlier in the server code, no # need to do it again # try: # rdata = json.loads(jsondata) # except ValueError: # raise ParseError # set some default values for error handling request = self._get_default_vals() if isinstance(rdata, dict) and rdata: # It's a single request. self._fill_request(request, rdata) respond = self._handle_request(ctx, request) # Don't respond to notifications if respond is None: return None return respond elif isinstance(rdata, list) and rdata: # It's a batch. requests = [] responds = [] for rdata_ in rdata: # set some default values for error handling request_ = self._get_default_vals() self._fill_request(request_, rdata_) requests.append(request_) for request_ in requests: respond = self._handle_request(ctx, request_) # Don't respond to notifications if respond is not None: responds.append(respond) if responds: return responds # Nothing to respond. return None else: # empty dict, list or wrong type raise InvalidRequestError def _handle_request(self, ctx, request): """Handles given request and returns its response.""" if 'types' in self.method_data[request['method']]: self._validate_params_types(request['method'], request['params']) result = self._call_method(ctx, request) # Do not respond to notifications. if request['id'] is None: return None respond = {} self._fill_ver(request['jsonrpc'], respond) respond['result'] = result respond['id'] = request['id'] return respond class MethodContext(dict): def __init__(self, logger): self['client_ip'] = None self['user_id'] = None self['authenticated'] = None self['token'] = None self['module'] = None self['method'] = None self['call_id'] = None self['rpc_context'] = None self['provenance'] = None self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3']) self._logger = logger def log_err(self, message): self._log(log.ERR, message) def log_info(self, message): self._log(log.INFO, message) def log_debug(self, message, level=1): if level in self._debug_levels: pass else: level = int(level) if level < 1 or level > 3: raise ValueError("Illegal log level: " + str(level)) level = level + 6 self._log(level, message) def set_log_level(self, level): self._logger.set_log_level(level) def get_log_level(self): return self._logger.get_log_level() def clear_log_level(self): self._logger.clear_user_log_level() def _log(self, level, message): self._logger.log_message(level, message, self['client_ip'], self['user_id'], self['module'], self['method'], self['call_id']) def provenance(self): callbackURL = os.environ.get('SDK_CALLBACK_URL') if callbackURL: # OK, there's a callback server from which we can get provenance arg_hash = {'method': 'CallbackServer.get_provenance', 'params': [], 'version': '1.1', 'id': str(_random.random())[2:] } body = json.dumps(arg_hash) response = _requests.post(callbackURL, data=body, timeout=60) response.encoding = 'utf-8' if response.status_code == 500: if ('content-type' in response.headers and response.headers['content-type'] == 'application/json'): err = response.json() if 'error' in err: raise ServerError(**err['error']) else: raise ServerError('Unknown', 0, response.text) else: raise ServerError('Unknown', 0, response.text) if not response.ok: response.raise_for_status() resp = response.json() if 'result' not in resp: raise ServerError('Unknown', 0, 'An unknown server error occurred') return resp['result'][0] else: return self.get('provenance') class ServerError(Exception): ''' The call returned an error. 
Fields: name - the name of the error. code - the error code. message - a human readable error message. data - the server side stacktrace. ''' def __init__(self, name, code, message, data=None, error=None): super(Exception, self).__init__(message) self.name = name self.code = code self.message = message if message else '' self.data = data or error or '' # data = JSON RPC 2.0, error = 1.1 def __str__(self): return self.name + ': ' + str(self.code) + '. ' + self.message + \ '\n' + self.data def getIPAddress(environ): xFF = environ.get('HTTP_X_FORWARDED_FOR') realIP = environ.get('HTTP_X_REAL_IP') trustXHeaders = config is None or \ config.get('dont_trust_x_ip_headers') != 'true' if (trustXHeaders): if (xFF): return xFF.split(',')[0].strip() if (realIP): return realIP.strip() return environ.get('REMOTE_ADDR') class Application(object): # Wrap the wsgi handler in a class definition so that we can # do some initialization and avoid regenerating stuff over # and over def logcallback(self): self.serverlog.set_log_file(self.userlog.get_log_file()) def log(self, level, context, message): self.serverlog.log_message(level, message, context['client_ip'], context['user_id'], context['module'], context['method'], context['call_id']) def __init__(self): submod = get_service_name() or 'metaphlan2' self.userlog = log.log( submod, ip_address=True, authuser=True, module=True, method=True, call_id=True, changecallback=self.logcallback, config=get_config_file()) self.serverlog = log.log( submod, ip_address=True, authuser=True, module=True, method=True, call_id=True, logfile=self.userlog.get_log_file()) self.serverlog.set_log_level(6) self.rpc_service = JSONRPCServiceCustom() self.method_authentication = dict() self.rpc_service.add(impl_metaphlan2.run_metaphlan2, name='metaphlan2.run_metaphlan2', types=[dict]) self.method_authentication['metaphlan2.run_metaphlan2'] = 'required' # noqa self.rpc_service.add(impl_metaphlan2.status, name='metaphlan2.status', types=[dict]) authurl = config.get(AUTH) if config else None self.auth_client = _KBaseAuth(authurl) def __call__(self, environ, start_response): # Context object, equivalent to the perl impl CallContext ctx = MethodContext(self.userlog) ctx['client_ip'] = getIPAddress(environ) status = '500 Internal Server Error' try: body_size = int(environ.get('CONTENT_LENGTH', 0)) except (ValueError): body_size = 0 if environ['REQUEST_METHOD'] == 'OPTIONS': # we basically do nothing and just return headers status = '200 OK' rpc_result = "" else: request_body = environ['wsgi.input'].read(body_size) try: req = json.loads(request_body) except ValueError as ve: err = {'error': {'code': -32700, 'name': "Parse error", 'message': str(ve), } } rpc_result = self.process_error(err, ctx, {'version': '1.1'}) else: ctx['module'], ctx['method'] = req['method'].split('.') ctx['call_id'] = req['id'] ctx['rpc_context'] = { 'call_stack': [{'time': self.now_in_utc(), 'method': req['method']} ] } prov_action = {'service': ctx['module'], 'method': ctx['method'], 'method_params': req['params'] } ctx['provenance'] = [prov_action] try: token = environ.get('HTTP_AUTHORIZATION') # parse out the method being requested and check if it # has an authentication requirement method_name = req['method'] auth_req = self.method_authentication.get( method_name, 'none') if auth_req != 'none': if token is None and auth_req == 'required': err = JSONServerError() err.data = ( 'Authentication required for ' + 'metaphlan2 ' + 'but no authentication header was passed') raise err elif token is None and auth_req == 
'optional': pass else: try: user = self.auth_client.get_user(token) ctx['user_id'] = user ctx['authenticated'] = 1 ctx['token'] = token except Exception as e: if auth_req == 'required': err = JSONServerError() err.data = \ "Token validation failed: %s" % e raise err if (environ.get('HTTP_X_FORWARDED_FOR')): self.log(log.INFO, ctx, 'X-Forwarded-For: ' + environ.get('HTTP_X_FORWARDED_FOR')) self.log(log.INFO, ctx, 'start method') rpc_result = self.rpc_service.call(ctx, req) self.log(log.INFO, ctx, 'end method') status = '200 OK' except JSONRPCError as jre: err = {'error': {'code': jre.code, 'name': jre.message, 'message': jre.data } } trace = jre.trace if hasattr(jre, 'trace') else None rpc_result = self.process_error(err, ctx, req, trace) except Exception: err = {'error': {'code': 0, 'name': 'Unexpected Server Error', 'message': 'An unexpected server error ' + 'occurred', } } rpc_result = self.process_error(err, ctx, req, traceback.format_exc()) # print('Request method was %s\n' % environ['REQUEST_METHOD']) # print('Environment dictionary is:\n%s\n' % pprint.pformat(environ)) # print('Request body was: %s' % request_body) # print('Result from the method call is:\n%s\n' % \ # pprint.pformat(rpc_result)) if rpc_result: response_body = rpc_result else: response_body = '' response_headers = [ ('Access-Control-Allow-Origin', '*'), ('Access-Control-Allow-Headers', environ.get( 'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')), ('content-type', 'application/json'), ('content-length', str(len(response_body)))] start_response(status, response_headers) return [response_body.encode('utf8')] def process_error(self, error, context, request, trace=None): if trace: self.log(log.ERR, context, trace.split('\n')[0:-1]) if 'id' in request: error['id'] = request['id'] if 'version' in request: error['version'] = request['version'] e = error['error'].get('error') if not e: error['error']['error'] = trace elif 'jsonrpc' in request: error['jsonrpc'] = request['jsonrpc'] error['error']['data'] = trace else: error['version'] = '1.0' error['error']['error'] = trace return json.dumps(error) def now_in_utc(self): # noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8 dtnow = datetime.datetime.now() dtutcnow = datetime.datetime.utcnow() delta = dtnow - dtutcnow hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60, 60) return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm) application = Application() # This is the uwsgi application dictionary. On startup uwsgi will look # for this dict and pull its configuration from here. 
# This simply lists where to "mount" the application in the URL path # # This uwsgi module "magically" appears when running the app within # uwsgi and is not available otherwise, so wrap an exception handler # around it # # To run this server in uwsgi with 4 workers listening on port 9999 use: # uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_ # To run a using the single threaded python BaseHTTP service # listening on port 9999 by default execute this file # try: import uwsgi # Before we do anything with the application, see if the # configs specify patching all std routines to be asynch # *ONLY* use this if you are going to wrap the service in # a wsgi container that has enabled gevent, such as # uwsgi with the --gevent option if config is not None and config.get('gevent_monkeypatch_all', False): print("Monkeypatching std libraries for async") from gevent import monkey monkey.patch_all() uwsgi.applications = {'': application} except ImportError: # Not available outside of wsgi, ignore pass _proc = None def start_server(host='localhost', port=0, newprocess=False): ''' By default, will start the server on localhost on a system assigned port in the main thread. Excecution of the main thread will stay in the server main loop until interrupted. To run the server in a separate process, and thus allow the stop_server method to be called, set newprocess = True. This will also allow returning of the port number.''' global _proc if _proc: raise RuntimeError('server is already running') httpd = make_server(host, port, application) port = httpd.server_address[1] print("Listening on port %s" % port) if newprocess: _proc = Process(target=httpd.serve_forever) _proc.daemon = True _proc.start() else: httpd.serve_forever() return port def stop_server(): global _proc _proc.terminate() _proc = None def process_async_cli(input_file_path, output_file_path, token): exit_code = 0 with open(input_file_path) as data_file: req = json.load(data_file) if 'version' not in req: req['version'] = '1.1' if 'id' not in req: req['id'] = str(_random.random())[2:] ctx = MethodContext(application.userlog) if token: user = application.auth_client.get_user(token) ctx['user_id'] = user ctx['authenticated'] = 1 ctx['token'] = token if 'context' in req: ctx['rpc_context'] = req['context'] ctx['CLI'] = 1 ctx['module'], ctx['method'] = req['method'].split('.') prov_action = {'service': ctx['module'], 'method': ctx['method'], 'method_params': req['params']} ctx['provenance'] = [prov_action] resp = None try: resp = application.rpc_service.call_py(ctx, req) except JSONRPCError as jre: trace = jre.trace if hasattr(jre, 'trace') else None resp = {'id': req['id'], 'version': req['version'], 'error': {'code': jre.code, 'name': jre.message, 'message': jre.data, 'error': trace} } except Exception: trace = traceback.format_exc() resp = {'id': req['id'], 'version': req['version'], 'error': {'code': 0, 'name': 'Unexpected Server Error', 'message': 'An unexpected server error occurred', 'error': trace} } if 'error' in resp: exit_code = 500 with open(output_file_path, "w") as f: f.write(json.dumps(resp, cls=JSONObjectEncoder)) return exit_code if __name__ == "__main__": if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and os.path.isfile(sys.argv[1])): token = None if len(sys.argv) == 4: if os.path.isfile(sys.argv[3]): with open(sys.argv[3]) as token_file: token = token_file.read() else: token = sys.argv[3] sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token)) try: opts, args = getopt(sys.argv[1:], "", ["port=", "host="]) except GetoptError 
as err: # print help information and exit: print(str(err)) # will print something like "option -a not recognized" sys.exit(2) port = 9999 host = 'localhost' for o, a in opts: if o == '--port': port = int(a) elif o == '--host': host = a print("Host set to %s" % host) else: assert False, "unhandled option" start_server(host=host, port=port) # print("Listening on port %s" % port) # httpd = make_server( host, port, application) # # httpd.serve_forever()
[]
[]
[ "SDK_CALLBACK_URL" ]
[]
["SDK_CALLBACK_URL"]
python
1
0
class6/collateral/ssh_config_file.py
#!/usr/bin/env python import os from getpass import getpass from netmiko import ConnectHandler # Code so automated tests will run properly password = os.getenv("NETMIKO_PASSWORD") if os.getenv("NETMIKO_PASSWORD") else getpass() cisco3 = { "device_type": "cisco_ios", "host": "cisco3.lasthop.io", "username": "pyclass", "password": password, "ssh_config_file": "~/.ssh/ssh_config", } with ConnectHandler(**cisco3) as net_connect: output = net_connect.send_command("show users") print(output)
[]
[]
[ "NETMIKO_PASSWORD" ]
[]
["NETMIKO_PASSWORD"]
python
1
0
graph/encoding/doc.go
// Copyright ©2017 The Gonum Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package encoding provides a common graph encoding API. package encoding // import "github.com/savalin/gonum/graph/encoding"
[]
[]
[]
[]
[]
go
null
null
null
project-templates/mysql/libunix/libunix_test.go
package libunix import ( "os" "testing" ) func TestCurrentUser(t *testing.T) { userEnv := os.Getenv("USER") username, err := CurrentUser() if userEnv != "" && err != nil { t.Fatalf("If $USER is not blank, error should not happen. Error: %v", err) } if userEnv != username { t.Errorf("Fetched the wrong username. $USER: %v, username: %v", userEnv, username) } }
[ "\"USER\"" ]
[]
[ "USER" ]
[]
["USER"]
go
1
0
examples/stock/price/go/getAStockPrice.go
package example import ( "fmt" "os" "github.com/micro/services/clients/go/stock" ) // Get the last price for a given stock ticker func GetAstockPrice() { stockService := stock.NewStockService(os.Getenv("MICRO_API_TOKEN")) rsp, err := stockService.Price(&stock.PriceRequest{ Symbol: "AAPL", }) fmt.Println(rsp, err) }
[ "\"MICRO_API_TOKEN\"" ]
[]
[ "MICRO_API_TOKEN" ]
[]
["MICRO_API_TOKEN"]
go
1
0
ISPy/img/denoising.py
import numpy as np import os import sys import warnings with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=FutureWarning) # To deactivate warnings: https://github.com/tensorflow/tensorflow/issues/7778 os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # import keras.backend.tensorflow_backend as ktf from keras.layers import Input, Conv2D, UpSampling2D, Concatenate, MaxPooling2D from keras.models import Model from tensorflow.keras.layers import Layer # from keras.engine.topology import Layer from tensorflow.keras.layers import InputSpec # from keras.engine import InputSpec from keras.utils import conv_utils import tensorflow as tf # ================================================================================== def spatial_reflection_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None): """Pads the 2nd and 3rd dimensions of a 4D tensor. """ assert len(padding) == 2 assert len(padding[0]) == 2 assert len(padding[1]) == 2 if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) if data_format == 'channels_first': pattern = [[0, 0], [0, 0], list(padding[0]), list(padding[1])] else: pattern = [[0, 0], list(padding[0]), list(padding[1]), [0, 0]] return tf.pad(x, pattern, "REFLECT") # ================================================================================== class ReflectionPadding2D(Layer): """Reflection-padding layer for 2D input (e.g. picture). This layer can add rows and columns or zeros at the top, bottom, left and right side of an image tensor. """ def __init__(self, padding=(1, 1), data_format=None, **kwargs): super(ReflectionPadding2D, self).__init__(**kwargs) self.data_format = conv_utils.normalize_data_format(data_format) if isinstance(padding, int): self.padding = ((padding, padding), (padding, padding)) elif hasattr(padding, '__len__'): if len(padding) != 2: raise ValueError('`padding` should have two elements. ' 'Found: ' + str(padding)) height_padding = conv_utils.normalize_tuple(padding[0], 2, '1st entry of padding') width_padding = conv_utils.normalize_tuple(padding[1], 2, '2nd entry of padding') self.padding = (height_padding, width_padding) else: raise ValueError('`padding` should be either an int, ' 'a tuple of 2 ints ' '(symmetric_height_pad, symmetric_width_pad), ' 'or a tuple of 2 tuples of 2 ints ' '((top_pad, bottom_pad), (left_pad, right_pad)). 
' 'Found: ' + str(padding)) self.input_spec = InputSpec(ndim=4) def compute_output_shape(self, input_shape): if self.data_format == 'channels_first': if input_shape[2] is not None: rows = input_shape[2] + self.padding[0][0] + self.padding[0][1] else: rows = None if input_shape[3] is not None: cols = input_shape[3] + self.padding[1][0] + self.padding[1][1] else: cols = None return (input_shape[0], input_shape[1], rows, cols) elif self.data_format == 'channels_last': if input_shape[1] is not None: rows = input_shape[1] + self.padding[0][0] + self.padding[0][1] else: rows = None if input_shape[2] is not None: cols = input_shape[2] + self.padding[1][0] + self.padding[1][1] else: cols = None return (input_shape[0], rows, cols, input_shape[3]) def call(self, inputs): return spatial_reflection_2d_padding(inputs, padding=self.padding, data_format=self.data_format) def get_config(self): config = {'padding': self.padding, 'data_format': self.data_format} base_config = super(ReflectionPadding2D, self).get_config() return dict(list(base_config.items()) + list(config.items())) # ================================================================================== def unet(start_ch=32, depth=2, activation='relu', input_channel_num=1, out_ch=1, inc_rate=2., dropout=0.0, batchnorm=False, maxpool=True, upconv=True, residual=False): # UNet: code from https://github.com/pietz/unet-keras def _conv_block(m, dim, acti, bn, res, do=0): n = ReflectionPadding2D()(m) n = Conv2D(dim, 3, padding='valid', kernel_initializer='he_normal', activation=acti)(n) n = ReflectionPadding2D()(n) n = Conv2D(dim, 3, padding='valid', kernel_initializer='he_normal', activation=acti)(n) return Concatenate()([m, n]) if res else n def _level_block(m, dim, depth, inc, acti, do, bn, mp, up, res): if depth > 0: n = _conv_block(m, dim, acti, bn, res) m = MaxPooling2D()(n) if mp else Conv2D(dim, 3, strides=2, padding='same')(n) m = _level_block(m, int(inc * dim), depth - 1, inc, acti, do, bn, mp, up, res) if up: m = UpSampling2D()(m) m = Conv2D(dim, 2, activation=acti, padding='same', kernel_initializer='he_normal')( m) else: m = Conv2DTranspose(dim, 3, strides=2, activation=acti, padding='same')(m) n = Concatenate()([n, m]) m = _conv_block(n, dim, acti, bn, res) else: m = _conv_block(m, dim, acti, bn, res, do) return m i = Input(shape=(None, None, input_channel_num)) o = _level_block(i, start_ch, depth, inc_rate, activation, dropout, batchnorm, maxpool, upconv, residual) o = Conv2D(out_ch, 1)(o) return Model(inputs=i, outputs=o) # ================================================================================= class deep_network(object): def __init__(self): self.network_type = 'network' self.nfilter = 32 this_dir, this_filename = os.path.split(__file__) DATA_PATH = os.path.join(this_dir, "../data/{0}_denoising_weights.hdf5".format(self.network_type)) self.model = unet(start_ch=self.nfilter) print("==> Setting up the network: loading {0}_weights.hdf5".format(self.network_type)) self.model.load_weights(DATA_PATH) def define_network(self, image): self.image = image self.nx = image.shape[1] self.ny = image.shape[2] def predict(self, numerotime): input_validation = np.zeros((self.image.shape[0], self.nx, self.ny, 1), dtype='float32') input_validation[:, :, :, 0] = self.image # start = time.time() out = self.model.predict(input_validation) # end = time.time() # print("Prediction took {0:3.2} seconds...".format(end-start)) return self.image[:, :, :] - out[:, :, :, 0] # 
================================================================================= def predict_image(model, image, numerotime, split): # Patch for big images in keras if split is True: tamano = image.shape[1] index = int(tamano / 2) cindex = int(index / 4. + 2) * int(4) cindex_minor = int(index / 4. + 0) * int(4) ciclo = np.zeros_like(image, dtype=np.float32) # First part: print('(1/2)', end='') image1 = image[:, :cindex, :] model.define_network(image=np.nan_to_num(image1)) ciclo1 = model.predict(numerotime) ciclo[:, :cindex_minor, :] = ciclo1[:, :cindex_minor, :] print('(2/2)', end='') image1 = image[:, -cindex:, :] model.define_network(image=np.nan_to_num(image1)) ciclo2 = model.predict(numerotime) ciclo[:, -cindex_minor:, :] = ciclo2[:, -cindex_minor:, :] else: ciclo = model.predict(numerotime) return ciclo # ================================================================================= def neural_network(input_data, niterations=2, scale=None, plotOption=False, test_Option=False, split=False): """ Run the denoising neural network algorithm developed in the paper https://arxiv.org/abs/1908.02815 Parameters ---------- input_data: 5D ndarray data cube niterations : int, optional number of times the neural network, cleaning the residuals scale : float, optional normalization to match training values. The std of the normalized data has to be around 1e-3 plotOption : bool, optional some plots to check the quality of the denoising. test_Option : bool, optional crop the input data to perform a quick check. split : bool, optional run the network splitting the data into multiple partitions (for big files) Returns ------- cube_array: ndarray 5D cube of shape [nt,ns,nw,nx,ny] after performing the denoising. Examples -------- >>> from ISPy.img import denoising >>> from ISPy.io import solarnet >>> # Reading data: >>> data_input = solarnet.read('filename.fits') >>> ouput_name = 'test.fits' >>> output_file = denoising.neural_network(data_input, niterations = 2, scale=-1.0, plotOption = True, test_Option=True, split=True) >>> solarnet.write(ouput_name, output_file) :Authors: Carlos Diaz (ISP/SU 2019) """ print(input_data.shape) if test_Option is True: # To do some tests input_data = input_data[:1, :, :1, :, :] print('==> Cropping data to perform tests.') nt, ns, nw, nx, ny = input_data.shape if scale is not None and scale > 0.0: sc = np.copy(scale) else: # Calculating scale: noise_s = [] for ii in range(input_data.shape[2]): noise_s.append(np.std(input_data[0, 1, ii, :, :])) noise_s.append(np.std(input_data[0, 2, ii, :, :])) noise_s.append(np.std(input_data[0, 3, ii, :, :])) sc = 1. / (np.min(noise_s) / 1e-3) print('==> Scale factor (so noise is 1e-3) = {}'.format(sc)) new_output = np.zeros_like(input_data, dtype=np.float32) model = deep_network() if plotOption is True: import os if not os.path.exists('images'): os.makedirs('images') if test_Option is True: nt = 1 else: nt = input_data.shape[0] stokes_label = ['I', 'Q', 'U', 'V'] for istokes in [1, 2, 3]: # print('==> Denoising Stokes '+stokes_label[istokes]) for jj in range(nt): print('==> Denoising all wavelengths of Stokes ' + stokes_label[istokes], end='') print(', time_frame:', jj, end='') print(", iter: 0", end='') sys.stdout.flush() input0 = input_data[jj, istokes, :, :int(input_data.shape[3] / 4.) * int(4), :int(input_data.shape[4] / 4.) 
* int(4)] * sc numerotime = str(jj) + '_s' + stokes_label[istokes] model.define_network(image=np.nan_to_num(input0)) # ciclo = out.predict(numerotime) ciclo = predict_image(model, input0, numerotime, split) for i in range(niterations): print(', ' + str(i + 1), end='') sys.stdout.flush() model.define_network(image=ciclo) # ciclo = out.predict(numerotime) ciclo = predict_image(model, input0, numerotime, split) print() if plotOption is True: medio = 3 * 2.6e-3 lambdai = 0 import matplotlib.pyplot as plt plt.figure(figsize=(12, 6)) plt.subplot(131) plt.title('Original - lambda:' + str(lambdai)) plt.imshow(input0[lambdai, :, :], cmap='seismic', origin='lower', interpolation='None', vmin=-medio, vmax=+medio) plt.subplot(132) plt.title('Clean image') plt.imshow(input0[lambdai, :, :] - ciclo[lambdai, :, :], cmap='seismic', vmin=-medio, vmax=+medio, origin='lower', interpolation='None') plt.subplot(133) plt.title('Difference') plt.imshow(ciclo[lambdai, :, :], cmap='seismic', vmin=-medio, vmax=+medio, origin='lower', interpolation='None') plt.savefig('images/output_t' + str(numerotime) + '_i' + str(niterations) + '.pdf', bbox_inches='tight') if input0.shape[0] > 1.5: lambdai = input0.shape[0] // 2 - 1 plt.figure(figsize=(12, 6)) plt.subplot(131) plt.title('Original - lambda:' + str(lambdai)) plt.imshow(input0[lambdai, :, :], cmap='seismic', origin='lower', interpolation='None', vmin=-medio, vmax=+medio) plt.subplot(132) plt.title('Clean image') plt.imshow(input0[lambdai, :, :] - ciclo[lambdai, :, :], cmap='seismic', vmin=-medio, vmax=+medio, origin='lower', interpolation='None') plt.subplot(133) plt.title('Difference') plt.imshow(ciclo[lambdai, :, :], cmap='seismic', vmin=-medio, vmax=+medio, origin='lower', interpolation='None') plt.savefig( 'images/outputB_t' + str(numerotime) + '_i' + str(niterations) + '.pdf', bbox_inches='tight') output0 = (input0[:, :, :] - ciclo[:, :, :]) / sc # Copying the original data new_output[jj, istokes, :, :, :] = input_data[jj, istokes, :, :, :] # Changing the new output new_output[jj, istokes, :, :int(input_data.shape[3] / 4.) * int(4), :int(input_data.shape[4] / 4.) * int(4)] = output0 # We do not clean Stokes I new_output[:, 0, :, :, :] = input_data[:, 0, :, :, :] print('All done') # To avoid the TF_DeleteStatus message: # https://github.com/tensorflow/tensorflow/issues/3388 # ktf.clear_session() return new_output
[]
[]
[ "TF_CPP_MIN_LOG_LEVEL" ]
[]
["TF_CPP_MIN_LOG_LEVEL"]
python
1
0
backend/eataboutown_34327/wsgi.py
""" WSGI config for eataboutown_34327 project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'eataboutown_34327.settings') application = get_wsgi_application()
[]
[]
[]
[]
[]
python
0
0
manage.py
#!/usr/bin/env python """Django's command-line utility for administrative tasks.""" import os import sys def main(): """Run administrative tasks.""" os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.dev") try: from django.core.management import execute_from_command_line except ImportError as exc: raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) from exc execute_from_command_line(sys.argv) if __name__ == "__main__": main()
[]
[]
[]
[]
[]
python
0
0
cli/start.go
package cli /* This is just a test app to demonstrate basic usage of the securityspy library. */ // XXX: Export most of the methods. Make this more library-like. import ( "fmt" "log" "os" "strconv" "strings" "time" flg "github.com/spf13/pflag" "golift.io/securityspy" "golift.io/securityspy/server" "golift.io/version" ) const ( mebibyte = 1024 * 1024 waitTime = 10 * time.Second ) // Config represents the CLI args + securityspy.Server. type Config struct { UseSSL bool User string Pass string URL string Cmd string Arg string Server *securityspy.Server } // Errors. var ( ErrInvalidCommand = fmt.Errorf("invalid command") ErrDisconnected = fmt.Errorf("disconnected") ) // Start gets the app going. func Start() error { //nolint:cyclop config := parseFlags() switch config.Cmd { // Demonstrates event channels. Events always happen in order. // Do not block the channel or things stop working. case "events", "event", "e": return config.handleEvents() case "callbacks", "callback", "call", "l": // Demonstrates event callbacks. Sometimes they fire out of order. // They happen in a go routine, so they can be blocking operations. server := config.getServer() server.Encoder = "/usr/local/bin/ffmpeg" fmt.Println("Watching Event Stream (all events, forever)") config.Server.Events.BindFunc(securityspy.EventAllEvents, config.showEvent) config.Server.Events.Watch(waitTime, true) case "cameras", "cams", "cam", "c": config.printCamData() case "video", "vid", "v": config.saveVideo() case "picture", "pic", "p": config.savePicture() case "trigger", "t": config.triggerMotion() case "files", "file", "f": config.showFiles() case "download", "d": config.downloadFile() case "ptz", "z": config.controlPTZ() case "arm", "a": config.armEverything() default: flg.Usage() return fmt.Errorf("%w: %s", ErrInvalidCommand, config.Cmd) } return nil } func (c *Config) handleEvents() error { channel := make(chan securityspy.Event) fmt.Println("Watching Event Stream (specific events, until disconnect)") c.getServer() c.Server.Events.BindChan(securityspy.EventStreamDisconnect, channel) c.Server.Events.BindChan(securityspy.EventStreamConnect, channel) c.Server.Events.BindChan(securityspy.EventMotionDetected, channel) c.Server.Events.BindChan(securityspy.EventOnline, channel) c.Server.Events.BindChan(securityspy.EventOffline, channel) go c.Server.Events.Watch(waitTime, true) for event := range channel { c.showEvent(event) if event.Type == securityspy.EventStreamDisconnect { c.Server.Events.UnbindAll() c.Server.Events.Stop(true) return fmt.Errorf("%w: exiting", ErrDisconnected) } } return nil } // Turn CLI flags into a config struct. func parseFlags() *Config { config := &Config{} flg.Usage = func() { fmt.Println("Usage: secspy [--user <user>] [--pass <pass>] [--url <url>] [-c <cmd>] [-a <arg>]") flg.PrintDefaults() } flg.StringVarP(&config.User, "user", "u", os.Getenv("SECSPY_USERNAME"), "Username to authenticate with") flg.StringVarP(&config.Pass, "pass", "p", os.Getenv("SECSPY_PASSWORD"), "Password to authenticate with") flg.StringVarP(&config.URL, "url", "U", "http://127.0.0.1:8000", "SecuritySpy URL") flg.BoolVarP(&config.UseSSL, "verify-ssl", "s", false, "Validate SSL certificate if using https") flg.StringVarP(&config.Cmd, "command", "c", "", "Command to run. Currently supports: events/callback, cams, pic, vid, trigger, files, download, ptz, arm") flg.StringVarP(&config.Arg, "arg", "a", "", "if cmd supports an argument, pass it here. ie. 
-c pic -a Porch:/tmp/filename.jpg") ver := flg.BoolP("version", "v", false, "Print the version and exit") if flg.Parse(); *ver { fmt.Printf("secspy v%s\n", version.Version) os.Exit(0) // don't run anything else. } return config } // getServer makes, saves and returns a securitypy handle. func (c *Config) getServer() *securityspy.Server { var err error if c.Server, err = securityspy.New(&server.Config{ Username: c.User, Password: c.Pass, URL: c.URL, VerifySSL: c.UseSSL, }); err != nil { fmt.Println("SecuritySpy Error:", err) os.Exit(1) } scripts, _ := c.Server.GetScripts() // These each do another web request. sounds, _ := c.Server.GetSounds() fmt.Printf("%v %v @ %v (http://%v:%v/) %d cameras, %d scripts, %d sounds, %d schedules, %d schedule presets\n", c.Server.Info.Name, c.Server.Info.Version, c.Server.Info.CurrentTime, c.Server.Info.IP1, c.Server.Info.HTTPPort, len(c.Server.Cameras.All()), len(scripts), len(sounds), len(c.Server.Info.ServerSchedules), len(c.Server.Info.SchedulePresets)) return c.Server } func (c *Config) triggerMotion() { if c.Arg == "" { fmt.Println("Triggers motion on a camera.") fmt.Println("Supply a camera name with -a <cam>[,<cam>][,<cam>]") fmt.Println("Example: secspy -c trigger -a Door,Gate") fmt.Println("See camera names with -c cams") os.Exit(1) } srv := c.getServer() for _, arg := range strings.Split(c.Arg, ",") { if cam := srv.Cameras.ByName(arg); cam == nil { fmt.Println("Camera does not exist:", arg) continue } else if err := cam.TriggerMotion(); err != nil { fmt.Printf("Error Triggering Motion for camera '%v': %v", arg, err) continue } fmt.Println("Triggered Motion for Camera:", arg) } } // showEvent is a callback function fired by the event watcher in securityspy library. func (c *Config) showEvent(e securityspy.Event) { camString := "No Camera" // Always check Camera interface for nil. if e.Camera != nil { camString = "Camera " + strconv.Itoa(e.Camera.Number) + ": " + e.Camera.Name } else if e.ID < 0 { camString = "SecuritySpy Server" } fmt.Printf("[%v] Event %d: %v, %v, Msg: (errors: %d) %v\n", e.When, e.ID, e.String(), camString, len(e.Errors), e.Msg) } // printCamData formats camera data onto a screen for an operator. 
func (c *Config) printCamData() { for _, camera := range c.getServer().Cameras.All() { fmt.Printf("%2v: %-14v (%-4vx%-4v %5v/%-7v %v) connected: %3v, down %v, modes: C:%-8v M:%-8v A:%-8v "+ "%2vFPS, Audio:%3v, MD: %3v/pre:%v/post:%3v idle %-10v Script: %v (reset %v)\n", camera.Number, camera.Name, camera.Width, camera.Height, camera.DeviceName, camera.DeviceType, camera.Address, camera.Connected.Val, camera.TimeSinceLastFrame.String(), camera.ModeC.Txt, camera.ModeM.Txt, camera.ModeA.Txt+",", int(camera.CurrentFPS), camera.HasAudio.Txt, camera.MDenabled.Txt, camera.MDpreCapture.String(), camera.MDpostCapture.String(), camera.TimeSinceLastMotion.String(), camera.ActionScriptName, camera.ActionResetTime.String()) } } func (c *Config) savePicture() { if c.Arg == "" || !strings.Contains(c.Arg, ":") { fmt.Println("Saves a single still JPEG image from a camera.") fmt.Println("Supply a camera name and file path with -a <cam>:<path>") fmt.Println("Example: secspy -c pic -a Porch:/tmp/Porch.jpg") fmt.Println("See camera names with -c cams") os.Exit(1) } split := strings.Split(c.Arg, ":") cam := c.getServer().Cameras.ByName(split[0]) if cam == nil { fmt.Println("Camera does not exist:", split[0]) os.Exit(1) } else if err := cam.SaveJPEG(&securityspy.VidOps{}, split[1]); err != nil { fmt.Printf("Error Saving Image for camera '%v' to file '%v': %v\n", cam.Name, split[1], err) os.Exit(1) } fmt.Printf("Image for camera '%v' saved to: %v\n", cam.Name, split[1]) } func (c *Config) saveVideo() { if c.Arg == "" || !strings.Contains(c.Arg, ":") { fmt.Println("Saves a 10 second video from a camera.") fmt.Println("Supply a camera name and file path with -a <cam>:<path>") fmt.Println("Example: secspy -c pic -a Gate:/tmp/Gate.mov") fmt.Println("See camera names with -c cams") os.Exit(1) } split := strings.Split(c.Arg, ":") cam := c.getServer().Cameras.ByName(split[0]) if cam == nil { fmt.Println("Camera does not exist:", split[0]) os.Exit(1) } else if err := cam.SaveVideo(&securityspy.VidOps{}, waitTime, 9999999999, split[1]); err != nil { fmt.Printf("Error Saving Video for camera '%v' to file '%v': %v\n", cam.Name, split[1], err) os.Exit(1) } fmt.Printf("10 Second video for camera '%v' saved to: %v\n", cam.Name, split[1]) } func (c *Config) showFiles() { if c.Arg == "" { fmt.Println("Shows last files captured by securityspy") fmt.Println("Supply camera names and file age with -a <cam>,<cam>:<days old>") fmt.Println("Example: secspy -c files -a Porch,Gate:10") fmt.Println("See camera names with -c cams") os.Exit(1) } daysOld := 14 srv := c.getServer() cameraNums := []int{} split := strings.Split(c.Arg, ":") if len(split) > 1 { daysOld, _ = strconv.Atoi(split[1]) if daysOld < 1 { daysOld = 14 } } // Loop the provided camera names and find their numbers. for _, name := range strings.Split(split[0], ",") { cam := srv.Cameras.ByName(name) if cam == nil { fmt.Println("Camera does not exist:", name) continue } cameraNums = append(cameraNums, cam.Number) } age := time.Now().Add(-time.Duration(daysOld) * 24 * time.Hour) // nolint:durationcheck files, err := srv.Files.GetAll(cameraNums, age, time.Now()) if err != nil { fmt.Println("Received error from Files.All() method:", err) } fmt.Printf("Found %d files. 
From %v to %v:\n", len(files), age.Format("01/02/2006"), time.Now().Format("01/02/2006")) for _, file := range files { camName := "<no camera>" if file.Camera != nil { camName = file.Camera.Name } fmt.Printf("[%v] %v %v: '%v' (%vMB)\n", file.Updated, camName, file.Link.Type, file.Title, file.Link.Length/mebibyte) } } func (c *Config) downloadFile() { if c.Arg == "" || !strings.Contains(c.Arg, ":") { fmt.Println("Downloads a saved media file from SecuritySpy.") fmt.Println("Supply file name and save-path with -a 'filename:path'") fmt.Println("Example: secspy -c download -a '01-19-2019 00-01-23 M Porch.m4v:/tmp/file.m4v'") fmt.Println("See file names with -c files") os.Exit(1) } srv := c.getServer() fileName := strings.Split(c.Arg, ":")[0] savePath := strings.Split(c.Arg, ":")[1] if _, err := os.Stat(savePath); !os.IsNotExist(err) { fmt.Println("File already exists:", savePath) os.Exit(1) } file, err := srv.Files.GetFile(fileName) if err != nil { fmt.Println("Error getting file:", err) os.Exit(1) } size, err := file.Save(savePath) if err != nil { fmt.Println("Error writing file:", err) os.Exit(1) } fmt.Println("File saved to:", savePath, "->", size/mebibyte, "MiB") } func (c *Config) controlPTZ() { if c.Arg == "" || !strings.Contains(c.Arg, ":") { fmt.Println("Controls Camera PTZ.") fmt.Println("Supply the Camera and action with -a 'Camera:action'") fmt.Println("Example: secspy -c z -a 'Door Cam:Home'") fmt.Println("Actions: Home, Up, Down, Left, Right, In, Out, Preset1 .. Preset8") os.Exit(1) } srv := c.getServer() splitStr := strings.Split(c.Arg, ":") command := strings.ToLower(splitStr[1]) camera := srv.Cameras.ByName(splitStr[0]) if camera == nil { fmt.Println("camera not found:", splitStr[0]) os.Exit(1) } if err := c.handlePTZCommand(camera, command); err != nil { fmt.Println(err) os.Exit(1) } fmt.Println(command, "command sent to", camera.Name) } func (c *Config) handlePTZCommand(camera *securityspy.Camera, command string) error { //nolint:cyclop var err error switch command { case "home": err = camera.PTZ.Home() case "up": err = camera.PTZ.Up() case "down": err = camera.PTZ.Down() case "left": err = camera.PTZ.Left() case "right": err = camera.PTZ.Right() case "in": err = camera.PTZ.Zoom(true) case "out": err = camera.PTZ.Zoom(false) case "preset1": err = camera.PTZ.Preset(securityspy.PTZpreset1) case "preset2": err = camera.PTZ.Preset(securityspy.PTZpreset2) case "preset3": err = camera.PTZ.Preset(securityspy.PTZpreset3) case "preset4": err = camera.PTZ.Preset(securityspy.PTZpreset4) case "preset5": err = camera.PTZ.Preset(securityspy.PTZpreset5) case "preset6": err = camera.PTZ.Preset(securityspy.PTZpreset6) case "preset7": err = camera.PTZ.Preset(securityspy.PTZpreset7) case "preset8": err = camera.PTZ.Preset(securityspy.PTZpreset8) default: err = fmt.Errorf("%w: %s", ErrInvalidCommand, command) } if err != nil { return fmt.Errorf("ptz error: %w", err) } return nil } func (c *Config) armEverything() { if c.Arg == "" { fmt.Println("Arms all modes on a camera.") fmt.Println("Supply the Camera with -a 'Camera'") fmt.Println("Example: secspy -c a -a 'Door Cam'") os.Exit(1) } srv := c.getServer() splitStr := strings.Split(c.Arg, ":") camera := srv.Cameras.ByName(splitStr[0]) schedules := srv.Info.ServerSchedules mode := securityspy.CameraModeAll // or CameraModeMotion, CameraModeActions, CameraModeContinous for id, schedule := range schedules { if id == 1 { // 1 is always Arm 24/7, and 0 is Unarm 24/7. 
if err := camera.SetSchedule(mode, id); err != nil { log.Fatal("Error Setting Camera Schedule:", err) } fmt.Println(schedule, "-> schedule set on camera:", camera.Name) break } } /* Another way */ // schedule := securityspy.Schedule{ID: 1, Name: "Always Armed"} // 1 is always Arm 24/7 // if err := camera.SetSchedule(securityspy.CameraModeAll, schedule); err != nil { // log.Fatal("Error Setting Camera Schedule:", err) // } for id, override := range srv.Info.ScheduleOverrides { if override == "None" { if err := camera.SetScheduleOverride(mode, id); err != nil { log.Fatal("Error Setting Schedule Override:", err) } fmt.Println("Set Override:", override, "-> on camera:", camera.Name) break } } }
[ "\"SECSPY_USERNAME\"", "\"SECSPY_PASSWORD\"" ]
[]
[ "SECSPY_USERNAME", "SECSPY_PASSWORD" ]
[]
["SECSPY_USERNAME", "SECSPY_PASSWORD"]
go
2
0
src/cnn_semrel.py
""" This file was extensively rewritten from the sentence CNN code at https://github.com/yoonkim/CNN_sentence by Yoon Kim """ from cnn_classes import LeNetConvPoolLayer, MLPDropout from file_util import get_file_list __author__= ["Yuan Luo ([email protected])", "Simon Suster"] import cPickle import numpy as np from collections import defaultdict, OrderedDict import os os.environ["THEANO_FLAGS"] = "mode=FAST_RUN,device=gpu,floatX=float32" import theano import theano.tensor as T import re import warnings import sys import time import stats_util as su warnings.filterwarnings("ignore") htrp_rel = {'TrIP': 1, 'TrWP': 2, 'TrCP': 3, 'TrAP': 4, 'TrNAP': 5, 'None': 0} inv_htrp_rel = {i: name for name, i in htrp_rel.items()} htep_rel = {'TeRP': 1, 'TeCP': 2, 'None': 0} inv_htep_rel = {i: name for name, i in htep_rel.items()} hpp_rel = {'PIP': 1, 'None': 0} inv_hpp_rel = {i: name for name, i in hpp_rel.items()} #different non-linearities def ReLU(x): y = T.maximum(0.0, x) return(y) def ELU(x): y = T.nnet.elu(x) return (y) def Sigmoid(x): y = T.nnet.sigmoid(x) return(y) def Tanh(x): y = T.tanh(x) return(y) def Iden(x): y = x return(y) def make_rel_hash(drel): hrel = {} for rel in drel: hrel[rel['iid']] = rel return hrel; def train_conv_net(datasets, rel_tr, rel_te, rel_de, hlen, U, # yluo: embedding matrix fnres, img_w=300, filter_hs=[3,4,5], hidden_units=[100,2], # hidden_units[1] is number of classes dropout_rate=[0.5], shuffle_batch=True, n_epochs=25, batch_size=50, # yluo: how many sentences to extract to compute gradient lr_decay = 0.95, conv_non_linear="relu", activations=[Iden], sqr_norm_lim=9, non_static=True, relname=None ): """ Train a simple conv net img_h = sentence length (padded where necessary) img_w = word vector length (300 for word2vec) filter_hs = filter window sizes hidden_units = [x,y] x is the number of feature maps (per filter window), and y is the penultimate layer sqr_norm_lim = s^2 in the paper lr_decay = adadelta decay parameter """ hrel_tr = make_rel_hash(rel_tr) hrel_te = make_rel_hash(rel_te) hrel_de = make_rel_hash(rel_de) rng = np.random.RandomState() img_h_tot = len(datasets[0][0])-2 # SS: exclude 2 dimensions: (iid, y). compa1 and compa2 are included pad = max(filter_hs) - 1 filter_w = img_w # yluo: what does different feature maps correspond to? feature_maps = hidden_units[0] filter_shapes = [] for filter_h in filter_hs: # yluo: what does 1 in the filter shape mean? # (number of filters, num input feature maps, filter height, filter width) # how to interpet different filters? 
filter_shapes.append((feature_maps, 1, filter_h, filter_w)) parameters = [("image shape",img_h_tot,img_w), ("filter shape",filter_shapes), ("hidden_units",hidden_units), ("dropout", dropout_rate), ("batch_size",batch_size), ("non_static", non_static), ("learn_decay",lr_decay), ("conv_non_linear", conv_non_linear), ("non_static", non_static), ("sqr_norm_lim",sqr_norm_lim), ("shuffle_batch",shuffle_batch)] print parameters #define model architecture index = T.lscalar() # x = T.matrix('x') c1 = T.matrix('c1') c2 = T.matrix('c2') prec = T.matrix('prec') mid = T.matrix('mid') succ = T.matrix('succ') y = T.ivector('y') iid = T.vector('iid') compa1 = T.vector('compa1') # compatibility1 of c1/c2 compa2 = T.vector('compa2') # compatibility2 of c1/c2 semclass1 = T.vector('semclass1') # semclass of a "predicate" semclass2 = T.vector('semclass2') # semclass of a "predicate" semclass3 = T.vector('semclass3') # semclass of a "predicate" semclass4 = T.vector('semclass4') # semclass of a "predicate" semclass5 = T.vector('semclass5') # semclass of a "predicate" #pr = theano.printing.Print("COMPA")(compa) Words = theano.shared(value = U, name = "Words") zero_vec_tensor = T.vector() zero_vec = np.zeros(img_w) set_zero = theano.function([zero_vec_tensor], updates=[(Words, T.set_subtensor(Words[0,:], zero_vec_tensor))], allow_input_downcast=True) c1_input = Words[T.cast(c1.flatten(),dtype="int32")].reshape((c1.shape[0],1,c1.shape[1],Words.shape[1])) # reshape to 3d array # Words[T.cast(c1.flatten(),dtype="int32")] >>> len c1 flattened*emb_dim # c1_input >>> n_insts * 1 * n_ws_per_inst * emb_dim c2_input = Words[T.cast(c2.flatten(),dtype="int32")].reshape((c2.shape[0],1,c2.shape[1],Words.shape[1])) # reshape to 3d array prec_input = Words[T.cast(prec.flatten(),dtype="int32")].reshape((prec.shape[0],1,prec.shape[1],Words.shape[1])) # reshape to 3d array mid_input = Words[T.cast(mid.flatten(),dtype="int32")].reshape((mid.shape[0],1,mid.shape[1],Words.shape[1])) # reshape to 3d array succ_input = Words[T.cast(succ.flatten(),dtype="int32")].reshape((succ.shape[0],1,succ.shape[1],Words.shape[1])) # reshape to 3d array layer0_input = {'c1':c1_input, 'c2':c2_input, 'prec':prec_input, 'mid':mid_input, 'succ':succ_input} conv_layers = [] layer1_inputs = [] for i in xrange(len(filter_hs)): for seg in hlen.keys(): # used hlen as a global var, to fix filter_shape = filter_shapes[i] img_h = hlen[seg]+2*pad pool_size = (img_h-filter_h+1, img_w-filter_w+1) conv_layer = LeNetConvPoolLayer(rng, input=layer0_input[seg], image_shape=(batch_size, 1, img_h, img_w), filter_shape=filter_shape, poolsize=pool_size, non_linear=conv_non_linear) layer1_input = conv_layer.output.flatten(2) # yluo: 2 dimensions >>> conv_layers.append(conv_layer) # yluo: layer 0 layer1_inputs.append(layer1_input) # yluo: 3 dimensions layer1_input = T.concatenate(layer1_inputs,1) # yluo: 2 dimensions >>> n_insts * concat_dim? 
layer1_input = T.horizontal_stack(layer1_input, compa1.reshape((compa1.shape[0], 1)), compa2.reshape((compa2.shape[0], 1)), semclass1.reshape((semclass1.shape[0], 1)), semclass2.reshape((semclass2.shape[0], 1)), semclass3.reshape((semclass3.shape[0], 1)), semclass4.reshape((semclass4.shape[0], 1)), semclass5.reshape((semclass5.shape[0], 1))) hidden_units[0] = feature_maps*len(filter_hs)*len(hlen)+2+5 # compa: plus 2 (we have two compa feats); semclass: plus 5 classifier = MLPDropout(rng, input=layer1_input, layer_sizes=hidden_units, activations=activations, dropout_rates=dropout_rate) #define parameters of the model and update functions using adadelta params = classifier.params for conv_layer in conv_layers: params += conv_layer.params if non_static: #if word vectors are allowed to change, add them as model parameters params += [Words] cost = classifier.negative_log_likelihood(y) dropout_cost = classifier.dropout_negative_log_likelihood(y) grad_updates = sgd_updates_adadelta(params, dropout_cost, lr_decay, 1e-6, sqr_norm_lim) #shuffle dataset and assign to mini batches. if dataset size is not a multiple of mini batches, replicate , stochastic gradient descent #extra data (at random) tr_size = datasets[0].shape[0] de_size = datasets[2].shape[0] hi_seg = datasets[3] print(hi_seg) c1s, c1e = hi_seg['c1']; c2s, c2e = hi_seg['c2']; mids, mide = hi_seg['mid'] precs, prece = hi_seg['prec']; succs, succe = hi_seg['succ'] yi = hi_seg['y']; idi = hi_seg['iid'] compa1i = hi_seg['compa1'] compa2i = hi_seg['compa2'] semclass1i = hi_seg['semclass1'] semclass2i = hi_seg['semclass2'] semclass3i = hi_seg['semclass3'] semclass4i = hi_seg['semclass4'] semclass5i = hi_seg['semclass5'] if tr_size % batch_size > 0: extra_data_num = batch_size - tr_size % batch_size train_set = rng.permutation(datasets[0]) extra_data = train_set[:extra_data_num] new_data=np.append(datasets[0],extra_data,axis=0) else: new_data = datasets[0] new_data = rng.permutation(new_data) n_batches = new_data.shape[0]/batch_size #n_train_batches = int(np.round(n_batches*0.9)) n_train_batches = n_batches if de_size % batch_size > 0: extra_data_num = batch_size - de_size % batch_size dev_set = rng.permutation(datasets[2]) extra_data = dev_set[:extra_data_num] new_data_de = np.append(datasets[2],extra_data,axis=0) else: new_data_de = datasets[2] new_data_de = rng.permutation(new_data_de) n_dev_batches = new_data_de.shape[0]/batch_size #divide train set into train/val sets c1_te = datasets[1][:, c1s:c1e] c2_te = datasets[1][:, c2s:c2e] prec_te = datasets[1][:, precs:prece] mid_te = datasets[1][:, mids:mide] succ_te = datasets[1][:, succs:succe] test_set = datasets[1] y_te = np.asarray(test_set[:,yi],"int32") compa1_te = np.asarray(test_set[:, compa1i], "float32") compa2_te = np.asarray(test_set[:, compa2i], "float32") semclass1_te = np.asarray(test_set[:, semclass1i], "float32") semclass2_te = np.asarray(test_set[:, semclass2i], "float32") semclass3_te = np.asarray(test_set[:, semclass3i], "float32") semclass4_te = np.asarray(test_set[:, semclass4i], "float32") semclass5_te = np.asarray(test_set[:, semclass5i], "float32") train_set = new_data[:n_train_batches*batch_size,:] dev_set = new_data_de[:n_dev_batches*batch_size:,:] x_tr, y_tr = shared_dataset((train_set[:,:img_h_tot], train_set[:,-1])) x_de, y_de = shared_dataset((dev_set[:,:img_h_tot], dev_set[:,-1])) iid_tr = train_set[:,idi].flatten() iid_de = dev_set[:,idi].flatten() iid_te = test_set[:,idi].flatten() print('len iid_de %d' % (len(iid_de))) #compile theano functions to get 
train/val/test errors dev_model = theano.function([index], classifier.preds(y), givens={ c1: x_de[index*batch_size: (index+1)*batch_size, c1s:c1e], c2: x_de[index*batch_size: (index+1)*batch_size, c2s:c2e], prec: x_de[index*batch_size: (index+1)*batch_size, precs:prece], mid: x_de[index*batch_size: (index+1)*batch_size, mids:mide], succ: x_de[index*batch_size: (index+1)*batch_size, succs:succe], compa1: x_de[index*batch_size: (index+1)*batch_size, compa1i], compa2: x_de[index * batch_size: (index + 1) * batch_size, compa2i], semclass1: x_de[index * batch_size: (index + 1) * batch_size, semclass1i], semclass2: x_de[index * batch_size: (index + 1) * batch_size, semclass2i], semclass3: x_de[index * batch_size: (index + 1) * batch_size, semclass3i], semclass4: x_de[index * batch_size: (index + 1) * batch_size, semclass4i], semclass5: x_de[index * batch_size: (index + 1) * batch_size, semclass5i], y: y_de[index*batch_size: (index+1)*batch_size], }, allow_input_downcast=True, on_unused_input='warn') # this test_model is batch test model for train test_model = theano.function([index], classifier.errors(y), givens={ c1: x_tr[index*batch_size: (index+1)*batch_size, c1s:c1e], c2: x_tr[index*batch_size: (index+1)*batch_size, c2s:c2e], prec: x_tr[index*batch_size: (index+1)*batch_size, precs:prece], mid: x_tr[index*batch_size: (index+1)*batch_size, mids:mide], succ: x_tr[index*batch_size: (index+1)*batch_size, succs:succe], compa1: x_tr[index * batch_size: (index + 1) * batch_size, compa1i], compa2: x_tr[index * batch_size: (index + 1) * batch_size, compa2i], semclass1: x_tr[index * batch_size: (index + 1) * batch_size, semclass1i], semclass2: x_tr[index * batch_size: (index + 1) * batch_size, semclass2i], semclass3: x_tr[index * batch_size: (index + 1) * batch_size, semclass3i], semclass4: x_tr[index * batch_size: (index + 1) * batch_size, semclass4i], semclass5: x_tr[index * batch_size: (index + 1) * batch_size, semclass5i], y: y_tr[index*batch_size: (index+1)*batch_size]}, allow_input_downcast=True) train_model = theano.function([index], cost, updates=grad_updates, givens={ c1: x_tr[index*batch_size: (index+1)*batch_size, c1s:c1e], c2: x_tr[index*batch_size: (index+1)*batch_size, c2s:c2e], prec: x_tr[index*batch_size: (index+1)*batch_size, precs:prece], mid: x_tr[index*batch_size: (index+1)*batch_size, mids:mide], succ: x_tr[index*batch_size: (index+1)*batch_size, succs:succe], compa1: x_tr[index * batch_size: (index + 1) * batch_size, compa1i], compa2: x_tr[index * batch_size: (index + 1) * batch_size, compa2i], semclass1: x_tr[index * batch_size: (index + 1) * batch_size, semclass1i], semclass2: x_tr[index * batch_size: (index + 1) * batch_size, semclass2i], semclass3: x_tr[index * batch_size: (index + 1) * batch_size, semclass3i], semclass4: x_tr[index * batch_size: (index + 1) * batch_size, semclass4i], semclass5: x_tr[index * batch_size: (index + 1) * batch_size, semclass5i], y: y_tr[index*batch_size: (index+1)*batch_size]}, allow_input_downcast = True) test_pred_layers = [] test_size = len(y_te) c1_te_input = Words[T.cast(c1.flatten(),dtype="int32")].reshape((c1_te.shape[0],1,c1_te.shape[1],Words.shape[1])) c2_te_input = Words[T.cast(c2.flatten(),dtype="int32")].reshape((c2_te.shape[0],1,c2_te.shape[1],Words.shape[1])) prec_te_input = Words[T.cast(prec.flatten(),dtype="int32")].reshape((prec_te.shape[0],1,prec_te.shape[1],Words.shape[1])) mid_te_input = Words[T.cast(mid.flatten(),dtype="int32")].reshape((mid_te.shape[0],1,mid_te.shape[1],Words.shape[1])) succ_te_input = 
Words[T.cast(succ.flatten(),dtype="int32")].reshape((succ_te.shape[0],1,succ_te.shape[1],Words.shape[1])) test_layer0_input = {'c1':c1_te_input, 'c2':c2_te_input, 'prec':prec_te_input, 'mid':mid_te_input, 'succ':succ_te_input} cl_id = 0 # conv layer id for i in xrange(len(filter_hs)): for seg in hlen.keys(): conv_layer = conv_layers[cl_id] test_layer0_output = conv_layer.predict(test_layer0_input[seg], test_size) ## doesn't seeem to matter if just use layer0_input here test_pred_layers.append(test_layer0_output.flatten(2)) cl_id += 1 test_layer1_input = T.concatenate(test_pred_layers, 1) #test_layer1_input = T.horizontal_stack(test_layer1_input, compa_te.reshape((compa_te.shape[0], 1))) test_layer1_input = T.horizontal_stack(test_layer1_input, compa1.reshape((compa1.shape[0], 1)), compa2.reshape((compa2.shape[0], 1)), semclass1.reshape((semclass1.shape[0], 1)), semclass2.reshape((semclass2.shape[0], 1)), semclass3.reshape((semclass3.shape[0], 1)), semclass4.reshape((semclass4.shape[0], 1)), semclass5.reshape((semclass5.shape[0], 1))) test_y_pred = classifier.predict(test_layer1_input) test_error = T.mean(T.neq(test_y_pred, y)) test_model_all = theano.function([c1,c2,prec,mid,succ,compa1,compa2,semclass1,semclass2,semclass3,semclass4,semclass5], test_y_pred, allow_input_downcast = True) #start training over mini-batches print '... training' epoch = 0 best_dev_perf = 0 test_perf = 0 cost_epoch = 0 while (epoch < n_epochs): start_time = time.time() epoch = epoch + 1 if shuffle_batch: for minibatch_index in rng.permutation(range(n_train_batches)): cost_epoch = train_model(minibatch_index) set_zero(zero_vec) else: for minibatch_index in xrange(n_train_batches): cost_epoch = train_model(minibatch_index) set_zero(zero_vec) train_losses = [np.mean(test_model(i)) for i in xrange(n_train_batches)] train_perf = 1 - np.mean(train_losses) dev_preds = np.asarray([]) for i in xrange(n_dev_batches): dev_sb_preds = dev_model(i) y_sb = y_de[i*batch_size:(i+1)*batch_size].eval() dev_sb_errors = dev_sb_preds != y_sb err_ind = [j for j,x in enumerate(dev_sb_errors) if x==1] dev_sb = iid_de[i*batch_size:(i+1)*batch_size] dev_preds = np.append(dev_preds, dev_sb_preds) dev_perf = 1- np.mean(y_de.eval() != dev_preds) dev_cm = su.confMat(y_de.eval(), dev_preds, hidden_units[1]) (dev_pres, dev_recs, dev_f1s, dev_mipre, dev_mirec, dev_mif) = su.cmPRF(dev_cm, ncstart=1) print('epoch: %i, training time: %.2f secs, train perf: %.2f %%, dev_mipre: %.2f %%, dev_mirec: %.2f %%, dev_mif: %.2f %%' % (epoch, time.time()-start_time, train_perf * 100., dev_mipre*100., dev_mirec*100., dev_mif*100.)) if dev_mif >= best_dev_perf: best_dev_perf = dev_mif test_pred = test_model_all(c1_te,c2_te,prec_te,mid_te,succ_te,compa1_te,compa2_te,semclass1_te,semclass2_te,semclass3_te,semclass4_te,semclass5_te) test_preds = extract_preds(rel_te, test_pred, relname) test_errors = test_pred != y_te err_ind = [j for j,x in enumerate(test_errors) if x==1] test_cm = su.confMat(y_te, test_pred, hidden_units[1]) print('\n'.join([''.join(['{:10}'.format(int(item)) for item in row]) for row in test_cm])) (pres, recs, f1s, mipre, mirec, mif) = su.cmPRF(test_cm, ncstart=1) mipre_de = dev_mipre mirec_de = dev_mirec mif_de = dev_mif print('mipre %s, mirec %s, mif %s' % (mipre, mirec, mif)) cPickle.dump([y_te,test_pred], open(fnres, "wb")) return (mipre, mirec, mif, mipre_de, mirec_de, mif_de, test_cm, test_preds) def extract_preds(rel, pred, relname): """ :param rel: rel_de, rel_te or rel_st :param pred: test_pred, dev_pred, st_pred :param relname: "trp", 
"tep" or "pp" :return: {fn1: [ln1, ln2, ...]} """ preds = defaultdict(list) if relname == "trp": inv_d_rel = inv_htrp_rel elif relname == "tep": inv_d_rel = inv_htep_rel elif relname == "pp": inv_d_rel = inv_hpp_rel else: raise NotImplementedError # write out non-None predictions assert len(rel) == len(pred) for c, i in enumerate(rel): r = inv_d_rel[pred[c]] if r == "None": continue # sent as an unannotated list un_sent = i["sen"].replace("[ ", "").replace(" ]treatment", "").replace(" ]problem", "").replace(" ]test", "").split() # get iid: '0449.rel:43 (6,7) (12,13)' inst_id = i["iid"] fname = inst_id.split(":")[0] ln, local_c1_id, local_c2_id = inst_id.split(":")[1].split(" ") local_c1_id = eval(local_c1_id) local_c2_id = eval(local_c2_id) c1_str = " ".join(un_sent[local_c1_id[0]:local_c1_id[1]]) c2_str = " ".join(un_sent[local_c2_id[0]:local_c2_id[1]]) # c="antibiotics" 80:15 80:15||r="TrAP"||c="left arm phlebitis" 80:8 80:10 ln_out = "c=\"{c1_str}\" {ln}:{local_c1_id_s} {ln}:{local_c1_id_e}||r=\"{r}\"||c=\"{c2_str}\" {ln}:{local_c2_id_s} {ln}:{local_c2_id_e}".format( c1_str=c1_str, ln=int(ln) + 1, local_c1_id_s=local_c1_id[0], local_c1_id_e=local_c1_id[1] - 1, # reduce last index by one r=r, c2_str=c2_str, local_c2_id_s=local_c2_id[0], local_c2_id_e=local_c2_id[1] - 1) preds[fname].append(ln_out) return preds def shared_dataset(data_xy, iid=None, borrow=True): """ Function that loads the dataset into shared variables The reason we store our dataset in shared variables is to allow Theano to copy it into the GPU memory (when code is run on GPU). Since copying data into the GPU is slow, copying a minibatch everytime is needed (the default behaviour if the data is not in a shared variable) would lead to a large decrease in performance. """ data_x, data_y = data_xy shared_x = theano.shared(np.asarray(data_x, dtype=theano.config.floatX), borrow=borrow) shared_y = theano.shared(np.asarray(data_y, dtype=theano.config.floatX), borrow=borrow) if iid == None: return shared_x, T.cast(shared_y, 'int32') else: shared_iid = theano.shared(np.asarray(iid), borrow=borrow) return shared_x, T.cast(shared_y, 'int32'), shared_iid def sgd_updates_adadelta(params,cost,rho=0.95,epsilon=1e-6,norm_lim=9,word_vec_name='Words'): """ adadelta update rule, mostly from https://groups.google.com/forum/#!topic/pylearn-dev/3QbKtCumAW4 (for Adadelta) """ updates = OrderedDict({}) exp_sqr_grads = OrderedDict({}) exp_sqr_ups = OrderedDict({}) gparams = [] for param in params: empty = np.zeros_like(param.get_value()) exp_sqr_grads[param] = theano.shared(value=as_floatX(empty),name="exp_grad_%s" % param.name) gp = T.grad(cost, param) exp_sqr_ups[param] = theano.shared(value=as_floatX(empty), name="exp_grad_%s" % param.name) gparams.append(gp) for param, gp in zip(params, gparams): exp_sg = exp_sqr_grads[param] exp_su = exp_sqr_ups[param] up_exp_sg = rho * exp_sg + (1 - rho) * T.sqr(gp) updates[exp_sg] = up_exp_sg step = -(T.sqrt(exp_su + epsilon) / T.sqrt(up_exp_sg + epsilon)) * gp updates[exp_su] = rho * exp_su + (1 - rho) * T.sqr(step) stepped_param = param + step if (param.get_value(borrow=True).ndim == 2) and (param.name!='Words'): col_norms = T.sqrt(T.sum(T.sqr(stepped_param), axis=0)) desired_norms = T.clip(col_norms, 0, T.sqrt(norm_lim)) scale = desired_norms / (1e-7 + col_norms) updates[param] = stepped_param * scale else: updates[param] = stepped_param return updates def as_floatX(variable): if isinstance(variable, float): return np.cast[theano.config.floatX](variable) if isinstance(variable, np.ndarray): return 
np.cast[theano.config.floatX](variable) return theano.tensor.cast(variable, theano.config.floatX) def safe_update(dict_to, dict_from): """ re-make update dictionary for safe updating """ for key, val in dict(dict_from).iteritems(): if key in dict_to: raise KeyError(key) dict_to[key] = val return dict_to def get_idx_from_segment(words, word_idx_map, max_l=51, k=300, filter_h=5): """ Transforms sentence into a list of indices. Pad with zeroes. """ x = [] pad = filter_h - 1 for i in xrange(pad): x.append(0) for word in words: if word in word_idx_map: x.append(word_idx_map[word]) while len(x) < max_l+2*pad: x.append(0) return x def merge_segs(c1, c2, prec, mid, succ, y, iid, compa1, compa2, semclass1, semclass2, semclass3, semclass4, semclass5, over_sampling=False, down_sampling=None): rng = np.random.RandomState() hi_seg = {} cursor = 0 print('shapes c1: %s, c2: %s, prec: %s, mid: %s, succ: %s, compa1: %s, compa2: %s, semclass1: %s, semclass2: %s, semclass3: %s, semclass4: %s, semclass5: %s, iid: %s, y: %s' % (c1.shape, c2.shape, prec.shape, mid.shape, succ.shape, compa1.shape, compa2.shape, semclass1.shape, semclass2.shape, semclass3.shape, semclass4.shape, semclass5.shape, iid.shape, y.shape)) data = np.hstack((c1, c2, prec, mid, succ, compa1, compa2, semclass1, semclass2, semclass3, semclass4, semclass5, iid, y)) hi_seg['c1'] = [cursor,c1.shape[1]]; cursor += c1.shape[1] hi_seg['c2'] = [cursor, cursor+c2.shape[1]]; cursor += c2.shape[1] hi_seg['prec'] = [cursor, cursor+prec.shape[1]]; cursor += prec.shape[1] hi_seg['mid'] = [cursor, cursor+mid.shape[1]]; cursor += mid.shape[1] hi_seg['succ'] = [cursor, cursor+succ.shape[1]]; cursor += succ.shape[1] hi_seg['compa1'] = cursor; cursor += 1 hi_seg['compa2'] = cursor; cursor += 1 hi_seg['semclass1'] = cursor; cursor += 1 hi_seg['semclass2'] = cursor; cursor += 1 hi_seg['semclass3'] = cursor; cursor += 1 hi_seg['semclass4'] = cursor; cursor += 1 hi_seg['semclass5'] = cursor; cursor += 1 hi_seg['iid'] = cursor; cursor += 1 hi_seg['y'] = cursor y = y.flatten() if over_sampling: num_none = np.sum(y==0) data_os = data for c in np.unique(y): if c != 0: num_c = np.sum(y==c) num_sample = num_none - num_c print(data.shape) data_c = data[np.asarray(y==c),:] print('data_c lab %s' % (data_c[:,hi_seg['y']].flatten()[1:20])) while num_sample > num_c: data_os = np.vstack((data_os, data_c)) num_sample -= num_c data_os = np.vstack((data_os, data_c[:num_sample,:])) data = data_os print('over-sampled dist %s %s' % (np.unique(data[:,hi_seg['y']], return_counts=True))) if down_sampling != None: data_ds = None (labs, counts) = np.unique(y, return_counts=True) cnt_min = min(counts) for (lab, count) in zip(labs, counts): data_c = data[np.asarray(y==lab),:] if count > down_sampling*cnt_min: data_c = data_c[rng.permutation(count)[:down_sampling*cnt_min],:] if data_ds is None: data_ds = data_c else: data_ds = np.vstack((data_ds, data_c)) data = data_ds print('down-sampled dist %s %s' % (np.unique(data[:,hi_seg['y']], return_counts=True))) return data, hi_seg; def make_idx_data_train_test_dev(rel_tr, rel_te, rel_de, word_idx_map, hlen, hrel, k=300, filter_h=5, down_sampling=None, n_train=None): """ Transforms sentences into a 2-d matrix. 
""" c1_tr, c1_te, c1_de = [], [], [] c2_tr, c2_te, c2_de = [], [], [] prec_tr, prec_te, prec_de = [], [], [] mid_tr, mid_te, mid_de = [], [], [] succ_tr, succ_te, succ_de = [], [], [] y_tr, y_te, y_de = [], [], [] iid_tr, iid_te, iid_de = [], [], [] compa1_tr, compa1_te, compa1_de = [], [], [] compa2_tr, compa2_te, compa2_de = [], [], [] semclass1_tr, semclass1_te, semclass1_de = [], [], [] semclass2_tr, semclass2_te, semclass2_de = [], [], [] semclass3_tr, semclass3_te, semclass3_de = [], [], [] semclass4_tr, semclass4_te, semclass4_de = [], [], [] semclass5_tr, semclass5_te, semclass5_de = [], [], [] for rel in rel_tr: c1_tr.append( get_idx_from_segment(rel['c1'], word_idx_map, hlen['c1'], k, filter_h) ) c2_tr.append( get_idx_from_segment(rel['c2'], word_idx_map, hlen['c2'], k, filter_h) ) prec_tr.append( get_idx_from_segment(rel['prec'], word_idx_map, hlen['prec'], k, filter_h) ) mid_tr.append( get_idx_from_segment(rel['mid'], word_idx_map, hlen['mid'], k, filter_h) ) succ_tr.append( get_idx_from_segment(rel['succ'], word_idx_map, hlen['succ'], k, filter_h) ) y_tr.append(hrel[rel['rel']]) iid_tr.append(rel['iid']) compa1_tr.append(rel['compa1']) compa2_tr.append(rel['compa2']) semclass1_tr.append(rel['semclass1']) semclass2_tr.append(rel['semclass2']) semclass3_tr.append(rel['semclass3']) semclass4_tr.append(rel['semclass4']) semclass5_tr.append(rel['semclass5']) print(np.unique(y_tr, return_counts=True)) y_tr = np.asarray(y_tr); y_tr = y_tr.reshape(len(y_tr), 1) iid_tr = np.asarray(iid_tr); iid_tr = iid_tr.reshape(len(iid_tr), 1) compa1_tr = np.asarray(compa1_tr); compa1_tr = compa1_tr.reshape(len(compa1_tr), 1) compa2_tr = np.asarray(compa2_tr); compa2_tr = compa2_tr.reshape(len(compa2_tr), 1) semclass1_tr = np.asarray(semclass1_tr); semclass1_tr = semclass1_tr.reshape(len(semclass1_tr), 1) semclass2_tr = np.asarray(semclass2_tr); semclass2_tr = semclass2_tr.reshape(len(semclass2_tr), 1) semclass3_tr = np.asarray(semclass3_tr); semclass3_tr = semclass3_tr.reshape(len(semclass3_tr), 1) semclass4_tr = np.asarray(semclass4_tr); semclass4_tr = semclass4_tr.reshape(len(semclass4_tr), 1) semclass5_tr = np.asarray(semclass5_tr); semclass5_tr = semclass5_tr.reshape(len(semclass5_tr), 1) c1_tr_lens = map(len, c1_tr) print('c1 tr len max %d, min %d' % (max(c1_tr_lens), min(c1_tr_lens))) for rel in rel_te: c1_te.append( get_idx_from_segment(rel['c1'], word_idx_map, hlen['c1'], k, filter_h) ) c2_te.append( get_idx_from_segment(rel['c2'], word_idx_map, hlen['c2'], k, filter_h) ) prec_te.append( get_idx_from_segment(rel['prec'], word_idx_map, hlen['prec'], k, filter_h) ) mid_te.append( get_idx_from_segment(rel['mid'], word_idx_map, hlen['mid'], k, filter_h) ) succ_te.append( get_idx_from_segment(rel['succ'], word_idx_map, hlen['succ'], k, filter_h) ) y_te.append(hrel[rel['rel']]) iid_te.append(rel['iid']) compa1_te.append(rel['compa1']) compa2_te.append(rel['compa2']) semclass1_te.append(rel['semclass1']) semclass2_te.append(rel['semclass2']) semclass3_te.append(rel['semclass3']) semclass4_te.append(rel['semclass4']) semclass5_te.append(rel['semclass5']) print(np.unique(y_te, return_counts=True)) y_te = np.asarray(y_te); y_te = y_te.reshape(len(y_te), 1) iid_te = np.asarray(iid_te); iid_te = iid_te.reshape(len(iid_te), 1) compa1_te = np.asarray(compa1_te); compa1_te = compa1_te.reshape(len(compa1_te), 1) compa2_te = np.asarray(compa2_te); compa2_te = compa2_te.reshape(len(compa2_te), 1) semclass1_te = np.asarray(semclass1_te); semclass1_te = semclass1_te.reshape(len(semclass1_te), 1) semclass2_te = 
np.asarray(semclass2_te); semclass2_te = semclass2_te.reshape(len(semclass2_te), 1) semclass3_te = np.asarray(semclass3_te); semclass3_te = semclass3_te.reshape(len(semclass3_te), 1) semclass4_te = np.asarray(semclass4_te); semclass4_te = semclass4_te.reshape(len(semclass4_te), 1) semclass5_te = np.asarray(semclass5_te); semclass5_te = semclass5_te.reshape(len(semclass5_te), 1) for rel in rel_de: c1_de.append(get_idx_from_segment(rel['c1'], word_idx_map, hlen['c1'], k, filter_h)) c2_de.append(get_idx_from_segment(rel['c2'], word_idx_map, hlen['c2'], k, filter_h)) prec_de.append(get_idx_from_segment(rel['prec'], word_idx_map, hlen['prec'], k, filter_h)) mid_de.append(get_idx_from_segment(rel['mid'], word_idx_map, hlen['mid'], k, filter_h)) succ_de.append(get_idx_from_segment(rel['succ'], word_idx_map, hlen['succ'], k, filter_h)) y_de.append(hrel[rel['rel']]) iid_de.append(rel['iid']) compa1_de.append(rel['compa1']) compa2_de.append(rel['compa2']) semclass1_de.append(rel['semclass1']) semclass2_de.append(rel['semclass2']) semclass3_de.append(rel['semclass3']) semclass4_de.append(rel['semclass4']) semclass5_de.append(rel['semclass5']) print(np.unique(y_de, return_counts=True)) y_de = np.asarray(y_de); y_de = y_de.reshape(len(y_de), 1) iid_de = np.asarray(iid_de); iid_de = iid_de.reshape(len(iid_de), 1) compa1_de = np.asarray(compa1_de); compa1_de = compa1_de.reshape(len(compa1_de), 1) compa2_de = np.asarray(compa2_de); compa2_de = compa2_de.reshape(len(compa2_de), 1) semclass1_de = np.asarray(semclass1_de); semclass1_de = semclass1_de.reshape(len(semclass1_de), 1) semclass2_de = np.asarray(semclass2_de); semclass2_de = semclass2_de.reshape(len(semclass2_de), 1) semclass3_de = np.asarray(semclass3_de); semclass3_de = semclass3_de.reshape(len(semclass3_de), 1) semclass4_de = np.asarray(semclass4_de); semclass4_de = semclass4_de.reshape(len(semclass4_de), 1) semclass5_de = np.asarray(semclass5_de); semclass5_de = semclass5_de.reshape(len(semclass5_de), 1) c1_tr = np.array(c1_tr,dtype="int"); c1_te = np.array(c1_te,dtype="int"); c1_de = np.array(c1_de,dtype="int") c2_tr = np.array(c2_tr,dtype="int"); c2_te = np.array(c2_te,dtype="int"); c2_de = np.array(c2_de,dtype="int") prec_tr = np.array(prec_tr,dtype="int"); prec_te = np.array(prec_te,dtype="int"); prec_de = np.array(prec_de,dtype="int") mid_tr = np.array(mid_tr,dtype="int"); mid_te = np.array(mid_te,dtype="int"); mid_de = np.array(mid_de,dtype="int") succ_tr = np.array(succ_tr,dtype="int"); succ_te = np.array(succ_te,dtype="int"); succ_de = np.array(succ_de,dtype="int") train, hi_seg_tr = merge_segs(c1_tr, c2_tr, prec_tr, mid_tr, succ_tr, y_tr, iid_tr, compa1_tr, compa2_tr, semclass1_tr, semclass2_tr, semclass3_tr, semclass4_tr, semclass5_tr, down_sampling=down_sampling) test, hi_seg_te = merge_segs(c1_te, c2_te, prec_te, mid_te, succ_te, y_te, iid_te, compa1_te, compa2_te, semclass1_te, semclass2_te, semclass3_te, semclass4_te, semclass5_te) dev, hi_seg_de = merge_segs(c1_de, c2_de, prec_de, mid_de, succ_de, y_de, iid_de, compa1_de, compa2_de, semclass1_de, semclass2_de, semclass3_de, semclass4_de, semclass5_de) return [train[:n_train] if n_train is not None else train, test, dev, hi_seg_tr, hi_seg_te, hi_seg_de] def print_cm(cm, h_rel, sep="\t"): ord_ys= [i[0] for i in sorted(h_rel.items(), key=lambda x: x[1])] print(sep + sep.join(ord_ys)) for c, row in enumerate(cm): print("{}{}".format(ord_ys[c], sep) + sep.join(row.astype(np.str))) def write_cm_R(cm, h_rel, dn): """ Write out a CM for visualization with R. 
""" ord_ys = [i[0] for i in sorted(h_rel.items(), key=lambda x: x[1])] with open(dn+"cm", "w") as dh: dh.write("System\tGold\tvalue\n") for c_g, g in enumerate(cm): for c_s, val in enumerate(g): dh.write("{}\t{}\t{}\n".format(ord_ys[c_s], ord_ys[c_g], val)) def convert_write_cm_R(result_f, h_rel, dn): with open(result_f) as fh: ls = fh.readlines() cm_s = ls[ls.index("Avg test confusion matrix:\n")+2: -6] assert cm_s[0].startswith("None") cm = [row.rstrip().split("\t")[1:] for row in cm_s] write_cm_R(cm, h_rel, dn) def write_preds(dirname, preds, add_missing_from=None): if not os.path.exists(dirname): os.makedirs(dirname) for fn, pred in preds.items(): with open(dirname+fn, "w") as f_out: for p in pred: f_out.write(p+"\n") if add_missing_from is not None: fns = {os.path.basename(f) for f in get_file_list(add_missing_from)} - set(preds.keys()) for fn in fns: with open(dirname+fn, "w") as f_out: f_out.write("") if __name__=="__main__": img_w = sys.argv[3] mo = re.search('-img_w(\d+)', img_w) if mo: img_w = int(mo.group(1)) else: print('example: -img_w300') sys.exit(1) l1_nhu = sys.argv[4] mo = re.search('-l1_nhu(\d+)', l1_nhu) if mo: l1_nhu = int(mo.group(1)) # number of hidden units first layer else: print('example: -l1_nhu100') sys.exit(1) pad = sys.argv[5] mo = re.search('-pad(\d+)', pad) if mo: pad = int(mo.group(1)) else: print('example: -pad5') sys.exit(1) task = sys.argv[6] n_runs = sys.argv[7] mo = re.search('-n_runs(\d+)', n_runs) if mo: n_runs = int(mo.group(1)) else: print('example: -n_runs1') sys.exit(1) n_train = sys.argv[8] mo = re.search('-n_train(\d+)', n_train) if mo: n_train = int(mo.group(1)) else: print('example: -n_train1000') sys.exit(1) fndata = '../data/semrel_pp%s_pad%s.p' % (img_w, pad) fdata = open(fndata,"rb") x = cPickle.load(fdata) fdata.close() trp_rel_tr, tep_rel_tr, pp_rel_tr, trp_rel_te, tep_rel_te, pp_rel_te, trp_rel_de, tep_rel_de, pp_rel_de, vocab, hlen, mem, hwoov, hwid = x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8], x[9], x[10], x[11], x[12], x[13] for cts in hlen.keys(): hlen[cts]['c1'] += 2*pad hlen[cts]['c2'] += 2*pad print('msg: %s loaded!' 
% (fndata)) mode= sys.argv[1] word_vectors = sys.argv[2] if mode=="-nonstatic": print "model architecture: CNN-non-static" non_static=True elif mode=="-static": print "model architecture: CNN-static" non_static=False execfile("cnn_classes.py") if word_vectors=="-word2vec": print "using: word2vec vectors" U = mem else: print "unrecognized word_vectors option: %s" % (word_vectors) results = [] if task=='-trp': trp_data = make_idx_data_train_test_dev(trp_rel_tr, trp_rel_te, trp_rel_de, hwid, hlen['problem_treatment'], htrp_rel, k=img_w, filter_h=5, down_sampling=None, n_train=n_train) mipre_runs = [] mirec_runs = [] mif_runs = [] mipre_de_runs = [] mirec_de_runs = [] mif_de_runs = [] cm_te_runs = [] for n_run in range(n_runs): (mipre, mirec, mif, mipre_de, mirec_de, mif_de, test_cm, test_preds) = train_conv_net(trp_data, trp_rel_tr, trp_rel_te, trp_rel_de, hlen['problem_treatment'], U, fnres='../result/trp_img%s_nhu%s_pad%s.p' % (img_w, l1_nhu, pad), img_w=img_w, lr_decay=0.95, filter_hs=[3,4,5], conv_non_linear="relu", hidden_units=[l1_nhu,6], activations=[ReLU], shuffle_batch=True, n_epochs=30, sqr_norm_lim=9, non_static=non_static, batch_size=50, dropout_rate=[0.0], relname="trp") write_preds("/mnt/b5320167-5dbd-4498-bf34-173ac5338c8d/Datasets/i2b2-2010/evaluation/system/test/trp/", test_preds) print("msg: trp img_w: %s, l1_nhu: %s, pad: %s, mipre: %s, mirec: %s, mif: %s, mipre_de: %s, mirec_de: %s, mif_de: %s" % (img_w, l1_nhu, pad, mipre, mirec, mif, mipre_de, mirec_de, mif_de)) mipre_runs.append(mipre) mirec_runs.append(mirec) mif_runs.append(mif) mipre_de_runs.append(mipre_de) mirec_de_runs.append(mirec_de) mif_de_runs.append(mif_de) cm_te_runs.append(test_cm) print("Avg test confusion matrix:") print_cm(np.mean(cm_te_runs, axis = 0), htrp_rel) if task=='-tep': tep_data = make_idx_data_train_test_dev(tep_rel_tr, tep_rel_te, tep_rel_de, hwid, hlen['problem_test'], htep_rel, k=img_w, filter_h=5) mipre_runs = [] mirec_runs = [] mif_runs = [] mipre_de_runs = [] mirec_de_runs = [] mif_de_runs = [] cm_te_runs = [] for n_run in range(n_runs): (mipre, mirec, mif, mipre_de, mirec_de, mif_de, test_cm, test_preds) = train_conv_net(tep_data,tep_rel_tr, tep_rel_te, tep_rel_de, hlen['problem_test'], U, fnres='../result/tep_img%s_nhu%s_pad%s.p' % (img_w, l1_nhu, pad), img_w=img_w, lr_decay=0.95, filter_hs=[3,4,5], conv_non_linear="relu", hidden_units=[l1_nhu,3], shuffle_batch=True, n_epochs=30, sqr_norm_lim=9, non_static=non_static, batch_size=50, dropout_rate=[0.0], relname="tep") write_preds( "/mnt/b5320167-5dbd-4498-bf34-173ac5338c8d/Datasets/i2b2-2010/evaluation/system/test/tep/", test_preds) print("msg: tep img_w: %s, l1_nhu: %s, pad: %s, mipre: %s, mirec: %s, mif: %s, mipre_de: %s, mirec_de: %s, mif_de: %s" % (img_w, l1_nhu, pad, mipre, mirec, mif, mipre_de, mirec_de, mif_de)) mipre_runs.append(mipre) mirec_runs.append(mirec) mif_runs.append(mif) mipre_de_runs.append(mipre_de) mirec_de_runs.append(mirec_de) mif_de_runs.append(mif_de) cm_te_runs.append(test_cm) print("Avg test confusion matrix:") print_cm(np.mean(cm_te_runs, axis = 0), htep_rel) if task=='-pp': pp_data = make_idx_data_train_test_dev(pp_rel_tr, pp_rel_te, pp_rel_de, hwid, hlen['problem_problem'], hpp_rel, k=img_w, filter_h=5, down_sampling=4) mipre_runs = [] mirec_runs = [] mif_runs = [] mipre_de_runs = [] mirec_de_runs = [] mif_de_runs = [] cm_te_runs = [] for n_run in range(n_runs): (mipre, mirec, mif, mipre_de, mirec_de, mif_de, test_cm, test_preds) = train_conv_net(pp_data, pp_rel_tr, pp_rel_te, pp_rel_de, 
hlen['problem_problem'], U, fnres='../result/pp_img%s_nhu%s_pad%s.p' % (img_w, l1_nhu, pad), img_w=img_w, lr_decay=0.95, filter_hs=[3,4,5], conv_non_linear="relu", hidden_units=[l1_nhu,2], shuffle_batch=True, n_epochs=30, sqr_norm_lim=9, non_static=non_static, batch_size=50, dropout_rate=[0.0], relname="pp") write_preds( "/mnt/b5320167-5dbd-4498-bf34-173ac5338c8d/Datasets/i2b2-2010/evaluation/system/test/pp/", test_preds) print("msg: pp img_w: %s, l1_nhu: %s, pad: %s, mipre: %s, mirec: %s, mif: %s, mipre_de: %s, mirec_de: %s, mif_de: %s" % (img_w, l1_nhu, pad, mipre, mirec, mif, mipre_de, mirec_de, mif_de)) mipre_runs.append(mipre) mirec_runs.append(mirec) mif_runs.append(mif) mipre_de_runs.append(mipre_de) mirec_de_runs.append(mirec_de) mif_de_runs.append(mif_de) cm_te_runs.append(test_cm) print("Avg test confusion matrix:") print_cm(np.mean(cm_te_runs, axis=0), hpp_rel) print("Avg mipre: {}; CI95: {}".format(np.mean(mipre_runs), su.confint(mipre_runs))) print("Avg mirec: {}; CI95: {}".format(np.mean(mirec_runs), su.confint(mirec_runs))) print("Avg mif: {}; CI95: {}".format(np.mean(mif_runs), su.confint(mif_runs))) print("Avg mipre_de: {}; CI95: {}".format(np.mean(mipre_de_runs), su.confint(mipre_de_runs))) print("Avg mirec_de: {}; CI95: {}".format(np.mean(mirec_de_runs), su.confint(mirec_de_runs))) print("Avg mif_de: {}; CI95: {}".format(np.mean(mif_de_runs), su.confint(mif_de_runs)))
[]
[]
[ "THEANO_FLAGS" ]
[]
["THEANO_FLAGS"]
python
1
0
auth/auth.go
package auth

import (
	"bytes"
	"crypto/tls"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/httputil"
	"net/url"
	"os"
	"strings"

	"github.com/menta2l/secret-helper/vault"
)

func shouldDebug() bool {
	d := strings.ToLower(os.Getenv("DEBUG"))
	return d != "" && d != "false" && d != "0" && d != "no" && d != "off"
}

func authurl(base, f string, args ...interface{}) string {
	return base + fmt.Sprintf(f, args...)
}

func authenticate(req *http.Request) (string, error) {
	proxyRouter, err := vault.NewProxyRouter()
	if err != nil {
		return "", fmt.Errorf("Error setting up proxy: %s", err)
	}
	if os.Getenv("VAULT_NAMESPACE") != "" {
		req.Header.Set("X-Vault-Namespace", strings.Trim(os.Getenv("VAULT_NAMESPACE"), "/")+"/")
	}
	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				InsecureSkipVerify: os.Getenv("VAULT_SKIP_VERIFY") != "",
			},
			Proxy:               proxyRouter.Proxy,
			MaxIdleConnsPerHost: 100,
		},
	}

	var (
		body []byte
		res  *http.Response
	)
	if req.Body != nil {
		body, err = ioutil.ReadAll(req.Body)
		if err != nil {
			return "", err
		}
	}

	for i := 0; i < 10; i++ {
		if req.Body != nil {
			req.Body = ioutil.NopCloser(bytes.NewReader(body))
		}
		res, err = client.Do(req)
		if err != nil {
			return "", err
		}
		if shouldDebug() {
			r, _ := httputil.DumpResponse(res, true)
			fmt.Fprintf(os.Stderr, "Response:\n%s\n----------------\n", r)
		}

		// Vault returns a 307 to redirect during HA / Auth
		if res.StatusCode == 307 {
			// Note: this does not handle relative Location headers
			u, err := url.Parse(res.Header.Get("Location"))
			if err != nil {
				return "", err
			}
			req.URL = u
			// ... and try again.
			continue
		}
		break
	}

	if res.StatusCode != 200 {
		b, err := ioutil.ReadAll(res.Body)
		if err != nil {
			return "", err
		}
		var e struct {
			Errors []string `json:"errors"`
		}
		if err = json.Unmarshal(b, &e); err == nil && len(e.Errors) > 0 {
			/* did our Github auth token fail? */
			if strings.Contains(e.Errors[0], "401 Bad credentials") {
				return "", fmt.Errorf("authentication failed.")
			}
			return "", fmt.Errorf("Vault API errored: %s", e.Errors[0])
		}
		return "", fmt.Errorf("API %s", res.Status)
	}

	b, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return "", err
	}
	var raw map[string]interface{}
	if err = json.Unmarshal(b, &raw); err != nil {
		return "", err
	}
	if authdata, ok := raw["auth"]; ok {
		if data, ok := authdata.(map[string]interface{}); ok {
			if tok, ok := data["client_token"]; ok {
				if s, ok := tok.(string); ok {
					return s, nil
				}
			}
		}
	}
	return "", nil
}
[ "\"DEBUG\"", "\"VAULT_NAMESPACE\"", "\"VAULT_NAMESPACE\"", "\"VAULT_SKIP_VERIFY\"" ]
[]
[ "VAULT_NAMESPACE", "VAULT_SKIP_VERIFY", "DEBUG" ]
[]
["VAULT_NAMESPACE", "VAULT_SKIP_VERIFY", "DEBUG"]
go
3
0
test/test_assignors.py
# pylint: skip-file
from __future__ import absolute_import

import pytest

from kafka.coordinator.assignors.range import RangePartitionAssignor
from kafka.coordinator.assignors.roundrobin import RoundRobinPartitionAssignor
from kafka.coordinator.protocol import ConsumerProtocolMemberAssignment


@pytest.fixture
def cluster(mocker):
    cluster = mocker.MagicMock()
    cluster.partitions_for_topic.return_value = set([0, 1, 2])
    return cluster


def test_assignor_roundrobin(cluster):
    assignor = RoundRobinPartitionAssignor

    member_metadata = {
        'C0': assignor.metadata(set(['t0', 't1'])),
        'C1': assignor.metadata(set(['t0', 't1'])),
    }

    ret = assignor.assign(cluster, member_metadata)
    expected = {
        'C0': ConsumerProtocolMemberAssignment(
            assignor.version, [('t0', [0, 2]), ('t1', [1])], b''),
        'C1': ConsumerProtocolMemberAssignment(
            assignor.version, [('t0', [1]), ('t1', [0, 2])], b'')
    }
    assert ret == expected
    assert set(ret) == set(expected)
    for member in ret:
        assert ret[member].encode() == expected[member].encode()


def test_assignor_range(cluster):
    assignor = RangePartitionAssignor

    member_metadata = {
        'C0': assignor.metadata(set(['t0', 't1'])),
        'C1': assignor.metadata(set(['t0', 't1'])),
    }

    ret = assignor.assign(cluster, member_metadata)
    expected = {
        'C0': ConsumerProtocolMemberAssignment(
            assignor.version, [('t0', [0, 1]), ('t1', [0, 1])], b''),
        'C1': ConsumerProtocolMemberAssignment(
            assignor.version, [('t0', [2]), ('t1', [2])], b'')
    }
    assert ret == expected
    assert set(ret) == set(expected)
    for member in ret:
        assert ret[member].encode() == expected[member].encode()
[]
[]
[]
[]
[]
python
null
null
null
run_pretrained_openfold.py
# Copyright 2021 AlQuraishi Laboratory # Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse from datetime import date import logging import numpy as np import os # A hack to get OpenMM and PyTorch to peacefully coexist os.environ["OPENMM_DEFAULT_PLATFORM"] = "OpenCL" import pickle import random import sys import time import torch from openfold.config import model_config from openfold.data import templates, feature_pipeline, data_pipeline from openfold.model.model import AlphaFold from openfold.model.primitives import Attention, GlobalAttention from openfold.np import residue_constants, protein import openfold.np.relax.relax as relax from openfold.utils.import_weights import ( import_jax_weights_, ) from openfold.utils.torchscript_utils import script_submodules_ from openfold.utils.tensor_utils import ( tensor_tree_map, ) from scripts.utils import add_data_args def script_primitives_(model): script_submodules_(model, [Attention, GlobalAttention]) def main(args): config = model_config(args.model_name) model = AlphaFold(config) model = model.eval() import_jax_weights_(model, args.param_path) script_primitives_(model) model = model.to(args.model_device) template_featurizer = templates.TemplateHitFeaturizer( mmcif_dir=args.template_mmcif_dir, max_template_date=args.max_template_date, max_hits=config.data.predict.max_templates, kalign_binary_path=args.kalign_binary_path, release_dates_path=None, obsolete_pdbs_path=args.obsolete_pdbs_path ) use_small_bfd=(args.bfd_database_path is None) data_processor = data_pipeline.DataPipeline( template_featurizer=template_featurizer, ) output_dir_base = args.output_dir random_seed = args.data_random_seed if random_seed is None: random_seed = random.randrange(sys.maxsize) feature_processor = feature_pipeline.FeaturePipeline(config.data) if not os.path.exists(output_dir_base): os.makedirs(output_dir_base) if(args.use_precomputed_alignments is None): alignment_dir = os.path.join(output_dir_base, "alignments") else: alignment_dir = args.use_precomputed_alignments # Gather input sequences with open(args.fasta_path, "r") as fp: lines = [l.strip() for l in fp.readlines()] tags, seqs = lines[::2], lines[1::2] tags = [l[1:] for l in tags] for tag, seq in zip(tags, seqs): fasta_path = os.path.join(args.output_dir, "tmp.fasta") with open(fasta_path, "w") as fp: fp.write(f">{tag}\n{seq}") logging.info("Generating features...") local_alignment_dir = os.path.join(alignment_dir, tag) if(args.use_precomputed_alignments is None): if not os.path.exists(local_alignment_dir): os.makedirs(local_alignment_dir) alignment_runner = data_pipeline.AlignmentRunner( jackhmmer_binary_path=args.jackhmmer_binary_path, hhblits_binary_path=args.hhblits_binary_path, hhsearch_binary_path=args.hhsearch_binary_path, uniref90_database_path=args.uniref90_database_path, mgnify_database_path=args.mgnify_database_path, bfd_database_path=args.bfd_database_path, uniclust30_database_path=args.uniclust30_database_path, 
small_bfd_database_path=args.small_bfd_database_path, pdb70_database_path=args.pdb70_database_path, use_small_bfd=use_small_bfd, no_cpus=args.cpus, ) alignment_runner.run( fasta_path, local_alignment_dir ) feature_dict = data_processor.process_fasta( fasta_path=fasta_path, alignment_dir=local_alignment_dir ) # Remove temporary FASTA file os.remove(fasta_path) processed_feature_dict = feature_processor.process_features( feature_dict, mode='predict', ) logging.info("Executing model...") batch = processed_feature_dict with torch.no_grad(): batch = { k:torch.as_tensor(v, device=args.model_device) for k,v in batch.items() } t = time.time() out = model(batch) logging.info(f"Inference time: {time.time() - t}") # Toss out the recycling dimensions --- we don't need them anymore batch = tensor_tree_map(lambda x: np.array(x[..., -1].cpu()), batch) out = tensor_tree_map(lambda x: np.array(x.cpu()), out) plddt = out["plddt"] mean_plddt = np.mean(plddt) plddt_b_factors = np.repeat( plddt[..., None], residue_constants.atom_type_num, axis=-1 ) unrelaxed_protein = protein.from_prediction( features=batch, result=out, b_factors=plddt_b_factors ) amber_relaxer = relax.AmberRelaxation( **config.relax ) # Relax the prediction. t = time.time() relaxed_pdb_str, _, _ = amber_relaxer.process(prot=unrelaxed_protein) logging.info(f"Relaxation time: {time.time() - t}") # Save the relaxed PDB. relaxed_output_path = os.path.join( args.output_dir, f'{tag}_{args.model_name}.pdb' ) with open(relaxed_output_path, 'w') as f: f.write(relaxed_pdb_str) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "fasta_path", type=str, ) add_data_args(parser) parser.add_argument( "--use_precomputed_alignments", type=str, default=None, help="""Path to alignment directory. If provided, alignment computation is skipped and database path arguments are ignored.""" ) parser.add_argument( "--output_dir", type=str, default=os.getcwd(), help="""Name of the directory in which to output the prediction""", required=True ) parser.add_argument( "--model_device", type=str, default="cpu", help="""Name of the device on which to run the model. Any valid torch device name is accepted (e.g. "cpu", "cuda:0")""" ) parser.add_argument( "--model_name", type=str, default="model_1", help="""Name of a model config. Choose one of model_{1-5} or model_{1-5}_ptm, as defined on the AlphaFold GitHub.""" ) parser.add_argument( "--param_path", type=str, default=None, help="""Path to model parameters. If None, parameters are selected automatically according to the model name from openfold/resources/params""" ) parser.add_argument( "--cpus", type=int, default=4, help="""Number of CPUs with which to run alignment tools""" ) parser.add_argument( '--preset', type=str, default='full_dbs', choices=('reduced_dbs', 'full_dbs') ) parser.add_argument( '--data_random_seed', type=str, default=None ) args = parser.parse_args() if(args.param_path is None): args.param_path = os.path.join( "openfold", "resources", "params", "params_" + args.model_name + ".npz" ) if(args.model_device == "cpu" and torch.cuda.is_available()): logging.warning( """The model is being run on CPU. Consider specifying --model_device for better performance""" ) if(args.bfd_database_path is None and args.small_bfd_database_path is None): raise ValueError( "At least one of --bfd_database_path or --small_bfd_database_path" "must be specified" ) main(args)
[]
[]
[ "OPENMM_DEFAULT_PLATFORM" ]
[]
["OPENMM_DEFAULT_PLATFORM"]
python
1
0
library/ESP8266_MQTT_Mesh/utils/dump_stacktrace.py
#!/usr/bin/python
import subprocess
import sys
import re
import os

firmware = sys.argv[1]
stacktrace = sys.argv[2]
objdump = os.environ['HOME'] + "/.platformio/packages/toolchain-xtensa/bin/xtensa-lx106-elf-objdump"

dump = subprocess.check_output([objdump, '-S', firmware]).split('\n')
funcs = []
start = None
end = None
for line in dump:
    #40208544 <_ZN15ESP8266MQTTMesh8setup_APEv>:
    match = re.match(r'([0-9a-f]{8})\s+<(.*)>:', line)
    if match:
        funcs.append([int(match.group(1), 16), match.group(2)])
    match = re.match(r'([0-9a-f]{8}):', line)
    if match:
        add = int(match.group(1), 16)
        if not end or add > end:
            end = add
        if not start or add < start:
            start = add

with open(stacktrace, "r") as fh:
    in_stack = False
    for line in fh:
        if re.search(r'>>>stack>>>', line):
            in_stack = True
        if in_stack:
            addrs = re.split(r'[: ]+', line)
            for addr in addrs:
                try:
                    add = int(addr, 16)
                except:
                    continue
                if add < start or add > end:
                    #print("Ignoring: %s (%08x, %08x)" % (addr, start, end))
                    continue
                for i in range(0, len(funcs)):
                    if funcs[i][0] <= add:
                        continue
                    print("%s : %s" % (addr, funcs[i-1][1]))
                    break
[]
[]
[ "HOME" ]
[]
["HOME"]
python
1
0
tests/ignite/engine/test_deterministic.py
import os import random import sys from unittest.mock import patch import numpy as np import pytest import torch import torch.nn as nn from torch.optim import SGD from torch.utils.data import BatchSampler, DataLoader, RandomSampler import ignite.distributed as idist from ignite.engine import Events from ignite.engine.deterministic import ( DeterministicEngine, ReproducibleBatchSampler, keep_random_state, update_dataloader, ) from ignite.utils import manual_seed from tests.ignite.engine import BatchChecker, setup_sampler def test_dengine_setup_seed_div_by_zero(): with pytest.raises(ValueError, match=r"iter_counter should be positive value"): DeterministicEngine(lambda e, b: None)._setup_seed(iter_counter=0) def test_update_dataloader(): def _test(sampler_type=None): num_epochs = 3 total_batch_size = 4 num_iters = 17 data = torch.randint(0, 1000, size=(num_iters * total_batch_size,)) num_workers = 2 sampler, batch_size = setup_sampler(sampler_type, num_iters, total_batch_size) dataloader = DataLoader( data, batch_size=batch_size, num_workers=num_workers, pin_memory=False, sampler=sampler, drop_last=True, shuffle=sampler is None, ) torch.manual_seed(12) seen_batches = [] for i in range(num_epochs): t = [] if sampler_type == "distributed": sampler.set_epoch(i) for b in dataloader: t.append(b) seen_batches.append(t) sampler, batch_size = setup_sampler(sampler_type, num_iters, total_batch_size) dataloader = DataLoader( data, batch_size=batch_size, num_workers=num_workers, pin_memory=False, sampler=sampler, drop_last=True, shuffle=sampler is None, ) batch_sampler = dataloader.batch_sampler new_dataloader = update_dataloader(dataloader, ReproducibleBatchSampler(batch_sampler)) torch.manual_seed(12) new_batches = [] for i in range(num_epochs): t = [] if sampler_type == "distributed": sampler.set_epoch(i) for b in new_dataloader: t.append(b) new_batches.append(t) for i in range(num_epochs): assert all([(b1 == b2).all() for b1, b2 in zip(seen_batches[i], new_batches[i])]) _test() _test("weighted") _test("distributed") def test_reproducible_batch_sampler_wrong_input(): with pytest.raises(TypeError, match=r"Argument batch_sampler should be torch.utils.data.sampler.BatchSampler"): ReproducibleBatchSampler("abc") def test_reproducible_batch_sampler(): data = list(range(100)) dataloader = DataLoader(data, batch_size=12, num_workers=0, shuffle=True, drop_last=True) torch.manual_seed(12 + 0) dataloader_ = update_dataloader(dataloader, ReproducibleBatchSampler(dataloader.batch_sampler)) seen_batches = [] num_epochs = 3 for i in range(num_epochs): t = [] for b in dataloader_: t.append(b) seen_batches.append(t) torch.manual_seed(12 + i + 1) for i in range(num_epochs - 1): for j in range(i + 1, num_epochs): assert not all([(b1 == b2).all() for b1, b2 in zip(seen_batches[i], seen_batches[j])]) for resume_epoch in range(num_epochs): torch.manual_seed(12 + resume_epoch) dataloader_ = update_dataloader(dataloader, ReproducibleBatchSampler(dataloader.batch_sampler)) resumed_seen_batches = [] for b in dataloader_: resumed_seen_batches.append(b) assert all([(b1 == b2).all() for b1, b2 in zip(seen_batches[resume_epoch], resumed_seen_batches)]) def _test_keep_random_state(with_numpy): manual_seed(54) true_values = [] for _ in range(5): t = [ torch.tensor([random.random()]), torch.rand(2), ] if with_numpy: t.append(torch.from_numpy(np.random.rand(2))) true_values.append(t) @keep_random_state def user_handler(): manual_seed(22) _ = [ random.random(), torch.rand(2), ] if with_numpy: _ = np.random.rand(2) manual_seed(54) 
res_values = [] for _ in range(5): r = [ torch.tensor([random.random()]), torch.rand(2), ] if with_numpy: r.append(torch.from_numpy(np.random.rand(2))) res_values.append(r) user_handler() for a, b in zip(true_values, res_values): for i, j in zip(a, b): assert (i == j).all() def test_keep_random_state(): _test_keep_random_state(with_numpy=True) def test_keep_random_state_without_numpy(): with patch.dict("sys.modules", {"numpy": None}): _test_keep_random_state(with_numpy=False) def test_strict_resume_from_iter(): def _test(epoch_length=None): max_epochs = 5 num_iters = 21 torch.manual_seed(0) data = torch.randint(0, 1000, size=(num_iters,)) if epoch_length is None: epoch_length = num_iters for resume_iteration in range(2, min(num_iters * max_epochs, epoch_length * max_epochs), 4): batch_checker = BatchChecker(data, init_counter=resume_iteration) def update_fn(_, batch): assert batch_checker.check( batch ), f"{resume_iteration} | {batch_checker.counter}: {batch_checker.true_batch} vs {batch}" engine = DeterministicEngine(update_fn) @engine.on(Events.EPOCH_COMPLETED) def check_iteration(_): assert engine.state.iteration == batch_checker.counter resume_state_dict = dict( iteration=resume_iteration, max_epochs=max_epochs, epoch_length=epoch_length, rng_states=None ) engine.load_state_dict(resume_state_dict) engine.run(data) assert engine.state.epoch == max_epochs assert engine.state.iteration == epoch_length * max_epochs _test() _test(60) _test(15) def test_strict_resume_from_epoch(): def _test(epoch_length=None): max_epochs = 10 num_iters = 21 torch.manual_seed(0) data = torch.randint(0, 1000, size=(num_iters,)) if epoch_length is None: epoch_length = num_iters for resume_epoch in range(1, max_epochs): batch_checker = BatchChecker(data, init_counter=resume_epoch * epoch_length) def update_fn(_, batch): assert batch_checker.check( batch ), f"{resume_epoch} | {batch_checker.counter}: {batch_checker.true_batch} vs {batch}" engine = DeterministicEngine(update_fn) resume_state_dict = dict( epoch=resume_epoch, max_epochs=max_epochs, epoch_length=epoch_length, rng_states=None ) engine.load_state_dict(resume_state_dict) engine.run(data) assert engine.state.epoch == max_epochs assert engine.state.iteration == epoch_length * max_epochs _test() _test(60) _test(15) def _test_resume_random_dataloader_from_epoch(device, _setup_sampler, sampler_type=None): def _test(epoch_length=None): max_epochs = 5 total_batch_size = 4 num_iters = 21 torch.manual_seed(0) data = torch.randint(0, 1000, size=(num_iters * total_batch_size,)) if epoch_length is None: epoch_length = num_iters for resume_epoch in range(1, max_epochs, 2): for num_workers in [0, 2]: sampler, batch_size = _setup_sampler(sampler_type, num_iters, total_batch_size) orig_dataloader = DataLoader( data, batch_size=batch_size, num_workers=num_workers, pin_memory="cuda" in torch.device(device).type, sampler=sampler, drop_last=True, shuffle=sampler is None, ) seen_batchs = [] def update_fn(_, batch): batch_to_device = batch.to(device) seen_batchs.append(batch) engine = DeterministicEngine(update_fn) if sampler_type == "distributed": @engine.on(Events.EPOCH_STARTED) def _(engine): sampler.set_epoch(engine.state.epoch - 1) torch.manual_seed(87) engine.run( orig_dataloader, max_epochs=max_epochs, epoch_length=epoch_length, ) batch_checker = BatchChecker(seen_batchs, init_counter=resume_epoch * epoch_length) sampler, batch_size = _setup_sampler(sampler_type, num_iters, total_batch_size) resume_dataloader = DataLoader( data, batch_size=batch_size, 
num_workers=num_workers, pin_memory="cuda" in torch.device(device).type, sampler=sampler, drop_last=True, shuffle=sampler is None, ) def update_fn(_, batch): batch_to_device = batch.to(device) assert batch_checker.check( batch ), f"{num_workers} {resume_epoch} | {batch_checker.counter}: {batch_checker.true_batch} vs {batch}" engine = DeterministicEngine(update_fn) if sampler_type == "distributed": @engine.on(Events.EPOCH_STARTED) def _(engine): sampler.set_epoch(engine.state.epoch - 1) resume_state_dict = dict( epoch=resume_epoch, max_epochs=max_epochs, epoch_length=epoch_length, rng_states=None ) engine.load_state_dict(resume_state_dict) torch.manual_seed(87) engine.run(resume_dataloader) assert engine.state.epoch == max_epochs assert engine.state.iteration == epoch_length * max_epochs _test() if sampler_type != "distributed": _test(60) _test(15) @pytest.mark.skipif("win" in sys.platform, reason="Skip extremely slow test on Windows/MacOSX") def test_resume_random_dataloader_from_epoch(): _test_resume_random_dataloader_from_epoch("cpu", setup_sampler) _test_resume_random_dataloader_from_epoch("cpu", setup_sampler, sampler_type="weighted") _test_resume_random_dataloader_from_epoch("cpu", setup_sampler, sampler_type="distributed") class AugmentedData: def __init__(self, data, enabled=True): self.data = data self.enabled = enabled def __getitem__(self, i): dp = self.data[i] r = torch.randint_like(dp, -100, 100) if self.enabled else 0.0 return dp + r * 0.01 def __len__(self): return len(self.data) def _test_resume_random_dataloader_from_iter(device, _setup_sampler, sampler_type=None): def _test(epoch_length=None): max_epochs = 3 total_batch_size = 4 num_iters = 17 torch.manual_seed(0) data = torch.randint(0, 1000, size=(num_iters * total_batch_size,)) if epoch_length is None: epoch_length = num_iters for resume_iteration in range(2, min(num_iters * max_epochs, epoch_length * max_epochs), 13): for num_workers in [0, 2]: sampler, batch_size = _setup_sampler(sampler_type, num_iters, total_batch_size) orig_dataloader = DataLoader( data, batch_size=batch_size, num_workers=num_workers, pin_memory="cuda" in torch.device(device).type, sampler=sampler, drop_last=True, shuffle=sampler is None, ) seen_batchs = [] def update_fn(_, batch): batch_to_device = batch.to(device) seen_batchs.append(batch) engine = DeterministicEngine(update_fn) if sampler_type == "distributed": @engine.on(Events.EPOCH_STARTED) def _(engine): sampler.set_epoch(engine.state.epoch) torch.manual_seed(12) engine.run( orig_dataloader, max_epochs=max_epochs, epoch_length=epoch_length, ) batch_checker = BatchChecker(seen_batchs, init_counter=resume_iteration) sampler, batch_size = _setup_sampler(sampler_type, num_iters, total_batch_size) resume_dataloader = DataLoader( data, batch_size=batch_size, num_workers=num_workers, pin_memory="cuda" in torch.device(device).type, sampler=sampler, drop_last=True, shuffle=sampler is None, ) def update_fn(_, batch): batch_to_device = batch.to(device) cfg_msg = f"{num_workers} {resume_iteration}" assert batch_checker.check( batch ), f"{cfg_msg} | {batch_checker.counter}: {batch_checker.true_batch} vs {batch}" engine = DeterministicEngine(update_fn) if sampler_type == "distributed": @engine.on(Events.EPOCH_STARTED) def _(engine): sampler.set_epoch(engine.state.epoch) resume_state_dict = dict( iteration=resume_iteration, max_epochs=max_epochs, epoch_length=epoch_length, rng_states=None ) engine.load_state_dict(resume_state_dict) torch.manual_seed(12) engine.run(resume_dataloader) assert 
engine.state.epoch == max_epochs assert ( engine.state.iteration == epoch_length * max_epochs ), f"{num_workers}, {resume_iteration} | {engine.state.iteration} vs {epoch_length * max_epochs}" _test() if sampler_type != "distributed": _test(40) _test(11) @pytest.mark.skipif("win" in sys.platform, reason="Skip extremely slow test on Windows/MacOSX") def test_resume_random_dataloader_from_iter(): _test_resume_random_dataloader_from_iter("cpu", setup_sampler) _test_resume_random_dataloader_from_iter("cpu", setup_sampler, sampler_type="weighted") _test_resume_random_dataloader_from_iter("cpu", setup_sampler, sampler_type="distributed") def _test_resume_random_data_iterator_from_epoch(device): def _test(epoch_length=None): max_epochs = 5 batch_size = 4 num_iters = 21 def infinite_data_iterator(): while True: for _ in range(num_iters): data = torch.randint(0, 1000, size=(batch_size,), device=device) yield data if epoch_length is None: epoch_length = num_iters for resume_epoch in range(1, max_epochs): seen_batchs = [] def update_fn(_, batch): # if there is a random op when using data batch etc, we can not resume correctly # torch.rand(1) seen_batchs.append(batch) engine = DeterministicEngine(update_fn) torch.manual_seed(121) engine.run( infinite_data_iterator(), max_epochs=max_epochs, epoch_length=epoch_length, ) batch_checker = BatchChecker(seen_batchs, init_counter=resume_epoch * epoch_length) def update_fn(_, batch): assert batch_checker.check( batch ), f"{resume_epoch} | {batch_checker.counter}: {batch_checker.true_batch} vs {batch}" engine = DeterministicEngine(update_fn) resume_state_dict = dict( epoch=resume_epoch, max_epochs=max_epochs, epoch_length=epoch_length, rng_states=None ) engine.load_state_dict(resume_state_dict) torch.manual_seed(121) engine.run(infinite_data_iterator()) assert engine.state.epoch == max_epochs assert engine.state.iteration == epoch_length * max_epochs _test() _test(60) _test(15) def test_resume_random_data_iterator_from_epoch(): _test_resume_random_data_iterator_from_epoch("cpu") def _test_resume_random_data_iterator_from_iter(device): def _test(epoch_length=None): max_epochs = 3 batch_size = 4 num_iters = 17 def infinite_data_iterator(): while True: for _ in range(num_iters): data = torch.randint(0, 1000, size=(batch_size,), device=device) yield data if epoch_length is None: epoch_length = num_iters for resume_iteration in range(1, min(num_iters * max_epochs, epoch_length * max_epochs), 7): seen_batchs = [] def update_fn(_, batch): seen_batchs.append(batch) engine = DeterministicEngine(update_fn) torch.manual_seed(24) engine.run( infinite_data_iterator(), max_epochs=max_epochs, epoch_length=epoch_length, ) batch_checker = BatchChecker(seen_batchs, init_counter=resume_iteration) def update_fn(_, batch): assert batch_checker.check( batch ), f"{resume_iteration} | {batch_checker.counter}: {batch_checker.true_batch} vs {batch}" engine = DeterministicEngine(update_fn) resume_state_dict = dict( iteration=resume_iteration, max_epochs=max_epochs, epoch_length=epoch_length, rng_states=None ) engine.load_state_dict(resume_state_dict) torch.manual_seed(24) engine.run(infinite_data_iterator()) assert engine.state.epoch == max_epochs assert ( engine.state.iteration == epoch_length * max_epochs ), f"{resume_iteration} | {engine.state.iteration} vs {epoch_length * max_epochs}" _test() _test(50) _test(11) def test_resume_random_data_iterator_from_iter(): _test_resume_random_data_iterator_from_iter("cpu") @pytest.mark.distributed @pytest.mark.skipif(not 
idist.has_native_dist_support, reason="Skip if no native dist support") @pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU") def test_distrib_nccl_gpu(distributed_context_single_node_nccl): device = idist.device() _test_resume_random_dataloader_from_iter(device, setup_sampler, sampler_type="distributed") _test_resume_random_dataloader_from_epoch(device, setup_sampler, sampler_type="distributed") @pytest.mark.distributed @pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support") def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo): device = idist.device() _test_resume_random_dataloader_from_iter(device, setup_sampler, sampler_type="distributed") _test_resume_random_dataloader_from_epoch(device, setup_sampler, sampler_type="distributed") @pytest.mark.xfail @pytest.mark.multinode_distributed @pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support") @pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed") def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo): device = idist.device() _test_resume_random_dataloader_from_iter(device, setup_sampler, sampler_type="distributed") _test_resume_random_dataloader_from_epoch(device, setup_sampler, sampler_type="distributed") @pytest.mark.multinode_distributed @pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support") @pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed") def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl): device = idist.device() _test_resume_random_dataloader_from_iter(device, setup_sampler, sampler_type="distributed") _test_resume_random_dataloader_from_epoch(device, setup_sampler, sampler_type="distributed") def test_concepts_snippet_resume(): # Commented imports required in the snippet # import torch # from torch.utils.data import DataLoader # from ignite.engine import DeterministicEngine # from ignite.utils import manual_seed seen_batches = [] manual_seed(seed=15) def random_train_data_loader(size): data = torch.arange(0, size) return DataLoader(data, batch_size=4, shuffle=True) def print_train_data(engine, batch): i = engine.state.iteration e = engine.state.epoch print("train", e, i, batch.tolist()) seen_batches.append(batch) trainer = DeterministicEngine(print_train_data) print("Original Run") manual_seed(56) trainer.run(random_train_data_loader(40), max_epochs=2, epoch_length=5) original_batches = list(seen_batches) seen_batches = [] print("Resumed Run") trainer.load_state_dict({"epoch": 1, "epoch_length": 5, "max_epochs": 2, "rng_states": None}) manual_seed(56) trainer.run(random_train_data_loader(40)) resumed_batches = list(seen_batches) seen_batches = [] for b1, b2 in zip(original_batches[5:], resumed_batches): assert (b1 == b2).all() def test_concepts_snippet_warning(): def random_train_data_generator(): while True: yield torch.randint(0, 100, size=(1,)) def print_train_data(engine, batch): i = engine.state.iteration e = engine.state.epoch print("train", e, i, batch.tolist()) trainer = DeterministicEngine(print_train_data) @trainer.on(Events.ITERATION_COMPLETED(every=3)) def user_handler(_): # handler synchronizes the random state torch.manual_seed(12) a = torch.rand(1) trainer.run(random_train_data_generator(), max_epochs=3, epoch_length=5) def _test_gradients_on_resume( dirname, device, with_dropout=True, with_dataaugs=True, 
data_size=24, batch_size=4, save_iter=None, save_epoch=None ): debug = False def random_train_data_loader(size): d = AugmentedData(torch.rand(size, 3, 32, 32), enabled=with_dataaugs) return DataLoader(d, batch_size=batch_size, shuffle=True, num_workers=2) def _train(save_iter=None, save_epoch=None, sd=None): w_norms = [] grad_norms = [] data = [] chkpt = [] manual_seed(12) arch = [ nn.Conv2d(3, 10, 3), nn.ReLU(), nn.Conv2d(10, 10, 3), nn.ReLU(), nn.AdaptiveAvgPool2d(1), nn.Flatten(), nn.Linear(10, 5), nn.ReLU(), nn.Linear(5, 2), ] if with_dropout: arch.insert(2, nn.Dropout2d()) arch.insert(-2, nn.Dropout()) model = nn.Sequential(*arch).to(device) opt = SGD(model.parameters(), lr=0.001) def proc_fn(e, b): from ignite.engine.deterministic import _get_rng_states, _repr_rng_state s = _repr_rng_state(_get_rng_states()) model.train() opt.zero_grad() y = model(b.to(device)) y.sum().backward() opt.step() if debug: print( trainer.state.iteration, trainer.state.epoch, "proc_fn - b.shape", b.shape, torch.norm(y).item(), s ) trainer = DeterministicEngine(proc_fn) if save_iter is not None: ev = Events.ITERATION_COMPLETED(once=save_iter) elif save_epoch is not None: ev = Events.EPOCH_COMPLETED(once=save_epoch) save_iter = save_epoch * (data_size // batch_size) @trainer.on(ev) def save_chkpt(_): if debug: print(trainer.state.iteration, "save_chkpt") fp = os.path.join(dirname, "test.pt") from ignite.engine.deterministic import _repr_rng_state tsd = trainer.state_dict() if debug: print("->", _repr_rng_state(tsd["rng_states"])) torch.save([model.state_dict(), opt.state_dict(), tsd], fp) chkpt.append(fp) def log_event_filter(_, event): if (event // save_iter == 1) and 1 <= (event % save_iter) <= 5: return True return False @trainer.on(Events.ITERATION_COMPLETED(event_filter=log_event_filter)) def write_data_grads_weights(e): x = e.state.batch i = e.state.iteration data.append([i, x.mean().item(), x.std().item()]) total = [0.0, 0.0] out1 = [] out2 = [] for p in model.parameters(): n1 = torch.norm(p).item() n2 = torch.norm(p.grad).item() out1.append(n1) out2.append(n2) total[0] += n1 total[1] += n2 w_norms.append([i, total[0]] + out1) grad_norms.append([i, total[1]] + out2) if sd is not None: sd = torch.load(sd) model.load_state_dict(sd[0]) opt.load_state_dict(sd[1]) from ignite.engine.deterministic import _repr_rng_state if debug: print("-->", _repr_rng_state(sd[2]["rng_states"])) trainer.load_state_dict(sd[2]) manual_seed(32) trainer.run(random_train_data_loader(size=data_size), max_epochs=5) return {"sd": chkpt, "data": data, "grads": grad_norms, "weights": w_norms} out_original = _train(save_iter=save_iter, save_epoch=save_epoch) assert len(out_original["sd"]) > 0 out_resumed = _train(save_iter=save_iter, save_epoch=save_epoch, sd=out_original["sd"][0]) if debug: print("Original:") print(" data:", out_original["data"]) print("grads:", out_original["grads"]) print(" W:", out_original["weights"]) print("Resume:") print(" data:", out_resumed["data"]) print("grads:", out_resumed["grads"]) print(" W:", out_resumed["weights"]) # check data: for d1, d2 in zip(out_original["data"], out_resumed["data"]): assert d1 == d2 # check grads: for d1, d2 in zip(out_original["grads"], out_resumed["grads"]): assert d1 == d2 # check weights: for d1, d2 in zip(out_original["weights"], out_resumed["weights"]): assert d1 == d2 def test_gradients_on_resume_cpu(dirname): with pytest.raises(AssertionError): _test_gradients_on_resume(dirname, "cpu", with_dataaugs=True, save_iter=25) _test_gradients_on_resume(dirname, "cpu", 
with_dataaugs=False, save_iter=25) # resume from epoch _test_gradients_on_resume(dirname, "cpu", with_dataaugs=True, save_epoch=3) _test_gradients_on_resume(dirname, "cpu", with_dataaugs=False, save_epoch=3) @pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU") def test_gradients_on_resume_on_cuda(dirname): with pytest.raises(AssertionError): _test_gradients_on_resume(dirname, "cuda", with_dataaugs=True, save_iter=25) with pytest.raises(AssertionError): _test_gradients_on_resume(dirname, "cuda", with_dataaugs=False, save_iter=25) # resume from epoch _test_gradients_on_resume(dirname, "cuda", with_dataaugs=True, save_epoch=3) _test_gradients_on_resume(dirname, "cuda", with_dataaugs=False, save_epoch=3) def test_engine_with_dataloader_no_auto_batching(): # tests https://github.com/pytorch/ignite/issues/941 data = torch.rand(64, 4, 10) data_loader = DataLoader( data, batch_size=None, sampler=BatchSampler(RandomSampler(data), batch_size=8, drop_last=True) ) counter = [0] def foo(e, b): print(f"{e.state.epoch}-{e.state.iteration}: {b}") counter[0] += 1 engine = DeterministicEngine(foo) engine.run(data_loader, epoch_length=10, max_epochs=5) assert counter[0] == 50 def test_run_finite_iterator_no_epoch_length(): # FR: https://github.com/pytorch/ignite/issues/871 unknown_size = 11 def finite_unk_size_data_iter(): for i in range(unknown_size): yield i bc = BatchChecker(data=list(range(unknown_size))) engine = DeterministicEngine(lambda e, b: bc.check(b)) @engine.on(Events.DATALOADER_STOP_ITERATION) def restart_iter(): engine.state.dataloader = finite_unk_size_data_iter() data_iter = finite_unk_size_data_iter() engine.run(data_iter, max_epochs=5) assert engine.state.epoch == 5 assert engine.state.iteration == unknown_size * 5 class OldDataLoader(DataLoader): def __init__(self, dl, *args, **kwargs): self.dl = dl self.sampler = self.dl.sampler self.batch_sampler = self.dl.batch_sampler def __len__(self): return len(self.dl) def __iter__(self): return iter(self.dl) def test_dataloader_no_dataset_kind(): # tests issue : https://github.com/pytorch/ignite/issues/1022 engine = DeterministicEngine(lambda e, b: None) data = torch.randint(0, 1000, size=(100 * 4,)) dataloader = DataLoader(data, batch_size=4) dataloader = OldDataLoader(dataloader) engine.run(dataloader)
[]
[]
[]
[]
[]
python
0
0
register-staff-to-existing-clinic/index.py
import sys
import logging
import pymysql
import json
import os

#rds settings
rds_endpoint = os.environ['rds_endpoint']
username=os.environ['username']
password=os.environ['password']
db_name=os.environ['db_name']

logger = logging.getLogger()
logger.setLevel(logging.INFO)

#Connection
try:
    connection = pymysql.connect(host=rds_endpoint, user=username, passwd=password, db=db_name)
except pymysql.MySQLError as e:
    logger.error("ERROR: Unexpected error: Could not connect to MySQL instance.")
    logger.error(e)
    sys.exit()

logger.info("SUCCESS: Connection to RDS MySQL instance succeeded")

def handler(event, context):
    cur = connection.cursor()

    ## Retrieve Data
    ##STAFF
    isAdmin=''
    if event['job'] =='A' or event['job'] =='D':
        isAdmin = 'Y'
    else:
        isAdmin = 'N'

    query = "INSERT INTO Staff(email,password,name,addr,contactNo,job,status,isAdmin,branchId) \
    VALUES('{}','{}','{}','{}','{}','{}','{}','{}','{}')"\
    .format(event['email'], event['password'], event['name'], event['addr'], event['contactNo'], event['job'],'P',isAdmin,event['branchId'])

    cur.execute(query)
    connection.commit()
    print(cur.rowcount, "record(s) affected")

    ## Construct body of the response object
    transactionResponse = {}

    # Construct http response object
    responseObject = {}
    # responseObject['statusCode'] = 200
    # responseObject['headers'] = {}
    # responseObject['headers']['Content-Type']='application/json'
    # responseObject['headers']['Access-Control-Allow-Origin']='*'
    responseObject['data'] = json.dumps(transactionResponse, sort_keys=True,default=str)

    #k = json.loads(responseObject['body'])
    #print(k['uin'])

    return responseObject
[]
[]
[ "db_name", "password", "username", "rds_endpoint" ]
[]
["db_name", "password", "username", "rds_endpoint"]
python
4
0
tests/integration_tests/test_chainermn.py
import gc import pytest from optuna import create_study from optuna import distributions from optuna import integration from optuna.integration import ChainerMNStudy from optuna import pruners from optuna.storages import InMemoryStorage from optuna.storages import RDBStorage from optuna import Study from optuna.testing.integration import DeterministicPruner from optuna.testing.sampler import DeterministicRelativeSampler from optuna.testing.storage import StorageSupplier from optuna.trial import Trial from optuna.trial import TrialState from optuna import TrialPruned from optuna import type_checking if type_checking.TYPE_CHECKING: from types import TracebackType # NOQA from typing import Any # NOQA from typing import Callable # NOQA from typing import Dict # NOQA from typing import Optional # NOQA from typing import Type # NOQA from optuna.integration.chainermn import ChainerMNTrial # NOQA from optuna.pruners import BasePruner # NOQA from optuna.samplers import BaseSampler # NOQA from optuna.storages import BaseStorage # NOQA try: import chainermn from chainermn.communicators.communicator_base import CommunicatorBase # NOQA _available = True except ImportError: _available = False STORAGE_MODES = ["sqlite"] PRUNER_INIT_FUNCS = [lambda: pruners.MedianPruner(), lambda: pruners.SuccessiveHalvingPruner()] class Func(object): def __init__(self): # type: () -> None self.suggested_values = {} # type: Dict[int, Dict[str, Any]] def __call__(self, trial, comm): # type: (ChainerMNTrial, CommunicatorBase) -> float x = trial.suggest_uniform("x", -10, 10) y = trial.suggest_loguniform("y", 20, 30) z = trial.suggest_categorical("z", (-1.0, 1.0)) self.suggested_values[trial.number] = {} self.suggested_values[trial.number]["x"] = x self.suggested_values[trial.number]["y"] = y self.suggested_values[trial.number]["z"] = z return (x - 2) ** 2 + (y - 25) ** 2 + z class MultiNodeStorageSupplier(StorageSupplier): def __init__(self, storage_specifier, comm): # type: (str, CommunicatorBase) -> None super(MultiNodeStorageSupplier, self).__init__(storage_specifier) self.comm = comm self.storage = None # type: Optional[RDBStorage] def __enter__(self): # type: () -> RDBStorage if self.comm.rank == 0: storage = super(MultiNodeStorageSupplier, self).__enter__() assert isinstance(storage, RDBStorage) url = str(storage.engine.url) else: url = "dummy_url" url = self.comm.mpi_comm.bcast(url) self.storage = RDBStorage(url) return self.storage def __exit__(self, exc_type, exc_val, exc_tb): # type: (Type[BaseException], BaseException, TracebackType) -> None # Explicitly call storage's __del__ before sqlite tempfile is deleted. 
del self.storage gc.collect() self.comm.mpi_comm.barrier() if self.comm.rank == 0: super(MultiNodeStorageSupplier, self).__exit__(exc_type, exc_val, exc_tb) @pytest.fixture def comm(): # type: () -> CommunicatorBase if not _available: pytest.skip("This test requires ChainerMN.") return chainermn.create_communicator("naive") class TestChainerMNStudy(object): @staticmethod @pytest.mark.parametrize("storage_mode", STORAGE_MODES) def test_init(storage_mode, comm): # type: (str, CommunicatorBase) -> None with MultiNodeStorageSupplier(storage_mode, comm) as storage: study = TestChainerMNStudy._create_shared_study(storage, comm) mn_study = ChainerMNStudy(study, comm) assert mn_study.study_name == study.study_name @staticmethod @pytest.mark.parametrize("storage_mode", STORAGE_MODES) def test_init_with_multiple_study_names(storage_mode, comm): # type: (str, CommunicatorBase) -> None TestChainerMNStudy._check_multi_node(comm) with MultiNodeStorageSupplier(storage_mode, comm) as storage: # Create study_name for each rank. name = create_study(storage).study_name study = Study(name, storage) with pytest.raises(ValueError): ChainerMNStudy(study, comm) @staticmethod def test_init_with_incompatible_storage(comm): # type: (CommunicatorBase) -> None study = create_study(InMemoryStorage(), study_name="in-memory-study") with pytest.raises(ValueError): ChainerMNStudy(study, comm) @staticmethod @pytest.mark.parametrize("storage_mode", STORAGE_MODES) def test_optimize(storage_mode, comm): # type: (str, CommunicatorBase) -> None with MultiNodeStorageSupplier(storage_mode, comm) as storage: study = TestChainerMNStudy._create_shared_study(storage, comm) mn_study = ChainerMNStudy(study, comm) # Invoke optimize. n_trials = 20 func = Func() mn_study.optimize(func, n_trials=n_trials) # Assert trial counts. assert len(mn_study.trials) == n_trials # Assert the same parameters have been suggested among all nodes. for trial in mn_study.trials: assert trial.params == func.suggested_values[trial.number] @staticmethod @pytest.mark.parametrize("storage_mode", STORAGE_MODES) @pytest.mark.parametrize("pruner_init_func", PRUNER_INIT_FUNCS) def test_pruning(storage_mode, pruner_init_func, comm): # type: (str, Callable[[], BasePruner], CommunicatorBase) -> None with MultiNodeStorageSupplier(storage_mode, comm) as storage: pruner = pruner_init_func() study = TestChainerMNStudy._create_shared_study(storage, comm, pruner=pruner) mn_study = ChainerMNStudy(study, comm) def objective(_trial, _comm): # type: (ChainerMNTrial, bool) -> float raise TrialPruned # Always be pruned. # Invoke optimize. n_trials = 20 mn_study.optimize(objective, n_trials=n_trials) # Assert trial count. assert len(mn_study.trials) == n_trials # Assert pruned trial count. pruned_trials = [t for t in mn_study.trials if t.state == TrialState.PRUNED] assert len(pruned_trials) == n_trials @staticmethod @pytest.mark.parametrize("storage_mode", STORAGE_MODES) def test_failure(storage_mode, comm): # type: (str, CommunicatorBase) -> None with MultiNodeStorageSupplier(storage_mode, comm) as storage: study = TestChainerMNStudy._create_shared_study(storage, comm) mn_study = ChainerMNStudy(study, comm) def objective(_trial, _comm): # type: (ChainerMNTrial, bool) -> float raise ValueError # Always fails. # Invoke optimize in which `ValueError` is accepted. n_trials = 20 mn_study.optimize(objective, n_trials=n_trials, catch=(ValueError,)) # Assert trial count. assert len(mn_study.trials) == n_trials # Assert failed trial count. 
failed_trials = [t for t in mn_study.trials if t.state == TrialState.FAIL] assert len(failed_trials) == n_trials # Synchronize nodes before executing the next optimization. comm.mpi_comm.barrier() # Invoke optimize in which no exceptions are accepted. with pytest.raises(ValueError): mn_study.optimize(objective, n_trials=n_trials, catch=()) # Assert trial count. assert len(mn_study.trials) == n_trials + 1 # Assert failed trial count. failed_trials = [t for t in mn_study.trials if t.state == TrialState.FAIL] assert len(failed_trials) == n_trials + 1 @staticmethod @pytest.mark.parametrize("storage_mode", STORAGE_MODES) def test_relative_sampling(storage_mode, comm): # type: (str, CommunicatorBase) -> None relative_search_space = { "x": distributions.UniformDistribution(low=-10, high=10), "y": distributions.LogUniformDistribution(low=20, high=30), "z": distributions.CategoricalDistribution(choices=(-1.0, 1.0)), } relative_params = {"x": 1.0, "y": 25.0, "z": -1.0} sampler = DeterministicRelativeSampler( relative_search_space, relative_params # type: ignore ) with MultiNodeStorageSupplier(storage_mode, comm) as storage: study = TestChainerMNStudy._create_shared_study(storage, comm, sampler=sampler) mn_study = ChainerMNStudy(study, comm) # Invoke optimize. n_trials = 20 func = Func() mn_study.optimize(func, n_trials=n_trials) # Assert trial counts. assert len(mn_study.trials) == n_trials # Assert the parameters in `relative_params` have been suggested among all nodes. for trial in mn_study.trials: assert trial.params == relative_params @staticmethod def _create_shared_study(storage, comm, pruner=None, sampler=None): # type: (BaseStorage, CommunicatorBase, BasePruner, BaseSampler) -> Study name_local = create_study(storage).study_name if comm.rank == 0 else None name_bcast = comm.mpi_comm.bcast(name_local) return Study(name_bcast, storage, pruner=pruner, sampler=sampler) @staticmethod def _check_multi_node(comm): # type: (CommunicatorBase) -> None if comm.size < 2: pytest.skip("This test is for multi-node only.") class TestChainerMNTrial(object): @staticmethod @pytest.mark.parametrize("storage_mode", STORAGE_MODES) def test_init(storage_mode, comm): # type: (str, CommunicatorBase) -> None with MultiNodeStorageSupplier(storage_mode, comm) as storage: study = TestChainerMNStudy._create_shared_study(storage, comm) mn_trial = _create_new_chainermn_trial(study, comm) trial = study.trials[-1] assert mn_trial.number == trial.number @staticmethod @pytest.mark.parametrize("storage_mode", STORAGE_MODES) def test_suggest_float(storage_mode, comm): # type: (str, CommunicatorBase) -> None with MultiNodeStorageSupplier(storage_mode, comm) as storage: study = TestChainerMNStudy._create_shared_study(storage, comm) low1 = 0.5 high1 = 1.0 for _ in range(10): mn_trial = _create_new_chainermn_trial(study, comm) x1 = mn_trial.suggest_float("x1", low1, high1) assert low1 <= x1 <= high1 x2 = mn_trial.suggest_uniform("x1", low1, high1) assert x1 == x2 with pytest.raises(ValueError): mn_trial.suggest_loguniform("x1", low1, high1) low2 = 1e-7 high2 = 1e-2 for _ in range(10): mn_trial = _create_new_chainermn_trial(study, comm) x3 = mn_trial.suggest_float("x2", low2, high2, log=True) assert low2 <= x3 <= high2 x4 = mn_trial.suggest_loguniform("x2", low2, high2) assert x3 == x4 with pytest.raises(ValueError): mn_trial.suggest_uniform("x2", low2, high2) @staticmethod @pytest.mark.parametrize("storage_mode", STORAGE_MODES) def test_suggest_uniform(storage_mode, comm): # type: (str, CommunicatorBase) -> None with 
MultiNodeStorageSupplier(storage_mode, comm) as storage: study = TestChainerMNStudy._create_shared_study(storage, comm) low = 0.5 high = 1.0 for _ in range(10): mn_trial = _create_new_chainermn_trial(study, comm) x1 = mn_trial.suggest_uniform("x", low, high) assert low <= x1 <= high x2 = mn_trial.suggest_uniform("x", low, high) assert x1 == x2 with pytest.raises(ValueError): mn_trial.suggest_loguniform("x", low, high) @staticmethod @pytest.mark.parametrize("storage_mode", STORAGE_MODES) def test_suggest_loguniform(storage_mode, comm): # type: (str, CommunicatorBase) -> None with MultiNodeStorageSupplier(storage_mode, comm) as storage: study = TestChainerMNStudy._create_shared_study(storage, comm) low = 1e-7 high = 1e-2 for _ in range(10): mn_trial = _create_new_chainermn_trial(study, comm) x1 = mn_trial.suggest_loguniform("x", low, high) assert low <= x1 <= high x2 = mn_trial.suggest_loguniform("x", low, high) assert x1 == x2 with pytest.raises(ValueError): mn_trial.suggest_uniform("x", low, high) @staticmethod @pytest.mark.parametrize("storage_mode", STORAGE_MODES) def test_suggest_discrete_uniform(storage_mode, comm): # type: (str, CommunicatorBase) -> None with MultiNodeStorageSupplier(storage_mode, comm) as storage: study = TestChainerMNStudy._create_shared_study(storage, comm) low = 0.0 high = 10.0 q = 1.0 for _ in range(10): mn_trial = _create_new_chainermn_trial(study, comm) x1 = mn_trial.suggest_discrete_uniform("x", low, high, q) assert low <= x1 <= high x2 = mn_trial.suggest_discrete_uniform("x", low, high, q) assert x1 == x2 with pytest.raises(ValueError): mn_trial.suggest_uniform("x", low, high) @staticmethod @pytest.mark.parametrize("storage_mode", STORAGE_MODES) @pytest.mark.parametrize("enable_log", [False, True]) def test_suggest_int_step1( storage_mode: str, comm: CommunicatorBase, enable_log: bool ) -> None: with MultiNodeStorageSupplier(storage_mode, comm) as storage: study = TestChainerMNStudy._create_shared_study(storage, comm) low = 1 high = 10 step = 1 for _ in range(10): mn_trial = _create_new_chainermn_trial(study, comm) x1 = mn_trial.suggest_int("x", low, high, step=step, log=enable_log) assert low <= x1 <= high x2 = mn_trial.suggest_int("x", low, high, step=step, log=enable_log) assert x1 == x2 with pytest.raises(ValueError): mn_trial.suggest_uniform("x", low, high) @staticmethod @pytest.mark.parametrize("storage_mode", STORAGE_MODES) def test_suggest_int_step2(storage_mode: str, comm: CommunicatorBase) -> None: with MultiNodeStorageSupplier(storage_mode, comm) as storage: study = TestChainerMNStudy._create_shared_study(storage, comm) low = 1 high = 10 step = 2 for _ in range(10): mn_trial = _create_new_chainermn_trial(study, comm) x1 = mn_trial.suggest_int("x", low, high, step=step, log=False) assert low <= x1 <= high x2 = mn_trial.suggest_int("x", low, high, step=step, log=False) assert x1 == x2 with pytest.raises(ValueError): mn_trial.suggest_uniform("x", low, high) @staticmethod @pytest.mark.parametrize("storage_mode", STORAGE_MODES) def test_suggest_categorical(storage_mode, comm): # type: (str, CommunicatorBase) -> None with MultiNodeStorageSupplier(storage_mode, comm) as storage: study = TestChainerMNStudy._create_shared_study(storage, comm) choices = ("a", "b", "c") for _ in range(10): mn_trial = _create_new_chainermn_trial(study, comm) x1 = mn_trial.suggest_categorical("x", choices) assert x1 in choices x2 = mn_trial.suggest_categorical("x", choices) assert x1 == x2 with pytest.raises(ValueError): mn_trial.suggest_uniform("x", 0.0, 1.0) @staticmethod 
@pytest.mark.parametrize("storage_mode", STORAGE_MODES) @pytest.mark.parametrize("is_pruning", [True, False]) def test_report_and_should_prune(storage_mode, comm, is_pruning): # type: (str, CommunicatorBase, bool) -> None with MultiNodeStorageSupplier(storage_mode, comm) as storage: study = TestChainerMNStudy._create_shared_study( storage, comm, DeterministicPruner(is_pruning) ) mn_trial = _create_new_chainermn_trial(study, comm) mn_trial.report(1.0, 0) assert mn_trial.should_prune() == is_pruning @staticmethod @pytest.mark.parametrize("storage_mode", STORAGE_MODES) def test_params(storage_mode, comm): # type: (str, CommunicatorBase) -> None with MultiNodeStorageSupplier(storage_mode, comm) as storage: study = TestChainerMNStudy._create_shared_study(storage, comm) mn_trial = _create_new_chainermn_trial(study, comm) x = mn_trial.suggest_categorical("x", [1]) assert mn_trial.params["x"] == x @staticmethod @pytest.mark.parametrize("storage_mode", STORAGE_MODES) def test_distributions(storage_mode, comm): # type: (str, CommunicatorBase) -> None with MultiNodeStorageSupplier(storage_mode, comm) as storage: study = TestChainerMNStudy._create_shared_study(storage, comm) mn_trial = _create_new_chainermn_trial(study, comm) mn_trial.suggest_categorical("x", [1]) assert mn_trial.distributions == { "x": distributions.CategoricalDistribution(choices=(1,)) } @staticmethod @pytest.mark.parametrize("storage_mode", STORAGE_MODES) def test_user_attrs(storage_mode, comm): # type: (str, CommunicatorBase) -> None with MultiNodeStorageSupplier(storage_mode, comm) as storage: study = TestChainerMNStudy._create_shared_study(storage, comm) mn_trial = _create_new_chainermn_trial(study, comm) mn_trial.set_user_attr("data", "MNIST") assert mn_trial.user_attrs["data"] == "MNIST" @staticmethod @pytest.mark.parametrize("storage_mode", STORAGE_MODES) def test_system_attrs(storage_mode, comm): # type: (str, CommunicatorBase) -> None with MultiNodeStorageSupplier(storage_mode, comm) as storage: study = TestChainerMNStudy._create_shared_study(storage, comm) mn_trial = _create_new_chainermn_trial(study, comm) mn_trial.set_system_attr("system_message", "test") assert mn_trial.system_attrs["system_message"] == "test" @staticmethod @pytest.mark.parametrize("storage_mode", STORAGE_MODES) def test_call_with_mpi(storage_mode, comm): # type: (str, CommunicatorBase) -> None with MultiNodeStorageSupplier(storage_mode, comm) as storage: study = TestChainerMNStudy._create_shared_study(storage, comm) mn_trial = _create_new_chainermn_trial(study, comm) with pytest.raises(RuntimeError): def func(): # type: () -> None raise RuntimeError mn_trial._call_with_mpi(func) @staticmethod @pytest.mark.parametrize("storage_mode", STORAGE_MODES) def test_datetime_start(storage_mode, comm): # type: (str, CommunicatorBase) -> None with MultiNodeStorageSupplier(storage_mode, comm) as storage: study = TestChainerMNStudy._create_shared_study(storage, comm) mn_trial = _create_new_chainermn_trial(study, comm) assert mn_trial.datetime_start is not None def _create_new_chainermn_trial(study, comm): # type: (Study, CommunicatorBase) -> integration.chainermn.ChainerMNTrial if comm.rank == 0: trial_id = study._storage.create_new_trial(study._study_id) trial = Trial(study, trial_id) mn_trial = integration.chainermn.ChainerMNTrial(trial, comm) else: mn_trial = integration.chainermn.ChainerMNTrial(None, comm) comm.mpi_comm.barrier() return mn_trial
[]
[]
[]
[]
[]
python
null
null
null
source/nvda.pyw
#nvda.pyw #A part of NonVisual Desktop Access (NVDA) #Copyright (C) 2006-2018 NV Access Limited, Aleksey Sadovoy, Babbage B.V., Joseph Lee #This file is covered by the GNU General Public License. #See the file COPYING for more details. """The NVDA launcher. It can handle some command-line arguments (including help). It sets up logging, and then starts the core.""" import sys import os if getattr(sys, "frozen", None): # We are running as an executable. # Append the path of the executable to sys so we can import modules from the dist dir. sys.path.append(sys.prefix) os.chdir(sys.prefix) else: import sourceEnv #We should always change directory to the location of this module (nvda.pyw), don't rely on sys.path[0] os.chdir(os.path.normpath(os.path.dirname(__file__))) import pythonMonkeyPatches import ctypes import locale import gettext #Localization settings locale.setlocale(locale.LC_ALL,'') try: gettext.translation('nvda',localedir='locale',languages=[locale.getlocale()[0]]).install(True) except: gettext.install('nvda',unicode=True) import time import argparse import win32con import globalVars import config import logHandler from logHandler import log import winUser import winKernel # Find out if NVDA is running as a Windows Store application bufLen=ctypes.c_int() try: GetCurrentPackageFullName=ctypes.windll.kernel32.GetCurrentPackageFullName except AttributeError: config.isAppX=False else: bufLen=ctypes.c_int() # Use GetCurrentPackageFullName to detect if we are running as a store app. # #8362: error 15700 (not a package) error is returned if this is not a Windows Store package. config.isAppX=(GetCurrentPackageFullName(ctypes.byref(bufLen),None)!=15700) class NoConsoleOptionParser(argparse.ArgumentParser): """A commandline option parser that shows its messages using dialogs, as this pyw file has no dos console window associated with it""" def print_help(self, file=None): """Shows help in a standard Windows message dialog""" winUser.MessageBox(0, unicode(self.format_help()), u"Help", 0) def error(self, message): """Shows an error in a standard Windows message dialog, and then exits NVDA""" out = "" out = self.format_usage() out += "\nerror: %s" % message winUser.MessageBox(0, unicode(out), u"Error", 0) sys.exit(2) globalVars.startTime=time.time() # Check OS version requirements import winVersion if not winVersion.isSupportedOS(): winUser.MessageBox(0, ctypes.FormatError(winUser.ERROR_OLD_WIN_VERSION), None, winUser.MB_ICONERROR) sys.exit(1) def decodeMbcs(string): """Decode a multi-byte character set string""" return string.decode("mbcs") def stringToBool(string): """Wrapper for configobj.validate.is_boolean to raise the proper exception for wrong values.""" from configobj.validate import is_boolean, ValidateError try: return is_boolean(string) except ValidateError as e: raise argparse.ArgumentTypeError(e.message) #Process option arguments parser=NoConsoleOptionParser() quitGroup = parser.add_mutually_exclusive_group() quitGroup.add_argument('-q','--quit',action="store_true",dest='quit',default=False,help="Quit already running copy of NVDA") quitGroup.add_argument('-r','--replace',action="store_true",dest='replace',default=False,help="Quit already running copy of NVDA and start this one") parser.add_argument('-k','--check-running',action="store_true",dest='check_running',default=False,help="Report whether NVDA is running via the exit code; 0 if running, 1 if not running") parser.add_argument('-f','--log-file',dest='logFileName',type=decodeMbcs,help="The file where log messages should be 
written to") parser.add_argument('-l','--log-level',dest='logLevel',type=int,default=0,choices=[10, 12, 15, 20, 30, 40, 50, 100],help="The lowest level of message logged (debug 10, input/output 12, debugwarning 15, info 20, warning 30, error 40, critical 50, off 100), default is info") parser.add_argument('-c','--config-path',dest='configPath',default=None,type=decodeMbcs,help="The path where all settings for NVDA are stored") parser.add_argument('-m','--minimal',action="store_true",dest='minimal',default=False,help="No sounds, no interface, no start message etc") parser.add_argument('-s','--secure',action="store_true",dest='secure',default=False,help="Secure mode (disable Python console)") parser.add_argument('--disable-addons',action="store_true",dest='disableAddons',default=False,help="Disable all add-ons") parser.add_argument('--debug-logging',action="store_true",dest='debugLogging',default=False,help="Enable debug level logging just for this run. This setting will override any other log level (--loglevel, -l) argument given, as well as no logging option.") parser.add_argument('--no-logging',action="store_true",dest='noLogging',default=False,help="Disable logging completely for this run. This setting can be overwritten with other log level (--loglevel, -l) switch or if debug logging is specified.") parser.add_argument('--no-sr-flag',action="store_false",dest='changeScreenReaderFlag',default=True,help="Don't change the global system screen reader flag") installGroup = parser.add_mutually_exclusive_group() installGroup.add_argument('--install',action="store_true",dest='install',default=False,help="Installs NVDA (starting the new copy after installation)") installGroup.add_argument('--install-silent',action="store_true",dest='installSilent',default=False,help="Installs NVDA silently (does not start the new copy after installation).") installGroup.add_argument('--create-portable',action="store_true",dest='createPortable',default=False,help="Creates a portable copy of NVDA (starting the new copy after installation)") installGroup.add_argument('--create-portable-silent',action="store_true",dest='createPortableSilent',default=False,help="Creates a portable copy of NVDA silently (does not start the new copy after installation).") parser.add_argument('--portable-path',dest='portablePath',default=None,type=decodeMbcs,help="The path where a portable copy will be created") parser.add_argument('--launcher',action="store_true",dest='launcher',default=False,help="Started from the launcher") parser.add_argument('--enable-start-on-logon',metavar="True|False",type=stringToBool,dest='enableStartOnLogon',default=None, help="When installing, enable NVDA's start on the logon screen") # This option currently doesn't actually do anything. # It is passed by Ease of Access so that if someone downgrades without uninstalling (despite our discouragement), # the downgraded copy won't be started in non-secure mode on secure desktops. # (Older versions always required the --secure option to start in secure mode.) # If this occurs, the user will see an obscure error, # but that's far better than a major security hazzard. 
parser.add_argument('--ease-of-access',action="store_true",dest='easeOfAccess',default=False,help="Started by Windows Ease of Access") (globalVars.appArgs,globalVars.appArgsExtra)=parser.parse_known_args() def terminateRunningNVDA(window): processID,threadID=winUser.getWindowThreadProcessID(window) winUser.PostMessage(window,win32con.WM_QUIT,0,0) h=winKernel.openProcess(winKernel.SYNCHRONIZE,False,processID) if not h: # The process is already dead. return try: res=winKernel.waitForSingleObject(h,4000) if res==0: # The process terminated within the timeout period. return finally: winKernel.closeHandle(h) # The process is refusing to exit gracefully, so kill it forcefully. h = winKernel.openProcess(winKernel.PROCESS_TERMINATE | winKernel.SYNCHRONIZE, False, processID) if not h: raise OSError("Could not open process for termination") try: winKernel.TerminateProcess(h, 1) winKernel.waitForSingleObject(h, 2000) finally: winKernel.closeHandle(h) #Handle running multiple instances of NVDA try: oldAppWindowHandle=winUser.FindWindow(u'wxWindowClassNR',u'NVDA') except: oldAppWindowHandle=0 if not winUser.isWindow(oldAppWindowHandle): oldAppWindowHandle=0 if oldAppWindowHandle and (globalVars.appArgs.quit or globalVars.appArgs.replace): try: terminateRunningNVDA(oldAppWindowHandle) except: sys.exit(1) if globalVars.appArgs.quit or (oldAppWindowHandle and not globalVars.appArgs.replace): sys.exit(0) elif globalVars.appArgs.check_running: # NVDA is not running. sys.exit(1) UOI_NAME = 2 def getDesktopName(): desktop = ctypes.windll.user32.GetThreadDesktop(ctypes.windll.kernel32.GetCurrentThreadId()) name = ctypes.create_unicode_buffer(256) ctypes.windll.user32.GetUserObjectInformationW(desktop, UOI_NAME, ctypes.byref(name), ctypes.sizeof(name), None) return name.value #Ensure multiple instances are not fully started by using a mutex ERROR_ALREADY_EXISTS=0XB7 desktopName=getDesktopName() mutex=ctypes.windll.kernel32.CreateMutexW(None,True,u"Local\\NVDA_%s"%desktopName) if not mutex or ctypes.windll.kernel32.GetLastError()==ERROR_ALREADY_EXISTS: if mutex: ctypes.windll.kernel32.CloseHandle(mutex) sys.exit(1) isSecureDesktop = desktopName == "Winlogon" if isSecureDesktop: try: import _winreg as winreg # Python 2.7 import except ImportError: import winreg # Python 3 import try: k = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, ur"SOFTWARE\NVDA") if not winreg.QueryValueEx(k, u"serviceDebug")[0]: globalVars.appArgs.secure = True except WindowsError: globalVars.appArgs.secure = True globalVars.appArgs.changeScreenReaderFlag = False globalVars.appArgs.minimal = True globalVars.appArgs.configPath = os.path.join(sys.prefix, "systemConfig") #os.environ['PYCHECKER']="--limit 10000 -q --changetypes" #import pychecker.checker #Initial logging and logging code # #8516: because config manager isn't ready yet, we must let start and exit messages be logged unless disabled via --no-logging switch. # However, do log things if debug logging or log level other than 0 (not set) is requested from command line switches. 
logLevel=globalVars.appArgs.logLevel if globalVars.appArgs.noLogging and (not globalVars.appArgs.debugLogging and logLevel == 0): logLevel = log.OFF else: if logLevel<=0: logLevel=log.INFO if globalVars.appArgs.debugLogging: logLevel=log.DEBUG logHandler.initialize() logHandler.log.setLevel(logLevel) if logLevel is log.DEBUG: log.debug("Provided arguments: {}".format(sys.argv[1:])) log.info("Starting NVDA") log.debug("Debug level logging enabled") if globalVars.appArgs.changeScreenReaderFlag: winUser.setSystemScreenReaderFlag(True) #Accept wm_quit from other processes, even if running with higher privilages if not ctypes.windll.user32.ChangeWindowMessageFilter(win32con.WM_QUIT,1): raise WinError() # Make this the last application to be shut down and don't display a retry dialog box. winKernel.SetProcessShutdownParameters(0x100, winKernel.SHUTDOWN_NORETRY) if not isSecureDesktop and not config.isAppX: import easeOfAccess easeOfAccess.notify(3) try: import core core.main() except: log.critical("core failure",exc_info=True) sys.exit(1) finally: if not isSecureDesktop and not config.isAppX: easeOfAccess.notify(2) if globalVars.appArgs.changeScreenReaderFlag: winUser.setSystemScreenReaderFlag(False) ctypes.windll.kernel32.CloseHandle(mutex) log.info("NVDA exit") sys.exit(globalVars.exitCode)
[]
[]
[ "PYCHECKER" ]
[]
["PYCHECKER"]
python
1
0
rest2cmd/rest2cmd.py
# -*- coding: utf-8 -*- import sys import os import json import yaml import string import random import shlex import subprocess from traceback import format_exc from flask import Flask, request, jsonify app = Flask(__name__) app.url_map.strict_slashes = False assert 'APP_ROOT' in os.environ, 'No APP_ROOT env variable found!' APP_ROOT = os.environ['APP_ROOT'] print('APP_ROOT', APP_ROOT) assert 'HTTP_MAP_PATH' in os.environ, 'No HTTP_MAP_PATH env variable found!' HTTP_MAP_PATH = os.environ['HTTP_MAP_PATH'] print('HTTP_MAP_PATH', HTTP_MAP_PATH) with open(HTTP_MAP_PATH, 'r') as f: try: HTTP_MAP = yaml.load(f) except yaml.YAMLError as exc: print('Problem loading yaml http map file', file=sys.stderr) print(exc, file=sys.stderr) sys.exit(1) print('HTTP_MAP', HTTP_MAP, file=sys.stderr) assert not isinstance('HTTP_MAP', dict), ( 'Wrong content in HTTP_MAP! Got %r' % HTTP_MAP ) def execute(executable, command, plugin_path): try: cmd = '%s %s' % (executable, command) parts = shlex.split(cmd) cwd = os.path.normpath(os.path.join(APP_ROOT, plugin_path)) print( 'Resolved as: %s | @%s | %s' % (cmd, cwd, parts), file=sys.stderr ) proc = subprocess.Popen( parts, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd ) # wait for the process to terminate # while proc.poll() is None: # time.sleep(0.2) out, err = proc.communicate() # wrap response is_error = proc.returncode != 0 content_stream = (err if is_error else out).decode('utf8').strip() content = content_stream.split('\n') return { 'is_error': is_error, 'content': content } except Exception: return { 'is_error': True, 'content': format_exc().split('\n') } def format_status(output): if output['is_error']: return 400 if len(output['content']) == 0: return 204 return 200 def format_output(output, is_json): # if app outpput is json format, it means there is a single line # of output or there is empty output # if it's not json, simply return what is in output content if is_json and len(output['content']) > 0: # it should be single line, first one, with json content # try to parse it, and if it fails, failover to plain text lines # this could be case if output is an error, like traceback # and executable has no control over this and can't json it try: return json.loads(output['content'][0]) except json.decoder.JSONDecodeError: pass return output['content'] def normalize_url_args(**url_args): normalized = {} for arg_name in url_args: value = url_args[arg_name] normalized[arg_name] = ('\'%s\'' if ' ' in value else '%s') % value return normalized def route_handler(path, method, config): def _call(**url_args): x_groups = request.headers.get('X-GROUPS', '').split(',') groups = config.get('groups', None) if groups is not None: intersection = set(x_groups) & set(groups) if len(intersection) == 0: return jsonify({ 'message': ( 'You don\'t have permission to access this resource.' 
) }), 403 data = request.json or {} payload = {**url_args, 'http_payload': json.dumps(data)} for k, v in (data if isinstance(data, dict) else {}).items(): payload['http_payload__%s' % k] = v payload = normalize_url_args(**payload) print('Got payload: %s', payload, file=sys.stderr) command_parts = [p % payload for p in config['command'].split()] command = ' '.join(command_parts) print('Executing: %s', command, file=sys.stderr) output = execute(config['executable'], command, config['plugin_path']) print('Got output: %s', output, file=sys.stderr) content = format_output(output, config.get('is_json', False)) status = format_status(output) print('http response(%d): %s' % (status, content), file=sys.stderr) return jsonify(content), status # id(_call) is always unique, but we need to randomize name _call.__name__ = ''.join( random.choice(string.ascii_lowercase) for _ in range(10) ) app.route(path, methods=[method])(_call) # dynamically create flask routes from http map for method, routes in HTTP_MAP.items(): for path, config in routes.items(): route_handler(path, method, config) print('Starting app ..', file=sys.stderr) if __name__ == '__main__': app.run()
[]
[]
[ "APP_ROOT", "HTTP_MAP_PATH" ]
[]
["APP_ROOT", "HTTP_MAP_PATH"]
python
2
0
examples/service/chat/user_binding/page/user_binding_page_example.go
package main import ( "log" "os" "github.com/RJPearson94/twilio-sdk-go" v2 "github.com/RJPearson94/twilio-sdk-go/service/chat/v2" "github.com/RJPearson94/twilio-sdk-go/service/chat/v2/service/user/bindings" "github.com/RJPearson94/twilio-sdk-go/session/credentials" ) var chatClient *v2.Chat func init() { creds, err := credentials.New(credentials.Account{ Sid: os.Getenv("TWILIO_ACCOUNT_SID"), AuthToken: os.Getenv("TWILIO_AUTH_TOKEN"), }) if err != nil { log.Panicf("%s", err.Error()) } chatClient = twilio.NewWithCredentials(creds).Chat.V2 } func main() { resp, err := chatClient. Service("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"). User("USXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"). Bindings. Page(&bindings.UserBindingsPageOptions{}) if err != nil { log.Panicf("%s", err.Error()) } log.Printf("%v user binding(s) found on page", len(resp.Bindings)) }
[ "\"TWILIO_ACCOUNT_SID\"", "\"TWILIO_AUTH_TOKEN\"" ]
[]
[ "TWILIO_AUTH_TOKEN", "TWILIO_ACCOUNT_SID" ]
[]
["TWILIO_AUTH_TOKEN", "TWILIO_ACCOUNT_SID"]
go
2
0
AutomatedTests/teststeps.py
# # Copyright (c) Andre Slabber. All rights reserved. # Licensed under the MIT License. See LICENSE file in the project root for full license information. # import os import time import shutil import pyautogui from pyscreeze import ImageNotFoundException import testtoolkit as tk # the very basic routines we use from PyAutoGUI again and again. def harmless_click_to_focus(): return tk.click(145, 1028) def clear_appdata(): futureai_localappdata = os.path.join(os.getenv('LOCALAPPDATA'), 'FutureAI') shutil.rmtree(futureai_localappdata) return True def select_no_on_save_prompt(): time.sleep(0.2) if tk.wait_and_hover('save_question'): time.sleep(0.2) tk.click(975, 600) return True def do_menu_choice(menu, item): if not harmless_click_to_focus(): return False if not tk.wait_and_click(menu): return False if not tk.wait_and_click(item): return False def do_icon_choice(icon_choice): if not harmless_click_to_focus(): return False if not tk.wait_and_click(icon_choice): return False return True def check_synapse_is_drawn_correctly(weight, model, drawn_synapse): select_weight_combobox(int(weight)) select_model_combobox(int(model)) tk.drag_from_to(30, 115, 95, 115, 0.2) harmless_click_to_focus() if not tk.wait_and_hover(drawn_synapse): return False pyautogui.hotkey('control', 'Z') return True def select_module_combobox(page, option): ys = [92, 110, 130, 150, 170, 190, 210, 230, 250, 270, 290, 310, 330, 350, 370, 390, 410] tk.click(1411, 67) time.sleep(0.5) # go to first page for i in range(2): tk.click(1460, 103) # go to correct page for i in range(page): tk.click(1460, 420) tk.click(1411, ys[option]) def select_weight_combobox(option): ys = [92, 110, 130, 150, 170, 190, 210, 230, 250, 270, 290] tk.click(1630, 67) time.sleep(0.1) tk.click(1630, ys[option]) def select_model_combobox(option): ys = [92, 110, 130, 150] tk.click(1756, 67) time.sleep(0.1) tk.click(1756, ys[option]) def insert_module(page, index): select_module_combobox(int(page), int(index)) pyautogui.moveTo([95, 185]) pyautogui.click([95, 185]) harmless_click_to_focus() def select_module(page, screenshot): # go to first page for i in range(2): tk.click(365, 350) # go to correct page for i in range(page): tk.click(365, 670) if not tk.wait_and_click(screenshot): result = False def remove_module(): pyautogui.rightClick([70, 150]) return tk.wait_and_click('delete_module_item')
[]
[]
[ "LOCALAPPDATA" ]
[]
["LOCALAPPDATA"]
python
1
0
setup.py
#!/usr/bin/env python # -*- coding: utf-8 -*- try: from setuptools import setup except ImportError: from distutils.core import setup with open('README.rst') as readme_file: readme = readme_file.read() with open('HISTORY.rst') as history_file: history = history_file.read() requirements = [ 'flask', 'requests', 'PyYAML', 'celery', 'redis', 'click', ] test_requirements = [ # TODO: put package test requirements here ] setup( name='pia', version='0.1.0', description="Chaining http requests into one call.", long_description=readme + '\n\n' + history, author="Ju Lin", author_email='[email protected]', url='https://github.com/soasme/pia', packages=[ 'pia', ], package_dir={'pia': 'pia'}, include_package_data=True, install_requires=requirements, license="ISCL", zip_safe=False, keywords='pia', classifiers=[ 'Development Status :: 2 - Pre-Alpha', 'Intended Audience :: Developers', 'License :: OSI Approved :: ISC License (ISCL)', 'Natural Language :: English', "Programming Language :: Python :: 2", 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', ], test_suite='tests', tests_require=test_requirements )
[]
[]
[]
[]
[]
python
null
null
null
ansible/plugins/lookup/hashi_vault.py
# (c) 2015, Jonathan Davila <jdavila(at)ansible.com> # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = """ lookup: hashi_vault author: Jonathan Davila <jdavila(at)ansible.com> version_added: "2.0" short_description: retrieve secrets from HasihCorp's vault requirements: - hvac (python library) description: - retrieve secrets from HasihCorp's vault notes: - Due to a current limitation in the HVAC library there won't necessarily be an error if a bad endpoint is specified. options: secret: description: query you are making required: True token: description: vault token env: - name: VAULT_TOKEN url: description: url to vault service env: - name: VAULT_ADDR default: 'http://127.0.0.1:8200' username: description: authentication user name password: description: authentication password auth_method: description: authentication method used mount_point: description: vault mount point, only required if you have a custom mount point default: ldap cacert: description: path to certificate to use for authentication validate_certs: description: controls verification and validation of SSL certificates, mostly you only want to turn off with self signed ones. type: boolean default: True """ EXAMPLES = """ - debug: msg: "{{ lookup('hashi_vault', 'secret=secret/hello:value token=c975b780-d1be-8016-866b-01d0f9b688a5 url=http://myvault:8200')}}" - name: Return all secrets from a path debug: msg: "{{ lookup('hashi_vault', 'secret=secret/hello token=c975b780-d1be-8016-866b-01d0f9b688a5 url=http://myvault:8200')}}" - name: Vault that requires authentication via LDAP debug: msg: "{{ lookup('hashi_vault', 'secret=secret/hello:value auth_method=ldap mount_point=ldap username=myuser password=mypas url=http://myvault:8200')}}" - name: Using an ssl vault debug: msg: "{{ lookup('hashi_vault', 'secret=secret/hola:value token=c975b780-d1be-8016-866b-01d0f9b688a5 url=https://myvault:8200 validate_certs=False')}}" - name: using certificate auth debug: msg: "{{ lookup('hashi_vault', 'secret=secret/hi:value token=xxxx-xxx-xxx url=https://myvault:8200 validate_certs=True cacert=/cacert/path/ca.pem')}}" """ RETURN = """ _raw: description: - secrets(s) requested """ import os from ansible.errors import AnsibleError from ansible.module_utils.parsing.convert_bool import boolean from ansible.plugins.lookup import LookupBase HAS_HVAC = False try: import hvac HAS_HVAC = True except ImportError: HAS_HVAC = False ANSIBLE_HASHI_VAULT_ADDR = 'http://127.0.0.1:8200' if os.getenv('VAULT_ADDR') is not None: ANSIBLE_HASHI_VAULT_ADDR = os.environ['VAULT_ADDR'] class HashiVault: def __init__(self, **kwargs): self.url = kwargs.get('url', ANSIBLE_HASHI_VAULT_ADDR) # split secret arg, which has format 'secret/hello:value' into secret='secret/hello' and secret_field='value' s = kwargs.get('secret') if s is None: raise AnsibleError("No secret specified for hashi_vault lookup") s_f = s.split(':') self.secret = s_f[0] if len(s_f) >= 2: self.secret_field = s_f[1] else: self.secret_field = '' # If a particular backend is asked for (and its method exists) we call it, otherwise drop through to using # token auth. This means if a particular auth backend is requested and a token is also given, then we # ignore the token and attempt authentication against the specified backend. # # to enable a new auth backend, simply add a new 'def auth_<type>' method below. 
# self.auth_method = kwargs.get('auth_method') if self.auth_method: try: self.client = hvac.Client(url=self.url) # prefixing with auth_ to limit which methods can be accessed getattr(self, 'auth_' + self.auth_method)(**kwargs) except AttributeError: raise AnsibleError("Authentication method '%s' not supported" % self.auth_method) else: self.token = kwargs.get('token', os.environ.get('VAULT_TOKEN', None)) if self.token is None and os.environ.get('HOME'): token_filename = os.path.join( os.environ.get('HOME'), '.vault-token' ) if os.path.exists(token_filename): with open(token_filename) as token_file: self.token = token_file.read().strip() if self.token is None: raise AnsibleError("No Vault Token specified") self.verify = self.boolean_or_cacert(kwargs.get('validate_certs', True), kwargs.get('cacert', '')) self.client = hvac.Client(url=self.url, token=self.token, verify=self.verify) if not self.client.is_authenticated(): raise AnsibleError("Invalid Hashicorp Vault Token Specified for hashi_vault lookup") def get(self): data = self.client.read(self.secret) if data is None: raise AnsibleError("The secret %s doesn't seem to exist for hashi_vault lookup" % self.secret) if self.secret_field == '': return data['data'] if self.secret_field not in data['data']: raise AnsibleError("The secret %s does not contain the field '%s'. for hashi_vault lookup" % (self.secret, self.secret_field)) return data['data'][self.secret_field] def auth_ldap(self, **kwargs): username = kwargs.get('username') if username is None: raise AnsibleError("Authentication method ldap requires a username") password = kwargs.get('password') if password is None: raise AnsibleError("Authentication method ldap requires a password") mount_point = kwargs.get('mount_point') if mount_point is None: mount_point = 'ldap' self.client.auth_ldap(username, password, mount_point) def boolean_or_cacert(self, validate_certs, cacert): validate_certs = boolean(validate_certs, strict=False) '''' return a bool or cacert ''' if validate_certs is True: if cacert != '': return cacert else: return True else: return False class LookupModule(LookupBase): def run(self, terms, variables, **kwargs): if not HAS_HVAC: raise AnsibleError("Please pip install hvac to use the hashi_vault lookup module.") vault_args = terms[0].split(' ') vault_dict = {} ret = [] for param in vault_args: try: key, value = param.split('=') except ValueError: raise AnsibleError("hashi_vault lookup plugin needs key=value pairs, but received %s" % terms) vault_dict[key] = value vault_conn = HashiVault(**vault_dict) for term in terms: key = term.split()[0] value = vault_conn.get() ret.append(value) return ret
[]
[]
[ "VAULT_ADDR", "HOME", "VAULT_TOKEN" ]
[]
["VAULT_ADDR", "HOME", "VAULT_TOKEN"]
python
3
0
drrun4.py
#!/usr/bin/python2.7 '''Trains a simple convnet on the MNIST dataset. Gets to 99.25% test accuracy after 12 epochs (there is still a lot of margin for parameter tuning). 16 seconds per epoch on a GRID K520 GPU. ''' #python jetdual.py --save dualn2200 --network nnn2 --pt 200 --epoch 50 --stride 2 --gpu 3 #python jetdual.py --save dualn2m2200 --network nnn2 --pt 200 --epoch 50 --stride 2 --gpu 4 --pred 1 --mod 2 from __future__ import print_function import argparse parser=argparse.ArgumentParser() parser.add_argument("--end",type=float,default=1.,help='end ratio') parser.add_argument("--save",type=str,default="test",help='save name') parser.add_argument("--network",type=str,default="dr3dmodel",help='network name on symbols/') parser.add_argument("--opt",type=str,default="sgd",help='optimizer sgd rms adam') parser.add_argument("--pt",type=int,default=20,help='pt range pt~pt*1.1') parser.add_argument("--ptmin",type=float,default=0.,help='pt range pt~pt*1.1') parser.add_argument("--ptmax",type=float,default=2.,help='pt range pt~pt*1.1') parser.add_argument("--epochs",type=int,default=10,help='num epochs') parser.add_argument("--batch_size",type=int,default=512,help='batch_size') parser.add_argument("--loss",type=str,default="categorical_crossentropy",help='network name on symbols/') parser.add_argument("--gpu",type=int,default=0,help='gpu number') parser.add_argument("--voxel",type=int,default=0,help='0 or z or not') parser.add_argument("--unscale",type=int,default=1,help='end ratio') parser.add_argument("--normb",type=float,default=1.,help='end ratio') parser.add_argument("--stride",type=int,default=1,help='end ratio') parser.add_argument("--pred",type=int,default=0,help='end ratio') parser.add_argument("--channel",type=int,default=4,help='end ratio') parser.add_argument("--channels",type=str,default="1",help='end ratio') parser.add_argument("--mod",type=int,default=0,help='end ratio') parser.add_argument("--pix",type=int,default=23,help='pixel num') parser.add_argument("--rot",type=int,default=0,help='pixel num') parser.add_argument("--seed",type=str,default="",help='seed of model') parser.add_argument("--memo",type=str,default="",help='some memo') args=parser.parse_args() import os import keras from keras.models import Sequential, Model from keras.layers import Dense, Dropout, Flatten, Embedding from keras.layers import Conv2D, MaxPooling2D, SimpleRNN from keras import backend as K from numpy.random import seed #seed(101) import subprocess import random import warnings import math from array import array import numpy as np #import ROOT as rt import tensorflow as tf from keras.backend.tensorflow_backend import set_session from importlib import import_module from sklearn.utils import shuffle from sklearn.preprocessing import normalize import datetime from sklearn.metrics import roc_auc_score, auc, roc_curve def valauc(y_true,y_pred): #return roc_auc_score(y_true,y_pred) print(y_true,y_pred) return K.mean(y_pred) start=datetime.datetime.now() if(args.gpu!=-1): print("gpugpu") os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"]=str(args.gpu) #config =tf.ConfigProto(device_count={'GPU':1}) #config.gpu_options.per_process_gpu_memory_fraction=0.6 #set_session(tf.Session(config=config)) #gpus = tf.config.experimental.list_physical_devices('GPU') #tf.config.experimental.set_visible_devices(gpus[0], 'GPU') batch_size = args.batch_size num_classes = 2 epochs = args.epochs print(epochs) # input image dimensions if(args.loss=="weakloss"):args.loss=weakloss 
net=import_module('symbols.symbols') channel=args.channel if(channel==-1): channels=[int(i) for i in args.channels.split(",")] pix=args.pix if(args.voxel==1): if(pix==24): input_shape=(channel,23,23,pix) elif(channel<1): input_shape=(1,pix,pix,pix) else: input_shape=(channel,pix,pix,pix) if("egp" in args.memo): model=net.dr3d(args.network,input_shape,3) else: model=net.dr3d(args.network,input_shape,2) else: if(channel==-1): model=net.drmodel((len(channels),pix,pix)) elif(channel<1): model=net.drmodel((1,pix,pix)) else: model=net.drmodel((channel,pix,pix)) if(args.opt=="sgd"): opt=keras.optimizers.SGD() if(args.opt=="rms"): opt=keras.optimizers.RMSprop() if(args.opt=="adam"): opt=keras.optimizers.Adam() losses=args.loss if(args.stride==2): if(args.mod==0): losses={"output1" : args.loss,"output2" : args.loss} lossweight={"output1" : 1.0, "output2" : 1.0} else: losses=losses={"output" : args.loss} lossweight= {"output" : 1.0} else: losses=losses={"output1" : args.loss} lossweight= {"output1" : 1.0} model.compile(loss=losses, optimizer=opt, loss_weights=lossweight, metrics=['accuracy',keras.metrics.AUC()]) """model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.SGD(), metrics=['accuracy']) """ savename='save/'+str(args.save) os.system("mkdir -p "+savename) os.system("rm "+savename+'/log.log') os.system("cp symbols/symbols.py "+savename+'/') #from keras.utils import plot_model #plot_model(model,to_file=savename+'/model.png') #plot_model(model,to_file='/home/yulee/keras/model.png') print("### plot done ###") import logging logging.basicConfig(filename=savename+'/log.log',level=logging.DEBUG) logging.info(str(args)) logging.info(str(datetime.datetime.now())) checkpoint=keras.callbacks.ModelCheckpoint(filepath=savename+'/check_{epoch}',monitor='val_loss',verbose=0,save_best_only=False,mode='auto',period=1) if(args.rot==1): if(args.voxel==1): if("qg" in args.memo): loaded=np.load("/home/yulee/keras/rot16ug{}vox.npz".format(args.pt)) else: loaded=np.load("/home/yulee/keras/rot16{}vox.npz".format(args.pt)) else: if("qg" in args.memo): loaded=np.load("/home/yulee/keras/rot16ug{}img.npz".format(args.pt)) else: loaded=np.load("/home/yulee/keras/rot16{}img.npz".format(args.pt)) else: if(pix==17): loaded=np.load("/hdfs/store/user/yulee/DRsim/side{}img.npz".format(args.pt)) if(pix==23): if(channel==0): loaded=np.load("/hdfs/store/user/yulee/DRsim/side23{}img.npz".format(args.pt)) else: loaded=np.load("/hdfs/store/user/yulee/DRsim/side023{}img.npz".format(args.pt)) if(pix==45): loaded=np.load("/home/yulee/keras/side45{}img.npz".format(args.pt)) if(pix==24): loaded=np.load("/home/yulee/keras/side0123{}img.npz".format(args.pt)) if(args.rot==1): imgset=loaded else: if(args.voxel==1): imgset=loaded["voxels"].item() else: imgset=loaded["imgset"].item() if("egp" in args.memo): el=imgset["el"][:,:channel] ga=imgset["ga"][:,:channel] pi=imgset["pi"][:,:channel] target="egp" ellabel=len(el)*[[1.,0.,0.]] galabel=len(ga)*[[0.,1.,0.]] pilabel=len(pi)*[[0.,0.,1.]] eentcut=int(0.7*len(ellabel)) gentcut=int(0.7*len(galabel)) pentcut=int(0.7*len(pilabel)) X,Y=shuffle(np.concatenate([el[:eentcut],ga[:gentcut],pi[:pentcut]]),np.concatenate([ellabel[:eentcut],galabel[:gentcut],pilabel[:pentcut]])) testX,testY=shuffle(np.concatenate([el[eentcut:],ga[gentcut:],pi[pentcut:]]),np.concatenate([ellabel[eentcut:],galabel[gentcut:],pilabel[pentcut:]])) else: if("qg" in args.memo): if(channel==-1): el=np.stack([normalize(imgset["uj"][:,i].reshape((-1,pix*pix))).reshape((-1,pix,pix)) for i in 
channels],axis=1) pi=np.stack([normalize(imgset["gj"][:,i].reshape((-1,pix*pix))).reshape((-1,pix,pix)) for i in channels],axis=1) #el=np.stack([imgset["uj"][:,i] for i in channels],axis=1) #pi=np.stack([imgset["gj"][:,i] for i in channels],axis=1) #el=imgset["uj"][:,0].reshape((-1,17*17)) #pi=imgset["gj"][:,0].reshape((-1,17*17)) """for i in range(len(el)): el[i]=el[i]/max(el[i]) for i in range(len(pi)): pi[i]=pi[i]/max(pi[i]) el=el.reshape((-1,1,17,17)) pi=pi.reshape((-1,1,17,17))""" else: if(channel==0): el=imgset["uj"][:,:1] pi=imgset["gj"][:,:1] else: el=imgset["uj"][:,:channel] pi=imgset["gj"][:,:channel] """el2=[] for i in range(len(el)/2): el2.append(el[2*i]+el[2*i+1]) pi2=[] for i in range(len(pi)/2): pi2.append(pi[2*i]+pi[2*i+1]) el=np.array(el2) pi=np.array(pi2)""" target="qg" if("ep" in args.memo): el=imgset["el"][:,:channel] pi=imgset["pi"][:,:channel] target="ep" if("gp" in args.memo): el=imgset["ga"][:,:channel] pi=imgset["pi"][:,:channel] target="gp" if("eg" in args.memo): el=imgset["el"][:,:channel] pi=imgset["ga"][:,:channel] target="eg" ellabel=len(el)*[[1.,0.]] pilabel=len(pi)*[[0.,1.]] eentcut=int(0.7*len(ellabel)) pentcut=int(0.7*len(pilabel)) X,Y=shuffle(np.concatenate([el[:eentcut],pi[:pentcut]]),np.concatenate([ellabel[:eentcut],pilabel[:pentcut]])) testX,testY=shuffle(np.concatenate([el[eentcut:],pi[pentcut:]]),np.concatenate([ellabel[eentcut:],pilabel[pentcut:]])) print(model.summary()) print("shape",Y.shape,X.shape) if(args.stride==1):history=model.fit(X,Y,batch_size=128,epochs=epochs,verbose=1,validation_split=0.3,callbacks=[checkpoint]) #print(history.history) f=open(savename+'/history','w') try: one=history.history['val_loss'].index(min(history.history['val_loss'])) f.write(str(one)+'\n') print(one) for i in range(epochs): if(i!=one):os.system("rm "+savename+"/check_"+str(i+1)) except: print("failed to drop") f.write(str(history.history)) f.close() print (datetime.datetime.now()-start) logging.info("memo "+args.memo) logging.info("spent time "+str(datetime.datetime.now()-start)) logging.info("python jetdualpred.py --save {} --pt {} --stride {} --gpu {} --mod {}".format(args.save,args.pt,args.stride,args.gpu,args.mod)) import matplotlib.pyplot as plt bp=model.predict(testX,verbose=0) #bp=model.predict(X[int(0.4*len(X)):],verbose=0) fpr,tpr,thresholds=roc_curve(testY[:,0],bp[:,0]) #fpr,tpr,thresholds=roc_curve(Y[int(0.6*len(Y)):][:,0],bp[:,0]) fs=25 tnr=1-fpr plt.figure(figsize=(12, 8)) if("qg" in args.memo): plt.xlabel("Quark Efficiency", fontsize=fs*1.2) plt.ylabel("Gluon Rejection", fontsize=fs*1.2) if("ep" in args.memo): plt.xlabel("e- Efficiency", fontsize=fs*1.2) plt.ylabel("pi+ Rejection", fontsize=fs*1.2) if("gp" in args.memo): plt.xlabel("r- Efficiency", fontsize=fs*1.2) plt.ylabel("pi+ Rejection", fontsize=fs*1.2) if("eg" in args.memo): plt.xlabel("e- Efficiency", fontsize=fs*1.2) plt.ylabel("r Rejection", fontsize=fs*1.2) plt.tick_params(labelsize=fs) print("AUC:{}".format(round(roc_auc_score(testY[:,0],bp[:,0]),4))) f=open("/home/yulee/keras/{}.auc".format(args.memo),"a") if(channel==-1): f.write("{}:{}".format(channels,roc_auc_score(testY[:,0],bp[:,0]))) f.write("\n") else: f.write("{}".format(roc_auc_score(testY[:,0],bp[:,0]))) f.write("\n") f.close() label="AUC:{}".format(round(roc_auc_score(testY[:,0],bp[:,0]),4)) #label="AUC:{}".format(round(roc_auc_score(Y[int(0.6*len(Y)):][:,0],bp[:,0]),4)) plt.plot(tpr,tnr,lw=3.5,label=label,linestyle="-") plt.legend(loc=3, fontsize=fs*0.9) plt.grid(alpha=0.6) plt.axis((0,1,0,1)) 
plt.savefig("{}.png".format(args.memo),bbox_inches='tight',pad_inches=0.5)
[]
[]
[ "CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES" ]
[]
["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"]
python
2
0
cogs/top.py
from discord.ext import commands, tasks import os import logging import discordlists import topgg class DSLCount(commands.Cog): "Speacil Manager for publishing guild count and user count." def __init__(self, bot): self.bot = bot self.token = os.environ["topgg_key"] self.api = discordlists.Client(self.bot) self.api.set_auth("disforge.com", os.environ["disforge_key"]) self.api.set_auth("discord-botlist.eu", os.environ["botlist_eu_key"]) self.api.start_loop() self.topgg = topgg.DBLClient(self.bot, self.token) self.update_stats.start() @tasks.loop(minutes=5) async def update_stats(self): self.logger.info("Attempting to post server count") try: await self.topgg.post_guild_count() self.logger.info("Posted server count ({})".format(self.topgg.guild_count)) except Exception as e: self.logger.exception("Failed to post server count\n{}: {}".format(type(e).__name__, e)) def cog_unload(self): self.update_stats.stop() self.api.stop() # not sure if doing self.api is okay, but it should be. async def setup(bot): DSLCount.logger = logging.getLogger("bot") await bot.add_cog(DSLCount(bot))
[]
[]
[ "botlist_eu_key", "topgg_key", "disforge_key" ]
[]
["botlist_eu_key", "topgg_key", "disforge_key"]
python
3
0
check_duplicate_abstracts.py
import os os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'pep.settings') import django django.setup() from main.models import Abstract from django_file_md5 import calculate_md5 def check_duplicate_abstracts(): abstracts = Abstract.objects.all() md5_dict = {} for abstract in abstracts: md5_dict[abstract.uid] = calculate_md5(abstract.document) rev_multiduct_md5_dict = {} for uid, md5 in md5_dict.items(): rev_multiduct_md5_dict.setdefault(md5, set()).add(uid) with open('duplicate_abstracts_uid_list', 'w') as f: for uid_set in [uids for md5, uids in rev_multiduct_md5_dict.items() if len(uids) > 1]: f.write("%s\n" % uid_set) if __name__ == '__main__': print("Finding duplicate abstracts") check_duplicate_abstracts()
[]
[]
[]
[]
[]
python
0
0
cmd/config.go
/* Copyright 2016 Skippbox, Ltd. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cmd import ( "fmt" "io/ioutil" "os" "github.com/spf13/cobra" ) // configCmd represents the config command var configCmd = &cobra.Command{ Use: "config", Short: "modify kubewatch configuration", Long: ` config command allows admin setup his own configuration for running kubewatch`, Run: func(cmd *cobra.Command, args []string) { cmd.Help() }, } var configAddCmd = &cobra.Command{ Use: "add", Short: "add webhook config to .kubewatch.yaml", Long: ` Adds webhook config to .kubewatch.yaml`, Run: func(cmd *cobra.Command, args []string) { cmd.Help() }, } var configViewCmd = &cobra.Command{ Use: "view", Short: "view .kubewatch.yaml", Long: ` display the contents of the contents of .kubewatch.yaml`, Run: func(cmd *cobra.Command, args []string) { fmt.Println("Contents of .kubewatch.yaml") configFile, err := ioutil.ReadFile(os.Getenv("HOME") + "/" + ".kubewatch.yaml") if err != nil { fmt.Printf("yamlFile.Get err #%v ", err) } fmt.Println(string(configFile)) }, } func init() { RootCmd.AddCommand(configCmd) configCmd.AddCommand( configViewCmd, configAddCmd, ) configAddCmd.AddCommand( slackConfigCmd, ) }
[ "\"HOME\"" ]
[]
[ "HOME" ]
[]
["HOME"]
go
1
0
drivers/cassandra-driver/cassandra_test.go
package cassandra import ( "net/url" "os" "reflect" "testing" "time" "github.com/db-journey/migrate/v2/direction" "github.com/db-journey/migrate/v2/driver" "github.com/db-journey/migrate/v2/file" "github.com/gocql/gocql" ) func TestMigrate(t *testing.T) { var session *gocql.Session host := os.Getenv("CASSANDRA_PORT_9042_TCP_ADDR") port := os.Getenv("CASSANDRA_PORT_9042_TCP_PORT") driverURL := "cassandra://" + host + ":" + port + "/system?protocol=4" // prepare a clean test database. u, err := url.Parse(driverURL) if err != nil { t.Fatal(err) } cluster := gocql.NewCluster(u.Host) cluster.Keyspace = u.Path[1:len(u.Path)] cluster.Consistency = gocql.All cluster.Timeout = 1 * time.Minute cluster.ProtoVersion = 4 session, err = cluster.CreateSession() if err != nil { //t.Fatal(err) } if err := resetKeySpace(session); err != nil { t.Fatal(err) } cluster.Keyspace = "migrate" session, err = cluster.CreateSession() driverURL = "cassandra://" + host + ":" + port + "/migrate?protocol=4" var d driver.Driver if d, err = Open(driverURL); err != nil { t.Fatal(err) } files := []file.File{ { Path: "/foobar", FileName: "20060102150405_foobar.up.sql", Version: 20060102150405, Name: "foobar", Direction: direction.Up, Content: []byte(` CREATE TABLE yolo ( id varint primary key, msg text ); CREATE INDEX ON yolo (msg); `), }, { Path: "/foobar", FileName: "20060102150405_foobar.down.sql", Version: 20060102150405, Name: "foobar", Direction: direction.Down, Content: []byte(` DROP TABLE yolo; `), }, { Path: "/foobar", FileName: "20060102150406_foobar.up.sql", Version: 20060102150406, Name: "foobar", Direction: direction.Up, Content: []byte(` CREATE TABLE error ( id THIS WILL CAUSE AN ERROR ) `), }, } err = d.Migrate(files[0]) if err != nil { t.Fatal(err) } version, err := d.Version() if err != nil { t.Fatal(err) } if version != 20060102150405 { t.Errorf("Expected version to be: %d, got: %d", 20060102150405, version) } // Check versions applied in DB. expectedVersions := file.Versions{20060102150405} versions, err := d.Versions() if err != nil { t.Errorf("Could not fetch versions: %s", err) } if !reflect.DeepEqual(versions, expectedVersions) { t.Errorf("Expected versions to be: %v, got: %v", expectedVersions, versions) } err = d.Migrate(files[1]) if err != nil { t.Fatal(err) } err = d.Migrate(files[2]) if err == nil { t.Error("Expected test case to fail") } // Check versions applied in DB. expectedVersions = file.Versions{} versions, err = d.Versions() if err != nil { t.Errorf("Could not fetch versions: %s", err) } if !reflect.DeepEqual(versions, expectedVersions) { t.Errorf("Expected versions to be: %v, got: %v", expectedVersions, versions) } if err := resetKeySpace(session); err != nil { t.Fatal(err) } if err := d.Close(); err != nil { t.Fatal(err) } } func resetKeySpace(session *gocql.Session) error { session.Query(`DROP KEYSPACE migrate;`).Exec() return session.Query(`CREATE KEYSPACE IF NOT EXISTS migrate WITH REPLICATION = {'class': 'SimpleStrategy', 'replication_factor': 1};`).Exec() }
[ "\"CASSANDRA_PORT_9042_TCP_ADDR\"", "\"CASSANDRA_PORT_9042_TCP_PORT\"" ]
[]
[ "CASSANDRA_PORT_9042_TCP_ADDR", "CASSANDRA_PORT_9042_TCP_PORT" ]
[]
["CASSANDRA_PORT_9042_TCP_ADDR", "CASSANDRA_PORT_9042_TCP_PORT"]
go
2
0
examples/service/autopilot/assistant/pagination/assistant_pagination_example.go
package main import ( "log" "os" "github.com/RJPearson94/twilio-sdk-go" v1 "github.com/RJPearson94/twilio-sdk-go/service/autopilot/v1" "github.com/RJPearson94/twilio-sdk-go/session/credentials" ) var autopilotClient *v1.Autopilot func init() { creds, err := credentials.New(credentials.Account{ Sid: os.Getenv("TWILIO_ACCOUNT_SID"), AuthToken: os.Getenv("TWILIO_AUTH_TOKEN"), }) if err != nil { log.Panicf("%s", err.Error()) } autopilotClient = twilio.NewWithCredentials(creds).Autopilot.V1 } func main() { paginator := autopilotClient. Assistants. NewAssistantsPaginator() for paginator.Next() { currentPage := paginator.CurrentPage() log.Printf("%v assistant(s) found on page %v", len(currentPage.Assistants), currentPage.Meta.Page) } if paginator.Error() != nil { log.Panicf("%s", paginator.Error()) } log.Printf("Total number of assistant(s) found: %v", len(paginator.Assistants)) }
[ "\"TWILIO_ACCOUNT_SID\"", "\"TWILIO_AUTH_TOKEN\"" ]
[]
[ "TWILIO_AUTH_TOKEN", "TWILIO_ACCOUNT_SID" ]
[]
["TWILIO_AUTH_TOKEN", "TWILIO_ACCOUNT_SID"]
go
2
0
runbot.py
from imports import * from discord.ext import commands from dotenv import load_dotenv # Create a .env file and add your discord token as DISCORD_TOKEN for the bot to run. Package for dotevn have to be installed as well load_dotenv() TOKEN = os.getenv('DISCORD_TOKEN') GUILD = os.getenv('DISCORD_GUILD') now = datetime.datetime.now() t1 = time.perf_counter() messageid = [] client = commands.Bot(command_prefix="&") client.remove_command('help') async def athchk(ctx): return ctx.author == client.user @client.event async def on_ready(): print(f'{client.user} is connected to Discord on the following guild:\n') for guild in client.guilds: if guild.name == GUILD: break print(f'{guild.name}(id: {guild.id})') await client.change_presence(status=discord.Status.dnd, activity=discord.Activity(type=discord.ActivityType.listening, name="Type &help or &commands")) # .idle,.online,.dnd @client.command(name='invite') async def inv(ctx): if await athchk(ctx): return else: await ctx.reply(embed=discord.Embed(title="Invite me using this link!", url="https://discord.com/api/oauth2/authorize?client_id=936259816572198912&permissions=8&scope=bot", color=ctx.author.color)) # Word Filter starts from here # Comamnds for JSON file handlings for adding/removing words in the filter list. # Comman Call for checking if the word exist in the filter list. @client.command(name='wfcheck') async def wfcheck(ctx): if await athchk(ctx): return word = None args = ctx.message.content.split(' ') if len(args) > 1: word = args[-1] word_list_status = words.json_file_read_word(f'./Server_Files/{ctx.message.guild.id}.json', word) msg = await ctx.reply(embed=word_list_status) await msg.add_reaction( "🪄") # This and the following reactions does not do anything as of now, the code is supposed to delete the msg. # Command Call for adding a word to the list. @client.command(name='wfadd') async def wfadd(ctx): if await athchk(ctx): return else: if len(ctx.message.content.split(' ')) == 1: msg = await ctx.reply("Oops you forgot to mention what word to add! Try again") else: writing_status = words.json_file_write(f'./Server_Files/{ctx.message.guild.id}.json', { ctx.message.content.split(' ')[1]: words.word_generator(ctx.message.content.split(' ')[1])}) msg = await ctx.reply(embed=writing_status) await msg.add_reaction("🪄") # Command Call for deleting a word to the list. @client.command(name='wfdelete') async def wfdelete(ctx): if await athchk(ctx): return word = None args = ctx.message.content.split(' ') if len(args) > 1: word = args[-1] if not word: def check(message): # Checking if the author was the same one who triggered the message and if it is in the same channel. 
return (message.author == ctx.message.author) and (message.channel == ctx.message.channel) confirmation_embed = discord.Embed(title="Confirmation", description="You are about to clear the complete list of this server", colour=discord.Colour.orange()) confirmation_embed.add_field(name="To Confirm", value="Type `Yes`/`yes`/`Y`/`y`", inline=True) confirmation_embed.add_field(name="To Cancel", value="Type `No`/`no`/`N`/`n`", inline=True) confirmation_embed.set_footer(text="Default time out in 30 seconds!⏱️") msg = await ctx.message.channel.send(embed=confirmation_embed) await msg.add_reaction("🪄") try: user_response = await client.wait_for('message', check=check, timeout=30) if user_response.content in ('Yes', 'yes', 'y', 'Y'): delete_status = words.json_word_delete(f'./Server_Files/{ctx.message.guild.id}.json') msg = await ctx.message.channel.send(embed=delete_status) else: msg = await ctx.message.channel.send("That was close!") except asyncio.TimeoutError: msg = await ctx.message.channel.send("You missed the window!\nExiting Menu..") else: delete_status = words.json_word_delete(f'./Server_Files/{ctx.message.guild.id}.json', word) msg = await ctx.reply(embed=delete_status) await msg.add_reaction("🪄") # Server based settings, starts from here. # To add channel to ignore list. @client.command(name='ichadd') async def ichadd(ctx): if await athchk(ctx): return if len(ctx.message.content.split(' ')) == 1: channel_add_status = server_settings.add_ignore_channels(f'Server_Settings/{ctx.message.guild.id}.json', [ctx.message.channel]) else: channel_add_status = server_settings.add_ignore_channels(f'Server_Settings/{ctx.message.guild.id}.json', ctx.message.channel_mentions) msg = await ctx.reply(embed=channel_add_status) await msg.add_reaction("🪄") # Delete channels from ignore list @client.command(name='ichdel') async def ichdel(ctx): if await athchk(ctx): return if len(ctx.message.content.split(' ')) == 1: def check(message): # Checking if the author was the same one who triggered the message and if it is in the same channel. return (message.author == ctx.message.author) and (message.channel == ctx.message.channel) confirmation_embed = discord.Embed(title="Confirmation", description="You are about to clear the complete ignore channel list of this server", colour=discord.Colour.orange()) confirmation_embed.add_field(name="To Confirm", value="Type `Yes`/`yes`/`Y`/`y`", inline=True) confirmation_embed.add_field(name="To Cancel", value="Type `No`/`no`/`N`/`n`", inline=True) confirmation_embed.set_footer(text="Default time out in 30 seconds!⏱️") msg = await ctx.message.channel.send(embed=confirmation_embed) await msg.add_reaction("🪄") try: user_response = await client.wait_for('message', check=check, timeout=30) if user_response.content in ('Yes', 'yes', 'y', 'Y'): delete_status = server_settings.del_ingored_channel(f'Server_Settings/{ctx.message.guild.id}.json') msg = await ctx.message.channel.send(embed=delete_status) else: msg = await ctx.message.channel.send("That was close!") except asyncio.TimeoutError: msg = await ctx.message.channel.send("You missed the window!\nExiting Menu..") else: delete_status = server_settings.del_ingored_channel(f'Server_Settings/{ctx.message.guild.id}.json', ctx.message.channel_mentions) msg = await ctx.reply(embed=delete_status) await msg.add_reaction("🪄") # Add role to ignore list. 
@client.command(name='iradd') async def iradd(ctx): if await athchk(ctx): return if len(ctx.message.content.split(' ')) == 1: msg = await ctx.channel.reply('Woah, I think you missed to mention the role/roles.') else: role_add_status = server_settings.add_ignore_roles(f'Server_Settings/{ctx.message.guild.id}.json', ctx.message.role_mentions) msg = await ctx.reply(embed=role_add_status) await msg.add_reaction("🪄") # Delete role from ignore list @client.command(name='irdel') async def irdel(ctx): if await athchk(ctx): return if len(ctx.message.content.split(' ')) == 1: def check(message): # Checking if the author was the same one who triggered the message and if it is in the same channel. return (message.author == ctx.message.author) and (message.channel == ctx.message.channel) confirmation_embed = discord.Embed(title="Confirmation", description="You are about to clear the complete ignore channel list of this server", colour=discord.Colour.orange()) confirmation_embed.add_field(name="To Confirm", value="Type `Yes`/`yes`/`Y`/`y`", inline=True) confirmation_embed.add_field(name="To Cancel", value="Type `No`/`no`/`N`/`n`", inline=True) confirmation_embed.set_footer(text="Default time out in 30 seconds!⏱️") msg = await ctx.message.channel.send(embed=confirmation_embed) await msg.add_reaction("🪄") try: user_response = await client.wait_for('message', check=check, timeout=30) if user_response.content in ('Yes', 'yes', 'y', 'Y'): delete_status = server_settings.del_ingored_roles(f'Server_Settings/{ctx.message.guild.id}.json') msg = await ctx.message.channel.send(embed=delete_status) else: msg = await ctx.message.channel.send("That was close!") except asyncio.TimeoutError: msg = await ctx.message.channel.send("You missed the window!\nExiting Menu..") else: delete_status = server_settings.del_ingored_roles(f'Server_Settings/{ctx.message.guild.id}.json', ctx.message.channel_mentions) msg = await ctx.reply(embed=delete_status) await msg.add_reaction("🪄") # View ignored roles/channels @client.command(name='igview') async def igview(ctx): if await athchk(ctx): return try: args = ctx.message.content.split(' ')[1] if len(ctx.message.content.split(' ')) == 1: msg = await ctx.reply("Oops you forgot to mention what word to add! Try again") await msg.add_reaction("🪄") else: if args == 'channel': channels = server_settings.view_ignored(f'Server_Settings/{ctx.message.guild.id}.json', 'channel') embed = discord.Embed(title='Ignored channels', description='Here is the list of all the ignored channels in this server!', colour=discord.Colour.blue()) if not channels: msg = await ctx.channel.send("I ran into some error! Try again maybe?") await msg.add_reaction("🪄") for item in channels: embed.add_field(name=client.get_channel(item), value=f"ID: {item}") await ctx.reply(embed=embed) elif args == 'role': roles = server_settings.view_ignored(f'Server_Settings/{ctx.message.guild.id}.json', 'role') embed = discord.Embed(title='Ignored roles', description='Here is the list of all the ignored role in this server!', colour=discord.Colour.blue()) if not roles: msg = await ctx.channel.send("I ran into some error! Try again maybe?") await msg.add_reaction("🪄") for item in roles: embed.add_field(name=ctx.guild.get_role(item), value=f"ID: {item}") await ctx.reply(embed=embed) else: msg = await ctx.channel.send(f"I don't think I have details of those.") await msg.add_reaction("🪄") except Exception: msg = await ctx.channel.send("I ran into some error! 
Try again maybe?") await msg.add_reaction("🪄") # @client.event() to call the reaction element and then get the payload from that message with reaction to make actions on that message. # Event listener starts from here # Used to check if sent message is a bot command or not. def is_command(msg): bot_commands = ('&wfcheck', '&wfadd', '&wfdelete') if any(msg.startswith(i) for i in bot_commands): # any can be used as a or operator return True else: return False # This is used to make the bot read every messsage sent on the server. @client.listen('on_message') async def msg_check(message): server_id = message.channel.guild.id channel = message.channel author_roles = [] for item in message.author.roles: # getting all roles of author as object in form of list author_roles.append(item.id) try: with open(f"Server_Settings/{server_id}.json") as data_reading: ignore_data = json.load(data_reading) channel_ids = ignore_data['channel_id'] role_ids = ignore_data['role_id'] if message.channel.id in channel_ids: return for item in author_roles: if item in role_ids: return if is_command(message.content): return with open(f"Server_Files/{server_id}.json") as data_reading: # To get banned word data server_data = json.load(data_reading) for key in server_data.keys(): for item in server_data[key]: if item in message.content.split(): # Splitting the words in a message to check if the word is present in the message. await message.delete() msg = await channel.send(f'{message.author.mention}, That word is banned!') await msg.add_reaction("🪄") except discord.Forbidden: # If the bot does not have enough permissions to delete the message. msg = await channel.send("I do not have enough permission to delete messages!") await msg.add_reaction("🪄") except Exception: msg = await channel.send("I ran into some error! I was unable to delete that message!") await msg.add_reaction("🪄") # Help command in the bot @client.command(name='help') async def helps(ctx): if await athchk(ctx): return hlp_cmd = None args = ctx.message.content.split(' ') try: if len(args) > 1: hlp_cmd = args[-1] if not hlp_cmd: hlps = help_commands.help_cmd() msg = await ctx.channel.send(embed=hlps) else: hlps = help_commands.help_cmd(hlp_cmd) msg = await ctx.channel.send(embed=hlps) except Exception: msg = await ctx.channel.send("I ran into some error! I was unable to delete that message!") await msg.add_reaction("🪄") # To add: # - Only admins/users with role permission of administrator or manage message can or should invoke most of the bot comamnds except viewing. # - Feature to add/remove combination of special letter from JSON file # - Add a generic ban word list option # - Option to add it or remove the generic ban word list # - Add server logging feature # - Log a user's ban word usage (counter) # - Word usage counter (Think if you need this, make it optional logging feature else.) client.run(TOKEN)
[]
[]
[ "DISCORD_TOKEN", "DISCORD_GUILD" ]
[]
["DISCORD_TOKEN", "DISCORD_GUILD"]
python
2
0
api/telebot.go
package api import ( "fmt" "os" "path" "strconv" "strings" "sync" "time" "github.com/ailinykh/pullanusbot/v2/core" "github.com/ailinykh/pullanusbot/v2/helpers" tb "gopkg.in/tucnak/telebot.v2" ) // Telebot is a telegram API type Telebot struct { bot *tb.Bot logger core.ILogger commandHandlers []string textHandlers []core.ITextHandler documentHandlers []core.IDocumentHandler imageHandlers []core.IImageHandler videoHandlers []core.IVideoHandler } // CreateTelebot is a default Telebot factory func CreateTelebot(token string, logger core.ILogger) *Telebot { poller := tb.NewMiddlewarePoller(&tb.LongPoller{Timeout: 10 * time.Second}, func(upd *tb.Update) bool { return true }) var err error bot, err := tb.NewBot(tb.Settings{ Token: token, Poller: poller, }) if err != nil { panic(err) } telebot := &Telebot{bot, logger, []string{}, []core.ITextHandler{}, []core.IDocumentHandler{}, []core.IImageHandler{}, []core.IVideoHandler{}} bot.Handle(tb.OnText, func(m *tb.Message) { for _, h := range telebot.textHandlers { err := h.HandleText(makeMessage(m), makeIBot(m, telebot)) if err != nil { logger.Errorf("%T: %s", h, err) telebot.reportError(m, err) } } }) var mutex sync.Mutex bot.Handle(tb.OnDocument, func(m *tb.Message) { // TODO: inject `download` to get rid of MIME cheking if m.Document.MIME[:5] == "video" || m.Document.MIME == "image/gif" { mutex.Lock() defer mutex.Unlock() logger.Infof("Attempt to download %s %s (sent by %s)", m.Document.FileName, m.Document.MIME, m.Sender.Username) path := path.Join(os.TempDir(), m.Document.FileName) err := bot.Download(&m.Document.File, path) if err != nil { logger.Error(err) return } logger.Infof("Downloaded to %s", strings.ReplaceAll(path, os.TempDir(), "$TMPDIR/")) defer os.Remove(path) for _, h := range telebot.documentHandlers { err := h.HandleDocument(&core.Document{ File: core.File{Name: m.Document.FileName, Path: path}, MIME: m.Document.MIME, }, makeMessage(m), makeIBot(m, telebot)) if err != nil { logger.Errorf("%T: %s", h, err) telebot.reportError(m, err) } } } }) bot.Handle(tb.OnPhoto, func(m *tb.Message) { image := &core.Image{ ID: m.Photo.FileID, FileURL: m.Photo.FileURL, Width: m.Photo.Width, Height: m.Photo.Height, } for _, h := range telebot.imageHandlers { err := h.HandleImage(image, makeMessage(m), makeIBot(m, telebot)) if err != nil { logger.Errorf("%T: %s", h, err) telebot.reportError(m, err) } } }) bot.Handle(tb.OnVideo, func(m *tb.Message) { video := &core.Video{ ID: m.Video.FileID, Width: m.Video.Width, Height: m.Video.Height, } for _, h := range telebot.videoHandlers { err := h.HandleImage(video, makeMessage(m), makeIBot(m, telebot)) if err != nil { logger.Errorf("%T: %s", h, err) telebot.reportError(m, err) } } }) return telebot } // Download is a core.IImageDownloader interface implementation func (t *Telebot) Download(image *core.Image) (*core.File, error) { //TODO: potential race condition file := tb.FromURL(image.FileURL) file.FileID = image.ID name := helpers.RandStringRunes(4) + ".jpg" path := path.Join(os.TempDir(), name) err := t.bot.Download(&file, path) if err != nil { t.logger.Error(err) return nil, err } t.logger.Infof("image %s downloaded to %s", file.UniqueID, path) return makeFile(name, path), nil } // AddHandler register object as one of core.Handler's func (t *Telebot) AddHandler(handler ...interface{}) { switch h := handler[0].(type) { case core.IDocumentHandler: t.documentHandlers = append(t.documentHandlers, h) case core.ITextHandler: t.textHandlers = append(t.textHandlers, h) case core.IImageHandler: t.imageHandlers = 
append(t.imageHandlers, h) case string: t.registerCommand(h) if f, ok := handler[1].(func(*core.Message, core.IBot) error); ok { t.bot.Handle(h, func(m *tb.Message) { f(makeMessage(m), &TelebotAdapter{m, t}) }) } else { panic("interface must implement func(*core.Message, core.IBot) error") } default: panic(fmt.Sprintf("something wrong with %s", h)) } } // Run bot loop func (t *Telebot) Run() { t.bot.Start() } func (t *Telebot) registerCommand(command string) { for _, c := range t.commandHandlers { if c == command { panic("Handler for " + command + " already set!") } } t.commandHandlers = append(t.commandHandlers, command) } func (t *Telebot) reportError(m *tb.Message, e error) { chatID, err := strconv.ParseInt(os.Getenv("ADMIN_CHAT_ID"), 10, 64) if err != nil { return } chat := &tb.Chat{ID: chatID} opts := &tb.SendOptions{DisableWebPagePreview: true} t.bot.Forward(chat, m, opts) t.bot.Send(chat, e.Error(), opts) } func makeMessage(m *tb.Message) *core.Message { text := m.Text if m.Document != nil { text = m.Caption } message := &core.Message{ ID: m.ID, ChatID: m.Chat.ID, IsPrivate: m.Private(), Sender: makeUser(m.Sender), Text: text, } if m.ReplyTo != nil { message.ReplyTo = makeMessage(m.ReplyTo) } if m.Video != nil { message.Video = makeVideo(m.Video) } return message } func makeUser(u *tb.User) *core.User { return &core.User{ ID: u.ID, FirstName: u.FirstName, LastName: u.LastName, Username: u.Username, LanguageCode: u.LanguageCode, } } func makeVideo(v *tb.Video) *core.Video { return &core.Video{ File: core.File{ Name: v.FileName, Path: v.FileURL, }, ID: v.FileID, Width: v.Width, Height: v.Height, Bitrate: 0, Duration: v.Duration, Codec: "", Thumb: makePhoto(v.Thumbnail), } } func makePhoto(p *tb.Photo) *core.Image { return &core.Image{ File: core.File{ Name: p.FileLocal, Path: p.FilePath, }, ID: p.FileID, FileURL: p.FileURL, Width: p.Width, Height: p.Height, } } func makeFile(name string, path string) *core.File { return &core.File{ Name: name, Path: path, } } func makeIBot(m *tb.Message, t *Telebot) core.IBot { return &TelebotAdapter{m, t} }
[ "\"ADMIN_CHAT_ID\"" ]
[]
[ "ADMIN_CHAT_ID" ]
[]
["ADMIN_CHAT_ID"]
go
1
0
tfx/orchestration/experimental/core/async_pipeline_task_gen_test.py
# Lint as: python2, python3 # Copyright 2020 Google LLC. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for tfx.orchestration.experimental.core.async_pipeline_task_gen.""" import os from absl.testing import parameterized import tensorflow as tf from tfx.orchestration import metadata from tfx.orchestration.experimental.core import async_pipeline_task_gen as asptg from tfx.orchestration.experimental.core import task as task_lib from tfx.orchestration.experimental.core import task_queue as tq from tfx.orchestration.experimental.core import test_utils as otu from tfx.orchestration.portable import test_utils as tu from tfx.proto.orchestration import pipeline_pb2 from ml_metadata.proto import metadata_store_pb2 class AsyncPipelineTaskGeneratorTest(tu.TfxTest, parameterized.TestCase): def setUp(self): super(AsyncPipelineTaskGeneratorTest, self).setUp() pipeline_root = os.path.join( os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()), self.id()) self._pipeline_root = pipeline_root # Makes sure multiple connections within a test always connect to the same # MLMD instance. metadata_path = os.path.join(pipeline_root, 'metadata', 'metadata.db') self._metadata_path = metadata_path connection_config = metadata.sqlite_metadata_connection_config( metadata_path) connection_config.sqlite.SetInParent() self._mlmd_connection = metadata.Metadata( connection_config=connection_config) # Sets up the pipeline. pipeline = pipeline_pb2.Pipeline() self.load_proto_from_text( os.path.join( os.path.dirname(__file__), 'testdata', 'async_pipeline.pbtxt'), pipeline) self._pipeline = pipeline self._pipeline_info = pipeline.pipeline_info self._pipeline_runtime_spec = pipeline.runtime_spec self._pipeline_runtime_spec.pipeline_root.field_value.string_value = ( pipeline_root) # Extracts components. 
self._example_gen = pipeline.nodes[0].pipeline_node self._transform = pipeline.nodes[1].pipeline_node self._trainer = pipeline.nodes[2].pipeline_node self._task_queue = tq.TaskQueue() def _verify_exec_node_task(self, node, execution_id, task): self.assertEqual( task_lib.NodeUid.from_pipeline_node(self._pipeline, node), task.node_uid) self.assertEqual(execution_id, task.execution.id) if node == self._transform: expected_context_names = ['my_pipeline', 'my_transform'] expected_input_artifacts_keys = ['examples'] expected_output_artifacts_keys = ['transform_graph'] output_artifact_uri = os.path.join(self._pipeline_root, node.node_info.id, 'transform_graph', str(execution_id)) elif node == self._trainer: expected_context_names = ['my_pipeline', 'my_trainer'] expected_input_artifacts_keys = ['examples', 'transform_graph'] expected_output_artifacts_keys = ['model'] output_artifact_uri = os.path.join(self._pipeline_root, node.node_info.id, 'model', str(execution_id)) else: raise ValueError('Not configured to verify for node: {}'.format(node)) self.assertCountEqual(expected_context_names, [c.name for c in task.contexts]) self.assertCountEqual(expected_input_artifacts_keys, list(task.input_artifacts.keys())) self.assertCountEqual(expected_output_artifacts_keys, list(task.output_artifacts.keys())) self.assertEqual( output_artifact_uri, task.output_artifacts[expected_output_artifacts_keys[0]][0].uri) self.assertEqual( os.path.join(self._pipeline_root, node.node_info.id, '.system', 'executor_execution', str(execution_id), 'executor_output.pb'), task.executor_output_uri) self.assertEqual( os.path.join(self._pipeline_root, node.node_info.id, '.system', 'stateful_working_dir', str(execution_id)), task.stateful_working_dir) def _dequeue_and_test(self, use_task_queue, node, execution_id): if use_task_queue: task = self._task_queue.dequeue() self._task_queue.task_done(task) self._verify_exec_node_task(node, execution_id, task) def _generate_and_test(self, use_task_queue, num_initial_executions, num_tasks_generated, num_new_executions, num_active_executions): """Generates tasks and tests the effects.""" with self._mlmd_connection as m: executions = m.store.get_executions() self.assertLen( executions, num_initial_executions, 'Expected {} execution(s) in MLMD.'.format(num_initial_executions)) task_gen = asptg.AsyncPipelineTaskGenerator( m, self._pipeline, self._task_queue.contains_task_id) tasks = task_gen.generate() self.assertLen( tasks, num_tasks_generated, 'Expected {} task(s) to be generated.'.format(num_tasks_generated)) executions = m.store.get_executions() num_total_executions = num_initial_executions + num_new_executions self.assertLen( executions, num_total_executions, 'Expected {} execution(s) in MLMD.'.format(num_total_executions)) active_executions = [ e for e in executions if e.last_known_state == metadata_store_pb2.Execution.RUNNING ] self.assertLen( active_executions, num_active_executions, 'Expected {} active execution(s) in MLMD.'.format( num_active_executions)) if use_task_queue: for task in tasks: self._task_queue.enqueue(task) return tasks, active_executions def test_no_tasks_generated_when_new(self): with self._mlmd_connection as m: task_gen = asptg.AsyncPipelineTaskGenerator(m, self._pipeline, lambda _: False) tasks = task_gen.generate() self.assertEmpty(tasks, 'Expected no task generation when no inputs.') self.assertEmpty( m.store.get_executions(), 'There must not be any registered executions since no tasks were ' 'generated.') @parameterized.parameters(False, True) def 
test_task_generation(self, use_task_queue): """Tests async pipeline task generation. Args: use_task_queue: If task queue is enabled, new tasks are only generated if a task with the same task_id does not already exist in the queue. `use_task_queue=False` is useful to test the case of task generation when task queue is empty (for eg: due to orchestrator restart). """ # Simulate that ExampleGen has already completed successfully. otu.fake_example_gen_run(self._mlmd_connection, self._example_gen, 1, 1) # Before generation, there's 1 execution in MLMD. with self._mlmd_connection as m: executions = m.store.get_executions() self.assertLen(executions, 1) # Generate once. with self.subTest(generate=1): tasks, active_executions = self._generate_and_test( use_task_queue, num_initial_executions=1, num_tasks_generated=1, num_new_executions=1, num_active_executions=1) self._verify_exec_node_task(self._transform, active_executions[0].id, tasks[0]) # No new effects if generate called again. with self.subTest(generate=2): tasks, active_executions = self._generate_and_test( use_task_queue, num_initial_executions=2, num_tasks_generated=0 if use_task_queue else 1, num_new_executions=0, num_active_executions=1) execution_id = active_executions[0].id if not use_task_queue: self._verify_exec_node_task(self._transform, execution_id, tasks[0]) # Mark transform execution complete. otu.fake_transform_output(self._mlmd_connection, self._transform, active_executions[0]) # Dequeue the corresponding task if task queue is enabled. self._dequeue_and_test(use_task_queue, self._transform, active_executions[0].id) # Trainer execution task should be generated next. with self.subTest(generate=3): tasks, active_executions = self._generate_and_test( use_task_queue, num_initial_executions=2, num_tasks_generated=1, num_new_executions=1, num_active_executions=1) execution_id = active_executions[0].id self._verify_exec_node_task(self._trainer, execution_id, tasks[0]) # Mark the trainer execution complete. otu.fake_trainer_output(self._mlmd_connection, self._trainer, active_executions[0]) # Dequeue the corresponding task if task queue is enabled. self._dequeue_and_test(use_task_queue, self._trainer, execution_id) # No more tasks should be generated as there are no new inputs. with self.subTest(generate=4): self._generate_and_test( use_task_queue, num_initial_executions=3, num_tasks_generated=0, num_new_executions=0, num_active_executions=0) # Fake another ExampleGen run. otu.fake_example_gen_run(self._mlmd_connection, self._example_gen, 1, 1) # Both transform and trainer tasks should be generated as they both find # new inputs. with self.subTest(generate=4): tasks, active_executions = self._generate_and_test( use_task_queue, num_initial_executions=4, num_tasks_generated=2, num_new_executions=2, num_active_executions=2) self._verify_exec_node_task(self._transform, active_executions[0].id, tasks[0]) self._verify_exec_node_task(self._trainer, active_executions[1].id, tasks[1]) # Re-generation will produce the same tasks when task queue enabled. with self.subTest(generate=5): tasks, active_executions = self._generate_and_test( use_task_queue, num_initial_executions=6, num_tasks_generated=0 if use_task_queue else 2, num_new_executions=0, num_active_executions=2) if not use_task_queue: self._verify_exec_node_task(self._transform, active_executions[0].id, tasks[0]) self._verify_exec_node_task(self._trainer, active_executions[1].id, tasks[1]) # Mark transform execution complete. 
otu.fake_transform_output(self._mlmd_connection, self._transform, active_executions[0]) # Dequeue the corresponding task. self._dequeue_and_test(use_task_queue, self._transform, active_executions[0].id) # Mark the trainer execution complete. otu.fake_trainer_output(self._mlmd_connection, self._trainer, active_executions[1]) self._dequeue_and_test(use_task_queue, self._trainer, active_executions[1].id) # Trainer should be triggered again due to transform producing new output. with self.subTest(generate=6): tasks, active_executions = self._generate_and_test( use_task_queue, num_initial_executions=6, num_tasks_generated=1, num_new_executions=1, num_active_executions=1) self._verify_exec_node_task(self._trainer, active_executions[0].id, tasks[0]) # Finally, no new tasks once trainer completes. otu.fake_trainer_output(self._mlmd_connection, self._trainer, active_executions[0]) # Dequeue corresponding task. self._dequeue_and_test(use_task_queue, self._trainer, active_executions[0].id) with self.subTest(generate=7): self._generate_and_test( use_task_queue, num_initial_executions=7, num_tasks_generated=0, num_new_executions=0, num_active_executions=0) if use_task_queue: self.assertTrue(self._task_queue.is_empty()) if __name__ == '__main__': tf.test.main()
[]
[]
[ "TEST_UNDECLARED_OUTPUTS_DIR" ]
[]
["TEST_UNDECLARED_OUTPUTS_DIR"]
python
1
0
task/manage.py
#!/usr/bin/env python import os import sys if __name__ == '__main__': os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings.dev') try: from django.core.management import execute_from_command_line except ImportError as exc: raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) execute_from_command_line(sys.argv)
[]
[]
[]
[]
[]
python
0
0
Alerters/ses.py
# coding=utf-8 try: import boto3 boto3_available = True except ImportError: boto3_available = False import os from .alerter import Alerter class SESAlerter(Alerter): """Send email alerts using Amazon's SES service.""" def __init__(self, config_options): if not boto3_available: print("Boto3 package is not available, cannot use SESAlerter.") return Alerter.__init__(self, config_options) self.from_addr = Alerter.get_config_option( config_options, 'from', allow_empty=False ) self.to_addr = Alerter.get_config_option( config_options, 'to', allow_empty=False ) self.support_catchup = True self.ses_client_params = {} aws_region = Alerter.get_config_option(config_options, 'aws_region') if aws_region: os.environ["AWS_DEFAULT_REGION"] = aws_region aws_access_key = Alerter.get_config_option(config_options, 'aws_access_key') aws_secret_key = Alerter.get_config_option(config_options, 'aws_secret_access_key') if aws_access_key and aws_secret_key: self.ses_client_params['aws_access_key_id'] = aws_access_key self.ses_client_params['aws_secret_access_key'] = aws_secret_key def send_alert(self, name, monitor): """Send the email.""" type = self.should_alert(monitor) (days, hours, minutes, seconds) = self.get_downtime(monitor) if monitor.is_remote(): host = " on %s " % monitor.running_on else: host = " on host %s" % self.hostname mail = {'Source': self.from_addr} mail['Destination'] = {'ToAddresses': [self.to_addr]} if type == "": return elif type == "failure": message = {'Subject': {'Data': "[%s] Monitor %s Failed!" % (self.hostname, name)}} message['Body'] = {'Text': {'Data': """Monitor %s%s has failed. Failed at: %s Downtime: %d+%02d:%02d:%02d Virtual failure count: %d Additional info: %s Description: %s""" % ( name, host, self.format_datetime(monitor.first_failure_time()), days, hours, minutes, seconds, monitor.virtual_fail_count(), monitor.get_result(), monitor.describe()) }} try: if monitor.recover_info != "": message['Body']['Text']['Data'] += "\nRecovery info: %s" % monitor.recover_info except AttributeError: message['Body']['Text']['Data'] += "\nNo recovery info available" elif type == "success": message = {'Subject': {'Data': "[%s] Monitor %s succeeded" % (self.hostname, name)}} message['Body'] = {'Text': {'Data': "Monitor %s%s is back up.\nOriginally failed at: %s\nDowntime: %d+%02d:%02d:%02d\nDescription: %s" % (name, host, self.format_datetime(monitor.first_failure_time()), days, hours, minutes, seconds, monitor.describe())}} elif type == "catchup": message = {'Subject': {'Data': "[%s] Monitor %s failed earlier!" % (self.hostname, name)}} message['Body'] = {'Text': {'Data': "Monitor %s%s failed earlier while this alerter was out of hours.\nFailed at: %s\nVirtual failure count: %d\nAdditional info: %s\nDescription: %s" % (name, host, self.format_datetime(monitor.first_failure_time()), monitor.virtual_fail_count(), monitor.get_result(), monitor.describe())}} else: print("Unknown alert type %s" % type) return mail['Message'] = message if not self.dry_run: try: client = boto3.client('ses', **self.ses_client_params) client.send_email(**mail) except Exception as e: print("Couldn't send mail: %s" % e) self.available = False else: print("dry_run: would send email:") print("Subject: %s" % message['Subject']['Data']) print("Body: %s" % message['Body']['Text']['Data'])
[]
[]
[ "AWS_DEFAULT_REGION" ]
[]
["AWS_DEFAULT_REGION"]
python
1
0
store/datastore/store.go
package datastore import ( "database/sql" "os" "github.com/drone/drone/shared/envconfig" "github.com/drone/drone/store" "github.com/drone/drone/store/migration" _ "github.com/go-sql-driver/mysql" _ "github.com/lib/pq" _ "github.com/mattn/go-sqlite3" "github.com/rubenv/sql-migrate" "github.com/russross/meddler" log "github.com/Sirupsen/logrus" ) // From creates a datastore from an existing database connection. func From(db *sql.DB) store.Store { return store.New( &nodestore{db}, &userstore{db}, &repostore{db}, &keystore{db}, &buildstore{db}, &jobstore{db}, &logstore{db}, ) } // Load opens a new database connection with the specified driver // and connection string specified in the environment variables. func Load(env envconfig.Env) store.Store { var ( driver = env.String("DATABASE_DRIVER", "sqlite3") config = env.String("DATABASE_CONFIG", "drone.sqlite") ) log.Infof("using database driver %s", driver) log.Infof("using database config %s", config) return From( Open(driver, config), ) } // Open opens a new database connection with the specified // driver and connection string and returns a store. func Open(driver, config string) *sql.DB { db, err := sql.Open(driver, config) if err != nil { log.Errorln(err) log.Fatalln("database connection failed") } setupMeddler(driver) if err := setupDatabase(driver, db); err != nil { log.Errorln(err) log.Fatalln("migration failed") } return db } // OpenTest opens a new database connection for testing purposes. // The database driver and connection string are provided by // environment variables, with fallback to in-memory sqlite. func openTest() *sql.DB { var ( driver = "sqlite3" config = ":memory:" ) if os.Getenv("DATABASE_DRIVER") != "" { driver = os.Getenv("DATABASE_DRIVER") config = os.Getenv("DATABASE_CONFIG") } return Open(driver, config) } // helper function to setup the databsae by performing // automated database migration steps. func setupDatabase(driver string, db *sql.DB) error { var migrations = &migrate.AssetMigrationSource{ Asset: migration.Asset, AssetDir: migration.AssetDir, Dir: driver, } _, err := migrate.Exec(db, driver, migrations, migrate.Up) return err } // helper function to setup the meddler default driver // based on the selected driver name. func setupMeddler(driver string) { switch driver { case "sqlite3": meddler.Default = meddler.SQLite case "mysql": meddler.Default = meddler.MySQL case "postgres": meddler.Default = meddler.PostgreSQL } }
[ "\"DATABASE_DRIVER\"", "\"DATABASE_DRIVER\"", "\"DATABASE_CONFIG\"" ]
[]
[ "DATABASE_DRIVER", "DATABASE_CONFIG" ]
[]
["DATABASE_DRIVER", "DATABASE_CONFIG"]
go
2
0
build/docs/NodeMonitorGuide/source/conf.py
# -*- coding: utf-8 -*- # # OpenSplice Node Monitor User Guide build configuration file, created by # ReST Editor on 08-Jun-2015 # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os import time # import liteconfig # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. #extensions = ['sphinx.ext.todo'] #extensions = ['sphinx.ext.todo', 'numfig'] #extensions = ['sphinx.ext.todo', 'sphinx.ext.ifconfig'] #def setup(app): # app.add_config_value('rmi_languages', '', True) #rmi_languages = 'C++ and Java' #rmi_languages = 'C++' #rst_prolog = """ #.. |rmi_langs| replace:: C++ and Java #.. |product_name| replace:: OpenSplice #""" # Add any paths that contain templates here, relative to this directory. templates_path = [u'_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. source_encoding = u'utf-8-sig' # The master toctree document. master_doc = u'index' # General information about the project. project = u'OpenSplice Node Monitor User Guide' #copyright = u'2015, PrismTech' this_year = time.strftime( '%Y' ) copyright = u'{y}, PrismTech'.format( y = this_year ) print 'Copyright string is:', copyright # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. #version = u's' #version = liteconfig.version version = u'6.x' # The full version, including alpha/beta/rc tags. #release = u's' release = version #release = u'00' print 'Short version string is:', version print 'Full version string is:', release # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. language = u'en' # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # Force blank date with today = ' ' (space, not empty string) today = ' ' # *************** # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. 
pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = u'sphinxdoc' html_theme = u'vortextheme' # LINUX PATH: html_theme_path = ['../../.'] # WINDOWS PATH: # html_theme_path = ['..\..\.'] #build theme directory in lite using environment variable, so shared amongst books # insight team can delete, #html_theme_path = [os.environ['VL_HOME'] + '/build/docs'] # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None html_title = 'OpenSplice Node Monitor User Guide' # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None #html_short_title = 'HTML short Title conf.py' #html_short_title = ' ' # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # ??????????????????????????????????????????????????????????????????? html_logo = './images/Vortex_logo_2014.png' # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". # html_static_path = [] html_static_path = [u'_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True html_show_sphinx = False # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'OpenSplice_Node_Monitor_User_Guide' # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). 
latex_paper_size = u'a4' # The font size ('10pt', '11pt' or '12pt'). latex_font_size = u'10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). # latex_documents = [('index', 'OpenSpliceGettingStartedGuide.tex', u'OpenSplice Getting Started Guide', u'', 'manual', True)] latex_documents = [('index', 'OpenSplice_NodeMonitorGuide.tex', u'Node Monitor User Guide', u'', 'manual', True)] # *************** # Note 'author' field empty # Added 'True' to end of generated line to suppress 'Index & Tables' # A dictionary that contains LaTeX snippets that override those Sphinx usually # puts into the generated .tex files. latex_elements = { 'babel': '\\usepackage[english]{babel}' } # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None latex_logo = 'images/Vortex-OpenSplice-Cover.png' # *************** # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # *************** # * THIS GETS RID OF BLANK PAGES AT ENDS OF CHAPTERS & ToC latex_elements = { 'classoptions': ',openany, oneside', 'babel': '\\usepackage[english]{babel}' } # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [('index', 'OpenSplice_NodeMonitor_Guide', u'OpenSplice_NodeMonitor_Guide', [u'PrismTech'], 1)] # * NOT TESTED # -- Additional options -------------------------------------------------------- todo_include_todos = True # * NOT TESTED
[]
[]
[ "VL_HOME" ]
[]
["VL_HOME"]
python
1
0
modules/data_query.py
import os from google.cloud import bigquery # Define credentials os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = 'bigquery-credentials.json' client = bigquery.Client() def _fetch_data_bigquery(query): """ Take SQL query in Standard SQL and returns a Pandas DataFrame of results ref: https://cloud.google.com/bigquery/docs/reference/standard-sql/enabling-standard-sql """ return client.query(query, location="US").to_dataframe() class DataQuery: """ Data fetcher """ def __init__(self, name, query, year): """ name: a given name for the query query: string standard SQL query name: name of the FR name year: year """ self.name = name self.year = year self.query = query % {'name': self.name, 'year': self.year} def get_data(self): # Repalce name and year in the query print('running', self.name, self.year) # Get data from BigQuery return _fetch_data_bigquery(self.query)
[]
[]
[ "GOOGLE_APPLICATION_CREDENTIALS" ]
[]
["GOOGLE_APPLICATION_CREDENTIALS"]
python
1
0
main.go
package main import ( "context" "os" "os/signal" "strings" "syscall" "github.com/bwmarrin/discordgo" _ "github.com/go-sql-driver/mysql" "github.com/joho/godotenv" log "github.com/sirupsen/logrus" "lang.pkg/cmd" "lang.pkg/ent" "lang.pkg/router" ) func init() { if err := godotenv.Load(); err != nil { log.Fatal("Error loading .env file") } } func main() { client, err := ent.Open("mysql", os.Getenv("DB_URI")) if err != nil { log.Fatalf("Failed opening connection to mysql: %v", err) } defer client.Close() if err := client.Schema.Create(context.Background()); err != nil { log.Fatalf("Failed creating schema resources: %v", err) } cmd.Bootstrap(client) discord, err := discordgo.New("Bot " + os.Getenv("TOKEN")) if err != nil { log.Fatal("Client generating error") } discord.AddHandler(messageCreate) discord.AddHandlerOnce(func(s *discordgo.Session, r *discordgo.Ready) { s.UpdateListeningStatus("👻 !help / 영어듣기평가") }) if err := discord.Open(); err != nil { log.Fatal("Opening connecting error") } log.Info("Discord bot is running") sc := make(chan os.Signal, 1) signal.Notify(sc, syscall.SIGINT, syscall.SIGTERM, os.Interrupt, os.Kill) <-sc discord.Close() } func messageCreate(s *discordgo.Session, m *discordgo.MessageCreate) { if !strings.HasPrefix(m.Content, "!") { return } if m.Author.Bot { return } router.Run(s, m) }
[ "\"DB_URI\"", "\"TOKEN\"" ]
[]
[ "DB_URI", "TOKEN" ]
[]
["DB_URI", "TOKEN"]
go
2
0
discord/bot2.py
import discord import os from discord.ext import commands from discord.utils import get client = commands.Bot(command_prefix='.') client.remove_command('help') TOKEN = os.getenv("CHAT") guild = None @client.event async def on_ready(): global guild print('[ + ] Started {0.user}'.format(client)) await client.wait_until_ready() guild = client.get_guild(747233433511788637) await client.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name="Anime")) @client.command(aliases=['purge'], help='Clears certin number of messages') @commands.has_permissions(manage_messages=True) async def clear(ctx, amount: int): await ctx.channel.purge(limit=amount+1) await ctx.send(f'Cleared {amount} messages', delete_after=5) @clear.error async def clear_error(ctx, error): if isinstance(error, commands.MissingRequiredArgument): await ctx.send('Please specify an amount of messages to delete.') @client.command(help='Wiki Table of Contents') async def wiki(ctx): embed = discord.Embed(title='📚 **[ UMORIA WIKI ]**', description=' > Stack up on useless information\n > The wiki is organized into Discord threads. \n > Click one of the links to jump to an archived thread. ', color=discord.Color(0x8FB6AB)) embed.add_field(inline=True, name='Chapters', value='**I. Introduction**\nBasic Information About Umoria\n\n**II. Reference**\nGood to know\n\n**III. Server**\nInformation about gameplay\n\n**IV. Player**') embed.add_field(inline=True, name='Sections', value='i : [info](https://discord.com/channels/747233433511788637/910695916359544912/910695919475904583) \nii : [faq](https://discord.com/channels/747233433511788637/910695995866767380/910695998777618493) \n\nr1 : [numeral](https://discord.com/channels/747233433511788637/910696220069097542/910696222887641118)\nr2 : [value](https://discord.com/channels/747233433511788637/910696273626152980/910696277782708254)\n\ni : [blessing](https://discord.com/channels/747233433511788637/910696370292264981/910696374192980068)\nii: [npc](https://discord.com/channels/747233433511788637/910696408431075369/910696411769765888)\n\ni : [armor](https://discord.com/channels/747233433511788637/910696495215415336/910696498054967396)\nii : [tools](https://discord.com/channels/747233433511788637/910696547455496252/910696550915797002)') embed.add_field(inline=True, name='\u200b', value='iii : [ip](https://discord.com/channels/747233433511788637/910696029488304198/910696031350566963) \niv : [discord](https://discord.com/channels/747233433511788637/910696074358960209/910696076753920050)\n\nr3 : [commands](https://discord.com/channels/747233433511788637/910696329607528459/910696332811968593)\n\n\niii : [quests](https://discord.com/channels/747233433511788637/910696450697093141/910696453490495488)')#'') await ctx.send(embed=embed) @client.event async def on_message(message): if "discord.gg/" in message.content or "discord.com/invite/" in message.content: if not message.author.name == 'Java' and not message.author.discriminator == 3865: await message.delete() if "Blessing" in message.content and message.channel.name == 'portal': channel = guild.get_channel(873434537802207273) msg = message.content name = msg.replace('```', '') await channel.edit(name=name) if ".link" in message.content and message.channel.name != "link": await message.delete() await message.channel.send('```This command can only be used in #link```', delete_after=3) await client.process_commands(message) if __name__ == '__main__': client.run(TOKEN)
[]
[]
[ "CHAT" ]
[]
["CHAT"]
python
1
0
config/global_earthkitrc.go
package config import ( "flag" "github.com/rakyll/globalconf" "github.com/opslabjpl/goamz/aws" "os" "path" "time" ) var DOCKER_PATH = flag.String("docker_path", "/usr/bin/docker", "Path to the docker binary (usually /usr/bin/docker).") var DOCKER_HTTP_PORT = flag.Int("docker_http_port", -1, "HTTP port that the docker daemon is listening on (defaults to UNIX socket if unspecified).") var AMI = flag.String("ami", "todo", "AMI to use") var SECURITY_GROUP = flag.String("security_group", "todo", "Security group to use") var SUBNET = flag.String("subnet", "todo", "Subnet to use") var EC2_KEY_PAIR = flag.String("ec2_key_pair", "todo", "EC2 keyname to use") var AWS_ACCESS_KEY = flag.String("aws_access_key", "todo", "AWS ACCESS KEY to use") var AWS_SECRET_KEY = flag.String("aws_secret_key", "todo", "AWS SECRET KEY to use") var S3_BUCKET = flag.String("s3_bucket", "earthkit-cli", "S3 bucket to use") var S3_KEY_PREFIX = flag.String("s3_key_prefix", ".earthkit", "S3 key prefix to use") var AWS_REGION = flag.String("aws_region", "us-gov-west-1", "AWS Region to use") var DOCKER_REGISTRY = flag.String("docker_registry", "", "Docker registry to use") var ETCDQ_S3_PATH = flag.String("etcdq_s3_path", "bin/ubuntu/etcdq", "S3 path to etcdq binary") var DOCKER_CONF_S3_PATH = flag.String("docker_conf_s3_path", "conf/docker.conf", "S3 path to docker conf file") var DOCKER_INSTALL_S3_PATH = flag.String("docker_install_s3_path", "bin/get.docker.io.sh", "S3 path to script for isntall docker") var DATA_DIR = flag.String("data_dir", "/mnt/data/earthkit", "Directory to mount EBS volume to for cloud processing") var CACHE_LIMIT = flag.Int64("cache_limit", 5368709120, "Cache limit (in bytes)") var EKIT_IMG = flag.String("earthkit_img", "earthkit-cli", "Docker image containing earhtkit-cli command") var Verbose = flag.Bool("v", false, "enables verbose output") var Region aws.Region = aws.Regions[*AWS_REGION] func WorkspacePrefix(workspace string) string { return path.Join(*S3_KEY_PREFIX, workspace) // return path.Join(S3KeyPrefix, workspace) } func AWSAuth() (auth aws.Auth) { auth, err := aws.GetAuth(*AWS_ACCESS_KEY, *AWS_SECRET_KEY, "", time.Time{}) if err != nil { auth, err = aws.EnvAuth() if err != nil { panic(err.Error()) } } return } func Load() { homeDir := os.Getenv("HOME") opts := globalconf.Options{Filename: homeDir + "/.earthkitrc"} conf, err := globalconf.NewWithOptions(&opts) if err != nil { // Just parse command line arguments at the root level if the configuration file doesn't exist flag.Parse() return } conf.ParseAll() Region = aws.Regions[*AWS_REGION] }
[ "\"HOME\"" ]
[]
[ "HOME" ]
[]
["HOME"]
go
1
0
pkg/controller/acicontainersoperator.go
// Copyright 2020 Cisco Systems, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package controller import ( "context" "encoding/base64" "encoding/json" "fmt" "io/ioutil" "os" "os/exec" "reflect" "sync" "time" operators "github.com/noironetworks/aci-containers/pkg/acicontainersoperator/apis/aci.ctrl/v1alpha1" operatorclientset "github.com/noironetworks/aci-containers/pkg/acicontainersoperator/clientset/versioned" configv1 "github.com/openshift/api/config/v1" operatorv1 "github.com/openshift/api/operator/v1" routesv1 "github.com/openshift/api/route/v1" routesClientset "github.com/openshift/client-go/route/clientset/versioned" log "github.com/sirupsen/logrus" appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/config" ) // AciResources is a struct for handeling the resources of aci fabric type AciResources struct { Deployment *appsv1.Deployment HostDaemonset *appsv1.DaemonSet OvsDaemonset *appsv1.DaemonSet } // Controller here defines the Operator code handler which list watch the AciContainerOperator // Object and apply the aci_deployment.yaml in the cluster after creation/updation type Controller struct { Logger *log.Entry indexMutex sync.Mutex Operator_Clientset operatorclientset.Interface K8s_Clientset kubernetes.Interface Operator_Queue workqueue.RateLimitingInterface Deployment_Queue workqueue.RateLimitingInterface Daemonset_Queue workqueue.RateLimitingInterface Node_Queue workqueue.RateLimitingInterface Route_Queue workqueue.RateLimitingInterface Informer_Operator cache.SharedIndexInformer Informer_Deployment cache.SharedIndexInformer Informer_Daemonset cache.SharedIndexInformer Informer_Node cache.SharedIndexInformer Informer_Route cache.SharedIndexInformer Resources AciResources DnsOperatorClient client.Client // This client is specific dnsopenshift operator RoutesClient routesClientset.Interface // This client is specific routes openshift operator Openshiftflavor bool routes map[string]bool // local cache to check the routes } var Version = map[string]bool{ "openshift-4.3": true, "cloud": true, "openshift-4.4-esx": true, "openshift-4.4-openstack": true, } var Dnsoper = map[string]bool{ "openshift-4.3": true, } const aciContainersController = "aci-containers-controller" const aciContainersHostDaemonset = "aci-containers-host" const aciContainersOvsDaemonset = "aci-containers-openvswitch" func NewAciContainersOperator( acicnioperatorclient operatorclientset.Interface, k8sclient kubernetes.Interface) *Controller { log.Info("Setting up the Queue") operator_queue := 
workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()) deployment_queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()) daemonset_queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()) node_queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()) route_queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()) log.Info("Intializing Informer") aci_operator_informer := cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { return acicnioperatorclient.AciV1alpha1().AciContainersOperators(os.Getenv("SYSTEM_NAMESPACE")).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { return acicnioperatorclient.AciV1alpha1().AciContainersOperators(os.Getenv("SYSTEM_NAMESPACE")).Watch(context.TODO(), options) }, }, &operators.AciContainersOperator{}, 0, cache.Indexers{}, ) aci_deployment_informer := cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { return k8sclient.AppsV1().Deployments(os.Getenv("SYSTEM_NAMESPACE")).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { return k8sclient.AppsV1().Deployments(os.Getenv("SYSTEM_NAMESPACE")).Watch(context.TODO(), options) }, }, &appsv1.Deployment{}, 0, cache.Indexers{}, ) aci_daemonset_informer := cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { return k8sclient.AppsV1().DaemonSets(os.Getenv("SYSTEM_NAMESPACE")).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { return k8sclient.AppsV1().DaemonSets(os.Getenv("SYSTEM_NAMESPACE")).Watch(context.TODO(), options) }, }, &appsv1.DaemonSet{}, 0, cache.Indexers{}, ) node_informer := cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { return k8sclient.CoreV1().Nodes().List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { return k8sclient.CoreV1().Nodes().Watch(context.TODO(), options) }, }, &v1.Node{}, 0, cache.Indexers{}, ) var routesClient routesClientset.Interface var route_informer cache.SharedIndexInformer flavor := os.Getenv("ACC_PROVISION_FLAVOR") opflavor := false // intializes route watchers for Openshift flavor if Dnsoper[flavor] { restconfig, err := restclient.InClusterConfig() if err != nil { log.Error("Failed to intialize the restConfig: ", err) } else { routesClient, err = routesClientset.NewForConfig(restconfig) if err != nil { log.Error("Failed to intialize OpenshiftRoute client: ", err) } else { opflavor = true log.Info("Intializing the route informer") route_informer = cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { return routesClient.RouteV1().Routes(metav1.NamespaceAll).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { return routesClient.RouteV1().Routes(metav1.NamespaceAll).Watch(context.TODO(), options) }, }, &routesv1.Route{}, time.Duration(5)*time.Minute, cache.Indexers{}, ) } } } controller := &Controller{ Logger: log.NewEntry(log.New()), Operator_Clientset: acicnioperatorclient, K8s_Clientset: k8sclient, Informer_Operator: aci_operator_informer, Informer_Deployment: aci_deployment_informer, 
Informer_Daemonset: aci_daemonset_informer, Informer_Node: node_informer, Informer_Route: route_informer, Operator_Queue: operator_queue, Deployment_Queue: deployment_queue, Daemonset_Queue: daemonset_queue, Node_Queue: node_queue, Route_Queue: route_queue, Resources: AciResources{}, DnsOperatorClient: nil, RoutesClient: routesClient, Openshiftflavor: opflavor, routes: make(map[string]bool), } log.Info("Adding Event Handlers") aci_operator_informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { key, err := cache.MetaNamespaceKeyFunc(obj) log.Debug("Added acicontainersoperator key: ", key) if err == nil { operator_queue.Add(key) } }, UpdateFunc: func(prevObj, currentObj interface{}) { key, err := cache.MetaNamespaceKeyFunc(currentObj) log.Debug("Updated acicontainersoperator key: ", key) if err == nil { operator_queue.Add(key) } }, DeleteFunc: func(obj interface{}) { key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) log.Debug("Deleted acicontainersoperator key: ", key) if err == nil { operator_queue.Add(key) } }, }) aci_deployment_informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { dep_obj := obj.(*appsv1.Deployment) if dep_obj.Name == aciContainersController { key, err := cache.MetaNamespaceKeyFunc(obj) log.Debug("Added Deployment key :", key) if err == nil { deployment_queue.Add(key) } } }, UpdateFunc: func(prevObj, currentObj interface{}) { dep_obj := currentObj.(*appsv1.Deployment) if dep_obj.Name == aciContainersController { log.Debug("In UpdateFunc for Deployment") controller.handledeploymentUpdate(prevObj, currentObj, deployment_queue) } }, DeleteFunc: func(obj interface{}) { dep_obj := obj.(*appsv1.Deployment) if dep_obj.Name == aciContainersController { key, err := cache.MetaNamespaceKeyFunc(obj) log.Debug("Deleted Deployment key is :", key) if err == nil { deployment_queue.Add(key) } } }, }) aci_daemonset_informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { key, err := cache.MetaNamespaceKeyFunc(obj) log.Debug("The daemonset key: ", key) if err == nil { daemonset_queue.Add(key) } }, UpdateFunc: func(prevObj, currentObj interface{}) { log.Debug("In UpdateFunc for Daemonset") controller.handledaemonsetUpdate(prevObj, currentObj, daemonset_queue) }, DeleteFunc: func(obj interface{}) { key, err := cache.MetaNamespaceKeyFunc(obj) log.Debug("Deleted daemonset key is :", key) if err == nil { daemonset_queue.Add(key) } }, }) node_informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { key, err := cache.MetaNamespaceKeyFunc(obj) log.Debug("The Node key: ", key) if err == nil { node_queue.Add(key) } }, UpdateFunc: func(prevObj, currentObj interface{}) { //@TODO need to handle update log.Debug("In UpdateFunc for Node") controller.handleNodeUpdate(prevObj, currentObj, node_queue) }, DeleteFunc: func(obj interface{}) { key, err := cache.MetaNamespaceKeyFunc(obj) log.Debug("Deleted Node key is :", key) if err == nil { node_queue.Add(key) } }, }) if opflavor { //openshift flavor route_informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { key, err := cache.MetaNamespaceKeyFunc(obj) log.Debug("Add Route key: ", key) if err == nil { route_queue.Add(key) } }, UpdateFunc: func(prevObj, currentObj interface{}) { //@TODO need to handle update log.Debug("In UpdateFunc for Route") }, DeleteFunc: func(obj interface{}) { key, err := cache.MetaNamespaceKeyFunc(obj) log.Debug("Deleted route key: ", key) if err == nil 
{ route_queue.Add(key) } }, }) } return controller } func (c *Controller) handledeploymentUpdate(oldobj interface{}, newobj interface{}, queue workqueue.RateLimitingInterface) { old_dep := oldobj.(*appsv1.Deployment) new_dep := newobj.(*appsv1.Deployment) if !reflect.DeepEqual(old_dep.OwnerReferences, new_dep.OwnerReferences) { key, err := cache.MetaNamespaceKeyFunc(newobj) if err == nil { queue.Add(key) } } else { log.Info("Owner Reference is intact for ", new_dep.Name) } } func (c *Controller) handledaemonsetUpdate(oldobj interface{}, newobj interface{}, queue workqueue.RateLimitingInterface) { old_ds := oldobj.(*appsv1.DaemonSet) new_ds := newobj.(*appsv1.DaemonSet) if !reflect.DeepEqual(old_ds.OwnerReferences, new_ds.OwnerReferences) { key, err := cache.MetaNamespaceKeyFunc(newobj) if err == nil { queue.Add(key) } } else { log.Info("Owner Reference is intact for ", new_ds.Name) } } func (c *Controller) GetAciContainersOperatorCR() (*operators.AciContainersOperator, error) { var options metav1.GetOptions acicnioperator, er := c.Operator_Clientset.AciV1alpha1().AciContainersOperators(os.Getenv("SYSTEM_NAMESPACE")).Get(context.TODO(), "acicnioperator", options) if er != nil { return acicnioperator, er } return acicnioperator, nil } func (c *Controller) CreateAciContainersOperatorCR() error { log.Info("Reading the Config Map providing CR") obj := &operators.AciContainersOperator{ ObjectMeta: metav1.ObjectMeta{ Name: "acicnioperator", Namespace: os.Getenv("SYSTEM_NAMESPACE")}, } obj.Status.Status = true //Setting it default true raw, err := ioutil.ReadFile("/usr/local/etc/aci-containers/aci-operator.conf") if err != nil { log.Error(err) return err } log.Debug("acicnioperator CR is ", string(raw)) log.Info("Unmarshalling the Config-Map...") err = json.Unmarshal(raw, &obj.Spec) if err != nil { log.Error(err) return err } log.Info("Unmarshalling Successful....") log.Debug("acicnioperator CR recieved is", (obj.Spec)) if err = wait.PollInfinite(time.Second*2, func() (bool, error) { _, er := c.Operator_Clientset.AciV1alpha1().AciContainersOperators(os.Getenv("SYSTEM_NAMESPACE")).Create(context.TODO(), obj, metav1.CreateOptions{}) if er != nil { if errors.IsAlreadyExists(er) { //Happens due to etcd timeout log.Info(er) return true, nil } else { log.Info("Waiting for CRD to get registered to etcd....: ", err) return false, nil } } return true, nil }); err != nil { return err } return nil } func (c *Controller) Run(stopCh <-chan struct{}) { c.Logger.Info("Controller.Run: initiating") log.Info("Checking if acicnioperator CR already present") _, err := c.GetAciContainersOperatorCR() if err != nil { log.Info("Not Present ..Creating acicnioperator CR") er := c.CreateAciContainersOperatorCR() if er != nil { log.Error(err) } } if err == nil { log.Info("acicnioperator CR already present") } // Run informer to start watching and listening go c.Informer_Operator.Run(stopCh) go c.Informer_Deployment.Run(stopCh) go c.Informer_Daemonset.Run(stopCh) go c.Informer_Node.Run(stopCh) // Sync the current resources if !cache.WaitForCacheSync(stopCh, c.Informer_Operator.HasSynced, c.Informer_Deployment.HasSynced, c.Informer_Daemonset.HasSynced, c.Informer_Node.HasSynced) { utilruntime.HandleError(fmt.Errorf("Controller.Sync: Error syncing the cache")) } c.Logger.Info("Controller.Sync: Cache sync complete") // Run queue for each Informer go c.processQueue(c.Operator_Queue, c.Informer_Operator.GetIndexer(), func(obj interface{}) bool { return c.handleOperatorCreate(obj) }, func(obj interface{}) bool { return 
c.handleOperatorDelete(obj) }, stopCh) go c.processQueue(c.Deployment_Queue, c.Informer_Deployment.GetIndexer(), func(obj interface{}) bool { return c.handleDeploymentCreate(obj) }, func(obj interface{}) bool { return c.handleDeploymentDelete(obj) }, stopCh) go c.processQueue(c.Daemonset_Queue, c.Informer_Daemonset.GetIndexer(), func(obj interface{}) bool { return c.handleDaemonsetCreate(obj) }, func(obj interface{}) bool { return c.handleDaemonsetDelete(obj) }, stopCh) go c.processQueue(c.Node_Queue, c.Informer_Node.GetIndexer(), func(obj interface{}) bool { return c.handleNodeCreate(obj) }, func(obj interface{}) bool { return c.handleNodeDelete(obj) }, stopCh) if c.Openshiftflavor { c.enableRouteInformer(stopCh) } } func (c *Controller) processQueue(queue workqueue.RateLimitingInterface, store cache.Store, createhandler func(interface{}) bool, deletehandler func(interface{}) bool, stopCh <-chan struct{}) { go wait.Until(func() { log.Info("Starting the handlers....") for { key, quit := queue.Get() if quit { break } var requeue bool switch key := key.(type) { case chan struct{}: close(key) case string: obj, exists, err := store.GetByKey(key) if err == nil && exists { log.Info("Controller.processNextItem: object Creation detected:", key) requeue = createhandler(obj) } if !exists { log.Info("Controller.processNextItem: object deleted detected:", key) deletehandler(key) } } if requeue { log.Info("Adding the key back to the queue ", key) queue.AddRateLimited(key) } else { queue.Forget(key) } queue.Done(key) } }, time.Second, stopCh) <-stopCh queue.ShutDown() } func (c *Controller) CheckOwnerReference(reference []metav1.OwnerReference) bool { for _, ownerRef := range reference { if ownerRef.Kind == "AciContainersOperator" { log.Debug("OwnerReference Already Present") return true } } return false } func (c *Controller) UpdateDeploymentOwnerReference(acicontainersoperator *operators.AciContainersOperator) bool { deploymentsClient := c.K8s_Clientset.AppsV1().Deployments(os.Getenv("SYSTEM_NAMESPACE")) if deploymentsClient == nil { log.Info("Error in Fetching deploymentsClient...") return true } c.Resources.Deployment, _ = deploymentsClient.Get(context.TODO(), aciContainersController, metav1.GetOptions{}) if c.Resources.Deployment == nil { log.Infof("%s deployment is nil..returning", aciContainersController) return false } if !c.CheckOwnerReference(c.Resources.Deployment.ObjectMeta.OwnerReferences) { c.Resources.Deployment.OwnerReferences = []metav1.OwnerReference{ *metav1.NewControllerRef(acicontainersoperator, operators.SchemeGroupVersion.WithKind("AciContainersOperator")), } _, err := deploymentsClient.Update(context.TODO(), c.Resources.Deployment, metav1.UpdateOptions{}) if err != nil { log.Error(err.Error()) return false } log.Infof("Successfully updated owner reference to the %s deployment", aciContainersController) } else { log.Infof("Owner reference is intact for %s", aciContainersController) } return true } func (c *Controller) UpdateHostDaemonsetOwnerReference(acicontainersoperator *operators.AciContainersOperator) bool { hostdaemonsetclient := c.K8s_Clientset.AppsV1().DaemonSets(os.Getenv("SYSTEM_NAMESPACE")) if hostdaemonsetclient == nil { log.Info("Error in Fetching hostdaemonsetclient...") return true } c.Resources.HostDaemonset, _ = hostdaemonsetclient.Get(context.TODO(), aciContainersHostDaemonset, metav1.GetOptions{}) if c.Resources.HostDaemonset == nil { log.Infof("%s daemonset is nil.....returning", aciContainersHostDaemonset) return false } if 
!c.CheckOwnerReference(c.Resources.HostDaemonset.OwnerReferences) { c.Resources.HostDaemonset.OwnerReferences = []metav1.OwnerReference{ *metav1.NewControllerRef(acicontainersoperator, operators.SchemeGroupVersion.WithKind("AciContainersOperator")), } _, err := hostdaemonsetclient.Update(context.TODO(), c.Resources.HostDaemonset, metav1.UpdateOptions{}) if err != nil { log.Error(err.Error()) return false } log.Infof("Successfully updated owner reference to the %s daemonset", aciContainersHostDaemonset) } else { log.Infof("Owner reference is intact for %s", aciContainersHostDaemonset) } return true } func (c *Controller) UpdateOvsDaemonsetOwnerReference(acicontainersoperator *operators.AciContainersOperator) bool { ovsdaemonsetclient := c.K8s_Clientset.AppsV1().DaemonSets(os.Getenv("SYSTEM_NAMESPACE")) if ovsdaemonsetclient == nil { log.Infof("Error in Fetching ovsdaemonsetclient...") return true } c.Resources.OvsDaemonset, _ = ovsdaemonsetclient.Get(context.TODO(), aciContainersOvsDaemonset, metav1.GetOptions{}) if c.Resources.OvsDaemonset == nil { log.Infof("%s daemonset is nil.....returning", aciContainersOvsDaemonset) return false } if !c.CheckOwnerReference(c.Resources.OvsDaemonset.OwnerReferences) { c.Resources.OvsDaemonset.OwnerReferences = []metav1.OwnerReference{ *metav1.NewControllerRef(acicontainersoperator, operators.SchemeGroupVersion.WithKind("AciContainersOperator")), } _, err := ovsdaemonsetclient.Update(context.TODO(), c.Resources.OvsDaemonset, metav1.UpdateOptions{}) if err != nil { log.Error(err.Error()) return false } log.Infof("Successfully updated owner reference to the %s daemonset", aciContainersOvsDaemonset) } else { log.Infof("Owner reference is intact for %s", aciContainersOvsDaemonset) } return true } func (c *Controller) handleOperatorCreate(obj interface{}) bool { log.Info("OperatorHandler.ObjectCreated") acicontainersoperator := obj.(*operators.AciContainersOperator) log.Debug(acicontainersoperator.Spec.Config) if acicontainersoperator.Spec.Config == "" { log.Info("acicnioperator CR config is Nil...Exiting") acicontainersoperator.Status.Status = false _, er := c.Operator_Clientset.AciV1alpha1().AciContainersOperators(os.Getenv("SYSTEM_NAMESPACE")).Update(context.TODO(), acicontainersoperator, metav1.UpdateOptions{}) if er != nil { log.Error(er) } return false } dec, err := base64.StdEncoding.DecodeString(acicontainersoperator.Spec.Config) if err != nil { log.Error(err) return true } f, err := os.Create("aci-deployment.yaml") if err != nil { log.Error(err) return true } if _, err := f.Write(dec); err != nil { log.Error(err) return true } if err := f.Sync(); err != nil { log.Error(err) return true } if err := f.Close(); err != nil { log.Error(err) return true } log.Info("Platform flavor is ", acicontainersoperator.Spec.Flavor) if Version[acicontainersoperator.Spec.Flavor] { clusterConfig := &configv1.Network{ TypeMeta: metav1.TypeMeta{APIVersion: configv1.GroupVersion.String(), Kind: "Network"}, ObjectMeta: metav1.ObjectMeta{Name: "cluster"}, } cfg, err := config.GetConfig() scheme := runtime.NewScheme() err = configv1.Install(scheme) if err != nil { log.Error(err) return true } rclient, err := client.New(cfg, client.Options{Scheme: scheme}) if err != nil { return true } err = rclient.Get(context.TODO(), types.NamespacedName{ Name: "cluster", }, clusterConfig) if err != nil { log.Info(err) return true } log.Info("Current Configuration Spec of type Network is ", clusterConfig.Spec) log.Info("Current status of type Network is ", clusterConfig.Status) if 
!reflect.DeepEqual(clusterConfig.Status.ClusterNetwork, clusterConfig.Spec.ClusterNetwork) || !reflect.DeepEqual(clusterConfig.Status.NetworkType, clusterConfig.Spec.NetworkType) || !reflect.DeepEqual(clusterConfig.Status.NetworkType, clusterConfig.Spec.NetworkType) { log.Info("Updating status field of openshift resource of type network ....") clusterConfig.Status.ClusterNetwork = clusterConfig.Spec.ClusterNetwork clusterConfig.Status.NetworkType = clusterConfig.Spec.NetworkType clusterConfig.Status.ServiceNetwork = clusterConfig.Spec.ServiceNetwork log.Info("Updated clusterConfig.Status is ", clusterConfig.Status) ctx := context.TODO() err = rclient.Update(ctx, clusterConfig) if err != nil { log.Info(err) return true } } } log.Info("Applying Aci Deployment") //Currently the Kubectl version is v.1.14. This will be updated by the acc-provision according //to the platform specification cmd := exec.Command("kubectl", "apply", "-f", "aci-deployment.yaml") log.Debug(cmd) _, err = cmd.Output() if err != nil { log.Error(err) return true } log.Info("Adding Aci Operator OwnerRefrence to resources ....") c.indexMutex.Lock() if !(c.UpdateDeploymentOwnerReference(acicontainersoperator)) { log.Info("Error Updating Deployment Owner Reference") c.indexMutex.Unlock() return true } if !(c.UpdateHostDaemonsetOwnerReference(acicontainersoperator)) { log.Info("Error Updating HostAgent Daemonset Owner Reference") c.indexMutex.Unlock() return true } if !(c.UpdateOvsDaemonsetOwnerReference(acicontainersoperator)) { log.Info("Error Updating Ovs Daemonset Owner Reference") c.indexMutex.Unlock() return true } c.indexMutex.Unlock() return false } func (c *Controller) handleOperatorDelete(obj interface{}) bool { log.Info("ACI CNI OperatorHandler.ObjectDeleted") return false } func (c *Controller) handleDeploymentCreate(obj interface{}) bool { acicontainersoperator, err := c.GetAciContainersOperatorCR() if err != nil { log.Info("Not Present ..Creating acicnioperator CR") return true } c.indexMutex.Lock() if !(c.UpdateDeploymentOwnerReference(acicontainersoperator)) { log.Info("Error Updating Deployment Owner Reference") c.indexMutex.Unlock() return true } c.indexMutex.Unlock() return false } func (c *Controller) handleDeploymentDelete(obj interface{}) bool { log.Infof("%s Deployment Deleted", aciContainersController) return false } func (c *Controller) handleDaemonsetCreate(obj interface{}) bool { daemonset := obj.(*appsv1.DaemonSet) acicontainersoperator, err := c.GetAciContainersOperatorCR() if err != nil { log.Info("Not Present ..Creating acicnioperator CR") return true } c.indexMutex.Lock() if daemonset.Name == aciContainersHostDaemonset { if !(c.UpdateHostDaemonsetOwnerReference(acicontainersoperator)) { log.Info("Error Updating HostDaemonset Owner Reference") c.indexMutex.Unlock() return true } } else { if !(c.UpdateOvsDaemonsetOwnerReference(acicontainersoperator)) { log.Info("Error Updating OvsDaemonset Owner Reference") c.indexMutex.Unlock() return true } } c.indexMutex.Unlock() return false } func (c *Controller) handleDaemonsetDelete(obj interface{}) bool { log.Infof("aci-containers Daemonset Deleted") return false } // intialize the dnsoperator client, // computes the dnsSpec. // local cache for all the routes will be updated. 
// if there is change in the dns Spec, triggers the update func (c *Controller) updatednsOperator() error { log.Info("Update dnsoperator cr") dnsInfo := &operatorv1.DNS{ TypeMeta: metav1.TypeMeta{APIVersion: operatorv1.GroupVersion.String(), Kind: "DNS"}, ObjectMeta: metav1.ObjectMeta{Name: "default"}, } if c.DnsOperatorClient == nil { cfg, err := config.GetConfig() scheme := runtime.NewScheme() err = operatorv1.Install(scheme) if err != nil { return err } c.DnsOperatorClient, err = client.New(cfg, client.Options{Scheme: scheme}) if err != nil { return err } } err := c.DnsOperatorClient.Get(context.TODO(), types.NamespacedName{ Name: "default"}, dnsInfo) if err != nil { return err } if c.RoutesClient == nil { log.Info("Route client is nil") return nil } var options metav1.ListOptions routes, err := c.RoutesClient.RouteV1().Routes(metav1.NamespaceAll).List(context.TODO(), options) if err != nil { return err } if len(routes.Items) == 0 { return nil } var nodeAddress []string nodeAddress, err = c.getNodeAddress() if err != nil { return err } if len(nodeAddress) == 0 { return nil } log.Info("NodeAddress: ", nodeAddress) // compute the dns servers info var servers []operatorv1.Server for _, route := range routes.Items { var server operatorv1.Server key := route.ObjectMeta.Namespace + "/" + route.ObjectMeta.Name server.Name = key server.Zones = append(server.Zones, route.Spec.Host) server.ForwardPlugin.Upstreams = nodeAddress servers = append(servers, server) } if !reflect.DeepEqual(dnsInfo.Spec.Servers, servers) { dnsInfo.Spec.Servers = servers err = c.DnsOperatorClient.Update(context.TODO(), dnsInfo) if err != nil { return err } } c.indexMutex.Lock() for _, route := range routes.Items { key := route.ObjectMeta.Namespace + "/" + route.ObjectMeta.Name log.Infof("Route added to cache: %s", key) c.routes[key] = true } c.indexMutex.Unlock() log.Infof("Updated dnsInfo: %+v", dnsInfo) return nil } func (c *Controller) getNodeAddress() ([]string, error) { var options metav1.ListOptions nodelist, err := c.K8s_Clientset.CoreV1().Nodes().List(context.TODO(), options) if err != nil { log.Info("Failed to List the nodes: ", err) return []string{}, err } var nodeAddress []string for _, node := range nodelist.Items { if node.DeletionTimestamp != nil { continue } if _, ok := node.ObjectMeta.Labels["node-role.kubernetes.io/master"]; ok { continue } address := node.Status.Addresses for _, val := range address { if val.Type == v1.NodeInternalIP { nodeAddress = append(nodeAddress, val.Address) } } } return nodeAddress, nil } func (c *Controller) getDnsInfo() (*operatorv1.DNS, error) { dnsInfo := &operatorv1.DNS{ TypeMeta: metav1.TypeMeta{APIVersion: operatorv1.GroupVersion.String(), Kind: "DNS"}, ObjectMeta: metav1.ObjectMeta{Name: "default"}, } err := c.DnsOperatorClient.Get(context.TODO(), types.NamespacedName{ Name: "default"}, dnsInfo) if err != nil { log.Info(err) return nil, err } return dnsInfo, nil } // it reads all the node ip address. 
// updates if there is any changes in the address computed func (c *Controller) updateDnsOperatorSpec(add bool) bool { if c.DnsOperatorClient == nil || !c.Openshiftflavor { return false } dnsInfo, err := c.getDnsInfo() if err != nil { return true } // Add and no servers present compute for all the routes if add && len(dnsInfo.Spec.Servers) == 0 { err = c.updatednsOperator() if err != nil { log.Info("Failed to update the dnsOperatorCr: ", err) return true } return false } var nodeAddress []string nodeAddress, err = c.getNodeAddress() if err != nil { return true } if !reflect.DeepEqual(dnsInfo.Spec.Servers[0].ForwardPlugin.Upstreams, nodeAddress) { // This is node delete case when there is no worker nodes present // set the spec to nil if !add && len(nodeAddress) == 0 { dnsInfo.Spec = operatorv1.DNSSpec{} } else { for _, server := range dnsInfo.Spec.Servers { server.ForwardPlugin.Upstreams = nodeAddress } } err = c.DnsOperatorClient.Update(context.TODO(), dnsInfo) if err != nil { log.Info("Failed to update the dnsInfo: ", err) return true } } log.Infof("Updated dnsInfo: %+v", dnsInfo) return false } // handle node create to update the dnsOperatorSpec func (c *Controller) handleNodeCreate(obj interface{}) bool { log.Infof("node created") return c.updateDnsOperatorSpec(true) } // handle node delete func (c *Controller) handleNodeDelete(obj interface{}) bool { log.Infof("node Deleted") return c.updateDnsOperatorSpec(false) } // handle route create // local route cache will be updated // if route is already present it will ignore silently as it isupdate happend in operator create func (c *Controller) handleRouteCreate(obj interface{}) bool { route := obj.(*routesv1.Route) log.Infof("route created: %s", route.ObjectMeta.Name) if c.DnsOperatorClient == nil { return false } key, _ := cache.MetaNamespaceKeyFunc(obj) c.indexMutex.Lock() _, ok := c.routes[key] c.indexMutex.Unlock() if ok { return false } dnsInfo, err := c.getDnsInfo() if err != nil { return true } // Check if already exists in dnsInfo then no need to update dnsinfo for _, server := range dnsInfo.Spec.Servers { if key == server.Name { return false } } var server operatorv1.Server server.Name = key server.Zones = append(server.Zones, route.Spec.Host) // if already computed update the cache if len(dnsInfo.Spec.Servers) > 0 { server.ForwardPlugin.Upstreams = dnsInfo.Spec.Servers[0].ForwardPlugin.Upstreams } else { // compute the node ip's fresh nodeaddr, err := c.getNodeAddress() if err != nil { return true } if len(nodeaddr) == 0 { return false } server.ForwardPlugin.Upstreams = nodeaddr } dnsInfo.Spec.Servers = append(dnsInfo.Spec.Servers, server) err = c.DnsOperatorClient.Update(context.TODO(), dnsInfo) if err != nil { log.Info("Failed to update the dnsInfo: ", err) return true } c.indexMutex.Lock() c.routes[key] = true c.indexMutex.Unlock() log.Infof("Route added to cache:%s", key) log.Infof("Updated dnsInfo: %+v", dnsInfo) return false } // handle route delete func (c *Controller) handleRouteDelete(obj interface{}) bool { key := fmt.Sprintf("%v", obj) log.Infof("route deleted: %s", key) c.indexMutex.Lock() _, ok := c.routes[key] c.indexMutex.Unlock() if !ok { return false } if c.DnsOperatorClient == nil { return false } dnsInfo, err := c.getDnsInfo() if err != nil { return true } for i := range dnsInfo.Spec.Servers { if dnsInfo.Spec.Servers[i].Name == key { dnsInfo.Spec.Servers = append(dnsInfo.Spec.Servers[:i], dnsInfo.Spec.Servers[i+1:]...) 
break } } err = c.DnsOperatorClient.Update(context.TODO(), dnsInfo) if err != nil { log.Info("Failed to update the dnsInfo: ", err) return true } c.indexMutex.Lock() delete(c.routes, key) c.indexMutex.Unlock() log.Infof("Route deleted from cache:%s", key) log.Infof("Updated dnsInfo: %+v", dnsInfo) return false } func (c *Controller) enableRouteInformer(stopCh <-chan struct{}) { go func() { var options metav1.ListOptions for { Pods, err := c.K8s_Clientset.CoreV1().Pods("openshift-apiserver").List(context.TODO(), options) if err == nil && (len(Pods.Items) > 0 && Pods.Items[0].Status.ContainerStatuses[0].Ready == true) { log.Info("Openshift-apiserver Pod found start router informer") err = c.updatednsOperator() if err != nil { log.Info("Failed to update the dnsOperatorCr: ", err) } go c.Informer_Route.Run(stopCh) cache.WaitForCacheSync(stopCh, c.Informer_Route.HasSynced) go c.processQueue(c.Route_Queue, c.Informer_Route.GetIndexer(), func(obj interface{}) bool { return c.handleRouteCreate(obj) }, func(obj interface{}) bool { return c.handleRouteDelete(obj) }, stopCh) break } time.Sleep(time.Minute) } }() } func (c *Controller) handleNodeUpdate(oldobj interface{}, newobj interface{}, queue workqueue.RateLimitingInterface) { old_node := oldobj.(*v1.Node) new_node := newobj.(*v1.Node) if !reflect.DeepEqual(old_node.Status.Addresses, new_node.Status.Addresses) { key, err := cache.MetaNamespaceKeyFunc(newobj) if err == nil { queue.Add(key) } } }
[ "\"SYSTEM_NAMESPACE\"", "\"SYSTEM_NAMESPACE\"", "\"SYSTEM_NAMESPACE\"", "\"SYSTEM_NAMESPACE\"", "\"SYSTEM_NAMESPACE\"", "\"SYSTEM_NAMESPACE\"", "\"ACC_PROVISION_FLAVOR\"", "\"SYSTEM_NAMESPACE\"", "\"SYSTEM_NAMESPACE\"", "\"SYSTEM_NAMESPACE\"", "\"SYSTEM_NAMESPACE\"", "\"SYSTEM_NAMESPACE\"", "\"SYSTEM_NAMESPACE\"", "\"SYSTEM_NAMESPACE\"" ]
[]
[ "ACC_PROVISION_FLAVOR", "SYSTEM_NAMESPACE" ]
[]
["ACC_PROVISION_FLAVOR", "SYSTEM_NAMESPACE"]
go
2
0
python/CRUD-examples/update/simple.py
# Code Sample from the tutorial at https://learncodeshare.net/2015/07/02/update-crud-using-cx_oracle/
# section titled "Simple update"
# Using the base template, the example code executes a simple update using positional bind variables.
import cx_Oracle
import os

connectString = os.getenv('DB_CONNECT')
# The environment variable for the connect string: DB_CONNECT=user/password@database
con = cx_Oracle.connect(connectString)


def get_all_rows(label, data_type='people'):
    # Query all rows
    cur = con.cursor()

    if (data_type == 'pets'):
        statement = 'select id, name, owner, type from lcs_pets order by owner, id'
    else:
        statement = 'select id, name, age, notes from lcs_people order by id'

    cur.execute(statement)
    res = cur.fetchall()
    print(label + ': ')
    print(res)
    print(' ')
    cur.close()


get_all_rows('Original Data')

cur = con.cursor()
statement = 'update lcs_people set age = :1 where id = :2'
cur.execute(statement, (31, 1))
con.commit()

get_all_rows('New Data')
[]
[]
[ "DB_CONNECT" ]
[]
["DB_CONNECT"]
python
1
0
qrcodeproject/wsgi.py
""" WSGI config for qrcodeproject project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'qrcodeproject.settings') application = get_wsgi_application()
[]
[]
[]
[]
[]
python
0
0
pkg/once/eventvwr.go
// Copyright (c) 2019-2022 0x9ef. All rights reserved.
// Use of this source code is governed by an MIT license
// that can be found in the LICENSE file.
package once

import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"syscall"
	"time"

	"golang.org/x/sys/windows/registry"
)

func ExecEventvwr(path string) error {
	k, exists, err := registry.CreateKey(
		registry.CURRENT_USER,
		"Software\\Classes\\mscfile\\shell\\open\\command",
		registry.ALL_ACCESS)
	if err != nil && !exists {
		return err
	}
	defer k.Close()
	defer registry.DeleteKey(registry.CURRENT_USER, "Software\\Classes\\mscfile\\shell\\open\\command")

	cmdDir := filepath.Join(os.Getenv("SYSTEMROOT"), "system32", "cmd.exe")
	value := fmt.Sprintf("%s start /k %s", cmdDir, path)
	if err = k.SetStringValue("", value); err != nil {
		return err
	}

	time.Sleep(time.Second)

	e := exec.Command("eventvwr.exe")
	e.SysProcAttr = &syscall.SysProcAttr{HideWindow: true}
	err = e.Run()
	return err
}
[ "\"SYSTEMROOT\"" ]
[]
[ "SYSTEMROOT" ]
[]
["SYSTEMROOT"]
go
1
0
sdk/keyvault/azure-keyvault-certificates/samples/hello_world_async.py
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import asyncio
import os
from azure.identity.aio import DefaultAzureCredential
from azure.keyvault.certificates.aio import CertificateClient
from azure.keyvault.certificates import CertificatePolicy, SecretContentType
from azure.core.exceptions import HttpResponseError

# ----------------------------------------------------------------------------------------------------------
# Prerequisites:
# 1. An Azure Key Vault (https://docs.microsoft.com/en-us/azure/key-vault/quick-create-cli)
#
# 2. azure-keyvault-certificates and azure-identity packages (pip install these)
#
# 3. Set Environment variables AZURE_CLIENT_ID, AZURE_TENANT_ID, AZURE_CLIENT_SECRET, VAULT_ENDPOINT
#    (See https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/keyvault/azure-keyvault-keys#authenticate-the-client)
#
# ----------------------------------------------------------------------------------------------------------
# Sample - demonstrates the basic CRUD operations on a vault(certificate) resource for Azure Key Vault
#
# 1. Create a new certificate (create_certificate)
#
# 2. Get an existing certificate (get_certificate)
#
# 3. Update an existing certificate (update_certificate)
#
# 4. Delete a certificate (delete_certificate)
#
# ----------------------------------------------------------------------------------------------------------


async def run_sample():
    # Instantiate a certificate client that will be used to call the service.
    # Notice that the client is using default Azure credentials.
    # To make default credentials work, ensure that environment variables 'AZURE_CLIENT_ID',
    # 'AZURE_CLIENT_SECRET' and 'AZURE_TENANT_ID' are set with the service principal credentials.
    VAULT_ENDPOINT = os.environ["VAULT_ENDPOINT"]
    credential = DefaultAzureCredential()
    client = CertificateClient(vault_endpoint=VAULT_ENDPOINT, credential=credential)
    try:
        # Let's create a certificate for holding bank account credentials valid for 1 year.
        # if the certificate already exists in the Key Vault, then a new version of the certificate is created.
        print("\n.. Create Certificate")

        # Before creating your certificate, let's create the management policy for your certificate.
        # Here you specify the properties of the key, secret, and issuer backing your certificate,
        # the X509 component of your certificate, and any lifetime actions you would like to be taken
        # on your certificate

        # Alternatively, if you would like to use our default policy, don't pass a policy parameter to
        # our certificate creation method
        cert_policy = CertificatePolicy(
            exportable=True,
            key_type="RSA",
            key_size=2048,
            reuse_key=False,
            content_type=SecretContentType.PKCS12,
            issuer_name="Self",
            subject_name="CN=*.microsoft.com",
            validity_in_months=24,
            san_dns_names=["sdk.azure-int.net"],
        )
        cert_name = "HelloWorldCertificate"

        # Awaiting create_certificate will return the certificate as a KeyVaultCertificate
        # if creation is successful, and the CertificateOperation if not.
        certificate = await client.create_certificate(name=cert_name, policy=cert_policy)
        print("Certificate with name '{0}' created".format(certificate.name))

        # Let's get the bank certificate using its name
        print("\n.. Get a Certificate by name")
        bank_certificate = await client.get_certificate(name=cert_name)
        print("Certificate with name '{0}' was found.".format(bank_certificate.name))

        # After one year, the bank account is still active, and we have decided to update the tags.
        print("\n.. Update a Certificate by name")
        tags = {"a": "b"}
        updated_certificate = await client.update_certificate_properties(name=bank_certificate.name, tags=tags)
        print(
            "Certificate with name '{0}' was updated on date '{1}'".format(
                bank_certificate.name, updated_certificate.properties.updated_on
            )
        )
        print(
            "Certificate with name '{0}' was updated with tags '{1}'".format(
                bank_certificate.name, updated_certificate.properties.tags
            )
        )

        # The bank account was closed, need to delete its credentials from the Key Vault.
        print("\n.. Delete Certificate")
        deleted_certificate = await client.delete_certificate(name=bank_certificate.name)
        print("Deleting Certificate..")
        print("Certificate with name '{0}' was deleted.".format(deleted_certificate.name))

    except HttpResponseError as e:
        print("\nrun_sample has caught an error. {0}".format(e.message))

    finally:
        print("\nrun_sample done")


if __name__ == "__main__":
    try:
        loop = asyncio.get_event_loop()
        loop.run_until_complete(run_sample())
        loop.close()

    except Exception as e:
        print("Top level Error: {0}".format(str(e)))
[]
[]
[ "VAULT_ENDPOINT" ]
[]
["VAULT_ENDPOINT"]
python
1
0
gateway/discarded/gateway_graph.py
# coding: utf-8 import pprint import os import json import utils import time import datetime from routertree import RouteTree, SPVHashTable from routergraph import RouterGraph from tcp import create_server_coro, send_tcp_msg_coro, find_connection from wsocket import WsocketService from jsonrpc import AsyncJsonRpc from asyncio import get_event_loop, gather, Task, sleep, ensure_future, iscoroutine from config import cg_tcp_addr, cg_wsocket_addr, cg_public_ip_port, cg_node_name from statistics import Statistics, get_timestamp from glog import tcp_logger, wst_logger # route_tree.create_node('node',cg_public_ip_port, data={Deposit:xx,Fee:xx,Balance:4 IP:xx,Publickey:xx,SpvList:[]}) # root node node_list = set() node = { "wallet_info": None, "route_tree": RouteTree(), "route_graph": RouterGraph(), "spv_table": SPVHashTable(), # configurable "name": cg_node_name } global_statistics = Statistics() class Gateway(): """ gateway class """ TransMessageType=["Rsmc", "FounderSign", "Founder", "RsmcSign", "FounderFail", "Settle", "SettleSign", "SettleFail", "RsmcFail", "Htlc", "HtlcSign", "HtlcFail"] def __init__(self): """Counstruct""" self.websocket = None self.tcpserver = None self.rpcserver = None self.loop = None self.tcp_pk_dict = {} self.ws_pk_dict = {} def _create_service_coros(self): """ 创建tcp wsocket service coros\n 它们进入event_loop执行后最终返回tcp、wsocket server """ return create_server_coro(cg_tcp_addr), WsocketService.create(cg_wsocket_addr), AsyncJsonRpc.start_jsonrpc_serv() def _save(self, services, loop): """ save servers、event loop """ self.tcpserver, self.websocket, self.rpcserver = services self.loop = loop def start(self): """ start gateway""" services_future = gather(*self._create_service_coros()) loop = get_event_loop() loop.run_until_complete(services_future) self._save(services_future.result(), loop) if os.getenv("resume"): self.resume_channel_from_db() print("###### Trinity Gateway Start Successfully! 
######") loop.run_forever() def clearn(self): """ clearn task """ # print(self.websocket.server) # self.websocket.close() tasks = gather(*Task.all_tasks(), loop=self.loop, return_exceptions=True) # print(">>>>>",tasks,"<<<<<<") # tasks.add_done_callback(lambda t: t.exception()) tasks.add_done_callback(lambda t: self.loop.stop()) tasks.cancel() while not tasks.done() and not self.loop.is_closed(): self.loop.run_forever() def close(self): self.loop.close() print("###### Trinity Gateway Closed ######") async def _stop(self): """ delay stop loop\n avoid CancelledError exception """ await sleep(0.25) self.loop.stop() def handle_tcp_request(self, protocol, bdata): try: data = utils.decode_bytes(bdata) except UnicodeDecodeError: return utils.request_handle_result.get("invalid") else: if not utils.check_tcp_message_valid(data): return utils.request_handle_result.get("invalid") else: # first save the node_pk and websocket connection map peername = protocol.transport.get_extra_info('peername') peer_ip = "{}".format(peername[0]) # check sender is peer or not # because 'tx message pass on siuatinon' sender may not peer if peer_ip == utils.get_ip_port(data["Sender"]).split(":")[0]: node_pk = utils.get_public_key(data["Sender"]) self.tcp_pk_dict[node_pk] = protocol pprint.pprint(self.tcp_pk_dict) msg_type = data.get("MessageType") if msg_type == "JoinNet": # join net sync node_list protocol.transport.send( utils.generate_ack_node_join_msg( sender, data["Receiver"], node_list ) ) node_list.add(data["sender"]) elif msg_type == "AckJoin": node_list.add(data["Receiver"]) node_list = node_list | data["NodeList"] elif msg_type == "RegisterChannel": self._send_jsonrpc_msg("TransactionMessage", data) elif msg_type == "AddChannel": # basic check # request wallet to handle if not utils.check_wallet_url_correct(data["Receiver"], local_url): # not self's wallet address protocol.transport.send(utils.generate_error_msg(local_url, data["Sender"], "Invalid wallet address")) else: self._send_jsonrpc_msg("CreateChannle", json.dumps(data)) elif msg_type in Gateway.TransMessageType: self.handle_transaction_message(data) return utils.request_handle_result.get("correct") elif msg_type == "ResumeChannel": message = utils.generate_sync_tree_msg(node["route_tree"], node["wallet_info"]["url"]) # when node accept the restart peer resume the channel request # then flag the sync message as no need to broadcast to peer's peer message["Broadcast"] = False self._send_tcp_msg(data["Sender"], message) return utils.request_handle_result.get("correct") elif msg_type == "SyncChannelState": # node receive the syncchannel msg # first update self # then sync to self's neighbors except (has synced) try: node["route_graph"].sync_channel_graph(data) tcp_logger.debug("sync graph from peer successful") print("**********number of edges is: ",node["route_graph"]._graph.number_of_edges(),"**********") print("**********",node["route_graph"].show_edgelist(),"**********") except Exception: tcp_logger.exception("sync tree from peer raise an exception") return utils.request_handle_result.get("invalid") else: if data["Broadcast"]: data["Sender"] = node["wallet_info"]["url"] self.sync_channel_route_to_peer(data) # node["route_graph"].draw_graph() return utils.request_handle_result.get("correct") def handle_wsocket_request(self, websocket, strdata): """ handle the websocket request """ # first save the spv_pk and websocket connection map data = utils.json_to_dict(strdata) spv_pk = utils.get_public_key(data["Sender"]) self.ws_pk_dict[spv_pk] = websocket # 
data = {} msg_type = data.get("MessageType") # build map bettween spv pk_key with websocket connection if msg_type == "AddChannel": # pass the message to wallet to handle self._send_jsonrpc_msg("method", strdata) elif msg_type == "CombinationTransaction": pass elif msg_type == "PaymentLink": # to send to wallet self._send_jsonrpc_msg("TransactionMessage", data) elif msg_type == "GetRouterInfo": receiver_pk, receiver_ip_port = utils.parse_url(data.get("Receiver")) slef_pk, self_ip_port = utils.parse_url(node["wallet_info"]["url"]) # spv transaction to another spv on the same node if receiver_ip_port == self_ip_port and receiver_pk != slef_pk: router = { "FullPath": [(node["wallet_info"]["url"], node["wallet_info"]["fee"])], "Next": node["wallet_info"]["url"] } else: nids = node["route_graph"].find_shortest_path_decide_by_fee(node["route_graph"].nid, receiver_ip_port) # next_jump = nids.index() full_path = [] for nid in nids: node_object = node["route_graph"]._graph.nodes(nid) url = node_object.get("Pblickkey") + "@" + node_object.get("Ip") fee = node_object.get("Fee") full_path.append((url, fee)) if not len(full_path): router = None else: next_jump = full_path[0][0] router = { "FullPath": full_path, "Next": next_jump } message = utils.generate_ack_router_info_msg(router) self._send_wsocket_msg(websocket, message) def _send_wsocket_msg(self, con, data): """ :param data: dict type """ ensure_future(WsocketService.send_msg(con, json.dumps(data))) def _send_jsonrpc_msg(self, method, data): """ :param data: dict type """ def send_jsonrpc_callback(futrue): ex = futrue.exception() if ex: print(futrue.exception()) future = ensure_future( AsyncJsonRpc.jsonrpc_request(get_event_loop(), method, json.dumps(data)) ) future.add_done_callback(send_jsonrpc_callback) def _send_tcp_msg(self, receiver ,data): """ :param receiver: str type: xxxx@ip:port \n :param data: dict type """ # time.sleep(0.04) bdata = utils.encode_bytes(data) # addr = utils.get_addr(sender) # connection = find_connection(receiver) connection = None if connection: tcp_logger.info("find the exist connection") connection.write(bdata) else: def send_tcp_callback(futrue): ex = futrue.exception() if ex: tcp_logger.error("send tcp task raise an exception: {}".format(futrue.exception())) # print(type(futrue.exception()), futrue.exception()) future = ensure_future(send_tcp_msg_coro(receiver, bdata)) future.add_done_callback(send_tcp_callback) # add tcp statistics # global_statistics.stati_tcp.send_times += 1 def handle_jsonrpc_response(self, method, response): print(response) def handle_jsonrpc_request(self, method, params): # print(params) print(type(params)) if type(params) == str: data = json.loads(params) else: data = params msg_type = data.get("MessageType") if method == "ShowNodeList": return utils.generate_ack_show_node_list(node_list) if method == "JoinNet": if data.get("ip"): self._send_tcp_msg( data["Receiver"], utils.generate_join_net_msg() ) else: pass return "{'JoinNet': 'OK'}" elif method == "SyncWalletData": print("Get the wallet sync data\n", data) body = data.get("MessageBody") node["wallet_info"] = { "url": body["Publickey"] + "@" + cg_public_ip_port, "deposit": body["CommitMinDeposit"], "fee": body["Fee"], "balance": body["Balance"] } # todo init self tree from local file or db self._init_or_update_self_graph() return json.dumps(utils.generate_ack_sync_wallet_msg(node["wallet_info"]["url"])) # search chanenl router return the path elif method == "GetRouterInfo": receiver = data.get("Receiver") receiver_ip_port = 
utils.parse_url(receiver)[1] try: # search tree through ip_port(node identifier in the tree) nids = node["route_graph"].find_shortest_path_decide_by_fee(node["route_graph"].nid, receiver_ip_port) # receiver not in the tree except Exception: return json.dumps(utils.generate_ack_router_info_msg(None)) # next_jump = nids.index() full_path = [] for nid in nids: node_object = node["route_graph"]._graph.nodes[nid] url = node_object.get("Pblickkey") + "@" + node_object.get("Ip") fee = node_object.get("Fee") full_path.append((url, fee)) next_jump = full_path[0][0] if not len(full_path): return json.dumps(utils.generate_ack_router_info_msg(None)) else: router = { "FullPath": full_path, "Next": next_jump } return json.dumps(utils.generate_ack_router_info_msg(router)) elif method == "TransactionMessage": if msg_type == "RegisterChannel": self._send_tcp_msg(data["Receiver"], data) elif msg_type in Gateway.TransMessageType: self.handle_transaction_message(data) elif msg_type in ["PaymentLinkAck", "PaymentAck"]: recv_pk = utils.get_public_key(data.get("Receiver")) connection = self.ws_pk_dict.get(recv_pk) if connection: self._send_wsocket_msg(connection,data) else: wst_logger.info("the receiver is disconnected") elif method == "SyncBlock": # push the data to spvs pass elif method == "SyncChannel": self_url = node["wallet_info"]["url"] channel_founder = data["MessageBody"]["Founder"] channel_receiver = data["MessageBody"]["Receiver"] channel_peer = channel_receiver if channel_founder == self_url else channel_founder if msg_type == "AddChannel": route_graph = node["route_graph"] # only channel receiver as the broadcast source if channel_founder == self_url: broadcast = True print("{}and{}build channel,only {} broadcast channel graph".format(channel_founder, channel_peer, channel_peer)) else: broadcast = False # if route_graph.has_node(channel_peer): # sync_type = "add_single_edge" sync_type = "add_whole_graph" message = utils.generate_sync_graph_msg( sync_type, self_url, source=self_url, target=channel_peer, route_graph=route_graph, broadcast=broadcast, excepts=[] ) self._send_tcp_msg(channel_peer, message) elif msg_type == "UpdateChannel": # first update self's balance and sync with self's peers self_node = node["route_graph"].node self_node["Balance"] = data["MessageBody"]["Balance"] message = utils.generate_sync_graph_msg( "update_node_data", self_url, source=self_url, node=self_node, excepts=[] ) self.sync_channel_route_to_peer(message) elif msg_type == "DeleteChannel": # remove channel_peer and notification peers sid = utils.get_ip_port(self_url) tid = utils.get_ip_port(channel_peer) node["route_graph"].remove_edge(sid, tid) message = utils.generate_sync_graph_msg( "remove_single_edge", self_url, source=self_url, target=channel_peer, excepts=[] ) self.sync_channel_route_to_peer(message) def handle_web_first_connect(self, websocket): if not node.get("wallet_info"): node["wallet_info"] = { "deposit": 5, "fee": 1, "url": "03a6fcaac0e13dfbd1dd48a964f92b8450c0c81c28ce508107bc47ddc511d60e75@" + cg_public_ip_port } message = utils.generate_node_list_msg(node) self._send_wsocket_msg(websocket, message) def handle_wsocket_disconnection(self, websocket): pass #self._add_event_push_web_task() def _add_event_push_web_task(self): ensure_future(WsocketService.push_by_event(self.websocket.websockets, message)) def _add_timer_push_web_task(self): message = {} ensure_future(WsocketService.push_by_timer(self.websocket.websockets, 15, message)) def _init_or_update_self_graph(self): nid = 
utils.get_ip_port(node["wallet_info"]["url"]) pk = utils.get_public_key(node["wallet_info"]["url"]) spv_list = node["spv_table"].find(pk) self_nid = node["route_graph"].nid data = { "Nid": nid, "Ip": nid, "Pblickkey": pk, "Name": node["name"], "Deposit": node["wallet_info"]["deposit"], "Fee": node["wallet_info"]["fee"], "Balance": node["wallet_info"]["balance"], "SpvList": [] if not spv_list else spv_list } if not self_nid: node["route_graph"].add_self_node(data) else: node["route_graph"].update_data(data) # todo sync to self's peers # node["route_graph"].draw_graph() def sync_channel_route_to_peer(self, message, path=None, except_peer=None): """ :param except_peer: str type (except peer url) """ if message.get("SyncType") == "add_whole_graph": message["MessageBody"] = node["route_graph"].to_json() # message["Path"] = path # nodes = message["Nodes"] # except_nid = None if not except_peer else utils.get_ip_port(except_peer) # source_nid = utils.get_ip_port(message["Source"]) excepts = message["Excepts"] # excepts.append(utils.get_ip_port(node["wallet_info"]["url"])) set_excepts = set(excepts) set_neighbors = set(node["route_graph"]._graph.neighbors(node["route_graph"].nid)) union_excepts_excepts = set_excepts.union(set_neighbors) union_excepts_excepts.add(utils.get_ip_port(node["wallet_info"]["url"])) for ner in set_neighbors: if ner not in set_excepts: receiver = node["route_graph"].node["Pblickkey"] + "@" + ner print("===============sync to the neighbors: {}=============".format(ner)) message["Excepts"] = list(union_excepts_excepts) self._send_tcp_msg(receiver, message) def handle_transaction_message(self, data): """ :param data: bytes type """ receiver_pk, receiver_ip_port = utils.parse_url(data["Receiver"]) self_pk, self_ip_port = utils.parse_url(node["wallet_info"]["url"]) # include router info situation if data.get("RouterInfo"): router = data["RouterInfo"] full_path = router["FullPath"] next_jump = router["Next"] # valid msg if next_jump == node["wallet_info"]["url"]: # arrive end if full_path[len(full_path)-1][0] == next_jump: # spv---node---spv siuation if len(full_path) == 1: # right active message = utils.generate_trigger_transaction_msg( node["wallet_info"]["url"], data["Receiver"], data["MessageBody"]["Value"] - node["wallet_info"]["fee"] ) pk = utils.parse_url(data["Receiver"])[0] self._send_wsocket_msg(self.ws_pk_dict[pk], json.dumps(message)) # left active message = utils.generate_trigger_transaction_msg( data["Sender"], node["wallet_info"]["url"], data["MessageBody"]["Value"] - node["wallet_info"]["fee"] ) self._send_jsonrpc_msg("TransactionMessage", message) # xx--node--node--..--xx siuation else: # to self's spv if receiver_pk != self_pk: message = utils.generate_trigger_transaction_msg( node["wallet_info"]["url"], data["Receiver"], data["MessageBody"]["Value"] - node["wallet_info"]["fee"] ) pk = utils.parse_url(data["Receiver"])[0] self._send_wsocket_msg(self.ws_pk_dict[pk], json.dumps(message)) # to self's wallet # previs hased send the transactions to this node # do nothing to the origin mesg else: pass # go on pass msg else: new_next_jump = full_path[full_path.index([next_jump, node["wallet_info"]["fee"]]) + 1][0] data["RouterInfo"]["Next"] = new_next_jump # node1--node2--xxx this for node1 siuation if data["Sender"] == node["wallet_info"]["url"]: message = utils.generate_trigger_transaction_msg( node["wallet_info"]["url"], # self new_next_jump, data["MessageBody"]["Value"] ) self._send_jsonrpc_msg("TransactionMessage", message) # pxxx---node----exxx for node else: # 
pxxx is spv if utils.parse_url(data["Sender"])[1] == self_ip_port: # left active left_message = utils.generate_trigger_transaction_msg( data["Sender"], node["wallet_info"]["url"], data["MessageBody"]["Value"] - node["wallet_info"]["fee"] ) # right active right_message = utils.generate_trigger_transaction_msg( node["wallet_info"]["url"], # self new_next_jump, data["MessageBody"]["Value"] - node["wallet_info"]["fee"] ) self._send_jsonrpc_msg("TransactionMessage", left_message) self._send_jsonrpc_msg("TransactionMessage", right_message) # pxxx is node else: message = utils.generate_trigger_transaction_msg( node["wallet_info"]["url"], # self new_next_jump, data["MessageBody"]["Value"] - node["wallet_info"]["fee"] ) self._send_jsonrpc_msg("TransactionMessage", message) # addr = utils.get_addr(new_next_jump) self._send_tcp_msg(new_next_jump, data) # invalid msg else: pass # no router info situation # send the msg to receiver directly else: if receiver_ip_port == self_ip_port: # to self's spv if receiver_pk != self_pk: self._send_wsocket_msg(self.ws_pk_dict[receiver_pk], data) # to self's wallet else: self._send_jsonrpc_msg("TransactionMessage", data) # to self's peer else: # addr = utils.get_addr(data["Receiver"]) self._send_tcp_msg(data["Receiver"], data) def resume_channel_from_db(self): node["wallet_info"] = { "url": "pk1@localhost:8089", "deposit": 1, "fee": 1, "balance": 10 } self._init_or_update_self_graph() peer_list = ["pk2@localhost:8090","pk3@localhost:8091"] generate_resume_channel_msg = utils.generate_resume_channel_msg for peer in peer_list: self._send_tcp_msg(peer, generate_resume_channel_msg(node["wallet_info"]["url"])) gateway_singleton = Gateway() if __name__ == "__main__": from routertree import SPVHashTable spv_table = SPVHashTable() utils.mock_node_list_data(route_tree, spv_table) print(route_tree.nodes)
[]
[]
[ "resume" ]
[]
["resume"]
python
1
0
github/github.go
// Package github provides a simple client for the GitHub API
package github

import (
	"net/http"
	"os"
	"strings"
)

const (
	mediaType   = "application/vnd.github.v3+json"
	contentType = "application/json"
	agent       = "rebasebot"
)

var (
	username   string
	password   string
	signature  string
	httpClient = &http.Client{}
)

func init() {
	username = os.Getenv("GITHUB_USERNAME")
	password = os.Getenv("GITHUB_PASSWORD")
	signature = os.Getenv("SECRET")
}

// Returns a request set up for the GitHub API
func NewGitHubRequest(path string) *http.Request {
	requestUrl := "https://api.github.com" + path

	request, _ := http.NewRequest("GET", requestUrl, nil)
	request.SetBasicAuth(username, password)
	request.Header.Set("Accept", mediaType)
	request.Header.Set("Content-Type", contentType)
	request.Header.Set("User-Agent", agent)

	return request
}

// Check to see if logged in user was mentioned in comment
func WasMentioned(c Comment) bool {
	return strings.Contains(c.Body, "@"+username)
}
[ "\"GITHUB_USERNAME\"", "\"GITHUB_PASSWORD\"", "\"SECRET\"" ]
[]
[ "GITHUB_USERNAME", "GITHUB_PASSWORD", "SECRET" ]
[]
["GITHUB_USERNAME", "GITHUB_PASSWORD", "SECRET"]
go
3
0
python/ray/tests/test_runtime_env_conda_and_pip_3.py
import os
import pytest
import sys
import time

from ray._private.test_utils import (
    wait_for_condition,
    check_local_files_gced,
    generate_runtime_env_dict,
)

import ray

from unittest import mock

if not os.environ.get("CI"):
    # This flags turns on the local development that link against current ray
    # packages and fall back all the dependencies to current python's site.
    os.environ["RAY_RUNTIME_ENV_LOCAL_DEV_MODE"] = "1"


class TestGC:
    @pytest.mark.skipif(
        os.environ.get("CI") and sys.platform != "linux",
        reason="Needs PR wheels built in CI, so only run on linux CI machines.",
    )
    @pytest.mark.parametrize("field", ["conda", "pip"])
    @pytest.mark.parametrize("spec_format", ["file", "python_object"])
    def test_actor_level_gc(
        self, runtime_env_disable_URI_cache, start_cluster, field, spec_format, tmp_path
    ):
        """Tests that actor-level working_dir is GC'd when the actor exits."""
        cluster, address = start_cluster
        ray.init(address)

        runtime_env = generate_runtime_env_dict(field, spec_format, tmp_path)

        @ray.remote
        class A:
            def test_import(self):
                import pip_install_test  # noqa: F401

                return True

        NUM_ACTORS = 5
        actors = [
            A.options(runtime_env=runtime_env).remote() for _ in range(NUM_ACTORS)
        ]
        ray.get([a.test_import.remote() for a in actors])
        for i in range(5):
            assert not check_local_files_gced(cluster)
            ray.kill(actors[i])
        wait_for_condition(lambda: check_local_files_gced(cluster))

    @pytest.mark.skipif(
        os.environ.get("CI") and sys.platform != "linux",
        reason="Needs PR wheels built in CI, so only run on linux CI machines.",
    )
    @pytest.mark.parametrize(
        "ray_start_cluster",
        [
            {
                "num_nodes": 1,
                "_system_config": {
                    "num_workers_soft_limit": 0,
                },
            },
            {
                "num_nodes": 1,
                "_system_config": {
                    "num_workers_soft_limit": 5,
                },
            },
        ],
        indirect=True,
    )
    @pytest.mark.parametrize("field", ["conda", "pip"])
    @pytest.mark.parametrize("spec_format", ["file", "python_object"])
    def test_task_level_gc(
        self,
        runtime_env_disable_URI_cache,
        ray_start_cluster,
        field,
        spec_format,
        tmp_path,
    ):
        """Tests that task-level working_dir is GC'd when the task exits."""
        cluster = ray_start_cluster

        soft_limit_zero = False
        system_config = cluster.list_all_nodes()[0]._ray_params._system_config
        if (
            "num_workers_soft_limit" in system_config
            and system_config["num_workers_soft_limit"] == 0
        ):
            soft_limit_zero = True

        runtime_env = generate_runtime_env_dict(field, spec_format, tmp_path)

        @ray.remote
        def f():
            import pip_install_test  # noqa: F401

            return True

        @ray.remote
        class A:
            def test_import(self):
                import pip_install_test  # noqa: F401

                return True

        # Start a task with runtime env
        ray.get(f.options(runtime_env=runtime_env).remote())
        if soft_limit_zero:
            # Wait for worker exited and local files gced
            wait_for_condition(lambda: check_local_files_gced(cluster))
        else:
            # Local files should not be gced because of an enough soft limit.
            assert not check_local_files_gced(cluster)

        # Start a actor with runtime env
        actor = A.options(runtime_env=runtime_env).remote()
        ray.get(actor.test_import.remote())
        # Local files should not be gced
        assert not check_local_files_gced(cluster)

        # Kill actor
        ray.kill(actor)
        if soft_limit_zero:
            # Wait for worker exited and local files gced
            wait_for_condition(lambda: check_local_files_gced(cluster))
        else:
            # Local files should not be gced because of an enough soft limit.
            assert not check_local_files_gced(cluster)

        # Start a task with runtime env
        ray.get(f.options(runtime_env=runtime_env).remote())
        if soft_limit_zero:
            # Wait for worker exited and local files gced
            wait_for_condition(lambda: check_local_files_gced(cluster))
        else:
            # Local files should not be gced because of an enough soft limit.
            assert not check_local_files_gced(cluster)


# Set scope to "class" to force this to run before start_cluster, whose scope
# is "function". We need these env vars to be set before Ray is started.
@pytest.fixture(scope="class")
def skip_local_gc():
    with mock.patch.dict(
        os.environ,
        {
            "RAY_RUNTIME_ENV_SKIP_LOCAL_GC": "1",
        },
    ):
        print("RAY_RUNTIME_ENV_SKIP_LOCAL_GC enabled.")
        yield


class TestSkipLocalGC:
    @pytest.mark.skipif(
        os.environ.get("CI") and sys.platform != "linux",
        reason="Requires PR wheels built in CI, so only run on linux CI machines.",
    )
    @pytest.mark.parametrize("field", ["conda", "pip"])
    def test_skip_local_gc_env_var(self, skip_local_gc, start_cluster, field, tmp_path):
        cluster, address = start_cluster
        runtime_env = generate_runtime_env_dict(field, "python_object", tmp_path)
        ray.init(address, namespace="test", runtime_env=runtime_env)

        @ray.remote
        def f():
            import pip_install_test  # noqa: F401

            return True

        assert ray.get(f.remote())

        ray.shutdown()

        # Give enough time for potentially uninstalling a conda env
        time.sleep(10)

        # Check nothing was GC'ed
        assert not check_local_files_gced(cluster)


if __name__ == "__main__":
    sys.exit(pytest.main(["-sv", __file__]))
[]
[]
[ "RAY_RUNTIME_ENV_LOCAL_DEV_MODE", "CI" ]
[]
["RAY_RUNTIME_ENV_LOCAL_DEV_MODE", "CI"]
python
2
0
examples/firewall/firewallConfig.go
package main import ( "flag" "fmt" "os" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/ec2/ec2iface" "github.com/aws/aws-sdk-go/service/networkfirewall" "github.com/aws/aws-sdk-go/service/networkfirewall/networkfirewalliface" "k8s.io/apimachinery/pkg/util/wait" ) type Client struct { ec2Client ec2iface.EC2API firewallClient networkfirewalliface.NetworkFirewallAPI } func main() { var profile string var region string var awsClient Client awsProfile := flag.String("p", "", "aws profile") awsRegion := flag.String("r", "", "aws region") flag.Parse() if *awsProfile != "" { fmt.Println("Profile: ", *awsProfile) profile = *awsProfile } else { profile = os.Getenv("AWS_PROFILE") } if *awsRegion != "" { fmt.Println("Region: ", *awsRegion) region = *awsRegion } else { region = os.Getenv("AWS_REGION") } if profile == "" { fmt.Println("Profile is not provided, will take in ENV") awsClient = NewClient(region, "") } else { awsClient = NewClient(region, profile) } //Create VPC Vpc, err := awsClient.CreateVPC() if err != nil { fmt.Println("Failed to create VPC ") return } //Create Internet Gateway IG, err := awsClient.CreateInternetGatewayForVpc(*Vpc.Vpc.VpcId) if err != nil { fmt.Println("Failed to create IGW") return } //Create Public Subnet PublicSubnet, err := awsClient.CreateSubnet("10.0.0.0/24", *Vpc.Vpc.VpcId) if err != nil { fmt.Println("Failed to create PublicSubnet") return } //Create Private Subnet PrivateSubnet, err := awsClient.CreateSubnet("10.0.1.0/24", *Vpc.Vpc.VpcId) if err != nil { fmt.Println("Failed to create PrivateSubnet") return } //Create Firewall Subnet FirewallSubnet, err := awsClient.CreateSubnet("10.0.2.0/24", *Vpc.Vpc.VpcId) if err != nil { fmt.Println("Failed to create Firewall Subnet") return } //Create PublicSubnet Route Table PublicRT, err := awsClient.CreateRouteTableForSubnet(*Vpc.Vpc.VpcId, *PublicSubnet.Subnet.SubnetId) if err != nil { fmt.Println("Failed to create Public Subnet Route Table") return } //Create PrivateSubnet Route Table PrivateRT, err := awsClient.CreateRouteTableForSubnet(*Vpc.Vpc.VpcId, *PrivateSubnet.Subnet.SubnetId) if err != nil { fmt.Println("Failed to create Private Subnet Route Table") return } //Create FirewallSubnet Route Table FirewallRT, err := awsClient.CreateRouteTableForSubnet(*Vpc.Vpc.VpcId, *FirewallSubnet.Subnet.SubnetId) if err != nil { fmt.Println("Failed to create Firewall Subnet Route Table") return } //Create IGW Route Table IgRT, err := awsClient.CreateRouteTableForIGW(*Vpc.Vpc.VpcId, *IG.InternetGateway.InternetGatewayId) if err != nil { fmt.Println("Failed to create IGW Route Table") return } //Create NAT Gateway NatGateway, err := awsClient.CreateNatGateway(*PublicSubnet.Subnet.SubnetId) if err != nil { fmt.Println("Failed to create NAT Gateway") return } //Create route 0.0.0.0/0 in PrivateRT for NatGateway err = awsClient.CreateRouteForGateway("0.0.0.0/0", *NatGateway.NatGateway.NatGatewayId, *PrivateRT.RouteTable.RouteTableId) if err != nil { fmt.Println("Failed to create route 0.0.0.0/0 in Private Subnet Route Tabel to NAT Gateway") return } fmt.Println("Successfully Created a route 0.0.0.0/0 to NatGateway in Private Subnet") //Create route 0.0.0.0/0 in FirewallSubnet for IG err = awsClient.CreateRouteForGateway("0.0.0.0/0", *IG.InternetGateway.InternetGatewayId, *FirewallRT.RouteTable.RouteTableId) if err != nil { fmt.Println("Failed to 
create route 0.0.0.0/0 in Firewall Subnet to IGW") return } fmt.Println("Successfully Created a route 0.0.0.0/0 to IGW in Firewall Subnet") //Create Firewall Firewall, err := awsClient.CreateFirewall(FirewallSubnet, Vpc) if err != nil { fmt.Println("Failed to create Firewall") return } //Wait for the Firewall to be ready if awsClient.IsFirewallReady(*Firewall.Firewall.FirewallName) != nil { fmt.Println(awsClient.IsFirewallReady(*Firewall.Firewall.FirewallName).Error()) } fmt.Println("VpcEndpoint is Now Available!") DescribeFirewall, err := awsClient.firewallClient.DescribeFirewall(&networkfirewall.DescribeFirewallInput{ FirewallName: aws.String(*Firewall.Firewall.FirewallName), }) if err != nil { fmt.Println("Failed to Describe Firewall") return } //Create route 0.0.0.0/0 in PublicRT for FirewallEndpoint firewallEndpointId := *DescribeFirewall.FirewallStatus.SyncStates[*FirewallSubnet.Subnet.AvailabilityZone].Attachment.EndpointId //Check to see if the Firewall VpcEndpoint is available err = awsClient.CreateRouteToFirewall("0.0.0.0/0", firewallEndpointId, PublicRT) if err != nil { fmt.Println("Failed to create route 0.0.0.0/0 in Public Subnet Route Table to Firewall Endpoint") return } fmt.Println("Successfully route 0.0.0.0/0 to the Firewall Endpoint in PublicRT ") //Create route 10.0.0.0/24 in IgRt to FirewallEndpoint err = awsClient.CreateRouteToFirewall("10.0.0.0/24", firewallEndpointId, IgRT) if err != nil { fmt.Println("Failed to create route 10.0.0.0/24 in IGW Route Table to Firewall Endpoint") return } fmt.Println("Successfully route 10.0.0.0/24 to the Firewall Endpoint in IgRT ") fmt.Println("Successfully Created VPC and Firewall") } func NewClient(region, profile string) Client { var awsClient Client if profile == ""{ creds := credentials.NewStaticCredentials(os.Getenv("AWS_ACCESS_KEY_ID"), os.Getenv("AWS_SECRET_ACCESS_KEY"), os.Getenv("AWS_SESSION_TOKEN")) ec2Client := ec2.New(session.New(&aws.Config{ Region: &region, Credentials: creds, })) firewallClient := networkfirewall.New(session.Must(session.NewSession()), &aws.Config{ Region: &region, Credentials: creds, }) awsClient = Client{ ec2Client, firewallClient, } }else{ sess := session.Must(session.NewSessionWithOptions(session.Options{ Config: aws.Config{ Region: &region, }, Profile: profile, })) if _, err := sess.Config.Credentials.Get(); err != nil { if err != nil { fmt.Println("could not create AWS session: ", err) } } ec2Client := ec2.New(sess) firewallClient := networkfirewall.New(sess) awsClient = Client{ ec2Client, firewallClient, } } return awsClient } func (c Client) CreateVPC() (ec2.CreateVpcOutput, error) { VPC, err := c.ec2Client.CreateVpc(&ec2.CreateVpcInput{ CidrBlock: aws.String("10.0.0.0/16"), }) if err != nil { fmt.Println(err.Error()) return ec2.CreateVpcOutput{}, err } fmt.Println("Successfully created a vpc with ID:", string(*VPC.Vpc.VpcId)) //Enable DNSHostname _, err = c.ec2Client.ModifyVpcAttribute(&ec2.ModifyVpcAttributeInput{ EnableDnsHostnames: &ec2.AttributeBooleanValue{ Value: aws.Bool(true), }, VpcId: aws.String(*VPC.Vpc.VpcId), }) if err != nil { fmt.Println(err.Error()) return ec2.CreateVpcOutput{}, err } fmt.Println("Successfully enabled DNSHostname for the newly created VPC") return *VPC, nil } func (c Client) CreateInternetGatewayForVpc(vpcID string) (ec2.CreateInternetGatewayOutput, error) { IGresult, err := c.ec2Client.CreateInternetGateway(&ec2.CreateInternetGatewayInput{}) if err != nil { fmt.Println(err.Error()) return ec2.CreateInternetGatewayOutput{}, err } fmt.Println("Successfully 
created IG") //Attach the InternetGateway to the VPC _, err = c.ec2Client.AttachInternetGateway(&ec2.AttachInternetGatewayInput{ InternetGatewayId: aws.String(*IGresult.InternetGateway.InternetGatewayId), VpcId: aws.String(vpcID), }) if err != nil { fmt.Println(err.Error()) return ec2.CreateInternetGatewayOutput{}, err } fmt.Println("Successfully attached IG to VPC") return *IGresult, nil } func (c Client) CreateSubnet(CidrBlock string, vpcID string) (ec2.CreateSubnetOutput, error) { Subnet, err := c.ec2Client.CreateSubnet(&ec2.CreateSubnetInput{ CidrBlock: aws.String(CidrBlock), VpcId: aws.String(vpcID), }) if err != nil { if aerr, ok := err.(awserr.Error); ok { switch aerr.Code() { default: fmt.Println(aerr.Error()) } } else { fmt.Println(err.Error()) } return ec2.CreateSubnetOutput{}, err } return *Subnet, nil } func (c Client) CreateRouteTableForSubnet(vpcID string, subnetID string) (ec2.CreateRouteTableOutput, error) { RT, err := c.ec2Client.CreateRouteTable(&ec2.CreateRouteTableInput{ VpcId: aws.String(vpcID), }) if err != nil { fmt.Println(err.Error()) return ec2.CreateRouteTableOutput{}, err } Associateinput := &ec2.AssociateRouteTableInput{ RouteTableId: aws.String(*RT.RouteTable.RouteTableId), SubnetId: aws.String(subnetID), } _, err = c.ec2Client.AssociateRouteTable(Associateinput) if err != nil { fmt.Println(err.Error()) return ec2.CreateRouteTableOutput{}, err } return *RT, nil } func (c Client) CreateRouteTableForIGW(vpcId string, IgwId string) (ec2.CreateRouteTableOutput, error) { RouteTable1input := &ec2.CreateRouteTableInput{ VpcId: aws.String(vpcId), } RT, err := c.ec2Client.CreateRouteTable(RouteTable1input) if err != nil { fmt.Println(err.Error()) return ec2.CreateRouteTableOutput{}, err } Associateinput := &ec2.AssociateRouteTableInput{ RouteTableId: aws.String(*RT.RouteTable.RouteTableId), GatewayId: aws.String(IgwId), } _, err = c.ec2Client.AssociateRouteTable(Associateinput) if err != nil { fmt.Println(err.Error()) return ec2.CreateRouteTableOutput{}, err } return *RT, nil } func (c Client) CreateNatGateway(SubnetId string) (ec2.CreateNatGatewayOutput, error) { EIPinput := &ec2.AllocateAddressInput{ Domain: aws.String("vpc"), } EIPresult, err := c.ec2Client.AllocateAddress(EIPinput) if err != nil { fmt.Println(err.Error()) return ec2.CreateNatGatewayOutput{}, err } NGinput := &ec2.CreateNatGatewayInput{ AllocationId: aws.String(*EIPresult.AllocationId), SubnetId: aws.String(SubnetId), } NGresult, err := c.ec2Client.CreateNatGateway(NGinput) if err != nil { fmt.Println(err.Error()) return ec2.CreateNatGatewayOutput{}, err } fmt.Println("Waiting 2 minutes for NAT Gateway to become ready") //Wait for NAT Gatway to be ready err = c.ec2Client.WaitUntilNatGatewayAvailable(&ec2.DescribeNatGatewaysInput{ NatGatewayIds: []*string{aws.String(*NGresult.NatGateway.NatGatewayId)}, }) if err != nil { fmt.Println(err.Error()) return ec2.CreateNatGatewayOutput{}, err } fmt.Println("Successfully create a NAT Gateway") return *NGresult, nil } func (c Client) CreateRouteForGateway(CidrBlock string, GatewayID string, RouteTableId string) error { Ruleinput := &ec2.CreateRouteInput{ DestinationCidrBlock: aws.String(CidrBlock), GatewayId: aws.String(GatewayID), RouteTableId: aws.String(RouteTableId), } _, err := c.ec2Client.CreateRoute(Ruleinput) if err != nil { fmt.Println(err.Error()) return err } return nil } func (c Client) CreateFirewall(FirewallSubnet ec2.CreateSubnetOutput, Vpc ec2.CreateVpcOutput) (networkfirewall.CreateFirewallOutput, error) { RuleGroupinput := 
&networkfirewall.CreateRuleGroupInput{ Capacity: aws.Int64(100), RuleGroupName: aws.String("test-firewall"), Type: aws.String("STATEFUL"), RuleGroup: &networkfirewall.RuleGroup{ RulesSource: &networkfirewall.RulesSource{ RulesSourceList: &networkfirewall.RulesSourceList{ GeneratedRulesType: aws.String("DENYLIST"), TargetTypes: []*string{aws.String("TLS_SNI")}, Targets: []*string{aws.String(".quay.io"), aws.String("api.openshift.com"), aws.String(".redhat.io")}, }, }, }, } statefulRuleGroup, err := c.firewallClient.CreateRuleGroup(RuleGroupinput) if err != nil { fmt.Println(err.Error()) return networkfirewall.CreateFirewallOutput{}, err } fmt.Println("Successfully creates a Stateful Rule Group") FirewallPolicyInput := &networkfirewall.CreateFirewallPolicyInput{ Description: aws.String("test"), FirewallPolicyName: aws.String("testPolicy"), FirewallPolicy: &networkfirewall.FirewallPolicy{ StatefulRuleGroupReferences: []*networkfirewall.StatefulRuleGroupReference{&networkfirewall.StatefulRuleGroupReference{ ResourceArn: statefulRuleGroup.RuleGroupResponse.RuleGroupArn}, }, StatelessDefaultActions: []*string{aws.String("aws:forward_to_sfe")}, StatelessFragmentDefaultActions: []*string{aws.String("aws:forward_to_sfe")}, }, } testFirewallPolicy, err := c.firewallClient.CreateFirewallPolicy(FirewallPolicyInput) if err != nil { fmt.Println(err.Error()) return networkfirewall.CreateFirewallOutput{}, err } fmt.Println("Successfully created a Firewall Policy") testFirewallInput := &networkfirewall.CreateFirewallInput{ FirewallName: aws.String("testFirewall"), FirewallPolicyArn: testFirewallPolicy.FirewallPolicyResponse.FirewallPolicyArn, SubnetMappings: []*networkfirewall.SubnetMapping{&networkfirewall.SubnetMapping{ SubnetId: FirewallSubnet.Subnet.SubnetId}, }, VpcId: aws.String(*Vpc.Vpc.VpcId), } Firewall, err := c.firewallClient.CreateFirewall(testFirewallInput) if err != nil { if aerr, ok := err.(awserr.Error); ok { switch aerr.Code() { default: fmt.Println(aerr.Error()) } } else { fmt.Println(err.Error()) } return networkfirewall.CreateFirewallOutput{}, err } fmt.Println("Successfully created a Firewall!") return *Firewall, nil } func (c Client) IsFirewallReady(Firewall string) error { err := wait.PollImmediate(2*time.Second, 240 * time.Second, func() (bool, error) { DescribeFirewall, _ := c.firewallClient.DescribeFirewall(&networkfirewall.DescribeFirewallInput{ FirewallName: aws.String(Firewall), }) fmt.Println("Current Firewall Status: ", *DescribeFirewall.FirewallStatus.Status) if *DescribeFirewall.FirewallStatus.Status == "READY" { return true, nil } return false, nil }) return err } func (c Client) CreateRouteToFirewall(CidrBlock string, VPCEndpointId string, RouteTable ec2.CreateRouteTableOutput) error { Ruleinput := &ec2.CreateRouteInput{ DestinationCidrBlock: aws.String(CidrBlock), VpcEndpointId: aws.String(VPCEndpointId), RouteTableId: aws.String(*RouteTable.RouteTable.RouteTableId), } _, err := c.ec2Client.CreateRoute(Ruleinput) if err != nil { fmt.Println(err.Error()) return err } return nil }
[ "\"AWS_PROFILE\"", "\"AWS_REGION\"", "\"AWS_ACCESS_KEY_ID\"", "\"AWS_SECRET_ACCESS_KEY\"", "\"AWS_SESSION_TOKEN\"" ]
[]
[ "AWS_SESSION_TOKEN", "AWS_SECRET_ACCESS_KEY", "AWS_REGION", "AWS_ACCESS_KEY_ID", "AWS_PROFILE" ]
[]
["AWS_SESSION_TOKEN", "AWS_SECRET_ACCESS_KEY", "AWS_REGION", "AWS_ACCESS_KEY_ID", "AWS_PROFILE"]
go
5
0
src/bot.py
#!/usr/bin/python
# -*- coding: utf-8 -*-

import cachet
import telebot
import sys
import os

reload(sys)
sys.setdefaultencoding('utf8')

BOT_TOKEN = os.getenv('BOT_TOKEN')

bot = telebot.TeleBot(BOT_TOKEN)


@bot.message_handler(commands=['start', 'help'])
def send_welcome(message):
    bot.reply_to(message, 'Hi, i am Bot!')


@bot.message_handler(commands=['incidents'])
def show_incidents(message):
    incidents = cachet.get_incidents()
    bot.send_message(message.chat.id, 'На данный момент в системе зарегистрированы следующие инциденты:')
    for item in incidents:
        if item['status'] < 4:
            answer = '- %s (%s)' % (item['name'], item['human_status'])
            if item['component'] is not None:
                answer += '\n _Затронут сервис: %s_' % item['component']['name']
            bot.send_message(message.chat.id, answer, parse_mode='Markdown')


print('Bot started')
bot.polling()
[]
[]
[ "BOT_TOKEN" ]
[]
["BOT_TOKEN"]
python
1
0
share/qt/extract_strings_qt.py
#!/usr/bin/env python3
# Copyright (c) 2012-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Extract _("...") strings for translation and convert to Qt stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import operator
import os
import sys

OUT_CPP="qt/vektorcoinstrings.cpp"
EMPTY=['""']

def parse_po(text):
    """
    Parse 'po' format produced by xgettext.
    Return a list of (msgid,msgstr) tuples.
    """
    messages = []
    msgid = []
    msgstr = []
    in_msgid = False
    in_msgstr = False

    for line in text.split('\n'):
        line = line.rstrip('\r')
        if line.startswith('msgid '):
            if in_msgstr:
                messages.append((msgid, msgstr))
                in_msgstr = False
            # message start
            in_msgid = True
            msgid = [line[6:]]
        elif line.startswith('msgstr '):
            in_msgid = False
            in_msgstr = True
            msgstr = [line[7:]]
        elif line.startswith('"'):
            if in_msgid:
                msgid.append(line)
            if in_msgstr:
                msgstr.append(line)

    if in_msgstr:
        messages.append((msgid, msgstr))

    return messages

files = sys.argv[1:]

# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
if not XGETTEXT:
    print('Cannot extract strings: xgettext utility is not installed or not configured.',file=sys.stderr)
    print('Please install package "gettext" and re-run \'./configure\'.',file=sys.stderr)
    sys.exit(1)
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()

messages = parse_po(out.decode('utf-8'))

f = open(OUT_CPP, 'w', encoding="utf8")

f.write("""

#include <QtGlobal>

// Automatically generated by extract_strings_qt.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *vektorcoin_strings[] = {\n')
f.write('QT_TRANSLATE_NOOP("vektorcoin-core", "%s"),\n' % (os.getenv('PACKAGE_NAME'),))
f.write('QT_TRANSLATE_NOOP("vektorcoin-core", "%s"),\n' % (os.getenv('COPYRIGHT_HOLDERS'),))
if os.getenv('COPYRIGHT_HOLDERS_SUBSTITUTION') != os.getenv('PACKAGE_NAME'):
    f.write('QT_TRANSLATE_NOOP("vektorcoin-core", "%s"),\n' % (os.getenv('COPYRIGHT_HOLDERS_SUBSTITUTION'),))
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
    if msgid != EMPTY:
        f.write('QT_TRANSLATE_NOOP("vektorcoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
[]
[]
[ "COPYRIGHT_HOLDERS", "PACKAGE_NAME", "XGETTEXT", "COPYRIGHT_HOLDERS_SUBSTITUTION" ]
[]
["COPYRIGHT_HOLDERS", "PACKAGE_NAME", "XGETTEXT", "COPYRIGHT_HOLDERS_SUBSTITUTION"]
python
4
0
qa/rpc-tests/util.py
# Copyright (c) 2014-2019 The Bitcoin Core Developers # Copyright (c) 2014-2019 Dash Developers # Copyright (c) 2018-2019 True Crypto OSS Community # Distributed under the MIT/X11 software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Helpful routines for regression testing # # Add python-bitcoinrpc to module search path: import os import sys sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinrpc")) from decimal import Decimal, ROUND_DOWN import json import random import shutil import subprocess import time import re from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException from util import * def p2p_port(n): return 11000 + n + os.getpid()%999 def rpc_port(n): return 12000 + n + os.getpid()%999 def check_json_precision(): """Make sure json library being used does not lose precision converting BTC values""" n = Decimal("20000000.00000003") satoshis = int(json.loads(json.dumps(float(n)))*1.0e8) if satoshis != 2000000000000003: raise RuntimeError("JSON encode/decode loses precision") def sync_blocks(rpc_connections): """ Wait until everybody has the same block count """ while True: counts = [ x.getblockcount() for x in rpc_connections ] if counts == [ counts[0] ]*len(counts): break time.sleep(1) def sync_mempools(rpc_connections): """ Wait until everybody has the same transactions in their memory pools """ while True: pool = set(rpc_connections[0].getrawmempool()) num_match = 1 for i in range(1, len(rpc_connections)): if set(rpc_connections[i].getrawmempool()) == pool: num_match = num_match+1 if num_match == len(rpc_connections): break time.sleep(1) bitcoind_processes = {} def initialize_datadir(dirname, n): datadir = os.path.join(dirname, "node"+str(n)) if not os.path.isdir(datadir): os.makedirs(datadir) with open(os.path.join(datadir, "tdc.conf"), 'w') as f: f.write("regtest=1\n"); f.write("rpcuser=rt\n"); f.write("rpcpassword=rt\n"); f.write("port="+str(p2p_port(n))+"\n"); f.write("rpcport="+str(rpc_port(n))+"\n"); return datadir def initialize_chain(test_dir): """ Create (or copy from cache) a 200-block-long chain and 4 wallets. tdcd and tdc-cli must be in search path. """ if not os.path.isdir(os.path.join("cache", "node0")): devnull = open("/dev/null", "w+") # Create cache directories, run tdcd: for i in range(4): datadir=initialize_datadir("cache", i) args = [ os.getenv("BITCOIND", "tdcd"), "-keypool=1", "-datadir="+datadir, "-discover=0" ] if i > 0: args.append("-connect=127.0.0.1:"+str(p2p_port(0))) bitcoind_processes[i] = subprocess.Popen(args) subprocess.check_call([ os.getenv("BITCOINCLI", "tdc-cli"), "-datadir="+datadir, "-rpcwait", "getblockcount"], stdout=devnull) devnull.close() rpcs = [] for i in range(4): try: url = "http://rt:[email protected]:%d"%(rpc_port(i),) rpcs.append(AuthServiceProxy(url)) except: sys.stderr.write("Error connecting to "+url+"\n") sys.exit(1) # Create a 200-block-long chain; each of the 4 nodes # gets 25 mature blocks and 25 immature. 
# blocks are created with timestamps 10 minutes apart, starting # at 1 Jan 2014 block_time = 1388534400 for i in range(2): for peer in range(4): for j in range(25): set_node_times(rpcs, block_time) rpcs[peer].setgenerate(True, 1) block_time += 10*60 # Must sync before next peer starts generating blocks sync_blocks(rpcs) # Shut them down, and clean up cache directories: stop_nodes(rpcs) wait_bitcoinds() for i in range(4): os.remove(log_filename("cache", i, "debug.log")) os.remove(log_filename("cache", i, "db.log")) os.remove(log_filename("cache", i, "peers.dat")) os.remove(log_filename("cache", i, "fee_estimates.dat")) for i in range(4): from_dir = os.path.join("cache", "node"+str(i)) to_dir = os.path.join(test_dir, "node"+str(i)) shutil.copytree(from_dir, to_dir) initialize_datadir(test_dir, i) # Overwrite port/rpcport in tdc.conf def initialize_chain_clean(test_dir, num_nodes): """ Create an empty blockchain and num_nodes wallets. Useful if a test case wants complete control over initialization. """ for i in range(num_nodes): datadir=initialize_datadir(test_dir, i) def _rpchost_to_args(rpchost): '''Convert optional IP:port spec to rpcconnect/rpcport args''' if rpchost is None: return [] match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost) if not match: raise ValueError('Invalid RPC host spec ' + rpchost) rpcconnect = match.group(1) rpcport = match.group(2) if rpcconnect.startswith('['): # remove IPv6 [...] wrapping rpcconnect = rpcconnect[1:-1] rv = ['-rpcconnect=' + rpcconnect] if rpcport: rv += ['-rpcport=' + rpcport] return rv def start_node(i, dirname, extra_args=None, rpchost=None): """ Start a tdcd and return RPC connection to it """ datadir = os.path.join(dirname, "node"+str(i)) args = [ os.getenv("BITCOIND", "tdcd"), "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ] if extra_args is not None: args.extend(extra_args) bitcoind_processes[i] = subprocess.Popen(args) devnull = open("/dev/null", "w+") subprocess.check_call([ os.getenv("BITCOINCLI", "tdc-cli"), "-datadir="+datadir] + _rpchost_to_args(rpchost) + ["-rpcwait", "getblockcount"], stdout=devnull) devnull.close() url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i)) proxy = AuthServiceProxy(url) proxy.url = url # store URL on proxy for info return proxy def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None): """ Start multiple tdcds, return RPC connections to them """ if extra_args is None: extra_args = [ None for i in range(num_nodes) ] return [ start_node(i, dirname, extra_args[i], rpchost) for i in range(num_nodes) ] def log_filename(dirname, n_node, logname): return os.path.join(dirname, "node"+str(n_node), "regtest", logname) def stop_node(node, i): node.stop() bitcoind_processes[i].wait() del bitcoind_processes[i] def stop_nodes(nodes): for node in nodes: node.stop() del nodes[:] # Emptying array closes connections as a side effect def set_node_times(nodes, t): for node in nodes: node.setmocktime(t) def wait_bitcoinds(): # Wait for all bitcoinds to cleanly exit for bitcoind in bitcoind_processes.values(): bitcoind.wait() bitcoind_processes.clear() def connect_nodes(from_connection, node_num): ip_port = "127.0.0.1:"+str(p2p_port(node_num)) from_connection.addnode(ip_port, "onetry") # poll until version handshake complete to avoid race conditions # with transaction relaying while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()): time.sleep(0.1) def connect_nodes_bi(nodes, a, b): connect_nodes(nodes[a], b) connect_nodes(nodes[b], a) def find_output(node, 
txid, amount): """ Return index to output of txid with value amount Raises exception if there is none. """ txdata = node.getrawtransaction(txid, 1) for i in range(len(txdata["vout"])): if txdata["vout"][i]["value"] == amount: return i raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount))) def gather_inputs(from_node, amount_needed, confirmations_required=1): """ Return a random set of unspent txouts that are enough to pay amount_needed """ assert(confirmations_required >=0) utxo = from_node.listunspent(confirmations_required) random.shuffle(utxo) inputs = [] total_in = Decimal("0.00000000") while total_in < amount_needed and len(utxo) > 0: t = utxo.pop() total_in += t["amount"] inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } ) if total_in < amount_needed: raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in)) return (total_in, inputs) def make_change(from_node, amount_in, amount_out, fee): """ Create change output(s), return them """ outputs = {} amount = amount_out+fee change = amount_in - amount if change > amount*2: # Create an extra change output to break up big inputs change_address = from_node.getnewaddress() # Split change in two, being careful of rounding: outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN) change = amount_in - amount - outputs[change_address] if change > 0: outputs[from_node.getnewaddress()] = change return outputs def send_zeropri_transaction(from_node, to_node, amount, fee): """ Create&broadcast a zero-priority transaction. Returns (txid, hex-encoded-txdata) Ensures transaction is zero-priority by first creating a send-to-self, then using it's output """ # Create a send-to-self with confirmed inputs: self_address = from_node.getnewaddress() (total_in, inputs) = gather_inputs(from_node, amount+fee*2) outputs = make_change(from_node, total_in, amount+fee, fee) outputs[self_address] = float(amount+fee) self_rawtx = from_node.createrawtransaction(inputs, outputs) self_signresult = from_node.signrawtransaction(self_rawtx) self_txid = from_node.sendrawtransaction(self_signresult["hex"], True) vout = find_output(from_node, self_txid, amount+fee) # Now immediately spend the output to create a 1-input, 1-output # zero-priority transaction: inputs = [ { "txid" : self_txid, "vout" : vout } ] outputs = { to_node.getnewaddress() : float(amount) } rawtx = from_node.createrawtransaction(inputs, outputs) signresult = from_node.signrawtransaction(rawtx) txid = from_node.sendrawtransaction(signresult["hex"], True) return (txid, signresult["hex"]) def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants): """ Create a random zero-priority transaction. Returns (txid, hex-encoded-transaction-data, fee) """ from_node = random.choice(nodes) to_node = random.choice(nodes) fee = min_fee + fee_increment*random.randint(0,fee_variants) (txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee) return (txid, txhex, fee) def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants): """ Create a random transaction. 
Returns (txid, hex-encoded-transaction-data, fee) """ from_node = random.choice(nodes) to_node = random.choice(nodes) fee = min_fee + fee_increment*random.randint(0,fee_variants) (total_in, inputs) = gather_inputs(from_node, amount+fee) outputs = make_change(from_node, total_in, amount, fee) outputs[to_node.getnewaddress()] = float(amount) rawtx = from_node.createrawtransaction(inputs, outputs) signresult = from_node.signrawtransaction(rawtx) txid = from_node.sendrawtransaction(signresult["hex"], True) return (txid, signresult["hex"], fee) def assert_equal(thing1, thing2): if thing1 != thing2: raise AssertionError("%s != %s"%(str(thing1),str(thing2))) def assert_greater_than(thing1, thing2): if thing1 <= thing2: raise AssertionError("%s <= %s"%(str(thing1),str(thing2))) def assert_raises(exc, fun, *args, **kwds): try: fun(*args, **kwds) except exc: pass except Exception as e: raise AssertionError("Unexpected exception raised: "+type(e).__name__) else: raise AssertionError("No exception raised")
[]
[]
[ "BITCOINCLI", "BITCOIND" ]
[]
["BITCOINCLI", "BITCOIND"]
python
2
0
RMCastPlugin/src/edu/vu/isis/ammo/rmcastplugin/PluginConfigurationManager.java
/* Copyright (c) 2010-2015 Vanderbilt University * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ package edu.vu.isis.ammo.rmcastplugin; import java.io.File; import java.io.FileNotFoundException; import java.io.FileReader; import java.util.ArrayList; import java.util.List; import org.json.JSONArray; import org.json.JSONException; import org.json.JSONObject; import org.json.JSONTokener; import org.slf4j.Logger; import org.slf4j.LoggerFactory; class PluginConfigurationManager { public final static String CONFIG_DIRECTORY = "ammo-gateway"; public final static String CONFIG_FILE = "RMCastPluginConfig.json"; private static final Logger logger = LoggerFactory.getLogger(PluginConfigurationManager.class); public static PluginConfigurationManager getInstance() { return getInstance(CONFIG_FILE); } public static PluginConfigurationManager getInstance(String configFile) { if (sharedInstance == null) { sharedInstance = new PluginConfigurationManager(configFile); } return sharedInstance; } public List<String> getMimeTypes() { return mimeTypes; } public List<String> getMediaMimeTypes() { return mediaMimeTypes; } private PluginConfigurationManager( String configFile ) { mimeTypes = new ArrayList<String>(); mediaMimeTypes = new ArrayList<String>(); String fileName = findConfigFile(configFile); if (fileName != null) { try { final JSONTokener tokener = new JSONTokener( new FileReader(fileName) ); final JSONObject input = new JSONObject( tokener ); if(input.has("MimeTypes")) { JSONArray jsonArray = input.getJSONArray("MimeTypes"); for(int i=0; i<jsonArray.length(); i++) mimeTypes.add( jsonArray.getString(i) ); } else { logger.error("<constructor>: MimeTypes is missing or wrong type (should be string array)"); } if(input.has("MediaMimeTypes")) { JSONArray jsonArray = input.getJSONArray("MediaMimeTypes"); for(int i=0; i<jsonArray.length(); i++) mediaMimeTypes.add( jsonArray.getString(i) ); } else { logger.error("<constructor>: MediaMimeTypes is missing or wrong type (should be string array)"); } } catch (JSONException jsx) { logger.error("Exception while parsing Plugin Configuration File: {}", jsx.getStackTrace() ); jsx.printStackTrace(); } catch (FileNotFoundException fex) { logger.error("Exception while opening Plugin Configuration File: {}", fex.getStackTrace() ); fex.printStackTrace(); } } } private String findConfigFile( String configFile ) { final String os = System.getProperty("os.name").toLowerCase(); String filePath; if (os.indexOf("win") >= 0) { filePath = findConfigFileWindows(configFile); } else { filePath = 
findConfigFileLinux(configFile); } logger.info("findConfigFile: using config file {}", filePath); return filePath; } /** * Searches for the gateway config file. Search order: * 1) The current working directory * 2) ~/.ammo-gateway/ * 3) /etc/ammo-gateway/ * Fallback locations (don't rely on these; they may change or disappear in a * future release. Gateway installation should put the config file into * a location that's searched by default): * 4) $GATEWAY_ROOT/etc * 5) $GATEWAY_ROOT/build/etc * 6) ../etc */ private String findConfigFileLinux( String configFile ) { String filePath = configFile; String home = System.getenv("HOME"); if (home == null) home = new String(""); String gatewayRoot = System.getenv("GATEWAY_ROOT"); if (gatewayRoot == null) gatewayRoot = new String(""); if (new File(filePath).exists() == false) { filePath = home + "/." + CONFIG_DIRECTORY + "/" + CONFIG_FILE; if (new File(filePath).exists() == false) { filePath = new String("/etc/") + CONFIG_DIRECTORY + "/" + CONFIG_FILE; if (new File(filePath).exists() == false) { filePath = gatewayRoot + "/etc/" + CONFIG_FILE; if (new File(filePath).exists() == false) { filePath = gatewayRoot + "/build/etc/" + CONFIG_FILE; if (new File(filePath).exists() == false) { filePath = new String("../etc/") + CONFIG_FILE; if (new File(filePath).exists() == false) { logger.error("findConfigFile: unable to find config file"); return ""; } } } } } } return filePath; } /** * Searches for the gateway config file. Search order: * 1) The current working directory * 2) The user's configuration directory (Roaming appdata directory/ammo-gateway) * 3) The all users configuration directory (i.e. C:\ProgramData\ammo-gateway on Vista/7) * Fallback locations (don't rely on these; they may change or disappear in a * future release. Gateway installation should put the config file into * a location that's searched by default): * 4) $GATEWAY_ROOT/etc * 5) $GATEWAY_ROOT/build/etc * 6) ../etc */ private String findConfigFileWindows( String configFile ) { String filePath = configFile; String userConfigPath = System.getenv("APPDATA"); if (userConfigPath == null) userConfigPath = new String(""); String systemConfigPath = System.getenv("PROGRAMDATA"); if (systemConfigPath == null) systemConfigPath = new String(""); String gatewayRoot = System.getenv("GATEWAY_ROOT"); if (gatewayRoot == null) gatewayRoot = new String(""); if (new File(filePath).exists() == false) { filePath = userConfigPath + "/" + CONFIG_DIRECTORY + "/" + CONFIG_FILE; if (new File(filePath).exists() == false) { filePath = systemConfigPath + "/" + CONFIG_DIRECTORY + "/" + CONFIG_FILE; if (new File(filePath).exists() == false) { filePath = gatewayRoot + "/etc/" + CONFIG_FILE; if (new File(filePath).exists() == false) { filePath = gatewayRoot + "/build/etc/" + CONFIG_FILE; if (new File(filePath).exists() == false) { filePath = new String("../etc/") + CONFIG_FILE; if (new File(filePath).exists() == false) { logger.error("findConfigFile: unable to find config file"); return ""; } } } } } } return filePath; } private static PluginConfigurationManager sharedInstance; private List<String> mimeTypes; private List<String> mediaMimeTypes; }
[ "\"HOME\"", "\"GATEWAY_ROOT\"", "\"APPDATA\"", "\"PROGRAMDATA\"", "\"GATEWAY_ROOT\"" ]
[]
[ "GATEWAY_ROOT", "PROGRAMDATA", "HOME", "APPDATA" ]
[]
["GATEWAY_ROOT", "PROGRAMDATA", "HOME", "APPDATA"]
java
4
0
fuzzers/005-tilegrid/cfg_int/top.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017-2020 The Project X-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
import os
import random
random.seed(int(os.getenv("SEED"), 16))

from prjxray import util
from prjxray.db import Database


def gen_sites():
    db = Database(util.get_db_root(), util.get_part())
    grid = db.grid()
    for tile_name in sorted(grid.tiles()):
        loc = grid.loc_of_tilename(tile_name)
        gridinfo = grid.gridinfo_at_loc(loc)
        if gridinfo.tile_type != 'CFG_CENTER_MID':
            continue

        sites = {}
        for site_name, site_type in gridinfo.sites.items():
            if site_type not in sites:
                sites[site_type] = []
            sites[site_type].append(site_name)

        for site_type in sites:
            sites[site_type].sort()

        int_grid_x = loc.grid_x + 3
        int_tile_type = 'INT_L'

        int_tile_locs = []
        for dy in range(-9, 12):
            # Skip the VBREAK tile.
            if dy != 6:
                int_tile_locs.append((int_grid_x, loc.grid_y + dy), )

        int_tiles = []
        for int_tile_loc in int_tile_locs:
            int_gridinfo = grid.gridinfo_at_loc(int_tile_loc)
            assert int_gridinfo.tile_type == int_tile_type, (
                int_gridinfo.tile_type, int_tile_type)

            int_tiles.append(grid.tilename_at_loc(int_tile_loc))

        yield tile_name, sites, int_tiles


def write_params(params):
    pinstr = 'tile,val\n'
    for tile, (val) in sorted(params.items()):
        pinstr += '%s,%s\n' % (tile, val)
    open('params.csv', 'w').write(pinstr)


def run():
    print('''
module top();
''')

    sites = list(gen_sites())

    # Only on CFG_CENTER_MID expected.
    assert len(sites) == 1

    tile_name, sites, int_tiles = sites[0]

    assert len(sites['ICAP']) == 2, len(sites['ICAP'])

    # int_tiles[6]:
    #   IMUX43 -> ICAP1_I31 = 0
    #   IMUX42 -> ICAP1_I30 = toggle 0/1
    # int_tiles[7]:
    #   IMUX43 -> ICAP1_I15 = 0
    #   IMUX42 -> ICAP1_I14 = toggle 0/1
    # int_tiles[8]:
    #   IMUX43 -> ICAP1_CSIB = 0
    #   IMUX42 -> ICAP1_RDWRB = toggle 0/1

    ICAP1_I30 = random.randint(0, 1)
    ICAP1_I14 = random.randint(0, 1)
    ICAP1_RDWRB = random.randint(0, 1)

    params = {}
    params[int_tiles[6]] = ICAP1_I30
    params[int_tiles[7]] = ICAP1_I14
    params[int_tiles[8]] = ICAP1_RDWRB

    print(
        """
    wire [31:0] icap_i_{site};
    wire icap_rdwrd_{site};
    wire icap_csib_{site};

    assign icap_i_{site}[31] = 0;
    assign icap_i_{site}[30] = {ICAP1_I30};
    assign icap_i_{site}[15] = 0;
    assign icap_i_{site}[14] = {ICAP1_I14};
    assign icap_csib_{site} = 0;
    assign icap_rdwrb_{site} = {ICAP1_RDWRB};

    (* KEEP, DONT_TOUCH, LOC = "{site}" *)
    ICAPE2 icap_{site} (
        .I(icap_i_{site}),
        .RDWRB(icap_rdwrb_{site}),
        .CSIB(icap_csib_{site})
    );
    """.format(
            site=sites['ICAP'][1],
            ICAP1_I30=ICAP1_I30,
            ICAP1_I14=ICAP1_I14,
            ICAP1_RDWRB=ICAP1_RDWRB))

    print("endmodule")

    write_params(params)


if __name__ == '__main__':
    run()
[]
[]
[ "SEED" ]
[]
["SEED"]
python
1
0
lawbreaker/web/database.py
import os
from contextlib import contextmanager
from datetime import datetime, timedelta

from psycopg2.pool import ThreadedConnectionPool

from lawbreaker.exceptions import NoResultsFound

MIN_CONNECTIONS = 1
MAX_CONNECTIONS = 10


class Database(object):
    _instance = None

    def __call__(cls, *args, **kwargs):
        if cls._instance is None:
            cls._instance = super().__call__(*args, **kwargs)
        return cls._instance

    def __init__(self):
        """
        Query to aggregate expiry details grouped by day and month,

            SELECT extract(day from expiry) AS day,
                   extract(month from expiry) AS month,
                   extract(year from expiry) AS year,
                   count(*)
            FROM characters
            GROUP BY year, month, day
            ORDER BY year, month, day;

        Can be created into a view with,

            CREATE VIEW name AS query
        """
        self._pool = None

    @contextmanager
    def connectionpool(self, *args, **kwargs):
        if self._pool is None:
            self._pool = ThreadedConnectionPool(MIN_CONNECTIONS, MAX_CONNECTIONS,
                                                dsn=os.environ['DATABASE_URL'])
            self.create_db()

        conn = self._pool.getconn()
        cursor = conn.cursor()
        try:
            yield cursor
        finally:
            conn.commit()
            cursor.close()
            self._pool.putconn(conn)

    def create_db(self):
        with self.connectionpool() as cursor:
            cursor.execute('''CREATE TABLE IF NOT EXISTS characters
                              (character_id text PRIMARY KEY UNIQUE,
                               character_json text,
                               expiry timestamp)''')

    def clear_expired(self):
        with self.connectionpool() as cursor:
            print('Deleting expired permalinks')
            cursor.execute('''DELETE FROM characters WHERE expiry < now()''')

    def select(self, character_id):
        with self.connectionpool() as cursor:
            cursor.execute(
                "SELECT character_json FROM characters WHERE character_id=%s",
                (character_id,))
            result = cursor.fetchone()
            if result is None:
                raise NoResultsFound
            else:
                cursor.execute("UPDATE characters SET expiry=%s where character_id=%s",
                               (datetime.utcnow()+timedelta(days=30), character_id))
                return result[0]

    def insert(self, character_id, character_json):
        with self.connectionpool() as cursor:
            cursor.execute("""INSERT INTO characters
                              (character_id, character_json, expiry)
                              VALUES (%s, %s, %s)""",
                           (character_id, character_json,
                            datetime.utcnow()+timedelta(days=2)))
[]
[]
[ "DATABASE_URL" ]
[]
["DATABASE_URL"]
python
1
0
services/searchengine/bleveengine/bleve_test.go
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. // See LICENSE.txt for license information. package bleveengine import ( "io/ioutil" "os" "testing" "github.com/blevesearch/bleve" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "github.com/zgordan-vv/zacmm-server/model" "github.com/zgordan-vv/zacmm-server/services/searchengine" "github.com/zgordan-vv/zacmm-server/store/searchlayer" "github.com/zgordan-vv/zacmm-server/store/searchtest" "github.com/zgordan-vv/zacmm-server/store/sqlstore" "github.com/zgordan-vv/zacmm-server/store/storetest" "github.com/zgordan-vv/zacmm-server/testlib" ) type BleveEngineTestSuite struct { suite.Suite SQLSettings *model.SqlSettings SQLSupplier *sqlstore.SqlSupplier SearchEngine *searchengine.Broker Store *searchlayer.SearchStore BleveEngine *BleveEngine IndexDir string } func TestBleveEngineTestSuite(t *testing.T) { suite.Run(t, new(BleveEngineTestSuite)) } func (s *BleveEngineTestSuite) setupIndexes() { indexDir, err := ioutil.TempDir("", "mmbleve") if err != nil { s.Require().FailNow("Cannot setup bleveengine tests: %s", err.Error()) } s.IndexDir = indexDir } func (s *BleveEngineTestSuite) setupStore() { driverName := os.Getenv("MM_SQLSETTINGS_DRIVERNAME") if driverName == "" { driverName = model.DATABASE_DRIVER_POSTGRES } s.SQLSettings = storetest.MakeSqlSettings(driverName) s.SQLSupplier = sqlstore.NewSqlSupplier(*s.SQLSettings, nil) cfg := &model.Config{} cfg.SetDefaults() cfg.BleveSettings.EnableIndexing = model.NewBool(true) cfg.BleveSettings.EnableSearching = model.NewBool(true) cfg.BleveSettings.EnableAutocomplete = model.NewBool(true) cfg.BleveSettings.IndexDir = model.NewString(s.IndexDir) cfg.SqlSettings.DisableDatabaseSearch = model.NewBool(true) s.SearchEngine = searchengine.NewBroker(cfg, nil) s.Store = searchlayer.NewSearchLayer(&testlib.TestStore{Store: s.SQLSupplier}, s.SearchEngine, cfg) s.BleveEngine = NewBleveEngine(cfg, nil) s.BleveEngine.indexSync = true s.SearchEngine.RegisterBleveEngine(s.BleveEngine) if err := s.BleveEngine.Start(); err != nil { s.Require().FailNow("Cannot start bleveengine: %s", err.Error()) } } func (s *BleveEngineTestSuite) SetupSuite() { s.setupIndexes() s.setupStore() } func (s *BleveEngineTestSuite) TearDownSuite() { os.RemoveAll(s.IndexDir) s.SQLSupplier.Close() storetest.CleanupSqlSettings(s.SQLSettings) } func (s *BleveEngineTestSuite) TestBleveSearchStoreTests() { searchTestEngine := &searchtest.SearchTestEngine{ Driver: searchtest.ENGINE_BLEVE, } s.Run("TestSearchChannelStore", func() { searchtest.TestSearchChannelStore(s.T(), s.Store, searchTestEngine) }) s.Run("TestSearchUserStore", func() { searchtest.TestSearchUserStore(s.T(), s.Store, searchTestEngine) }) s.Run("TestSearchPostStore", func() { searchtest.TestSearchPostStore(s.T(), s.Store, searchTestEngine) }) } func (s *BleveEngineTestSuite) TestDeleteChannelPosts() { s.Run("Should remove all the posts that belongs to a channel", func() { s.BleveEngine.PurgeIndexes() teamID := model.NewId() userID := model.NewId() channelID := model.NewId() channelToAvoidID := model.NewId() posts := make([]*model.Post, 0) for i := 0; i < 10; i++ { post := createPost(userID, channelID, "test one two three") appErr := s.SearchEngine.BleveEngine.IndexPost(post, teamID) require.Nil(s.T(), appErr) posts = append(posts, post) } postToAvoid := createPost(userID, channelToAvoidID, "test one two three") appErr := s.SearchEngine.BleveEngine.IndexPost(postToAvoid, teamID) require.Nil(s.T(), appErr) 
s.SearchEngine.BleveEngine.DeleteChannelPosts(channelID) doc, err := s.BleveEngine.PostIndex.Document(postToAvoid.Id) require.Nil(s.T(), err) require.Equal(s.T(), postToAvoid.Id, doc.ID) numberDocs, err := s.BleveEngine.PostIndex.DocCount() require.Nil(s.T(), err) require.Equal(s.T(), 1, int(numberDocs)) }) s.Run("Shouldn't do anything if there is not posts for the selected channel", func() { s.BleveEngine.PurgeIndexes() teamID := model.NewId() userID := model.NewId() channelID := model.NewId() channelToDeleteID := model.NewId() post := createPost(userID, channelID, "test one two three") appErr := s.SearchEngine.BleveEngine.IndexPost(post, teamID) require.Nil(s.T(), appErr) s.SearchEngine.BleveEngine.DeleteChannelPosts(channelToDeleteID) _, err := s.BleveEngine.PostIndex.Document(post.Id) require.Nil(s.T(), err) numberDocs, err := s.BleveEngine.PostIndex.DocCount() require.Nil(s.T(), err) require.Equal(s.T(), 1, int(numberDocs)) }) } func (s *BleveEngineTestSuite) TestDeleteUserPosts() { s.Run("Should remove all the posts that belongs to a user", func() { s.BleveEngine.PurgeIndexes() teamID := model.NewId() userID := model.NewId() userToAvoidID := model.NewId() channelID := model.NewId() posts := make([]*model.Post, 0) for i := 0; i < 10; i++ { post := createPost(userID, channelID, "test one two three") appErr := s.SearchEngine.BleveEngine.IndexPost(post, teamID) require.Nil(s.T(), appErr) posts = append(posts, post) } postToAvoid := createPost(userToAvoidID, channelID, "test one two three") appErr := s.SearchEngine.BleveEngine.IndexPost(postToAvoid, teamID) require.Nil(s.T(), appErr) s.SearchEngine.BleveEngine.DeleteUserPosts(userID) doc, err := s.BleveEngine.PostIndex.Document(postToAvoid.Id) require.Nil(s.T(), err) require.Equal(s.T(), postToAvoid.Id, doc.ID) numberDocs, err := s.BleveEngine.PostIndex.DocCount() require.Nil(s.T(), err) require.Equal(s.T(), 1, int(numberDocs)) }) s.Run("Shouldn't do anything if there is not posts for the selected user", func() { s.BleveEngine.PurgeIndexes() teamID := model.NewId() userID := model.NewId() userToDeleteID := model.NewId() channelID := model.NewId() post := createPost(userID, channelID, "test one two three") appErr := s.SearchEngine.BleveEngine.IndexPost(post, teamID) require.Nil(s.T(), appErr) s.SearchEngine.BleveEngine.DeleteUserPosts(userToDeleteID) _, err := s.BleveEngine.PostIndex.Document(post.Id) require.Nil(s.T(), err) numberDocs, err := s.BleveEngine.PostIndex.DocCount() require.Nil(s.T(), err) require.Equal(s.T(), 1, int(numberDocs)) }) } func (s *BleveEngineTestSuite) TestDeletePosts() { s.BleveEngine.PurgeIndexes() teamID := model.NewId() userID := model.NewId() userToAvoidID := model.NewId() channelID := model.NewId() posts := make([]*model.Post, 0) for i := 0; i < 10; i++ { post := createPost(userID, channelID, "test one two three") appErr := s.SearchEngine.BleveEngine.IndexPost(post, teamID) require.Nil(s.T(), appErr) posts = append(posts, post) } postToAvoid := createPost(userToAvoidID, channelID, "test one two three") appErr := s.SearchEngine.BleveEngine.IndexPost(postToAvoid, teamID) require.Nil(s.T(), appErr) query := bleve.NewTermQuery(userID) query.SetField("UserId") search := bleve.NewSearchRequest(query) count, err := s.BleveEngine.deletePosts(search, 1) require.Nil(s.T(), err) require.Equal(s.T(), 10, int(count)) doc, err := s.BleveEngine.PostIndex.Document(postToAvoid.Id) require.Nil(s.T(), err) require.Equal(s.T(), postToAvoid.Id, doc.ID) numberDocs, err := s.BleveEngine.PostIndex.DocCount() require.Nil(s.T(), err) 
require.Equal(s.T(), 1, int(numberDocs)) }
[ "\"MM_SQLSETTINGS_DRIVERNAME\"" ]
[]
[ "MM_SQLSETTINGS_DRIVERNAME" ]
[]
["MM_SQLSETTINGS_DRIVERNAME"]
go
1
0
aprscot/constants.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""APRS Cursor-on-Target Constants."""

import logging
import os
import re

__author__ = 'Greg Albrecht W2GMD <[email protected]>'
__copyright__ = 'Copyright 2021 Greg Albrecht'
__license__ = 'Apache License, Version 2.0'
__source__ = 'https://github.com/ampledata/aprscot'


if bool(os.environ.get('DEBUG')):
    LOG_LEVEL = logging.DEBUG
    LOG_FORMAT = logging.Formatter(
        ('%(asctime)s aprscot %(levelname)s %(name)s.%(funcName)s:%(lineno)d '
         ' - %(message)s'))
    logging.debug('aprscot Debugging Enabled via DEBUG Environment Variable.')
else:
    LOG_LEVEL = logging.INFO
    LOG_FORMAT = logging.Formatter(
        ('%(asctime)s aprscot - %(message)s'))

# 3833.55N/12248.93W
LL_REX = re.compile(
    r"(?P<aprs_lat>\d{4}\.\d{2})[NS][^\n]{1}(?P<aprs_lng>\d{5}\.\d{2})[EW]"
)

DEFAULT_APRSIS_PORT: int = 14580
DEFAULT_APRSIS_HOST: str = "rotate.aprs.net"
DEFAULT_APRSIS_CALLSIGN: str = "APRSCOT"
DEFAULT_APRSIS_FILTER: str = "f/SUNSET/50"

DEFAULT_COT_TYPE: str = "a-f-G-I-U-T-r"
DEFAULT_COT_STALE: str = 3600
[]
[]
[ "DEBUG" ]
[]
["DEBUG"]
python
1
0
pkg/integrations/postgres_exporter/postgres_exporter.go
// Package postgres_exporter embeds https://github.com/prometheus/postgres_exporter package postgres_exporter //nolint:golint import ( "fmt" "os" "strings" config_util "github.com/prometheus/common/config" "github.com/go-kit/log" "github.com/grafana/agent/pkg/integrations" integrations_v2 "github.com/grafana/agent/pkg/integrations/v2" "github.com/grafana/agent/pkg/integrations/v2/metricsutils" "github.com/lib/pq" "github.com/prometheus-community/postgres_exporter/exporter" ) // Config controls the postgres_exporter integration. type Config struct { // DataSourceNames to use to connect to Postgres. DataSourceNames []config_util.Secret `yaml:"data_source_names,omitempty"` DisableSettingsMetrics bool `yaml:"disable_settings_metrics,omitempty"` AutodiscoverDatabases bool `yaml:"autodiscover_databases,omitempty"` ExcludeDatabases []string `yaml:"exclude_databases,omitempty"` IncludeDatabases []string `yaml:"include_databases,omitempty"` DisableDefaultMetrics bool `yaml:"disable_default_metrics,omitempty"` QueryPath string `yaml:"query_path,omitempty"` } // Name returns the name of the integration this config is for. func (c *Config) Name() string { return "postgres_exporter" } // NewIntegration converts this config into an instance of a configuration. func (c *Config) NewIntegration(l log.Logger) (integrations.Integration, error) { return New(l, c) } // InstanceKey returns a simplified DSN of the first postgresql DSN, or an error if // not exactly one DSN is provided. func (c *Config) InstanceKey(_ string) (string, error) { dsn, err := c.getDataSourceNames() if err != nil { return "", err } if len(dsn) != 1 { return "", fmt.Errorf("can't automatically determine a value for `instance` with %d DSN. either use 1 DSN or manually assign a value for `instance` in the integration config", len(dsn)) } s, err := parsePostgresURL(dsn[0]) if err != nil { return "", fmt.Errorf("cannot parse DSN: %w", err) } // Assign default values to s. // // PostgreSQL hostspecs can contain multiple host pairs. We'll assign a host // and port by default, but otherwise just use the hostname. if _, ok := s["host"]; !ok { s["host"] = "localhost" s["port"] = "5432" } hostport := s["host"] if p, ok := s["port"]; ok { hostport += fmt.Sprintf(":%s", p) } return fmt.Sprintf("postgresql://%s/%s", hostport, s["dbname"]), nil } func parsePostgresURL(url string) (map[string]string, error) { raw, err := pq.ParseURL(url) if err != nil { return nil, err } res := map[string]string{} unescaper := strings.NewReplacer(`\'`, `'`, `\\`, `\`) for _, keypair := range strings.Split(raw, " ") { parts := strings.SplitN(keypair, "=", 2) if len(parts) != 2 { panic(fmt.Sprintf("unexpected keypair %s from pq", keypair)) } key := parts[0] value := parts[1] // Undo all the transformations ParseURL did: remove wrapping // quotes and then unescape the escaped characters. value = strings.TrimPrefix(value, "'") value = strings.TrimSuffix(value, "'") value = unescaper.Replace(value) res[key] = value } return res, nil } // getDataSourceNames loads data source names from the config or from the // environment, if set. func (c *Config) getDataSourceNames() ([]string, error) { dsn := c.DataSourceNames var stringDsn []string if len(dsn) == 0 { stringDsn = append(stringDsn, strings.Split(os.Getenv("POSTGRES_EXPORTER_DATA_SOURCE_NAME"), ",")...) 
} else { for _, d := range dsn { stringDsn = append(stringDsn, string(d)) } } if len(stringDsn) == 0 { return nil, fmt.Errorf("cannot create postgres_exporter; neither postgres_exporter.data_source_name or $POSTGRES_EXPORTER_DATA_SOURCE_NAME is set") } return stringDsn, nil } func init() { integrations.RegisterIntegration(&Config{}) integrations_v2.RegisterLegacy(&Config{}, integrations_v2.TypeMultiplex, metricsutils.NewNamedShim("postgres")) } // New creates a new postgres_exporter integration. The integration scrapes // metrics from a postgres process. func New(log log.Logger, c *Config) (integrations.Integration, error) { dsn, err := c.getDataSourceNames() if err != nil { return nil, err } e := exporter.NewExporter( dsn, log, exporter.DisableDefaultMetrics(c.DisableDefaultMetrics), exporter.WithUserQueriesPath(c.QueryPath), exporter.DisableSettingsMetrics(c.DisableSettingsMetrics), exporter.AutoDiscoverDatabases(c.AutodiscoverDatabases), exporter.ExcludeDatabases(strings.Join(c.ExcludeDatabases, ",")), exporter.IncludeDatabases(strings.Join(c.IncludeDatabases, ",")), exporter.MetricPrefix("pg"), ) return integrations.NewCollectorIntegration(c.Name(), integrations.WithCollectors(e)), nil }
[ "\"POSTGRES_EXPORTER_DATA_SOURCE_NAME\"" ]
[]
[ "POSTGRES_EXPORTER_DATA_SOURCE_NAME" ]
[]
["POSTGRES_EXPORTER_DATA_SOURCE_NAME"]
go
1
0
tests/tests.py
__authors__ = ["Tobias Marschall", "Marcel Martin", "Johannes Köster"] __copyright__ = "Copyright 2015-2019, Johannes Köster" __email__ = "[email protected]" __license__ = "MIT" import sys import os import shutil from os.path import join from subprocess import call import tempfile import hashlib import urllib from shutil import rmtree, which from shlex import quote import pytest import subprocess from snakemake import snakemake from snakemake.shell import shell def dpath(path): """get path to a data file (relative to the directory this test lives in)""" return os.path.realpath(join(os.path.dirname(__file__), path)) def md5sum(filename): data = open(filename, "rb").read() return hashlib.md5(data).hexdigest() # test skipping def is_connected(): try: urllib.request.urlopen("http://www.google.com", timeout=1) return True except urllib.request.URLError: return False def is_ci(): return "CI" in os.environ def has_gcloud_service_key(): return "GCLOUD_SERVICE_KEY" in os.environ def has_gcloud_cluster(): return "GCLOUD_CLUSTER" in os.environ gcloud = pytest.mark.skipif( not is_connected() or not has_gcloud_service_key() or not has_gcloud_cluster(), reason="Skipping GCLOUD tests because not on " "CI, no inet connection or not logged " "in to gcloud.", ) connected = pytest.mark.skipif(not is_connected(), reason="no internet connection") ci = pytest.mark.skipif(not is_ci(), reason="not in CI") not_ci = pytest.mark.skipif(is_ci(), reason="skipped in CI") def copy(src, dst): if os.path.isdir(src): shutil.copytree(src, os.path.join(dst, os.path.basename(src))) else: shutil.copy(src, dst) def run( path, shouldfail=False, snakefile="Snakefile", subpath=None, no_tmpdir=False, check_md5=True, cores=3, set_pythonpath=True, cleanup=True, **params ): """ Test the Snakefile in path. There must be a Snakefile in the path and a subdirectory named expected-results. If cleanup is False, we return the temporary directory to the calling test for inspection, and the test should clean it up. """ if set_pythonpath: # Enforce current workdir (the snakemake source dir) to also be in PYTHONPATH # when subprocesses are invoked in the tempdir defined below. 
os.environ["PYTHONPATH"] = os.getcwd() else: del os.environ["PYTHONPATH"] results_dir = join(path, "expected-results") snakefile = join(path, snakefile) assert os.path.exists(snakefile) assert os.path.exists(results_dir) and os.path.isdir( results_dir ), "{} does not exist".format(results_dir) # If we need to further check results, we won't cleanup tmpdir tmpdir = next(tempfile._get_candidate_names()) tmpdir = os.path.join(tempfile.gettempdir(), "snakemake-%s" % tmpdir) os.mkdir(tmpdir) config = {} # handle subworkflow if subpath is not None: # set up a working directory for the subworkflow and pass it in `config` # for now, only one subworkflow is supported assert os.path.exists(subpath) and os.path.isdir( subpath ), "{} does not exist".format(subpath) subworkdir = os.path.join(tmpdir, "subworkdir") os.mkdir(subworkdir) # copy files for f in os.listdir(subpath): copy(os.path.join(subpath, f), subworkdir) config["subworkdir"] = subworkdir # copy files for f in os.listdir(path): print(f) copy(os.path.join(path, f), tmpdir) # run snakemake success = snakemake( snakefile, cores=cores, workdir=path if no_tmpdir else tmpdir, stats="stats.txt", config=config, verbose=True, **params ) if shouldfail: assert not success, "expected error on execution" else: assert success, "expected successful execution" for resultfile in os.listdir(results_dir): if resultfile in [".gitignore", ".gitkeep"] or not os.path.isfile( os.path.join(results_dir, resultfile) ): # this means tests cannot use directories as output files continue targetfile = join(tmpdir, resultfile) expectedfile = join(results_dir, resultfile) assert os.path.exists(targetfile), 'expected file "{}" not produced'.format( resultfile ) if check_md5: # if md5sum(targetfile) != md5sum(expectedfile): # import pdb; pdb.set_trace() if md5sum(targetfile) != md5sum(expectedfile): with open(targetfile) as target: content = target.read() assert False, 'wrong result produced for file "{}":\n{}'.format( resultfile, content ) if not cleanup: return tmpdir shutil.rmtree(tmpdir) def test_delete_all_output(): run(dpath("test_delete_all_output")) def test_github_issue_14(): """Add cleanup_scripts argument to allow the user to keep scripts""" # Return temporary directory for inspection - we should keep scripts here tmpdir = run(dpath("test_github_issue_14"), cleanup=False, cleanup_scripts=False) assert os.listdir(os.path.join(tmpdir, ".snakemake", "scripts")) shutil.rmtree(tmpdir) # And not here tmpdir = run(dpath("test_github_issue_14"), cleanup=False) assert not os.listdir(os.path.join(tmpdir, ".snakemake", "scripts")) shutil.rmtree(tmpdir) def test_issue956(): run(dpath("test_issue956")) def test01(): run(dpath("test01")) def test02(): run(dpath("test02")) def test03(): run(dpath("test03"), targets=["test.out"]) def test04(): run(dpath("test04"), targets=["test.out"]) def test05(): run(dpath("test05")) def test06(): run(dpath("test06"), targets=["test.bla.out"]) def test07(): run(dpath("test07"), targets=["test.out", "test2.out"]) def test08(): run(dpath("test08"), targets=["test.out", "test2.out"]) def test09(): run(dpath("test09"), shouldfail=True) def test10(): run(dpath("test10")) def test11(): run(dpath("test11")) def test12(): run(dpath("test12")) def test13(): run(dpath("test13")) def test14(): run(dpath("test14"), snakefile="Snakefile.nonstandard", cluster="./qsub") def test15(): run(dpath("test15")) def test_directory(): run( dpath("test_directory"), targets=[ "downstream", "symlinked_input", "child_to_input", "some/dir-child", "some/shadow", ], ) 
run(dpath("test_directory"), targets=["file_expecting_dir"], shouldfail=True) run(dpath("test_directory"), targets=["dir_expecting_file"], shouldfail=True) run(dpath("test_directory"), targets=["child_to_other"], shouldfail=True) def test_ancient(): run(dpath("test_ancient"), targets=["D", "old_file"]) def test_list_untracked(): run(dpath("test_list_untracked")) def test_report(): run(dpath("test_report"), report="report.html", check_md5=False) def test_dynamic(): run(dpath("test_dynamic")) def test_params(): run(dpath("test_params")) def test_same_wildcard(): run(dpath("test_same_wildcard")) def test_conditional(): run( dpath("test_conditional"), targets="test.out test.0.out test.1.out test.2.out".split(), ) def test_unpack_dict(): run(dpath("test_unpack_dict")) def test_unpack_list(): run(dpath("test_unpack_list")) def test_shell(): run(dpath("test_shell")) def test_temp(): run(dpath("test_temp"), cluster="./qsub", targets="test.realigned.bam".split()) def test_keyword_list(): run(dpath("test_keyword_list")) def test_subworkflows(): run(dpath("test_subworkflows"), subpath=dpath("test02")) def test_globwildcards(): run(dpath("test_globwildcards")) def test_local_import(): run(dpath("test_local_import")) def test_ruledeps(): run(dpath("test_ruledeps")) def test_persistent_dict(): try: import pytools run(dpath("test_persistent_dict")) except ImportError: pass @connected def test_url_include(): run(dpath("test_url_include")) def test_touch(): run(dpath("test_touch")) def test_config(): run(dpath("test_config")) def test_update_config(): run(dpath("test_update_config")) def test_wildcard_keyword(): run(dpath("test_wildcard_keyword")) def test_benchmark(): run(dpath("test_benchmark"), check_md5=False) def test_temp_expand(): run(dpath("test_temp_expand")) def test_wildcard_count_ambiguity(): run(dpath("test_wildcard_count_ambiguity")) def test_srcdir(): run(dpath("test_srcdir")) def test_multiple_includes(): run(dpath("test_multiple_includes")) def test_yaml_config(): run(dpath("test_yaml_config")) def test_remote(): run(dpath("test_remote"), cores=1) def test_cluster_sync(): run(dpath("test14"), snakefile="Snakefile.nonstandard", cluster_sync="./qsub") @pytest.mark.skip(reason="This does not work reliably in CircleCI.") def test_symlink_temp(): run(dpath("test_symlink_temp"), shouldfail=True) def test_empty_include(): run(dpath("test_empty_include")) def test_script(): run(dpath("test_script"), use_conda=True) def test_shadow(): run(dpath("test_shadow")) def test_shadow_prefix(): run(dpath("test_shadow_prefix"), shadow_prefix="shadowdir") run(dpath("test_shadow_prefix"), shadow_prefix="shadowdir", cluster="./qsub") def test_until(): run( dpath("test_until"), until=[ "leveltwo_first", # rule name "leveltwo_second.txt", # file name "second_wildcard", ], ) # wildcard rule def test_omitfrom(): run( dpath("test_omitfrom"), omit_from=[ "leveltwo_first", # rule name "leveltwo_second.txt", # file name "second_wildcard", ], ) # wildcard rule def test_nonstr_params(): run(dpath("test_nonstr_params")) def test_delete_output(): run(dpath("test_delete_output"), cores=1) def test_input_generator(): run(dpath("test_input_generator")) def test_symlink_time_handling(): # See Snakefile for notes on why this fails on some systems if os.utime in os.supports_follow_symlinks: run(dpath("test_symlink_time_handling")) def test_protected_symlink_output(): run(dpath("test_protected_symlink_output")) def test_issue328(): try: import pytools run(dpath("test_issue328"), forcerun=["split"]) except ImportError: # skip test 
if import fails pass def test_conda(): if conda_available(): run(dpath("test_conda"), use_conda=True) def test_conda_custom_prefix(): if conda_available(): run( dpath("test_conda_custom_prefix"), use_conda=True, conda_prefix="custom", set_pythonpath=False, ) def test_wrapper(): if conda_available(): run(dpath("test_wrapper"), use_conda=True) def conda_available(): return which("conda") def test_get_log_none(): run(dpath("test_get_log_none")) def test_get_log_both(): run(dpath("test_get_log_both")) def test_get_log_stderr(): run(dpath("test_get_log_stderr")) def test_get_log_stdout(): run(dpath("test_get_log_stdout")) def test_get_log_complex(): run(dpath("test_get_log_complex")) def test_spaces_in_fnames(): run( dpath("test_spaces_in_fnames"), # cluster="./qsub", targets=["test bam file realigned.bam"], printshellcmds=True, ) # TODO deactivate because of problems with moto and boto3. # def test_static_remote(): # import importlib # try: # importlib.reload(boto3) # importlib.reload(moto) # # only run the remote file test if the dependencies # # are installed, otherwise do nothing # run(dpath("test_static_remote"), cores=1) # except ImportError: # pass @connected def test_remote_ncbi_simple(): try: import Bio # only run the remote file test if the dependencies # are installed, otherwise do nothing run(dpath("test_remote_ncbi_simple")) except ImportError: pass @connected def test_remote_ncbi(): try: import Bio # only run the remote file test if the dependencies # are installed, otherwise do nothing run(dpath("test_remote_ncbi")) except ImportError: pass @ci def test_remote_irods(): run(dpath("test_remote_irods")) def test_deferred_func_eval(): run(dpath("test_deferred_func_eval")) def test_format_params(): run(dpath("test_format_params"), check_md5=True) def test_rule_defined_in_for_loop(): # issue 257 run(dpath("test_rule_defined_in_for_loop")) def test_issue381(): run(dpath("test_issue381")) def test_format_wildcards(): run(dpath("test_format_wildcards")) def test_with_parentheses(): run(dpath("test (with parentheses)")) def test_dup_out_patterns(): """Duplicate output patterns should emit an error Duplicate output patterns can be detected on the rule level """ run(dpath("test_dup_out_patterns"), shouldfail=True) def test_restartable_job_cmd_exit_1_no_restart(): """Test the restartable job feature on ``exit 1`` The shell snippet in the Snakemake file will fail the first time and succeed the second time. """ run( dpath("test_restartable_job_cmd_exit_1"), cluster="./qsub", restart_times=0, shouldfail=True, ) def test_restartable_job_cmd_exit_1_one_restart(): # Restarting once is enough run( dpath("test_restartable_job_cmd_exit_1"), cluster="./qsub", restart_times=1, printshellcmds=True, ) def test_restartable_job_qsub_exit_1(): """Test the restartable job feature when qsub fails The qsub in the sub directory will fail the first time and succeed the second time. """ # Even two consecutive times should fail as files are cleared run( dpath("test_restartable_job_qsub_exit_1"), cluster="./qsub", restart_times=0, shouldfail=True, ) run( dpath("test_restartable_job_qsub_exit_1"), cluster="./qsub", restart_times=0, shouldfail=True, ) # Restarting once is enough run( dpath("test_restartable_job_qsub_exit_1"), cluster="./qsub", restart_times=1, shouldfail=False, ) def test_threads(): run(dpath("test_threads"), cores=20) def test_dynamic_temp(): run(dpath("test_dynamic_temp")) # TODO this currently hangs. Has to be investigated (issue #660). 
# def test_ftp_immediate_close(): # try: # import ftputil # # # only run the remote file test if the dependencies # # are installed, otherwise do nothing # run(dpath("test_ftp_immediate_close")) # except ImportError: # pass def test_issue260(): run(dpath("test_issue260")) @not_ci def test_default_remote(): run( dpath("test_default_remote"), cores=1, default_remote_provider="S3Mocked", default_remote_prefix="test-remote-bucket", ) def test_run_namedlist(): run(dpath("test_run_namedlist")) @connected @not_ci def test_remote_gs(): run(dpath("test_remote_gs")) @pytest.mark.skip(reason="We need free azure access to test this in CircleCI.") @connected @ci def test_remote_azure(): run(dpath("test_remote_azure")) def test_remote_log(): run(dpath("test_remote_log"), shouldfail=True) @connected @pytest.mark.xfail def test_remote_http(): run(dpath("test_remote_http")) @connected @pytest.mark.xfail def test_remote_http_cluster(): run(dpath("test_remote_http"), cluster=os.path.abspath(dpath("test14/qsub"))) def test_profile(): run(dpath("test_profile")) @connected def test_singularity(): run(dpath("test_singularity"), use_singularity=True) def test_singularity_invalid(): run( dpath("test_singularity"), targets=["invalid.txt"], use_singularity=True, shouldfail=True, ) @connected def test_singularity_conda(): run(dpath("test_singularity_conda"), use_singularity=True, use_conda=True) def test_issue612(): run(dpath("test_issue612"), dryrun=True) def test_bash(): run(dpath("test_bash")) def test_inoutput_is_path(): run(dpath("test_inoutput_is_path")) def test_archive(): run(dpath("test_archive"), archive="workflow-archive.tar.gz") def test_log_input(): run(dpath("test_log_input")) @pytest.fixture(scope="module") def gcloud_cluster(): class Cluster: def __init__(self): self.cluster = os.environ["GCLOUD_CLUSTER"] self.bucket_name = "snakemake-testing-{}".format(self.cluster) shell( """ $GCLOUD container clusters create {self.cluster} --num-nodes 3 --scopes storage-rw --zone us-central1-a --machine-type f1-micro $GCLOUD container clusters get-credentials {self.cluster} --zone us-central1-a $GSUTIL mb gs://{self.bucket_name} """ ) def delete(self): shell( """ $GCLOUD container clusters delete {self.cluster} --zone us-central1-a --quiet || true $GSUTIL rm -r gs://{self.bucket_name} || true """ ) def run(self, test="test_kubernetes", **kwargs): try: run( dpath(test), kubernetes="default", default_remote_provider="GS", default_remote_prefix=self.bucket_name, no_tmpdir=True, **kwargs ) except Exception as e: shell( "for p in `kubectl get pods | grep ^snakejob- | cut -f 1 -d ' '`; do kubectl logs $p; done" ) raise e def reset(self): shell("$GSUTIL rm -r gs://{self.bucket_name}/* || true") cluster = Cluster() yield cluster cluster.delete() @gcloud @pytest.mark.skip( reason="reenable once we have figured out how to fail if available core hours per month are exceeded" ) @pytest.mark.xfail def test_gcloud_plain(gcloud_cluster): gcloud_cluster.reset() gcloud_cluster.run() @gcloud @pytest.mark.skip(reason="need a faster cloud compute instance to run this") def test_gcloud_conda(gcloud_cluster): gcloud_cluster.reset() gcloud_cluster.run(use_conda=True) @gcloud @pytest.mark.skip(reason="need a faster cloud compute instance to run this") def test_gcloud_singularity(gcloud_cluster): gcloud_cluster.reset() gcloud_cluster.run(use_singularity=True) @gcloud @pytest.mark.skip(reason="need a faster cloud compute instance to run this") def test_gcloud_conda_singularity(gcloud_cluster): gcloud_cluster.reset() 
gcloud_cluster.run(use_singularity=True, use_conda=True) @gcloud() @pytest.mark.skip(reason="need a faster cloud compute instance to run this") def test_issue1041(gcloud_cluster): gcloud_cluster.reset() gcloud_cluster.run(test="test_issue1041") @connected def test_cwl(): run(dpath("test_cwl")) @connected def test_cwl_singularity(): run(dpath("test_cwl"), use_singularity=True) def test_issue805(): run(dpath("test_issue805"), shouldfail=True) def test_pathlib(): run(dpath("test_pathlib")) def test_pathlib_missing_file(): run(dpath("test_pathlib_missing_file"), shouldfail=True) def test_group_jobs(): run(dpath("test_group_jobs"), cluster="./qsub") def test_group_job_fail(): run(dpath("test_group_job_fail"), cluster="./qsub", shouldfail=True) def test_pipes(): run(dpath("test_pipes")) def test_pipes_fail(): run(dpath("test_pipes_fail"), shouldfail=True) def test_validate(): run(dpath("test_validate")) def test_validate_fail(): run( dpath("test_validate"), configfiles=[dpath("test_validate/config.fail.yaml")], shouldfail=True, ) def test_issue854(): # output and benchmark have inconsistent wildcards # this should fail when parsing run(dpath("test_issue854"), shouldfail=True) def test_issue850(): run(dpath("test_issue850"), cluster="./qsub") def test_issue860(): run(dpath("test_issue860"), cluster="./qsub", targets=["done"]) def test_issue894(): run(dpath("test_issue894")) def test_issue584(): run(dpath("test_issue584")) def test_issue912(): run(dpath("test_issue912")) def test_job_properties(): run(dpath("test_job_properties"), cluster="./qsub.py") def test_issue916(): run(dpath("test_issue916")) def test_issue930(): run(dpath("test_issue930"), cluster="./qsub") def test_issue635(): run(dpath("test_issue635"), use_conda=True, check_md5=False) # TODO remove skip @pytest.mark.skip( reason="Temporarily disable until the stable container image becomes available again." ) def test_convert_to_cwl(): workdir = dpath("test_convert_to_cwl") # run(workdir, export_cwl=os.path.join(workdir, "workflow.cwl")) shell( "cd {workdir}; PYTHONPATH={src} python -m snakemake --export-cwl workflow.cwl", src=os.getcwd(), ) shell("cd {workdir}; cwltool --singularity workflow.cwl") assert os.path.exists(os.path.join(workdir, "test.out")) def test_issue1037(): run(dpath("test_issue1037"), dryrun=True, cluster="qsub", targets=["Foo_A.done"]) def test_issue1046(): run(dpath("test_issue1046")) def test_checkpoints(): run(dpath("test_checkpoints")) def test_checkpoints_dir(): run(dpath("test_checkpoints_dir")) def test_issue1092(): run(dpath("test_issue1092")) def test_issue1093(): run(dpath("test_issue1093"), use_conda=True) def test_issue958(): run(dpath("test_issue958"), cluster="dummy", dryrun=True) def test_issue471(): run(dpath("test_issue471")) def test_issue1085(): run(dpath("test_issue1085"), shouldfail=True) def test_issue1083(): run(dpath("test_issue1083"), use_singularity=True) def test_pipes2(): run(dpath("test_pipes2")) @pytest.mark.skip( reason="The AWS Access Key Id you provided does not exist in our records." 
) def test_tibanna(): workdir = dpath("test_tibanna") subprocess.check_call(["python", "cleanup.py"], cwd=workdir) run( workdir, use_conda=True, configfiles=[os.path.join(workdir, "config.json")], default_remote_prefix="snakemake-tibanna-test/1", tibanna_sfn="tibanna_unicorn_johannes", ) def test_expand_flag(): run(dpath("test_expand_flag"), shouldfail=True) def test_default_resources(): from snakemake.resources import DefaultResources run( dpath("test_default_resources"), default_resources=DefaultResources( ["mem_mb=max(2*input.size, 1000)", "disk_mb=max(2*input.size, 1000)"] ), ) def test_issue1284(): run(dpath("test_issue1284")) def test_issue1281(): run(dpath("test_issue1281")) def test_filegraph(): workdir = dpath("test_filegraph") dot_path = os.path.abspath("fg.dot") pdf_path = "fg.pdf" # make sure the calls work shell("cd {workdir}; python -m snakemake --filegraph > {dot_path}") # make sure the output can be interpreted by dot with open(dot_path, "rb") as dot_file, open(pdf_path, "wb") as pdf_file: pdf_file.write( subprocess.check_output(["dot", "-Tpdf"], stdin=dot_file, cwd=workdir) ) # make sure the generated pdf file is not empty assert os.stat(pdf_path).st_size > 0 def test_batch(): from snakemake.dag import Batch run(dpath("test_batch"), batch=Batch("aggregate", 1, 2)) def test_batch_final(): from snakemake.dag import Batch run(dpath("test_batch_final"), batch=Batch("aggregate", 1, 1)) def test_batch_fail(): from snakemake.dag import Batch run(dpath("test_batch"), batch=Batch("aggregate", 2, 2), shouldfail=True) def test_github_issue52(): run(dpath("test_github_issue52"), shouldfail=True) run(dpath("test_github_issue52"), snakefile="other.smk", shouldfail=True) def test_github_issue78(): run(dpath("test_github_issue78"), use_singularity=True) def test_github_issue105(): run(dpath("test_github_issue105")) def test_output_file_cache(): test_path = dpath("test_output_file_cache") os.environ["SNAKEMAKE_OUTPUT_CACHE"] = os.path.join(test_path, "cache") run(test_path, cache=["a", "b", "c"]) run(test_path, cache=["invalid_multi"], targets="invalid1.txt", shouldfail=True) def test_output_file_cache_remote(): test_path = dpath("test_output_file_cache_remote") os.environ["SNAKEMAKE_OUTPUT_CACHE"] = "cache" run( test_path, cache=["a", "b", "c"], default_remote_provider="S3Mocked", default_remote_prefix="test-remote-bucket", )
[]
[]
[ "GCLOUD_CLUSTER", "PYTHONPATH", "SNAKEMAKE_OUTPUT_CACHE" ]
[]
["GCLOUD_CLUSTER", "PYTHONPATH", "SNAKEMAKE_OUTPUT_CACHE"]
python
3
0
pkg/auth/manager.go
package auth import ( "errors" "fmt" "github.com/golang-jwt/jwt/v4" "os" "time" ) type Manager struct { signinKey []byte tokenTTL time.Duration } func NewManager(tokenTTL time.Duration) (*Manager, error) { var key string if key = os.Getenv("SIGNINKEY"); key == "" { return nil, errors.New("empty signin key passed") } return &Manager{signinKey: []byte(key), tokenTTL: tokenTTL}, nil } func (m *Manager) CreateJWT(userId string) (string, error) { claims := jwt.MapClaims{} claims["exp"] = time.Now().Add(m.tokenTTL).Unix() claims["iss_at"] = time.Now().Unix() claims["user_id"] = userId token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims) return token.SignedString(m.signinKey) } func (m *Manager) ParseJWT(tokenString string) (user_id string, err error) { token, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) { if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok { return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"]) } return m.signinKey, nil }) if err != nil { return "", err } claims, ok := token.Claims.(jwt.MapClaims) if !ok || !token.Valid || claims["user_id"] == nil { return "", fmt.Errorf("error get user claims from token") } return claims["user_id"].(string), nil }
[ "\"SIGNINKEY\"" ]
[]
[ "SIGNINKEY" ]
[]
["SIGNINKEY"]
go
1
0
credulous.go
package main import ( "crypto/x509" "encoding/pem" "errors" "fmt" "io/ioutil" "log" "os" "path" "path/filepath" "regexp" "strings" "code.google.com/p/go.crypto/ssh" "code.google.com/p/gopass" "github.com/codegangsta/cli" ) const ENV_PATTERN string = "^[A-Za-z_][A-Za-z0-9_]*=.*" func decryptPEM(pemblock *pem.Block, filename string) ([]byte, error) { var err error if _, err = fmt.Fprintf(os.Stderr, "Enter passphrase for %s: ", filename); err != nil { return []byte(""), err } // we already emit the prompt to stderr; GetPass only emits to stdout var passwd string passwd, err = gopass.GetPass("") fmt.Fprintln(os.Stderr, "") if err != nil { return []byte(""), err } var decryptedBytes []byte if decryptedBytes, err = x509.DecryptPEMBlock(pemblock, []byte(passwd)); err != nil { return []byte(""), err } pemBytes := pem.Block{ Type: "RSA PRIVATE KEY", Bytes: decryptedBytes, } decryptedPEM := pem.EncodeToMemory(&pemBytes) return decryptedPEM, nil } func getPrivateKey(c *cli.Context) (filename string) { if c.String("key") == "" { filename = filepath.Join(os.Getenv("HOME"), "/.ssh/id_rsa") } else { filename = c.String("key") } return filename } func splitUserAndAccount(arg string) (string, string, error) { atpos := strings.LastIndex(arg, "@") if atpos < 1 { err := errors.New("Invalid account format; please specify <username>@<account>") return "", "", err } // pull off everything before the last '@' return arg[atpos+1:], arg[0:atpos], nil } func getAccountAndUserName(c *cli.Context) (string, string, error) { if len(c.Args()) > 0 { user, acct, err := splitUserAndAccount(c.Args()[0]) if err != nil { return "", "", err } return user, acct, nil } if c.String("credentials") != "" { user, acct, err := splitUserAndAccount(c.String("credentials")) if err != nil { return "", "", err } return user, acct, nil } else { return c.String("account"), c.String("username"), nil } } func parseUserAndAccount(c *cli.Context) (username string, account string, err error) { if (c.String("username") == "" || c.String("account") == "") && c.Bool("force") { err = errors.New("Must specify both username and account with force") return "", "", err } // if username OR account were specified, but not both, complain if (c.String("username") != "" && c.String("account") == "") || (c.String("username") == "" && c.String("account") != "") { if c.Bool("force") { err = errors.New("Must specify both username and account for force save") } else { err = errors.New("Must use force save when specifying username or account") } return "", "", err } // if username/account were specified, but force wasn't set, complain if c.String("username") != "" && c.String("account") != "" { if !c.Bool("force") { err = errors.New("Cannot specify username and/or account without force") return "", "", err } else { log.Print("WARNING: saving credentials without verifying username or account alias") username = c.String("username") account = c.String("account") } } return username, account, nil } func parseEnvironmentArgs(c *cli.Context) (map[string]string, error) { if len(c.StringSlice("env")) == 0 { return nil, nil } envMap := make(map[string]string) for _, arg := range c.StringSlice("env") { match, err := regexp.Match(ENV_PATTERN, []byte(arg)) if err != nil { return nil, err } if !match { log.Print("WARNING: Skipping env argument " + arg + " -- not in NAME=value format") continue } parts := strings.SplitN(arg, "=", 2) envMap[parts[0]] = parts[1] } return envMap, nil } func readSSHPubkeyFile(filename string) (pubkey ssh.PublicKey, err error) { pubkeyString, err := 
ioutil.ReadFile(filename) if err != nil { return nil, err } pubkey, _, _, _, err = ssh.ParseAuthorizedKey([]byte(pubkeyString)) if err != nil { return nil, err } return pubkey, nil } func parseKeyArgs(c *cli.Context) (pubkeys []ssh.PublicKey, err error) { // no args, so just use the default if len(c.StringSlice("key")) == 0 { pubkey, err := readSSHPubkeyFile(filepath.Join(os.Getenv("HOME"), "/.ssh/id_rsa.pub")) if err != nil { return nil, err } pubkeys = append(pubkeys, pubkey) return pubkeys, nil } for _, arg := range c.StringSlice("key") { pubkey, err := readSSHPubkeyFile(arg) if err != nil { return nil, err } pubkeys = append(pubkeys, pubkey) } return pubkeys, nil } // parseLifetimeArgs attempts to be a little clever in determining what credential // lifetime you've chosen. It returns a number of hours and an error. It assumes that // the argument was passed in as hours. func parseLifetimeArgs(c *cli.Context) (lifetime int, err error) { // the default is zero, which is our default if c.Int("lifetime") < 0 { return 0, nil } return c.Int("lifetime"), nil } func parseRepoArgs(c *cli.Context) (repo string, err error) { // the default is 'local' which is set below, so not much to do here if c.String("repo") == "local" { repo = path.Join(getRootPath(), "local") } else { repo = c.String("repo") } return repo, nil } func parseSaveArgs(c *cli.Context) (cred Credential, username, account string, pubkeys []ssh.PublicKey, lifetime int, repo string, err error) { pubkeys, err = parseKeyArgs(c) if err != nil { return Credential{}, "", "", nil, 0, "", err } username, account, err = parseUserAndAccount(c) if err != nil { return Credential{}, "", "", nil, 0, "", err } envmap, err := parseEnvironmentArgs(c) if err != nil { return Credential{}, "", "", nil, 0, "", err } lifetime, err = parseLifetimeArgs(c) if err != nil { return Credential{}, "", "", nil, 0, "", err } repo, err = parseRepoArgs(c) if err != nil { return Credential{}, "", "", nil, 0, "", err } AWSAccessKeyId := os.Getenv("AWS_ACCESS_KEY_ID") AWSSecretAccessKey := os.Getenv("AWS_SECRET_ACCESS_KEY") if AWSAccessKeyId == "" || AWSSecretAccessKey == "" { err := errors.New("Can't save, no credentials in the environment") if err != nil { return Credential{}, "", "", nil, 0, "", err } } cred = Credential{ KeyId: AWSAccessKeyId, SecretKey: AWSSecretAccessKey, EnvVars: envmap, } return cred, username, account, pubkeys, lifetime, repo, nil } func main() { app := cli.NewApp() app.Name = "credulous" app.Usage = "Secure AWS Credential Management" app.Version = "0.2.2" app.Commands = []cli.Command{ { Name: "save", Usage: "Save AWS credentials", Flags: []cli.Flag{ cli.StringSliceFlag{ Name: "key, k", Value: &cli.StringSlice{}, Usage: "\n SSH public keys for encryption", }, cli.StringSliceFlag{ Name: "env, e", Value: &cli.StringSlice{}, Usage: "\n Environment variables to set in the form VAR=value", }, cli.IntFlag{ Name: "lifetime, l", Value: 0, Usage: "\n Credential lifetime in seconds (0 means forever)", }, cli.BoolFlag{ Name: "force, f", Usage: "\n Force saving without validating username or account." 
+ "\n You MUST specify -u username -a account", }, cli.StringFlag{ Name: "username, u", Value: "", Usage: "\n Username (for use with '--force')", }, cli.StringFlag{ Name: "account, a", Value: "", Usage: "\n Account alias (for use with '--force')", }, cli.StringFlag{ Name: "repo, r", Value: "local", Usage: "\n Repository location ('local' by default)", }, }, Action: func(c *cli.Context) { cred, username, account, pubkeys, lifetime, repo, err := parseSaveArgs(c) panic_the_err(err) err = SaveCredentials(SaveData{ cred: cred, username: username, alias: account, pubkeys: pubkeys, lifetime: lifetime, force: c.Bool("force"), repo: repo, }) panic_the_err(err) }, }, { Name: "source", Usage: "Source AWS credentials", Flags: []cli.Flag{ cli.StringFlag{ Name: "account, a", Value: "", Usage: "\n AWS Account alias or id", }, cli.StringFlag{ Name: "key, k", Value: "", Usage: "\n SSH private key", }, cli.StringFlag{ Name: "username, u", Value: "", Usage: "\n IAM User", }, cli.StringFlag{ Name: "credentials, c", Value: "", Usage: "\n Credentials, for example username@account", }, cli.BoolFlag{ Name: "force, f", Usage: "\n Force sourcing of credentials without validating username or account", }, cli.StringFlag{ Name: "repo, r", Value: "local", Usage: "\n Repository location ('local' by default)", }, }, Action: func(c *cli.Context) { keyfile := getPrivateKey(c) account, username, err := getAccountAndUserName(c) if err != nil { panic_the_err(err) } repo, err := parseRepoArgs(c) if err != nil { panic_the_err(err) } creds, err := RetrieveCredentials(repo, account, username, keyfile) if err != nil { panic_the_err(err) } if !c.Bool("force") { err = creds.ValidateCredentials(account, username) if err != nil { panic_the_err(err) } } creds.Display(os.Stdout) }, }, { Name: "current", Usage: "Show the username and alias of the currently-loaded credentials", Action: func(c *cli.Context) { AWSAccessKeyId := os.Getenv("AWS_ACCESS_KEY_ID") AWSSecretAccessKey := os.Getenv("AWS_SECRET_ACCESS_KEY") if AWSAccessKeyId == "" || AWSSecretAccessKey == "" { err := errors.New("No amazon credentials are currently in your environment") panic_the_err(err) } cred := Credential{ KeyId: AWSAccessKeyId, SecretKey: AWSSecretAccessKey, } username, alias, err := getAWSUsernameAndAlias(cred) if err != nil { panic_the_err(err) } fmt.Printf("%s@%s\n", username, alias) }, }, { Name: "display", Usage: "Display loaded AWS credentials", Action: func(c *cli.Context) { AWSAccessKeyId := os.Getenv("AWS_ACCESS_KEY_ID") AWSSecretAccessKey := os.Getenv("AWS_SECRET_ACCESS_KEY") fmt.Printf("AWS_ACCESS_KEY_ID: %s\n", AWSAccessKeyId) fmt.Printf("AWS_SECRET_ACCESS_KEY: %s\n", AWSSecretAccessKey) }, }, { Name: "list", Usage: "List available AWS credentials", Action: func(c *cli.Context) { rootDir, err := os.Open(getRootPath()) if err != nil { panic_the_err(err) } set, err := listAvailableCredentials(rootDir) if err != nil { panic_the_err(err) } for _, cred := range set { fmt.Println(cred) } }, }, { Name: "rotate", Usage: "Rotate current AWS credentials, deleting the oldest", Flags: []cli.Flag{ cli.IntFlag{ Name: "lifetime, l", Value: 0, Usage: "\n New credential lifetime in seconds (0 means forever)", }, cli.StringSliceFlag{ Name: "key, k", Value: &cli.StringSlice{}, Usage: "\n SSH public keys for encryption", }, cli.StringSliceFlag{ Name: "env, e", Value: &cli.StringSlice{}, Usage: "\n Environment variables to set in the form VAR=value", }, cli.StringFlag{ Name: "repo, r", Value: "local", Usage: "\n Repository location ('local' by default)", }, }, Action: 
func(c *cli.Context) { cred, _, _, pubkeys, lifetime, repo, err := parseSaveArgs(c) panic_the_err(err) username, account, err := getAWSUsernameAndAlias(cred) panic_the_err(err) err = (&cred).rotateCredentials(username) panic_the_err(err) err = SaveCredentials(SaveData{ cred: cred, username: username, alias: account, pubkeys: pubkeys, lifetime: lifetime, force: c.Bool("force"), repo: repo, }) panic_the_err(err) }, }, } app.Run(os.Args) } func rotate(cred Credential) (err error) { return nil }
[ "\"HOME\"", "\"HOME\"", "\"AWS_ACCESS_KEY_ID\"", "\"AWS_SECRET_ACCESS_KEY\"", "\"AWS_ACCESS_KEY_ID\"", "\"AWS_SECRET_ACCESS_KEY\"", "\"AWS_ACCESS_KEY_ID\"", "\"AWS_SECRET_ACCESS_KEY\"" ]
[]
[ "HOME", "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY" ]
[]
["HOME", "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"]
go
3
0
utils/common.py
import os import random from os import mkdir from os.path import exists from shutil import rmtree from tarfile import open as tar_open from typing import List import dgl import numpy as np import torch from tqdm.auto import tqdm SOS = '<SOS>' EOS = '<EOS>' PAD = '<PAD>' UNK = '<UNK>' NAN = 'NAN' METHOD_NAME = 'METHOD_NAME' SELF = '<SELF>' def get_device() -> torch.device: # CUDA for PyTorch use_cuda = torch.cuda.is_available() device = torch.device('cuda:0' if use_cuda else 'cpu') return device def fix_seed(seed: int = 7) -> None: os.environ['PYTHONHASHSEED'] = str(seed) random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) dgl.random.seed(seed) if torch.cuda.is_available(): torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False def extract_tar_gz(tar_path: str, extract_path: str) -> None: def tqdm_progress(members): extract_progress_bar = tqdm(total=len(list(members.getnames()))) for member in members: extract_progress_bar.update() yield member extract_progress_bar.close() with tar_open(tar_path, 'r:gz') as tarball: tarball.extractall(extract_path, members=tqdm_progress(tarball)) def create_folder(path: str, is_clean: bool = True) -> None: if is_clean and exists(path): rmtree(path) if not exists(path): mkdir(path) def segment_sizes_to_slices(sizes: List) -> List: cum_sums = np.cumsum(sizes) start_of_segments = np.append([0], cum_sums[:-1]) return [slice(start, end) for start, end in zip(start_of_segments, cum_sums)] def is_step_match(current_step: int, template: int, ignore_zero: bool = True) -> bool: match_template = template != -1 and current_step % template == 0 if ignore_zero: return match_template and current_step != 0 return match_template
[]
[]
[ "PYTHONHASHSEED" ]
[]
["PYTHONHASHSEED"]
python
1
0
habitat/core/utils.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import json from typing import List import numpy as np import quaternion from habitat.utils.geometry_utils import quaternion_to_list # Internals from inner json library needed for patching functionality in # DatasetFloatJSONEncoder. try: from _json import encode_basestring_ascii except ImportError: encode_basestring_ascii = None try: from _json import encode_basestring except ImportError: encode_basestring = None def tile_images(images: List[np.ndarray]) -> np.ndarray: r"""Tile multiple images into single image Args: images: list of images where each image has dimension (height x width x channels) Returns: tiled image (new_height x width x channels) """ assert len(images) > 0, "empty list of images" np_images = np.asarray(images) n_images, height, width, n_channels = np_images.shape new_height = int(np.ceil(np.sqrt(n_images))) new_width = int(np.ceil(float(n_images) / new_height)) # pad with empty images to complete the rectangle np_images = np.array( images + [images[0] * 0 for _ in range(n_images, new_height * new_width)] ) # img_HWhwc out_image = np_images.reshape( new_height, new_width, height, width, n_channels ) # img_HhWwc out_image = out_image.transpose(0, 2, 1, 3, 4) # img_Hh_Ww_c out_image = out_image.reshape( new_height * height, new_width * width, n_channels ) return out_image def not_none_validator(self, attribute, value): if value is None: raise ValueError(f"Argument '{attribute.name}' must be set") def try_cv2_import(): r"""The PyRobot python3 version which is a dependency of Habitat-PyRobot integration relies on ROS running in python2.7. In order to import cv2 in python3 we need to remove the python2.7 path from sys.path. To use the Habitat-PyRobot integration the user needs to export environment variable ROS_PATH which will look something like: /opt/ros/kinetic/lib/python2.7/dist-packages """ import sys import os ros_path = os.environ.get("ROS_PATH") if ros_path is not None and ros_path in sys.path: sys.path.remove(ros_path) import cv2 sys.path.append(ros_path) else: import cv2 return cv2 class Singleton(type): _instances = {} def __call__(cls, *args, **kwargs): if cls not in cls._instances: cls._instances[cls] = super(Singleton, cls).__call__( *args, **kwargs ) return cls._instances[cls] def center_crop(obs, new_shape): top_left = ( (obs.shape[0] // 2) - (new_shape[0] // 2), (obs.shape[1] // 2) - (new_shape[1] // 2), ) bottom_right = ( (obs.shape[0] // 2) + (new_shape[0] // 2), (obs.shape[1] // 2) + (new_shape[1] // 2), ) obs = obs[top_left[0] : bottom_right[0], top_left[1] : bottom_right[1], :] return obs class DatasetFloatJSONEncoder(json.JSONEncoder): r"""JSON Encoder that sets a float precision for a space saving purpose and encodes ndarray and quaternion. The encoder is compatible with JSON version 2.0.9. """ def default(self, object): # JSON doesn't support numpy ndarray and quaternion if isinstance(object, np.ndarray): return object.tolist() if isinstance(object, np.quaternion): return quaternion_to_list(object) return object.__dict__ # Overriding method to inject own `_repr` function for floats with needed # precision. 
def iterencode(self, o, _one_shot=False): if self.check_circular: markers = {} else: markers = None if self.ensure_ascii: _encoder = encode_basestring_ascii else: _encoder = encode_basestring def floatstr( o, allow_nan=self.allow_nan, _repr=lambda x: format(x, ".5f"), _inf=float("inf"), _neginf=-float("inf"), ): if o != o: text = "NaN" elif o == _inf: text = "Infinity" elif o == _neginf: text = "-Infinity" else: return _repr(o) if not allow_nan: raise ValueError( "Out of range float values are not JSON compliant: " + repr(o) ) return text _iterencode = json.encoder._make_iterencode( markers, self.default, _encoder, self.indent, floatstr, self.key_separator, self.item_separator, self.sort_keys, self.skipkeys, _one_shot, ) return _iterencode(o, 0)
[]
[]
[ "ROS_PATH" ]
[]
["ROS_PATH"]
python
1
0
providers/github/github_test.go
package github_test import ( "fmt" "os" "testing" "github.com/AchievementNetwork/goth" "github.com/AchievementNetwork/goth/providers/github" "github.com/stretchr/testify/assert" ) func Test_New(t *testing.T) { t.Parallel() a := assert.New(t) provider := githubProvider() a.Equal(provider.ClientKey, os.Getenv("GITHUB_KEY")) a.Equal(provider.Secret, os.Getenv("GITHUB_SECRET")) a.Equal(provider.CallbackURL, "/foo") } func Test_NewCustomisedURL(t *testing.T) { t.Parallel() a := assert.New(t) p := urlCustomisedURLProvider() session, err := p.BeginAuth("test_state") s := session.(*github.Session) a.NoError(err) a.Contains(s.AuthURL, "http://authURL") } func Test_Implements_Provider(t *testing.T) { t.Parallel() a := assert.New(t) a.Implements((*goth.Provider)(nil), githubProvider()) } func Test_BeginAuth(t *testing.T) { t.Parallel() a := assert.New(t) provider := githubProvider() session, err := provider.BeginAuth("test_state") s := session.(*github.Session) a.NoError(err) a.Contains(s.AuthURL, "github.com/login/oauth/authorize") a.Contains(s.AuthURL, fmt.Sprintf("client_id=%s", os.Getenv("GITHUB_KEY"))) a.Contains(s.AuthURL, "state=test_state") a.Contains(s.AuthURL, "scope=user") } func Test_SessionFromJSON(t *testing.T) { t.Parallel() a := assert.New(t) provider := githubProvider() s, err := provider.UnmarshalSession(`{"AuthURL":"http://github.com/auth_url","AccessToken":"1234567890"}`) a.NoError(err) session := s.(*github.Session) a.Equal(session.AuthURL, "http://github.com/auth_url") a.Equal(session.AccessToken, "1234567890") } func githubProvider() *github.Provider { return github.New(os.Getenv("GITHUB_KEY"), os.Getenv("GITHUB_SECRET"), "/foo", "user") } func urlCustomisedURLProvider() *github.Provider { return github.NewCustomisedURL(os.Getenv("GITHUB_KEY"), os.Getenv("GITHUB_SECRET"), "/foo", "http://authURL", "http://tokenURL", "http://profileURL", "http://emailURL") }
[ "\"GITHUB_KEY\"", "\"GITHUB_SECRET\"", "\"GITHUB_KEY\"", "\"GITHUB_KEY\"", "\"GITHUB_SECRET\"", "\"GITHUB_KEY\"", "\"GITHUB_SECRET\"" ]
[]
[ "GITHUB_KEY", "GITHUB_SECRET" ]
[]
["GITHUB_KEY", "GITHUB_SECRET"]
go
2
0
main.go
package main import ( "flag" "fmt" "log" "net/http" "os" ) func main() { var ( flagSet = flag.NewFlagSet("sink", flag.ExitOnError) ip = flagSet.String("ip", os.Getenv("DOCKER_IP"), "local ipv4 to report for `/latest/meta-data/local-ipv4`") port = flagSet.Int("port", 8080, "port to listen to") ) if err := flagSet.Parse(os.Args[1:]); err != nil { log.Fatal(err) } log.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", *port), http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { w.Header().Set("Connection", "Close") fmt.Fprintf(w, "%s", *ip) }))) }
[ "\"DOCKER_IP\"" ]
[]
[ "DOCKER_IP" ]
[]
["DOCKER_IP"]
go
1
0
algorithms/144.BinaryTreePreorderTraversal/btree-144.go
package main import "fmt" type TreeNode struct { Val int Left *TreeNode Right *TreeNode } /** Recursive approach: visit the current node first, then the left subtree, then the right subtree */ //func preorderTraversal(root *TreeNode) []int { // result := make([]int,0,1) // // if root == nil { // return result // } // // result = append(result,root.Val) // if root.Left != nil { // result = append(result,preorderTraversal(root.Left)...) // } // // // if root.Right != nil { // result = append(result,preorderTraversal(root.Right)...) // } // // return result //} /** Stack-based implementation: recursion is essentially pushing onto a stack */ func preorderTraversal(root *TreeNode) []int { result := make([]int, 0, 0) stack := &Stack{ Data: make([]*TreeNode, 0, 1), Size: 0, } stack.Push(root) node := stack.Pop() for node != nil { result = append(result, node.Val) if node.Right != nil { stack.Push(node.Right) } if node.Left != nil { stack.Push(node.Left) } node = stack.Pop() } return result } type Stack struct { Data []*TreeNode Size int } func (s *Stack) Push(val *TreeNode) bool { s.Data = append(s.Data, val) s.Size++ return true } func (s *Stack) Pop() *TreeNode { if s.Size == 0 { return nil } s.Size-- ret := s.Data[s.Size] s.Data = s.Data[:s.Size] return ret } func main() { data := &TreeNode{ Val: 3, Left: &TreeNode{ Val: 1, Left: nil, Right: nil, }, Right: &TreeNode{ Val: 2, Left: nil, Right: nil, }, } d := preorderTraversal(data) fmt.Println(d) }
[]
[]
[]
[]
[]
go
null
null
null
recognition/partial_fc/mxnet/evaluation/ijb.py
import argparse import os import pickle import timeit import warnings from pathlib import Path import cv2 import matplotlib import matplotlib.pyplot as plt import mxnet as mx import numpy as np import pandas as pd import sklearn from menpo.visualize.viewmatplotlib import sample_colours_from_colourmap from mxnet.gluon.data import Dataset, DataLoader from prettytable import PrettyTable from skimage import transform as trans from sklearn import preprocessing from sklearn.metrics import roc_curve, auc from tqdm import tqdm matplotlib.use('Agg') warnings.filterwarnings("ignore") parser = argparse.ArgumentParser(description='do ijb test') # general parser.add_argument('--model-prefix', default='', help='path to load model.') parser.add_argument('--model-epoch', default=1, type=int, help='') parser.add_argument('--image-path', default='', type=str, help='') parser.add_argument('--result-dir', default='.', type=str, help='') parser.add_argument('--gpu', default='0', type=str, help='gpu id') parser.add_argument('--batch-size', default=128, type=int, help='') parser.add_argument('--job', default='insightface', type=str, help='job name') parser.add_argument('-es', '--emb-size', type=int, help='embedding size') parser.add_argument('--target', default='IJBC', type=str, help='target, set to IJBC or IJBB') args = parser.parse_args() os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu target = args.target model_path = args.model_prefix image_path = args.image_path result_dir = args.result_dir epoch = args.model_epoch use_norm_score = True # if Ture, TestMode(N1) use_detector_score = True # if Ture, TestMode(D1) use_flip_test = True # if Ture, TestMode(F1) job = args.job batch_size = args.batch_size class DatasetIJB(Dataset): def __init__(self, root, lines, align=True): self.src = np.array( [[30.2946, 51.6963], [65.5318, 51.5014], [48.0252, 71.7366], [33.5493, 92.3655], [62.7299, 92.2041]], dtype=np.float32) self.src[:, 0] += 8.0 self.lines = lines self.img_root = root self.align = align def __len__(self): return len(self.lines) def __getitem__(self, idx): each_line = self.lines[idx] name_lmk_score = each_line.strip().split(' ') # "name lmk score" img_name = os.path.join(self.img_root, name_lmk_score[0]) img = cv2.imread(img_name) if self.align: landmark = np.array([float(x) for x in name_lmk_score[1:-1]], dtype=np.float32) landmark = landmark.reshape((5, 2)) # assert landmark.shape[0] == 68 or landmark.shape[0] == 5 assert landmark.shape[1] == 2 if landmark.shape[0] == 68: landmark5 = np.zeros((5, 2), dtype=np.float32) landmark5[0] = (landmark[36] + landmark[39]) / 2 landmark5[1] = (landmark[42] + landmark[45]) / 2 landmark5[2] = landmark[30] landmark5[3] = landmark[48] landmark5[4] = landmark[54] else: landmark5 = landmark # tform = trans.SimilarityTransform() tform.estimate(landmark5, self.src) # M = tform.params[0:2, :] img = cv2.warpAffine(img, M, (112, 112), borderValue=0.0) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) img_flip = np.fliplr(img) img = np.transpose(img, (2, 0, 1)) # 3*112*112, RGB img_flip = np.transpose(img_flip, (2, 0, 1)) input_blob = np.zeros((2, 3, 112, 112), dtype=np.uint8) input_blob[0] = img input_blob[1] = img_flip return mx.nd.array(input_blob) def extract_parallel(prefix, epoch, dataset, batch_size, size): # init model_list = list() num_ctx = len(os.environ['CUDA_VISIBLE_DEVICES'].split(",")) num_iter = 0 feat_mat = mx.nd.zeros(shape=(len(dataset), 2 * size)) def batchify_fn(data): return mx.nd.concat(*data, dim=0) data_loader = DataLoader(dataset, batch_size, last_batch='keep', 
num_workers=8, thread_pool=True, prefetch=16, batchify_fn=batchify_fn) symbol, arg_params, aux_params = mx.module.module.load_checkpoint( prefix, epoch) all_layers = symbol.get_internals() symbol = all_layers['fc1_output'] # init model list for i in range(num_ctx): model = mx.mod.Module(symbol, context=mx.gpu(i), label_names=None) model.bind(for_training=False, data_shapes=[('data', (2 * batch_size, 3, 112, 112))]) model.set_params(arg_params, aux_params) model_list.append(model) # extract parallel and async num_model = len(model_list) for image in tqdm(data_loader): data_batch = mx.io.DataBatch(data=(image, )) model_list[num_iter % num_model].forward(data_batch, is_train=False) feat = model_list[num_iter % num_model].get_outputs(merge_multi_context=True)[0] feat = mx.nd.L2Normalization(feat) feat = mx.nd.reshape(feat, (-1, size * 2)) feat_mat[batch_size * num_iter:batch_size * num_iter + feat.shape[0], :] = feat.as_in_context(mx.cpu()) num_iter += 1 #if num_iter % 20 == 0: # mx.nd.waitall() return feat_mat.asnumpy() # 将一个list尽量均分成n份,限制len(list)==n,份数大于原list内元素个数则分配空list[] def divideIntoNstrand(listTemp, n): twoList = [[] for i in range(n)] for i, e in enumerate(listTemp): twoList[i % n].append(e) return twoList def read_template_media_list(path): ijb_meta = pd.read_csv(path, sep=' ', header=None).values templates = ijb_meta[:, 1].astype(np.int) medias = ijb_meta[:, 2].astype(np.int) return templates, medias def read_template_pair_list(path): pairs = pd.read_csv(path, sep=' ', header=None).values t1 = pairs[:, 0].astype(np.int) t2 = pairs[:, 1].astype(np.int) label = pairs[:, 2].astype(np.int) return t1, t2, label def read_image_feature(path): with open(path, 'rb') as fid: img_feats = pickle.load(fid) return img_feats def image2template_feature(img_feats=None, templates=None, medias=None): # ========================================================== # 1. face image feature l2 normalization. img_feats:[number_image x feats_dim] # 2. compute media feature. # 3. compute template feature. # ========================================================== unique_templates = np.unique(templates) template_feats = np.zeros((len(unique_templates), img_feats.shape[1])) for count_template, uqt in enumerate(unique_templates): (ind_t, ) = np.where(templates == uqt) face_norm_feats = img_feats[ind_t] face_medias = medias[ind_t] unique_medias, unique_media_counts = np.unique(face_medias, return_counts=True) media_norm_feats = [] for u, ct in zip(unique_medias, unique_media_counts): (ind_m, ) = np.where(face_medias == u) if ct == 1: media_norm_feats += [face_norm_feats[ind_m]] else: # image features from the same video will be aggregated into one feature media_norm_feats += [ np.mean(face_norm_feats[ind_m], axis=0, keepdims=True) ] media_norm_feats = np.array(media_norm_feats) # media_norm_feats = media_norm_feats / np.sqrt(np.sum(media_norm_feats ** 2, -1, keepdims=True)) template_feats[count_template] = np.sum(media_norm_feats, axis=0) if count_template % 2000 == 0: print('Finish Calculating {} template features.'.format( count_template)) # template_norm_feats = template_feats / np.sqrt(np.sum(template_feats ** 2, -1, keepdims=True)) template_norm_feats = sklearn.preprocessing.normalize(template_feats) # print(template_norm_feats.shape) return template_norm_feats, unique_templates # In[ ]: def verification(template_norm_feats=None, unique_templates=None, p1=None, p2=None): # ========================================================== # Compute set-to-set Similarity Score. 
# ========================================================== template2id = np.zeros((max(unique_templates) + 1, 1), dtype=int) for count_template, uqt in enumerate(unique_templates): template2id[uqt] = count_template score = np.zeros((len(p1), )) # save cosine distance between pairs total_pairs = np.array(range(len(p1))) batchsize = 100000 # small batchsize instead of all pairs in one batch due to the memory limiation sublists = [ total_pairs[i:i + batchsize] for i in range(0, len(p1), batchsize) ] total_sublists = len(sublists) for c, s in enumerate(sublists): feat1 = template_norm_feats[template2id[p1[s]]] feat2 = template_norm_feats[template2id[p2[s]]] similarity_score = np.sum(feat1 * feat2, -1) score[s] = similarity_score.flatten() if c % 10 == 0: print('Finish {}/{} pairs.'.format(c, total_sublists)) return score # In[ ]: def verification2(template_norm_feats=None, unique_templates=None, p1=None, p2=None): template2id = np.zeros((max(unique_templates) + 1, 1), dtype=int) for count_template, uqt in enumerate(unique_templates): template2id[uqt] = count_template score = np.zeros((len(p1), )) # save cosine distance between pairs total_pairs = np.array(range(len(p1))) batchsize = 100000 # small batchsize instead of all pairs in one batch due to the memory limiation sublists = [ total_pairs[i:i + batchsize] for i in range(0, len(p1), batchsize) ] total_sublists = len(sublists) for c, s in enumerate(sublists): feat1 = template_norm_feats[template2id[p1[s]]] feat2 = template_norm_feats[template2id[p2[s]]] similarity_score = np.sum(feat1 * feat2, -1) score[s] = similarity_score.flatten() if c % 10 == 0: print('Finish {}/{} pairs.'.format(c, total_sublists)) return score def read_score(path): with open(path, 'rb') as fid: img_feats = pickle.load(fid) return img_feats # # Step1: Load Meta Data assert target == 'IJBC' or target == 'IJBB' # ============================================================= # load image and template relationships for template feature embedding # tid --> template id, mid --> media id # format: # image_name tid mid # ============================================================= start = timeit.default_timer() templates, medias = read_template_media_list( os.path.join('%s/meta' % image_path, '%s_face_tid_mid.txt' % target.lower())) stop = timeit.default_timer() print('Time: %.2f s. ' % (stop - start)) # ============================================================= # load template pairs for template-to-template verification # tid : template id, label : 1/0 # format: # tid_1 tid_2 label # ============================================================= start = timeit.default_timer() p1, p2, label = read_template_pair_list( os.path.join('%s/meta' % image_path, '%s_template_pair_label.txt' % target.lower())) stop = timeit.default_timer() print('Time: %.2f s. 
' % (stop - start)) # # Step 2: Get Image Features # ============================================================= # load image features # format: # img_feats: [image_num x feats_dim] (227630, 512) # ============================================================= start = timeit.default_timer() img_path = '%s/loose_crop' % image_path img_list_path = '%s/meta/%s_name_5pts_score.txt' % (image_path, target.lower()) img_list = open(img_list_path) files = img_list.readlines() dataset = DatasetIJB(root=img_path, lines=files, align=True) img_feats = extract_parallel(args.model_prefix, args.model_epoch, dataset, args.batch_size, size=args.emb_size) faceness_scores = [] for each_line in files: name_lmk_score = each_line.split() faceness_scores.append(name_lmk_score[-1]) faceness_scores = np.array(faceness_scores).astype(np.float32) stop = timeit.default_timer() print('Time: %.2f s. ' % (stop - start)) print('Feature Shape: ({} , {}) .'.format(img_feats.shape[0], img_feats.shape[1])) # # Step3: Get Template Features # In[ ]: # ============================================================= # compute template features from image features. # ============================================================= start = timeit.default_timer() # ========================================================== # Norm feature before aggregation into template feature? # Feature norm from embedding network and faceness score are able to decrease weights for noise samples (not face). # ========================================================== # 1. FaceScore (Feature Norm) # 2. FaceScore (Detector) if use_flip_test: # concat --- F1 # img_input_feats = img_feats # add --- F2 img_input_feats = img_feats[:, 0:img_feats.shape[1] // 2] + img_feats[:, img_feats.shape[1] // 2:] else: img_input_feats = img_feats[:, 0:img_feats.shape[1] // 2] if use_norm_score: img_input_feats = img_input_feats else: # normalise features to remove norm information img_input_feats = img_input_feats / np.sqrt( np.sum(img_input_feats**2, -1, keepdims=True)) if use_detector_score: print(img_input_feats.shape, faceness_scores.shape) # img_input_feats = img_input_feats * np.matlib.repmat(faceness_scores[:,np.newaxis], 1, img_input_feats.shape[1]) img_input_feats = img_input_feats * faceness_scores[:, np.newaxis] else: img_input_feats = img_input_feats template_norm_feats, unique_templates = image2template_feature( img_input_feats, templates, medias) stop = timeit.default_timer() print('Time: %.2f s. ' % (stop - start)) # # Step 4: Get Template Similarity Scores # In[ ]: # ============================================================= # compute verification scores between template pairs. # ============================================================= start = timeit.default_timer() score = verification(template_norm_feats, unique_templates, p1, p2) stop = timeit.default_timer() print('Time: %.2f s. 
' % (stop - start)) # In[ ]: save_path = result_dir + '/%s_result' % target if not os.path.exists(save_path): os.makedirs(save_path) score_save_file = os.path.join(save_path, "%s.npy" % job) np.save(score_save_file, score) # # Step 5: Get ROC Curves and TPR@FPR Table # In[ ]: files = [score_save_file] methods = [] scores = [] for file in files: methods.append(Path(file).stem) scores.append(np.load(file)) methods = np.array(methods) scores = dict(zip(methods, scores)) colours = dict( zip(methods, sample_colours_from_colourmap(methods.shape[0], 'Set2'))) # x_labels = [1/(10**x) for x in np.linspace(6, 0, 6)] x_labels = [10**-6, 10**-5, 10**-4, 10**-3, 10**-2, 10**-1] tpr_fpr_table = PrettyTable(['Methods'] + [str(x) for x in x_labels]) fig = plt.figure() for method in methods: fpr, tpr, _ = roc_curve(label, scores[method]) roc_auc = auc(fpr, tpr) fpr = np.flipud(fpr) tpr = np.flipud(tpr) # select largest tpr at same fpr plt.plot(fpr, tpr, color=colours[method], lw=1, label=('[%s (AUC = %0.4f %%)]' % (method.split('-')[-1], roc_auc * 100))) tpr_fpr_row = [] tpr_fpr_row.append("%s-%s" % (method, target)) for fpr_iter in np.arange(len(x_labels)): _, min_index = min( list(zip(abs(fpr - x_labels[fpr_iter]), range(len(fpr))))) # tpr_fpr_row.append('%.4f' % tpr[min_index]) tpr_fpr_row.append('%.2f' % (tpr[min_index] * 100)) tpr_fpr_table.add_row(tpr_fpr_row) plt.xlim([10**-6, 0.1]) plt.ylim([0.3, 1.0]) plt.grid(linestyle='--', linewidth=1) plt.xticks(x_labels) plt.yticks(np.linspace(0.3, 1.0, 8, endpoint=True)) plt.xscale('log') plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('ROC on IJB') plt.legend(loc="lower right") # plt.show() fig.savefig(os.path.join(save_path, '%s.pdf' % job)) print(tpr_fpr_table)
[]
[]
[ "CUDA_VISIBLE_DEVICES" ]
[]
["CUDA_VISIBLE_DEVICES"]
python
1
0
example_app/manage.py
#!/usr/bin/env python """Django's command-line utility for administrative tasks.""" import os import sys def main(): """Run administrative tasks.""" os.environ.setdefault("DJANGO_SETTINGS_MODULE", "example_app.settings") try: from django.core.management import execute_from_command_line except ImportError as exc: raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) from exc execute_from_command_line(sys.argv) if __name__ == "__main__": main()
[]
[]
[]
[]
[]
python
0
0
cmd/stub-broker/main.go
/* * Copyright 2018 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package main import ( "bytes" "fmt" "io/ioutil" "net/http" "os" "strings" "github.com/go-martini/martini" ) var ( // TODO make thread safe subscriptions = make(map[string]map[string]struct{}) exists = struct{}{} broker = os.Getenv("BROKER_NAME") forwardHeaders = []string{ "content-type", "x-request-id", "x-b3-traceid", "x-b3-spanid", "x-b3-parentspanid", "x-b3-sampled", "x-b3-flags", "x-ot-span-context", } ) func splitStreamName(host string) string { chunks := strings.Split(host, ".") stream := chunks[0] return stream } func main() { m := martini.Classic() m.Post("/", func(req *http.Request, res http.ResponseWriter) { host := req.Host fmt.Printf("Recieved request for %s\n", host) stream := splitStreamName(host) subscribers, ok := subscriptions[stream] if !ok { res.WriteHeader(http.StatusNotFound) return } body, err := ioutil.ReadAll(req.Body) if err != nil { res.WriteHeader(http.StatusInternalServerError) return } res.WriteHeader(http.StatusAccepted) go func() { fmt.Printf("Making subscribed requests for %s\n", stream) // make upstream requests client := &http.Client{} for subscribed := range subscribers { go func(subscribed string) { fmt.Printf("Making subscribed request to %s for %s\n", subscribed, stream) url := fmt.Sprintf("http://%s/", subscribed) request, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(body)) if err != nil { fmt.Printf("Unable to create subscriber request %v", err) } request.Header.Set("x-broker", broker) request.Header.Set("x-stream", stream) for _, header := range forwardHeaders { if value := req.Header.Get(header); value != "" { request.Header.Set(header, value) } } _, err = client.Do(request) if err != nil { fmt.Printf("Unable to complete subscriber request %v", err) } }(subscribed) } }() }) m.Group("/streams/:stream", func(r martini.Router) { r.Put("", func(params martini.Params, res http.ResponseWriter) { stream := params["stream"] fmt.Printf("Create stream %s\n", stream) if _, ok := subscriptions[stream]; !ok { subscriptions[stream] = map[string]struct{}{} } res.WriteHeader(http.StatusAccepted) }) r.Delete("", func(params martini.Params, res http.ResponseWriter) { stream := params["stream"] fmt.Printf("Delete stream %s\n", stream) delete(subscriptions, stream) res.WriteHeader(http.StatusAccepted) }) r.Group("/subscriptions/:subscription", func(r martini.Router) { r.Put("", func(params martini.Params, res http.ResponseWriter) { stream := params["stream"] subscription := params["subscription"] subscribers, ok := subscriptions[stream] if !ok { res.WriteHeader(http.StatusNotFound) return } fmt.Printf("Create subscription %s for stream %s\n", subscription, stream) // TODO store subscription params subscribers[subscription] = exists res.WriteHeader(http.StatusAccepted) }) r.Delete("", func(params martini.Params, res http.ResponseWriter) { stream := params["stream"] subscription := params["subscription"] subscribers, ok := subscriptions[stream] if !ok { 
res.WriteHeader(http.StatusNotFound) return } fmt.Printf("Delete subscription %s for stream %s\n", subscription, stream) delete(subscribers, subscription) res.WriteHeader(http.StatusAccepted) }) }) }) m.Run() }
[ "\"BROKER_NAME\"" ]
[]
[ "BROKER_NAME" ]
[]
["BROKER_NAME"]
go
1
0
equipment/equipment_api_test.go
// +build integration // This file is generated by go-dnd5eapi/gen/api_gen.go. DO NOT EDIT THIS FILE. Generation parameters are: // Package Name: equipment // Endpoint: equipment // API Name: Equipment // LC API Name: equipment package equipment import ( json "github.com/json-iterator/go" "fmt" "log" "net/http" "os" "testing" "github.com/kjkondratuk/go-dnd5eapi/api" "github.com/stretchr/testify/assert" ) var ( APIBaseURL = os.Getenv("API_ROOT") Client = NewClient(api.NewBasicsProvider(&http.Client{}, APIBaseURL)) ) func TestMain(t *testing.M) { t.Run() } func TestEquipment_GetList_IT(t *testing.T) { resp, err := Client.GetList() r, _ := json.Marshal(resp) log.Print(fmt.Sprintf("Response: %s", r)) assert.Nil(t, err, "Should not receive an error contacting API.") assert.True(t, true, "Should complete successfully!") } func TestEquipment_QueryList_IT(t *testing.T) { query := make(map[string]string, 1) query["index"] = "abacus" resp, err := Client.QueryList(query) r, _ := json.Marshal(resp) log.Print(fmt.Sprintf("Response: %s", r)) assert.Nil(t, err, "Should not receive an error contacting API.") assert.True(t, true, "Should complete successfully!") } func TestEquipment_GetByIndex_IT(t *testing.T) { resp, err := Client.GetByIndex("abacus") r, _ := json.Marshal(resp) log.Print(fmt.Sprintf("Response: %s", r)) assert.Nil(t, err, "Should not receive an error contacting API.") assert.True(t, true, "Should complete successfully!") }
[ "\"API_ROOT\"" ]
[]
[ "API_ROOT" ]
[]
["API_ROOT"]
go
1
0
python/put-static-route.py
""" put-static-route. adds static route to XR parameter: * ODL IP address * Peer XR NETCONF node * prefix * length * next-hop uses HTTP PUT with JSON payload """ import sys import os import requests request_template = ''' { "vrf-prefix": [ { "prefix": "%s", "prefix-length": %s, "vrf-route": { "vrf-next-hop-table": { "vrf-next-hop-next-hop-address": [ { "next-hop-address": "%s" } ] } } } ] } ''' # check args length if (len(sys.argv) != 6): print "usage %s ODL_IP_address NETCONF-Node prefix length nexthop" % \ sys.argv[0] sys.exit(1) odl_user = os.environ.get('ODL_USER', 'admin') odl_pass = os.environ.get('ODL_PASS', 'admin') req_hdrs = { 'Content-Type' : 'application/json' } req_body = request_template % (sys.argv[3], sys.argv[4], sys.argv[5]) url = 'http://' + sys.argv[1] + ':8181' + \ '/restconf/config/network-topology:network-topology/topology' + \ '/topology-netconf/node/' + sys.argv[2] + '/yang-ext:mount' + \ '/Cisco-IOS-XR-ip-static-cfg:router-static/default-vrf' + \ '/address-family/vrfipv4/vrf-unicast/vrf-prefixes/vrf-prefix/' + \ sys.argv[3] + '/' + sys.argv[4] resp = requests.put(url, data=req_body, headers=req_hdrs, auth=(odl_user, odl_pass)) print resp
[]
[]
[ "ODL_PASS", "ODL_USER" ]
[]
["ODL_PASS", "ODL_USER"]
python
2
0
tests/svg_path_test.py
from picosvg.svg_transform import Affine2D from picosvg.svg_types import SVGPath from nanoemoji.svg_path import SVGPathPen, draw_svg_path import pytest class DummyGlyph: def draw(self, pen): pen.moveTo((0, 0)) pen.lineTo((0, 10)) pen.lineTo((10, 10)) pen.lineTo((10, 0)) pen.closePath() pen.moveTo((0, 15)) pen.curveTo((0, 20), (10, 20), (10, 15)) pen.closePath() pen.moveTo((0, -5)) pen.qCurveTo((0, -8), (3, -10), (7, -10), (10, -8), (10, -5)) pen.endPath() def test_addComponent_decompose(): pen = SVGPathPen(glyphSet={"a": DummyGlyph()}) pen.addComponent("a", Affine2D.identity()) assert pen.path.d == ( "M0,0 L0,10 L10,10 L10,0 Z " "M0,15 C0,20 10,20 10,15 Z " "M0,-5 Q0,-8 1.5,-9 Q3,-10 5,-10 Q7,-10 8.5,-9 Q10,-8 10,-5" ) def test_addComponent_decompose_with_transform(): pen = SVGPathPen(glyphSet={"a": DummyGlyph()}) pen.addComponent("a", Affine2D(2, 0, 0, 2, 0, 0)) assert pen.path.d == ( "M0,0 L0,20 L20,20 L20,0 Z " "M0,30 C0,40 20,40 20,30 Z " "M0,-10 Q0,-16 3,-18 Q6,-20 10,-20 Q14,-20 17,-18 Q20,-16 20,-10" ) def test_draw_onto_existing_path(): path = SVGPath(d="M0,0 L0,10 L10,10 L10,0 Z") pen = SVGPathPen(path=path) pen.moveTo((0, 15)) pen.lineTo((5, 20)) pen.lineTo((10, 15)) pen.closePath() assert path.d == "M0,0 L0,10 L10,10 L10,0 Z M0,15 L5,20 L10,15 Z" def test_addComponent_missing(): pen = SVGPathPen(glyphSet={"a": DummyGlyph()}) with pytest.raises(KeyError): pen.addComponent("b", Affine2D.identity()) @pytest.mark.parametrize( "d", [ "M0,0 L0,10 L10,10 L10,0 Z", "M0,0 L0,10 L10,10 L10,0", "M0,0 L0,10 L10,10 L10,0 Z M12,0 L12,10 L22,10 L22,0", "M0,0 L0,10 L10,10 L10,0 M12,0 L12,10 L22,10 L22,0 Z", "M0,0 C0,3 2,5 5,5 C8,5 10,3 10,0 C10,-3 8,-5 5,-5 C2,-5 0,-3 0,0 Z", "M0,0 Q0,10 10,10 Q20,10 20,0 Z", ], ) def test_roundtrip_path_with_pen(d): path = SVGPath(d=d) pen = SVGPathPen() draw_svg_path(path, pen) assert pen.path.d == d def test_draw_svg_close_subpaths(): path = SVGPath(d="M0,0 L0,10 L10,10 L10,0 M12,0 L12,10 L22,10 L22,0") pen = SVGPathPen() draw_svg_path(path, pen, close_subpaths=True) assert pen.path.d == "M0,0 L0,10 L10,10 L10,0 Z M12,0 L12,10 L22,10 L22,0 Z"
[]
[]
[]
[]
[]
python
null
null
null
macros/_vocola_main.py
# _vocola_main.py - NatLink support for Vocola # -*- coding: latin-1 -*- # # Contains: # - "Built-in" voice commands # - Autoloading of changed command files # # # Copyright (c) 2002-2012 by Rick Mohr. # # Portions Copyright (c) 2012-2013 by Hewlett-Packard Development Company, L.P. # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # # WARNING: This version of _vocola_main.py has been modified to work # with Quintijn's installer/version of NatLink and has a # number of unofficial changes/improvements. The code has # been organized to minimize the diff's with the official # version. # import string import sys import os # access to file information import os.path # to parse filenames import time # print time in messages from stat import * # file statistics import re import natlink from natlinkutils import * ########################################################################### # # # Configuration # # # ########################################################################### # The Vocola translator is a perl program. By default we use the precompiled # executable vcl2py.exe, which doesn't require installing perl. # To instead use perl and vcl2py.pl, set the following variable to 1: usePerl = 0 try: import natlinkstatus Quintijn_installer = True status = natlinkstatus.NatlinkStatus() VocolaEnabled = not not status.getVocolaUserDirectory() language = status.getLanguage() except ImportError: Quintijn_installer = False VocolaEnabled = True language = 'enx' # get location of MacroSystem folder: NatLinkFolder = os.path.split( sys.modules['natlinkmain'].__dict__['__file__'])[0] # (originally, natlinkmain lived in MacroSystem, not MacroSystem\core) NatLinkFolder = re.sub(r'\core$', "", NatLinkFolder) VocolaFolder = os.path.normpath(os.path.join(NatLinkFolder, '..', 'Vocola')) ExecFolder = os.path.normpath(os.path.join(NatLinkFolder, '..', 'Vocola', 'exec')) # C module "simpscrp" defines Exec(), which runs a program in a minimized # window and waits for completion. 
Since such modules need to be compiled # separately for each python version we need this careful import: pydFolder = os.path.normpath(os.path.join(NatLinkFolder, '..', 'Vocola', 'exec', sys.version[0:3])) ExtensionsFolder = os.path.normpath(os.path.join(NatLinkFolder, '..', 'Vocola', 'extensions')) NatLinkFolder = os.path.abspath(NatLinkFolder) if VocolaEnabled: sys.path.append(pydFolder) sys.path.append(ExecFolder) sys.path.append(ExtensionsFolder) def get_command_folder(): configured = None try: import natlinkstatus # Quintijn's's installer: configured = natlinkstatus.NatlinkStatus().getVocolaUserDirectory() except ImportError: try: import RegistryDict import win32con # Scott's installer: r = RegistryDict.RegistryDict(win32con.HKEY_CURRENT_USER, "Software\NatLink") if r: configured = r["VocolaUserDirectory"] except ImportError: pass if os.path.isdir(configured): return configured systemCommandFolder = os.path.join(VocolaFolder, 'Commands') if os.path.isdir(systemCommandFolder): return systemCommandFolder return None commandFolder = get_command_folder() if VocolaEnabled and not commandFolder: print >> sys.stderr, "Warning: no Vocola command folder found!" ## ## Quintijn's unofficial multiple language kludge: ## def copyVclFileLanguageVersion(Input, Output): """copy to another location, keeping the include files one directory above """ # let include lines to relative paths point to the folder above ..\ # so you can take the same include file for the alternate language. reInclude = re.compile(r'(include\s+)\w') Input = os.path.normpath(Input) Output = os.path.normpath(Output) input = open(Input, 'r').read() output = open(Output, 'w') output.write("# vocola file for alternate language: %s\n"% language) lines = map(string.strip, str(input).split('\n')) for line in lines: if reInclude.match(line): line = 'include ..\\' + line[8:] output.write(line + '\n') output.close() def copyToNewSubDirectory(trunk, subdirectory): for f in os.listdir(trunk): if f.endswith('.vcl'): copyVclFileLanguageVersion(os.path.join(trunk, f), os.path.join(subdirectory, f)) if VocolaEnabled and status.getVocolaTakesLanguages(): print '_vocola_main started with language: %s' % language if language != 'enx' and commandFolder: uDir = commandFolder uDir2 = os.path.join(uDir, language) if not os.path.isdir(uDir2): print 'creating userCommandFolder for language %s' % language os.mkdir(uDir2) copyToNewSubDirectory(uDir, uDir2) commandFolder = uDir2 ########################################################################### # # # The built-in commands # # # ########################################################################### class ThisGrammar(GrammarBase): gramSpec = """ <NatLinkWindow> exported = [Show] (NatLink|Vocola) Window; <edit> exported = Edit [Voice] Commands; <editGlobal> exported = Edit Global [Voice] Commands; <editMachine> exported = Edit Machine [Voice] Commands; <editGlobalMachine> exported = Edit Global Machine [Voice] Commands; <loadAll> exported = Load All [Voice] Commands; <loadCurrent> exported = Load [Voice] Commands; <loadGlobal> exported = Load Global [Voice] Commands; <loadExtensions> exported = Load [Voice] Extensions; <discardOld> exported = Discard Old [Voice] Commands; """ if language == 'nld': gramSpec = """ <NatLinkWindow> exported = Toon (NatLink|Vocola) venster; <edit> exported = (Eddit|Bewerk|Sjoo|Toon)(Commandoos|Commands) | (Eddit|Bewerk|Sjoo|Toon)(stem|vojs)(Commandoos|Commands); <editGlobal> exported = (Eddit|Bewerk|Sjoo|Toon) (Global|globale) [stem|vojs] (Commandoos|Commands); 
<editMachine> exported = (Eddit|Bewerk|Sjoo|Toon) Machine [stem|vojs] (Commandoos|Commands); <editGlobalMachine> exported = (Eddit|Bewerk|Sjoo|Toon) (Global|globale) Machine [stem|vojs] (Commandoos|Commands); <loadAll> exported = (Laad|Lood) alle [stem|vojs] (Commandoos|Commands); <loadCurrent> exported = (Laad|Lood) [stem|vojs] (Commandoos|Commands); <loadGlobal> exported = (Laad|Lood) globale [stem|vojs] (Commandoos|Commands); <loadExtensions> exported = Laad [stem] extensies; <discardOld> exported = (Discard|Verwijder) (oude|oold) [stem|vojs] (Commandoos|Commands); """ elif language == 'fra': gramSpec = """ <NatLinkWindow> exported = [Afficher] Fenetre (NatLink|Vocola); <edit> exported = Editer Commandes [Vocales]; <editGlobal> exported = Editer Commandes [Vocales] Globales; <editMachine> exported = Editer Commandes [Vocales] Machine; <editGlobalMachine> exported = Editer Commandes [Vocales] Globales Machine; <loadAll> exported = Charger Toutes Les Commandes [Vocales]; <loadCurrent> exported = Charger Commandes [Vocales]; <loadGlobal> exported = Charger Commandes [Vocales] Globales; <loadExtensions> exported = Charger Extensions [Vocales]; <discardOld> exported = Effacer Commandes [Vocales] Precedentes; """ elif language == 'deu': gramSpec = """ <NatLinkWindow> exported = [Zeige] (NatLink|Vocola) Fenster; <edit> exported = Bearbeite [Sprach] Befehle; <editGlobal> exported = Bearbeite globale [Sprach] Befehle; <editMachine> exported = Bearbeite Maschinen [Sprach] Befehle; <editGlobalMachine> exported = Bearbeite globale Maschinen [Sprach] Befehle; <loadAll> exported = Lade alle [Sprach] Befehle; <loadCurrent> exported = Lade [Sprach] Befehle; <loadGlobal> exported = Lade globale [Sprach] Befehle; <loadExtensions> exported = Lade [Sprach] Extensions; <discardOld> exported = Verwerfe alte [Sprach] Befehle; """ elif language == 'ita': gramSpec = """ <NatLinkWindow> exported = [Mostra] Finestra Di (NatLink|Vocola); <edit> exported = Modifica Comandi [Vocali]; <editGlobal> exported = Modifica Comandi [Vocali] Globali; <editMachine> exported = Modifica Comandi [Vocali] [del] Computer; <editGlobalMachine> exported = Modifica Comandi [Vocali] Globali [del] Computer; <loadAll> exported = Carica Tutti I Comandi [Vocali]; <loadCurrent> exported = Carica I Comandi [Vocali]; <loadGlobal> exported = Carica Comandi [Vocali] Gliobali; <loadExtensions> exported = Carica Estensioni [Vocali]; <discardOld> exported = Annulla Vecchi Comandi [Vocali]; """ elif language == 'esp': gramSpec = """ <NatLinkWindow> exported = [Mostrar] Ventana de (NatLink|Vocola) ; <edit> exported = (Modificar|Editar) Comandos [de voz]; <editGlobal> exported = (Modificar|Editar) Comandos [de voz] Globales ; <editMachine> exported = (Modificar|Editar) Comandos [de voz] de (este ordenador|la Computadora); <editGlobalMachine> exported = (Modificar|Editar) Comandos [de voz] Globales de (este ordenador|la Computadora); <loadAll> exported = (Recargar|Cargar) Todos Los Comandos [de voz]; <loadCurrent> exported = (Recargar|Cargar) Comandos [de voz]; <loadGlobal> exported = (Recargar|Cargar) Comandos [de voz] Globales; <loadExtensions> exported = (Recargar|Cargar) Extensiones [de voz]; <discardOld> exported = Descartar Comandos [de voz] Viejos; """ elif language != 'enx': print >> sys.stderr, """\n\n Vocola Warning: no language "%s" translations for the built-in Vocola commands (e.g., commands to load voice commands) are currently available; consider helping translate them -- inquire on http://www.speechcomputing.com. 
For now the English versions, like "Edit Commands" and "Edit Global Commands" are activated. """ % language def initialize(self): self.updateUnimacroHeaderIfNeeded() if os.environ.has_key('COMPUTERNAME'): self.machine = string.lower(os.environ['COMPUTERNAME']) else: self.machine = 'local' self.load_extensions() self.loadAllFiles(False) self.load(self.gramSpec) self.activateAll() def gotBegin(self,moduleInfo): self.currentModule = moduleInfo # delay enabling until now to avoid NatLink clobbering our callback: enable_callback() # Get app name by stripping folder and extension from currentModule name def getCurrentApplicationName(self): return string.lower(os.path.splitext(os.path.split(self.currentModule[0]) [1]) [0]) ### Miscellaneous commands # "Show NatLink Window" -- print to output window so it appears def gotResults_NatLinkWindow(self, words, fullResults): print "This is the NatLink/Vocola output window" # "Load Extensions" -- scan for new/changed extensions: def gotResults_loadExtensions(self, words, fullResults): self.load_extensions(True) for module in sys.modules.keys(): if module.startswith("vocola_ext_"): del sys.modules[module] def load_extensions(self, verbose=False): #if sys.modules.has_key("scan_extensions"): # del sys.modules["scan_extensions"] import scan_extensions arguments = ["scan_extensions", ExtensionsFolder] if verbose: arguments.insert(1, "-v") scan_extensions.main(arguments) ### Loading Vocola Commands # "Load All Commands" -- translate all Vocola files def gotResults_loadAll(self, words, fullResults): self.loadAllFiles(True) # "Load Commands" -- translate Vocola files for current application def gotResults_loadCurrent(self, words, fullResults): self.loadSpecificFiles(self.getCurrentApplicationName()) # "Load Global Commands" -- translate global Vocola files def gotResults_loadGlobal(self, words, fullResults): self.loadSpecificFiles('') # "Discard Old [Voice] Commands" -- purge output then translate all files def gotResults_discardOld(self, words, fullResults): purgeOutput() self.loadAllFiles(True) # Load all command files def loadAllFiles(self, force): if commandFolder: compile_Vocola(commandFolder, force) # Load command files for specific application def loadSpecificFiles(self, module): special = re.compile(r'([][()^$.+*?{\\])') pattern = "^" + special.sub(r'\\\1', module) pattern += "(_[^@]*)?(@" + special.sub(r'\\\1', self.machine) pattern += ")?\.vcl$" p = re.compile(pattern) targets = [] if commandFolder: targets += [os.path.join(commandFolder,f) for f in os.listdir(commandFolder) if p.search(f)] if len(targets) > 0: for target in targets: self.loadFile(target) else: print >> sys.stderr if module == "": print >> sys.stderr, "Found no Vocola global command files [for machine '" + self.machine + "']" else: print >> sys.stderr, "Found no Vocola command files for application '" + module + "' [for machine '" + self.machine + "']" # Load a specific command file, returning false if not present def loadFile(self, file): try: os.stat(file) compile_Vocola(file, False) return True except OSError: return False # file not found ### Editing Vocola Command Files # "Edit Commands" -- open command file for current application def gotResults_edit(self, words, fullResults): app = self.getCurrentApplicationName() file = app + '.vcl' comment = 'Voice commands for ' + app self.openCommandFile(file, comment) # "Edit Machine Commands" -- open command file for current app & machine def gotResults_editMachine(self, words, fullResults): app = self.getCurrentApplicationName() file = app 
+ '@' + self.machine + '.vcl' comment = 'Voice commands for ' + app + ' on ' + self.machine self.openCommandFile(file, comment) # "Edit Global Commands" -- open global command file def gotResults_editGlobal(self, words, fullResults): file = '_vocola.vcl' comment = 'Global voice commands' self.openCommandFile(file, comment) # "Edit Global Machine Commands" -- open global command file for machine def gotResults_editGlobalMachine(self, words, fullResults): file = '_vocola@' + self.machine + '.vcl' comment = 'Global voice commands on ' + self.machine self.openCommandFile(file, comment) def FindExistingCommandFile(self, file): if commandFolder: f = commandFolder + '\\' + file if os.path.isfile(f): return f return "" # Open a Vocola command file (using the application associated with ".vcl") def openCommandFile(self, file, comment): if not commandFolder: print >> sys.stderr, "Error: Unable to create command file because no Vocola command folder found." return path = self.FindExistingCommandFile(file) if not path: path = commandFolder + '\\' + file new = open(path, 'w') new.write('# ' + comment + '\n\n') # insert include line to Unimacro.vch: if status.getVocolaTakesUnimacroActions(): if language == 'enx' or not status.getVocolaTakesLanguages(): includeLine = 'include Unimacro.vch;\n\n' else: includeLine = 'include ..\\Unimacro.vch;\n\n' new.write(includeLine) new.close() wantedPath = os.path.join(commandFolder, file) if path and path != wantedPath: # copy from other location if wantedPath.startswith(path) and len(wantedPath) - len(path) == 3: print 'copying enx version to language version %s'% language copyVclFileLanguageVersion(path, wantedPath) else: print 'copying from other location' self.copyVclFile(path, wantedPath) path = wantedPath # # NatLink/DNS bug causes os.startfile or wpi32api.ShellExecute # to crash DNS if allResults is on in *any* grammer (e.g., Unimacro) # # Accordingly, use AppBringUp instead: # #try: # os.startfile(path) #except WindowsError, e: # print # print "Unable to open voice command file with associated editor: " + str(e) # print "Trying to open it with notepad instead." # prog = os.path.join(os.getenv('WINDIR'), 'notepad.exe') # os.spawnv(os.P_NOWAIT, prog, [prog, path]) natlink.execScript("AppBringUp \"" + path + "\", \"" + path + "\"") def copyVclFile(self, Input, Output): """copy to another location """ # QH, febr, 5, 2008 Input = os.path.normpath(Input) Output = os.path.normpath(Output) input = open(Input, 'r').read() output = open(Output, 'w') output.write("# vocola file from a sample directory %s\n"% Input) lines = map(string.strip, str(input).split('\n')) for line in lines: output.write(line + '\n') output.close() def updateUnimacroHeaderIfNeeded(self): import shutil if not status.getVocolaTakesUnimacroActions(): return destDir = status.getVocolaUserDirectory() sourceDir = os.path.join(status.getUserDirectory(), 'vocola_compatibility') destPath = os.path.join(destDir, 'Unimacro.vch') sourcePath = os.path.join(sourceDir, 'Unimacro.vch') sourceTime, destTime = vocolaGetModTime(sourcePath), vocolaGetModTime(destPath) if not (sourceTime or destTime): print >> sys.stderr, """\n Error: The option "Vocola Takes Unimacro Actions" is switched on, but no file "Unimacro.vch" is found. Please fix the configuration of NatLink/Vocola/Unimacro and restart Dragon. Either ensure the source file is at: "%s", or switch off the option "Vocola Takes Unimacro Actions". 
"""% sourceDir return if destTime < sourceTime: try: shutil.copyfile(sourcePath, destPath) except IOError: print >> sys.stderr, """\n Warning: Could not copy example "Unimacro.vch" to: "%s". There is a valid "Unimacro.vch" available, but a newer file is available at: "%s". Please fix the configuration of NatLink/Vocola/Unimacro and restart Dragon, if you want to use the updated version of this file."""% (destDir, sourceDir) else: print 'Succesfully copied "Unimacro.vch" from\n\t"%s" to\n\t"%s".'% (sourceDir, destDir) ########################################################################### # # # Compiling Vocola files # # # ########################################################################### may_have_compiled = False # has the compiler been called? compile_error = False # has a compiler error occurred? # Run Vocola compiler, converting command files from "inputFileOrFolder" # and writing output to NatLink/MacroSystem def compile_Vocola(inputFileOrFolder, force): global may_have_compiled, compiler_error may_have_compiled = True # below line currently needed because kludge changes the the folder: VocolaFolder = os.path.normpath(os.path.join(NatLinkFolder, '..', 'Vocola')) if usePerl: executable = "perl" arguments = [VocolaFolder + r'\exec\vcl2py.pl'] else: executable = VocolaFolder + r'\exec\vcl2py.exe' arguments = [] executable = sys.prefix + r'\python.exe' arguments = [VocolaFolder + r'\exec\vcl2py.py'] arguments += ['-extensions', ExtensionsFolder + r'\extensions.csv'] if language == "enx": arguments += ['-numbers', 'zero,one,two,three,four,five,six,seven,eight,nine'] arguments += ["-suffix", "_vcl" ] if force: arguments += ["-f"] arguments += [inputFileOrFolder, NatLinkFolder] hidden_call(executable, arguments) logName = commandFolder + r'\vcl2py_log.txt' if os.path.isfile(logName): try: log = open(logName, 'r') compiler_error = True print >> sys.stderr, log.read() log.close() os.remove(logName) except IOError: # no log file means no Vocola errors pass # Unload all commands, including those of files no longer existing def purgeOutput(): pattern = re.compile("_vcl\d*\.pyc?$") [os.remove(os.path.join(NatLinkFolder,f)) for f in os.listdir(NatLinkFolder) if pattern.search(f)] # # Run program with path executable and arguments arguments. Waits for # the program to finish. Runs the program in a hidden window. 
# def hidden_call(executable, arguments): args = [executable] + arguments try: # Using simpscrp is depreciated; remove '_disabled' below to use: import simpscrp_disabled args = ['"' + str(x) + '"' for x in args] call = ' '.join(args) simpscrp.Exec(call, 1) except ImportError: try: import subprocess si = subprocess.STARTUPINFO() # Location of below constants seems to vary from Python # version to version so hardcode them: si.dwFlags = 1 # subprocess.STARTF_USESHOWWINDOW si.wShowWindow = 0 # subprocess.SW_HIDE return subprocess.call(args, startupinfo=si) except ImportError: pid = os.spawnv(os.P_NOWAIT, executable, args) pid, exit_code = os.waitpid(pid, 0) exit_code = exit_code >> 8 return exit_code lastVocolaFileTime = 0 lastCommandFolderTime = 0 def compile_changed(): global lastVocolaFileTime, lastCommandFolderTime global compiler_error current = getLastVocolaFileModTime() if current > lastVocolaFileTime: compiler_error = False thisGrammar.loadAllFiles(False) if not compiler_error: lastVocolaFileTime = current #source_changed = False #if commandFolder: # if vocolaGetModTime(commandFolder) > lastCommandFolderTime: # lastCommandFolderTime = vocolaGetModTime(commandFolder) # source_changed = True #if source_changed: # deleteOrphanFiles() # Returns the newest modified time of any Vocola command folder file or # 0 if none: def getLastVocolaFileModTime(): last = 0 if commandFolder: last = max([last] + [vocolaGetModTime(os.path.join(commandFolder,f)) for f in os.listdir(commandFolder)]) return last # Returns the modification time of a file or 0 if the file does not exist: def vocolaGetModTime(file): try: return os.stat(file)[ST_MTIME] except OSError: return 0 # file not found def deleteOrphanFiles(): print "checking for orphans..." for f in os.listdir(NatLinkFolder): if not re.search("_vcl.pyc?$", f): continue s = getSourceFilename(f) if s: if vocolaGetModTime(s)>0: continue f = os.path.join(NatLinkFolder, f) print "Deleting: " + f os.remove(f) def getSourceFilename(output_filename): m = re.match("^(.*)_vcl.pyc?$", output_filename) if not m: return None # Not a Vocola file name = m.group(1) if not commandFolder: return None marker = "e_s_c_a_p_e_d__" m = re.match("^(.*)" + marker + "(.*)$", name) # rightmost marker! if m: name = m.group(1) tail = m.group(2) tail = re.sub("__a_t__", "@", tail) tail = re.sub("___", "_", tail) name += tail name = re.sub("_@", "@", name) return commandFolder + "\\" + name + ".vcl" lastNatLinkModTime = 0 # Check for changes to our output .py files and report status relative # to last time this routine was called; return code means: # 0: no changes # 1: 1 or more existing .py files were modified, but no new .py files created # 2: one or more new .py files may have been created, plus maybe existing changed def output_changes(): global lastNatLinkModTime, may_have_compiled old_may_have_compiled = may_have_compiled may_have_compiled = False current = vocolaGetModTime(NatLinkFolder) if current > lastNatLinkModTime: lastNatLinkModTime = current return 2 if old_may_have_compiled: return 1 else: return 0 # When speech is heard this function will be called before any others. # # Must return result of output_changes() so we can tell NatLink when # files need to be loaded. 
def utterance_start_callback(moduleInfo): compile_changed() return output_changes() ########################################################################### # # # Callback handling # # # ########################################################################### # # With Quintijn's installer as of February 4, 2008: # # _vocola_main is loaded before any other NatLink modules # vocolaBeginCallback is called directly by natlinkmain before any # other grammer's gotBegin method # natlinkmain now guarantees we are not called with CallbackDepth>1 # we return the result of output_changes() directly rather than # massaging NatLink to deal with new .py files # callback_enabled = False def enable_callback(): global callback_enabled if not callback_enabled: callback_enabled = True if not Quintijn_installer: # Replace NatLink's "begin" callback function with ours: natlink.setBeginCallback(vocolaBeginCallback) def disable_callback(): global callback_enabled callback_enabled = False if not Quintijn_installer: natlink.setBeginCallback(beginCallback) def vocolaBeginCallback(moduleInfo): if not callback_enabled: return 0 changes = 0 if Quintijn_installer or getCallbackDepth()<2: changes = utterance_start_callback(moduleInfo) if Quintijn_installer: return changes else: if changes > 1: # make sure NatLink sees any new .py files: natlinkmain.findAndLoadFiles() natlinkmain.loadModSpecific(moduleInfo) natlinkmain.beginCallback(moduleInfo) ########################################################################### # # # Startup/shutdown # # # ########################################################################### thisGrammar = None # remove previous Vocola/Python compilation output as it may be out of # date (e.g., new compiler, source file deleted, partially written due # to crash, new machine name, etc.): purgeOutput() if not VocolaEnabled: print "Vocola not active" else: print "Vocola version 2.8I starting..." thisGrammar = ThisGrammar() thisGrammar.initialize() def unload(): global thisGrammar disable_callback() if thisGrammar: thisGrammar.unload() thisGrammar = None
[]
[]
[ "COMPUTERNAME", "WINDIR" ]
[]
["COMPUTERNAME", "WINDIR"]
python
2
0
conanfile.py
from conans import ConanFile from conans import tools from conans.client.build.cppstd_flags import cppstd_flag from conans.model.version import Version from conans.errors import ConanException import os import sys import shutil try: from cStringIO import StringIO except ImportError: from io import StringIO # From from *1 (see below, b2 --show-libraries), also ordered following linkage order # see https://github.com/Kitware/CMake/blob/master/Modules/FindBoost.cmake to know the order lib_list = ['math', 'wave', 'container', 'contract', 'exception', 'graph', 'iostreams', 'locale', 'log', 'program_options', 'random', 'regex', 'mpi', 'serialization', 'coroutine', 'fiber', 'context', 'timer', 'thread', 'chrono', 'date_time', 'atomic', 'filesystem', 'system', 'graph_parallel', 'python', 'stacktrace', 'test', 'type_erasure'] class BoostConan(ConanFile): name = "boost" version = "1.69.0" settings = "os", "arch", "compiler", "build_type", "cppstd" folder_name = "boost_%s" % version.replace(".", "_") description = "Boost provides free peer-reviewed portable C++ source libraries" # The current python option requires the package to be built locally, to find default Python # implementation options = { "shared": [True, False], "header_only": [True, False], "error_code_header_only": [True, False], "system_no_deprecated": [True, False], "asio_no_deprecated": [True, False], "fPIC": [True, False], "skip_lib_rename": [True, False], "magic_autolink": [True, False], # enables BOOST_ALL_NO_LIB "python_executable": "ANY", # system default python installation is used, if None "python_version": "ANY", # major.minor; computed automatically, if None "namespace": "ANY", # custom boost namespace for bcp, e.g. myboost "namespace_alias": [True, False] # enable namespace alias for bcp, boost=myboost } options.update({"without_%s" % libname: [True, False] for libname in lib_list}) default_options = ["shared=False", "header_only=False", "error_code_header_only=False", "system_no_deprecated=False", "asio_no_deprecated=False", "fPIC=True", "skip_lib_rename=False", "magic_autolink=False", "python_executable=None", "python_version=None", "namespace=boost", "namespace_alias=False"] default_options.extend(["without_%s=False" % libname for libname in lib_list if libname != "python"]) default_options.append("without_python=True") default_options.append("bzip2:shared=False") default_options.append("zlib:shared=False") default_options = tuple(default_options) url = "https://github.com/lasote/conan-boost" license = "Boost Software License - Version 1.0. 
http://www.boost.org/LICENSE_1_0.txt" short_paths = True no_copy_source = True exports = ['patches/*'] _bcp_dir = "custom-boost" def config_options(self): if self.settings.os == "Windows": self.options.remove("fPIC") @property def _is_msvc(self): return self.settings.compiler == "Visual Studio" @property def zip_bzip2_requires_needed(self): return not self.options.without_iostreams and not self.options.header_only def configure(self): if self.zip_bzip2_requires_needed: self.requires("bzip2/1.0.6@conan/stable") self.requires("zlib/1.2.11@conan/stable") def package_id(self): if self.options.header_only: self.info.header_only() else: del self.info.options.python_executable # PATH to the interpreter is not important, only version matters if self.options.without_python: del self.info.options.python_version else: self.info.options.python_version = self._python_version def source(self): if tools.os_info.is_windows: sha256 = "d074bcbcc0501c4917b965fc890e303ee70d8b01ff5712bae4a6c54f2b6b4e52" extension = ".zip" else: sha256 = "9a2c2819310839ea373f42d69e733c339b4e9a19deab6bfec448281554aa4dbb" extension = ".tar.gz" zip_name = "%s%s" % (self.folder_name, extension) url = "https://dl.bintray.com/boostorg/release/%s/source/%s" % (self.version, zip_name) tools.get(url, sha256=sha256) tools.patch(base_path=os.path.join(self.source_folder, self.folder_name), patch_file='patches/python_base_prefix.patch', strip=1) ##################### BUILDING METHODS ########################### @property def _python_executable(self): """ obtain full path to the python interpreter executable :return: path to the python interpreter executable, either set by option, or system default """ exe = self.options.python_executable if self.options.python_executable else sys.executable return str(exe).replace('\\', '/') def _run_python_script(self, script): """ execute python one-liner script and return its output :param script: string containing python script to be executed :return: output of the python script execution, or None, if script has failed """ output = StringIO() command = '"%s" -c "%s"' % (self._python_executable, script) self.output.info('running %s' % command) try: self.run(command=command, output=output) except ConanException: self.output.info("(failed)") return None output = output.getvalue().strip() self.output.info(output) return output if output != "None" else None def _get_python_path(self, name): """ obtain path entry for the python installation :param name: name of the python config entry for path to be queried (such as "include", "platinclude", etc.) 
:return: path entry from the sysconfig """ # https://docs.python.org/3/library/sysconfig.html # https://docs.python.org/2.7/library/sysconfig.html return self._run_python_script("from __future__ import print_function; " "import sysconfig; " "print(sysconfig.get_path('%s'))" % name) def _get_python_sc_var(self, name): """ obtain value of python sysconfig variable :param name: name of variable to be queried (such as LIBRARY or LDLIBRARY) :return: value of python sysconfig variable """ return self._run_python_script("from __future__ import print_function; " "import sysconfig; " "print(sysconfig.get_config_var('%s'))" % name) def _get_python_du_var(self, name): """ obtain value of python distutils sysconfig variable (sometimes sysconfig returns empty values, while python.sysconfig provides correct values) :param name: name of variable to be queried (such as LIBRARY or LDLIBRARY) :return: value of python sysconfig variable """ return self._run_python_script("from __future__ import print_function; " "import distutils.sysconfig as du_sysconfig; " "print(du_sysconfig.get_config_var('%s'))" % name) def _get_python_var(self, name): """ obtain value of python variable, either by sysconfig, or by distutils.sysconfig :param name: name of variable to be queried (such as LIBRARY or LDLIBRARY) :return: value of python sysconfig variable """ return self._get_python_sc_var(name) or self._get_python_du_var(name) @property def _python_version(self): """ obtain version of python interpreter :return: python interpreter version, in format major.minor """ version = self._run_python_script("from __future__ import print_function; " "import sys; " "print('%s.%s' % (sys.version_info[0], sys.version_info[1]))") if self.options.python_version and version != self.options.python_version: raise Exception("detected python version %s doesn't match conan option %s" % (version, self.options.python_version)) return version @property def _python_inc(self): """ obtain the result of the "sysconfig.get_python_inc()" call :return: result of the "sysconfig.get_python_inc()" execution """ return self._run_python_script("from __future__ import print_function; " "import sysconfig; " "print(sysconfig.get_python_inc())") @property def _python_abiflags(self): """ obtain python ABI flags, see https://www.python.org/dev/peps/pep-3149/ for the details :return: the value of python ABI flags """ return self._run_python_script("from __future__ import print_function; " "import sys; " "print(getattr(sys, 'abiflags', ''))") @property def _python_includes(self): """ attempt to find directory containing Python.h header file :return: the directory with python includes """ include = self._get_python_path('include') plat_include = self._get_python_path('platinclude') include_py = self._get_python_var('INCLUDEPY') include_dir = self._get_python_var('INCLUDEDIR') python_inc = self._python_inc candidates = [include, plat_include, include_py, include_dir, python_inc] for candidate in candidates: if candidate: python_h = os.path.join(candidate, 'Python.h') self.output.info('checking %s' % python_h) if os.path.isfile(python_h): self.output.info('found Python.h: %s' % python_h) return candidate.replace('\\', '/') raise Exception("couldn't locate Python.h - make sure you have installed python development files") @property def _python_libraries(self): """ attempt to find python development library :return: the full path to the python library to be linked with """ library = self._get_python_var("LIBRARY") ldlibrary = self._get_python_var("LDLIBRARY") libdir = 
self._get_python_var("LIBDIR") multiarch = self._get_python_var("MULTIARCH") masd = self._get_python_var("multiarchsubdir") with_dyld = self._get_python_var("WITH_DYLD") if libdir and multiarch and masd: if masd.startswith(os.sep): masd = masd[len(os.sep):] libdir = os.path.join(libdir, masd) if not libdir: libdest = self._get_python_var("LIBDEST") libdir = os.path.join(os.path.dirname(libdest), "libs") candidates = [ldlibrary, library] library_prefixes = [""] if self._is_msvc else ["", "lib"] library_suffixes = [".lib"] if self._is_msvc else [".so", ".dll.a", ".a"] if with_dyld: library_suffixes.insert(0, ".dylib") python_version = self._python_version python_version_no_dot = python_version.replace(".", "") versions = ["", python_version, python_version_no_dot] abiflags = self._python_abiflags for prefix in library_prefixes: for suffix in library_suffixes: for version in versions: candidates.append("%spython%s%s%s" % (prefix, version, abiflags, suffix)) for candidate in candidates: if candidate: python_lib = os.path.join(libdir, candidate) self.output.info('checking %s' % python_lib) if os.path.isfile(python_lib): self.output.info('found python library: %s' % python_lib) return python_lib.replace('\\', '/') raise Exception("couldn't locate python libraries - make sure you have installed python development files") def _clean(self): src = os.path.join(self.source_folder, self.folder_name) clean_dirs = [os.path.join(self.build_folder, "bin.v2"), os.path.join(self.build_folder, "architecture"), os.path.join(self.source_folder, self._bcp_dir), os.path.join(src, "dist", "bin"), os.path.join(src, "stage"), os.path.join(src, "tools", "build", "src", "engine", "bootstrap"), os.path.join(src, "tools", "build", "src", "engine", "bin.ntx86"), os.path.join(src, "tools", "build", "src", "engine", "bin.ntx86_64")] for d in clean_dirs: if os.path.isdir(d): self.output.warn('removing "%s"' % d) shutil.rmtree(d) @property def _b2_exe(self): folder = os.path.join(self.source_folder, self.folder_name, "tools", "build") return os.path.join(folder, "b2.exe" if tools.os_info.is_windows else "b2") @property def _bcp_exe(self): folder = os.path.join(self.source_folder, self.folder_name, "dist", "bin") return os.path.join(folder, "bcp.exe" if tools.os_info.is_windows else "bcp") @property def _use_bcp(self): return self.options.namespace != "boost" @property def _boost_dir(self): return self._bcp_dir if self._use_bcp else self.folder_name @property def _boost_build_dir(self): return os.path.join(self.source_folder, self.folder_name, "tools", "build") def _build_bcp(self): folder = os.path.join(self.source_folder, self.folder_name, 'tools', 'bcp') with tools.vcvars(self.settings) if self._is_msvc else tools.no_op(): with tools.chdir(folder): command = "%s -j%s --abbreviate-paths -d2" % (self._b2_exe, tools.cpu_count()) self.output.warn(command) self.run(command) def _run_bcp(self): with tools.vcvars(self.settings) if self._is_msvc else tools.no_op(): with tools.chdir(self.source_folder): os.mkdir(self._bcp_dir) namespace = "--namespace=%s" % self.options.namespace alias = "--namespace-alias" if self.options.namespace_alias else "" boostdir = "--boost=%s" % self.folder_name libraries = {"build", "boost-build.jam", "boostcpp.jam"} for d in os.listdir(os.path.join(self.folder_name, "boost")): if os.path.isdir(os.path.join(self.folder_name, "boost", d)): libraries.add(d) for d in os.listdir(os.path.join(self.folder_name, "libs")): if os.path.isdir(os.path.join(self.folder_name, "libs", d)): libraries.add(d) libraries = 
' '.join(libraries) command = "{bcp} {namespace} {alias} " \ "{boostdir} {libraries} {outdir}".format(bcp=self._bcp_exe, namespace=namespace, alias=alias, libraries=libraries, boostdir=boostdir, outdir=self._bcp_dir) self.output.warn(command) self.run(command) def build(self): if self.options.header_only: self.output.warn("Header only package, skipping build") return self._clean() self._bootstrap() if self._use_bcp: self._build_bcp() self._run_bcp() flags = self.get_build_flags() # Help locating bzip2 and zlib self.create_user_config_jam(self._boost_build_dir) # JOIN ALL FLAGS b2_flags = " ".join(flags) full_command = "%s %s -j%s --abbreviate-paths -d2" % (self._b2_exe, b2_flags, tools.cpu_count()) # -d2 is to print more debug info and avoid travis timing out without output sources = os.path.join(self.source_folder, self._boost_dir) full_command += ' --debug-configuration --build-dir="%s"' % self.build_folder self.output.warn(full_command) with tools.vcvars(self.settings) if self._is_msvc else tools.no_op(): with tools.chdir(sources): # to locate user config jam (BOOST_BUILD_PATH) with tools.environment_append({"BOOST_BUILD_PATH": self._boost_build_dir}): # To show the libraries *1 # self.run("%s --show-libraries" % b2_exe) self.run(full_command) @property def _b2_os(self): return {"Windows": "windows", "WindowsStore": "windows", "Linux": "linux", "Android": "android", "Macos": "darwin", "iOS": "iphone", "watchOS": "iphone", "tvOS": "appletv", "FreeBSD": "freebsd", "SunOS": "solatis"}.get(str(self.settings.os)) @property def _b2_address_model(self): if str(self.settings.arch) in ["x86_64", "ppc64", "ppc64le", "mips64", "armv8", "sparcv9"]: return "64" else: return "32" @property def _b2_binary_format(self): return {"Windows": "pe", "WindowsStore": "pe", "Linux": "elf", "Android": "elf", "Macos": "mach-o", "iOS": "mach-o", "watchOS": "mach-o", "tvOS": "mach-o", "FreeBSD": "elf", "SunOS": "elf"}.get(str(self.settings.os)) @property def _b2_architecture(self): if str(self.settings.arch).startswith('x86'): return 'x86' elif str(self.settings.arch).startswith('ppc'): return 'power' elif str(self.settings.arch).startswith('arm'): return 'arm' elif str(self.settings.arch).startswith('sparc'): return 'sparc' elif str(self.settings.arch).startswith('mips64'): return 'mips64' elif str(self.settings.arch).startswith('mips'): return 'mips1' else: return None @property def _b2_abi(self): if str(self.settings.arch).startswith('x86'): return "ms" if str(self.settings.os) in ["Windows", "WindowsStore"] else "sysv" elif str(self.settings.arch).startswith('ppc'): return "sysv" elif str(self.settings.arch).startswith('arm'): return "aapcs" elif str(self.settings.arch).startswith('mips'): return "o32" else: return None def get_build_flags(self): if tools.cross_building(self.settings): flags = self.get_build_cross_flags() else: flags = [] # https://www.boost.org/doc/libs/1_69_0/libs/context/doc/html/context/architectures.html if self._b2_os: flags.append("target-os=%s" % self._b2_os) if self._b2_architecture: flags.append("architecture=%s" % self._b2_architecture) if self._b2_address_model: flags.append("address-model=%s" % self._b2_address_model) if self._b2_binary_format: flags.append("binary-format=%s" % self._b2_binary_format) if self._b2_abi: flags.append("abi=%s" % self._b2_abi) flags.append("-sBOOST_BUILD_PATH=%s" % self._boost_build_dir) if self.settings.compiler == "gcc": flags.append("--layout=system") if self._is_msvc and self.settings.compiler.runtime: flags.append("runtime-link=%s" % ("static" if 
"MT" in str(self.settings.compiler.runtime) else "shared")) flags.append("threading=multi") flags.append("link=%s" % ("static" if not self.options.shared else "shared")) if self.settings.build_type == "Debug": flags.append("variant=debug") else: flags.append("variant=release") for libname in lib_list: if getattr(self.options, "without_%s" % libname): flags.append("--without-%s" % libname) toolset, _, _ = self.get_toolset_version_and_exe() flags.append("toolset=%s" % toolset) if self.settings.cppstd: flags.append("cxxflags=%s" % cppstd_flag( self.settings.get_safe("compiler"), self.settings.get_safe("compiler.version"), self.settings.get_safe("cppstd") ) ) # CXX FLAGS cxx_flags = [] # fPIC DEFINITION if self.settings.os != "Windows": if self.options.fPIC: cxx_flags.append("-fPIC") # Standalone toolchain fails when declare the std lib if self.settings.os != "Android": try: if str(self.settings.compiler.libcxx) == "libstdc++": flags.append("define=_GLIBCXX_USE_CXX11_ABI=0") elif str(self.settings.compiler.libcxx) == "libstdc++11": flags.append("define=_GLIBCXX_USE_CXX11_ABI=1") if "clang" in str(self.settings.compiler): if str(self.settings.compiler.libcxx) == "libc++": cxx_flags.append("-stdlib=libc++") flags.append('linkflags="-stdlib=libc++"') else: cxx_flags.append("-stdlib=libstdc++") except: pass if self.options.error_code_header_only: flags.append("define=BOOST_ERROR_CODE_HEADER_ONLY=1") if self.options.system_no_deprecated: flags.append("define=BOOST_SYSTEM_NO_DEPRECATED=1") if self.options.asio_no_deprecated: flags.append("define=BOOST_ASIO_NO_DEPRECATED=1") if tools.is_apple_os(self.settings.os): if self.settings.get_safe("os.version"): cxx_flags.append(tools.apple_deployment_target_flag(self.settings.os, self.settings.os.version)) if self.settings.os == "iOS": cxx_flags.append("-DBOOST_AC_USE_PTHREADS") cxx_flags.append("-DBOOST_SP_USE_PTHREADS") cxx_flags.append("-fvisibility=hidden") cxx_flags.append("-fvisibility-inlines-hidden") cxx_flags.append("-fembed-bitcode") cxx_flags = 'cxxflags="%s"' % " ".join(cxx_flags) if cxx_flags else "" flags.append(cxx_flags) return flags def get_build_cross_flags(self): arch = self.settings.get_safe('arch') flags = [] self.output.info("Cross building, detecting compiler...") if arch.startswith('arm'): if 'hf' in arch: flags.append('-mfloat-abi=hard') elif arch in ["x86", "x86_64"]: pass elif arch.startswith("ppc"): pass else: raise Exception("I'm so sorry! I don't know the appropriate ABI for " "your architecture. 
:'(") self.output.info("Cross building flags: %s" % flags) return flags @property def _ar(self): if "AR" in os.environ: return os.environ["AR"] if tools.is_apple_os(self.settings.os): return tools.XCRun(self.settings).ar return None @property def _ranlib(self): if "RANLIB" in os.environ: return os.environ["RANLIB"] if tools.is_apple_os(self.settings.os): return tools.XCRun(self.settings).ranlib return None @property def _cxx(self): if "CXX" in os.environ: return os.environ["CXX"] if tools.is_apple_os(self.settings.os): return tools.XCRun(self.settings).cxx return None def create_user_config_jam(self, folder): """To help locating the zlib and bzip2 deps""" self.output.warn("Patching user-config.jam") compiler_command = self._cxx contents = "" if self.zip_bzip2_requires_needed: contents = "\nusing zlib : 1.2.11 : <include>%s <search>%s <name>%s ;" % ( self.deps_cpp_info["zlib"].include_paths[0].replace('\\', '/'), self.deps_cpp_info["zlib"].lib_paths[0].replace('\\', '/'), self.deps_cpp_info["zlib"].libs[0]) contents += "\nusing bzip2 : 1.0.6 : <include>%s <search>%s <name>%s ;" % ( self.deps_cpp_info["bzip2"].include_paths[0].replace('\\', '/'), self.deps_cpp_info["bzip2"].lib_paths[0].replace('\\', '/'), self.deps_cpp_info["bzip2"].libs[0]) if not self.options.without_python: # https://www.boost.org/doc/libs/1_69_0/libs/python/doc/html/building/configuring_boost_build.html contents += "\nusing python : {version} : {executable} : {includes} : {libraries} ;"\ .format(version=self._python_version, executable=self._python_executable, includes=self._python_includes, libraries=self._python_libraries) toolset, version, exe = self.get_toolset_version_and_exe() exe = compiler_command or exe # Prioritize CXX # Specify here the toolset with the binary if present if don't empty parameter : : contents += '\nusing "%s" : "%s" : ' % (toolset, version) contents += ' "%s"' % exe.replace("\\", "/") if tools.is_apple_os(self.settings.os): contents += " -isysroot %s" % tools.XCRun(self.settings).sdk_path if self.settings.get_safe("arch"): contents += " -arch %s" % tools.to_apple_arch(self.settings.arch) contents += " : \n" if self._ar: contents += '<archiver>"%s" ' % tools.which(self._ar).replace("\\", "/") if self._ranlib: contents += '<ranlib>"%s" ' % tools.which(self._ranlib).replace("\\", "/") if "CXXFLAGS" in os.environ: contents += '<cxxflags>"%s" ' % os.environ["CXXFLAGS"] if "CFLAGS" in os.environ: contents += '<cflags>"%s" ' % os.environ["CFLAGS"] if "LDFLAGS" in os.environ: contents += '<linkflags>"%s" ' % os.environ["LDFLAGS"] if "ASFLAGS" in os.environ: contents += '<asmflags>"%s" ' % os.environ["ASFLAGS"] contents += " ;" self.output.warn(contents) filename = "%s/user-config.jam" % folder tools.save(filename, contents) def get_toolset_version_and_exe(self): compiler_version = str(self.settings.compiler.version) compiler = str(self.settings.compiler) if self._is_msvc: cversion = self.settings.compiler.version _msvc_version = "14.1" if Version(str(cversion)) >= "15" else "%s.0" % cversion return "msvc", _msvc_version, "" elif compiler == "gcc" and compiler_version[0] >= "5": # For GCC >= v5 we only need the major otherwise Boost doesn't find the compiler # The NOT windows check is necessary to exclude MinGW: if not tools.which("g++-%s" % compiler_version[0]): # In fedora 24, 25 the gcc is 6, but there is no g++-6 and the detection is 6.3.1 # so b2 fails because 6 != 6.3.1. 
Specify the exe to avoid the smart detection executable = tools.which("g++") else: executable = "" return compiler, compiler_version[0], executable elif str(self.settings.compiler) in ["clang", "gcc"]: # For GCC < v5 and Clang we need to provide the entire version string return compiler, compiler_version, "" elif self.settings.compiler == "apple-clang": return "clang-darwin", compiler_version, self._cxx elif self.settings.compiler == "sun-cc": return "sunpro", compiler_version, "" else: return compiler, compiler_version, "" ##################### BOOSTRAP METHODS ########################### def _get_boostrap_toolset(self): if self._is_msvc: comp_ver = self.settings.compiler.version return "vc%s" % ("141" if Version(str(comp_ver)) >= "15" else comp_ver) if tools.os_info.is_windows: return "" with_toolset = {"apple-clang": "darwin"}.get(str(self.settings.compiler), str(self.settings.compiler)) # fallback for the case when no unversioned gcc/clang is available if with_toolset in ["gcc", "clang"] and not tools.which(with_toolset): with_toolset = "cc" return with_toolset def _bootstrap(self): folder = os.path.join(self.source_folder, self.folder_name, "tools", "build") try: bootstrap = "bootstrap.bat" if tools.os_info.is_windows else "./bootstrap.sh" with tools.vcvars(self.settings) if self._is_msvc else tools.no_op(): self.output.info("Using %s %s" % (self.settings.compiler, self.settings.compiler.version)) with tools.chdir(folder): option = "" if tools.os_info.is_windows else "-with-toolset=" cmd = "%s %s%s" % (bootstrap, option, self._get_boostrap_toolset()) self.output.info(cmd) self.run(cmd) except Exception as exc: self.output.warn(str(exc)) if os.path.exists(os.path.join(folder, "bootstrap.log")): self.output.warn(tools.load(os.path.join(folder, "bootstrap.log"))) raise #################################################################### def package(self): # This stage/lib is in source_folder... Face palm, looks like it builds in build but then # copy to source with the good lib name out_lib_dir = os.path.join(self._boost_dir, "stage", "lib") self.copy(pattern="*", dst="include/boost", src="%s/boost" % self._boost_dir) if not self.options.shared: self.copy(pattern="*.a", dst="lib", src=out_lib_dir, keep_path=False) self.copy(pattern="*.so", dst="lib", src=out_lib_dir, keep_path=False, symlinks=True) self.copy(pattern="*.so.*", dst="lib", src=out_lib_dir, keep_path=False, symlinks=True) self.copy(pattern="*.dylib*", dst="lib", src=out_lib_dir, keep_path=False) self.copy(pattern="*.lib", dst="lib", src=out_lib_dir, keep_path=False) self.copy(pattern="*.dll", dst="bin", src=out_lib_dir, keep_path=False) # When first call with source do not package anything if not os.path.exists(os.path.join(self.package_folder, "lib")): return self.renames_to_make_cmake_find_package_happy() def renames_to_make_cmake_find_package_happy(self): if not self.options.skip_lib_rename: # CMake findPackage help renames = [] for libname in os.listdir(os.path.join(self.package_folder, "lib")): new_name = libname libpath = os.path.join(self.package_folder, "lib", libname) if "-" in libname: new_name = libname.split("-", 1)[0] + "." 
+ libname.split(".")[-1] if new_name.startswith("lib"): new_name = new_name[3:] renames.append([libpath, os.path.join(self.package_folder, "lib", new_name)]) for original, new in renames: if original != new and not os.path.exists(new): self.output.info("Rename: %s => %s" % (original, new)) os.rename(original, new) def package_info(self): gen_libs = tools.collect_libs(self) # List of lists, so if more than one matches the lib like serialization and wserialization # both will be added to the list ordered_libs = [[] for _ in range(len(lib_list))] # The order is important, reorder following the lib_list order missing_order_info = [] for real_lib_name in gen_libs: for pos, alib in enumerate(lib_list): if os.path.splitext(real_lib_name)[0].split("-")[0].endswith(alib): ordered_libs[pos].append(real_lib_name) break else: # self.output.info("Missing in order: %s" % real_lib_name) if "_exec_monitor" not in real_lib_name: # https://github.com/bincrafters/community/issues/94 missing_order_info.append(real_lib_name) # Assume they do not depend on other # Flat the list and append the missing order self.cpp_info.libs = [item for sublist in ordered_libs for item in sublist if sublist] + missing_order_info if self.options.without_test: # remove boost_unit_test_framework self.cpp_info.libs = [lib for lib in self.cpp_info.libs if "unit_test" not in lib] self.output.info("LIBRARIES: %s" % self.cpp_info.libs) self.output.info("Package folder: %s" % self.package_folder) if not self.options.header_only and self.options.shared: self.cpp_info.defines.append("BOOST_ALL_DYN_LINK") else: self.cpp_info.defines.append("BOOST_USE_STATIC_LIBS") if self.options.system_no_deprecated: self.cpp_info.defines.append("BOOST_SYSTEM_NO_DEPRECATED") if self.options.asio_no_deprecated: self.cpp_info.defines.append("BOOST_ASIO_NO_DEPRECATED") if not self.options.header_only: if self.options.error_code_header_only: self.cpp_info.defines.append("BOOST_ERROR_CODE_HEADER_ONLY") if not self.options.without_python: if not self.options.shared: self.cpp_info.defines.append("BOOST_PYTHON_STATIC_LIB") if self._is_msvc: if not self.options.magic_autolink: # DISABLES AUTO LINKING! NO SMART AND MAGIC DECISIONS THANKS! self.cpp_info.defines.extend(["BOOST_ALL_NO_LIB"]) self.output.info("Disabled magic autolinking (smart and magic decisions)") else: self.output.info("Enabled magic autolinking (smart and magic decisions)") # https://github.com/conan-community/conan-boost/issues/127#issuecomment-404750974 self.cpp_info.libs.append("bcrypt") elif self.settings.os == "Linux": # https://github.com/conan-community/conan-boost/issues/135 self.cpp_info.libs.append("pthread") self.env_info.BOOST_ROOT = self.package_folder
[]
[]
[ "ASFLAGS", "AR", "CXX", "RANLIB", "CFLAGS", "LDFLAGS", "CXXFLAGS" ]
[]
["ASFLAGS", "AR", "CXX", "RANLIB", "CFLAGS", "LDFLAGS", "CXXFLAGS"]
python
7
0
jenkin/services/simpletwoservice/website-service/src/main.go
package main

import (
	"crypto/tls"
	"encoding/json"
	"html/template"
	"log"
	"net/http"
	"os"

	"github.com/gin-gonic/gin"
)

const VERSION string = "1.0.0"

type Product struct {
	ID          string
	Title       string
	Description string
	Price       float64
}

func main() {
	// Products template
	html := `
<html>
  <head>
    <title>Product Listing</title>
  </head>
  <body>
    <h1>Product Listing</h1>
    {{range .}}
    <h2>{{.Title}}</h2>
    <p><b>ID</b>: {{.ID}}</p>
    <p><b>Description</b>: {{.Description}}</p>
    <p><b>Price</b>: {{.Price}}</p>
    {{end}}
  </body>
</html>
`
	tmpl, err := template.New("product-listing").Parse(html)
	if err != nil {
		log.Fatalf("Error parsing product listing template: %s", err)
	}

	router := gin.Default()
	router.SetHTMLTemplate(tmpl)

	// Router handlers
	router.GET("/", func(c *gin.Context) {
		os.Setenv("PRODUCT_SERVICE_URL", "threetier-dev-alb-2035062619.us-east-1.elb.amazonaws.com/products")
		product := os.Getenv("PRODUCT_SERVICE_URL")

		tr := &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		}
		client := &http.Client{Transport: tr}
		resp, err := client.Get("https://" + product)
		// resp, err := http.Get("https://" + product)
		if err != nil {
			c.IndentedJSON(500, gin.H{
				"status":   "error",
				"message":  "Could not connect to product service",
				"detailed": err.Error(),
			})
			return
		}
		defer resp.Body.Close()

		var products []Product
		json.NewDecoder(resp.Body).Decode(&products)

		c.HTML(200, "product-listing", products)
	})

	// Lets go...
	router.Run(":8000")
}
[ "\"PRODUCT_SERVICE_URL\"" ]
[]
[ "PRODUCT_SERVICE_URL" ]
[]
["PRODUCT_SERVICE_URL"]
go
1
0
service/common/generate_protos_test.py
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
# pylint: disable=g-bad-import-order, g-import-not-at-top, reimported
"""Tests that protos are generated after using the generate_protos module."""
import glob
import os
import subprocess
import sys
import tempfile
from unittest import mock

from absl.testing import absltest

# Set the environment variable to false so the test can call the method to
# generate protos explicitly.
os.environ['FALKEN_AUTO_GENERATE_PROTOS'] = '0'
from common import generate_protos


class GenerateProtosTest(absltest.TestCase):
  """Test generate_protos module."""

  def setUp(self):
    super().setUp()
    os.environ['FALKEN_GENERATED_PROTOS_DIR'] = ''
    self._temp_dir = tempfile.TemporaryDirectory()

  def tearDown(self):
    """Tear down the testing environment."""
    super().tearDown()
    self._temp_dir.cleanup()

  def test_get_generated_protos_dir(self):
    """Get the generated protos directory."""
    self.assertEqual(
        generate_protos._PROTO_GEN_DIR,
        os.path.basename(generate_protos.get_generated_protos_dir()))
    os.environ['FALKEN_GENERATED_PROTOS_DIR'] = (
        os.path.join('custom', 'dir'))
    self.assertEqual(
        os.path.join('custom', 'dir', generate_protos._PROTO_GEN_DIR),
        generate_protos.get_generated_protos_dir())

  @mock.patch.object(generate_protos, 'get_generated_protos_dir')
  @mock.patch.object(generate_protos, 'clean_up')
  @mock.patch.object(subprocess, 'check_call')
  def test_generate_protos_failed(self, mock_check_call, mock_clean_up,
                                  mock_get_generated_protos_dir):
    """Call the generate method and make sure it cleans up on failure."""
    mock_get_generated_protos_dir.return_value = os.path.join(
        self._temp_dir.name, generate_protos._PROTO_GEN_DIR)
    mock_check_call.side_effect = subprocess.CalledProcessError(1, 'fake')
    with self.assertRaises(subprocess.CalledProcessError):
      generate_protos.generate()
    mock_clean_up.called_once()

  @mock.patch.object(generate_protos, 'get_generated_protos_dir')
  def test_generate_protos(self, mock_get_generated_protos_dir):
    """Call the generate method and verify generation and use."""
    generated_dir = os.path.join(self._temp_dir.name,
                                 generate_protos._PROTO_GEN_DIR)
    mock_get_generated_protos_dir.return_value = generated_dir
    generate_protos.generate()
    source_protos = []
    for d in generate_protos.get_source_proto_dirs():
      source_protos += glob.glob(f'{d}/*.proto')

    def extract_generated_proto_path(source_proto_path, generated_dir):
      """Create generated proto path with the proto name.

      This can then be concatenated with a suffix like .py, _pb2.py, or
      _pb2_grpc.py.

      Args:
        source_proto_path: Proto path of the source proto file.
        generated_dir: Directory containing the generated protos.

      Returns:
        String that follows /generated_dir/source_proto's proto name without
        extension. e.g. '/falken/service/proto_gen_module/brain'
      """
      return os.path.join(generated_dir,
                          os.path.basename(source_proto_path).split('.')[0])

    all_expected_protos = ([
        extract_generated_proto_path(proto_name, generated_dir) + '_pb2.py'
        for proto_name in source_protos
    ] + [
        extract_generated_proto_path(proto_name, generated_dir) + '_pb2_grpc.py'
        for proto_name in source_protos
    ])
    generated_protos = glob.glob(f'{generated_dir}/*.py')
    self.assertSameElements(generated_protos, all_expected_protos)

    import primitives_pb2
    self.assertIsNotNone(primitives_pb2.Rotation())

  @mock.patch.object(generate_protos, 'get_generated_protos_dir')
  def test_generate_protos_cache(self, mock_get_generated_protos_dir):
    """Verify calling generate_proto multiple times without clean_up works."""
    mock_get_generated_protos_dir.return_value = os.path.join(
        self._temp_dir.name, generate_protos._PROTO_GEN_DIR)
    # First, verify that importing brain_pb2 before generate_protos is called
    # raises a ModuleNotFoundError.
    with self.assertRaises(ModuleNotFoundError):
      import brain_pb2
    old_sys_path = sys.path[:]
    # Call generate for the first time.
    generate_protos.generate()
    import brain_pb2
    self.assertIsNotNone(brain_pb2.Brain())
    # Restore the sys.path to what it was before calling generate().
    sys.path = old_sys_path
    # Because the sys.path does not contain the path to the generated protos,
    # snapshot_pb2 cannot be found.
    with self.assertRaises(ModuleNotFoundError):
      import snapshot_pb2
    # Generate the protos again, which does not actually re-generate the protos,
    # but adds the path to the sys.path. Now snapshot_pb2 can be found.
    generate_protos.generate()
    import snapshot_pb2
    self.assertIsNotNone(snapshot_pb2.Snapshot())


if __name__ == '__main__':
  absltest.main()
[]
[]
[ "FALKEN_GENERATED_PROTOS_DIR", "FALKEN_AUTO_GENERATE_PROTOS" ]
[]
["FALKEN_GENERATED_PROTOS_DIR", "FALKEN_AUTO_GENERATE_PROTOS"]
python
2
0