filename (string, 4–198 chars) | content (string, 25–939k chars) | environment (list) | variablearg (list) | constarg (list) | variableargjson (string, 1 class) | constargjson (string, 2–3.9k chars) | lang (string, 3 classes) | constargcount (float64, 0–129, ⌀) | variableargcount (float64, 0, ⌀) | sentence (string, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
examples/projects.go | package main
import (
"fmt"
"os"
"github.com/wtfutil/todoist"
)
func main() {
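// The todoist client authenticates with a token read from the environment;
// note the lowercase variable name "todoist_token" used below must be set
// before running this example.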
todoist.Token = os.Getenv("todoist_token")
fmt.Println("CreateProject")
project, err := todoist.CreateProject("test")
if err != nil {
panic(err)
}
fmt.Println(project)
fmt.Println("ListProject")
projects, err := todoist.ListProject()
if err != nil {
panic(err)
}
fmt.Println(projects)
fmt.Println("UpdateProject")
project.Name = project.Name + " Update"
if err = project.Update(); err != nil {
panic(err)
}
fmt.Println("GetProject")
project, err = todoist.GetProject(project.ID)
if err != nil {
panic(err)
}
fmt.Println(project)
fmt.Println("DeleteProject")
if err := project.Delete(); err != nil {
panic(err)
}
fmt.Println("End Project")
}
| [
"\"todoist_token\""
]
| []
| [
"todoist_token"
]
| [] | ["todoist_token"] | go | 1 | 0 | |
pilot/pkg/proxy/envoy/v2/lds_test.go | // Copyright 2018 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v2_test
import (
"io/ioutil"
"os"
"testing"
"time"
xdsapi "github.com/envoyproxy/go-control-plane/envoy/api/v2"
xdsapi_listener "github.com/envoyproxy/go-control-plane/envoy/api/v2/listener"
xdsapi_http_connection_manager "github.com/envoyproxy/go-control-plane/envoy/config/filter/network/http_connection_manager/v2"
testenv "istio.io/istio/mixer/test/client/env"
"istio.io/istio/pilot/pkg/bootstrap"
"istio.io/istio/pilot/pkg/model"
v2 "istio.io/istio/pilot/pkg/proxy/envoy/v2"
"istio.io/istio/pkg/adsc"
"istio.io/istio/pkg/test/env"
"istio.io/istio/tests/util"
)
// TestLDS using isolated namespaces
func TestLDSIsolated(t *testing.T) {
_, tearDown := initLocalPilotTestEnv(t)
defer tearDown()
// Sidecar in 'none' mode
t.Run("sidecar_none", func(t *testing.T) {
// TODO: add a Service with EDS resolution in the none ns.
// The ServiceEntry only allows STATIC - both STATIC and EDS should generate TCP listeners on :port
// while DNS and NONE should generate old-style bind ports.
// Right now 'STATIC' and 'EDS' result in ClientSideLB in the internal object, so listener test is valid.
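// Dial the in-process Pilot as a sidecar with traffic interception disabled
// ('none' mode) and HTTP/1.0 support requested via node metadata.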
ldsr, err := adsc.Dial(util.MockPilotGrpcAddr, "", &adsc.Config{
Meta: map[string]string{
model.NodeMetadataInterceptionMode: string(model.InterceptionNone),
model.NodeMetadataHTTP10: "1",
},
IP: "10.11.0.1", // matches none.yaml s1tcp.none
Namespace: "none",
})
if err != nil {
t.Fatal(err)
}
defer ldsr.Close()
ldsr.Watch()
_, err = ldsr.Wait("rds", 50000*time.Second)
if err != nil {
t.Fatal("Failed to receive LDS", err)
return
}
err = ldsr.Save(env.IstioOut + "/none")
if err != nil {
t.Fatal(err)
}
// 7071 (inbound), 2001 (service - also as http proxy), 15002 (http-proxy)
// We don't get mixer on 9091 or 15004 because there are no services defined in istio-system namespace
// in the none.yaml setup
if len(ldsr.HTTPListeners) != 3 {
// TODO: we are still debating if for HTTP services we have any use case to create a 127.0.0.1:port outbound
// for the service (the http proxy is already covering this)
t.Error("HTTP listeners, expecting 5 got ", len(ldsr.HTTPListeners), ldsr.HTTPListeners)
}
// s1tcp:2000 outbound, bind=true (to reach other instances of the service)
// s1:5005 outbound, bind=true
// :443 - https external, bind=false
// 10.11.0.1_7070, bind=true -> inbound|2000|s1 - on port 7070, fwd to 37070
// virtual
if len(ldsr.TCPListeners) == 0 {
t.Fatal("No response")
}
for _, s := range []string{"lds_tcp", "lds_http", "rds", "cds", "ecds"} {
want, err := ioutil.ReadFile(env.IstioOut + "/none_" + s + ".json")
if err != nil {
t.Fatal(err)
}
got, err := ioutil.ReadFile("testdata/none_" + s + ".json")
if err != nil {
t.Fatal(err)
}
if err = util.Compare(got, want); err != nil {
// Just log for now - golden changes every time there is a config generation update.
// It is mostly intended as a reference for what is generated - we need to add explicit checks
// for things we need, like the number of expected listeners.
// This is mainly used for debugging what changed from the snapshot in the golden files.
if os.Getenv("CONFIG_DIFF") == "1" {
t.Logf("error in golden file %s %v", s, err)
}
}
}
// TODO: check bind==true
// TODO: verify listeners for outbound are on 127.0.0.1 (not yet), port 2000, 2005, 2007
// TODO: verify virtual listeners for unsupported cases
// TODO: add and verify SNI listener on 127.0.0.1:443
// TODO: verify inbound service port is on 127.0.0.1, and containerPort on 0.0.0.0
// TODO: BUG, SE with empty endpoints is rejected - it is actually valid config (service may not have endpoints)
})
// Test for the examples in the ServiceEntry doc
t.Run("se_example", func(t *testing.T) {
// TODO: add a Service with EDS resolution in the none ns.
// The ServiceEntry only allows STATIC - both STATIC and EDS should generate TCP listeners on :port
// while DNS and NONE should generate old-style bind ports.
// Right now 'STATIC' and 'EDS' result in ClientSideLB in the internal object, so listener test is valid.
ldsr, err := adsc.Dial(util.MockPilotGrpcAddr, "", &adsc.Config{
Meta: map[string]string{},
IP: "10.12.0.1", // matches none.yaml s1tcp.none
Namespace: "seexamples",
})
if err != nil {
t.Fatal(err)
}
defer ldsr.Close()
ldsr.Watch()
_, err = ldsr.Wait("rds", 50000*time.Second)
if err != nil {
t.Fatal("Failed to receive LDS", err)
return
}
err = ldsr.Save(env.IstioOut + "/seexample")
if err != nil {
t.Fatal(err)
}
})
// Test for the examples in the ServiceEntry doc
t.Run("se_examplegw", func(t *testing.T) {
// TODO: add a Service with EDS resolution in the none ns.
// The ServiceEntry only allows STATIC - both STATIC and EDS should generate TCP listeners on :port
// while DNS and NONE should generate old-style bind ports.
// Right now 'STATIC' and 'EDS' result in ClientSideLB in the internal object, so listener test is valid.
ldsr, err := adsc.Dial(util.MockPilotGrpcAddr, "", &adsc.Config{
Meta: map[string]string{},
IP: "10.13.0.1",
Namespace: "exampleegressgw",
})
if err != nil {
t.Fatal(err)
}
defer ldsr.Close()
ldsr.Watch()
_, err = ldsr.Wait("rds", 50000*time.Second)
if err != nil {
t.Fatal("Failed to receive LDS", err)
return
}
err = ldsr.Save(env.IstioOut + "/seexample-eg")
if err != nil {
t.Fatal(err)
}
})
}
// TestLDS using default sidecar in root namespace
func TestLDSWithDefaultSidecar(t *testing.T) {
server, tearDown := util.EnsureTestServer(func(args *bootstrap.PilotArgs) {
args.Plugins = bootstrap.DefaultPlugins
args.Config.FileDir = env.IstioSrc + "/tests/testdata/networking/sidecar-ns-scope"
args.Mesh.MixerAddress = ""
args.Mesh.RdsRefreshDelay = nil
args.MeshConfig = nil
args.Mesh.ConfigFile = env.IstioSrc + "/tests/testdata/networking/sidecar-ns-scope/mesh.yaml"
args.Service.Registries = []string{}
})
testEnv = testenv.NewTestSetup(testenv.SidecarTest, t)
testEnv.Ports().PilotGrpcPort = uint16(util.MockPilotGrpcPort)
testEnv.Ports().PilotHTTPPort = uint16(util.MockPilotHTTPPort)
testEnv.IstioSrc = env.IstioSrc
testEnv.IstioOut = env.IstioOut
server.EnvoyXdsServer.ConfigUpdate(true)
defer tearDown()
adsResponse, err := adsc.Dial(util.MockPilotGrpcAddr, "", &adsc.Config{
Meta: map[string]string{
model.NodeMetadataConfigNamespace: "ns1",
model.NodeMetadataInstanceIPs: "100.1.1.2", // as service instance of http2.ns1
model.NodeMetadataIstioProxyVersion: "1.1.0",
},
IP: "100.1.1.2",
Namespace: "ns1",
})
if err != nil {
t.Fatal(err)
}
defer adsResponse.Close()
adsResponse.Watch()
_, err = adsResponse.Wait("lds", 10*time.Second)
if err != nil {
t.Fatal("Failed to receive LDS response", err)
return
}
_, err = adsResponse.Wait("rds", 10*time.Second)
if err != nil {
t.Fatal("Failed to receive RDS response", err)
return
}
_, err = adsResponse.Wait("cds", 10*time.Second)
if err != nil {
t.Fatal("Failed to receive CDS response", err)
return
}
// Expect 6 listeners : 1 orig_dst, 1 http inbound + 4 outbound (http, tcp1, istio-policy and istio-telemetry)
// plus 2 extra due to the mem registry
if (len(adsResponse.HTTPListeners) + len(adsResponse.TCPListeners)) != 6 {
t.Fatalf("Expected 8 listeners, got %d\n", len(adsResponse.HTTPListeners)+len(adsResponse.TCPListeners))
}
// Expect 10 CDS clusters: 1 inbound + 7 outbound (2 http services, 1 tcp service, 2 istio-system services,
// and 2 subsets of http1), 1 blackhole, 1 passthrough
// plus 2 extra due to the mem registry
if (len(adsResponse.Clusters) + len(adsResponse.EDSClusters)) != 10 {
t.Fatalf("Expected 12 Clusters in CDS output. Got %d", len(adsResponse.Clusters)+len(adsResponse.EDSClusters))
}
// Expect two vhost blocks in RDS output for 8080 (one for http1, another for http2)
// plus one extra due to mem registry
if len(adsResponse.Routes["8080"].VirtualHosts) != 3 {
t.Fatalf("Expected two VirtualHosts in RDS output. Got %d", len(adsResponse.Routes["8080"].VirtualHosts))
}
}
// TestLDS using gateways
func TestLDSWithIngressGateway(t *testing.T) {
server, tearDown := util.EnsureTestServer(func(args *bootstrap.PilotArgs) {
args.Plugins = bootstrap.DefaultPlugins
args.Config.FileDir = env.IstioSrc + "/tests/testdata/networking/ingress-gateway"
args.Mesh.MixerAddress = ""
args.Mesh.RdsRefreshDelay = nil
args.Mesh.ConfigFile = env.IstioSrc + "/tests/testdata/networking/ingress-gateway/mesh.yaml"
args.Service.Registries = []string{}
})
testEnv = testenv.NewTestSetup(testenv.GatewayTest, t)
testEnv.Ports().PilotGrpcPort = uint16(util.MockPilotGrpcPort)
testEnv.Ports().PilotHTTPPort = uint16(util.MockPilotHTTPPort)
testEnv.IstioSrc = env.IstioSrc
testEnv.IstioOut = env.IstioOut
server.EnvoyXdsServer.ConfigUpdate(true)
defer tearDown()
adsResponse, err := adsc.Dial(util.MockPilotGrpcAddr, "", &adsc.Config{
Meta: map[string]string{
model.NodeMetadataConfigNamespace: "istio-system",
model.NodeMetadataInstanceIPs: "99.1.1.1", // as service instance of ingress gateway
model.NodeMetadataIstioProxyVersion: "1.1.0",
},
IP: "99.1.1.1",
Namespace: "istio-system",
NodeType: "router",
})
if err != nil {
t.Fatal(err)
}
defer adsResponse.Close()
adsResponse.DumpCfg = true
adsResponse.Watch()
_, err = adsResponse.Wait("lds", 10000*time.Second)
if err != nil {
t.Fatal("Failed to receive LDS response", err)
return
}
// Expect 2 listeners : 1 for 80, 1 for 443
// where 443 listener has 3 filter chains
if (len(adsResponse.HTTPListeners) + len(adsResponse.TCPListeners)) != 2 {
t.Fatalf("Expected 2 listeners, got %d\n", len(adsResponse.HTTPListeners)+len(adsResponse.TCPListeners))
}
// TODO: This is flimsy. The ADSC code treats any listener with http connection manager as a HTTP listener
// instead of looking at it as a listener with multiple filter chains
l := adsResponse.HTTPListeners["0.0.0.0_443"]
if l != nil {
if len(l.FilterChains) != 3 {
t.Fatalf("Expected 3 filter chains, got %d\n", len(l.FilterChains))
}
}
}
// TestLDS runs LDSv2 tests.
func TestLDS(t *testing.T) {
_, tearDown := initLocalPilotTestEnv(t)
defer tearDown()
t.Run("sidecar", func(t *testing.T) {
ldsr, cancel, err := connectADS(util.MockPilotGrpcAddr)
if err != nil {
t.Fatal(err)
}
defer cancel()
err = sendLDSReq(sidecarID(app3Ip, "app3"), ldsr)
if err != nil {
t.Fatal(err)
}
res, err := ldsr.Recv()
if err != nil {
t.Fatal("Failed to receive LDS", err)
return
}
strResponse, _ := model.ToJSONWithIndent(res, " ")
_ = ioutil.WriteFile(env.IstioOut+"/ldsv2_sidecar.json", []byte(strResponse), 0644)
if len(res.Resources) == 0 {
t.Fatal("No response")
}
})
// 'router' or 'gateway' type of listener
t.Run("gateway", func(t *testing.T) {
ldsr, cancel, err := connectADS(util.MockPilotGrpcAddr)
if err != nil {
t.Fatal(err)
}
defer cancel()
err = sendLDSReq(gatewayID(gatewayIP), ldsr)
if err != nil {
t.Fatal(err)
}
res, err := ldsr.Recv()
if err != nil {
t.Fatal("Failed to receive LDS", err)
}
strResponse, _ := model.ToJSONWithIndent(res, " ")
_ = ioutil.WriteFile(env.IstioOut+"/ldsv2_gateway.json", []byte(strResponse), 0644)
if len(res.Resources) == 0 {
t.Fatal("No response")
}
})
// TODO: compare with some golden once it's stable
// check that each mocked service and destination rule has a corresponding resource
// TODO: dynamic checks ( see EDS )
}
// TestLDS using sidecar scoped on workload without Service
func TestLDSWithSidecarForWorkloadWithoutService(t *testing.T) {
server, tearDown := util.EnsureTestServer(func(args *bootstrap.PilotArgs) {
args.Plugins = bootstrap.DefaultPlugins
args.Config.FileDir = env.IstioSrc + "/tests/testdata/networking/sidecar-without-service"
args.Mesh.MixerAddress = ""
args.Mesh.RdsRefreshDelay = nil
args.Mesh.ConfigFile = env.IstioSrc + "/tests/testdata/networking/sidecar-without-service/mesh.yaml"
args.Service.Registries = []string{}
})
registry := memServiceDiscovery(server, t)
registry.AddWorkload("98.1.1.1", model.Labels{"app": "consumeronly"}) // These labels must match the sidecars workload selector
testEnv = testenv.NewTestSetup(testenv.SidecarConsumerOnlyTest, t)
testEnv.Ports().PilotGrpcPort = uint16(util.MockPilotGrpcPort)
testEnv.Ports().PilotHTTPPort = uint16(util.MockPilotHTTPPort)
testEnv.IstioSrc = env.IstioSrc
testEnv.IstioOut = env.IstioOut
server.EnvoyXdsServer.ConfigUpdate(true)
defer tearDown()
adsResponse, err := adsc.Dial(util.MockPilotGrpcAddr, "", &adsc.Config{
Meta: map[string]string{
model.NodeMetadataConfigNamespace: "consumerns",
model.NodeMetadataInstanceIPs: "98.1.1.1", // as the consumer-only workload registered above
model.NodeMetadataIstioProxyVersion: "1.1.0",
},
IP: "98.1.1.1",
Namespace: "consumerns", // namespace must match the namespace of the sidecar in the configs.yaml
NodeType: "sidecar",
})
if err != nil {
t.Fatal(err)
}
defer adsResponse.Close()
adsResponse.DumpCfg = true
adsResponse.Watch()
_, err = adsResponse.Wait("lds", 10*time.Second)
if err != nil {
t.Fatal("Failed to receive LDS response", err)
return
}
// Expect 1 HTTP listener for 8081
if len(adsResponse.HTTPListeners) != 1 {
t.Fatalf("Expected 1 HTTP listener, got %d", len(adsResponse.HTTPListeners))
}
// TODO: This is flimsy. The ADSC code treats any listener with http connection manager as a HTTP listener
// instead of looking at it as a listener with multiple filter chains
if l := adsResponse.HTTPListeners["0.0.0.0_8081"]; l != nil {
if len(l.FilterChains) != 1 {
t.Fatalf("Expected 1 filter chains, got %d", len(l.FilterChains))
}
} else {
t.Fatal("Expected listener for 0.0.0.0_8081")
}
// Expect only one EDS cluster for http1.ns1.svc.cluster.local
if len(adsResponse.EDSClusters) != 1 {
t.Fatalf("Expected 1 eds cluster, got %d", len(adsResponse.EDSClusters))
}
if _, ok := adsResponse.EDSClusters["outbound|8081||http1.ns1.svc.cluster.local"]; !ok {
t.Fatal("Expected EDS cluster outbound|8081||http1.ns1.svc.cluster.local to be present")
}
}
// TestLDS using an EnvoyFilter with a workload selector
func TestLDSEnvoyFilterWithWorkloadSelector(t *testing.T) {
server, tearDown := util.EnsureTestServer(func(args *bootstrap.PilotArgs) {
args.Plugins = bootstrap.DefaultPlugins
args.Config.FileDir = env.IstioSrc + "/tests/testdata/networking/envoyfilter-without-service"
args.Mesh.MixerAddress = ""
args.Mesh.RdsRefreshDelay = nil
args.Mesh.ConfigFile = env.IstioSrc + "/tests/testdata/networking/envoyfilter-without-service/mesh.yaml"
args.Service.Registries = []string{}
})
registry := memServiceDiscovery(server, t)
// The labels of 98.1.1.1 must match the envoyfilter workload selector
registry.AddWorkload("98.1.1.1", model.Labels{"app": "envoyfilter-test-app", "some": "otherlabel"})
registry.AddWorkload("98.1.1.2", model.Labels{"app": "no-envoyfilter-test-app"})
registry.AddWorkload("98.1.1.3", model.Labels{})
testEnv = testenv.NewTestSetup(testenv.SidecarConsumerOnlyTest, t)
testEnv.Ports().PilotGrpcPort = uint16(util.MockPilotGrpcPort)
testEnv.Ports().PilotHTTPPort = uint16(util.MockPilotHTTPPort)
testEnv.IstioSrc = env.IstioSrc
testEnv.IstioOut = env.IstioOut
server.EnvoyXdsServer.ConfigUpdate(true)
defer tearDown()
tests := []struct {
name string
ip string
expectLuaFilter bool
}{
{
name: "Add filter with matching labels to sidecar",
ip: "98.1.1.1",
expectLuaFilter: true,
},
{
name: "Ignore filter with not matching labels to sidecar",
ip: "98.1.1.2",
expectLuaFilter: false,
},
{
name: "Ignore filter with empty labels to sidecar",
ip: "98.1.1.3",
expectLuaFilter: false,
},
}
for _, test := range tests {
test := test
t.Run(test.name, func(t *testing.T) {
adsResponse, err := adsc.Dial(util.MockPilotGrpcAddr, "", &adsc.Config{
Meta: map[string]string{
model.NodeMetadataConfigNamespace: "consumerns",
model.NodeMetadataInstanceIPs: test.ip, // as the test workload registered above
model.NodeMetadataIstioProxyVersion: "1.1.0",
},
IP: test.ip,
Namespace: "consumerns", // namespace must match the namespace of the sidecar in the configs.yaml
NodeType: "sidecar",
})
if err != nil {
t.Fatal(err)
}
defer adsResponse.Close()
adsResponse.DumpCfg = false
adsResponse.Watch()
_, err = adsResponse.Wait("lds", 100*time.Second)
if err != nil {
t.Fatal("Failed to receive LDS response", err)
return
}
// Expect 1 HTTP listener for 8081
if len(adsResponse.HTTPListeners) != 1 {
t.Fatalf("Expected 1 HTTP listener, got %d", len(adsResponse.HTTPListeners))
}
// TODO: This is flimsy. The ADSC code treats any listener with http connection manager as a HTTP listener
// instead of looking at it as a listener with multiple filter chains
l := adsResponse.HTTPListeners["0.0.0.0_8081"]
expectLuaFilter(t, l, test.expectLuaFilter)
})
}
}
func expectLuaFilter(t *testing.T, l *xdsapi.Listener, expected bool) {
if l != nil {
if len(l.FilterChains) != 1 {
t.Fatalf("Expected 1 filter chains, got %d", len(l.FilterChains))
}
if len(l.FilterChains[0].Filters) != 1 {
t.Fatalf("Expected 1 filter in first filter chain, got %d", len(l.FilterChains))
}
filter := l.FilterChains[0].Filters[0]
if filter.Name != "envoy.http_connection_manager" {
t.Fatalf("Expected HTTP connection manager filter, found %v", filter.Name)
}
httpCfg, ok := filter.ConfigType.(*xdsapi_listener.Filter_TypedConfig)
if !ok {
t.Fatalf("Expected Http Connection Manager Config Filter_TypedConfig, found %T", filter.ConfigType)
}
connectionManagerCfg := xdsapi_http_connection_manager.HttpConnectionManager{}
err := connectionManagerCfg.Unmarshal(httpCfg.TypedConfig.GetValue())
if err != nil {
t.Fatalf("Could not deserialize http connection manager config: %v", err)
}
found := false
for _, filter := range connectionManagerCfg.HttpFilters {
if filter.Name == "envoy.lua" {
found = true
}
}
if expected != found {
t.Fatalf("Expected Lua filter: %v, found: %v", expected, found)
}
}
}
func memServiceDiscovery(server *bootstrap.Server, t *testing.T) *v2.MemServiceDiscovery {
index, found := server.ServiceController.GetRegistryIndex("v2-debug")
if !found {
t.Fatal("Could not find Mock ServiceRegistry")
}
registry, ok := server.ServiceController.GetRegistries()[index].ServiceDiscovery.(*v2.MemServiceDiscovery)
if !ok {
t.Fatal("Unexpected type of Mock ServiceRegistry")
}
return registry
}
// TODO: helper to test the http listener content
// - file access log
// - generate request id
// - cors, fault, router filters
// - tracing
//
| [
"\"CONFIG_DIFF\""
]
| []
| [
"CONFIG_DIFF"
]
| [] | ["CONFIG_DIFF"] | go | 1 | 0 | |
tests/test_shells/postproc.py | #!/usr/bin/env python
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import os
import socket
import sys
import codecs
import platform
import re
test_type = sys.argv[1]
test_client = sys.argv[2]
shell = sys.argv[3]
fname = os.path.join('tests', 'shell', '.'.join((shell, test_type, test_client, 'full.log')))
new_fname = os.path.join('tests', 'shell', '.'.join((shell, test_type, test_client, 'log')))
pid_fname = os.path.join('tests', 'shell', '3rd', 'pid')
is_pypy = platform.python_implementation() == 'PyPy'
try:
	with open(pid_fname, 'r') as P:
		pid = P.read().strip()
except IOError:
	pid = None
hostname = socket.gethostname()
user = os.environ['USER']
REFS_RE = re.compile(r'^\[\d+ refs\]\n')
IPYPY_DEANSI_RE = re.compile(r'\033(?:\[(?:\?\d+[lh]|[^a-zA-Z]+[a-ln-zA-Z])|[=>])')
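# REFS_RE matches the '[NNNN refs]' line that debug builds of CPython print on
# exit; IPYPY_DEANSI_RE strips cursor-movement and mode-switching ANSI escapes
# produced when running IPython under PyPy.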
with codecs.open(fname, 'r', encoding='utf-8') as R:
	with codecs.open(new_fname, 'w', encoding='utf-8') as W:
		found_cd = False
		for line in (R if shell != 'fish' else R.read().split('\n')):
			if not found_cd:
				found_cd = ('cd tests/shell/3rd' in line)
				continue
			if 'true is the last line' in line:
				break
			line = line.translate({
				ord('\r'): None
			})
			if REFS_RE.match(line):
				continue
			line = line.replace(hostname, 'HOSTNAME')
			line = line.replace(user, 'USER')
			if pid is not None:
				line = line.replace(pid, 'PID')
			if shell == 'fish':
				res = ''
				try:
					while line.index('\033[0;'):
						start = line.index('\033[0;')
						end = line.index('\033[0m', start)
						res += line[start:end + 4] + '\n'
						line = line[end + 4:]
				except ValueError:
					pass
				line = res
			elif shell == 'tcsh':
				try:
					start = line.index('\033[0;')
					end = line.index(' ', start)
					line = line[start:end] + '\033[0m\n'
				except ValueError:
					line = ''
			elif shell == 'mksh':
				# Output is different in travis: on my machine I see full
				# command, in travis it is truncated just after `true`.
				if line.startswith('[1] + Terminated'):
					line = '[1] + Terminated bash -c ...\n'
			elif shell == 'dash':
				# Position of this line is not stable: it may go both before and
				# after the next line
				if line.startswith('[1] + Terminated'):
					continue
			elif shell == 'ipython' and is_pypy:
				try:
					end_idx = line.rindex('\033[0m')
					try:
						idx = line[:end_idx].rindex('\033[1;1H')
					except ValueError:
						idx = line[:end_idx].rindex('\033[?25h')
					line = line[idx + len('\033[1;1H'):]
				except ValueError:
					pass
				try:
					data_end_idx = line.rindex('\033[1;1H')
					line = line[:data_end_idx] + '\n'
				except ValueError:
					pass
				if line == '\033[1;1H\n':
					continue
				was_empty = line == '\n'
				line = IPYPY_DEANSI_RE.subn('', line)[0]
				if line == '\n' and not was_empty:
					line = ''
			elif shell == 'rc':
				if line == 'read() failed: Connection reset by peer\n':
					line = ''
			W.write(line)
| []
| []
| [
"USER"
]
| [] | ["USER"] | python | 1 | 0 | |
pkg/jobs/jobclient.go | package jobs
import (
"bufio"
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"os"
"strings"
"text/template"
"time"
"github.com/kubeshop/testkube/internal/pkg/api/repository/result"
"github.com/kubeshop/testkube/pkg/api/v1/testkube"
"github.com/kubeshop/testkube/pkg/executor/output"
"github.com/kubeshop/testkube/pkg/k8sclient"
"github.com/kubeshop/testkube/pkg/log"
"github.com/kubeshop/testkube/pkg/secret"
"go.uber.org/zap"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/util/yaml"
"k8s.io/client-go/kubernetes"
tcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
)
const (
// GitUsernameSecretName is git username secret name
GitUsernameSecretName = "git-username"
// GitUsernameEnvVarName is git username environment var name
GitUsernameEnvVarName = "RUNNER_GITUSERNAME"
// GitTokenSecretName is git token secret name
GitTokenSecretName = "git-token"
// GitTokenEnvVarName is git token environment var name
GitTokenEnvVarName = "RUNNER_GITTOKEN"
pollTimeout = 24 * time.Hour
pollInterval = 200 * time.Millisecond
volumeName = "data-volume"
volumeDir = "/data"
)
// JobClient data struct for managing running jobs
type JobClient struct {
ClientSet *kubernetes.Clientset
Repository result.Repository
Namespace string
Cmd string
Log *zap.SugaredLogger
initImage string
jobTemplate string
}
// JobOptions is for configuring JobOptions
type JobOptions struct {
Name string
Namespace string
Image string
Jsn string
TestName string
InitImage string
JobTemplate string
HasSecrets bool
SecretEnvs map[string]string
}
// NewJobClient returns new JobClient instance
func NewJobClient(namespace, initImage, jobTemplate string) (*JobClient, error) {
clientSet, err := k8sclient.ConnectToK8s()
if err != nil {
return nil, err
}
return &JobClient{
ClientSet: clientSet,
Namespace: namespace,
Log: log.DefaultLogger,
initImage: initImage,
jobTemplate: jobTemplate,
}, nil
}
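// A minimal usage sketch (the namespace, image name, template, and option
// values below are illustrative placeholders, not project defaults):
//
//	c, err := NewJobClient("testkube", "init-image:latest", jobTemplateYAML)
//	if err != nil {
//		return err
//	}
//	result, err := c.LaunchK8sJobSync(repo, execution, JobOptions{})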
// LaunchK8sJobSync launches new job and run executor of given type
// TODO Consider moving launch of K8s job as always sync
// TODO Consider moving storage calls level up (remove dependency from here)
func (c *JobClient) LaunchK8sJobSync(repo result.Repository, execution testkube.Execution, options JobOptions) (
result testkube.ExecutionResult, err error) {
result = testkube.NewPendingExecutionResult()
jobs := c.ClientSet.BatchV1().Jobs(c.Namespace)
podsClient := c.ClientSet.CoreV1().Pods(c.Namespace)
ctx := context.Background()
jsn, err := json.Marshal(execution)
if err != nil {
return result.Err(err), err
}
options.Name = execution.Id
options.Namespace = execution.TestNamespace
options.Jsn = string(jsn)
options.InitImage = c.initImage
options.TestName = execution.TestName
if options.JobTemplate == "" {
options.JobTemplate = c.jobTemplate
}
jobSpec, err := NewJobSpec(c.Log, options)
if err != nil {
return result.Err(err), err
}
_, err = jobs.Create(ctx, jobSpec, metav1.CreateOptions{})
if err != nil {
return result.Err(err), err
}
pods, err := c.GetJobPods(podsClient, execution.Id, 1, 10)
if err != nil {
return result.Err(err), err
}
// find the job pod and wait for it to complete, then collect its result
for _, pod := range pods.Items {
if pod.Status.Phase != corev1.PodRunning && pod.Labels["job-name"] == execution.Id {
l := c.Log.With("pod", pod.Name, "namespace", pod.Namespace, "func", "LaunchK8sJobSync")
// save stop time
defer func() {
execution.Stop()
repo.EndExecution(ctx, execution.Id, execution.EndTime, execution.CalculateDuration())
}()
// wait for complete
l.Debug("poll immediate waiting for pod to succeed")
if err := wait.PollImmediate(pollInterval, pollTimeout, IsPodReady(c.ClientSet, pod.Name, c.Namespace)); err != nil {
// continue on poll err and try to get logs later
l.Errorw("waiting for pod complete error", "error", err)
}
l.Debug("poll immediate end")
var logs []byte
logs, err = c.GetPodLogs(pod.Name)
if err != nil {
l.Errorw("get pod logs error", "error", err)
repo.UpdateResult(ctx, execution.Id, result.Err(err))
return result, err
}
// parse job output log (JSON stream)
result, _, err := output.ParseRunnerOutput(logs)
if err != nil {
l.Errorw("parse ouput error", "error", err)
repo.UpdateResult(ctx, execution.Id, result.Err(err))
return result, err
}
l.Infow("execution completed saving result", "executionId", execution.Id, "status", result.Status)
repo.UpdateResult(ctx, execution.Id, result)
return result, nil
}
}
return
}
// LaunchK8sJob launches new job and run executor of given type
// TODO consider moving storage based operation up in hierarchy
// TODO Consider moving launch of K8s job as always sync
func (c *JobClient) LaunchK8sJob(repo result.Repository, execution testkube.Execution, options JobOptions) (
result testkube.ExecutionResult, err error) {
jobs := c.ClientSet.BatchV1().Jobs(c.Namespace)
podsClient := c.ClientSet.CoreV1().Pods(c.Namespace)
ctx := context.Background()
// init result
result = testkube.NewPendingExecutionResult()
jsn, err := json.Marshal(execution)
if err != nil {
return result.Err(err), err
}
options.Name = execution.Id
options.Namespace = execution.TestNamespace
options.Jsn = string(jsn)
options.InitImage = c.initImage
options.TestName = execution.TestName
if options.JobTemplate == "" {
options.JobTemplate = c.jobTemplate
}
jobSpec, err := NewJobSpec(c.Log, options)
if err != nil {
return result.Err(err), fmt.Errorf("new job spec error: %w", err)
}
_, err = jobs.Create(ctx, jobSpec, metav1.CreateOptions{})
if err != nil {
return result.Err(err), fmt.Errorf("job create error: %w", err)
}
pods, err := c.GetJobPods(podsClient, execution.Id, 1, 10)
if err != nil {
return result.Err(err), fmt.Errorf("get job pods error: %w", err)
}
// find the job pod and wait for its completion asynchronously
for _, pod := range pods.Items {
if pod.Status.Phase != corev1.PodRunning && pod.Labels["job-name"] == execution.Id {
// async wait for complete status or error
go func() {
l := c.Log.With("executionID", execution.Id, "func", "LaunchK8sJob")
// save stop time
defer func() {
l.Debug("stopping execution")
execution.Stop()
repo.EndExecution(ctx, execution.Id, execution.EndTime, execution.CalculateDuration())
}()
// wait for complete
l.Debug("poll immediate waiting for pod to succeed")
if err := wait.PollImmediate(pollInterval, pollTimeout, IsPodReady(c.ClientSet, pod.Name, c.Namespace)); err != nil {
// continue on poll err and try to get logs later
l.Errorw("poll immediate error", "error", err)
}
l.Debug("poll immediate end")
var logs []byte
logs, err = c.GetPodLogs(pod.Name)
if err != nil {
l.Errorw("get pod logs error", "error", err)
repo.UpdateResult(ctx, execution.Id, result.Err(err))
return
}
// parse job output log (JSON stream)
result, _, err := output.ParseRunnerOutput(logs)
if err != nil {
l.Errorw("parse ouput error", "error", err)
repo.UpdateResult(ctx, execution.Id, result.Err(err))
return
}
l.Infow("execution completed saving result", "status", result.Status)
repo.UpdateResult(ctx, execution.Id, result)
}()
}
}
return testkube.NewPendingExecutionResult(), nil
}
// GetJobPods returns job pods
func (c *JobClient) GetJobPods(podsClient tcorev1.PodInterface, jobName string, retryNr, retryCount int) (*corev1.PodList, error) {
pods, err := podsClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "job-name=" + jobName})
if err != nil {
return nil, err
}
if retryNr == retryCount {
return nil, fmt.Errorf("retry count exceeeded, there are no active pods with given id=%s", jobName)
}
if len(pods.Items) == 0 {
time.Sleep(time.Duration(retryNr * 500 * int(time.Millisecond))) // increase backoff timeout
return c.GetJobPods(podsClient, jobName, retryNr+1, retryCount)
}
return pods, nil
}
// TailJobLogs - locates logs for job pod(s)
func (c *JobClient) TailJobLogs(id string, logs chan []byte) (err error) {
podsClient := c.ClientSet.CoreV1().Pods(c.Namespace)
ctx := context.Background()
pods, err := c.GetJobPods(podsClient, id, 1, 10)
if err != nil {
close(logs)
return err
}
for _, pod := range pods.Items {
if pod.Labels["job-name"] == id {
l := c.Log.With("podNamespace", pod.Namespace, "podName", pod.Name, "podStatus", pod.Status)
switch pod.Status.Phase {
case corev1.PodRunning:
l.Debug("tailing pod logs: immediately")
return c.TailPodLogs(ctx, pod.Name, logs)
case corev1.PodFailed:
err := fmt.Errorf("can't get pod logs, pod failed: %s/%s", pod.Namespace, pod.Name)
l.Errorw(err.Error())
return c.GetLastLogLineError(ctx, pod.Namespace, pod.Name)
default:
l.Debugw("tailing job logs: waiting for pod to be ready")
if err = wait.PollImmediate(pollInterval, pollTimeout, IsPodReady(c.ClientSet, pod.Name, c.Namespace)); err != nil {
l.Errorw("poll immediate error when tailing logs", "error", err)
return c.GetLastLogLineError(ctx, pod.Namespace, pod.Name)
}
l.Debug("tailing pod logs")
return c.TailPodLogs(ctx, pod.Name, logs)
}
}
}
return
}
// GetLastLogLineError return error if last line is failed
func (c *JobClient) GetLastLogLineError(ctx context.Context, podNamespace, podName string) error {
l := c.Log.With("pod", podName, "namespace", podNamespace)
log, err := c.GetPodLogError(ctx, podName)
if err != nil {
return fmt.Errorf("getPodLogs error: %w", err)
}
l.Debugw("log", "got last log bytes", string(log)) // in case distorted log bytes
entry, err := output.GetLogEntry(log)
if err != nil {
return fmt.Errorf("GetLogEntry error: %w", err)
}
c.Log.Errorw("got last log entry", "log", entry.String())
return fmt.Errorf("error from last log entry: %s", entry.String())
}
// GetPodLogs returns pod logs bytes
func (c *JobClient) GetPodLogs(podName string, logLinesCount ...int64) (logs []byte, err error) {
count := int64(100)
if len(logLinesCount) > 0 {
count = logLinesCount[0]
}
podLogOptions := corev1.PodLogOptions{
Follow: false,
TailLines: &count,
}
podLogRequest := c.ClientSet.CoreV1().
Pods(c.Namespace).
GetLogs(podName, &podLogOptions)
stream, err := podLogRequest.Stream(context.TODO())
if err != nil {
return logs, err
}
defer stream.Close()
buf := new(bytes.Buffer)
_, err = io.Copy(buf, stream)
if err != nil {
return logs, err
}
return buf.Bytes(), nil
}
// GetPodLogError returns last line as error
func (c *JobClient) GetPodLogError(ctx context.Context, podName string) (logsBytes []byte, err error) {
// error line should be last one
return c.GetPodLogs(podName, 1)
}
// TailPodLogs returns pod logs as channel of bytes
func (c *JobClient) TailPodLogs(ctx context.Context, podName string, logs chan []byte) (err error) {
count := int64(1)
podLogOptions := corev1.PodLogOptions{
Follow: true,
TailLines: &count,
}
podLogRequest := c.ClientSet.CoreV1().
Pods(c.Namespace).
GetLogs(podName, &podLogOptions)
stream, err := podLogRequest.Stream(ctx)
if err != nil {
return err
}
go func() {
defer close(logs)
scanner := bufio.NewScanner(stream)
// set default bufio scanner buffer (to limit bufio.Scanner: token too long errors on very long lines)
buf := make([]byte, 0, 64*1024)
scanner.Buffer(buf, 1024*1024)
for scanner.Scan() {
c.Log.Debug("TailPodLogs stream scan", "out", scanner.Text(), "pod", podName)
logs <- scanner.Bytes()
}
if scanner.Err() != nil {
c.Log.Errorw("scanner error", "error", scanner.Err())
}
}()
return
}
// AbortK8sJob aborts K8S by job name
func (c *JobClient) AbortK8sJob(jobName string) *testkube.ExecutionResult {
var zero int64 = 0
bg := metav1.DeletePropagationBackground
jobs := c.ClientSet.BatchV1().Jobs(c.Namespace)
err := jobs.Delete(context.TODO(), jobName, metav1.DeleteOptions{
GracePeriodSeconds: &zero,
PropagationPolicy: &bg,
})
if err != nil {
return &testkube.ExecutionResult{
Status: testkube.ExecutionStatusFailed,
Output: err.Error(),
}
}
return &testkube.ExecutionResult{
Status: testkube.ExecutionStatusPassed,
}
}
// CreatePersistentVolume creates persistent volume
func (c *JobClient) CreatePersistentVolume(name string) error {
quantity, err := resource.ParseQuantity("10Gi")
if err != nil {
return err
}
pv := &corev1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{"type": "local"},
},
Spec: corev1.PersistentVolumeSpec{
Capacity: corev1.ResourceList{"storage": quantity},
AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany},
PersistentVolumeSource: corev1.PersistentVolumeSource{
HostPath: &corev1.HostPathVolumeSource{
Path: fmt.Sprintf("/mnt/data/%s", name),
},
},
StorageClassName: "manual",
},
}
if _, err = c.ClientSet.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{}); err != nil {
return err
}
return nil
}
// CreatePersistentVolumeClaim creates PVC with given name
func (c *JobClient) CreatePersistentVolumeClaim(name string) error {
storageClassName := "manual"
quantity, err := resource.ParseQuantity("10Gi")
if err != nil {
return err
}
pvc := &corev1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: corev1.PersistentVolumeClaimSpec{
StorageClassName: &storageClassName,
AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany},
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{"storage": quantity},
},
},
}
if _, err := c.ClientSet.CoreV1().PersistentVolumeClaims(c.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{}); err != nil {
return err
}
return nil
}
// NewJobSpec is a method to create new job spec
func NewJobSpec(log *zap.SugaredLogger, options JobOptions) (*batchv1.Job, error) {
var secretEnvVars []corev1.EnvVar
i := 1
for secretName, secretVar := range options.SecretEnvs {
secretEnvVars = append(secretEnvVars, corev1.EnvVar{
Name: fmt.Sprintf("RUNNER_SECRET_ENV%d", i),
ValueFrom: &corev1.EnvVarSource{
SecretKeyRef: &corev1.SecretKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
Name: secretName,
},
Key: secretVar,
},
},
})
i++
}
if options.HasSecrets {
secretEnvVars = append(secretEnvVars, []corev1.EnvVar{
{
Name: GitUsernameEnvVarName,
ValueFrom: &corev1.EnvVarSource{
SecretKeyRef: &corev1.SecretKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
Name: secret.GetMetadataName(options.TestName),
},
Key: GitUsernameSecretName,
},
},
},
{
Name: GitTokenEnvVarName,
ValueFrom: &corev1.EnvVarSource{
SecretKeyRef: &corev1.SecretKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
Name: secret.GetMetadataName(options.TestName),
},
Key: GitTokenSecretName,
},
},
},
}...)
}
tmpl, err := template.New("job").Parse(options.JobTemplate)
if err != nil {
return nil, fmt.Errorf("creating job spec from options.JobTemplate error: %w", err)
}
options.Jsn = strings.ReplaceAll(options.Jsn, "'", "''")
var buffer bytes.Buffer
if err = tmpl.ExecuteTemplate(&buffer, "job", options); err != nil {
return nil, fmt.Errorf("executing job spec template: %w", err)
}
var job batchv1.Job
jobSpec := buffer.String()
log.Debug("Job specification", jobSpec)
decoder := yaml.NewYAMLOrJSONDecoder(bytes.NewBufferString(jobSpec), len(jobSpec))
if err := decoder.Decode(&job); err != nil {
return nil, fmt.Errorf("decoding job spec error: %w", err)
}
env := append(envVars, secretEnvVars...)
for i := range job.Spec.Template.Spec.InitContainers {
job.Spec.Template.Spec.InitContainers[i].Env = append(job.Spec.Template.Spec.InitContainers[i].Env, env...)
}
for i := range job.Spec.Template.Spec.Containers {
job.Spec.Template.Spec.Containers[i].Env = append(job.Spec.Template.Spec.Containers[i].Env, env...)
}
return &job, nil
}
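// envVars are copied from the operator's own environment into every executor
// container; the STORAGE_* variables configure the artifact storage backend
// used when scraping test outputs.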
var envVars = []corev1.EnvVar{
{
Name: "DEBUG",
Value: os.Getenv("DEBUG"),
},
{
Name: "RUNNER_ENDPOINT",
Value: os.Getenv("STORAGE_ENDPOINT"),
},
{
Name: "RUNNER_ACCESSKEYID",
Value: os.Getenv("STORAGE_ACCESSKEYID"),
},
{
Name: "RUNNER_SECRETACCESSKEY",
Value: os.Getenv("STORAGE_SECRETACCESSKEY"),
},
{
Name: "RUNNER_LOCATION",
Value: os.Getenv("STORAGE_LOCATION"),
},
{
Name: "RUNNER_TOKEN",
Value: os.Getenv("STORAGE_TOKEN"),
},
{
Name: "RUNNER_SSL",
Value: os.Getenv("STORAGE_SSL"),
},
{
Name: "RUNNER_SCRAPPERENABLED",
Value: os.Getenv("SCRAPPERENABLED"),
},
{
Name: "RUNNER_DATADIR",
Value: volumeDir,
},
}
// IsPodReady defines if pod is ready or failed for log scraping
func IsPodReady(c *kubernetes.Clientset, podName, namespace string) wait.ConditionFunc {
return func() (bool, error) {
pod, err := c.CoreV1().Pods(namespace).Get(context.Background(), podName, metav1.GetOptions{})
if err != nil {
return false, err
}
switch pod.Status.Phase {
case corev1.PodSucceeded:
return true, nil
case corev1.PodFailed:
return true, fmt.Errorf("pod %s/%s failed", pod.Namespace, pod.Name)
}
return false, nil
}
}
| [
"\"DEBUG\"",
"\"STORAGE_ENDPOINT\"",
"\"STORAGE_ACCESSKEYID\"",
"\"STORAGE_SECRETACCESSKEY\"",
"\"STORAGE_LOCATION\"",
"\"STORAGE_TOKEN\"",
"\"STORAGE_SSL\"",
"\"SCRAPPERENABLED\""
]
| []
| [
"STORAGE_TOKEN",
"STORAGE_ENDPOINT",
"STORAGE_SECRETACCESSKEY",
"STORAGE_ACCESSKEYID",
"STORAGE_LOCATION",
"DEBUG",
"SCRAPPERENABLED",
"STORAGE_SSL"
]
| [] | ["STORAGE_TOKEN", "STORAGE_ENDPOINT", "STORAGE_SECRETACCESSKEY", "STORAGE_ACCESSKEYID", "STORAGE_LOCATION", "DEBUG", "SCRAPPERENABLED", "STORAGE_SSL"] | go | 8 | 0 | |
cmd/world-gen/world-gen.go | // ----------------------------------------------------------------------------
// The MIT License
// LecsGO - Entity Component System framework powered by Golang.
// Url: https://github.com/Leopotam/go-ecs
// Copyright (c) 2021 Leopotam <[email protected]>
// ----------------------------------------------------------------------------
package main
import (
"bytes"
"fmt"
"go/ast"
"go/format"
"go/parser"
"go/token"
"io/ioutil"
"os"
"path/filepath"
"reflect"
"strconv"
"strings"
"text/template"
)
type componentInfo struct {
Name string
Type string
}
type filterInfo struct {
Name string
IncludeTypes []string
ExcludeTypes []string
IncludeIndices []string
ExcludeIndices []string
}
type worldInfo struct {
Name string
InfoTypeName string
Components []componentInfo
Filters []filterInfo
}
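// newComponentInfo derives the generated identifier from the component's Go
// type name, e.g. "math.Vec2" becomes "MathVec2" (title-cased, dot removed).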
func newComponentInfo(typeName string) componentInfo {
return componentInfo{
Name: strings.ReplaceAll(strings.Title(typeName), ".", ""),
Type: typeName,
}
}
func main() {
fset := token.NewFileSet()
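// GOPACKAGE and GOFILE are set by the go tool when this generator is invoked
// through a //go:generate directive in the file being processed.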
inPackage := os.Getenv("GOPACKAGE")
inFileName := os.Getenv("GOFILE")
src, err := ioutil.ReadFile(inFileName)
if err != nil {
panic(err)
}
f, err := parser.ParseFile(fset, "", src, parser.ParseComments)
if err != nil {
panic(err)
}
// imports.
var imports []string
for _, item := range f.Imports {
var importData string
if item.Name != nil {
importData = fmt.Sprintf("%s %s", item.Name.Name, item.Path.Value)
} else {
importData = item.Path.Value
}
imports = append(imports, importData)
}
// find worlds.
worlds := scanWorlds(f)
for _, info := range worlds {
fmt.Printf("world: %s => %s\n", info.Name, info.InfoTypeName)
}
for i := range worlds {
w := &worlds[i]
scanWorldInfo(f, w)
validateFilters(w)
}
var buf bytes.Buffer
if err := packageTemplate.Execute(&buf, struct {
Package string
Imports []string
Worlds []worldInfo
}{
Package: inPackage,
Imports: imports,
Worlds: worlds,
}); err != nil {
panic(err)
}
formattedCode, err := format.Source(buf.Bytes())
if err != nil {
panic(err)
}
dir := filepath.Dir(inFileName)
outFileName := filepath.Join(dir,
fmt.Sprintf("%s-gen.go", inFileName[:len(inFileName)-len(filepath.Ext(inFileName))]))
w, err := os.Create(outFileName)
if err != nil {
panic(err)
}
defer w.Close()
w.Write(formattedCode)
}
func scanWorlds(f *ast.File) []worldInfo {
var worlds []worldInfo
ast.Inspect(f, func(n ast.Node) bool {
switch t := n.(type) {
case *ast.TypeSpec:
switch t.Type.(type) {
case *ast.StructType:
for _, field := range t.Type.(*ast.StructType).Fields.List {
if field.Tag != nil {
if tag, err := strconv.Unquote(field.Tag.Value); err == nil {
if meta, ok := reflect.StructTag(tag).Lookup("ecs"); ok /*&& meta == "world"*/ {
worldInfo := worldInfo{
Name: t.Name.Name,
// InfoTypeName: field.Type.(*ast.Ident).Name,
InfoTypeName: meta,
}
worlds = append(worlds, worldInfo)
break
}
}
}
}
}
}
return true
})
return worlds
}
func scanWorldInfo(f *ast.File, worldInfo *worldInfo) {
ast.Inspect(f, func(n ast.Node) bool {
switch t := n.(type) {
case *ast.TypeSpec:
switch t.Type.(type) {
case *ast.InterfaceType:
if t.Name.Name == worldInfo.InfoTypeName {
fmt.Printf("world-info found: %s\n", worldInfo.InfoTypeName)
componentsFound := false
for _, method := range t.Type.(*ast.InterfaceType).Methods.List {
if len(method.Names) == 0 {
continue
}
fnName := method.Names[0]
fn := method.Type.(*ast.FuncType)
if !fnName.IsExported() {
if componentsFound {
panic(fmt.Sprintf(`only one private func should be present in world "%s"`, worldInfo.Name))
}
worldInfo.Components = scanComponents(worldInfo, fnName.Name, fn)
componentsFound = true
for _, ci := range worldInfo.Components {
fmt.Printf("component: name=%s, type=%s\n", ci.Name, ci.Type)
}
continue
}
filter := scanFilterConstraints(fn)
filter.Name = fnName.Name
worldInfo.Filters = append(worldInfo.Filters, filter)
}
}
}
}
return true
})
}
func scanComponents(w *worldInfo, name string, fn *ast.FuncType) []componentInfo {
var components []componentInfo
if len(fn.Params.List) > 0 {
panic(fmt.Sprintf(`private func "%s" cant get parameters in world "%s"`, name, w.Name))
}
if fn.Results == nil {
panic(fmt.Sprintf(`private func "%s" should returns components in world "%s"`, name, w.Name))
}
for _, par := range fn.Results.List {
var typeName string
switch par.Type.(type) {
case *ast.SelectorExpr:
sel := par.Type.(*ast.SelectorExpr)
typeName = fmt.Sprintf("%s.%s", sel.X.(*ast.Ident).Name, sel.Sel)
case *ast.Ident:
typeName = par.Type.(*ast.Ident).Name
}
if idx := findComponentByType(components, typeName); idx != -1 {
panic(fmt.Sprintf(`component "%s" already declared in world "%s"`, typeName, w.Name))
}
components = append(components, newComponentInfo(typeName))
}
return components
}
func scanFilterConstraints(fn *ast.FuncType) filterInfo {
filter := filterInfo{}
for _, par := range fn.Params.List {
// fmt.Printf("filter-include: %s\n", par.Type.(*ast.Ident).Name)
var typeName string
switch par.Type.(type) {
case *ast.SelectorExpr:
sel := par.Type.(*ast.SelectorExpr)
typeName = fmt.Sprintf("%s.%s", sel.X.(*ast.Ident).Name, sel.Sel)
case *ast.Ident:
typeName = par.Type.(*ast.Ident).Name
}
filter.IncludeTypes = append(filter.IncludeTypes, typeName)
}
if fn.Results != nil {
for _, par := range fn.Results.List {
// fmt.Printf("filter-exclude: %v\n", par.Type.(*ast.Ident))
var typeName string
switch par.Type.(type) {
case *ast.SelectorExpr:
sel := par.Type.(*ast.SelectorExpr)
typeName = fmt.Sprintf("%s.%s", sel.X.(*ast.Ident).Name, sel.Sel)
case *ast.Ident:
typeName = par.Type.(*ast.Ident).Name
}
filter.ExcludeTypes = append(filter.ExcludeTypes, typeName)
}
}
return filter
}
func findComponentByType(c []componentInfo, typeName string) int {
for i := range c {
if c[i].Type == typeName {
return i
}
}
return -1
}
func validateFilters(w *worldInfo) {
for fIdx := range w.Filters {
f := &w.Filters[fIdx]
for _, inc := range f.IncludeTypes {
i := findComponentByType(w.Components, inc)
if i == -1 {
panic(fmt.Sprintf(`filter "%s" requested "%s" as include constraint that not exist in world "%s"`,
f.Name, inc, w.Name))
}
f.IncludeIndices = append(f.IncludeIndices, strconv.Itoa(i))
}
for _, exc := range f.ExcludeTypes {
i := findComponentByType(w.Components, exc)
if i == -1 {
panic(fmt.Sprintf(`filter "%s" requested "%s" as exclude constraint that not exist in world "%s"`,
f.Name, exc, w.Name))
}
f.ExcludeIndices = append(f.ExcludeIndices, strconv.Itoa(i))
}
}
fmt.Printf("world \"%s\" info:\n", w.Name)
var cNames []string
for _, c := range w.Components {
cNames = append(cNames, c.Name)
}
fmt.Printf("components: %v\n", cNames)
for _, f := range w.Filters {
fmt.Printf("filter \"%s\": include=%v, exclude=%v\n", f.Name, f.IncludeTypes, f.ExcludeTypes)
}
}
func joinSlice(s []string) string {
res := strings.Join(s, ",")
if len(res) > 0 {
res += ","
}
return res
}
var templateFuncs = template.FuncMap{
"joinSlice": joinSlice,
}
var packageTemplate = template.Must(template.New("").Funcs(templateFuncs).Parse(
`// Code generated by "go generate", DO NOT EDIT.
package {{ .Package }}
import (
"sort"
{{ range $i,$import := .Imports }}
{{$import}}
{{- end}}
)
{{ range $worldIdx,$world := .Worlds }}
{{- $worldName := $world.Name }}
// New{{$worldName}} returns new instance of {{$worldName}}.
func New{{$worldName}}(entitiesCount uint32) *{{$worldName}} {
return &{{$worldName}}{
world: ecs.NewWorld(entitiesCount, []ecs.ComponentPool{
{{- range $i,$c := $world.Components }}
new{{$c.Name}}Pool(entitiesCount),
{{- end}}
},[]ecs.Filter{
{{- range $i,$f := $world.Filters }}
*ecs.NewFilter([]uint16{ {{ joinSlice $f.IncludeIndices }} }, []uint16{ {{ joinSlice $f.ExcludeIndices }} }, 512),
{{- end}}
}),
}
}
// InternalWorld returns internal ecs.World instance.
func (w {{$worldName}}) InternalWorld() *ecs.World { return w.world }
// Destroy processes cleanup of data inside world.
func (w *{{$worldName}}) Destroy() { w.world.Destroy(); w.world = nil }
// NewEntity creates and returns new entity inside world.
func (w {{$worldName}}) NewEntity() ecs.Entity {
return w.world.NewEntity()
}
// DelEntity removes entity from world if exists. All attached components will be removed first.
func (w {{$worldName}}) DelEntity(entity ecs.Entity) { w.world.DelEntity(entity) }
// PackEntity packs Entity to save outside from world.
func (w {{$worldName}}) PackEntity(entity ecs.Entity) ecs.PackedEntity { return w.world.PackEntity(entity) }
// UnpackEntity tries to unpack data to Entity, returns unpacked entity and success of operation.
func (w {{$worldName}}) UnpackEntity(packedEntity ecs.PackedEntity) (ecs.Entity, bool) {
return w.world.UnpackEntity(packedEntity)
}
{{ range $i,$c := $world.Components }}
type pool{{$c.Name}} []{{$c.Type}}
func new{{$c.Name}}Pool(cap uint32) *pool{{$c.Name}} {
var pool pool{{$c.Name}} = make([]{{$c.Type}}, 0, cap)
return &pool
}
func (p *pool{{$c.Name}}) New() {
*p = append(*p, {{$c.Type}}{})
}
func (p *pool{{$c.Name}}) Recycle(idx uint32) {
(*p)[idx] = {{$c.Type}}{}
}
// Set{{$c.Name}} adds or returns exist {{$c.Name}} component on entity.
func (w {{$worldName}}) Set{{$c.Name}}(entity ecs.Entity) *{{$c.Type}} {
entityData := &w.world.Entities[entity]
pool := w.world.Pools[{{$i}}].(*pool{{$c.Name}})
if !entityData.BitMask.Get({{$i}}) {
entityData.BitMask.Set({{$i}})
maskIdx := sort.Search(len(entityData.Mask), func(i int) bool { return entityData.Mask[i] > {{$i}} })
entityData.Mask = append(entityData.Mask, 0)
copy(entityData.Mask[maskIdx+1:], entityData.Mask[maskIdx:])
entityData.Mask[maskIdx] = {{$i}}
w.world.UpdateFilters(entity, {{$i}}, true)
}
return &(*pool)[entity]
}
// Get{{$c.Name}} returns exist {{$c.Name}} component on entity or nil.
func (w {{$worldName}}) Get{{$c.Name}}(entity ecs.Entity) *{{$c.Type}} {
if !w.world.Entities[entity].BitMask.Get({{$i}}) {
return nil
}
return &(*w.world.Pools[{{$i}}].(*pool{{$c.Name}}))[entity]
}
// Get{{$c.Name}}Unsafe returns exist {{$c.Name}} component on entity or nil.
func (w {{$worldName}}) Get{{$c.Name}}Unsafe(entity ecs.Entity) *{{$c.Type}} {
return &(*w.world.Pools[{{$i}}].(*pool{{$c.Name}}))[entity]
}
// Del{{$c.Name}} removes {{$c.Name}} component or do nothing.
// If entity is empty after removing - it will be destroyed automatically.
func (w {{$worldName}}) Del{{$c.Name}}(entity ecs.Entity) {
entityData := &w.world.Entities[entity]
if entityData.BitMask.Get({{$i}}) {
if len(entityData.Mask) > 1 {
w.world.UpdateFilters(entity, {{$i}}, false)
w.world.Pools[{{$i}}].(*pool{{$c.Name}}).Recycle(entity)
maskLen := len(entityData.Mask)
maskIdx := sort.Search(maskLen, func(i int) bool { return entityData.Mask[i] >= {{$i}} })
copy(entityData.Mask[maskIdx:], entityData.Mask[maskIdx+1:])
entityData.Mask = entityData.Mask[:maskLen-1]
entityData.BitMask.Unset({{$i}})
} else {
w.DelEntity(entity)
}
}
}
{{- end}}
{{- range $i,$f := $world.Filters }}
// {{$f.Name}} returns user filter.
func (w {{$worldName}}) {{$f.Name}}() *ecs.Filter {
return w.world.Filter({{$i}})
}
{{- end}}
{{- end}}
`))
| [
"\"GOPACKAGE\"",
"\"GOFILE\""
]
| []
| [
"GOFILE",
"GOPACKAGE"
]
| [] | ["GOFILE", "GOPACKAGE"] | go | 2 | 0 | |
Model Codes/RetrieveEdit/hstone_eval_ctxvae.py | # -*- coding: utf-8 -*-
import paths
import os
#os.environ['COPY_EDIT_DATA']=paths.data_dir
os.environ['CUDA_VISIBLE_DEVICES']='0'
from gtd.utils import Config, bleu
from editor_code.copy_editor.retrieve_edit_run import RetrieveEditTrainingRun
from editor_code.copy_editor.editor import EditExample
print os.environ['COPY_EDIT_DATA']
import paths
import io
field_delims = ['NAME_END','ATK_END','DEF_END','COST_END','DUR_END','TYPE_END','PLAYER_CLS_END','RACE_END','RARITY_END']
field_prefix = ['','ATK','DEF','COST','DUR','','','','','']
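# Each card arrives flattened on one line with its attributes separated by the
# *_END markers above; cut_by_substring splits them back into fields and
# field_prefix re-attaches the numeric field labels (ATK, DEF, COST, DUR).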
def cut_by_substring(string, field_delims):
    next_start = 0
    subs_list = []
    substring_list = []
    for delim in field_delims:
        delim_start = string.find(delim)
        subs_list.append((next_start, delim_start))
        substring_list.append(string[next_start:delim_start])
        next_start = delim_start + len(delim)
    substring_list.append(string[next_start:(len(string)-1)])
    return substring_list
def load_input(filename):
    lsplit = []
    with io.open(filename+'.in', 'r') as fopen:
        for line in fopen:
            ssl = cut_by_substring(line.strip(), field_delims)
            slis = [field_prefix[i]+ssl[i].strip() for i in range(len(ssl))]
            lsplit.append(slis)
    return lsplit
def proc_str(strin):
    strin = strin.replace('        ', u'\U0001D7D6')  # 8 spaces
    strin = strin.replace('    ', u'\U0001D7D2')      # 4 spaces
    strin = strin.replace('  ', u'\U0001D7D0')        # 2 spaces
    strin = strin.replace(' ', u'\U0001D7CF')         # 1 space
    return strin
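# proc_str encodes runs of 8/4/2/1 spaces as the mathematical bold digits
# U+1D7D6/U+1D7D2/U+1D7D0/U+1D7CF so whitespace survives tokenization;
# sub_list and invert_str below reverse the mapping.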
delim_chars = [u'\xa7',u'§',u' ',u'.',u'=',
u'(',u'\"',u')',u':',u',',u']',u'[',
u'\U0001D7D6',u'\U0001D7D2',
u'\U0001D7D0',u'\U0001D7CF']
def tok_str(strin):
    tok = ''
    all_list = []
    for i in range(len(strin)):
        if strin[i] in delim_chars:
            if len(tok) > 0:
                all_list.append(tok)
            all_list.append(strin[i])
            tok = ''
        else:
            tok += strin[i]
    return all_list
import regex
def make_eexs(inlist, outlist):
    fline = []
    for instr, outstr in zip(inlist, outlist):
        cardname = regex.sub('[\p{P}\p{Sm}]+', '', ''.join(instr[0].split(' ')))
        i1 = [cardname]+instr[0].split(' ')
        i2 = instr[1:9]
        i3 = instr[9].split(' ')
        tmp = EditExample([i1, i2, i3], outstr)
        fline.append(tmp)
    return fline
import editdistance
def map_vocab(dynamic_vocab, str):
    tmp = []
    for i in range(len(str)):
        if str[i] in dynamic_vocab.copy_token_to_word:
            tmp.append(dynamic_vocab.copy_token_to_word[str[i]])
        else:
            tmp.append(str[i])
    return tmp
def format_ex(exin):
    input = [' '.join(sub) for sub in exin.input_words]
    target = ' '.join(invert_str(exin.target_words))
    ctx_in = '\n'.join(['CONTEXT:', input[0], input[1], input[2]])
    ret_in = '\n'.join(['RET-CTX:', input[3], input[4], input[5]])
    ret_out = 'RET-TRG:'+' '.join(invert_str(input[6].split(' ')))
    return '\n'.join([ctx_in, ret_in, ret_out, 'TARGET:'+target])
sub_list = {u'\U0001D7D6':[' ']*8, u'\U0001D7D2':[' ']*4, u'\U0001D7D0':[' ']*2, u'\U0001D7CF':[' ']}
def invert_str(strin):
    tmp = []
    for item in strin:
        if item in sub_list:
            tmp.extend(sub_list[item])
        else:
            tmp.append(item)
    return tmp
import regex as re
def tokenize_for_bleu_eval(code):
    code = re.sub(r'([^A-Za-z0-9_])', r' \1 ', code)
    code = re.sub(r'([a-z])([A-Z])', r'\1 \2', code)
    code = re.sub(r'\s+', ' ', code)
    code = code.replace('"', '`')
    code = code.replace('\'', '`')
    tokens = [t for t in code.split(' ') if t]
    return tokens
def tok_wrapper(strin):
    return tokenize_for_bleu_eval(' '.join(invert_str(strin)))
validation_dir = os.environ['COPY_EDIT_DATA']+'/datasets/card2code/third_party/hearthstone/test_hs'
output_list = []
with io.open(validation_dir+'.out', 'r', encoding='utf-8') as fopen:
    for line in fopen:
        output_list.append(line.strip())
out_proc = [tok_str(proc_str(out)) for out in output_list]
iin = load_input(validation_dir)
valid_ex = make_eexs(iin, out_proc)
#no-profile
profile=False
config = Config.from_file('editor_code/configs/editor/default.txt')
src_dir = os.environ['COPY_EDIT_DATA']+'/edit_runs/0'
print 'loading model'
print src_dir
load_expt = RetrieveEditTrainingRun(config,src_dir) #highest valid bleu.
import numpy as np
vae_editor = load_expt.editor.vae_model
ret_model = load_expt.editor.ret_model
edit_model = load_expt.editor.edit_model
examples = load_expt._examples
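# Embed the training set with the retriever and index it with LSH; for each
# validation card, retrieve the nearest training example and decode an edit
# with beam search (beam size 5, max sequence length 150).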
new_vecs = ret_model.batch_embed(examples.train, train_mode=False)
full_lsh = ret_model.make_lsh(new_vecs)
valid_eval = ret_model.ret_and_make_ex(valid_ex, full_lsh, examples.train, 0, train_mode=False)
beam_list, edit_traces = edit_model.edit(valid_eval,max_seq_length=150,verbose=True, beam_size=5)
edlist = []
gen_out = []
ex_out = []
for i in range(len(edit_traces)):
    trg = edit_traces[i].example.target_words
    gen = beam_list[i][0]
    edlist.append(editdistance.eval(tok_wrapper(gen), tok_wrapper(trg)))
    ex_out.append(edit_traces[i].example)
    gen_out.append(gen)
edsort = np.argsort(edlist)
blist = [bleu(tok_wrapper(edit_traces[i].example.target_words), tok_wrapper(gen_out[i])) for i in range(len(gen_out))]
print 'model BLEU and accuracy'
print np.mean(blist)
print np.mean(np.array(edlist)==0.0)
| []
| []
| [
"CUDA_VISIBLE_DEVICES",
"COPY_EDIT_DATA"
]
| [] | ["CUDA_VISIBLE_DEVICES", "COPY_EDIT_DATA"] | python | 2 | 0 | |
vendor/github.com/kubevirt/cluster-network-addons-operator/pkg/network/kubemacpool.go | package network
import (
"crypto/rand"
"net"
"os"
"path/filepath"
"reflect"
"github.com/kubevirt/cluster-network-addons-operator/pkg/render"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
opv1alpha1 "github.com/kubevirt/cluster-network-addons-operator/pkg/apis/networkaddonsoperator/v1alpha1"
)
// validateKubeMacPool validates the KubeMacPool range configuration
func validateKubeMacPool(conf *opv1alpha1.NetworkAddonsConfigSpec) []error {
if conf.KubeMacPool == nil {
return []error{}
}
// If the range is not configured by the administrator we generate a random range.
// This random range spans from 02:XX:XX:00:00:00 to 02:XX:XX:FF:FF:FF,
// where 02 makes the address local unicast and XX:XX is a random prefix.
if conf.KubeMacPool.StartPoolRange == "" && conf.KubeMacPool.EndPoolRange == "" {
return []error{}
}
if (conf.KubeMacPool.StartPoolRange == "" && conf.KubeMacPool.EndPoolRange != "") ||
(conf.KubeMacPool.StartPoolRange != "" && conf.KubeMacPool.EndPoolRange == "") {
return []error{errors.Errorf("both or none of the KubeMacPool ranges needs to be configured")}
}
	if _, err := net.ParseMAC(conf.KubeMacPool.StartPoolRange); err != nil {
		return []error{errors.Errorf("failed to parse startPoolRange: invalid MAC address")}
	}
	if _, err := net.ParseMAC(conf.KubeMacPool.EndPoolRange); err != nil {
		return []error{errors.Errorf("failed to parse endPoolRange: invalid MAC address")}
	}
return []error{}
}
func changeSafeKubeMacPool(prev, next *opv1alpha1.NetworkAddonsConfigSpec) []error {
if prev.KubeMacPool != nil && !reflect.DeepEqual(prev.KubeMacPool, next.KubeMacPool) {
return []error{errors.Errorf("cannot modify KubeMacPool configuration once it is deployed")}
}
return nil
}
// renderKubeMacPool generates the manifests of KubeMacPool
func renderKubeMacPool(conf *opv1alpha1.NetworkAddonsConfigSpec, manifestDir string) ([]*unstructured.Unstructured, error) {
if conf.KubeMacPool == nil {
return nil, nil
}
if conf.KubeMacPool.StartPoolRange == "" || conf.KubeMacPool.EndPoolRange == "" {
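		// validateKubeMacPool guarantees both bounds are unset here: derive a random
		// locally-administered prefix and span the full 00:00:00-FF:FF:FF suffix range.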
prefix, err := generateRandomMacPrefix()
if err != nil {
return nil, errors.Wrap(err, "failed to generate random mac address prefix")
}
startPoolRange := net.HardwareAddr(append(prefix, 0x00, 0x00, 0x00))
conf.KubeMacPool.StartPoolRange = startPoolRange.String()
endPoolRange := net.HardwareAddr(append(prefix, 0xFF, 0xFF, 0xFF))
conf.KubeMacPool.EndPoolRange = endPoolRange.String()
}
// render the manifests on disk
data := render.MakeRenderData()
data.Data["KubeMacPoolImage"] = os.Getenv("KUBEMACPOOL_IMAGE")
data.Data["ImagePullPolicy"] = conf.ImagePullPolicy
data.Data["StartPoolRange"] = conf.KubeMacPool.StartPoolRange
data.Data["EndPoolRange"] = conf.KubeMacPool.EndPoolRange
objs, err := render.RenderDir(filepath.Join(manifestDir, "kubemacpool"), &data)
if err != nil {
return nil, errors.Wrap(err, "failed to render kubemacpool manifests")
}
return objs, nil
}
func generateRandomMacPrefix() ([]byte, error) {
suffix := make([]byte, 2)
_, err := rand.Read(suffix)
if err != nil {
return []byte{}, err
}
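	// 0x02 sets the locally administered bit and keeps the I/G (multicast) bit
	// clear, so every generated prefix is a local unicast address.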
prefix := append([]byte{0x02}, suffix...)
return prefix, nil
}
| [
"\"KUBEMACPOOL_IMAGE\""
]
| []
| [
"KUBEMACPOOL_IMAGE"
]
| [] | ["KUBEMACPOOL_IMAGE"] | go | 1 | 0 | |
cmd/createuser/helpers.go | package createuser
import (
"context"
"fmt"
"log"
"os"
"cloud.google.com/go/firestore"
"github.com/pixelogicdev/gruveebackend/pkg/sawmill"
)
// initWithEnv takes our yaml env variables and maps them properly.
// Unfortunately, we had to do this in main because in init we weren't able to access env variables
func initWithEnv() error {
// Get paths
var currentProject string
if os.Getenv("ENVIRONMENT") == "DEV" {
currentProject = os.Getenv("FIREBASE_PROJECTID_DEV")
} else if os.Getenv("ENVIRONMENT") == "PROD" {
currentProject = os.Getenv("FIREBASE_PROJECTID_PROD")
}
// Initialize Firestore
client, err := firestore.NewClient(context.Background(), currentProject)
if err != nil {
return fmt.Errorf("CreateUser [Init Firestore]: %v", err)
}
// Initialize Sawmill
sawmillLogger, err := sawmill.InitClient(currentProject, os.Getenv("GCLOUD_CONFIG"), os.Getenv("ENVIRONMENT"), "CreateUser")
if err != nil {
log.Printf("CreateSocial Playlist [Init Sawmill]: %v", err)
}
firestoreClient = client
logger = sawmillLogger
return nil
}
| [
"\"ENVIRONMENT\"",
"\"FIREBASE_PROJECTID_DEV\"",
"\"ENVIRONMENT\"",
"\"FIREBASE_PROJECTID_PROD\"",
"\"GCLOUD_CONFIG\"",
"\"ENVIRONMENT\""
]
| []
| [
"ENVIRONMENT",
"FIREBASE_PROJECTID_DEV",
"GCLOUD_CONFIG",
"FIREBASE_PROJECTID_PROD"
]
| [] | ["ENVIRONMENT", "FIREBASE_PROJECTID_DEV", "GCLOUD_CONFIG", "FIREBASE_PROJECTID_PROD"] | go | 4 | 0 | |
csv_cti/blueprints/web_api/views/dids.py | from csv_cti.blueprints.web_api import web_api
from flask import request,current_app
from csv_cti.blueprints.op.md5_token import encrypt_md5
from csv_cti.blueprints.op.dids import Dids_op
#did-queue
@web_api.route('/dids-add/',methods=['POST'])
def dids_add():
return_data={}
r_token=request.json.get('token')
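    # encrypt_md5 is assumed to return the collection of currently valid tokens
    # derived from MD5_KEY (see op.md5_token); every endpoint gates on membership below.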
if r_token in encrypt_md5(current_app.config['MD5_KEY']):
r_data=request.json.get('data')
'''
{
"token":"[email protected]",
"data":[{
"did":"8888888",//必选
"queue":"test",//必选
"group":"C68",//必选
}]
}
'''
try:
Dids_op.add(r_data)
except Exception as e:
current_app.logger.debug("/dids-add/ 数据库操作失败:%s",e)
return_data['msg']='Voice abnormal, Please contact the Voice engineer'
return return_data,500
else:
current_app.logger.info("/dids-add/ 添加成功")
return_data['msg']='Add OK'
return return_data,200
else:
return_data['msg']='Auth Fail'
return return_data,401
@web_api.route('/dids-rm/',methods=['POST'])
def dids_rm():
return_data={}
r_token=request.json.get('token')
if r_token in encrypt_md5(current_app.config['MD5_KEY']):
r_data=request.json.get('data')
'''
{
"token":"[email protected]",
"data":
[{
"agent":"50008",
"queue":"osmond2",//必选
"group":"C68"//必选
}]
}
'''
try:
Dids_op.remove(r_data)
except Exception as e:
current_app.logger.debug("/dids-rm/ 数据库操作失败:%s",e)
return_data['msg']='Voice abnormal, Please contact the Voice engineer'
return return_data,500
else:
current_app.logger.info("/dids-rm/ 删除成功")
return_data['msg']='Remove OK'
return return_data,200
else:
return_data['msg']='Auth Fail'
return return_data,401
@web_api.route('/dids-list/',methods=['POST'])
def dids_list():
return_data={}
r_token=request.json.get('token')
if r_token in encrypt_md5(current_app.config['MD5_KEY']):
r_data=request.json.get('data')
'''
{
"token":"[email protected]",
"data"://留空字符串查所有
{
"queue":"osmond2",//必选
"group":"C68"//必选
}
}
'''
try:
list=Dids_op.query(r_data)
except Exception as e:
current_app.logger.debug("/dids-list/ 数据库操作失败:%s",e)
return_data['msg']='Voice abnormal, Please contact the Voice engineer'
return return_data,500
else:
current_app.logger.info("/dids-list/ 查询成功")
return_data['msg']='Query OK'
return_data['data']=list[0:-1]
return_data['total']=list[-1]
return_data['page_size']=r_data['page_size']
return_data['page_index']=r_data['page_index']
return return_data,200
else:
return_data['msg']='Auth Fail'
return return_data,401 | []
| []
| []
| [] | [] | python | null | null | null |
audiobonsai/wsgi.py | """
WSGI config for audiobonsai project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "audiobonsai.settings")
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
vendor/github.com/hashicorp/nomad/client/driver/rkt_test.go | // +build linux
package driver
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"reflect"
"strings"
"syscall"
"testing"
"time"
"github.com/hashicorp/nomad/client/config"
cstructs "github.com/hashicorp/nomad/client/structs"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/testutil"
"github.com/stretchr/testify/assert"
ctestutils "github.com/hashicorp/nomad/client/testutil"
)
func TestRktVersionRegex(t *testing.T) {
t.Parallel()
if os.Getenv("NOMAD_TEST_RKT") == "" {
t.Skip("NOMAD_TEST_RKT unset, skipping")
}
inputRkt := "rkt version 0.8.1"
inputAppc := "appc version 1.2.0"
expectedRkt := "0.8.1"
expectedAppc := "1.2.0"
rktMatches := reRktVersion.FindStringSubmatch(inputRkt)
appcMatches := reAppcVersion.FindStringSubmatch(inputAppc)
	if rktMatches[1] != expectedRkt {
		t.Errorf("rkt version mismatch; got %q; want %q", rktMatches[1], expectedRkt)
	}
	if appcMatches[1] != expectedAppc {
		t.Errorf("appc version mismatch; got %q; want %q", appcMatches[1], expectedAppc)
	}
}
// The fingerprinter test should always pass, even if rkt is not installed.
func TestRktDriver_Fingerprint(t *testing.T) {
t.Parallel()
if os.Getenv("NOMAD_TEST_RKT") == "" {
t.Skip("skipping rkt tests")
}
ctestutils.RktCompatible(t)
ctx := testDriverContexts(t, &structs.Task{Name: "foo", Driver: "rkt"})
d := NewRktDriver(ctx.DriverCtx)
node := &structs.Node{
Attributes: make(map[string]string),
}
request := &cstructs.FingerprintRequest{Config: &config.Config{}, Node: node}
var response cstructs.FingerprintResponse
err := d.Fingerprint(request, &response)
if err != nil {
t.Fatalf("err: %v", err)
}
if !response.Detected {
t.Fatalf("expected response to be applicable")
}
attributes := response.Attributes
if attributes == nil {
t.Fatalf("expected attributes to not equal nil")
}
if attributes["driver.rkt"] != "1" {
t.Fatalf("Missing Rkt driver")
}
if attributes["driver.rkt.version"] == "" {
t.Fatalf("Missing Rkt driver version")
}
if attributes["driver.rkt.appc.version"] == "" {
t.Fatalf("Missing appc version for the Rkt driver")
}
}
func TestRktDriver_Start_DNS(t *testing.T) {
if !testutil.IsTravis() {
t.Parallel()
}
if os.Getenv("NOMAD_TEST_RKT") == "" {
t.Skip("skipping rkt tests")
}
ctestutils.RktCompatible(t)
// TODO: use test server to load from a fixture
task := &structs.Task{
Name: "etcd",
Driver: "rkt",
Config: map[string]interface{}{
"trust_prefix": "coreos.com/etcd",
"image": "coreos.com/etcd:v2.0.4",
"command": "/etcd",
"dns_servers": []string{"8.8.8.8", "8.8.4.4"},
"dns_search_domains": []string{"example.com", "example.org", "example.net"},
},
LogConfig: &structs.LogConfig{
MaxFiles: 10,
MaxFileSizeMB: 10,
},
Resources: &structs.Resources{
MemoryMB: 128,
CPU: 100,
},
}
ctx := testDriverContexts(t, task)
defer ctx.AllocDir.Destroy()
d := NewRktDriver(ctx.DriverCtx)
if _, err := d.Prestart(ctx.ExecCtx, task); err != nil {
t.Fatalf("error in prestart: %v", err)
}
resp, err := d.Start(ctx.ExecCtx, task)
if err != nil {
t.Fatalf("err: %v", err)
}
defer resp.Handle.Kill()
// Attempt to open
handle2, err := d.Open(ctx.ExecCtx, resp.Handle.ID())
if err != nil {
t.Fatalf("err: %v", err)
}
if handle2 == nil {
t.Fatalf("missing handle")
}
handle2.Kill()
}
func TestRktDriver_Start_Wait(t *testing.T) {
if !testutil.IsTravis() {
t.Parallel()
}
if os.Getenv("NOMAD_TEST_RKT") == "" {
t.Skip("skipping rkt tests")
}
ctestutils.RktCompatible(t)
task := &structs.Task{
Name: "etcd",
Driver: "rkt",
Config: map[string]interface{}{
"trust_prefix": "coreos.com/etcd",
"image": "coreos.com/etcd:v2.0.4",
"command": "/etcd",
"args": []string{"--version"},
},
LogConfig: &structs.LogConfig{
MaxFiles: 10,
MaxFileSizeMB: 10,
},
Resources: &structs.Resources{
MemoryMB: 128,
CPU: 100,
},
}
ctx := testDriverContexts(t, task)
defer ctx.AllocDir.Destroy()
d := NewRktDriver(ctx.DriverCtx)
if _, err := d.Prestart(ctx.ExecCtx, task); err != nil {
t.Fatalf("error in prestart: %v", err)
}
resp, err := d.Start(ctx.ExecCtx, task)
if err != nil {
t.Fatalf("err: %v", err)
}
handle := resp.Handle.(*rktHandle)
defer handle.Kill()
// Update should be a no-op
if err := handle.Update(task); err != nil {
t.Fatalf("err: %v", err)
}
	// Signal is expected to return an error
	if err := resp.Handle.Signal(syscall.SIGTERM); err == nil {
		t.Fatalf("expected Signal to return an error, got nil")
	}
select {
case res := <-resp.Handle.WaitCh():
if !res.Successful() {
t.Fatalf("err: %v", res)
}
case <-time.After(time.Duration(testutil.TestMultiplier()*15) * time.Second):
t.Fatalf("timeout")
}
// Make sure pod was removed #3561
var stderr bytes.Buffer
cmd := exec.Command(rktCmd, "status", handle.uuid)
cmd.Stdout = ioutil.Discard
cmd.Stderr = &stderr
if err := cmd.Run(); err == nil {
t.Fatalf("expected error running 'rkt status %s' on removed container", handle.uuid)
}
if out := stderr.String(); !strings.Contains(out, "no matches found") {
t.Fatalf("expected 'no matches found' but received: %s", out)
}
}
func TestRktDriver_Start_Wait_Skip_Trust(t *testing.T) {
if !testutil.IsTravis() {
t.Parallel()
}
if os.Getenv("NOMAD_TEST_RKT") == "" {
t.Skip("skipping rkt tests")
}
ctestutils.RktCompatible(t)
task := &structs.Task{
Name: "etcd",
Driver: "rkt",
Config: map[string]interface{}{
"image": "coreos.com/etcd:v2.0.4",
"command": "/etcd",
"args": []string{"--version"},
},
LogConfig: &structs.LogConfig{
MaxFiles: 10,
MaxFileSizeMB: 10,
},
Resources: &structs.Resources{
MemoryMB: 128,
CPU: 100,
},
}
ctx := testDriverContexts(t, task)
defer ctx.AllocDir.Destroy()
d := NewRktDriver(ctx.DriverCtx)
if _, err := d.Prestart(ctx.ExecCtx, task); err != nil {
t.Fatalf("error in prestart: %v", err)
}
resp, err := d.Start(ctx.ExecCtx, task)
if err != nil {
t.Fatalf("err: %v", err)
}
defer resp.Handle.Kill()
// Update should be a no-op
err = resp.Handle.Update(task)
if err != nil {
t.Fatalf("err: %v", err)
}
select {
case res := <-resp.Handle.WaitCh():
if !res.Successful() {
t.Fatalf("err: %v", res)
}
case <-time.After(time.Duration(testutil.TestMultiplier()*15) * time.Second):
t.Fatalf("timeout")
}
}
func TestRktDriver_Start_Wait_AllocDir(t *testing.T) {
if !testutil.IsTravis() {
t.Parallel()
}
if os.Getenv("NOMAD_TEST_RKT") == "" {
t.Skip("skipping rkt tests")
}
ctestutils.RktCompatible(t)
exp := []byte{'w', 'i', 'n'}
file := "output.txt"
tmpvol, err := ioutil.TempDir("", "nomadtest_rktdriver_volumes")
if err != nil {
t.Fatalf("error creating temporary dir: %v", err)
}
defer os.RemoveAll(tmpvol)
hostpath := filepath.Join(tmpvol, file)
task := &structs.Task{
Name: "rkttest_alpine",
Driver: "rkt",
Config: map[string]interface{}{
"image": "docker://alpine",
"command": "/bin/sh",
"args": []string{
"-c",
fmt.Sprintf(`echo -n %s > foo/%s`, string(exp), file),
},
"net": []string{"none"},
"volumes": []string{fmt.Sprintf("%s:/foo", tmpvol)},
},
LogConfig: &structs.LogConfig{
MaxFiles: 10,
MaxFileSizeMB: 10,
},
Resources: &structs.Resources{
MemoryMB: 128,
CPU: 100,
},
}
ctx := testDriverContexts(t, task)
defer ctx.AllocDir.Destroy()
d := NewRktDriver(ctx.DriverCtx)
if _, err := d.Prestart(ctx.ExecCtx, task); err != nil {
t.Fatalf("error in prestart: %v", err)
}
resp, err := d.Start(ctx.ExecCtx, task)
if err != nil {
t.Fatalf("err: %v", err)
}
defer resp.Handle.Kill()
select {
case res := <-resp.Handle.WaitCh():
if !res.Successful() {
t.Fatalf("err: %v", res)
}
case <-time.After(time.Duration(testutil.TestMultiplier()*15) * time.Second):
t.Fatalf("timeout")
}
// Check that data was written to the shared alloc directory.
act, err := ioutil.ReadFile(hostpath)
if err != nil {
t.Fatalf("Couldn't read expected output: %v", err)
}
if !reflect.DeepEqual(act, exp) {
t.Fatalf("Command output is %v; expected %v", act, exp)
}
}
// TestRktDriver_UserGroup asserts tasks may override the user and group of the
// rkt image.
func TestRktDriver_UserGroup(t *testing.T) {
if !testutil.IsTravis() {
t.Parallel()
}
if os.Getenv("NOMAD_TEST_RKT") == "" {
t.Skip("skipping rkt tests")
}
ctestutils.RktCompatible(t)
require := assert.New(t)
task := &structs.Task{
Name: "etcd",
Driver: "rkt",
User: "nobody",
Config: map[string]interface{}{
"image": "docker://redis:3.2",
"group": "nogroup",
},
LogConfig: &structs.LogConfig{
MaxFiles: 10,
MaxFileSizeMB: 10,
},
Resources: &structs.Resources{
MemoryMB: 128,
CPU: 100,
},
}
tctx := testDriverContexts(t, task)
defer tctx.AllocDir.Destroy()
d := NewRktDriver(tctx.DriverCtx)
_, err := d.Prestart(tctx.ExecCtx, task)
require.Nil(err)
resp, err := d.Start(tctx.ExecCtx, task)
require.Nil(err)
defer resp.Handle.Kill()
timeout := time.Duration(testutil.TestMultiplier()*15) * time.Second
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
// WaitUntil we can determine the user/group redis is running as
expected := []byte("redis-server *:6379 nobody nogroup\n")
testutil.WaitForResult(func() (bool, error) {
raw, code, err := resp.Handle.Exec(ctx, "/bin/bash", []string{"-c", "ps -eo args,user,group | grep ^redis"})
if err != nil {
return false, err
}
if code != 0 {
return false, fmt.Errorf("unexpected exit code: %d", code)
}
return bytes.Equal(expected, raw), fmt.Errorf("expected %q but found %q", expected, raw)
}, func(err error) {
t.Fatalf("err: %v", err)
})
require.Nil(resp.Handle.Kill())
}
func TestRktTrustPrefix(t *testing.T) {
if !testutil.IsTravis() {
t.Parallel()
}
if os.Getenv("NOMAD_TEST_RKT") == "" {
t.Skip("skipping rkt tests")
}
ctestutils.RktCompatible(t)
task := &structs.Task{
Name: "etcd",
Driver: "rkt",
Config: map[string]interface{}{
"trust_prefix": "example.com/invalid",
"image": "coreos.com/etcd:v2.0.4",
"command": "/etcd",
"args": []string{"--version"},
},
LogConfig: &structs.LogConfig{
MaxFiles: 10,
MaxFileSizeMB: 10,
},
Resources: &structs.Resources{
MemoryMB: 128,
CPU: 100,
},
}
ctx := testDriverContexts(t, task)
defer ctx.AllocDir.Destroy()
d := NewRktDriver(ctx.DriverCtx)
if _, err := d.Prestart(ctx.ExecCtx, task); err != nil {
t.Fatalf("error in prestart: %v", err)
}
resp, err := d.Start(ctx.ExecCtx, task)
if err == nil {
resp.Handle.Kill()
t.Fatalf("Should've failed")
}
msg := "Error running rkt trust"
if !strings.Contains(err.Error(), msg) {
t.Fatalf("Expecting '%v' in '%v'", msg, err)
}
}
func TestRktTaskValidate(t *testing.T) {
t.Parallel()
ctestutils.RktCompatible(t)
task := &structs.Task{
Name: "etcd",
Driver: "rkt",
Config: map[string]interface{}{
"trust_prefix": "coreos.com/etcd",
"image": "coreos.com/etcd:v2.0.4",
"command": "/etcd",
"args": []string{"--version"},
"dns_servers": []string{"8.8.8.8", "8.8.4.4"},
"dns_search_domains": []string{"example.com", "example.org", "example.net"},
},
Resources: basicResources,
}
ctx := testDriverContexts(t, task)
defer ctx.AllocDir.Destroy()
d := NewRktDriver(ctx.DriverCtx)
if err := d.Validate(task.Config); err != nil {
t.Fatalf("Validation error in TaskConfig : '%v'", err)
}
}
// TODO: Port Mapping test should be run with a proper ACI image and test the port access.
func TestRktDriver_PortsMapping(t *testing.T) {
if !testutil.IsTravis() {
t.Parallel()
}
if os.Getenv("NOMAD_TEST_RKT") == "" {
t.Skip("skipping rkt tests")
}
ctestutils.RktCompatible(t)
task := &structs.Task{
Name: "etcd",
Driver: "rkt",
Config: map[string]interface{}{
"image": "docker://redis:3.2",
"port_map": []map[string]string{
{
"main": "6379-tcp",
},
},
"debug": "true",
},
LogConfig: &structs.LogConfig{
MaxFiles: 10,
MaxFileSizeMB: 10,
},
Resources: &structs.Resources{
MemoryMB: 256,
CPU: 512,
Networks: []*structs.NetworkResource{
{
IP: "127.0.0.1",
ReservedPorts: []structs.Port{{Label: "main", Value: 8080}},
},
},
},
}
ctx := testDriverContexts(t, task)
defer ctx.AllocDir.Destroy()
d := NewRktDriver(ctx.DriverCtx)
if _, err := d.Prestart(ctx.ExecCtx, task); err != nil {
t.Fatalf("error in prestart: %v", err)
}
resp, err := d.Start(ctx.ExecCtx, task)
if err != nil {
t.Fatalf("err: %v", err)
}
if resp.Network == nil {
t.Fatalf("Expected driver to set a DriverNetwork, but it did not!")
}
failCh := make(chan error, 1)
go func() {
time.Sleep(1 * time.Second)
if err := resp.Handle.Kill(); err != nil {
failCh <- err
}
}()
select {
case err := <-failCh:
t.Fatalf("failed to kill handle: %v", err)
case <-resp.Handle.WaitCh():
case <-time.After(time.Duration(testutil.TestMultiplier()*15) * time.Second):
t.Fatalf("timeout")
}
}
// TestRktDriver_PortsMapping_Host asserts that port_map isn't required when
// host networking is used.
func TestRktDriver_PortsMapping_Host(t *testing.T) {
if !testutil.IsTravis() {
t.Parallel()
}
if os.Getenv("NOMAD_TEST_RKT") == "" {
t.Skip("skipping rkt tests")
}
ctestutils.RktCompatible(t)
task := &structs.Task{
Name: "etcd",
Driver: "rkt",
Config: map[string]interface{}{
"image": "docker://redis:latest",
"net": []string{"host"},
},
LogConfig: &structs.LogConfig{
MaxFiles: 10,
MaxFileSizeMB: 10,
},
Resources: &structs.Resources{
MemoryMB: 256,
CPU: 512,
Networks: []*structs.NetworkResource{
{
IP: "127.0.0.1",
ReservedPorts: []structs.Port{{Label: "main", Value: 8080}},
},
},
},
}
ctx := testDriverContexts(t, task)
defer ctx.AllocDir.Destroy()
d := NewRktDriver(ctx.DriverCtx)
if _, err := d.Prestart(ctx.ExecCtx, task); err != nil {
t.Fatalf("error in prestart: %v", err)
}
resp, err := d.Start(ctx.ExecCtx, task)
if err != nil {
t.Fatalf("err: %v", err)
}
if resp.Network != nil {
t.Fatalf("No network should be returned with --net=host but found: %#v", resp.Network)
}
failCh := make(chan error, 1)
go func() {
time.Sleep(1 * time.Second)
if err := resp.Handle.Kill(); err != nil {
failCh <- err
}
}()
select {
case err := <-failCh:
t.Fatalf("failed to kill handle: %v", err)
case <-resp.Handle.WaitCh():
case <-time.After(time.Duration(testutil.TestMultiplier()*15) * time.Second):
t.Fatalf("timeout")
}
}
func TestRktDriver_HandlerExec(t *testing.T) {
if !testutil.IsTravis() {
t.Parallel()
}
if os.Getenv("NOMAD_TEST_RKT") == "" {
t.Skip("skipping rkt tests")
}
ctestutils.RktCompatible(t)
task := &structs.Task{
Name: "etcd",
Driver: "rkt",
Config: map[string]interface{}{
"trust_prefix": "coreos.com/etcd",
"image": "coreos.com/etcd:v2.0.4",
"command": "/etcd",
},
LogConfig: &structs.LogConfig{
MaxFiles: 10,
MaxFileSizeMB: 10,
},
Resources: &structs.Resources{
MemoryMB: 128,
CPU: 100,
},
}
ctx := testDriverContexts(t, task)
defer ctx.AllocDir.Destroy()
d := NewRktDriver(ctx.DriverCtx)
if _, err := d.Prestart(ctx.ExecCtx, task); err != nil {
t.Fatalf("error in prestart: %v", err)
}
resp, err := d.Start(ctx.ExecCtx, task)
if err != nil {
t.Fatalf("err: %v", err)
}
// Give the pod a second to start
time.Sleep(time.Second)
// Exec a command that should work
out, code, err := resp.Handle.Exec(context.TODO(), "/etcd", []string{"--version"})
if err != nil {
t.Fatalf("error exec'ing etcd --version: %v", err)
}
if code != 0 {
t.Fatalf("expected `etcd --version` to succeed but exit code was: %d\n%s", code, string(out))
}
if expected := []byte("etcd version "); !bytes.HasPrefix(out, expected) {
t.Fatalf("expected output to start with %q but found:\n%q", expected, out)
}
// Exec a command that should fail
out, code, err = resp.Handle.Exec(context.TODO(), "/etcd", []string{"--kaljdshf"})
if err != nil {
t.Fatalf("error exec'ing bad command: %v", err)
}
if code == 0 {
t.Fatalf("expected `stat` to fail but exit code was: %d", code)
}
if expected := "flag provided but not defined"; !bytes.Contains(out, []byte(expected)) {
t.Fatalf("expected output to contain %q but found: %q", expected, out)
}
if err := resp.Handle.Kill(); err != nil {
t.Fatalf("error killing handle: %v", err)
}
}
func TestRktDriver_Remove_Error(t *testing.T) {
if !testutil.IsTravis() {
t.Parallel()
}
if os.Getenv("NOMAD_TEST_RKT") == "" {
t.Skip("skipping rkt tests")
}
ctestutils.RktCompatible(t)
// Removing a nonexistent pod should return an error
if err := rktRemove("00000000-0000-0000-0000-000000000000"); err == nil {
t.Fatalf("expected an error")
}
if err := rktRemove("zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"); err == nil {
t.Fatalf("expected an error")
}
}
| [
"\"NOMAD_TEST_RKT\"",
"\"NOMAD_TEST_RKT\"",
"\"NOMAD_TEST_RKT\"",
"\"NOMAD_TEST_RKT\"",
"\"NOMAD_TEST_RKT\"",
"\"NOMAD_TEST_RKT\"",
"\"NOMAD_TEST_RKT\"",
"\"NOMAD_TEST_RKT\"",
"\"NOMAD_TEST_RKT\"",
"\"NOMAD_TEST_RKT\"",
"\"NOMAD_TEST_RKT\"",
"\"NOMAD_TEST_RKT\""
]
| []
| [
"NOMAD_TEST_RKT"
]
| [] | ["NOMAD_TEST_RKT"] | go | 1 | 0 | |
scripts/build/Platform/Android/run_test_on_android_simulator.py | #
# Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
#
# SPDX-License-Identifier: Apache-2.0 OR MIT
#
#
import argparse
import os
import pathlib
import re
import sys
import subprocess
import time
import logging
CURRENT_PATH = pathlib.Path(os.path.dirname(__file__)).absolute()
# The engine root is based on the location of this file (<ENGINE_ROOT>/scripts/build/Platform/Android). Walk up to calculate the engine root
ENGINE_ROOT = CURRENT_PATH.parents[3]
class AndroidEmuError(Exception):
pass
def get_android_sdk_path():
try:
        android_sdk_env = os.getenv('LY_ANDROID_SDK')
        if not android_sdk_env:
            raise AndroidEmuError("LY_ANDROID_SDK environment variable is not set")
        android_sdk_path = pathlib.Path(android_sdk_env)
if not android_sdk_path.is_dir():
raise AndroidEmuError(f"Android SDK Path ('{android_sdk_path}') set with the LY_ANDROID_SDK variable is invalid")
#TODO: Sanity check on necessary files
return android_sdk_path
except Exception as err:
raise AndroidEmuError(f"Unable to determine android SDK path: {err}")
class Command(object):
def __init__(self, tool_name, tool_path, run_as_shell=True):
if not tool_path.is_file():
raise AndroidEmuError(f"Invalid path for {tool_name}. Cannot find ('{tool_path.absolute()}')")
self.tool_path = tool_path
self.run_as_shell = run_as_shell
def run_return_output(self, cmd_args):
args = [str(self.tool_path)]
if isinstance(cmd_args, str):
args.append(cmd_args)
elif isinstance(cmd_args, list):
args.extend(cmd_args)
else:
assert False, "run_return_output argument must be a string or list of strings"
full_cmd = subprocess.list2cmdline(args)
logging.debug(f"run_return_output: {full_cmd}")
run_result = subprocess.run(args,
capture_output=True,
encoding='UTF-8',
errors='ignore',
shell=self.run_as_shell)
if run_result.returncode != 0:
raise AndroidEmuError(f"Error executing command '{full_cmd}' (return code {run_result.returncode}): {run_result.stderr}")
return run_result.stdout
def run(self, cmd_args, cwd=None, suppress_output=False):
args = [str(self.tool_path)]
if isinstance(cmd_args, str):
args.append(cmd_args)
elif isinstance(cmd_args, list):
args.extend(cmd_args)
else:
assert False, "run_return_output argument must be a string or list of strings"
full_cmd = subprocess.list2cmdline(args)
logging.debug(f"run: {full_cmd}")
        run_result = subprocess.run(args,
                                    stdout=subprocess.DEVNULL if suppress_output else None,
                                    shell=self.run_as_shell,
                                    cwd=cwd)
        if run_result.returncode != 0:
            raise AndroidEmuError(f"Error executing command '{full_cmd}' (return code {run_result.returncode})")
def run_process(self, cmd_args):
args = [str(self.tool_path)]
if isinstance(cmd_args, str):
args.append(cmd_args)
elif isinstance(cmd_args, list):
args.extend(cmd_args)
else:
assert False, "run_return_output argument must be a string or list of strings"
full_cmd = subprocess.list2cmdline(args)
logging.debug(f"run_process: {full_cmd}")
process = subprocess.Popen(args,
shell=True,
stdout=subprocess.PIPE,
creationflags=subprocess.CREATE_NEW_PROCESS_GROUP | subprocess.NORMAL_PRIORITY_CLASS |
subprocess.CREATE_NO_WINDOW,
encoding='UTF-8',
errors='ignore')
return process
class AndroidEmulatorManager(object):
UNIT_TEST_AVD_NAME = "LY_UNITTEST_AVD"
UNIT_TEST_SYSTEM_IMAGE_PACKAGE = "android-30;google_apis;x86_64"
UNIT_TEST_DEVICE_TEMPLATE_NAME = "pixel_xl"
UNIT_TEST_DEVICE_SETTINGS_MAP = {
"disk.dataPartition.size": "32G",
"vm.heapSize": "1024",
"hw.ramSize": "2048",
"hw.sdCard": "no"
}
EMULATOR_STARTUP_TIMEOUT_SECS = 60*5 # Set the emulator startup timeout to 5 minutes
def __init__(self, base_android_sdk_path, hide_emulator_windows=True, force_avd_creation=False, emulator_startup_timeout=EMULATOR_STARTUP_TIMEOUT_SECS):
self.android_sdk_path = base_android_sdk_path
self.force_avd_creation = force_avd_creation
self.unit_test_avd_name = AndroidEmulatorManager.UNIT_TEST_AVD_NAME
self.unit_test_device_template_name = AndroidEmulatorManager.UNIT_TEST_DEVICE_TEMPLATE_NAME
self.unit_test_device_settings_map = AndroidEmulatorManager.UNIT_TEST_DEVICE_SETTINGS_MAP
self.unit_test_avd_system_image = AndroidEmulatorManager.UNIT_TEST_SYSTEM_IMAGE_PACKAGE
self.hide_emulator_windows = hide_emulator_windows
self.emulator_startup_timeout = emulator_startup_timeout
self.emulator_cmd = Command("Emulator", self.android_sdk_path / 'emulator' / 'emulator.exe')
self.avd_manager_cmd = Command("AVD Manager", self.android_sdk_path / 'tools' / 'bin' / 'avdmanager.bat')
self.sdk_manager_cmd = Command("SDK Manager", self.android_sdk_path / 'tools' / 'bin' / 'sdkmanager.bat')
self.adb_cmd = Command("ADB", self.android_sdk_path / 'platform-tools' / 'adb.exe')
def collect_android_sdk_list(self):
"""
Use the SDK Manager to get the list of installed, available, and updateable packages
:return: tuple of 3 lists: installed, available, and updateable packages
"""
result_str = self.sdk_manager_cmd.run_return_output(['--list'])
# the result will be listed out in 3 sections: Installed packages, Available Packages, and Available updates
# and each item is represented by 3 columns separated by a '|' character
installed_packages = []
available_packages = []
available_updates = []
current_append_list = None
for avd_item in result_str.split('\n'):
avd_item_stripped = avd_item.strip()
if not avd_item_stripped:
continue
if '|' not in avd_item_stripped:
if avd_item_stripped.upper() == 'INSTALLED PACKAGES:':
current_append_list = installed_packages
elif avd_item_stripped.upper() == 'AVAILABLE PACKAGES:':
current_append_list = available_packages
elif avd_item_stripped.upper() == 'AVAILABLE UPDATES:':
current_append_list = available_updates
else:
current_append_list = None
continue
item_parts = [split.strip() for split in avd_item_stripped.split('|')]
if len(item_parts) < 3:
continue
elif item_parts[1].upper() in ('VERSION', 'INSTALLED', '-------'):
continue
elif current_append_list is None:
continue
if current_append_list is not None:
current_append_list.append(item_parts)
return installed_packages, available_packages, available_updates
def update_installed_sdks(self):
"""
Run an SDK Manager update to make sure the SDKs are all up-to-date
"""
logging.info(f"Updating android SDK...")
self.sdk_manager_cmd.run(['--update'])
def install_system_package_if_necessary(self):
"""
Make sure that we have the correct system image installed, and install if not
"""
installed_packages, available_packages, _ = self.collect_android_sdk_list()
unit_test_sdk_package_name = f'system-images;{self.unit_test_avd_system_image}'
detected_sdk_package_version = None
for package_line_items in installed_packages:
if package_line_items[0] == unit_test_sdk_package_name:
detected_sdk_package_version = package_line_items[0]
if detected_sdk_package_version:
# Already installed
logging.info(f"Detected installed system image {self.unit_test_avd_system_image} version {detected_sdk_package_version}")
return
# Make sure its an available image to install
detected_available_sdk_package_version = None
for package_line_items in available_packages:
if package_line_items[0] == unit_test_sdk_package_name:
detected_available_sdk_package_version = package_line_items[0]
if not detected_available_sdk_package_version:
raise AndroidEmuError(f"Unable to install required system image {self.unit_test_avd_system_image}, not found by the Android SDK Manager")
# Install the package
logging.info(f"Installing system image {self.unit_test_avd_system_image}...")
self.sdk_manager_cmd.run(['--install', unit_test_sdk_package_name])
logging.info(f"Installed Completed")
def find_device_id_by_name(self, device_name):
"""
Find a device id (from AVD Manager) by the device name
:param device_name: Name to lookup
:return: The device id
"""
result_str = self.avd_manager_cmd.run_return_output(['list', 'device'])
result_lines = [result_line.strip() for result_line in result_str.split('\n')]
result_line_count = len(result_lines)
current_index = 0
device_to_id_map = {}
while current_index < result_line_count:
current_line = result_lines[current_index]
current_index += 1
            # This assumes the pattern: id: <id> or "<device name>"
if current_line.startswith('id:') and 'or' in current_line:
id_and_name_combo = current_line.split('or')
id_and_value_combo = id_and_name_combo[0].split(' ')
name = id_and_name_combo[1].replace('"', '').strip().upper()
id = id_and_value_combo[1]
device_to_id_map[name] = id
if current_line.startswith('Available Android targets:'):
break
device_id = device_to_id_map.get(device_name.upper())
if not device_id:
raise AndroidEmuError(f"Unable to locate device id for '{device_name}'")
return device_id
def query_installed_avds(self):
"""
Get maps of all valid and invalid AVDs installed on the current system
:return: tuple of 2 maps (AVD Name -> Path): Valid and invalid
"""
result_str = self.avd_manager_cmd.run_return_output(['list', 'avd'])
result_lines = [result_line.strip() for result_line in result_str.split('\n')]
line_count = len(result_lines)
current_index = 0
current_name = None
current_path = None
valid_avd_to_path_map = {}
invalid_avd_to_path_map = {}
current_avd_to_path_map = valid_avd_to_path_map
while current_index < line_count:
current_line = result_lines[current_index]
current_index += 1
if current_line.startswith('Name:'):
name = current_line[6:].strip()
if current_name is not None:
current_avd_to_path_map[current_name] = current_path
current_path = None
current_name = name
elif current_line.startswith('Path:'):
current_path = current_line[6:].strip()
elif current_line.startswith('Device:'):
pass
elif 'could not be loaded:' in current_line:
if current_name is not None:
current_avd_to_path_map[current_name] = current_path
current_avd_to_path_map = invalid_avd_to_path_map
current_path = None
current_name = None
if current_name is not None:
current_avd_to_path_map[current_name] = current_path
return valid_avd_to_path_map, invalid_avd_to_path_map
    def create_unittest_avd(self):
"""Create the unit test AVD"""
self.install_system_package_if_necessary()
device_id = self.find_device_id_by_name(self.unit_test_device_template_name)
self.avd_manager_cmd.run(['--silent',
'create', 'avd',
'--name', self.unit_test_avd_name,
'--package', f'system-images;{self.unit_test_avd_system_image}',
'--device', device_id])
valid_avd_map, _ = self.query_installed_avds()
unit_test_avd_path = valid_avd_map.get(self.unit_test_avd_name)
if not unit_test_avd_path:
raise AndroidEmuError(f"Unable to create unit test AVD {self.unit_test_avd_name}")
unit_test_avd_config_path = pathlib.Path(unit_test_avd_path) / 'config.ini'
if not unit_test_avd_config_path.is_file():
raise AndroidEmuError(f"Unable to create unit test AVD {self.unit_test_avd_name}: The expected config file '{unit_test_avd_config_path}' does not exist.")
config_content_full = unit_test_avd_config_path.read_text(encoding='UTF-8', errors='ignore')
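        # Rewrite matching `key=<number>` entries of config.ini in place; keys that
        # are missing (or whose current value is non-numeric, which the regex does
        # not match) are appended at the end of the file.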
for item, value in self.unit_test_device_settings_map.items():
regex_friendly_str = item.replace('.', '\\.')
repl_pattern = f"{regex_friendly_str}\\s*=\\s*[\\d]+"
repl_value = f"{item}={value}"
if re.search(repl_pattern, config_content_full):
config_content_full = re.sub(repl_pattern, repl_value, config_content_full)
else:
if not config_content_full.endswith('\n'):
config_content_full += '\n'
config_content_full += f"{repl_value}\n"
unit_test_avd_config_path.write_text(config_content_full)
def query_emulator_device_id(self):
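        """Return the device id of the first emulator reported by `adb devices -l`, or None."""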
result_str = self.adb_cmd.run_return_output(['devices', '-l'])
emulators = []
for result_line in result_str.split('\n'):
if not result_line.startswith('emulator-'):
continue
emulator = result_line[:result_line.find(' ')].strip()
emulators.append(emulator)
if len(emulators) > 1:
logging.warning(f"Found multiple emulators connect ({','.join(emulators)}). Defaulting to {emulators[0]}")
return emulators[0] if len(emulators) > 0 else None
def install_unit_test_avd(self):
"""
Install the unit test AVD (Android Virtual Device)
"""
valid_avd_map, invalid_avd_map = self.query_installed_avds()
if not self.unit_test_avd_name in valid_avd_map:
create_avd = True
elif self.force_avd_creation or self.unit_test_avd_name in invalid_avd_map:
logging.info(f"Deleting AVD {self.unit_test_avd_name}..")
self.avd_manager_cmd.run(['delete', 'avd', '--name', self.unit_test_avd_name])
create_avd = True
else:
create_avd = False
if create_avd:
            self.create_unittest_avd()
def uninstall_unit_test_avd(self):
"""
Uninstall the unit test AVD
"""
logging.info(f"Uninstalling AVD {self.unit_test_avd_name}..")
self.avd_manager_cmd.run(['delete', 'avd', '--name', self.unit_test_avd_name])
def launch_emulator_process(self):
"""
Launch the emulator process for the unit test avd and return the process handle and its device id
:return: tuple of the process handle and the device id for the emulator
"""
emulator_device_id = None
process = None
try:
# Launch the emulator process
emulator_process_args = [
"-avd",
self.unit_test_avd_name
]
if self.hide_emulator_windows:
emulator_process_args.append("-no-window")
process = self.emulator_cmd.run_process(emulator_process_args)
# Wait for the emulator to signal that its bootup is complete
boot_completed = False
start_time = time.time()
            timeout_secs = self.emulator_startup_timeout
while process.poll() is None:
elapsed_time = time.time() - start_time
if elapsed_time > timeout_secs > 0:
break
line = process.stdout.readline()
print(line, end='')
if "boot completed" in line:
boot_completed = True
break
if not boot_completed:
raise AndroidEmuError("Bootup of emulator timed out")
# query ADB to get the emulator ID
emulator_device_id = self.query_emulator_device_id()
return process, emulator_device_id
except Exception:
if process:
if emulator_device_id:
self.terminate_emulator_process(emulator_device_id)
else:
process.kill()
raise
def terminate_emulator_process(self, device_id):
# Terminate the emulator
kill_emu_args = [
'-s', device_id,
'emu', 'kill'
]
self.adb_cmd.run(kill_emu_args)
def run_emulation_process(self, process_func):
"""
Execute a function that relies on the session based android simulator.
:param process_func: The process function to execute. Function requires one argument which will be the device id
:return: The return value of the process function
"""
emulator_device_id = None
try:
emulator_process, emulator_device_id = self.launch_emulator_process()
return process_func(emulator_device_id)
finally:
if emulator_device_id is not None:
self.terminate_emulator_process(emulator_device_id)
def process_unit_test_on_simulator(base_android_sdk_path, build_path, build_config):
"""
    Run the android unit tests on a session-scoped simulator
    :param base_android_sdk_path: The path to where the Android SDK exists
    :param build_path: The build path (relative to the engine root) where the android unit test project is configured and built
    :param build_config: The configuration of the built unit test APK to run
"""
python_cmd = Command("Python", ENGINE_ROOT / 'python' / 'python.cmd')
android_script_root = ENGINE_ROOT / 'cmake' / 'Tools' / 'Platform' / 'Android'
assert android_script_root.is_dir(), "Missing the android scripts path in the engine folder hierarchy"
deploy_android_py_path = android_script_root / 'deploy_android.py'
assert deploy_android_py_path.is_file(), "Missing the android deployment script in the engine folder hierarchy"
launch_android_ptest_py_path = android_script_root / 'launch_android_test.py'
assert launch_android_ptest_py_path.is_file(), "Missing the android unit test launcher script in the engine folder hierarchy"
def _install_and_run_unit_tests(emulator_id):
# install unit test on the emulator
install_apk_args = [
str(deploy_android_py_path),
'-b', build_path,
'-c', build_config,
'--device-id-filter', emulator_id,
'--clean'
]
python_cmd.run(cmd_args=install_apk_args,
cwd=os.path.normpath(str(ENGINE_ROOT)))
try:
# Launch the unit test on the emulator
launch_apk_args = [
str(launch_android_ptest_py_path),
'-b', build_path,
'-c', build_config,
'--device-serial', emulator_id
]
python_cmd.run(cmd_args=launch_apk_args,
cwd=os.path.normpath(str(ENGINE_ROOT)))
return True
except AndroidEmuError:
print("\n\n")
raise AndroidEmuError("Unit Tests Failed")
# Prepare the emulator manager
manager = AndroidEmulatorManager(base_android_sdk_path=base_android_sdk_path,
force_avd_creation=True)
# Make sure that the android SDK is up to date
manager.update_installed_sdks()
# First Install or overwrite the unit test emulator
manager.install_unit_test_avd()
# Run the emulator-dependent process based on the session AVD created by the manager
manager.run_emulation_process(_install_and_run_unit_tests)
# Uninstall the AVD when done
manager.uninstall_unit_test_avd()
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Install and run an Android unit test APK on an Android simulator.")
parser.add_argument('--android-sdk-path',
help='Path to the Android SDK')
parser.add_argument('--build-path',
help='The build path (relative to the engine root) where the project was generated and the APK is built',
required=True)
parser.add_argument('--build-config',
help='The build config of the built APK',
required=True)
parser.add_argument('--debug',
help='Enable debug messages from this script',
action="store_true")
parsed_args = parser.parse_args(sys.argv[1:])
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG if parsed_args.debug else logging.INFO)
try:
base_android_sdk_path = pathlib.Path(parsed_args.android_sdk_path) if parsed_args.android_sdk_path else get_android_sdk_path()
process_unit_test_on_simulator(base_android_sdk_path=base_android_sdk_path,
build_path=parsed_args.build_path,
build_config=parsed_args.build_config)
exit(0)
except AndroidEmuError as e:
print(e)
exit(1)
| []
| []
| [
"LY_ANDROID_SDK"
]
| [] | ["LY_ANDROID_SDK"] | python | 1 | 0 | |
assessments/ch9/question2/main.go | package main
import (
"encoding/json"
"net/http"
"os"
"github.com/aws/aws-lambda-go/events"
"github.com/aws/aws-lambda-go/lambda"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/aws/external"
"github.com/aws/aws-sdk-go-v2/service/dynamodb"
"github.com/aws/aws-sdk-go-v2/service/dynamodb/expression"
)
type Movie struct {
ID string `json:"id"`
Name string `json:"name"`
}
func filter(keyword string) (events.APIGatewayProxyResponse, error) {
cfg, err := external.LoadDefaultAWSConfig()
if err != nil {
return events.APIGatewayProxyResponse{
StatusCode: http.StatusInternalServerError,
Body: "Error while retrieving AWS credentials",
}, nil
}
filter := expression.Name("category").Contains(keyword)
projection := expression.NamesList(expression.Name("id"), expression.Name("name"), expression.Name("description"))
expr, err := expression.NewBuilder().WithFilter(filter).WithProjection(projection).Build()
if err != nil {
return events.APIGatewayProxyResponse{
StatusCode: http.StatusInternalServerError,
Body: "Error while building DynamoDB expression",
}, nil
}
svc := dynamodb.New(cfg)
req := svc.ScanRequest(&dynamodb.ScanInput{
TableName: aws.String(os.Getenv("TABLE_NAME")),
ExpressionAttributeNames: expr.Names(),
ExpressionAttributeValues: expr.Values(),
FilterExpression: expr.Filter(),
ProjectionExpression: expr.Projection(),
})
res, err := req.Send()
if err != nil {
return events.APIGatewayProxyResponse{
StatusCode: http.StatusInternalServerError,
Body: "Error while scanning DynamoDB",
}, nil
}
movies := make([]Movie, 0)
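	// The attribute keys below must match the lowercase names used in the
	// projection expression above.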
for _, item := range res.Items {
movies = append(movies, Movie{
ID: *item["ID"].S,
Name: *item["Name"].S,
})
}
response, err := json.Marshal(movies)
if err != nil {
return events.APIGatewayProxyResponse{
StatusCode: http.StatusInternalServerError,
Body: "Error while decoding to string value",
}, nil
}
return events.APIGatewayProxyResponse{
StatusCode: 200,
Headers: map[string]string{
"Content-Type": "application/json",
},
Body: string(response),
}, nil
}
func main() {
lambda.Start(filter)
}
| [
"\"TABLE_NAME\""
]
| []
| [
"TABLE_NAME"
]
| [] | ["TABLE_NAME"] | go | 1 | 0 | |
pdf_struct/core/download.py | # Copyright (c) 2015 Preferred Infrastructure, Inc.
# Copyright (c) 2015 Preferred Networks, Inc.
# Copyright (c) 2021, Hitachi America Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import contextlib
import hashlib
import os
import shutil
import sys
import tempfile
import filelock
import urllib.error
import urllib.request
_dataset_root = os.environ.get(
'PDFSTRUCT_DATASET_ROOT',
os.path.join(os.path.expanduser('~'), '.pdf-struct', 'model'))
_url_root = os.environ.get(
'PDFSTRUCT_URL_ROOT',
'https://github.com/stanfordnlp/pdf-struct-models/raw/0.1.0/models/')
def get_cache_root():
"""Gets the path to the root directory to download and cache datasets.
Returns:
str: The path to the dataset root directory.
"""
return _dataset_root
def get_model_url(model_name):
return _url_root + model_name + '.joblib'
def get_cache_filename(url):
return hashlib.md5(url.encode('utf-8')).hexdigest()
def cached_download(url):
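    """Download ``url`` into the shared cache (at most once) and return the local path."""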
cache_root = get_cache_root()
try:
os.makedirs(cache_root)
except OSError:
if not os.path.isdir(cache_root):
raise
lock_path = os.path.join(cache_root, '_dl_lock')
cache_path = os.path.join(cache_root, get_cache_filename(url))
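    # Download into a private temp directory first and only move the finished file
    # into the cache while holding the lock, so concurrent callers never observe a
    # partial download.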
with filelock.FileLock(lock_path):
if os.path.exists(cache_path):
return cache_path
with tempdir(dir=cache_root) as temp_root:
temp_path = os.path.join(temp_root, 'dl')
sys.stderr.write('Downloading from {}...\n'.format(url))
sys.stderr.flush()
urllib.request.urlretrieve(url, temp_path)
with filelock.FileLock(lock_path):
shutil.move(temp_path, cache_path)
return cache_path
def cached_model_download(model_name):
url = get_model_url(model_name)
try:
path = cached_download(url)
except urllib.error.HTTPError as e:
if e.code == 404:
return None
else:
raise e
return path
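# Example usage (the model name here is illustrative; the names actually
# available depend on the model repository hosted at PDFSTRUCT_URL_ROOT):
#     path = cached_model_download('paragraphs')
#     if path is None:
#         raise RuntimeError('model is not published')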
@contextlib.contextmanager
def tempdir(**kwargs):
# A context manager that defines a lifetime of a temporary directory.
ignore_errors = kwargs.pop('ignore_errors', False)
temp_dir = tempfile.mkdtemp(**kwargs)
try:
yield temp_dir
finally:
shutil.rmtree(temp_dir, ignore_errors=ignore_errors)
| []
| []
| [
"PDFSTRUCT_URL_ROOT",
"PDFSTRUCT_DATASET_ROOT"
]
| [] | ["PDFSTRUCT_URL_ROOT", "PDFSTRUCT_DATASET_ROOT"] | python | 2 | 0 | |
src/test/java/com/aliyuncs/fc/FunctionComputeClientTest.java | package com.aliyuncs.fc;
import com.aliyuncs.DefaultAcsClient;
import com.aliyuncs.auth.BasicSessionCredentials;
import com.aliyuncs.auth.InstanceProfileCredentialsProvider;
import com.aliyuncs.fc.auth.AcsURLEncoder;
import com.aliyuncs.fc.auth.SignURLConfig;
import com.aliyuncs.fc.client.FunctionComputeClient;
import com.aliyuncs.fc.client.PopClient;
import com.aliyuncs.fc.config.Config;
import com.aliyuncs.fc.constants.Const;
import com.aliyuncs.fc.exceptions.ClientException;
import com.aliyuncs.fc.exceptions.ErrorCodes;
import com.aliyuncs.fc.model.*;
import com.aliyuncs.fc.model.NasConfig.NasMountConfig;
import com.aliyuncs.fc.request.*;
import com.aliyuncs.fc.response.*;
import com.aliyuncs.fc.utils.Util;
import com.aliyuncs.http.MethodType;
import com.aliyuncs.http.ProtocolType;
import com.aliyuncs.profile.DefaultProfile;
import com.aliyuncs.profile.IClientProfile;
import com.aliyuncs.sts.model.v20150401.AssumeRoleRequest;
import com.aliyuncs.sts.model.v20150401.AssumeRoleResponse;
import com.aliyuncs.sts.model.v20150401.AssumeRoleResponse.Credentials;
import com.google.common.base.Strings;
import com.google.gson.Gson;
import com.google.gson.JsonObject;
import org.apache.commons.lang.StringUtils;
import org.json.JSONException;
import org.junit.*;
import java.io.File;
import java.io.IOException;
import java.io.PrintWriter;
import java.net.HttpURLConnection;
import java.net.URL;
import java.security.NoSuchAlgorithmException;
import java.text.DateFormat;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.*;
import static com.aliyuncs.fc.constants.Const.DEFAULT_REGEX;
import static com.aliyuncs.fc.constants.Const.NONE;
import static com.aliyuncs.fc.constants.HeaderKeys.OPENTRACING_SPANCONTEXT;
import static com.aliyuncs.fc.constants.HeaderKeys.OPENTRACING_SPANCONTEXT_BAGGAGE_PREFIX;
import static com.aliyuncs.fc.model.HttpAuthType.ANONYMOUS;
import static com.aliyuncs.fc.model.HttpAuthType.FUNCTION;
import static com.aliyuncs.fc.model.HttpMethod.*;
import static java.util.Arrays.asList;
import static java.util.Arrays.deepEquals;
import static junit.framework.TestCase.assertEquals;
import static junit.framework.TestCase.assertFalse;
import static junit.framework.TestCase.assertNull;
import static junit.framework.TestCase.fail;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.*;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
 * Validation for FunctionComputeClient; tests cover creating, listing, getting and
 * updating services, functions and triggers
*/
public class FunctionComputeClientTest {
public static final String STS_API_VERSION = "2015-04-01";
private static final DateFormat DATE_FORMAT = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss");
private static final String VALIDATE_MSG = "cannot be blank";
private static final String REGION = System.getenv("REGION");
private static final String ENDPOINT = System.getenv("ENDPOINT");
private static final String ROLE = System.getenv("ROLE");
private static final String STS_ROLE = System.getenv("STS_ROLE");
private static final String ACCESS_KEY = System.getenv("ACCESS_KEY");
private static final String SECRET_KEY = System.getenv("SECRET_KEY");
private static final String ACCOUNT_ID = System.getenv("ACCOUNT_ID");
private static final String CODE_BUCKET = System.getenv("CODE_BUCKET");
private static final String INVOCATION_ROLE = System.getenv("INVOCATION_ROLE");
private static final String LOG_PROJECT = System.getenv("LOG_PROJECT");
private static final String LOG_STORE = System.getenv("LOG_STORE");
private static final String VPC_ID = System.getenv("VPC_ID");
private static final String VSWITCH_IDS = System.getenv("VSWITCH_IDS");
private static final String SECURITY_GROUP_ID = System.getenv("SECURITY_GROUP_ID");
private static final String USER_ID = System.getenv("USER_ID");
private static final String GROUP_ID = System.getenv("GROUP_ID");
private static final String NAS_SERVER_ADDR = System.getenv("NAS_SERVER_ADDR");
private static final String NAS_MOUNT_DIR = System.getenv("NAS_MOUNT_DIR");
private static final String PUBLIC_KEY_CERTIFICATE_01 = System.getenv("PUBLIC_KEY_CERTIFICATE_01");
private static final String PRIVATE_KEY_01 = System.getenv("PRIVATE_KEY_01");
private static final String PUBLIC_KEY_CERTIFICATE_02 = System.getenv("PUBLIC_KEY_CERTIFICATE_02");
private static final String PRIVATE_KEY_02 = System.getenv("PRIVATE_KEY_02");
private static final String JAEGER_ENDPOINT = System.getenv("JAEGER_ENDPOINT");
private static final String OSS_SOURCE_ARN =
String.format("acs:oss:%s:%s:%s", REGION, ACCOUNT_ID, CODE_BUCKET);
private static final String LOG_SOURCE_ARN =
String.format("acs:log:%s:%s:project/%s", REGION, ACCOUNT_ID, LOG_PROJECT);
private static final String CDN_SOURCE_ARN =
String.format("acs:cdn:*:%s", ACCOUNT_ID);
private static final String SERVICE_NAME = "testServiceJavaSDK";
private static final String SERVICE_DESC_OLD = "service desc";
private static final String SERVICE_DESC_NEW = "service desc updated";
private static final String FUNCTION_NAME = "testFunction";
private static final String FUNCTION_DESC_OLD = "function desc";
private static final String FUNCTION_DESC_NEW = "function desc updated";
private static final String TRIGGER_NAME = "testTrigger";
private static final String TRIGGER_TYPE_OSS = "oss";
private static final String TRIGGER_TYPE_HTTP = "http";
private static final String TRIGGER_TYPE_LOG = "log";
private static final String TRIGGER_TYPE_CDN = "cdn_events";
private static final String TRIGGER_TYPE_TIMER = "timer";
private static final String CUSTOMDOMAIN_NAME = String.format("java-sdk.cn-hongkong.%s.cname-test.functioncompute.com", ACCOUNT_ID);
private static final String CERT_NAME = "CERT_NAME";
private static final Gson gson = new Gson();
private FunctionComputeClient client;
@BeforeClass
public static void setupSuite() {
System.out.println("ENDPOINT: " + ENDPOINT);
System.out.println("ROLE: " + ROLE);
System.out.println("VPC_ID: " + VPC_ID);
System.out.println("STS_ROLE: " + STS_ROLE);
}
@Before
public void setup() {
// Create or clean up everything under the test service
client = new FunctionComputeClient(REGION, ACCOUNT_ID, ACCESS_KEY, SECRET_KEY);
if (!Strings.isNullOrEmpty(ENDPOINT)) {
client.setEndpoint(ENDPOINT);
}
GetServiceRequest getSReq = new GetServiceRequest(SERVICE_NAME);
try {
client.getService(getSReq);
cleanUpAliases(SERVICE_NAME);
cleanUpVersions(SERVICE_NAME);
cleanUpFunctions(SERVICE_NAME);
cleanupService(SERVICE_NAME);
cleanUpFunctions(SERVICE_NAME + "-nas");
cleanupService(SERVICE_NAME + "-nas");
} catch (ClientException e) {
if (!ErrorCodes.SERVICE_NOT_FOUND.equals(e.getErrorCode())) {
throw e;
}
}
}
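    /**
     * Builds a client that authenticates either with STS temporary credentials
     * obtained through assume-role or with the long-lived AK/SK pair; useHttps
     * selects the transport protocol.
     */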
public FunctionComputeClient overrideFCClient(boolean useSts, boolean useHttps)
throws com.aliyuncs.exceptions.ClientException {
if (useSts) {
Credentials creds = getAssumeRoleCredentials(null);
FunctionComputeClient fcClient = new FunctionComputeClient(
new Config(REGION, ACCOUNT_ID,
creds.getAccessKeyId(), creds.getAccessKeySecret(), creds.getSecurityToken(),
useHttps));
if (!Strings.isNullOrEmpty(ENDPOINT)) {
fcClient.setEndpoint(ENDPOINT);
}
return fcClient;
}
return new FunctionComputeClient(new Config(REGION, ACCOUNT_ID,
ACCESS_KEY, SECRET_KEY, null, useHttps));
}
private void cleanupService(String serviceName) {
DeleteServiceRequest request = new DeleteServiceRequest(serviceName);
try {
client.deleteService(request);
} catch (ClientException e) {
if (!ErrorCodes.SERVICE_NOT_FOUND.equals(e.getErrorCode())) {
throw e;
}
}
System.out.println("Service " + serviceName + " is deleted");
}
private void cleanupProvision(String serviceName, String aliasName, String functionName) {
Integer target = 0;
PutProvisionConfigRequest provisionConfigRequest = new PutProvisionConfigRequest(serviceName, aliasName, functionName);
provisionConfigRequest.setTarget(target);
provisionConfigRequest.setScheduledActions(new ScheduledAction[0]);
PutProvisionConfigResponse provisionConfigResponse = client.putProvisionConfig(provisionConfigRequest);
assertEquals(HttpURLConnection.HTTP_OK, provisionConfigResponse.getStatus());
assertEquals(target, provisionConfigResponse.getTarget());
try {
            // retry for up to 30s for the provisioned containers to be released
int retryTimes = 0;
while (retryTimes < 30) {
// get provisionConfig
GetProvisionConfigRequest getProvisionConfigRequest = new GetProvisionConfigRequest(serviceName, aliasName, functionName);
GetProvisionConfigResponse getProvisionConfigResponse = client.getProvisionConfig(getProvisionConfigRequest);
if (getProvisionConfigResponse.getCurrent() != 0) {
Thread.sleep(1000); // sleep 1s
retryTimes++;
continue;
}
break;
}
assertEquals(true, retryTimes < 30);
} catch (Exception e) {
assertNull(e);
}
}
private void cleanupCustomDomain(String customDomainName) {
DeleteCustomDomainRequest request = new DeleteCustomDomainRequest(customDomainName);
try {
client.deleteCustomDomain(request);
} catch (ClientException e) {
if (!ErrorCodes.DOMAIN_NAME_NOT_FOUND.equals(e.getErrorCode())) {
throw e;
}
}
System.out.println("CustomDomain " + customDomainName + " is deleted");
}
private TriggerMetadata[] listTriggers(String serviceName, String functionName) {
ListTriggersRequest listReq = new ListTriggersRequest(serviceName,
functionName);
ListTriggersResponse listResp = client.listTriggers(listReq);
assertFalse(Strings.isNullOrEmpty(listResp.getRequestId()));
return listResp.getTriggers();
}
private void cleanUpFunctions(String serviceName) {
ListFunctionsRequest listFReq = new ListFunctionsRequest(serviceName);
ListFunctionsResponse listFResp = client.listFunctions(listFReq);
FunctionMetadata[] functions = listFResp.getFunctions();
for (FunctionMetadata function : functions) {
TriggerMetadata[] triggers = listTriggers(serviceName, function.getFunctionName());
cleanUpTriggers(serviceName, function.getFunctionName(), triggers);
System.out.println(
"All triggers for Function " + function.getFunctionName() + " are deleted");
DeleteFunctionRequest deleteFReq = new DeleteFunctionRequest(serviceName,
function.getFunctionName());
client.deleteFunction(deleteFReq);
}
}
private void cleanUpTriggers(String serviceName, String functionName,
TriggerMetadata[] triggers) {
for (TriggerMetadata trigger : triggers) {
DeleteTriggerResponse response = deleteTrigger(serviceName, functionName,
trigger.getTriggerName());
assertTrue(response.isSuccess());
System.out.println("Trigger " + trigger.getTriggerName() + " is deleted");
}
}
private String cleanUpVersions(String serviceName) {
ListVersionsRequest listVersionsReq = new ListVersionsRequest(serviceName);
ListVersionsResponse listVersionResp = client.listVersions(listVersionsReq);
VersionMetaData[] versions = listVersionResp.getVersions();
for (VersionMetaData version : versions) {
DeleteVersionRequest deleteVersionRequest = new DeleteVersionRequest(serviceName,
version.getVersionId());
DeleteVersionResponse response = client.deleteVersion(deleteVersionRequest);
assertTrue(response.isSuccess());
System.out.println("Version " + version.getVersionId() + " is deleted");
}
return (versions.length > 0) ? versions[0].getVersionId() : "0";
}
private void cleanUpAliases(String serviceName) {
ListAliasesRequest listAliasesRequest = new ListAliasesRequest(serviceName);
ListAliasesResponse listAliasesResponse = client.listAliases(listAliasesRequest);
AliasMetaData[] aliases = listAliasesResponse.getAliases();
for (AliasMetaData alias : aliases) {
DeleteAliasRequest deleteAliasRequest = new DeleteAliasRequest(serviceName,
alias.getAliasName());
DeleteAliasResponse response = client.deleteAlias(deleteAliasRequest);
assertTrue(response.isSuccess());
System.out.println(alias.getAliasName() + " is deleted");
}
}
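// Creates a minimal nodejs4.4 hello-world function with one environment variable and verifies the create response.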
private CreateFunctionResponse createFunction(String functionName) throws IOException {
return createFunction(SERVICE_NAME, functionName);
}
private CreateFunctionResponse createFunction(String serviceName, String functionName) throws IOException {
String source = "exports.handler = function(event, context, callback) {\n" +
" callback(null, 'hello world');\n" +
"};";
byte[] code = Util.createZipByteData("hello_world.js", source);
CreateFunctionRequest createFuncReq = new CreateFunctionRequest(serviceName);
createFuncReq.setFunctionName(functionName);
createFuncReq.setDescription(FUNCTION_DESC_OLD);
createFuncReq.setMemorySize(128);
createFuncReq.setHandler("hello_world.handler");
createFuncReq.setRuntime("nodejs4.4");
Map<String, String> environmentVariables = new HashMap<String, String>();
environmentVariables.put("testKey", "testValue");
createFuncReq.setEnvironmentVariables(environmentVariables);
createFuncReq.setCode(new Code().setZipFile(code));
createFuncReq.setTimeout(10);
CreateFunctionResponse response = client.createFunction(createFuncReq);
assertFalse(Strings.isNullOrEmpty(response.getRequestId()));
assertFalse(Strings.isNullOrEmpty(response.getFunctionId()));
assertEquals(functionName, response.getFunctionName());
assertEquals(FUNCTION_DESC_OLD, response.getDescription());
environmentVariables = response.getEnvironmentVariables();
assertEquals(1, environmentVariables.size());
assertEquals("testValue", environmentVariables.get("testKey"));
return response;
}
private CreateServiceResponse createService(String serviceName) {
return createService(serviceName, true);
}
private CreateServiceResponse createService(String serviceName, boolean check) {
CreateServiceRequest createSReq = new CreateServiceRequest();
createSReq.setServiceName(serviceName);
createSReq.setDescription(SERVICE_DESC_OLD);
createSReq.setRole(ROLE);
CreateServiceResponse response = client.createService(createSReq);
if (check) {
assertEquals(serviceName, response.getServiceName());
assertFalse(Strings.isNullOrEmpty(response.getRequestId()));
assertFalse(Strings.isNullOrEmpty(response.getServiceId()));
assertEquals(SERVICE_DESC_OLD, response.getDescription());
assertEquals(ROLE, response.getRole());
}
return response;
}
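// Creates a service with VPC and NAS mount configuration attached.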
private CreateServiceResponse createVPCService(String serviceName) {
CreateServiceRequest createSReq = new CreateServiceRequest();
createSReq.setServiceName(serviceName);
createSReq.setDescription(SERVICE_DESC_OLD);
createSReq.setRole(ROLE);
createSReq
.setVpcConfig(new VpcConfig(VPC_ID, new String[]{VSWITCH_IDS}, SECURITY_GROUP_ID));
createSReq.setNasConfig(new NasConfig(Integer.parseInt(USER_ID), Integer.parseInt(GROUP_ID),
new NasMountConfig[]{
new NasMountConfig(NAS_SERVER_ADDR, NAS_MOUNT_DIR)
}));
CreateServiceResponse response = client.createService(createSReq);
assertEquals(serviceName, response.getServiceName());
assertFalse(Strings.isNullOrEmpty(response.getRequestId()));
assertFalse(Strings.isNullOrEmpty(response.getServiceId()));
assertEquals(SERVICE_DESC_OLD, response.getDescription());
assertEquals(ROLE, response.getRole());
assertEquals(VPC_ID, response.getVpcConfig().getVpcId());
assertEquals(SECURITY_GROUP_ID, response.getVpcConfig().getSecurityGroupId());
return response;
}
private CreateTriggerResponse createHttpTrigger(String triggerName, HttpAuthType authType,
HttpMethod[] methods) {
return createHttpTriggerWithQualifier(triggerName, "", authType, methods);
}
private CreateTriggerResponse createHttpTriggerWithQualifier(String triggerName,
String qualifier,
HttpAuthType authType, HttpMethod[] methods) {
CreateTriggerRequest createReq = new CreateTriggerRequest(SERVICE_NAME, FUNCTION_NAME);
createReq.setTriggerName(triggerName);
createReq.setTriggerType(TRIGGER_TYPE_HTTP);
createReq.setTriggerConfig(new HttpTriggerConfig(authType, methods));
if (!qualifier.isEmpty()) {
createReq.setQualifier(qualifier);
}
return client.createTrigger(createReq);
}
private CreateTriggerResponse createOssTrigger(String triggerName, String prefix,
String suffix) {
CreateTriggerRequest createTReq = new CreateTriggerRequest(SERVICE_NAME, FUNCTION_NAME);
createTReq.setTriggerName(triggerName);
createTReq.setTriggerType(TRIGGER_TYPE_OSS);
createTReq.setInvocationRole(INVOCATION_ROLE);
createTReq.setSourceArn(OSS_SOURCE_ARN);
createTReq.setTriggerConfig(
new OSSTriggerConfig(new String[]{"oss:ObjectCreated:*"}, prefix, suffix));
CreateTriggerResponse resp = client.createTrigger(createTReq);
try {
// Sleep a bit since OSS notification creation is not strongly consistent
Thread.sleep(5000);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
return resp;
}
private DeleteTriggerResponse deleteTrigger(String serviceName, String funcName,
String triggerName) {
DeleteTriggerRequest req = new DeleteTriggerRequest(serviceName, funcName, triggerName);
DeleteTriggerResponse resp = client.deleteTrigger(req);
try {
Thread.sleep(5000);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
return resp;
}
private UpdateTriggerResponse updateTrigger(UpdateTriggerRequest req) {
UpdateTriggerResponse resp = client.updateTrigger(req);
try {
Thread.sleep(5000);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
return resp;
}
// Fetches the given OSS URL and returns the crc64 response header value
private String fetchFromURL(String urlString) {
try {
URL url = new URL(urlString);
HttpURLConnection httpConn = (HttpURLConnection) url.openConnection();
httpConn.setRequestMethod("GET");
httpConn.setConnectTimeout(60 * 1000);
httpConn.setReadTimeout(120 * 1000);
httpConn.connect();
assertEquals(200, httpConn.getResponseCode());
String headerKey = "X-Oss-Hash-Crc64ecma";
Map<String, List<String>> headers = httpConn.getHeaderFields();
for (Map.Entry<String, List<String>> entry : headers.entrySet()) {
String key = entry.getKey();
if (null == key || !key.equalsIgnoreCase(headerKey)) {
continue;
}
List<String> values = entry.getValue();
StringBuilder builder = new StringBuilder(values.get(0));
for (int i = 1; i < values.size(); i++) {
builder.append(",");
builder.append(values.get(i));
}
return builder.toString();
}
} catch (Exception e) {
fail(String.format("fetchFromURL %s error: %s", urlString, e.toString()));
}
return "";
}
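// Smoke-tests client construction and ListServices against the cn-hangzhou and cn-beijing endpoints.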
@Test
public void testNewRegions() {
FunctionComputeClient clientHz = new FunctionComputeClient("cn-hangzhou", ACCOUNT_ID,
ACCESS_KEY, SECRET_KEY);
ListServicesResponse lrHz = clientHz.listServices(new ListServicesRequest());
assertEquals(HttpURLConnection.HTTP_OK, lrHz.getStatus());
FunctionComputeClient clientBj = new FunctionComputeClient("cn-beijing", ACCOUNT_ID,
ACCESS_KEY, SECRET_KEY);
ListServicesResponse lrBj = clientBj.listServices(new ListServicesRequest());
assertEquals(HttpURLConnection.HTTP_OK, lrBj.getStatus());
}
@Test
public void testCRUD()
throws ClientException, JSONException, NoSuchAlgorithmException, InterruptedException, ParseException, IOException {
testCRUDHelper(true);
}
@Test
public void testCRUDStsToken() throws com.aliyuncs.exceptions.ClientException,
ParseException, InterruptedException, IOException {
client = overrideFCClient(true, false);
testCRUDHelper(false);
}
@Test
public void testCRUDStsTokenHttps() throws com.aliyuncs.exceptions.ClientException,
ParseException, InterruptedException, IOException {
client = overrideFCClient(true, true);
testCRUDHelper(false);
}
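// Builds Python 2.7 handler source that writes, reads back, and removes a file under the NAS mount directory, returning True on success.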
private String generateNASPythonCode() {
return "# -*- coding: utf-8 -*-\n"
+ "import logging \n"
+ "import random\n"
+ "import string\n"
+ "import os.path\n"
+ "import shutil\n"
+ "from os import path\n"
+ "\n"
+ "def handler(event, context):\n"
+ " logger = logging.getLogger()\n"
+ " root_dir1 = \"" + NAS_MOUNT_DIR + "\"\n"
+ " logger.info('uid : ' + str(os.geteuid()))\n"
+ " logger.info('gid : ' + str(os.getgid()))\n"
+ " file_name = randomString(6)+'.txt'\n"
+ " dir1 = root_dir1 + '/rzhang-test/'\n"
+ " content = \"NAS here I come\"\n"
+ " os.makedirs(dir1)\n"
+ " fw = open(dir1+file_name, \"w+\")\n"
+ " fw.write(content)\n"
+ " fw.close()\n"
+ " fr = open(dir1+file_name)\n"
+ " line = fr.readline()\n"
+ " if line != content:\n"
+ " return False\n"
+ " fr.close()\n"
+ " os.remove(dir1+file_name)\n"
+ " os.rmdir(dir1)\n"
+ " return True\n"
+ " \n"
+ "def randomString(n):\n"
+ " return ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(n))\n";
}
@Test
public void testCreateNASService() throws IOException {
String service_name = SERVICE_NAME + "-nas";
createVPCService(service_name);
// Create a function that uses NAS
String source = generateNASPythonCode();
byte[] data = Util.createZipByteData("main.py", source);
String funcName = FUNCTION_NAME + "-nas";
// create function
createFunction(service_name, funcName, "main.handler", "python2.7", data);
// Invoke the function
InvokeFunctionRequest request = new InvokeFunctionRequest(service_name, funcName);
request.setPayload("".getBytes());
InvokeFunctionResponse response = client.invokeFunction(request);
assertEquals("true", new String(response.getPayload()));
// Cleanups
client.deleteFunction(new DeleteFunctionRequest(service_name, funcName));
client.deleteService(new DeleteServiceRequest(service_name));
}
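// Shared setup for the provision-config tests: creates the service and function, publishes a version, and points an alias at it.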
private void preTestProvisionConfig(String serviceName, String functionName, String aliasName) throws Exception {
// create service
createService(serviceName, false);
// create function
createFunction(serviceName, functionName);
// publish a version
String lastVersion = cleanUpVersions(serviceName);
PublishVersionRequest publishVersionRequest = new PublishVersionRequest(serviceName);
PublishVersionResponse publishVersionResponse = client.publishVersion(publishVersionRequest);
assertEquals(String.format("%d", Integer.parseInt(lastVersion) + 1), publishVersionResponse.getVersionId());
// Create an alias against it
String versionId = publishVersionResponse.getVersionId();
CreateAliasRequest createAliasRequest = new CreateAliasRequest(serviceName, aliasName, versionId);
CreateAliasResponse createAliasResponse = client.createAlias(createAliasRequest);
assertEquals(HttpURLConnection.HTTP_OK, createAliasResponse.getStatus());
assertEquals(versionId, createAliasResponse.getVersionId());
assertEquals(aliasName, createAliasResponse.getAliasName());
}
private void afterTestProvisionConfig(String serviceName) {
cleanUpAliases(serviceName);
cleanUpVersions(serviceName);
cleanUpFunctions(serviceName);
cleanupService(serviceName);
}
@Test
public void testProvisionConfig() {
String serviceName = SERVICE_NAME + UUID.randomUUID().toString().substring(0, 5);
String functionName = "hello_world" + UUID.randomUUID().toString().substring(0, 5);
String aliasName = "myAlias";
try {
preTestProvisionConfig(serviceName, functionName, aliasName);
try {
// create provision config
Integer target = 3;
PutProvisionConfigRequest provisionConfigRequest = new PutProvisionConfigRequest(serviceName, aliasName, functionName);
provisionConfigRequest.setTarget(target);
PutProvisionConfigResponse provisionConfigResponse = client.putProvisionConfig(provisionConfigRequest);
assertEquals(HttpURLConnection.HTTP_OK, provisionConfigResponse.getStatus());
assertEquals(target, provisionConfigResponse.getTarget());
// listProvisionConfig
ListProvisionConfigsRequest listProvisionConfigsRequest = new ListProvisionConfigsRequest();
listProvisionConfigsRequest.setServiceName(serviceName);
listProvisionConfigsRequest.setQualifier(aliasName);
listProvisionConfigsRequest.setLimit(100);
ListProvisionConfigsResponse listProvisionConfigsResponse = client.listProvisionConfigs(listProvisionConfigsRequest);
assertEquals(HttpURLConnection.HTTP_OK, listProvisionConfigsResponse.getStatus());
assertEquals(target, listProvisionConfigsResponse.getProvisionConfigs()[0].getTarget());
} catch (Exception e0) {
assertNull(e0);
} finally {
cleanupProvision(serviceName, aliasName, functionName);
}
} catch (Exception e) {
assertNull(e);
} finally {
afterTestProvisionConfig(serviceName);
}
}
@Test
public void testProvisionConfigWithScheduledAction() {
String serviceName = SERVICE_NAME + UUID.randomUUID().toString().substring(0, 5);
String functionName = "hello_world" + UUID.randomUUID().toString().substring(0, 5);
String aliasName = "myAlias";
try {
preTestProvisionConfig(serviceName, functionName, aliasName);
try {
// create provision config
Integer target = 3;
Integer scheduledActionTarget1 = 5;
Integer scheduledActionTarget2 = 5;
PutProvisionConfigRequest provisionConfigRequest = new PutProvisionConfigRequest(serviceName, aliasName, functionName);
ScheduledAction[] scheduledActions = new ScheduledAction[2];
scheduledActions[0] = new ScheduledAction("a1", "2020-10-10T10:10:10Z",
"2030-10-10T10:10:10Z", scheduledActionTarget1, "at(2020-10-20T10:10:10Z)");
scheduledActions[1] = new ScheduledAction("a2", "2020-10-10T10:10:10Z",
"2030-10-10T10:10:10Z", scheduledActionTarget2, "cron(0 */30 * * * *)");
provisionConfigRequest.setTarget(target);
provisionConfigRequest.setScheduledActions(scheduledActions);
PutProvisionConfigResponse provisionConfigResponse = client.putProvisionConfig(provisionConfigRequest);
assertEquals(HttpURLConnection.HTTP_OK, provisionConfigResponse.getStatus());
assertEquals(target, provisionConfigResponse.getTarget());
assertEquals(scheduledActions.length, provisionConfigResponse.getScheduledActions().length);
assertEquals(scheduledActionTarget1, provisionConfigResponse.getScheduledActions()[0].getTarget());
assertEquals(scheduledActionTarget2, provisionConfigResponse.getScheduledActions()[1].getTarget());
// retry for up to 120s for the auto-scaling loop to apply the scheduled action
int retryTimes = 0;
while (retryTimes < 120) {
// get provisionConfig
GetProvisionConfigRequest getProvisionConfigRequest = new GetProvisionConfigRequest(serviceName, aliasName, functionName);
GetProvisionConfigResponse getProvisionConfigResponse = client.getProvisionConfig(getProvisionConfigRequest);
if (getProvisionConfigResponse.getCurrent() != scheduledActionTarget2) {
Thread.sleep(1000); // sleep 1s
retryTimes++;
continue;
}
assertEquals(scheduledActionTarget2, getProvisionConfigResponse.getTarget());
assertEquals(scheduledActionTarget2, getProvisionConfigResponse.getCurrent());
assertEquals(scheduledActions.length, provisionConfigResponse.getScheduledActions().length);
assertEquals(scheduledActionTarget1, provisionConfigResponse.getScheduledActions()[0].getTarget());
assertEquals(scheduledActionTarget2, provisionConfigResponse.getScheduledActions()[1].getTarget());
break;
}
assertTrue(retryTimes < 120);
// set scheduledActions to null, assert scheduledActions will not be modified
provisionConfigRequest.setScheduledActions(null);
PutProvisionConfigResponse provisionConfigResponse2 = client.putProvisionConfig(provisionConfigRequest);
assertEquals(HttpURLConnection.HTTP_OK, provisionConfigResponse2.getStatus());
assertEquals(scheduledActions.length, provisionConfigResponse2.getScheduledActions().length);
// set scheduledActions to [], assert scheduledActions will be modified to empty
provisionConfigRequest.setScheduledActions(new ScheduledAction[0]);
PutProvisionConfigResponse provisionConfigResponse3 = client.putProvisionConfig(provisionConfigRequest);
assertEquals(HttpURLConnection.HTTP_OK, provisionConfigResponse3.getStatus());
assertEquals(0, provisionConfigResponse3.getScheduledActions().length);
} catch (Exception e0) {
e0.printStackTrace();
assertNull(e0);
} finally {
cleanupProvision(serviceName, aliasName, functionName);
}
} catch (Exception e) {
e.printStackTrace();
assertNull(e);
} finally {
afterTestProvisionConfig(serviceName);
}
}
@Test
public void testProvisionConfigWithScheduledActionValidate() {
String serviceName = SERVICE_NAME + UUID.randomUUID().toString().substring(0, 5);
String functionName = "hello_world" + UUID.randomUUID().toString().substring(0, 5);
String aliasName = "myAlias";
try {
preTestProvisionConfig(serviceName, functionName, aliasName);
try {
// actionName repeated
scheduledActionValidate1(serviceName, functionName, aliasName);
// utc time format error
scheduledActionValidate2(serviceName, functionName, aliasName);
// scheduleExpression error
scheduledActionValidate3(serviceName, functionName, aliasName);
// actions out of size
scheduledActionValidate4(serviceName, functionName, aliasName);
} catch (Exception e0) {
assertNull(e0);
}
} catch (Exception e) {
assertNull(e);
} finally {
afterTestProvisionConfig(serviceName);
}
}
@Test
public void testProvisionConfigWithTargetTrackingPolicies() {
String serviceName = SERVICE_NAME + UUID.randomUUID().toString().substring(0, 5);
String functionName = "hello_world" + UUID.randomUUID().toString().substring(0, 5);
String aliasName = "myAlias";
try {
preTestProvisionConfig(serviceName, functionName, aliasName);
try {
// create provision config
Integer target = 3;
PutProvisionConfigRequest provisionConfigRequest = new PutProvisionConfigRequest(serviceName, aliasName, functionName);
TargetTrackingPolicy[] policies = new TargetTrackingPolicy[1];
policies[0] = new TargetTrackingPolicy("p1", "2020-10-10T10:10:10Z",
"2030-10-10T10:10:10Z", "ProvisionedConcurrencyUtilization", new Double(0.6f), 5, 200);
provisionConfigRequest.setTarget(target);
provisionConfigRequest.setTargetTrackingPolicies(policies);
PutProvisionConfigResponse provisionConfigResponse = client.putProvisionConfig(provisionConfigRequest);
assertEquals(HttpURLConnection.HTTP_OK, provisionConfigResponse.getStatus());
assertEquals(target, provisionConfigResponse.getTarget());
assertEquals(1, provisionConfigResponse.getTargetTrackingPolicies().length);
assertEquals("p1", provisionConfigResponse.getTargetTrackingPolicies()[0].getName());
assertEquals("2020-10-10T10:10:10Z", provisionConfigResponse.getTargetTrackingPolicies()[0].getStartTime());
assertEquals("2030-10-10T10:10:10Z", provisionConfigResponse.getTargetTrackingPolicies()[0].getEndTime());
assertEquals("ProvisionedConcurrencyUtilization", provisionConfigResponse.getTargetTrackingPolicies()[0].getMetricType());
assertEquals(Double.valueOf(0.6), provisionConfigResponse.getTargetTrackingPolicies()[0].getMetricTarget());
assertEquals(5, provisionConfigResponse.getTargetTrackingPolicies()[0].getMinCapacity().intValue());
assertEquals(200, provisionConfigResponse.getTargetTrackingPolicies()[0].getMaxCapacity().intValue());
// set targetTrackingPolicies to null, assert targetTrackingPolicies will not be modified
provisionConfigRequest.setTargetTrackingPolicies(null);
PutProvisionConfigResponse provisionConfigResponse2 = client.putProvisionConfig(provisionConfigRequest);
assertEquals(HttpURLConnection.HTTP_OK, provisionConfigResponse2.getStatus());
assertEquals(1, provisionConfigResponse2.getTargetTrackingPolicies().length);
// set targetTrackingPolicies to [], assert targetTrackingPolicies will be modified to empty
provisionConfigRequest.setTargetTrackingPolicies(new TargetTrackingPolicy[0]);
PutProvisionConfigResponse provisionConfigResponse3 = client.putProvisionConfig(provisionConfigRequest);
assertEquals(HttpURLConnection.HTTP_OK, provisionConfigResponse3.getStatus());
assertEquals(0, provisionConfigResponse3.getTargetTrackingPolicies().length);
} catch (Exception e0) {
e0.printStackTrace();
assertNull(e0);
} finally {
cleanupProvision(serviceName, aliasName, functionName);
}
} catch (Exception e) {
e.printStackTrace();
assertNull(e);
} finally {
afterTestProvisionConfig(serviceName);
}
}
// actionName repeated
private void scheduledActionValidate1(String serviceName, String functionName, String aliasName) {
String actionName = "action1";
try {
// create provision config
Integer target = 3;
Integer scheduledActionTarget1 = 5;
Integer scheduledActionTarget2 = 5;
PutProvisionConfigRequest provisionConfigRequest = new PutProvisionConfigRequest(serviceName, aliasName, functionName);
ScheduledAction[] scheduledActions = new ScheduledAction[2];
scheduledActions[0] = new ScheduledAction(actionName, "2020-10-10T10:10:10Z",
"2030-10-10T10:10:10Z", scheduledActionTarget1, "at(2020-10-20T10:10:10Z)");
scheduledActions[1] = new ScheduledAction(actionName, "2020-10-10T10:10:10Z",
"2030-10-10T10:10:10Z", scheduledActionTarget2, "cron(0 */30 * * * *)");
provisionConfigRequest.setTarget(target);
provisionConfigRequest.setScheduledActions(scheduledActions);
client.putProvisionConfig(provisionConfigRequest);
fail("ClientException is expected");
} catch (ClientException clientException) {
assertEquals("Duplicate action name '" + actionName + "' in ScheduledActions is not allowed",
clientException.getErrorMessage());
}
}
// utc time format error
private void scheduledActionValidate2(String serviceName, String functionName, String aliasName) {
try {
// create provision config
Integer target = 3;
Integer scheduledActionTarget1 = 5;
PutProvisionConfigRequest provisionConfigRequest = new PutProvisionConfigRequest(serviceName, aliasName, functionName);
ScheduledAction[] scheduledActions = new ScheduledAction[1];
scheduledActions[0] = new ScheduledAction("a1", "2020-10-10T10:10:10",
"2030-10-10T10:10:10Z", scheduledActionTarget1, "at(2020-10-20T10:10:10Z)");
provisionConfigRequest.setTarget(target);
provisionConfigRequest.setScheduledActions(scheduledActions);
client.putProvisionConfig(provisionConfigRequest);
fail("ClientException is expected");
} catch (ClientException clientException) {
assertEquals("The StartTime is not in UTC time format (example: '2020-10-10T10:10:10Z', " +
"actual: '2020-10-10T10:10:10')", clientException.getErrorMessage());
}
}
// scheduleExpression error
private void scheduledActionValidate3(String serviceName, String functionName, String aliasName) {
try {
// create provision config
Integer target = 3;
Integer scheduledActionTarget1 = 5;
PutProvisionConfigRequest provisionConfigRequest = new PutProvisionConfigRequest(serviceName, aliasName, functionName);
ScheduledAction[] scheduledActions = new ScheduledAction[1];
scheduledActions[0] = new ScheduledAction("a1", "2020-10-10T10:10:10Z",
"2030-10-10T10:10:10Z", scheduledActionTarget1, "cron(0s */30 * * * *)");
provisionConfigRequest.setTarget(target);
provisionConfigRequest.setScheduledActions(scheduledActions);
client.putProvisionConfig(provisionConfigRequest);
fail("ClientException is expected");
} catch (ClientException clientException) {
assertEquals("The ScheduleExpression should be atTime or cron expression " +
"(example: ['at(2020-10-10T10:10:10Z)', 'cron(0 */30 * * * *)'], actual: 'cron(0s */30 * * * *)')",
clientException.getErrorMessage());
}
}
// actions out of size
private void scheduledActionValidate4(String serviceName, String functionName, String aliasName) {
try {
// create provision config
Integer target = 3;
Integer scheduledActionTarget = 5;
PutProvisionConfigRequest provisionConfigRequest = new PutProvisionConfigRequest(serviceName, aliasName, functionName);
ScheduledAction[] scheduledActions = new ScheduledAction[110];
for (int index = 0; index < 110; index++) {
scheduledActions[index] = new ScheduledAction("action_" + index, "2020-10-10T10:10:10Z",
"2030-10-10T10:10:10Z", scheduledActionTarget, "cron(0 */30 * * * *)");
}
provisionConfigRequest.setTarget(target);
provisionConfigRequest.setScheduledActions(scheduledActions);
client.putProvisionConfig(provisionConfigRequest);
fail("ClientException is expected");
} catch (ClientException clientException) {
assertEquals("ScheduledActions contains too many values (max: 100, actual: 110)",
clientException.getErrorMessage());
}
}
@Test
public void testServiceWithTracingConfig() {
String serviceName = SERVICE_NAME + "-tracing";
String functionName = "hello_world";
JaegerConfig jaegerConfig = new JaegerConfig();
jaegerConfig.setEndpoint(JAEGER_ENDPOINT);
TracingConfig tracingConfig = new TracingConfig();
tracingConfig.setJaegerConfig(jaegerConfig);
try {
// create service with tracingConfig
CreateServiceRequest req = new CreateServiceRequest();
req.setServiceName(serviceName);
req.setTracingConfig(tracingConfig);
CreateServiceResponse resp = client.createService(req);
assertNotNull(resp.getTracingConfig());
assertNotNull(resp.getTracingConfig().getJaegerConfig());
assertEquals(JAEGER_ENDPOINT, resp.getTracingConfig().getJaegerConfig().getEndpoint());
// get service with tracingConfig
GetServiceRequest getServiceRequest = new GetServiceRequest(serviceName);
GetServiceResponse getServiceResponse = client.getService(getServiceRequest);
assertNotNull(getServiceResponse.getTracingConfig());
assertNotNull(getServiceResponse.getTracingConfig().getJaegerConfig());
assertEquals(JAEGER_ENDPOINT, getServiceResponse.getTracingConfig().getJaegerConfig().getEndpoint());
// create function
String source = "exports.handler = function(event, context, callback) {\n" +
" callback(null, context.tracing.openTracingSpanContext + '|' + context.tracing.openTracingSpanBaggages['key']);\n" +
"};";
byte[] code = Util.createZipByteData("hello_world.js", source);
CreateFunctionRequest createFuncReq = new CreateFunctionRequest(serviceName);
createFuncReq.setFunctionName(functionName);
createFuncReq.setDescription(FUNCTION_DESC_OLD);
createFuncReq.setMemorySize(128);
createFuncReq.setHandler("hello_world.handler");
createFuncReq.setRuntime("nodejs4.4");
createFuncReq.setCode(new Code().setZipFile(code));
createFuncReq.setTimeout(10);
CreateFunctionResponse response = client.createFunction(createFuncReq);
assertEquals(functionName, response.getFunctionName());
// invokeFunction with injected span context
InvokeFunctionRequest invokeFunctionRequest = new InvokeFunctionRequest(serviceName, functionName);
invokeFunctionRequest.setHeader(OPENTRACING_SPANCONTEXT, "124ed43254b54966:124ed43254b54966:0:1");
invokeFunctionRequest.setHeader(OPENTRACING_SPANCONTEXT_BAGGAGE_PREFIX + "key", "val");
InvokeFunctionResponse invokeFunctionResponse = client.invokeFunction(invokeFunctionRequest);
String payload = new String(invokeFunctionResponse.getPayload());
assertTrue(payload.contains("124ed43254b54966"));
assertTrue(payload.contains("val"));
// update service and disable tracingConfig
UpdateServiceRequest updateServiceRequest = new UpdateServiceRequest(serviceName);
updateServiceRequest.setTracingConfig(new TracingConfig());
UpdateServiceResponse updateServiceResponse = client.updateService(updateServiceRequest);
assertNotNull(updateServiceResponse.getTracingConfig());
assertNull(updateServiceResponse.getTracingConfig().getType());
assertNull(updateServiceResponse.getTracingConfig().getParams());
} catch (Exception e) {
e.printStackTrace();
// fail the case on any unexpected exception
assertNull(e);
} finally {
cleanUpFunctions(serviceName);
cleanupService(serviceName);
}
}
@Test
public void testServiceWithRequestMetrics() {
String serviceName = SERVICE_NAME + "-requestMetrics";
LogConfig logConfig = new LogConfig(LOG_PROJECT, LOG_STORE, true);
try {
// create service with enableRequestMetrics
CreateServiceRequest req = new CreateServiceRequest();
req.setServiceName(serviceName);
req.setRole(ROLE);
req.setLogConfig(logConfig);
CreateServiceResponse resp = client.createService(req);
assertNotNull(resp.getLogConfig());
assertTrue(resp.getLogConfig().getEnableRequestMetrics());
assertFalse(resp.getLogConfig().getEnableInstanceMetrics());
// get service
GetServiceRequest getServiceRequest = new GetServiceRequest(serviceName);
GetServiceResponse getServiceResponse = client.getService(getServiceRequest);
assertNotNull(getServiceResponse.getLogConfig());
assertTrue(getServiceResponse.getLogConfig().getEnableRequestMetrics());
assertFalse(getServiceResponse.getLogConfig().getEnableInstanceMetrics());
// update service and disable requestMetrics
logConfig.setEnableRequestMetrics(false);
UpdateServiceRequest updateServiceRequest = new UpdateServiceRequest(serviceName);
updateServiceRequest.setLogConfig(logConfig);
UpdateServiceResponse updateServiceResponse = client.updateService(updateServiceRequest);
assertNotNull(updateServiceResponse.getLogConfig());
assertEquals(LOG_PROJECT, updateServiceResponse.getLogConfig().getProject());
assertEquals(LOG_STORE, updateServiceResponse.getLogConfig().getLogStore());
assertFalse(updateServiceResponse.getLogConfig().getEnableRequestMetrics());
assertFalse(updateServiceResponse.getLogConfig().getEnableInstanceMetrics());
// update service and disable logs
logConfig = new LogConfig("", "", false);
updateServiceRequest = new UpdateServiceRequest(serviceName);
updateServiceRequest.setLogConfig(logConfig);
updateServiceResponse = client.updateService(updateServiceRequest);
assertNotNull(updateServiceResponse.getLogConfig());
assertEquals("", updateServiceResponse.getLogConfig().getProject());
assertEquals("", updateServiceResponse.getLogConfig().getLogStore());
assertFalse(updateServiceResponse.getLogConfig().getEnableRequestMetrics());
assertFalse(updateServiceResponse.getLogConfig().getEnableInstanceMetrics());
} catch (Exception e) {
e.printStackTrace();
// fail the case on any unexpected exception
assertNull(e);
} finally {
cleanUpFunctions(serviceName);
cleanupService(serviceName);
}
}
@Test
public void testServiceWithRequestMetricsAndInstanceMetrics() {
String serviceName = SERVICE_NAME + "-instanceMetrics";
LogConfig logConfig = new LogConfig(LOG_PROJECT, LOG_STORE, true, true, NONE);
try {
// create service with enableRequestMetrics and enableInstanceMetrics
CreateServiceRequest req = new CreateServiceRequest();
req.setServiceName(serviceName);
req.setRole(ROLE);
req.setLogConfig(logConfig);
CreateServiceResponse resp = client.createService(req);
assertNotNull(resp.getLogConfig());
assertTrue(resp.getLogConfig().getEnableRequestMetrics());
assertTrue(resp.getLogConfig().getEnableInstanceMetrics());
assertEquals(NONE, resp.getLogConfig().getLogBeginRule());
// get service
GetServiceRequest getServiceRequest = new GetServiceRequest(serviceName);
GetServiceResponse getServiceResponse = client.getService(getServiceRequest);
assertNotNull(getServiceResponse.getLogConfig());
assertTrue(getServiceResponse.getLogConfig().getEnableRequestMetrics());
assertTrue(getServiceResponse.getLogConfig().getEnableInstanceMetrics());
assertEquals(NONE, getServiceResponse.getLogConfig().getLogBeginRule());
// update service and disable requestMetrics and instance metrics
logConfig.setEnableRequestMetrics(false);
logConfig.setEnableInstanceMetrics(false);
logConfig.setLogBeginRule(DEFAULT_REGEX);
UpdateServiceRequest updateServiceRequest = new UpdateServiceRequest(serviceName);
updateServiceRequest.setLogConfig(logConfig);
UpdateServiceResponse updateServiceResponse = client.updateService(updateServiceRequest);
assertNotNull(updateServiceResponse.getLogConfig());
assertEquals(LOG_PROJECT, updateServiceResponse.getLogConfig().getProject());
assertEquals(LOG_STORE, updateServiceResponse.getLogConfig().getLogStore());
assertFalse(updateServiceResponse.getLogConfig().getEnableRequestMetrics());
assertFalse(updateServiceResponse.getLogConfig().getEnableInstanceMetrics());
assertEquals(DEFAULT_REGEX, updateServiceResponse.getLogConfig().getLogBeginRule());
// update service and disable logs
logConfig = new LogConfig("", "",
false, false, NONE);
updateServiceRequest = new UpdateServiceRequest(serviceName);
updateServiceRequest.setLogConfig(logConfig);
updateServiceResponse = client.updateService(updateServiceRequest);
assertNotNull(updateServiceResponse.getLogConfig());
assertEquals("", updateServiceResponse.getLogConfig().getProject());
assertEquals("", updateServiceResponse.getLogConfig().getLogStore());
assertFalse(updateServiceResponse.getLogConfig().getEnableRequestMetrics());
assertFalse(updateServiceResponse.getLogConfig().getEnableInstanceMetrics());
assertEquals(NONE, updateServiceResponse.getLogConfig().getLogBeginRule());
} catch (Exception e) {
e.printStackTrace();
// fail the case on any unexpected exception
assertNull(e);
} finally {
cleanUpFunctions(serviceName);
cleanupService(serviceName);
}
}
@Test
public void testCreateServiceStsTokenNoPassRole()
throws com.aliyuncs.exceptions.ClientException {
// Use a policy that does not have ram:PassRole, this policy will intersect with the role policy
// Access denied is expected if using STS without PassRole allowed
// Policy intersection doc: https://help.aliyun.com/document_detail/31935.html
String policy = "{\"Version\": \"1\",\"Statement\": [{\"Effect\": \"Allow\",\"Action\": [\"fc:*\"],\"Resource\": [\"*\"]}]}";
Credentials creds = getAssumeRoleCredentials(policy);
client = new FunctionComputeClient(new Config(REGION, ACCOUNT_ID,
creds.getAccessKeyId(), creds.getAccessKeySecret(), creds.getSecurityToken(),
false));
try {
createService(SERVICE_NAME);
fail("ClientException is expected");
} catch (ClientException e) {
assertTrue(e.getErrorMessage(), e.getErrorMessage()
.contains("the caller is not authorized to perform 'ram:PassRole'"));
}
}
@Test
public void testCRUDHttpTrigger() throws ParseException, InterruptedException, IOException {
// create service
createService(SERVICE_NAME);
// Create Function
createFunction(FUNCTION_NAME);
// create http trigger
createHttpTrigger(TRIGGER_NAME, ANONYMOUS, new HttpMethod[]{GET, POST});
// List Triggers
TriggerMetadata[] triggers = listTriggers(SERVICE_NAME, FUNCTION_NAME);
assertEquals(1, triggers.length);
TriggerMetadata trigger = triggers[0];
assertEquals(TRIGGER_NAME, trigger.getTriggerName());
assertEquals("http", trigger.getTriggerType());
// retrieve http trigger
GetTriggerRequest getTReq = new GetTriggerRequest(SERVICE_NAME, FUNCTION_NAME,
TRIGGER_NAME);
GetTriggerResponse getTResp = client.getTrigger(getTReq);
HttpTriggerConfig triggerConfig = gson
.fromJson(gson.toJson(getTResp.getTriggerConfig()), HttpTriggerConfig.class);
assertFalse(Strings.isNullOrEmpty(getTResp.getRequestId()));
assertEquals(TRIGGER_NAME, getTResp.getTriggerName());
assertEquals(TRIGGER_TYPE_HTTP, getTResp.getTriggerType());
assertTrue(deepEquals(new HttpMethod[]{GET, POST}, triggerConfig.getMethods()));
// update http trigger
GetTriggerResponse triggerOld = getTResp;
HttpTriggerConfig updateTriggerConfig = new HttpTriggerConfig(
FUNCTION, new HttpMethod[]{POST});
UpdateTriggerRequest updateTReq = new UpdateTriggerRequest(SERVICE_NAME, FUNCTION_NAME,
TRIGGER_NAME);
updateTReq.setTriggerConfig(updateTriggerConfig);
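// sleep so that lastModifiedTime differs between create and update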
Thread.sleep(1000);
UpdateTriggerResponse updateTResp = updateTrigger(updateTReq);
assertEquals(triggerOld.getTriggerName(), updateTResp.getTriggerName());
HttpTriggerConfig tcOld = gson
.fromJson(gson.toJson(triggerOld.getTriggerConfig()), HttpTriggerConfig.class);
HttpTriggerConfig tcNew = gson
.fromJson(gson.toJson(updateTResp.getTriggerConfig()), HttpTriggerConfig.class);
assertFalse(deepEquals(tcOld.getMethods(), tcNew.getMethods()));
assertNotEquals(tcOld.getAuthType(), tcNew.getAuthType());
assertEquals(triggerOld.getCreatedTime(), updateTResp.getCreatedTime());
assertEquals(triggerOld.getTriggerType(), updateTResp.getTriggerType());
Date dateOld = DATE_FORMAT.parse(triggerOld.getLastModifiedTime());
Date dateNew = DATE_FORMAT.parse(updateTResp.getLastModifiedTime());
assertTrue(dateOld.before(dateNew));
// delete http trigger
deleteTrigger(SERVICE_NAME, FUNCTION_NAME, TRIGGER_NAME);
getTReq = new GetTriggerRequest(SERVICE_NAME, FUNCTION_NAME,
TRIGGER_NAME);
try {
client.getTrigger(getTReq);
fail("ClientException is expected");
} catch (ClientException e) {
assertEquals(404, e.getStatusCode());
}
cleanUpFunctions(SERVICE_NAME);
cleanupService(SERVICE_NAME);
}
@Test
public void testListServices() {
final int numServices = 10;
final int limit = 3;
final String serviceNamePrefix = SERVICE_NAME + "_listService_test_";
// Create multiple services
for (int i = 0; i < numServices; i++) {
try {
client.getService(new GetServiceRequest(serviceNamePrefix + i));
cleanupService(serviceNamePrefix + i);
} catch (ClientException e) {
if (!ErrorCodes.SERVICE_NOT_FOUND.equals(e.getErrorCode())) {
throw new RuntimeException("Cleanup failed");
}
}
CreateServiceRequest request = new CreateServiceRequest();
request.setServiceName(serviceNamePrefix + i);
request.setDescription(SERVICE_DESC_OLD);
request.setRole(ROLE);
CreateServiceResponse response = client.createService(request);
assertFalse(Strings.isNullOrEmpty(response.getRequestId()));
TagResourceRequest req = new TagResourceRequest();
req.setResourceArn(String.format("acs:fc:%s:%s:services/%s", REGION, ACCOUNT_ID, serviceNamePrefix + i));
Map<String, String> tags = new HashMap<String, String>();
if (i % 2 == 0) {
tags.put("k1", "v1");
} else {
tags.put("k2", "v2");
}
tags.put("k3", "v3");
req.setTags(tags);
TagResourceResponse resp = client.tagResource(req);
assertFalse(Strings.isNullOrEmpty(resp.getRequestId()));
}
ListServicesRequest listRequest = new ListServicesRequest();
listRequest.setLimit(limit);
listRequest.setPrefix(serviceNamePrefix);
ListServicesResponse listResponse = client.listServices(listRequest);
int numCalled = 1;
String nextToken = listResponse.getNextToken();
while (nextToken != null) {
listRequest.setNextToken(nextToken);
listResponse = client.listServices(listRequest);
nextToken = listResponse.getNextToken();
numCalled++;
}
assertEquals(numServices / limit + 1, numCalled);
listRequest = new ListServicesRequest();
listRequest.setPrefix(serviceNamePrefix);
Map<String, String> tags = new HashMap<String, String>();
tags.put("k3", "v3");
listRequest.setTags(tags);
listResponse = client.listServices(listRequest);
assertEquals(numServices, listResponse.getServices().length);
tags.put("k1", "v1");
listRequest.setTags(tags);
listResponse = client.listServices(listRequest);
assertEquals(numServices / 2, listResponse.getServices().length);
tags.put("k2", "v2");
listRequest.setTags(tags);
listResponse = client.listServices(listRequest);
assertEquals(0, listResponse.getServices().length);
// Delete services
for (int i = 0; i < numServices; i++) {
String resourceArn = String.format("acs:fc:%s:%s:services/%s", REGION, ACCOUNT_ID, serviceNamePrefix + i);
UntagResourceRequest req = new UntagResourceRequest();
req.setResourceArn(resourceArn);
String[] tagKeys = new String[]{};
req.setTagKeys(tagKeys);
req.setAll(true);
UntagResourceResponse resp = client.untagResource(req);
assertFalse(Strings.isNullOrEmpty(resp.getRequestId()));
cleanupService(serviceNamePrefix + i);
}
}
@Test
public void testListFunctions() throws IOException {
final int numServices = 10;
final int limit = 3;
// Create service
createService(SERVICE_NAME);
// Create multiple functions under the test service
for (int i = 0; i < numServices; i++) {
CreateFunctionResponse createFResp = createFunction(FUNCTION_NAME + i);
assertFalse(Strings.isNullOrEmpty(createFResp.getRequestId()));
}
ListFunctionsRequest listRequest = new ListFunctionsRequest(SERVICE_NAME);
listRequest.setLimit(limit);
listRequest.setPrefix(FUNCTION_NAME);
ListFunctionsResponse listResponse = client.listFunctions(listRequest);
int numCalled = 1;
String nextToken = listResponse.getNextToken();
while (nextToken != null) {
listRequest.setNextToken(nextToken);
listResponse = client.listFunctions(listRequest);
nextToken = listResponse.getNextToken();
numCalled++;
}
assertEquals(numServices / limit + 1, numCalled);
}
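// Deliberately lacks the @Test annotation (note the "ignore" prefix), so it does not run with the suite.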
public void ignoreTestListTriggers() throws IOException {
final int numTriggers = 5;
final int limit = 2;
// Create service
createService(SERVICE_NAME);
createFunction(FUNCTION_NAME);
// Create multiple trigger under the test function
for (int i = 0; i < numTriggers; i++) {
String prefix = "prefix";
String suffix = "suffix";
CreateTriggerResponse createTResp = createOssTrigger(TRIGGER_NAME + i,
prefix + i, suffix + i);
assertFalse(Strings.isNullOrEmpty(createTResp.getRequestId()));
}
ListTriggersRequest listTReq = new ListTriggersRequest(SERVICE_NAME, FUNCTION_NAME);
listTReq.setLimit(limit);
ListTriggersResponse listTResp = client.listTriggers(listTReq);
int numCalled = 1;
String nextToken = listTResp.getNextToken();
while (nextToken != null) {
listTReq.setNextToken(nextToken);
listTResp = client.listTriggers(listTReq);
nextToken = listTResp.getNextToken();
numCalled++;
}
assertEquals(numTriggers / limit + 1, numCalled);
for (int i = 0; i < numTriggers; i++) {
DeleteTriggerResponse deleteTResp = deleteTrigger(
SERVICE_NAME, FUNCTION_NAME, TRIGGER_NAME + i);
assertFalse(Strings.isNullOrEmpty(deleteTResp.getRequestId()));
}
}
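// Exercises put/get/list/delete of on-demand configs across several qualifiers, including prefix, startKey, and limit pagination.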
@Test
public void testOnDemandConfig() throws Exception {
final int numOnDemandConfigs = 5;
for (int i = 0; i < numOnDemandConfigs; i++) {
// put first
String qualifier = String.format("test-qualifier-%d", i);
PutOnDemandConfigRequest putOnDemandConfigRequest = new PutOnDemandConfigRequest(SERVICE_NAME, qualifier, FUNCTION_NAME, i);
PutOnDemandConfigResponse putOnDemandConfigResponse = client.putOnDemandConfig(putOnDemandConfigRequest);
// validate put response
assertNotEquals("", putOnDemandConfigResponse.getRequestId());
String resource = putOnDemandConfigResponse.getResource();
assertEquals(String.format("services/%s.%s/functions/%s", SERVICE_NAME, qualifier, FUNCTION_NAME), resource);
assertEquals(i, putOnDemandConfigResponse.getMaximumInstanceCount());
// validate get response
GetOnDemandConfigRequest getOnDemandConfigRequest = new GetOnDemandConfigRequest(SERVICE_NAME, qualifier, FUNCTION_NAME);
GetOnDemandConfigResponse getOnDemandConfigResponse = client.getOnDemandConfig(getOnDemandConfigRequest);
assertNotEquals("", getOnDemandConfigResponse.getRequestId());
resource = getOnDemandConfigResponse.getResource();
assertEquals(String.format("services/%s.%s/functions/%s", SERVICE_NAME, qualifier, FUNCTION_NAME), resource);
assertEquals(i, getOnDemandConfigResponse.getMaximumInstanceCount());
}
// validate list response
ListOnDemandConfigsRequest listOnDemandConfigsRequest = new ListOnDemandConfigsRequest();
ListOnDemandConfigsResponse listOnDemandConfigsResponse = client.listOnDemandConfigs(listOnDemandConfigsRequest);
assertNotEquals("", listOnDemandConfigsResponse.getRequestId());
assertEquals(numOnDemandConfigs, listOnDemandConfigsResponse.getOnDemandConfigs().length);
// with prefix
listOnDemandConfigsRequest = new ListOnDemandConfigsRequest();
listOnDemandConfigsRequest.setPrefix(String.format("services/%s.%s", SERVICE_NAME, "test-qualifier-1"));
listOnDemandConfigsResponse = client.listOnDemandConfigs(listOnDemandConfigsRequest);
assertNotEquals("", listOnDemandConfigsResponse.getRequestId());
assertEquals(1, listOnDemandConfigsResponse.getOnDemandConfigs().length);
listOnDemandConfigsRequest = new ListOnDemandConfigsRequest();
listOnDemandConfigsRequest.setStartKey(String.format("services/%s.%s", SERVICE_NAME, "test-qualifier-3"));
listOnDemandConfigsResponse = client.listOnDemandConfigs(listOnDemandConfigsRequest);
assertNotEquals("", listOnDemandConfigsResponse.getRequestId());
assertEquals(2, listOnDemandConfigsResponse.getOnDemandConfigs().length);
// with limit
listOnDemandConfigsRequest = new ListOnDemandConfigsRequest();
listOnDemandConfigsRequest.setLimit(3);
listOnDemandConfigsResponse = client.listOnDemandConfigs(listOnDemandConfigsRequest);
assertEquals(3, listOnDemandConfigsResponse.getOnDemandConfigs().length);
HashMap<String, OnDemandConfigMetadata> map = new HashMap<String, OnDemandConfigMetadata>();
for (OnDemandConfigMetadata data : listOnDemandConfigsResponse.getOnDemandConfigs()) {
map.put(data.getResource(), data);
}
// list again
listOnDemandConfigsRequest.setNextToken(listOnDemandConfigsResponse.getNextToken());
listOnDemandConfigsResponse = client.listOnDemandConfigs(listOnDemandConfigsRequest);
assertEquals(2, listOnDemandConfigsResponse.getOnDemandConfigs().length);
for (OnDemandConfigMetadata data : listOnDemandConfigsResponse.getOnDemandConfigs()) {
map.put(data.getResource(), data);
}
// validate there's no dup resource
assertEquals(numOnDemandConfigs, map.keySet().size());
for (int i = 0; i < numOnDemandConfigs; i++) {
// delete configs
String qualifier = String.format("test-qualifier-%d", i);
DeleteOnDemandConfigRequest deleteOnDemandConfigRequest = new DeleteOnDemandConfigRequest(SERVICE_NAME, qualifier, FUNCTION_NAME);
DeleteOnDemandConfigResponse deleteOnDemandConfigResponse = client.deleteOnDemandConfig(deleteOnDemandConfigRequest);
assertNotEquals("", deleteOnDemandConfigResponse.getRequestId());
try {
// validate config not found
GetOnDemandConfigRequest getOnDemandConfigRequest = new GetOnDemandConfigRequest(SERVICE_NAME, qualifier, FUNCTION_NAME);
client.getOnDemandConfig(getOnDemandConfigRequest);
fail("ClientException is expected");
} catch (ClientException e) {
assertEquals("OnDemandConfigNotFound", e.getErrorCode());
}
}
// validate no configs can be listed
listOnDemandConfigsRequest = new ListOnDemandConfigsRequest();
listOnDemandConfigsResponse = client.listOnDemandConfigs(listOnDemandConfigsRequest);
assertNotEquals("", listOnDemandConfigsResponse.getRequestId());
assertNull(listOnDemandConfigsResponse.getOnDemandConfigs());
}
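// Covers put/get/list/delete of per-qualifier async configs, including nextToken-based pagination.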
@Test
public void testFunctionAsyncConfig() throws Exception {
createService(SERVICE_NAME);
createFunction(FUNCTION_NAME);
String lastVersion = cleanUpVersions(SERVICE_NAME);
// publish a version
PublishVersionRequest publishVersionRequest = new PublishVersionRequest(SERVICE_NAME);
PublishVersionResponse publishVersionResponse = client
.publishVersion(publishVersionRequest);
assertEquals(String.format("%d", Integer.parseInt(lastVersion) + 1),
publishVersionResponse.getVersionId());
lastVersion = publishVersionResponse.getVersionId();
String destFmt = "acs:mns:%s:%s:/queues/qx/messages";
Destination dest = new Destination();
dest.setDestination(String
.format(destFmt, REGION, ACCOUNT_ID));
AsyncConfig config = new AsyncConfig();
config.setDestinationConfig(new DestinationConfig());
config.destinationConfig.setonFailure(dest);
config.setMaxAsyncEventAgeInSeconds(null);
config.setMaxAsyncRetryAttempts(1);
final int numConfigs = 3;
for (int i = 0; i < numConfigs; i++) {
String qualifier = String.format("test-qualifier-%d", i);
// Create an alias against it
String aliasName = qualifier;
CreateAliasRequest createAliasRequest = new CreateAliasRequest(SERVICE_NAME, aliasName,
lastVersion);
client.createAlias(createAliasRequest);
// put first
PutFunctionAsyncConfigRequest putFunctionAsyncConfigRequest = new PutFunctionAsyncConfigRequest(SERVICE_NAME, qualifier, FUNCTION_NAME);
putFunctionAsyncConfigRequest.setAsyncConfig(config);
PutFunctionAsyncConfigResponse pResp = client.putFunctionAsyncConfig(putFunctionAsyncConfigRequest);
// validate put response
assertNotEquals("", pResp.getRequestId());
assertNull(pResp.getAsyncConfig().getMaxAsyncEventAgeInSeconds());
assertEquals(config.getMaxAsyncRetryAttempts(), pResp.getAsyncConfig().getMaxAsyncRetryAttempts());
assertEquals(String.format(destFmt, REGION, ACCOUNT_ID),
pResp.getAsyncConfig().getDestinationConfig().getOnFailure().getDestination());
// validate get response
GetFunctionAsyncConfigRequest getFunctionAsyncConfigRequest = new GetFunctionAsyncConfigRequest(SERVICE_NAME, qualifier, FUNCTION_NAME);
GetFunctionAsyncConfigResponse gResp = client.getFunctionAsyncConfig(getFunctionAsyncConfigRequest);
assertNotEquals("", gResp.getRequestId());
assertEquals(String.format(destFmt, REGION, ACCOUNT_ID),
gResp.getAsyncConfig().getDestinationConfig().getOnFailure().getDestination());
}
// validate list response
ListFunctionAsyncConfigsRequest listFunctionAsyncConfigsRequest = new ListFunctionAsyncConfigsRequest(SERVICE_NAME, FUNCTION_NAME);
ListFunctionAsyncConfigsResponse lResp = client.listFunctionAsyncConfigs(listFunctionAsyncConfigsRequest);
assertNotEquals("", lResp.getRequestId());
assertEquals(numConfigs, lResp.getAsyncConfigs().length);
assertEquals("", lResp.getNextToken());
// with limit
listFunctionAsyncConfigsRequest = new ListFunctionAsyncConfigsRequest(SERVICE_NAME, FUNCTION_NAME);
listFunctionAsyncConfigsRequest.setLimit(1);
lResp = client.listFunctionAsyncConfigs(listFunctionAsyncConfigsRequest);
assertEquals(1, lResp.getAsyncConfigs().length);
assertEquals("test-qualifier-1", lResp.getNextToken());
// list again
ListFunctionAsyncConfigsRequest listFunctionAsyncConfigsRequest2 = new ListFunctionAsyncConfigsRequest(SERVICE_NAME, FUNCTION_NAME);
listFunctionAsyncConfigsRequest2.setNextToken(lResp.getNextToken());
lResp = client.listFunctionAsyncConfigs(listFunctionAsyncConfigsRequest2);
assertEquals(2, lResp.getAsyncConfigs().length);
for (int i = 0; i < numConfigs; i++) {
// delete configs
String qualifier = String.format("test-qualifier-%d", i);
DeleteFunctionAsyncConfigRequest deleteFunctionAsyncConfigRequest = new DeleteFunctionAsyncConfigRequest(SERVICE_NAME, qualifier, FUNCTION_NAME);
DeleteFunctionAsyncConfigResponse deleteFunctionAsyncConfigResponse = client.deleteFunctionAsyncConfig(deleteFunctionAsyncConfigRequest);
assertNotEquals("", deleteFunctionAsyncConfigResponse.getRequestId());
}
// validate no configs can be listed
ListFunctionAsyncConfigsRequest listFunctionAsyncConfigsRequest3 = new ListFunctionAsyncConfigsRequest(SERVICE_NAME, FUNCTION_NAME);
lResp = client.listFunctionAsyncConfigs(listFunctionAsyncConfigsRequest3);
assertNotEquals("", lResp.getRequestId());
assertNull(lResp.getAsyncConfigs());
}
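// Ignored by default: walks the stateful async invocation lifecycle (put config, invoke, poll status, list, attempt stop).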
@Test
@Ignore
public void testStatefulAsyncInvocation() throws Exception {
long timeNow = System.currentTimeMillis();
String invocationID = String.format("stateful-invocationId-%d-1", timeNow);
String invocationID2 = String.format("stateful-invocationId-%d-2", timeNow);
createService(SERVICE_NAME);
createFunction(FUNCTION_NAME);
AsyncConfig config = new AsyncConfig();
config.setStatefulInvocation(true);
PutFunctionAsyncConfigRequest putFunctionAsyncConfigRequest = new PutFunctionAsyncConfigRequest(SERVICE_NAME, "", FUNCTION_NAME);
putFunctionAsyncConfigRequest.setAsyncConfig(config);
PutFunctionAsyncConfigResponse pResp = client.putFunctionAsyncConfig(putFunctionAsyncConfigRequest);
// validate put response
assertTrue(pResp.getAsyncConfig().getStatefulInvocation());
// validate get response
GetFunctionAsyncConfigRequest getFunctionAsyncConfigRequest = new GetFunctionAsyncConfigRequest(SERVICE_NAME, "", FUNCTION_NAME);
GetFunctionAsyncConfigResponse gResp = client.getFunctionAsyncConfig(getFunctionAsyncConfigRequest);
assertTrue(gResp.getAsyncConfig().getStatefulInvocation());
// async invocation
// Headers passed in through setHeader should be respected
InvokeFunctionRequest request = new InvokeFunctionRequest(SERVICE_NAME, FUNCTION_NAME);
request.setHeader("x-fc-invocation-type", Const.INVOCATION_TYPE_ASYNC);
request.setStatefulAsyncInvocationId(invocationID);
InvokeFunctionResponse ivkResp = client.invokeFunction(request);
request.setStatefulAsyncInvocationId(invocationID2);
client.invokeFunction(request);
// get stateful invocation
GetStatefulAsyncInvocationRequest req = new GetStatefulAsyncInvocationRequest(SERVICE_NAME, "", FUNCTION_NAME, invocationID);
GetStatefulAsyncInvocationResponse resp = client.getStatefulAsyncInvocation(req);
while (!resp.getStatefulAsyncInvocation().status.equals(StatefulInvocationStatus.Failed.toString()) &&
!resp.getStatefulAsyncInvocation().status.equals(StatefulInvocationStatus.Succeeded.toString()) &&
!resp.getStatefulAsyncInvocation().status.equals(StatefulInvocationStatus.Stopped.toString())) {
Thread.sleep(1000); // back off between polls
resp = client.getStatefulAsyncInvocation(req);
assertEquals(invocationID, resp.getStatefulAsyncInvocation().invocationId);
}
assertNotNull(resp.getStatefulAsyncInvocation());
assertEquals(StatefulInvocationStatus.Succeeded.toString(), resp.getStatefulAsyncInvocation().status);
assertEquals(ivkResp.getRequestId(), resp.getStatefulAsyncInvocation().getRequestId());
GetStatefulAsyncInvocationResponse resp2 = client.getStatefulAsyncInvocation(req);
// get stateful invocation 2
while (!resp2.getStatefulAsyncInvocation().status.equals(StatefulInvocationStatus.Failed.toString()) &&
!resp2.getStatefulAsyncInvocation().status.equals(StatefulInvocationStatus.Succeeded.toString()) &&
!resp2.getStatefulAsyncInvocation().status.equals(StatefulInvocationStatus.Stopped.toString())) {
Thread.sleep(1000); // back off between polls
GetStatefulAsyncInvocationRequest req2 = new GetStatefulAsyncInvocationRequest(SERVICE_NAME, "", FUNCTION_NAME, invocationID2);
resp2 = client.getStatefulAsyncInvocation(req2);
assertEquals(invocationID2, resp2.getStatefulAsyncInvocation().invocationId);
}
assertNotNull(resp2.getStatefulAsyncInvocation());
assertEquals(StatefulInvocationStatus.Succeeded.toString(), resp2.getStatefulAsyncInvocation().status);
Thread.sleep(5000);
// list stateful invocation
ListStatefulAsyncInvocationsRequest lReq = new ListStatefulAsyncInvocationsRequest(SERVICE_NAME, FUNCTION_NAME);
lReq.setInvocationIdPrefix(String.format("stateful-invocationId-%d", timeNow));
lReq.setIncludePayload(true);
lReq.setLimit(100);
lReq.setSortOrderByTime(SortOrder.desc);
lReq.setStartedTimeBegin(String.format("%d", timeNow - 5 * 1000));
ListStatefulAsyncInvocationsResponse lResp = client.listStatefulAsyncInvocations(lReq);
assertNotNull(lResp.getStatefulAsyncInvocations());
assertEquals(2, lResp.getStatefulAsyncInvocations().length);
long timeNowx = System.currentTimeMillis();
lReq.setStartedTimeBegin(String.format("%d", timeNowx));
ListStatefulAsyncInvocationsResponse lResp2 = client.listStatefulAsyncInvocations(lReq);
assertNull(lResp2.getStatefulAsyncInvocations());
// stop an unsupported status invocation
StopStatefulAsyncInvocationRequest sReq = new StopStatefulAsyncInvocationRequest(SERVICE_NAME, "", FUNCTION_NAME, invocationID);
try {
client.stopStatefulAsyncInvocation(sReq);
fail("should get InvalidArgument");
} catch (ClientException ex) {
assertEquals("InvalidArgument", ex.getErrorCode());
}
// delete async config for stateful async invocation
DeleteFunctionAsyncConfigRequest deleteFunctionAsyncConfigRequest = new DeleteFunctionAsyncConfigRequest(SERVICE_NAME, "", FUNCTION_NAME);
DeleteFunctionAsyncConfigResponse deleteFunctionAsyncConfigResponse = client.deleteFunctionAsyncConfig(deleteFunctionAsyncConfigRequest);
assertNotEquals("", deleteFunctionAsyncConfigResponse.getRequestId());
}
@Test
public void testOpenFunctionCompute() {
OpenFunctionComputeRequest request = new OpenFunctionComputeRequest();
request.setAction("OpenFCService");
OpenFunctionComputeResponse response = client.openFunctionComputeService(request);
if (response.isSuccess()) {
assertTrue(StringUtils.isNotEmpty(response.getOrderId()));
assertTrue(StringUtils.isNotEmpty(response.getRequestId()));
} else {
assertEquals(PopClient.ERROR_CODE_ORDER_OPENED, response.getCode());
}
}
@Test
public void testOpenFunctionComputeValidate() {
OpenFunctionComputeRequest request = new OpenFunctionComputeRequest();
request.setAction("OpenFCService222");
try {
client.openFunctionComputeService(request);
fail("ClientException is expected");
} catch (ClientException e) {
assertEquals("action value must be OpenFCService", e.getMessage());
}
request = new OpenFunctionComputeRequest();
request.setAction(null);
try {
client.openFunctionComputeService(request);
fail("ClientException is expected");
} catch (ClientException e) {
assertEquals("action cannot be blank", e.getMessage());
}
}
@Test
public void testCreateServiceValidate() {
try {
CreateServiceRequest request = new CreateServiceRequest();
client.createService(request);
fail("ClientException is expected");
} catch (ClientException e) {
assertEquals(ErrorCodes.INVALID_ARGUMENT, e.getErrorCode());
}
}
@Test
public void testCreateFunctionValidate() {
try {
CreateFunctionRequest request = new CreateFunctionRequest(null);
client.createFunction(request);
fail("ClientException is expected");
} catch (ClientException e) {
assertTrue(e.getMessage().contains(VALIDATE_MSG));
}
try {
CreateFunctionRequest request = new CreateFunctionRequest("");
client.createFunction(request);
fail("ClientException is expected");
} catch (ClientException e) {
assertTrue(e.getMessage().contains(VALIDATE_MSG));
}
}
@Test
public void testCreateTriggerValidate() {
try {
CreateTriggerRequest request = new CreateTriggerRequest(SERVICE_NAME, null);
client.createTrigger(request);
fail("ClientException is expected");
} catch (ClientException e) {
assertTrue(e.getMessage().contains(VALIDATE_MSG));
}
try {
CreateTriggerRequest request = new CreateTriggerRequest(SERVICE_NAME, "");
client.createTrigger(request);
fail("ClientException is expected");
} catch (ClientException e) {
assertTrue(e.getMessage().contains(VALIDATE_MSG));
}
try {
CreateTriggerRequest request = new CreateTriggerRequest(null, FUNCTION_NAME);
client.createTrigger(request);
fail("ClientException is expected");
} catch (ClientException e) {
assertTrue(e.getMessage().contains(VALIDATE_MSG));
}
try {
CreateTriggerRequest request = new CreateTriggerRequest("", FUNCTION_NAME);
client.createTrigger(request);
fail("ClientException is expected");
} catch (ClientException e) {
assertTrue(e.getMessage().contains(VALIDATE_MSG));
}
}
@Test
public void testGetServiceValidate() {
try {
GetServiceRequest request = new GetServiceRequest(null);
client.getService(request);
fail("ClientException is expected");
} catch (ClientException e) {
assertTrue(e.getMessage().contains(VALIDATE_MSG));
}
try {
GetServiceRequest request = new GetServiceRequest("");
client.getService(request);
fail("ClientException is expected");
} catch (ClientException e) {
assertTrue(e.getMessage().contains(VALIDATE_MSG));
}
}
@Test
public void testGetFunctionValidate() {
try {
GetFunctionRequest request = new GetFunctionRequest(SERVICE_NAME, null);
client.getFunction(request);
fail("ClientException is expected");
} catch (ClientException e) {
assertTrue(e.getMessage().contains(VALIDATE_MSG));
}
try {
GetFunctionRequest request = new GetFunctionRequest(SERVICE_NAME, "");
client.getFunction(request);
fail("ClientException is expected");
} catch (ClientException e) {
assertTrue(e.getMessage().contains(VALIDATE_MSG));
}
try {
GetFunctionRequest request = new GetFunctionRequest(null, FUNCTION_NAME);
client.getFunction(request);
fail("ClientException is expected");
} catch (ClientException e) {
assertTrue(e.getMessage().contains(VALIDATE_MSG));
}
try {
GetFunctionRequest request = new GetFunctionRequest("", FUNCTION_NAME);
client.getFunction(request);
fail("ClientException is expected");
} catch (ClientException e) {
assertTrue(e.getMessage().contains(VALIDATE_MSG));
}
}
@Test
public void testGetTriggerValidate() {
try {
GetTriggerRequest request = new GetTriggerRequest(SERVICE_NAME, FUNCTION_NAME, null);
client.getTrigger(request);
fail("ClientException is expected");
} catch (ClientException e) {
assertTrue(e.getMessage().contains(VALIDATE_MSG));
}
try {
GetTriggerRequest request = new GetTriggerRequest(SERVICE_NAME, FUNCTION_NAME, "");
client.getTrigger(request);
fail("ClientException is expected");
} catch (ClientException e) {
assertTrue(e.getMessage().contains(VALIDATE_MSG));
}
try {
GetTriggerRequest request = new GetTriggerRequest(SERVICE_NAME, null, TRIGGER_NAME);
client.getTrigger(request);
fail("ClientException is expected");
} catch (ClientException e) {
assertTrue(e.getMessage().contains(VALIDATE_MSG));
}
try {
GetTriggerRequest request = new GetTriggerRequest(SERVICE_NAME, "", TRIGGER_NAME);
client.getTrigger(request);
fail("ClientException is expected");
} catch (ClientException e) {
assertTrue(e.getMessage().contains(VALIDATE_MSG));
}
try {
GetTriggerRequest request = new GetTriggerRequest(null, FUNCTION_NAME, TRIGGER_NAME);
client.getTrigger(request);
fail("ClientException is expected");
} catch (ClientException e) {
assertTrue(e.getMessage().contains(VALIDATE_MSG));
}
try {
GetTriggerRequest request = new GetTriggerRequest("", FUNCTION_NAME, TRIGGER_NAME);
client.getTrigger(request);
fail("ClientException is expected");
} catch (ClientException e) {
assertTrue(e.getMessage().contains(VALIDATE_MSG));
}
}
@Test
public void testInvokeFunctionValidate() {
try {
InvokeFunctionRequest request = new InvokeFunctionRequest(SERVICE_NAME, null);
client.invokeFunction(request);
fail("ClientException is expected");
} catch (ClientException e) {
assertTrue(e.getMessage().contains(VALIDATE_MSG));
}
try {
InvokeFunctionRequest request = new InvokeFunctionRequest(SERVICE_NAME, "");
client.invokeFunction(request);
fail("ClientException is expected");
} catch (ClientException e) {
assertTrue(e.getMessage().contains(VALIDATE_MSG));
}
try {
InvokeFunctionRequest request = new InvokeFunctionRequest("", FUNCTION_NAME);
client.invokeFunction(request);
fail("ClientException is expected");
} catch (ClientException e) {
assertTrue(e.getMessage().contains(VALIDATE_MSG));
}
try {
InvokeFunctionRequest request = new InvokeFunctionRequest(null, FUNCTION_NAME);
client.invokeFunction(request);
fail("ClientException is expected");
} catch (ClientException e) {
assertTrue(e.getMessage().contains(VALIDATE_MSG));
}
}
@Test
public void testListFunctionsValidate() {
try {
ListFunctionsRequest request = new ListFunctionsRequest(null);
client.listFunctions(request);
fail("ClientException is expected");
} catch (ClientException e) {
assertTrue(e.getMessage().contains(VALIDATE_MSG));
}
try {
ListFunctionsRequest request = new ListFunctionsRequest("");
client.listFunctions(request);
fail("ClientException is expected");
} catch (ClientException e) {
assertTrue(e.getMessage().contains(VALIDATE_MSG));
}
}
@Test
public void testListTriggersValidate() {
try {
listTriggers(null, FUNCTION_NAME);
fail("ClientException is expected");
} catch (ClientException e) {
assertTrue(e.getMessage().contains(VALIDATE_MSG));
}
try {
listTriggers("", FUNCTION_NAME);
fail("ClientException is expected");
} catch (ClientException e) {
assertTrue(e.getMessage().contains(VALIDATE_MSG));
}
try {
listTriggers(SERVICE_NAME, null);
fail("ClientException is expected");
} catch (ClientException e) {
assertTrue(e.getMessage().contains(VALIDATE_MSG));
}
try {
listTriggers(SERVICE_NAME, "");
fail("ClientException is expected");
} catch (ClientException e) {
assertTrue(e.getMessage().contains(VALIDATE_MSG));
}
}
@Test
public void testUpdateServiceValidate() {
try {
UpdateServiceRequest request = new UpdateServiceRequest(null);
client.updateService(request);
fail("ClientException is expected");
} catch (ClientException e) {
assertTrue(e.getMessage().contains(VALIDATE_MSG));
}
try {
UpdateServiceRequest request = new UpdateServiceRequest("");
client.updateService(request);
fail("ClientException is expected");
} catch (ClientException e) {
assertTrue(e.getMessage().contains(VALIDATE_MSG));
}
}
@Test
public void testUpdateFunctionValidate() {
try {
UpdateFunctionRequest request = new UpdateFunctionRequest(SERVICE_NAME, null);
client.updateFunction(request);
fail("ClientException is expected");
} catch (ClientException e) {
assertTrue(e.getMessage().contains(VALIDATE_MSG));
}
try {
UpdateFunctionRequest request = new UpdateFunctionRequest(SERVICE_NAME, "");
client.updateFunction(request);
fail("ClientException is expected");
} catch (ClientException e) {
assertTrue(e.getMessage().contains(VALIDATE_MSG));
}
try {
UpdateFunctionRequest request = new UpdateFunctionRequest(null, FUNCTION_NAME);
client.updateFunction(request);
fail("ClientException is expected");
} catch (ClientException e) {
assertTrue(e.getMessage().contains(VALIDATE_MSG));
}
try {
UpdateFunctionRequest request = new UpdateFunctionRequest("", FUNCTION_NAME);
client.updateFunction(request);
fail("ClientException is expected");
} catch (ClientException e) {
assertTrue(e.getMessage().contains(VALIDATE_MSG));
}
}
@Test
public void testUpdateTriggerValidate() {
try {
UpdateTriggerRequest request = new UpdateTriggerRequest(SERVICE_NAME, FUNCTION_NAME,
null);
client.updateTrigger(request);
fail("ClientException is expected");
} catch (ClientException e) {
assertTrue(e.getMessage().contains(VALIDATE_MSG));
}
try {
UpdateTriggerRequest request = new UpdateTriggerRequest(SERVICE_NAME, FUNCTION_NAME,
"");
client.updateTrigger(request);
fail("ClientException is expected");
} catch (ClientException e) {
assertTrue(e.getMessage().contains(VALIDATE_MSG));
}
try {
UpdateTriggerRequest request = new UpdateTriggerRequest(SERVICE_NAME, null,
TRIGGER_NAME);
client.updateTrigger(request);
fail("ClientException is expected");
} catch (ClientException e) {
assertTrue(e.getMessage().contains(VALIDATE_MSG));
}
try {
UpdateTriggerRequest request = new UpdateTriggerRequest(SERVICE_NAME, "", TRIGGER_NAME);
client.updateTrigger(request);
fail("ClientException is expected");
} catch (ClientException e) {
assertTrue(e.getMessage().contains(VALIDATE_MSG));
}
try {
UpdateTriggerRequest request = new UpdateTriggerRequest(null, FUNCTION_NAME,
TRIGGER_NAME);
client.updateTrigger(request);
fail("ClientException is expected");
} catch (ClientException e) {
assertTrue(e.getMessage().contains(VALIDATE_MSG));
}
try {
UpdateTriggerRequest request = new UpdateTriggerRequest("", FUNCTION_NAME,
TRIGGER_NAME);
client.updateTrigger(request);
fail("ClientException is expected");
} catch (ClientException e) {
assertTrue(e.getMessage().contains(VALIDATE_MSG));
}
}
@Test
public void testCreateFunctionSetDir() throws IOException {
createService(SERVICE_NAME);
// Create a function
CreateFunctionRequest createFuncReq = new CreateFunctionRequest(SERVICE_NAME);
createFuncReq.setFunctionName(FUNCTION_NAME);
createFuncReq.setDescription("Function for test");
createFuncReq.setMemorySize(128);
createFuncReq.setHandler("hello_world.handler");
createFuncReq.setRuntime("nodejs4.4");
// Setup code directory
String tmpDir = "/tmp/fc_test_" + UUID.randomUUID();
String funcFilePath = tmpDir + "/" + "hello_world.js";
new File(tmpDir).mkdir();
PrintWriter out = new PrintWriter(funcFilePath);
out.println(
"'use strict'; module.exports.handler = function(event, context, callback) {console.log('hello world'); callback(null, 'hello world');};");
out.close();
Code code = new Code().setDir(tmpDir);
createFuncReq.setCode(code);
createFuncReq.setTimeout(10);
client.createFunction(createFuncReq);
// Invoke the function
InvokeFunctionRequest request = new InvokeFunctionRequest(SERVICE_NAME, FUNCTION_NAME);
InvokeFunctionResponse response = client.invokeFunction(request);
assertEquals("hello world", new String(response.getPayload()));
// Cleanups
client.deleteFunction(new DeleteFunctionRequest(SERVICE_NAME, FUNCTION_NAME));
client.deleteService(new DeleteServiceRequest(SERVICE_NAME));
new File(funcFilePath).delete();
new File(tmpDir).delete();
}
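/**
 * With LogType "Tail", the invocation response carries the tail of the
 * function log (duration, billed duration, memory size) next to the payload.
 */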
@Test
public void testInvokeFunctionLogTypeSyncTail() throws IOException {
createService(SERVICE_NAME);
createFunction(FUNCTION_NAME);
InvokeFunctionRequest request = new InvokeFunctionRequest(SERVICE_NAME, FUNCTION_NAME);
request.setLogType("Tail");
InvokeFunctionResponse response = client.invokeFunction(request);
assertNotNull(response.getLogResult());
assertTrue(response.getLogResult().contains("Duration"));
assertTrue(response.getLogResult().contains("Billed Duration"));
assertTrue(response.getLogResult().contains("Memory Size: 128 MB"));
assertEquals("hello world", new String(response.getPayload()));
}
@Test(expected = ClientException.class)
public void testInvokeFunctionTimeout() throws IOException {
createService(SERVICE_NAME);
createFunction(FUNCTION_NAME);
InvokeFunctionRequest request = new InvokeFunctionRequest(SERVICE_NAME, FUNCTION_NAME);
// Use a 1 ms read timeout so the invocation is guaranteed to time out on the client side
client.getConfig().setReadTimeoutMillis(1);
client.invokeFunction(request);
}
@Test
public void testInvokeFunctionLogTypeSyncNone() throws IOException {
createService(SERVICE_NAME);
String funcName = FUNCTION_NAME + "-logtype";
createFunction(funcName);
InvokeFunctionRequest request = new InvokeFunctionRequest(SERVICE_NAME, funcName);
request.setLogType("None");
InvokeFunctionResponse response = client.invokeFunction(request);
assertNull(response.getLogResult());
assertEquals("hello world", new String(response.getPayload()));
}
@Test
public void testInvokeFunctionLogTypeSyncInvalid() throws IOException {
createService(SERVICE_NAME);
createFunction(FUNCTION_NAME);
InvokeFunctionRequest request = new InvokeFunctionRequest(SERVICE_NAME, FUNCTION_NAME);
request.setLogType("Invalid");
try {
client.invokeFunction(request);
} catch (ClientException e) {
assertEquals("InvalidArgument", e.getErrorCode());
assertEquals(
"LogType is set to an invalid value (allowed: Tail | None, actual: 'Invalid')",
e.getErrorMessage());
return;
}
fail("ClientException is expected");
}
@Test
public void testInvokeFunctionLogTypeAsyncNone() throws IOException, InterruptedException {
createService(SERVICE_NAME);
createFunction(FUNCTION_NAME);
InvokeFunctionRequest request = new InvokeFunctionRequest(SERVICE_NAME, FUNCTION_NAME);
request.setInvocationType(Const.INVOCATION_TYPE_ASYNC);
request.setLogType("None");
InvokeFunctionResponse response = client.invokeFunction(request);
assertEquals(HttpURLConnection.HTTP_ACCEPTED, response.getStatus());
Thread.sleep(1000);
}
@Test
public void testInvokeFunctionLogTypeAsyncInvalid() throws IOException {
createService(SERVICE_NAME);
createFunction(FUNCTION_NAME);
InvokeFunctionRequest request = new InvokeFunctionRequest(SERVICE_NAME, FUNCTION_NAME);
request.setInvocationType(Const.INVOCATION_TYPE_ASYNC);
// Anything other than None for async invoke is invalid
request.setLogType("Tail");
try {
client.invokeFunction(request);
} catch (ClientException e) {
assertEquals("InvalidArgument", e.getErrorCode());
assertEquals("LogType is set to an invalid value (allowed: None, actual: 'Tail')",
e.getErrorMessage());
return;
}
fail("ClientException is expected");
}
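/**
 * Creates a function in the given service from an in-memory zip package.
 */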
private void createFunction(String serviceName, String functionName, String handler,
String runtime, byte[] data) {
CreateFunctionRequest createFuncReq = new CreateFunctionRequest(serviceName);
Code code = new Code().setZipFile(data);
createFuncReq.setFunctionName(functionName);
createFuncReq.setDescription("test");
createFuncReq.setHandler(handler);
createFuncReq.setMemorySize(128);
createFuncReq.setRuntime(runtime);
createFuncReq.setCode(code);
createFuncReq.setTimeout(10);
client.createFunction(createFuncReq);
}
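/**
 * End-to-end HTTP trigger test: invokes an echo function through the SDK for
 * both ANONYMOUS and FUNCTION auth, then, for FUNCTION auth, exercises a
 * signed URL directly over HttpURLConnection, including its expiry behavior.
 */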
@Test
public void testHttpInvokeFunction() throws Exception {
createService(SERVICE_NAME);
// Create a function
String source = generatePythonHttpCode();
byte[] data = Util.createZipByteData("main.py", source);
// create function
createFunction(SERVICE_NAME, FUNCTION_NAME, "main.echo_handler", "python2.7", data);
for (HttpAuthType auth : new HttpAuthType[]{ANONYMOUS, FUNCTION}) {
// create http trigger
createHttpTrigger(TRIGGER_NAME.concat(auth.toString()), auth,
new HttpMethod[]{GET, POST});
// Sleep so that the function cache in the API server is refreshed
// (the default cache TTL is 10 seconds).
Thread.sleep(15000);
{
// Invoke the function
HttpInvokeFunctionRequest request = new HttpInvokeFunctionRequest(SERVICE_NAME,
FUNCTION_NAME, auth, POST, "/test/path/中文");
request.addQuery("a", "1");
request.addQuery("aaa", null);
request.setHeader("Test-Header-Key", "testHeaderValue");
request.setHeader("Content-Type", "application/json");
request.setPayload("data".getBytes());
InvokeFunctionResponse response = client.invokeFunction(request);
assertEquals(200, response.getStatus());
assertTrue(response.getHeader("Content-Type").startsWith("application/json"));
assertEquals("testHeaderValue", response.getHeader("Test-Header-Key"));
JsonObject jsonObject = gson
.fromJson(new String(response.getPayload()), JsonObject.class);
assertEquals("/test/path/中文", jsonObject.get("path").getAsString());
assertEquals("aaa=&a=1", jsonObject.get("queries").getAsString());
assertEquals("data", jsonObject.get("body").getAsString());
}
if (auth == FUNCTION) {
Date expires = new Date();
expires.setTime(expires.getTime() + 5000);
SignURLConfig input = new SignURLConfig(POST, SERVICE_NAME, FUNCTION_NAME, expires);
input.setQualifier("LATEST");
input.setEscapedPath("/test/path/" + AcsURLEncoder.percentEncode("中文"));
// Attach headers that must be sent with the signed request.
HashMap<String, String> header = new HashMap<String, String>();
header.put("Test-Header-Key", "testHeaderValue");
header.put("Content-Type", "application/json");
input.setHeader(header);
HashMap<String, String[]> queries = new HashMap<String, String[]>();
queries.put("a", new String[]{"2", "3", "1"});
queries.put("aaa", null);
input.setQueries(queries);
String urlLink = client.SignURL(input);
URL url = new URL(urlLink);
HttpURLConnection httpConn = (HttpURLConnection) url.openConnection();
httpConn.setRequestMethod("POST");
for (String k : header.keySet()) {
httpConn.setRequestProperty(k, header.get(k));
}
httpConn.setConnectTimeout(60 * 1000);
httpConn.setReadTimeout(120 * 1000);
httpConn.connect();
assertEquals(200, httpConn.getResponseCode());
Map<String, List<String>> respHeaders = httpConn.getHeaderFields();
assertTrue(respHeaders.get("Content-Type").contains("application/json"));
assertTrue(respHeaders.get("Test-Header-Key").contains("testHeaderValue"));
// After the 5-second expiry, the signed URL must be rejected with 403.
{
Thread.sleep(7000);
URL urlExpires = new URL(urlLink);
HttpURLConnection httpConnExpires = (HttpURLConnection) urlExpires.openConnection();
httpConnExpires.setRequestMethod("POST");
for (String k : header.keySet()) {
httpConnExpires.setRequestProperty(k, header.get(k));
}
httpConnExpires.setConnectTimeout(60 * 1000);
httpConnExpires.setReadTimeout(120 * 1000);
httpConnExpires.connect();
assertEquals(403, httpConnExpires.getResponseCode());
}
}
// delete trigger
deleteTrigger(SERVICE_NAME, FUNCTION_NAME, TRIGGER_NAME.concat(auth.toString()));
}
// Cleanups
client.deleteFunction(new DeleteFunctionRequest(SERVICE_NAME, FUNCTION_NAME));
client.deleteService(new DeleteServiceRequest(SERVICE_NAME));
}
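/**
 * Returns a python2.7 WSGI handler that echoes the request path, query
 * string, body, and HTTP_* headers back as a JSON document.
 */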
private String generatePythonHttpCode() {
return "import json\n" +
"from cgi import parse_qs, escape\n" +
"\n" +
"def echo_handler(environ, start_response):\n" +
" \n" +
" resp_body_map = {\n" +
" \"headers\": {},\n" +
" \"queries\": environ.get('QUERY_STRING',''),\n" +
" \"body\": environ[\"wsgi.input\"].read(int(environ.get('CONTENT_LENGTH', 0))),\n"
+
" \"path\": environ[\"PATH_INFO\"],\n" +
" \"request_uri\": environ['fc.request_uri']\n" +
" }\n" +
" \n" +
" for k, v in environ.items():\n" +
" if k.startswith(\"HTTP_\"):\n" +
" resp_body_map[\"headers\"][k[5:]] = v\n" +
" \n" +
" body = json.dumps(resp_body_map)\n" +
" \n" +
" # do something here\n" +
" status = '200 OK'\n" +
" response_headers = [('Content-type', 'application/json'),('Test-Header-Key', environ['HTTP_TEST_HEADER_KEY'])]\n"
+
" start_response(status, response_headers)\n" +
" return [body]";
}
@Test
public void testHttpInvokeFunctionWithoutQueriesAndBody() throws IOException {
createService(SERVICE_NAME);
// Create a function
String source = generatePythonHttpCode();
byte[] data = Util.createZipByteData("main.py", source);
// create function
createFunction(SERVICE_NAME, FUNCTION_NAME, "main.echo_handler", "python2.7", data);
for (HttpAuthType auth : new HttpAuthType[]{ANONYMOUS, FUNCTION}) {
// create http trigger
createHttpTrigger(TRIGGER_NAME, auth, new HttpMethod[]{GET, POST, PUT, HEAD, DELETE, PATCH});
// Invoke the function
HttpInvokeFunctionRequest request = new HttpInvokeFunctionRequest(SERVICE_NAME,
FUNCTION_NAME, auth, POST, "/test/path");
request.setHeader("Test-Header-Key", "testHeaderValue");
request.setHeader("Content-Type", "application/json");
InvokeFunctionResponse response = client.invokeFunction(request);
assertEquals(200, response.getStatus());
assertTrue(response.getHeader("Content-Type").startsWith("application/json"));
assertEquals("testHeaderValue", response.getHeader("Test-Header-Key"));
JsonObject jsonObject = gson
.fromJson(new String(response.getPayload()), JsonObject.class);
assertEquals("/test/path", jsonObject.get("path").getAsString());
assertEquals("", jsonObject.get("body").getAsString());
// delete trigger
deleteTrigger(SERVICE_NAME, FUNCTION_NAME, TRIGGER_NAME);
}
// Cleanups
client.deleteFunction(new DeleteFunctionRequest(SERVICE_NAME, FUNCTION_NAME));
client.deleteService(new DeleteServiceRequest(SERVICE_NAME));
}
@Test
public void testCreateFunctionSetZipFile() throws IOException {
createService(SERVICE_NAME);
String source = "'use strict'; module.exports.handler = function(event, context, callback) {console.log('hello world'); callback(null, 'hello world');};";
byte[] data = Util.createZipByteData("hello_world.js", source);
// Create a function
CreateFunctionRequest createFuncReq = new CreateFunctionRequest(SERVICE_NAME);
createFuncReq.setFunctionName(FUNCTION_NAME);
createFuncReq.setDescription("Function for test");
createFuncReq.setMemorySize(128);
createFuncReq.setHandler("hello_world.handler");
createFuncReq.setRuntime("nodejs4.4");
Code code = new Code().setZipFile(data);
createFuncReq.setCode(code);
createFuncReq.setTimeout(10);
client.createFunction(createFuncReq);
// Invoke the function
InvokeFunctionRequest request = new InvokeFunctionRequest(SERVICE_NAME, FUNCTION_NAME);
InvokeFunctionResponse response = client.invokeFunction(request);
assertEquals("hello world", new String(response.getPayload()));
// Cleanups
client.deleteFunction(new DeleteFunctionRequest(SERVICE_NAME, FUNCTION_NAME));
client.deleteService(new DeleteServiceRequest(SERVICE_NAME));
}
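/**
 * Instance concurrency controls how many requests a single function instance
 * handles at once; the value should round-trip through create, get, and update.
 */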
@Test
public void testFunctionInstanceConcurrency() throws IOException {
createService(SERVICE_NAME);
String functionName = "testInstanceConcurrency";
String source = "exports.handler = function(event, context, callback) {\n" +
" callback(null, 'hello world');\n" +
"};";
byte[] code = Util.createZipByteData("hello_world.js", source);
CreateFunctionRequest createFuncReq = new CreateFunctionRequest(SERVICE_NAME);
createFuncReq.setFunctionName(functionName);
createFuncReq.setDescription(FUNCTION_DESC_OLD);
createFuncReq.setMemorySize(128);
createFuncReq.setHandler("hello_world.handler");
createFuncReq.setRuntime("nodejs4.4");
Map<String, String> environmentVariables = new HashMap<String, String>();
environmentVariables.put("testKey", "testValue");
createFuncReq.setEnvironmentVariables(environmentVariables);
createFuncReq.setCode(new Code().setZipFile(code));
createFuncReq.setTimeout(10);
createFuncReq.setInstanceConcurrency(10);
CreateFunctionResponse response = client.createFunction(createFuncReq);
assertFalse(Strings.isNullOrEmpty(response.getRequestId()));
assertFalse(Strings.isNullOrEmpty(response.getFunctionId()));
assertEquals(functionName, response.getFunctionName());
assertEquals(FUNCTION_DESC_OLD, response.getDescription());
assertEquals(10, response.getInstanceConcurrency().intValue());
GetFunctionRequest getReq = new GetFunctionRequest(SERVICE_NAME, functionName);
GetFunctionResponse getResp = client.getFunction(getReq);
assertEquals(10, getResp.getInstanceConcurrency().intValue());
UpdateFunctionRequest updateReq = new UpdateFunctionRequest(SERVICE_NAME, functionName);
updateReq.setInstanceConcurrency(20);
UpdateFunctionResponse updateResp = client.updateFunction(updateReq);
assertEquals(20, updateResp.getInstanceConcurrency().intValue());
getResp = client.getFunction(getReq);
assertEquals(20, getResp.getInstanceConcurrency().intValue());
}
@Test
public void testInvokeFunctionSetHeader() throws IOException, InterruptedException {
createService(SERVICE_NAME);
createFunction(FUNCTION_NAME);
// Headers passed in through setHeader should be respected
InvokeFunctionRequest request = new InvokeFunctionRequest(SERVICE_NAME, FUNCTION_NAME);
request.setHeader("x-fc-invocation-type", Const.INVOCATION_TYPE_ASYNC);
InvokeFunctionResponse response = client.invokeFunction(request);
assertEquals(HttpURLConnection.HTTP_ACCEPTED, response.getStatus());
Thread.sleep(5000);
}
private Credentials getAssumeRoleCredentials(String policy)
throws com.aliyuncs.exceptions.ClientException {
IClientProfile profile = DefaultProfile
.getProfile(REGION, ACCESS_KEY, SECRET_KEY);
//DefaultProfile.addEndpoint("sts.us-west-1.aliyuncs.com", "us-west-1", "Sts", "sts.us-west-1.aliyuncs.com");
DefaultAcsClient client = new DefaultAcsClient(profile);
AssumeRoleRequest request = new AssumeRoleRequest();
request.setVersion(STS_API_VERSION);
request.setMethod(MethodType.POST);
request.setProtocol(ProtocolType.HTTPS);
request.setRoleArn(STS_ROLE);
request.setRoleSessionName("test-session");
if (policy != null) {
request.setPolicy(policy);
}
AssumeRoleResponse stsResponse;
try {
stsResponse = client.getAcsResponse(request);
} catch (com.aliyuncs.exceptions.ClientException e) {
throw new RuntimeException(e);
}
String accessKey = stsResponse.getCredentials().getAccessKeyId();
String secretKey = stsResponse.getCredentials().getAccessKeySecret();
String stsToken = stsResponse.getCredentials().getSecurityToken();
assertNotNull(accessKey);
assertNotNull(secretKey);
assertNotNull(stsToken);
return stsResponse.getCredentials();
}
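/**
 * Full CRUD walkthrough: service, function, code, invocation (sync and
 * async), and optionally the OSS, Log, Timer, and CDN trigger types.
 */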
private void testCRUDHelper(boolean testTrigger)
throws ParseException, InterruptedException, IOException {
// Create Service
createService(SERVICE_NAME);
GetServiceResponse svcOldResp = client.getService(new GetServiceRequest(SERVICE_NAME));
// Update Service
UpdateServiceRequest updateSReq = new UpdateServiceRequest(SERVICE_NAME);
updateSReq.setDescription(SERVICE_DESC_NEW);
Thread.sleep(1000L);
UpdateServiceResponse updateSResp = client.updateService(updateSReq);
verifyUpdate(svcOldResp.getServiceName(), updateSResp.getServiceName(),
svcOldResp.getServiceId(), updateSResp.getServiceId(),
svcOldResp.getLastModifiedTime(), updateSResp.getLastModifiedTime(),
svcOldResp.getCreatedTime(), updateSResp.getCreatedTime(),
svcOldResp.getDescription(), updateSResp.getDescription());
// Get Service
GetServiceRequest getSReq = new GetServiceRequest(SERVICE_NAME);
GetServiceResponse getSResp = client.getService(getSReq);
assertEquals(SERVICE_NAME, getSResp.getServiceName());
assertEquals(svcOldResp.getServiceId(), getSResp.getServiceId());
assertEquals(ROLE, getSResp.getRole());
// Create Function
CreateFunctionResponse createFResp = createFunction(FUNCTION_NAME);
assertFalse(Strings.isNullOrEmpty(createFResp.getRequestId()));
assertFalse(Strings.isNullOrEmpty(createFResp.getFunctionId()));
Map<String, String> environmentVariables = createFResp.getEnvironmentVariables();
assertEquals(1, environmentVariables.size());
assertEquals("testValue", environmentVariables.get("testKey"));
assertEquals(FUNCTION_NAME, createFResp.getFunctionName());
assertEquals(FUNCTION_DESC_OLD, createFResp.getDescription());
// List Functions
ListFunctionsRequest listFReq = new ListFunctionsRequest(SERVICE_NAME);
ListFunctionsResponse listFResp = client.listFunctions(listFReq);
assertFalse(Strings.isNullOrEmpty(listFResp.getRequestId()));
assertEquals(1, listFResp.getFunctions().length);
FunctionMetadata funcOld = listFResp.getFunctions()[0];
assertEquals(FUNCTION_NAME, funcOld.getFunctionName());
// Update Function
UpdateFunctionRequest updateFReq = new UpdateFunctionRequest(SERVICE_NAME, FUNCTION_NAME);
updateFReq.setDescription(FUNCTION_DESC_NEW);
GetFunctionRequest getFReq = new GetFunctionRequest(SERVICE_NAME, FUNCTION_NAME);
GetFunctionResponse getFResp = client.getFunction(getFReq);
Map<String, String> envOriginal = getFResp.getEnvironmentVariables();
envOriginal.put("testKey", "testValueNew");
updateFReq.setEnvironmentVariables(envOriginal);
Thread.sleep(1000L);
UpdateFunctionResponse updateFResp = client.updateFunction(updateFReq);
listFResp = client.listFunctions(listFReq);
Assert.assertEquals("testValueNew", updateFResp.getEnvironmentVariables().get("testKey"));
assertFalse(Strings.isNullOrEmpty(listFResp.getRequestId()));
assertEquals(1, listFResp.getFunctions().length);
FunctionMetadata funcNew = listFResp.getFunctions()[0];
verifyUpdate(funcOld.getFunctionName(), funcNew.getFunctionName(),
funcOld.getFunctionId(), funcNew.getFunctionId(),
funcOld.getLastModifiedTime(), funcNew.getLastModifiedTime(),
funcOld.getCreatedTime(), funcNew.getCreatedTime(),
funcOld.getDescription(), funcNew.getDescription());
// Get Function
getFReq = new GetFunctionRequest(SERVICE_NAME, FUNCTION_NAME);
getFResp = client.getFunction(getFReq);
Map<String, String> envGet = getFResp.getEnvironmentVariables();
assertEquals(1, envGet.size());
assertEquals("testValueNew", envGet.get("testKey"));
assertFalse(Strings.isNullOrEmpty(getFResp.getRequestId()));
assertEquals(FUNCTION_NAME, getFResp.getFunctionName());
// Get Function Code
GetFunctionCodeRequest getFCReq = new GetFunctionCodeRequest(SERVICE_NAME, FUNCTION_NAME);
GetFunctionCodeResponse getFCResp = client.getFunctionCode(getFCReq);
assertFalse(Strings.isNullOrEmpty(getFResp.getRequestId()));
String crc64 = fetchFromURL(getFCResp.getCodeUrl());
assertEquals(crc64, getFCResp.getCodeChecksum());
// Invoke Function
InvokeFunctionRequest invkReq = new InvokeFunctionRequest(SERVICE_NAME, FUNCTION_NAME);
InvokeFunctionResponse invkResp = client.invokeFunction(invkReq);
assertFalse(Strings.isNullOrEmpty(invkResp.getRequestId()));
assertEquals("hello world", new String(invkResp.getContent()));
// Invoke Function Async
invkReq.setInvocationType(Const.INVOCATION_TYPE_ASYNC);
invkResp = client.invokeFunction(invkReq);
assertEquals(HttpURLConnection.HTTP_ACCEPTED, invkResp.getStatus());
if (testTrigger) {
// Create Trigger
String tfPrefix = "prefix";
String tfSuffix = "suffix";
createOssTrigger(TRIGGER_NAME, tfPrefix, tfSuffix);
// List Triggers
TriggerMetadata[] triggers = listTriggers(SERVICE_NAME, FUNCTION_NAME);
assertEquals(1, triggers.length);
TriggerMetadata triggerOld = triggers[0];
assertEquals(TRIGGER_NAME, triggerOld.getTriggerName());
// Update Trigger
String newInvocationRole = INVOCATION_ROLE + "_new";
String tfPrefixNew = "prefix_new";
String tfSuffixNew = "suffix_new";
String[] eventsNew = new String[]{"oss:ObjectCreated:PutObject"};
OSSTriggerConfig updateTriggerConfig = new OSSTriggerConfig(
eventsNew, tfPrefixNew, tfSuffixNew);
UpdateTriggerRequest updateTReq = new UpdateTriggerRequest(SERVICE_NAME, FUNCTION_NAME,
TRIGGER_NAME);
updateTReq.setInvocationRole(newInvocationRole);
updateTReq.setTriggerConfig(updateTriggerConfig);
UpdateTriggerResponse updateTResp = updateTrigger(updateTReq);
assertEquals(triggerOld.getTriggerName(), updateTResp.getTriggerName());
assertNotEquals(triggerOld.getInvocationRole(), updateTResp.getInvocationRole());
assertEquals(triggerOld.getSourceArn(), updateTResp.getSourceArn());
Gson gson = new Gson();
OSSTriggerConfig tcOld = gson
.fromJson(gson.toJson(triggerOld.getTriggerConfig()), OSSTriggerConfig.class);
OSSTriggerConfig tcNew = gson
.fromJson(gson.toJson(updateTResp.getTriggerConfig()), OSSTriggerConfig.class);
assertFalse(deepEquals(tcOld.getEvents(), tcNew.getEvents()));
assertNotEquals(tcOld.getFilter().getKey().getPrefix(),
tcNew.getFilter().getKey().getPrefix());
assertNotEquals(tcOld.getFilter().getKey().getSuffix(),
tcNew.getFilter().getKey().getSuffix());
assertEquals(triggerOld.getCreatedTime(), updateTResp.getCreatedTime());
assertEquals(triggerOld.getTriggerType(), updateTResp.getTriggerType());
assertNotEquals(triggerOld.getInvocationRole(), updateTResp.getInvocationRole());
Date dateOld = DATE_FORMAT.parse(triggerOld.getLastModifiedTime());
Date dateNew = DATE_FORMAT.parse(updateTResp.getLastModifiedTime());
assertTrue(dateOld.before(dateNew));
// Get Trigger
GetTriggerRequest getTReq = new GetTriggerRequest(SERVICE_NAME, FUNCTION_NAME,
TRIGGER_NAME);
GetTriggerResponse getTResp = client.getTrigger(getTReq);
OSSTriggerConfig getTConfig = gson
.fromJson(gson.toJson(getTResp.getTriggerConfig()), OSSTriggerConfig.class);
assertFalse(Strings.isNullOrEmpty(getTResp.getRequestId()));
assertEquals(TRIGGER_NAME, getTResp.getTriggerName());
assertEquals(OSS_SOURCE_ARN, getTResp.getSourceARN());
assertEquals(TRIGGER_TYPE_OSS, getTResp.getTriggerType());
assertEquals(newInvocationRole, getTResp.getInvocationRole());
assertEquals(tfPrefixNew, getTConfig.getFilter().getKey().getPrefix());
assertEquals(tfSuffixNew, getTConfig.getFilter().getKey().getSuffix());
assertTrue(deepEquals(eventsNew, getTConfig.getEvents()));
// Delete Trigger
deleteTrigger(SERVICE_NAME, FUNCTION_NAME, TRIGGER_NAME);
}
testLogTrigger();
testTimeTrigger();
testCdnEventsTrigger();
// Delete Function
DeleteFunctionRequest deleteFReq = new DeleteFunctionRequest(SERVICE_NAME, FUNCTION_NAME);
int numFunctionsOld = listFResp.getFunctions().length;
DeleteFunctionResponse deleteFResp = client.deleteFunction(deleteFReq);
assertFalse(Strings.isNullOrEmpty(deleteFResp.getRequestId()));
listFResp = client.listFunctions(listFReq);
try {
getFunction(FUNCTION_NAME, listFResp.getFunctions());
fail("Function " + FUNCTION_NAME + " failed to be deleted");
} catch (RuntimeException e) {
int numFunctionsNew = (listFResp.getFunctions() == null) ? 0 :
listFResp.getFunctions().length;
assertEquals(numFunctionsOld, numFunctionsNew + 1);
}
GetFunctionResponse getFResp2 = null;
try {
getFResp2 = client.getFunction(getFReq);
fail("Getting function " + FUNCTION_NAME + " should fail after deletion");
} catch (ClientException e) {
assertNull(getFResp2);
}
// Delete Service
DeleteServiceRequest deleteSReq = new DeleteServiceRequest(SERVICE_NAME);
DeleteServiceResponse deleteSResp = client.deleteService(deleteSReq);
assertFalse(Strings.isNullOrEmpty(deleteSResp.getRequestId()));
GetServiceResponse getSResp2 = null;
try {
getSResp2 = client.getService(getSReq);
fail("Get service " + FUNCTION_NAME + " should have no service returned after delete");
} catch (ClientException e) {
assertNull(getSResp2);
}
}
private void testCdnEventsTrigger() throws ParseException, InterruptedException {
String triggerName = TRIGGER_TYPE_CDN + "_" + TRIGGER_NAME;
String EVENT_NAME = "LogFileCreated";
String EVENT_VERSION = "1.0.0";
String NOTES = "notes";
String NEW_NOTES = "updateNotes";
CdnEventsTriggerConfig config = new CdnEventsTriggerConfig();
config.setEventName(EVENT_NAME);
config.setEventVersion(EVENT_VERSION);
config.setNotes(NOTES);
Map<String, List<String>> filters = new HashMap<String, List<String>>();
filters.put("domain", asList("www.taobao.com"));
filters.put("stream", asList("def"));
config.setFilter(filters);
CreateTriggerRequest createTReq = new CreateTriggerRequest(SERVICE_NAME, FUNCTION_NAME);
createTReq.setTriggerName(triggerName);
createTReq.setTriggerType(TRIGGER_TYPE_CDN);
createTReq.setInvocationRole(INVOCATION_ROLE);
createTReq.setSourceArn(CDN_SOURCE_ARN);
createTReq.setTriggerConfig(config);
client.createTrigger(createTReq);
// List Triggers
TriggerMetadata[] triggers = listTriggers(SERVICE_NAME, FUNCTION_NAME);
assertEquals(1, triggers.length);
TriggerMetadata triggerOld = triggers[0];
assertEquals(triggerName, triggerOld.getTriggerName());
Thread.sleep(300);
Map<String, List<String>> newFilters = new HashMap<String, List<String>>();
newFilters.put("a", asList("b"));
CdnEventsTriggerConfig updateConfig = new CdnEventsTriggerConfig();
updateConfig.setNotes(NEW_NOTES);
updateConfig.setFilter(newFilters);
UpdateTriggerRequest req = new UpdateTriggerRequest(SERVICE_NAME, FUNCTION_NAME,
triggerName);
req.setInvocationRole(INVOCATION_ROLE);
req.setTriggerConfig(updateConfig);
UpdateTriggerResponse updateTResp = client.updateTrigger(req);
assertEquals(triggerOld.getTriggerName(), updateTResp.getTriggerName());
assertEquals(triggerOld.getInvocationRole(), updateTResp.getInvocationRole());
assertEquals(triggerOld.getSourceArn(), updateTResp.getSourceArn());
Gson gson = new Gson();
CdnEventsTriggerConfig tcOld = gson
.fromJson(gson.toJson(triggerOld.getTriggerConfig()), CdnEventsTriggerConfig.class);
CdnEventsTriggerConfig tcNew = gson
.fromJson(gson.toJson(updateTResp.getTriggerConfig()), CdnEventsTriggerConfig.class);
assertEquals(triggerOld.getCreatedTime(), updateTResp.getCreatedTime());
assertEquals(triggerOld.getTriggerType(), updateTResp.getTriggerType());
assertEquals(triggerOld.getInvocationRole(), updateTResp.getInvocationRole());
assertEquals(EVENT_NAME, tcNew.getEventName());
assertEquals(tcOld.getEventVersion(), tcNew.getEventVersion());
assertEquals(NEW_NOTES, tcNew.getNotes());
assertNotEquals(tcOld.getFilter(), tcNew.getFilter());
Date dateOld = DATE_FORMAT.parse(triggerOld.getLastModifiedTime());
Date dateNew = DATE_FORMAT.parse(updateTResp.getLastModifiedTime());
assertTrue(dateOld.before(dateNew));
// Get Trigger
GetTriggerRequest getTReq = new GetTriggerRequest(SERVICE_NAME, FUNCTION_NAME,
triggerName);
GetTriggerResponse getTResp = client.getTrigger(getTReq);
config = gson
.fromJson(gson.toJson(getTResp.getTriggerConfig()), CdnEventsTriggerConfig.class);
assertFalse(Strings.isNullOrEmpty(getTResp.getRequestId()));
assertEquals(triggerName, getTResp.getTriggerName());
assertEquals(CDN_SOURCE_ARN, getTResp.getSourceARN());
assertEquals(TRIGGER_TYPE_CDN, getTResp.getTriggerType());
assertEquals(EVENT_NAME, config.getEventName());
assertEquals(EVENT_VERSION, config.getEventVersion());
// Delete Trigger
deleteTrigger(SERVICE_NAME, FUNCTION_NAME, triggerName);
}
private CreateTriggerResponse createLogTrigger(String triggerName,
LogTriggerConfig triggerConfig) {
CreateTriggerRequest createTReq = new CreateTriggerRequest(SERVICE_NAME, FUNCTION_NAME);
createTReq.setTriggerName(triggerName);
createTReq.setTriggerType(TRIGGER_TYPE_LOG);
createTReq.setInvocationRole(INVOCATION_ROLE);
createTReq.setSourceArn(LOG_SOURCE_ARN);
createTReq.setTriggerConfig(triggerConfig);
return client.createTrigger(createTReq);
}
private void testLogTrigger() throws ParseException {
String triggerName = TRIGGER_TYPE_LOG + "_" + TRIGGER_NAME;
LogTriggerConfig triggerConfig = new LogTriggerConfig()
.setSourceConfig(new LogTriggerConfig.SourceConfig(LOG_STORE)).
setJobConfig(
new LogTriggerConfig.JobConfig().setMaxRetryTime(3).setTriggerInterval(60)).
setLogConfig(new LogTriggerConfig.LogConfig("", "")).
setFunctionParameter(new HashMap<String, Object>()).setEnable(true);
createLogTrigger(triggerName, triggerConfig);
// List Triggers
TriggerMetadata[] triggers = listTriggers(SERVICE_NAME, FUNCTION_NAME);
assertEquals(1, triggers.length);
TriggerMetadata triggerOld = triggers[0];
assertEquals(triggerName, triggerOld.getTriggerName());
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
// The sleep only spaces out LastModifiedTime values; restore the interrupt flag.
Thread.currentThread().interrupt();
}
UpdateTriggerRequest req = new UpdateTriggerRequest(SERVICE_NAME, FUNCTION_NAME,
triggerName);
req.setInvocationRole(INVOCATION_ROLE);
req.setTriggerConfig(
new LogTriggerConfig().
setJobConfig(
new LogTriggerConfig.JobConfig().setMaxRetryTime(5).setTriggerInterval(120)));
UpdateTriggerResponse updateTResp = client.updateTrigger(req);
assertEquals(triggerOld.getTriggerName(), updateTResp.getTriggerName());
assertEquals(triggerOld.getInvocationRole(), updateTResp.getInvocationRole());
assertEquals(triggerOld.getSourceArn(), updateTResp.getSourceArn());
Gson gson = new Gson();
LogTriggerConfig tcOld = gson
.fromJson(gson.toJson(triggerOld.getTriggerConfig()), LogTriggerConfig.class);
LogTriggerConfig tcNew = gson
.fromJson(gson.toJson(updateTResp.getTriggerConfig()), LogTriggerConfig.class);
assertEquals(triggerOld.getCreatedTime(), updateTResp.getCreatedTime());
assertEquals(triggerOld.getTriggerType(), updateTResp.getTriggerType());
assertEquals(triggerOld.getInvocationRole(), updateTResp.getInvocationRole());
assertEquals(tcOld.getSourceConfig(), tcNew.getSourceConfig());
assertEquals(tcOld.getLogConfig(), tcNew.getLogConfig());
assertEquals(tcOld.isEnable(), tcNew.isEnable());
assertNotEquals(tcOld.getJobConfig(), tcNew.getJobConfig());
Date dateOld = DATE_FORMAT.parse(triggerOld.getLastModifiedTime());
Date dateNew = DATE_FORMAT.parse(updateTResp.getLastModifiedTime());
assertTrue(dateOld.before(dateNew));
// Get Trigger
GetTriggerRequest getTReq = new GetTriggerRequest(SERVICE_NAME, FUNCTION_NAME,
triggerName);
GetTriggerResponse getTResp = client.getTrigger(getTReq);
LogTriggerConfig getTConfig = gson
.fromJson(gson.toJson(getTResp.getTriggerConfig()), LogTriggerConfig.class);
assertFalse(Strings.isNullOrEmpty(getTResp.getRequestId()));
assertEquals(triggerName, getTResp.getTriggerName());
assertEquals(LOG_SOURCE_ARN, getTResp.getSourceARN());
assertEquals(TRIGGER_TYPE_LOG, getTResp.getTriggerType());
assertEquals(5, getTConfig.getJobConfig().getMaxRetryTime().intValue());
assertEquals(120, getTConfig.getJobConfig().getTriggerInterval().intValue());
// Delete Trigger
deleteTrigger(SERVICE_NAME, FUNCTION_NAME, triggerName);
}
private CreateTriggerResponse createTimeTrigger(String triggerName,
TimeTriggerConfig timeTriggerConfig) {
CreateTriggerRequest createTReq = new CreateTriggerRequest(SERVICE_NAME, FUNCTION_NAME);
createTReq.setTriggerName(triggerName);
createTReq.setTriggerType(TRIGGER_TYPE_TIMER);
createTReq.setTriggerConfig(timeTriggerConfig);
return client.createTrigger(createTReq);
}
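/**
 * The initializer runs once per function instance before the first
 * invocation, so the handler should observe counter == 1.
 */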
@Test
public void testInvokeFunctionWithInitializer()
throws ParseException, InterruptedException, IOException {
createService(SERVICE_NAME);
String functionName = "testInitializer";
String source = "'use strict';\n" +
"var counter = 0;\n" +
"exports.initializer = function(ctx, callback) {\n" +
"++counter;\n" +
"callback(null, '');};\n" +
"exports.handler = function(event, context, callback) {\n" +
"console.log('hello world, counter is %d', counter);\n" +
"callback(null, String(counter));};\n";
byte[] data = Util.createZipByteData("counter.js", source);
// Create a function
CreateFunctionRequest createFuncReq = new CreateFunctionRequest(SERVICE_NAME);
createFuncReq.setFunctionName(functionName);
createFuncReq.setDescription("Function for initializer test");
createFuncReq.setMemorySize(128);
createFuncReq.setHandler("counter.handler");
createFuncReq.setInitializer("counter.initializer");
createFuncReq.setRuntime("nodejs4.4");
Code code = new Code().setZipFile(data);
createFuncReq.setCode(code);
createFuncReq.setTimeout(10);
client.createFunction(createFuncReq);
// Update Function
ListFunctionsRequest listFReq = new ListFunctionsRequest(SERVICE_NAME);
ListFunctionsResponse listFResp = client.listFunctions(listFReq);
FunctionMetadata funcOld = listFResp.getFunctions()[0];
UpdateFunctionRequest updateFReq = new UpdateFunctionRequest(SERVICE_NAME, functionName);
updateFReq.setDescription(FUNCTION_DESC_NEW);
GetFunctionRequest getFReq = new GetFunctionRequest(SERVICE_NAME, functionName);
GetFunctionResponse getFResp = client.getFunction(getFReq);
Map<String, String> envOriginal = getFResp.getEnvironmentVariables();
envOriginal.put("testKey", "testValueNew");
updateFReq.setEnvironmentVariables(envOriginal);
Thread.sleep(1000L);
UpdateFunctionResponse updateFResp = client.updateFunction(updateFReq);
listFResp = client.listFunctions(listFReq);
Assert.assertEquals("testValueNew", updateFResp.getEnvironmentVariables().get("testKey"));
assertFalse(Strings.isNullOrEmpty(listFResp.getRequestId()));
assertEquals(1, listFResp.getFunctions().length);
FunctionMetadata funcNew = listFResp.getFunctions()[0];
verifyUpdate(funcOld.getFunctionName(), funcNew.getFunctionName(),
funcOld.getFunctionId(), funcNew.getFunctionId(),
funcOld.getLastModifiedTime(), funcNew.getLastModifiedTime(),
funcOld.getCreatedTime(), funcNew.getCreatedTime(),
funcOld.getDescription(), funcNew.getDescription());
// Invoke the function
InvokeFunctionRequest request = new InvokeFunctionRequest(SERVICE_NAME, functionName);
InvokeFunctionResponse response = client.invokeFunction(request);
assertEquals("1", new String(response.getPayload()));
// Cleanups
client.deleteFunction(new DeleteFunctionRequest(SERVICE_NAME, functionName));
client.deleteService(new DeleteServiceRequest(SERVICE_NAME));
}
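/**
 * Timer triggers accept both the "@every <duration>" shorthand and a
 * standard cron expression; this test creates with the former and updates
 * to the latter.
 */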
private void testTimeTrigger() throws ParseException {
String cronEvery = "@every 5m";
String cronExpression = "0 2 * * * *";
String payload = "awesome-fc";
String triggerName = TRIGGER_TYPE_TIMER + "_" + TRIGGER_NAME;
Gson gson = new Gson();
// Create Trigger
TimeTriggerConfig timeTriggerConfig = new TimeTriggerConfig(cronEvery, payload, true);
CreateTriggerResponse createTriggerResponse = createTimeTrigger(triggerName,
timeTriggerConfig);
assertEquals(triggerName, createTriggerResponse.getTriggerName());
assertEquals(TRIGGER_TYPE_TIMER, createTriggerResponse.getTriggerType());
String createTime = createTriggerResponse.getCreatedTime();
String lastModifiedTime = createTriggerResponse.getLastModifiedTime();
TimeTriggerConfig tRConfig = gson
.fromJson(gson.toJson(createTriggerResponse.getTriggerConfig()),
TimeTriggerConfig.class);
assertEquals(timeTriggerConfig.getCronExpression(), tRConfig.getCronExpression());
assertEquals(timeTriggerConfig.getPayload(), tRConfig.getPayload());
assertEquals(timeTriggerConfig.isEnable(), tRConfig.isEnable());
// Get Trigger
GetTriggerRequest getTReq = new GetTriggerRequest(SERVICE_NAME, FUNCTION_NAME, triggerName);
GetTriggerResponse getTResp = client.getTrigger(getTReq);
TimeTriggerConfig getTConfig = gson
.fromJson(gson.toJson(getTResp.getTriggerConfig()), TimeTriggerConfig.class);
assertFalse(Strings.isNullOrEmpty(getTResp.getRequestId()));
assertEquals(triggerName, getTResp.getTriggerName());
assertEquals(TRIGGER_TYPE_TIMER, getTResp.getTriggerType());
assertEquals(timeTriggerConfig.getCronExpression(), getTConfig.getCronExpression());
assertEquals(timeTriggerConfig.getPayload(), getTConfig.getPayload());
assertEquals(timeTriggerConfig.isEnable(), getTConfig.isEnable());
assertEquals(createTime, getTResp.getCreatedTime());
assertEquals(lastModifiedTime, getTResp.getLastModifiedTime());
// List Triggers
TriggerMetadata[] triggers = listTriggers(SERVICE_NAME, FUNCTION_NAME);
assertEquals(1, triggers.length);
TriggerMetadata triggerOld = triggers[0];
assertEquals(triggerName, triggerOld.getTriggerName());
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
// The sleep only spaces out LastModifiedTime values; restore the interrupt flag.
Thread.currentThread().interrupt();
}
// Update Triggers
UpdateTriggerRequest req = new UpdateTriggerRequest(SERVICE_NAME, FUNCTION_NAME,
triggerName);
req.setTriggerConfig(
new TimeTriggerConfig().setCronExpression(cronExpression).setPayload(payload)
.setEnable(true));
UpdateTriggerResponse updateTResp = client.updateTrigger(req);
assertEquals(triggerOld.getTriggerName(), updateTResp.getTriggerName());
TimeTriggerConfig tcOld = gson
.fromJson(gson.toJson(triggerOld.getTriggerConfig()), TimeTriggerConfig.class);
TimeTriggerConfig tcNew = gson
.fromJson(gson.toJson(updateTResp.getTriggerConfig()), TimeTriggerConfig.class);
Date dateOld = DATE_FORMAT.parse(triggerOld.getLastModifiedTime());
Date dateNew = DATE_FORMAT.parse(updateTResp.getLastModifiedTime());
assertTrue(dateOld.before(dateNew));
assertNotEquals(tcOld.getCronExpression(), tcNew.getCronExpression());
assertEquals(tcOld.getPayload(), tcNew.getPayload());
assertEquals(tcOld.isEnable(), tcNew.isEnable());
// Delete Trigger
deleteTrigger(SERVICE_NAME, FUNCTION_NAME, triggerName);
}
private void verifyUpdate(String nameOld, String nameNew, String idOld,
String idNew, String lastModifiedTimeOld, String lastModifiedTimeNew,
String createdTimeOld, String createdTimeNew, String descOld, String descNew)
throws ParseException {
assertEquals(nameNew, nameOld);
assertEquals(idNew, idOld);
Date dateOld = DATE_FORMAT.parse(lastModifiedTimeOld);
Date dateNew = DATE_FORMAT.parse(lastModifiedTimeNew);
assertTrue(dateOld.before(dateNew));
Date cDateOld = DATE_FORMAT.parse(createdTimeOld);
Date cDateNew = DATE_FORMAT.parse(createdTimeNew);
assertEquals(cDateNew, cDateOld);
assertNotEquals(descNew, descOld);
}
private FunctionMetadata getFunction(String functionName, FunctionMetadata[] functions) {
for (FunctionMetadata function : functions) {
if (functionName.equals(function.getFunctionName())) {
return function;
}
}
throw new RuntimeException("Function " + functionName + " does not exist");
}
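/**
 * Binds a VPC to the service and verifies the binding survives a
 * create/list/delete round-trip.
 */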
@Test
public void testVpcBinding() throws ClientException {
String serviceName = "testvpcbindforgithubtest";
cleanupService(serviceName);
// create service with role
CreateServiceRequest createSReq = new CreateServiceRequest();
createSReq.setServiceName(serviceName);
createSReq.setRole(ROLE);
CreateServiceResponse response = client.createService(createSReq);
assertEquals(serviceName, response.getServiceName());
assertFalse(Strings.isNullOrEmpty(response.getRequestId()));
assertFalse(Strings.isNullOrEmpty(response.getServiceId()));
assertEquals(ROLE, response.getRole());
// create vpc binding
CreateVpcBindingRequest request = new CreateVpcBindingRequest();
request.setServiceName(serviceName).setVpcId(VPC_ID);
CreateVpcBindingResponse createVpcBindingResponse = client.createVpcBinding(request);
assertEquals(200, createVpcBindingResponse.getStatus());
// list vpc binding
ListVpcBindingsRequest listVpcBindingsRequest = new ListVpcBindingsRequest();
listVpcBindingsRequest.setServiceName(serviceName);
ListVpcBindingsResponse listVpcBindingsResponse = client.listVpcBindings(listVpcBindingsRequest);
assertEquals(1, listVpcBindingsResponse.getVpcIDs().length);
assertEquals(VPC_ID, listVpcBindingsResponse.getVpcIDs()[0]);
// delete vpc binding
DeleteVpcBindingRequest deleteVpcBindingRequest = new DeleteVpcBindingRequest();
deleteVpcBindingRequest.setServiceName(serviceName).setVpcId(VPC_ID);
DeleteVpcBindingResponse deleteVpcBindingResponse = client.deleteVpcBinding(deleteVpcBindingRequest);
assertEquals(200, deleteVpcBindingResponse.getStatus());
cleanupService(serviceName);
}
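/**
 * Custom domains map URL paths to service/function pairs via a RouteConfig;
 * the config should survive create, update, get, and list.
 */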
@Test
public void testCustomDomain()
throws ClientException, JSONException, InterruptedException, ParseException, IOException {
cleanupCustomDomain(CUSTOMDOMAIN_NAME);
// Create custom domain
CreateCustomDomainRequest createCustomDomainRequest = new CreateCustomDomainRequest();
PathConfig pathConfig = new PathConfig("/", "serviceName", "functionName", null);
PathConfig[] routes = new PathConfig[1];
routes[0] = pathConfig;
RouteConfig routeConfig = new RouteConfig(routes);
createCustomDomainRequest.setDomainName(CUSTOMDOMAIN_NAME);
createCustomDomainRequest.setProtocol("HTTP");
createCustomDomainRequest.setRouteConfig(routeConfig);
CreateCustomDomainResponse createCustomDomainResponse = client
.createCustomDomain(createCustomDomainRequest);
assertEquals(CUSTOMDOMAIN_NAME, createCustomDomainResponse.getDomainName());
assertEquals("HTTP", createCustomDomainResponse.getProtocol());
assertNotNull(createCustomDomainResponse.getRouteConfig().getRoutes());
assertEqualsRouteConfig(routeConfig, createCustomDomainResponse.getRouteConfig());
// Update custom domain
UpdateCustomDomainRequest updateCustomDomainRequest = new UpdateCustomDomainRequest(
CUSTOMDOMAIN_NAME);
PathConfig pathConfig1 = new PathConfig("/login", "serviceName1", "functionName1", null);
PathConfig[] routes1 = new PathConfig[2];
routes1[0] = pathConfig;
routes1[1] = pathConfig1;
RouteConfig routeConfig1 = new RouteConfig(routes1);
updateCustomDomainRequest.setRouteConfig(routeConfig1);
UpdateCustomDomainResponse updateCustomDomainResponse = client
.updateCustomDomain(updateCustomDomainRequest);
assertEqualsRouteConfig(routeConfig1, updateCustomDomainResponse.getRouteConfig());
// Get custom domain
GetCustomDomainRequest getCustomDomainRequest = new GetCustomDomainRequest(
CUSTOMDOMAIN_NAME);
GetCustomDomainResponse getCustomDomainResponse = client
.getCustomDomain(getCustomDomainRequest);
assertEquals(CUSTOMDOMAIN_NAME, getCustomDomainResponse.getDomainName());
assertEquals("HTTP", getCustomDomainResponse.getProtocol());
assertEqualsRouteConfig(routeConfig1, getCustomDomainResponse.getRouteConfig());
// List custom domain
ListCustomDomainsRequest listCustomDomainsRequest = new ListCustomDomainsRequest();
ListCustomDomainsResponse listCustomDomainsResponse = client
.listCustomDomains(listCustomDomainsRequest);
assertEquals(HttpURLConnection.HTTP_OK, listCustomDomainsResponse.getStatus());
// Delete custom domain
DeleteCustomDomainRequest deleteCustomDomainRequest = new DeleteCustomDomainRequest(
CUSTOMDOMAIN_NAME);
client.deleteCustomDomain(deleteCustomDomainRequest);
}
@Test
public void testCustomDomainWithHTTPS()
throws ClientException, JSONException, InterruptedException, ParseException, IOException {
cleanupCustomDomain(CUSTOMDOMAIN_NAME);
String certificate1 = PUBLIC_KEY_CERTIFICATE_01.replace("\\n", "\n").replace("\"", "");
String certificate2 = PUBLIC_KEY_CERTIFICATE_02.replace("\\n", "\n").replace("\"", "");
String privateKey1 = PRIVATE_KEY_01.replace("\\n", "\n").replace("\"", "");
String privateKey2 = PRIVATE_KEY_02.replace("\\n", "\n").replace("\"", "");
// Create custom domain
CreateCustomDomainRequest createCustomDomainRequest = new CreateCustomDomainRequest();
PathConfig pathConfig = new PathConfig("/", "serviceName", "functionName", null);
PathConfig[] routes = new PathConfig[1];
routes[0] = pathConfig;
RouteConfig routeConfig = new RouteConfig(routes);
CertConfig certConfig = new CertConfig(CERT_NAME, certificate1, privateKey1);
createCustomDomainRequest.setDomainName(CUSTOMDOMAIN_NAME);
createCustomDomainRequest.setProtocol("HTTP,HTTPS");
createCustomDomainRequest.setRouteConfig(routeConfig);
createCustomDomainRequest.setCertConfig(certConfig);
CreateCustomDomainResponse createCustomDomainResponse = client
.createCustomDomain(createCustomDomainRequest);
assertEquals(CUSTOMDOMAIN_NAME, createCustomDomainResponse.getDomainName());
assertEquals("HTTP,HTTPS", createCustomDomainResponse.getProtocol());
assertNotNull(createCustomDomainResponse.getRouteConfig().getRoutes());
assertEqualsRouteConfig(routeConfig, createCustomDomainResponse.getRouteConfig());
assertNotNull(createCustomDomainResponse.getCertConfig());
assertEquals(certificate1, createCustomDomainResponse.getCertConfig().getCertificate());
// Update custom domain
UpdateCustomDomainRequest updateCustomDomainRequest = new UpdateCustomDomainRequest(
CUSTOMDOMAIN_NAME);
PathConfig pathConfig1 = new PathConfig("/login", "serviceName1", "functionName1", null);
PathConfig[] routes1 = new PathConfig[2];
routes1[0] = pathConfig;
routes1[1] = pathConfig1;
RouteConfig routeConfig1 = new RouteConfig(routes1);
certConfig = new CertConfig(CERT_NAME, certificate2, privateKey2);
updateCustomDomainRequest.setRouteConfig(routeConfig1);
updateCustomDomainRequest.setCertConfig(certConfig);
UpdateCustomDomainResponse updateCustomDomainResponse = client
.updateCustomDomain(updateCustomDomainRequest);
assertEqualsRouteConfig(routeConfig1, updateCustomDomainResponse.getRouteConfig());
assertNotNull(updateCustomDomainResponse.getCertConfig());
assertEquals(certificate2, updateCustomDomainResponse.getCertConfig().getCertificate());
// Get custom domain
GetCustomDomainRequest getCustomDomainRequest = new GetCustomDomainRequest(
CUSTOMDOMAIN_NAME);
GetCustomDomainResponse getCustomDomainResponse = client
.getCustomDomain(getCustomDomainRequest);
assertEquals(CUSTOMDOMAIN_NAME, getCustomDomainResponse.getDomainName());
assertEquals("HTTP,HTTPS", getCustomDomainResponse.getProtocol());
assertEqualsRouteConfig(routeConfig1, getCustomDomainResponse.getRouteConfig());
assertNotNull(getCustomDomainResponse.getCertConfig());
assertEquals(certificate2, getCustomDomainResponse.getCertConfig().getCertificate());
// List custom domain
ListCustomDomainsRequest listCustomDomainsRequest = new ListCustomDomainsRequest();
ListCustomDomainsResponse listCustomDomainsResponse = client
.listCustomDomains(listCustomDomainsRequest);
assertEquals(HttpURLConnection.HTTP_OK, listCustomDomainsResponse.getStatus());
// Delete custom domain
DeleteCustomDomainRequest deleteCustomDomainRequest = new DeleteCustomDomainRequest(
CUSTOMDOMAIN_NAME);
client.deleteCustomDomain(deleteCustomDomainRequest);
}
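/**
 * Tags services through their resource ARN
 * (acs:fc:{region}:{accountId}:services/{serviceName}), then untags
 * selectively and with all=true.
 */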
@Test
public void testTag()
throws ClientException, JSONException, InterruptedException, ParseException, IOException {
final int numServices = 3;
for (int i = 0; i < numServices; i++) {
try {
client.getService(new GetServiceRequest(SERVICE_NAME + i));
cleanupService(SERVICE_NAME + i);
} catch (ClientException e) {
if (!ErrorCodes.SERVICE_NOT_FOUND.equals(e.getErrorCode())) {
throw new RuntimeException("Cleanup failed");
}
}
CreateServiceRequest request = new CreateServiceRequest();
request.setServiceName(SERVICE_NAME + i);
request.setDescription(SERVICE_DESC_OLD);
request.setRole(ROLE);
CreateServiceResponse response = client.createService(request);
assertFalse(Strings.isNullOrEmpty(response.getRequestId()));
TagResourceRequest req = new TagResourceRequest();
req.setResourceArn(String.format("acs:fc:%s:%s:services/%s", REGION, ACCOUNT_ID, SERVICE_NAME + i));
Map<String, String> tags = new HashMap<String, String>();
if (i % 2 == 0) {
tags.put("k1", "v1");
} else {
tags.put("k2", "v2");
}
tags.put("k3", "v3");
req.setTags(tags);
TagResourceResponse resp = client.tagResource(req);
assertFalse(Strings.isNullOrEmpty(resp.getRequestId()));
}
for (int i = 0; i < numServices; i++) {
String resourceArn = String.format("acs:fc:%s:%s:services/%s", REGION, ACCOUNT_ID, SERVICE_NAME + i);
GetResourceTagsRequest getReq = new GetResourceTagsRequest(resourceArn);
GetResourceTagsResponse getResp = client.getResourceTags(getReq);
Assert.assertEquals(resourceArn, getResp.getResourceArn());
Assert.assertEquals("v3", getResp.getTags().get("k3"));
if (i % 2 == 0) {
Assert.assertFalse(getResp.getTags().containsKey("k2"));
Assert.assertEquals("v1", getResp.getTags().get("k1"));
} else {
Assert.assertFalse(getResp.getTags().containsKey("k1"));
Assert.assertEquals("v2", getResp.getTags().get("k2"));
}
// unTag k3
UntagResourceRequest req = new UntagResourceRequest();
req.setResourceArn(resourceArn);
String[] tagKeys = new String[]{"k3"};
req.setTagKeys(tagKeys);
UntagResourceResponse resp = client.untagResource(req);
assertFalse(Strings.isNullOrEmpty(resp.getRequestId()));
getReq = new GetResourceTagsRequest(resourceArn);
getResp = client.getResourceTags(getReq);
Assert.assertEquals(resourceArn, getResp.getResourceArn());
Assert.assertFalse(getResp.getTags().containsKey("k3"));
if (i % 2 == 0) {
Assert.assertEquals("v1", getResp.getTags().get("k1"));
} else {
Assert.assertEquals("v2", getResp.getTags().get("k2"));
}
// unTag all
req = new UntagResourceRequest();
req.setResourceArn(resourceArn);
tagKeys = new String[]{};
req.setTagKeys(tagKeys);
req.setAll(true);
resp = client.untagResource(req);
assertFalse(Strings.isNullOrEmpty(resp.getRequestId()));
getReq = new GetResourceTagsRequest(resourceArn);
getResp = client.getResourceTags(getReq);
Assert.assertFalse(getResp.getTags().containsKey("k1"));
Assert.assertFalse(getResp.getTags().containsKey("k2"));
Assert.assertFalse(getResp.getTags().containsKey("k3"));
}
// Delete services
for (int i = 0; i < numServices; i++) {
cleanupService(SERVICE_NAME + i);
}
}
@Test
public void testVersions() throws ClientException {
createService(SERVICE_NAME);
String lastVersion = "0";
// publish a version
PublishVersionRequest publishVersionRequest = new PublishVersionRequest(SERVICE_NAME);
PublishVersionResponse publishVersionResponse = client
.publishVersion(publishVersionRequest);
assertEquals(String.format("%d", Integer.parseInt(lastVersion) + 1),
publishVersionResponse.getVersionId());
// List versions
ListVersionsRequest listVersionsRequest = new ListVersionsRequest(SERVICE_NAME);
ListVersionsResponse listVersionsResponse = client.listVersions(listVersionsRequest);
assertTrue(listVersionsResponse.getStatus() == HttpURLConnection.HTTP_OK);
assertEquals(1, listVersionsResponse.getVersions().length);
assertEquals(publishVersionResponse.getVersionId(),
listVersionsResponse.getVersions()[0].getVersionId());
// Delete version
DeleteVersionRequest deleteVersionRequest = new DeleteVersionRequest(SERVICE_NAME,
publishVersionResponse.getVersionId());
DeleteVersionResponse deleteVersionResponse = client.deleteVersion(deleteVersionRequest);
assertEquals(HttpURLConnection.HTTP_NO_CONTENT, deleteVersionResponse.getStatus());
}
@Test
public void testAlias() throws ClientException {
createService(SERVICE_NAME);
String lastVersion = cleanUpVersions(SERVICE_NAME);
// publish a version
PublishVersionRequest publishVersionRequest = new PublishVersionRequest(SERVICE_NAME);
PublishVersionResponse publishVersionResponse = client
.publishVersion(publishVersionRequest);
assertEquals(String.format("%d", Integer.parseInt(lastVersion) + 1),
publishVersionResponse.getVersionId());
lastVersion = publishVersionResponse.getVersionId();
        // Create an alias against it
String aliasName = "myAlias";
CreateAliasRequest createAliasRequest = new CreateAliasRequest(SERVICE_NAME, aliasName,
lastVersion);
CreateAliasResponse createAliasResponse = client.createAlias(createAliasRequest);
assertEquals(HttpURLConnection.HTTP_OK, createAliasResponse.getStatus());
assertEquals(lastVersion, createAliasResponse.getVersionId());
assertEquals(aliasName, createAliasResponse.getAliasName());
//Get Alias
GetAliasRequest getAliasRequest = new GetAliasRequest(SERVICE_NAME, aliasName);
GetAliasResponse getAliasResponse = client.getAlias(getAliasRequest);
assertEquals(HttpURLConnection.HTTP_OK, getAliasResponse.getStatus());
assertEquals(lastVersion, getAliasResponse.getVersionId());
assertEquals(aliasName, getAliasResponse.getAliasName());
assertEquals(0, getAliasResponse.getDescription().length());
assertNull(getAliasResponse.getAdditionalVersionWeight());
//Update the Alias
String description = "my test Alias";
UpdateAliasRequest updateAliasRequest = new UpdateAliasRequest(SERVICE_NAME, aliasName);
updateAliasRequest.setDescription(description);
UpdateAliasResponse updateAliasResponse = client.updateAlias(updateAliasRequest);
assertEquals(HttpURLConnection.HTTP_OK, updateAliasResponse.getStatus());
assertEquals(lastVersion, updateAliasResponse.getVersionId());
assertEquals(aliasName, updateAliasResponse.getAliasName());
assertEquals(description, updateAliasResponse.getDescription());
//Get Alias
getAliasResponse = client.getAlias(getAliasRequest);
assertEquals(HttpURLConnection.HTTP_OK, getAliasResponse.getStatus());
assertEquals(lastVersion, getAliasResponse.getVersionId());
assertEquals(aliasName, getAliasResponse.getAliasName());
assertEquals(description, getAliasResponse.getDescription());
// List Alias
ListAliasesRequest listAliasesRequest = new ListAliasesRequest(SERVICE_NAME);
ListAliasesResponse listAliasesResponse = client.listAliases(listAliasesRequest);
assertEquals(HttpURLConnection.HTTP_OK, listAliasesResponse.getStatus());
assertEquals(1, listAliasesResponse.getAliases().length);
assertEquals(aliasName, listAliasesResponse.getAliases()[0].getAliasName());
assertEquals(lastVersion, listAliasesResponse.getAliases()[0].getVersionId());
assertEquals(description, listAliasesResponse.getAliases()[0].getDescription());
// Delete Alias
DeleteAliasRequest deleteAliasRequest = new DeleteAliasRequest(SERVICE_NAME, aliasName);
DeleteAliasResponse deleteAliasResponse = client
.deleteAlias(deleteAliasRequest);
assertEquals(HttpURLConnection.HTTP_NO_CONTENT, deleteAliasResponse.getStatus());
}
public void testHTTPTriggerWithVersion() throws ClientException, IOException {
createService(SERVICE_NAME);
String lastVersion = "0";
// Create a function
String source = generatePythonHttpCode();
byte[] data = Util.createZipByteData("main.py", source);
// create function
createFunction(SERVICE_NAME, FUNCTION_NAME, "main.echo_handler", "python2.7", data);
// publish a version
PublishVersionRequest publishVersionRequest = new PublishVersionRequest(SERVICE_NAME);
PublishVersionResponse publishVersionResponse = client
.publishVersion(publishVersionRequest);
assertEquals(String.format("%d", Integer.parseInt(lastVersion) + 1),
publishVersionResponse.getVersionId());
for (HttpAuthType auth : new HttpAuthType[]{ANONYMOUS, FUNCTION}) {
// create http trigger
createHttpTriggerWithQualifier(TRIGGER_NAME, publishVersionResponse.getVersionId(),
auth, new HttpMethod[]{GET, POST});
// Invoke the function
HttpInvokeFunctionRequest request = new HttpInvokeFunctionRequest(SERVICE_NAME,
FUNCTION_NAME, auth, POST, "/test/path/中文");
request.setQualifier(publishVersionResponse.getVersionId());
request.addQuery("a", "1");
request.addQuery("aaa", null);
request.setHeader("Test-Header-Key", "testHeaderValue");
request.setHeader("Content-Type", "application/json");
request.setPayload(new String("data").getBytes());
InvokeFunctionResponse response = client.invokeFunction(request);
assertEquals(200, response.getStatus());
assertTrue(response.getHeader("Content-Type").startsWith("application/json"));
assertEquals("testHeaderValue", response.getHeader("Test-Header-Key"));
JsonObject jsonObject = gson
.fromJson(new String(response.getPayload()), JsonObject.class);
assertEquals("/test/path/中文", jsonObject.get("path").getAsString());
assertEquals("aaa=&a=1", jsonObject.get("queries").getAsString());
assertEquals("data", jsonObject.get("body").getAsString());
// delete trigger
deleteTrigger(SERVICE_NAME, FUNCTION_NAME, TRIGGER_NAME);
}
// Delete version
DeleteVersionRequest deleteVersionRequest = new DeleteVersionRequest(SERVICE_NAME,
publishVersionResponse.getVersionId());
DeleteVersionResponse deleteVersionResponse = client.deleteVersion(deleteVersionRequest);
assertEquals(HttpURLConnection.HTTP_NO_CONTENT, deleteVersionResponse.getStatus());
// Cleanups
client.deleteFunction(new DeleteFunctionRequest(SERVICE_NAME, FUNCTION_NAME));
client.deleteService(new DeleteServiceRequest(SERVICE_NAME));
}
    private void assertEqualsRouteConfig(RouteConfig routeConfig, RouteConfig routeConfigResp) {
        // Compare lengths first so a size mismatch fails the assertion
        // instead of throwing ArrayIndexOutOfBoundsException below.
        assertEquals(routeConfig.getRoutes().length, routeConfigResp.getRoutes().length);
        int len = routeConfigResp.getRoutes().length;
        for (int i = 0; i < len; i++) {
assertEquals(routeConfig.getRoutes()[i].getPath(),
routeConfigResp.getRoutes()[i].getPath());
assertEquals(routeConfig.getRoutes()[i].getServiceName(),
routeConfigResp.getRoutes()[i].getServiceName());
assertEquals(routeConfig.getRoutes()[i].getFunctionName(),
routeConfigResp.getRoutes()[i].getFunctionName());
assertEquals(routeConfig.getRoutes()[i].getQualifier(),
routeConfigResp.getRoutes()[i].getQualifier());
}
}
@Test
public void testClientCredentialProviderMock() {
// init CredentialProvider
String ak = "ak";
String sk = "sk";
String stsToken = "sts_token";
BasicSessionCredentials creds = new BasicSessionCredentials(ak, sk, stsToken);
// mock
InstanceProfileCredentialsProvider credsProvider = mock(InstanceProfileCredentialsProvider.class);
try {
when(credsProvider.getCredentials()).thenReturn(creds);
} catch (com.aliyuncs.exceptions.ClientException e) {
e.printStackTrace();
}
// init fc client
Config config = new Config(REGION, ACCOUNT_ID, credsProvider, false);
FunctionComputeClient fcClient = new FunctionComputeClient(config);
client = fcClient;
// Create a service
try {
createService(SERVICE_NAME);
        } catch (Exception e) {
            // Ignored: the mocked credentials are not expected to create a real
            // service; this test only verifies that the provider's credentials
            // are wired into the Config below.
        }
assertEquals(creds.getAccessKeyId(), config.getAccessKeyID());
assertEquals(creds.getAccessKeySecret(), config.getAccessKeySecret());
assertEquals(creds.getSessionToken(), config.getSecurityToken());
}
/**
* run only on aliyun ecs, and that ecs need bind a RAM Role
*/
public void testClientCredentialProvider() {
// init CredentialProvider
String roleName = "ECSAccessingFCTestRole";
InstanceProfileCredentialsProvider credsProvider = new InstanceProfileCredentialsProvider(roleName);
// init fc client
Config config = new Config(REGION, ACCOUNT_ID, credsProvider, false);
FunctionComputeClient fcClient = new FunctionComputeClient(config);
client = fcClient;
// Create a service
try {
createService(SERVICE_NAME, false);
} catch (Exception e) {
e.printStackTrace();
}
// create a function
try {
createFunction(FUNCTION_NAME);
} catch (Exception e) {
e.printStackTrace();
}
// Invoke the function with a string as function event parameter, Sync mode
InvokeFunctionRequest invkReq = new InvokeFunctionRequest(SERVICE_NAME, FUNCTION_NAME);
String payload = "Hello FunctionCompute!";
invkReq.setPayload(payload.getBytes());
InvokeFunctionResponse invkResp = client.invokeFunction(invkReq);
System.out.println(new String(invkResp.getContent()));
cleanUpFunctions(SERVICE_NAME);
cleanupService(SERVICE_NAME);
}
}
| [
"\"REGION\"",
"\"ENDPOINT\"",
"\"ROLE\"",
"\"STS_ROLE\"",
"\"ACCESS_KEY\"",
"\"SECRET_KEY\"",
"\"ACCOUNT_ID\"",
"\"CODE_BUCKET\"",
"\"INVOCATION_ROLE\"",
"\"LOG_PROJECT\"",
"\"LOG_STORE\"",
"\"VPC_ID\"",
"\"VSWITCH_IDS\"",
"\"SECURITY_GROUP_ID\"",
"\"USER_ID\"",
"\"GROUP_ID\"",
"\"NAS_SERVER_ADDR\"",
"\"NAS_MOUNT_DIR\"",
"\"PUBLIC_KEY_CERTIFICATE_01\"",
"\"PRIVATE_KEY_01\"",
"\"PUBLIC_KEY_CERTIFICATE_02\"",
"\"PRIVATE_KEY_02\"",
"\"JAEGER_ENDPOINT\""
]
| []
| [
"INVOCATION_ROLE",
"VPC_ID",
"NAS_SERVER_ADDR",
"LOG_PROJECT",
"SECRET_KEY",
"ACCOUNT_ID",
"NAS_MOUNT_DIR",
"JAEGER_ENDPOINT",
"ACCESS_KEY",
"REGION",
"PRIVATE_KEY_02",
"STS_ROLE",
"ENDPOINT",
"LOG_STORE",
"GROUP_ID",
"VSWITCH_IDS",
"SECURITY_GROUP_ID",
"PUBLIC_KEY_CERTIFICATE_02",
"PRIVATE_KEY_01",
"ROLE",
"PUBLIC_KEY_CERTIFICATE_01",
"USER_ID",
"CODE_BUCKET"
]
| [] | ["INVOCATION_ROLE", "VPC_ID", "NAS_SERVER_ADDR", "LOG_PROJECT", "SECRET_KEY", "ACCOUNT_ID", "NAS_MOUNT_DIR", "JAEGER_ENDPOINT", "ACCESS_KEY", "REGION", "PRIVATE_KEY_02", "STS_ROLE", "ENDPOINT", "LOG_STORE", "GROUP_ID", "VSWITCH_IDS", "SECURITY_GROUP_ID", "PUBLIC_KEY_CERTIFICATE_02", "PRIVATE_KEY_01", "ROLE", "PUBLIC_KEY_CERTIFICATE_01", "USER_ID", "CODE_BUCKET"] | java | 23 | 0 | |
main.py | import sys, os
if sys.version_info < (3, 4):  # tuple comparison covers both major and minor version
    print("Please use Python 3.4 or greater!")
    sys.exit(1)
import numpy as np
import cv2, io, time, argparse, re
from os import system
from os.path import isfile, join
from time import sleep
import multiprocessing as mp
from openvino.inference_engine import IENetwork, IEPlugin
import heapq
import threading
from imutils.video import VideoStream
#Django connect
api_path = '/home/pi/workspace/bbw3.2/bb' #os.path.abspath('~','workspace','bbw3.2','bb', 'bb')
# print(api_path) # home/foo/work
sys.path.append(api_path)
# print(sys.path)
import django
from django.conf import settings
from bb import settings as bbsettings
#os.environ['DJANGO_SETTINGS_MODULE']='bb.settings'
settings.configure(DATABASES=bbsettings.DATABASES, DEBUG=True)
django.setup()
#from captures.models import Capture
print ('starting...')
pipeline = None
lastresults = None
threads = []
processes = []
frameBuffer = None
results = None
fps = ""
detectfps = ""
framecount = 0
detectframecount = 0
time1 = 0
time2 = 0
cam = None
camera_mode = 0
camera_width = 320
camera_height = 240
window_name = ""
background_transparent_mode = 0
ssd_detection_mode = 1
face_detection_mode = 0
elapsedtime = 0.0
background_img = None
depth_sensor = None
depth_scale = 1.0
align_to = None
align = None
LABELS = [['background',
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor'],
['background', 'face']]
def camThread(LABELS, results, frameBuffer, camera_mode, camera_width, camera_height, background_transparent_mode, background_img, vidfps):
global fps
global detectfps
global lastresults
global framecount
global detectframecount
global time1
global time2
global cam
global window_name
global depth_scale
global align_to
global align
# Configure depth and color streams
# Or
# Open USB Camera streams
# # if camera_mode == 0:
# # pipeline = rs.pipeline()
# # config = rs.config()
# # config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, vidfps)
# # config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, vidfps)
# # profile = pipeline.start(config)
# # depth_sensor = profile.get_device().first_depth_sensor()
# # depth_scale = depth_sensor.get_depth_scale()
# # align_to = rs.stream.color
# # align = rs.align(align_to)
# # window_name = "RealSense"
# # elif camera_mode == 1:
# # cam = cv2.VideoCapture(0)
# # if cam.isOpened() != True:
# # print("USB Camera Open Error!!!")
# # sys.exit(0)
# # cam.set(cv2.CAP_PROP_FPS, vidfps)
# # cam.set(cv2.CAP_PROP_FRAME_WIDTH, camera_width)
# # cam.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_height)
# # window_name = "USB Camera"
cam = VideoStream(usePiCamera=True,
resolution=(640, 480),
framerate = 32).start()
window_name = "picam"
cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)
while True:
t1 = time.perf_counter()
# 0:= RealSense Mode
# 1:= USB Camera Mode
# USB Camera Stream Read
color_image = cam.read()
if frameBuffer.full():
frameBuffer.get()
frames = color_image
height = color_image.shape[0]
width = color_image.shape[1]
frameBuffer.put(color_image.copy())
res = None
if not results.empty():
res = results.get(False)
detectframecount += 1
imdraw = overlay_on_image(frames, res, LABELS, camera_mode, background_transparent_mode,
background_img, depth_scale=depth_scale, align=align)
lastresults = res
else:
continue
# imdraw = overlay_on_image(frames, lastresults, LABELS, camera_mode, background_transparent_mode,
# background_img, depth_scale=depth_scale, align=align)
cv2.imshow(window_name, cv2.resize(imdraw, (width, height)))
if cv2.waitKey(1)&0xFF == ord('q'):
# Stop streaming
if pipeline != None:
pipeline.stop()
sys.exit(0)
        ## FPS bookkeeping: time1 accumulates instantaneous playback FPS over the last 15 frames; time2 accumulates elapsed seconds for the detection-FPS average
framecount += 1
if framecount >= 15:
fps = "(Playback) {:.1f} FPS".format(time1/15)
detectfps = "(Detection) {:.1f} FPS".format(detectframecount/time2)
framecount = 0
detectframecount = 0
time1 = 0
time2 = 0
t2 = time.perf_counter()
elapsedTime = t2-t1
time1 += 1/elapsedTime
time2 += elapsedTime
# l = Search list
# x = Search target value
def searchlist(l, x, notfoundvalue=-1):
if x in l:
return l.index(x)
else:
return notfoundvalue
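# Illustrative examples (not from the original source):
#   searchlist([3, 0, 5], 5)  -> 2
#   searchlist([3, 0, 5], 7)  -> -1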
def async_infer(ncsworker):
#ncsworker.skip_frame_measurement()
while True:
ncsworker.predict_async()
class NcsWorker(object):
def __init__(self, devid, frameBuffer, results, camera_mode, camera_width, camera_height, number_of_ncs, vidfps, skpfrm):
self.devid = devid
self.frameBuffer = frameBuffer
self.model_xml = "./lrmodel/MobileNetSSD/MobileNetSSD_deploy.xml"
self.model_bin = "./lrmodel/MobileNetSSD/MobileNetSSD_deploy.bin"
self.camera_width = camera_width
self.camera_height = camera_height
self.num_requests = 4
self.inferred_request = [0] * self.num_requests
self.heap_request = []
self.inferred_cnt = 0
self.plugin = IEPlugin(device="MYRIAD")
self.net = IENetwork(model=self.model_xml, weights=self.model_bin)
self.input_blob = next(iter(self.net.inputs))
self.exec_net = self.plugin.load(network=self.net, num_requests=self.num_requests)
self.results = results
self.camera_mode = camera_mode
self.number_of_ncs = number_of_ncs
if self.camera_mode == 0:
self.skip_frame = skpfrm
else:
self.skip_frame = 0
self.roop_frame = 0
self.vidfps = vidfps
def image_preprocessing(self, color_image):
        prepimg = cv2.resize(color_image, (300, 300))  # SSD input resolution
        prepimg = prepimg - 127.5                      # zero-center pixel values
        prepimg = prepimg * 0.007843                   # ~1/127.5: scale to roughly [-1, 1]
prepimg = prepimg[np.newaxis, :, :, :] # Batch size axis add
prepimg = prepimg.transpose((0, 3, 1, 2)) # NHWC to NCHW
return prepimg
    def predict_async(self):
        # Scheduling sketch: up to self.num_requests (4) inference requests run
        # concurrently on one NCS. self.inferred_request flags busy request
        # slots; self.heap_request, keyed by submission count, pops the oldest
        # in-flight request and, once it has finished, pushes its output to the
        # results queue, preserving submission order.
try:
if self.frameBuffer.empty():
return
self.roop_frame += 1
if self.roop_frame <= self.skip_frame:
self.frameBuffer.get()
return
self.roop_frame = 0
prepimg = self.image_preprocessing(self.frameBuffer.get())
reqnum = searchlist(self.inferred_request, 0)
if reqnum > -1:
self.exec_net.start_async(request_id=reqnum, inputs={self.input_blob: prepimg})
self.inferred_request[reqnum] = 1
self.inferred_cnt += 1
if self.inferred_cnt == sys.maxsize:
self.inferred_request = [0] * self.num_requests
self.heap_request = []
self.inferred_cnt = 0
heapq.heappush(self.heap_request, (self.inferred_cnt, reqnum))
cnt, dev = heapq.heappop(self.heap_request)
if self.exec_net.requests[dev].wait(0) == 0:
self.exec_net.requests[dev].wait(-1)
out = self.exec_net.requests[dev].outputs["detection_out"].flatten()
self.results.put([out])
self.inferred_request[dev] = 0
else:
heapq.heappush(self.heap_request, (cnt, dev))
except:
import traceback
traceback.print_exc()
def inferencer(results, frameBuffer, ssd_detection_mode, face_detection_mode, camera_mode, camera_width, camera_height, number_of_ncs, vidfps, skpfrm):
# Init infer threads
threads = []
for devid in range(number_of_ncs):
thworker = threading.Thread(target=async_infer, args=(NcsWorker(devid, frameBuffer, results, camera_mode, camera_width, camera_height, number_of_ncs, vidfps, skpfrm),))
thworker.start()
threads.append(thworker)
for th in threads:
th.join()
def overlay_on_image(frames, object_infos, LABELS, camera_mode, background_transparent_mode, background_img, depth_scale=1.0, align=None):
try:
# 0:=RealSense Mode, 1:=USB Camera Mode
color_image = frames
        if object_infos is None:
# 0:= No background transparent, 1:= Background transparent
if background_transparent_mode == 0:
return color_image
elif background_transparent_mode == 1:
return background_img
# Show images
height = color_image.shape[0]
width = color_image.shape[1]
entire_pixel = height * width
occupancy_threshold = 0.9
if background_transparent_mode == 0:
img_cp = color_image.copy()
elif background_transparent_mode == 1:
img_cp = background_img.copy()
for (object_info, LABEL) in zip(object_infos, LABELS):
drawing_initial_flag = True
for box_index in range(100):
if object_info[box_index + 1] == 0.0:
break
base_index = box_index * 7
if (not np.isfinite(object_info[base_index]) or
not np.isfinite(object_info[base_index + 1]) or
not np.isfinite(object_info[base_index + 2]) or
not np.isfinite(object_info[base_index + 3]) or
not np.isfinite(object_info[base_index + 4]) or
not np.isfinite(object_info[base_index + 5]) or
not np.isfinite(object_info[base_index + 6])):
continue
x1 = max(0, int(object_info[base_index + 3] * height))
y1 = max(0, int(object_info[base_index + 4] * width))
x2 = min(height, int(object_info[base_index + 5] * height))
y2 = min(width, int(object_info[base_index + 6] * width))
object_info_overlay = object_info[base_index:base_index + 7]
# 0:= No background transparent, 1:= Background transparent
if background_transparent_mode == 0:
min_score_percent = 60
elif background_transparent_mode == 1:
min_score_percent = 20
source_image_width = width
source_image_height = height
base_index = 0
class_id = object_info_overlay[base_index + 1]
percentage = int(object_info_overlay[base_index + 2] * 100)
if (percentage <= min_score_percent):
continue
box_left = int(object_info_overlay[base_index + 3] * source_image_width)
box_top = int(object_info_overlay[base_index + 4] * source_image_height)
box_right = int(object_info_overlay[base_index + 5] * source_image_width)
box_bottom = int(object_info_overlay[base_index + 6] * source_image_height)
# 0:=RealSense Mode, 1:=USB Camera Mode
label_text = LABEL[int(class_id)] + " (" + str(percentage) + "%)"
box_color = (255, 128, 0)
box_thickness = 1
cv2.rectangle(img_cp, (box_left, box_top), (box_right, box_bottom), box_color, box_thickness)
label_background_color = (125, 175, 75)
label_text_color = (255, 255, 255)
label_size = cv2.getTextSize(label_text, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)[0]
label_left = box_left
label_top = box_top - label_size[1]
if (label_top < 1):
label_top = 1
label_right = label_left + label_size[0]
label_bottom = label_top + label_size[1]
cv2.rectangle(img_cp, (label_left - 1, label_top - 1), (label_right + 1, label_bottom + 1), label_background_color, -1)
cv2.putText(img_cp, label_text, (label_left, label_bottom), cv2.FONT_HERSHEY_SIMPLEX, 0.5, label_text_color, 1)
cv2.putText(img_cp, fps, (width-170,15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (38,0,255), 1, cv2.LINE_AA)
cv2.putText(img_cp, detectfps, (width-170,30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (38,0,255), 1, cv2.LINE_AA)
return img_cp
except:
import traceback
traceback.print_exc()
if __name__ == '__main__':
print ('__main__')
parser = argparse.ArgumentParser()
    parser.add_argument('-mod','--mode',dest='camera_mode',type=int,default=0,help='Camera Mode. (0:=RealSense Mode, 1:=USB Camera Mode. Default=0)')
parser.add_argument('-wd','--width',dest='camera_width',type=int,default=320,help='Width of the frames in the video stream. (USB Camera Mode Only. Default=320)')
parser.add_argument('-ht','--height',dest='camera_height',type=int,default=240,help='Height of the frames in the video stream. (USB Camera Mode Only. Default=240)')
parser.add_argument('-tp','--transparent',dest='background_transparent_mode',type=int,default=0,help='TransparentMode. (RealSense Mode Only. 0:=No background transparent, 1:=Background transparent)')
parser.add_argument('-sd','--ssddetection',dest='ssd_detection_mode',type=int,default=1,help='[Future functions] SSDDetectionMode. (0:=Disabled, 1:=Enabled Default=1)')
parser.add_argument('-fd','--facedetection',dest='face_detection_mode',type=int,default=0,help='[Future functions] FaceDetectionMode. (0:=Disabled, 1:=Full, 2:=Short Default=0)')
parser.add_argument('-numncs','--numberofncs',dest='number_of_ncs',type=int,default=1,help='Number of NCS. (Default=1)')
parser.add_argument('-vidfps','--fpsofvideo',dest='fps_of_video',type=int,default=30,help='FPS of Video. (USB Camera Mode Only. Default=30)')
parser.add_argument('-skpfrm','--skipframe',dest='number_of_frame_skip',type=int,default=7,help='Number of frame skip. (RealSense Mode Only. Default=7)')
args = parser.parse_args()
print ('past args')
camera_mode = args.camera_mode
camera_width = args.camera_width
camera_height = args.camera_height
background_transparent_mode = args.background_transparent_mode
ssd_detection_mode = args.ssd_detection_mode
face_detection_mode = args.face_detection_mode
number_of_ncs = args.number_of_ncs
vidfps = args.fps_of_video
skpfrm = args.number_of_frame_skip
# 0:=RealSense Mode, 1:=USB Camera Mode
if camera_mode != 0 and camera_mode != 1:
print("Camera Mode Error!! " + str(camera_mode))
sys.exit(0)
if camera_mode != 0 and background_transparent_mode == 1:
background_transparent_mode = 0
if background_transparent_mode == 1:
background_img = np.zeros((camera_height, camera_width, 3), dtype=np.uint8)
if face_detection_mode != 0:
ssd_detection_mode = 0
if ssd_detection_mode == 0 and face_detection_mode != 0:
del(LABELS[0])
try:
        mp.set_start_method('forkserver')  # children start from a clean forkserver process instead of forking the full parent state
frameBuffer = mp.Queue(10)
results = mp.Queue()
# Start streaming
p = mp.Process(target=camThread,
args=(LABELS, results, frameBuffer, camera_mode, camera_width, camera_height, background_transparent_mode, background_img, vidfps),
daemon=True)
p.start()
processes.append(p)
# Start detection MultiStick
# Activation of inferencer
p = mp.Process(target=inferencer,
args=(results, frameBuffer, ssd_detection_mode, face_detection_mode, camera_mode, camera_width, camera_height, number_of_ncs, vidfps, skpfrm),
daemon=True)
p.start()
processes.append(p)
while True:
sleep(1)
except:
import traceback
traceback.print_exc()
finally:
for p in range(len(processes)):
processes[p].terminate()
print("\n\nFinished\n\n")
| []
| []
| [
"DJANGO_SETTINGS_MODULE"
]
| [] | ["DJANGO_SETTINGS_MODULE"] | python | 1 | 0 | |
appium-example/src/test/java/example/android/Tests/simpleSwagLabsAndroid.java | package example.android.Tests;
import io.appium.java_client.android.AndroidDriver;
import org.openqa.selenium.JavascriptExecutor;
import org.openqa.selenium.remote.DesiredCapabilities;
import org.testng.ITestResult;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.Test;
import java.net.URL;
import java.util.concurrent.TimeUnit;
public class simpleSwagLabsAndroid {
public String seleniumURI = "@ondemand.eu-central-1.saucelabs.com:443";
public String username = System.getenv("SAUCE_USERNAME");
public String accesskey = System.getenv("SAUCE_ACCESS_KEY");
public AndroidDriver driver;
@Test
public void launchSwagLabs() throws Exception {
DesiredCapabilities caps = new DesiredCapabilities();
caps.setCapability("platformName", "Android");
caps.setCapability("platformVersion", "10");
//caps.setCapability("deviceName", "Samsung Galaxy S9 WQHD GoogleAPI Emulator");
caps.setCapability("appiumVersion", "1.17.1");
caps.setCapability("browserName", "");
caps.setCapability("deviceOrientation", "portrait");
caps.setCapability("build", "Andy Simple Swag Android Test");
caps.setCapability("app", "storage:78125083-fc97-4017-916c-b8ec3b7db175");
System.out.println("** creating driver **");
driver = new AndroidDriver(new URL("https://" + username + ":" + accesskey + seleniumURI + "/wd/hub"), caps);
System.out.println("** driver created **");
//driver.manage().timeouts().implicitlyWait(30, TimeUnit.SECONDS);
System.out.println("** Swag Splash **");
Thread.sleep(10000);
}
@AfterMethod
public void teardown(ITestResult result) {
System.out.println("** setting success flag on Sauce **");
((JavascriptExecutor) driver).executeScript("sauce:job-result=" + (result.isSuccess() ? "passed" : "failed"));
System.out.println("** quitting the driver **");
driver.quit();
}
} | [
"\"SAUCE_USERNAME\"",
"\"SAUCE_ACCESS_KEY\""
]
| []
| [
"SAUCE_USERNAME",
"SAUCE_ACCESS_KEY"
]
| [] | ["SAUCE_USERNAME", "SAUCE_ACCESS_KEY"] | java | 2 | 0 | |
prebuilder/utils/WithEnv.py | import os
class WithEnv:
__slots__ = ("patch", "backup")
def __init__(self, **kwargs):
self.patch = kwargs
self.backup = None
def __enter__(self):
self.backup = os.environ.copy()
        os.environ.update(self.patch)
return self
    def __exit__(self, exc_type, exc_value, traceback):
        # Restore in place: rebinding os.environ to a plain dict would stop
        # changes from propagating to the process environment.
        os.environ.clear()
        os.environ.update(self.backup)
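# Minimal usage sketch (illustrative; "MY_VAR" is a hypothetical variable):
#
#     with WithEnv(MY_VAR="1"):
#         assert os.environ["MY_VAR"] == "1"
#     # on exit, os.environ is restored to its previous state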
| []
| []
| []
| [] | [] | python | 0 | 0 | |
script/smoothing/fft_feats.py | """
Old version of MagPhase vocoder, shipped with the LibWavGen library (Snickery).
@author: Felipe Espic
"""
#==============================================================================
# IMPORTS
#==============================================================================
# Standard:--------------------------------------------------------------------
import numpy as np
import libutils as lu
import libaudio as la
import soundfile as sf
import os
# Additional:------------------------------------------------------------------
from scipy import interpolate
from scipy import signal
#==============================================================================
# BODY
#==============================================================================
# Todo, add a input param to control the mix:
def voi_noise_window(length):
return np.bartlett(length)**2.5 # 2.5 optimum # max: 4
#return np.bartlett(length)**4
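# Illustrative note (not in the original): raising the Bartlett (triangular)
# window to the 2.5 power keeps the peak at the frame centre (the pitch mark)
# while shrinking the effective support, concentrating the voiced-noise energy
# there. Elementwise, voi_noise_window(n) <= np.bartlett(n) at every sample.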
#==============================================================================
# If win_func == None, no window is applied (i.e., boxcar)
# win_func: None, window function, or list of window functions.
def windowing(v_sig, v_pm, win_func=np.hanning):
n_smpls = np.size(v_sig)
# Round to int:
v_pm = lu.round_to_int(v_pm)
# Pitch Marks Extension:
v_pm_plus = np.hstack((0,v_pm, (n_smpls-1)))
n_pm = np.size(v_pm_plus) - 2
v_lens = np.zeros(n_pm, dtype=int)
v_shift = np.zeros(n_pm, dtype=int)
v_rights = np.zeros(n_pm, dtype=int)
l_frames = []
for f in xrange(0,n_pm):
left_lim = v_pm_plus[f]
pm = v_pm_plus[f+1]
right_lim = v_pm_plus[f+2]
# Curr raw frame:
v_frm = v_sig[left_lim:(right_lim+1)]
# win lengts:
left_len = pm - left_lim
right_len = right_lim - pm
# Apply window:
if isinstance(win_func, list):
v_win = la.gen_non_symmetric_win(left_len, right_len, win_func[f])
v_frm = v_frm * v_win
        elif callable(win_func): # if it is a function:
            v_win = la.gen_non_symmetric_win(left_len, right_len, win_func)
            v_frm = v_frm * v_win
        elif win_func is None: # no window applied (boxcar)
            pass
# Store:
l_frames.append(v_frm)
v_lens[f] = len(v_frm)
v_shift[f] = left_len
v_rights[f] = right_len
return l_frames, v_lens, v_pm_plus, v_shift, v_rights
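# Framing layout sketch (illustrative, not from the original source): with the
# extended marks v_pm_plus = [0, pm_0, ..., pm_{n-1}, len(sig)-1], frame f
# spans (v_pm_plus[f], v_pm_plus[f+2]) and is centred on the mark
# v_pm_plus[f+1]; v_shift[f] is its left half-length, v_rights[f] the right.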
def analysis(v_in_sig, fs):
# Pitch Marks:-------------------------------------------------------------
v_pm = la.get_pitch_marks(v_in_sig, fs)
v_pm_smpls = v_pm * fs
# Windowing:---------------------------------------------------------------
l_frms, v_lens, v_pm_plus, v_shift, v_rights = windowing(v_in_sig, v_pm_smpls)
# FFT:---------------------------------------------------------------------
len_max = np.max(v_lens) # max frame length in file
nFFT = la.next_pow_of_two(len_max)
print "current nFFT: %d" % (nFFT)
n_frms = len(l_frms)
m_frms = np.zeros((n_frms, nFFT))
for f in xrange(n_frms):
m_frms[f,0:v_lens[f]] = l_frms[f]
m_fft = np.fft.fft(m_frms)
m_sp = np.absolute(m_fft)
m_ph = np.angle(m_fft)
# Remove redundant information:--------------------------------------------
m_sp = la.remove_hermitian_half(m_sp)
m_ph = la.remove_hermitian_half(m_ph)
return m_sp, m_ph, v_pm_plus
def synthesis(m_sp, m_ph, v_pm_plus):
    # Mirroring second half of spectrum:
m_sp = la.add_hermitian_half(m_sp)
m_ph = la.add_hermitian_half(m_ph, data_type='phase')
# To complex:
m_fft = m_sp * np.exp(m_ph * 1j)
# To time domain:
m_frms = np.fft.ifft(m_fft).real
# OLA:
n_frms = len(v_pm_plus) - 2
v_out_sig = np.zeros(v_pm_plus[-1] + 1)
for f in xrange(n_frms):
strt = v_pm_plus[f]
curr_len = v_pm_plus[f+2] - strt + 1
v_out_sig[strt:(strt+curr_len)] += m_frms[f,0:curr_len]
return v_out_sig
#==============================================================================
# From (after) 'analysis_with_del_comp':
# new: returns voi/unv decision.
# new: variable length FFT
def analysis_with_del_comp_from_est_file_2(v_in_sig, est_file, fs):
# Pitch Marks:-------------------------------------------------------------
v_pm_sec, v_voi = la.read_reaper_est_file(est_file, check_len_smpls=len(v_in_sig), fs=fs)
v_pm_smpls = v_pm_sec * fs
# Windowing:---------------------------------------------------------------
l_frms, v_lens, v_pm_plus, v_shift, v_rights = windowing(v_in_sig, v_pm_smpls)
n_frms = len(l_frms)
l_sp = []
l_ph = []
for f in xrange(n_frms):
v_frm = l_frms[f]
        # un-delay the signal: rotate circularly so the pitch mark sits at sample 0:
v_frm = np.hstack((v_frm[v_shift[f]:], v_frm[0:v_shift[f]]))
v_fft = np.fft.fft(v_frm)
v_sp = np.absolute(v_fft)
v_ph = np.angle(v_fft)
# Remove second (hermitian) half:
v_sp = la.remove_hermitian_half(v_sp)
v_ph = la.remove_hermitian_half(v_ph)
# Storing:
l_sp.append(v_sp)
l_ph.append(v_ph)
return l_sp, l_ph, v_shift, v_voi
#=========================================================================================
def analysis_with_del_comp_from_pm(v_in_sig, v_pm_smpls, nFFT, win_func=np.hanning):
# Windowing:
l_frms, v_lens, v_pm_plus, v_shift, v_rights = windowing(v_in_sig, v_pm_smpls, win_func=win_func)
#import ipdb; ipdb.set_trace() # breakpoint 2c53771f //
# FFT:---------------------------------------------------------------------
len_max = np.max(v_lens) # max frame length in file
if nFFT < len_max:
raise ValueError("nFFT (%d) is shorter than the maximum frame length (%d)" % (nFFT,len_max))
n_frms = len(l_frms)
m_frms = np.zeros((n_frms, nFFT))
# For paper:--------------------------------
#m_frms_orig = np.zeros((n_frms, nFFT))
# ------------------------------------------
for f in xrange(n_frms):
m_frms[f,0:v_lens[f]] = l_frms[f]
# un-delay the signal:
v_curr_frm = m_frms[f,:]
# For paper:----------------------------
#m_frms_orig[f,:] = v_curr_frm
# --------------------------------------
m_frms[f,:] = np.hstack((v_curr_frm[v_shift[f]:], v_curr_frm[0:v_shift[f]]))
# For paper:----------------------------
#m_fft_orig = np.fft.fft(m_frms_orig)
#m_ph_orig = np.angle(m_fft_orig)
# ---------------------------------------
m_fft = np.fft.fft(m_frms)
m_sp = np.absolute(m_fft)
m_ph = np.angle(m_fft)
# For paper:----------------------------
# plotm(np.log(m_sp))
'''
i = 88
nbins = 200
holdon()
plot(m_ph_orig[i,:nbins], '-b')
plot(m_ph_orig[i+1,:nbins], '-r')
plot(m_ph_orig[i+1,:nbins] - m_ph_orig[i,:nbins], '-k')
holdoff()
#import matplotlib.pyplot as plt
i = 88
nbins = 2049
holdon()
plot(m_ph[i,:nbins], '-b')
plot(m_ph[i+1,:nbins], '-r')
plot(m_ph[i+1,:nbins] - m_ph[i,:nbins], '-k')
holdoff()
'''
# -------------------------------------
# Remove redundant second half:--------------------------------------------
m_sp = la.remove_hermitian_half(m_sp)
m_ph = la.remove_hermitian_half(m_ph)
m_fft = la.remove_hermitian_half(m_fft)
return m_sp, m_ph, v_shift, m_frms, m_fft, v_lens
#==============================================================================
# From (after) 'analysis_with_del_comp':
# new: returns voi/unv decision.
def analysis_with_del_comp_from_est_file(v_in_sig, est_file, nfft, fs, win_func=np.hanning, b_ph_unv_zero=False):
# Pitch Marks:-------------------------------------------------------------
v_pm_sec, v_voi = la.read_reaper_est_file(est_file, check_len_smpls=len(v_in_sig), fs=fs)
v_pm_smpls = v_pm_sec * fs
m_sp, m_ph, v_shift, m_frms, m_fft, v_lens = analysis_with_del_comp_from_pm(v_in_sig, v_pm_smpls, nfft, win_func=win_func)
if b_ph_unv_zero:
m_ph = m_ph * v_voi[:,None]
return m_sp, m_ph, v_shift, v_voi, m_frms, m_fft
#==============================================================================
def analysis_with_del_comp(v_in_sig, nFFT, fs):
# Pitch Marks:-------------------------------------------------------------
v_pm = la.get_pitch_marks(v_in_sig, fs)
v_pm_smpls = v_pm * fs
# Windowing:---------------------------------------------------------------
l_frms, v_lens, v_pm_plus, v_shift, v_rights = windowing(v_in_sig, v_pm_smpls)
# FFT:---------------------------------------------------------------------
len_max = np.max(v_lens) # max frame length in file
if nFFT < len_max:
raise ValueError("nFFT (%d) is shorter than the maximum frame length (%d)" % (nFFT,len_max))
n_frms = len(l_frms)
m_frms = np.zeros((n_frms, nFFT))
for f in xrange(n_frms):
m_frms[f,0:v_lens[f]] = l_frms[f]
# un-delay the signal:
v_curr_frm = m_frms[f,:]
m_frms[f,:] = np.hstack((v_curr_frm[v_shift[f]:], v_curr_frm[0:v_shift[f]]))
m_fft = np.fft.fft(m_frms)
m_sp = np.absolute(m_fft)
m_ph = np.angle(m_fft)
# Remove redundant second half:--------------------------------------------
m_sp = la.remove_hermitian_half(m_sp)
m_ph = la.remove_hermitian_half(m_ph)
return m_sp, m_ph, v_shift
'''
#==============================================================================
def synthesis_with_del_comp(m_sp, m_ph, v_shift):
# Enforce int:
v_shift = lu.round_to_int(v_shift)
# Mirorring second half of spectrum:
m_sp = la.add_hermitian_half(m_sp)
m_ph = la.add_hermitian_half(m_ph, data_type='phase')
# To complex:
m_fft = m_sp * np.exp(m_ph * 1j)
# To time domain:
m_frms = np.fft.ifft(m_fft).real
# OLA:---------------------------------------------------------------------
n_frms, nFFT = np.shape(m_sp)
    v_out_sig = np.zeros(np.sum(v_shift[:-1]) + nFFT + 1) # TODO: figure out how to trim later (this length is probably wrong!)
# Metodo 2:----------------------------------------------------------------
# Flip frms:
m_frms = np.fft.fftshift(m_frms, axes=1)
strt = 0
for f in xrange(1,n_frms):
# wrap frame:
v_curr_frm = m_frms[f-1,:]
# Debug: Simple Window Correction:--------
# v_win_shrt = la.gen_wider_window(np.hanning,v_shift[f-1], v_shift[f], 0.5)
#
# mid_nx = nFFT / 2
# v_curr_frm[:(mid_nx-v_shift[f-1])] = 0
# v_curr_frm[(mid_nx+v_shift[f]):] = 0
# Add frames:
v_out_sig[strt:(strt+nFFT)] += v_curr_frm
strt += v_shift[f]
# Cut remainders (TODO!!) (only beginning done!):
v_out_sig = v_out_sig[(nFFT/2 - v_shift[0]):]
return v_out_sig
'''
#==============================================================================
def synthesis_with_del_comp(m_sp, m_ph, v_shift, win_func=np.hanning, win_flat_to_len=0.3):
# Enforce int:
v_shift = lu.round_to_int(v_shift)
    # Mirroring second half of spectrum:
m_sp = la.add_hermitian_half(m_sp)
m_ph = la.add_hermitian_half(m_ph, data_type='phase')
# To complex:
m_fft = m_sp * np.exp(m_ph * 1j)
# To time domain:
m_frms = np.fft.ifft(m_fft).real
# OLA:---------------------------------------------------------------------
n_frms, nFFT = np.shape(m_sp)
    #v_out_sig = np.zeros(np.sum(v_shift[:-1]) + nFFT + 1) # TODO: figure out how to trim later (this length is probably wrong!)
v_out_sig = np.zeros(la.shift_to_pm(v_shift)[-1] + nFFT)
# Metodo 2:----------------------------------------------------------------
# Flip frms:
m_frms = np.fft.fftshift(m_frms, axes=1)
strt = 0
v_win = np.zeros(nFFT)
mid_frm_nx = nFFT / 2
for f in xrange(1,n_frms):
# wrap frame:
v_curr_frm = m_frms[f-1,:]
# Window Correction:
if win_flat_to_len < 1:
v_win[:] = 0
v_win_shrt = la.gen_wider_window(win_func,v_shift[f-1], v_shift[f], win_flat_to_len)
v_win[(mid_frm_nx-v_shift[f-1]):(mid_frm_nx+v_shift[f])] = v_win_shrt
rms_orig = np.sqrt(np.mean(v_curr_frm**2))
v_curr_frm = v_curr_frm * v_win
rms_after_win = np.sqrt(np.mean(v_curr_frm**2))
# Energy compensation:
if rms_after_win > 0:
v_curr_frm = v_curr_frm * rms_orig / rms_after_win
# Add frames:
v_out_sig[strt:(strt+nFFT)] += v_curr_frm
strt += v_shift[f]
# Cut remainders (TODO!!) (only beginning done!):
v_out_sig = v_out_sig[(nFFT/2 - v_shift[0]):]
return v_out_sig
#==============================================================================
def ph_enc(m_ph):
m_phs = np.sin(m_ph)
m_phc = np.cos(m_ph)
return m_phs, m_phc
# mode = 'sign': Relies on the cosine value, and uses sine's sign to disambiguate.
# = 'angle': Computes the angle between phs (imag) and phc (real)
def ph_dec(m_phs, m_phc, mode='angle'):
if mode == 'sign':
m_bs = np.arcsin(m_phs)
m_bc = np.arccos(m_phc)
m_ph = np.sign(m_bs) * np.abs(m_bc)
elif mode == 'angle':
m_ph = np.angle(m_phc + m_phs * 1j)
return m_ph
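# Round-trip sanity sketch (illustrative, not part of the original code):
# encoding a wrapped phase and decoding it should recover the input, since
# angle(cos(ph) + 1j*sin(ph)) == ph for ph in (-pi, pi].
#
#     m_ph = np.random.uniform(-np.pi, np.pi, size=(3, 8))
#     m_phs, m_phc = ph_enc(m_ph)
#     assert np.allclose(ph_dec(m_phs, m_phc, mode='angle'), m_ph)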
#------------------------------------------------------------------------------
# NOTE: Not finished!!!!
def pm_interp_segment(v_pm, p1nx, p2nx):
# plot(np.diff(v_pm))
p1 = v_pm[p1nx]
p2 = v_pm[p2nx]
d1 = p1 - v_pm[p1nx-1]
d2 = p2 - v_pm[p2nx-1]
a_ = (d1 - d2) / (p1 - p2)
b_ = d1 - a_ * p1
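    # Derivation sketch (added for clarity): the local period is modelled as a
    # linear function of position, d(p) = a_*p + b_, fitted to the endpoint
    # periods d1 at p1 and d2 at p2. The next mark pc must then satisfy
    # pc - p_prev = d(pc) = a_*pc + b_, which solves to
    # pc = (b_ + p_prev) / (1 - a_), as computed in the loop below.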
# Gen:
v_pm_seg = np.zeros(p2nx-p1nx) # maybe not the best?? check!
p_prev = p1
for i in xrange(len(v_pm_seg)):
pc = ( b_ + p_prev ) / (1 - a_)
v_pm_seg[i] = pc
if v_pm_seg[i] >= v_pm[p2nx-1]:
break
p_prev = pc
    return v_pm_seg
'''
# Done after: 'analysis_with_del_comp_and_ph_encoding'=========================
# NOTE: Not finished!!!!
def analysis_with_del_comp_ph_enc_and_pm_interp(est_file, wav_file, nFFT, mvf):
# Read wav file:
v_sig, fs = sf.read(wav_file)
# Read est file:
v_pm_sec, v_voi = la.read_reaper_est_file(est_file, check_len_smpls=len(v_sig), fs=fs) # ??
v_pm_smpls = v_pm_sec * fs
n_frms = len(v_pm_smpls)
    p1nx = 101 # note: last voiced
    p2nx = 132 # note: second voiced
pm_interp_segment(v_pm_smpls, p1nx, p2nx)
# plot(np.diff(v_pm_smpls))
# To diff:
v_pm_diff = np.diff(v_pm_smpls)
v_pm_diff = np.hstack((v_pm_smpls[0], v_pm_diff))
# Interp in diff domain:
v_voi_nxs = np.where(v_voi == 1)[0]
fun_intrp = interpolate.interp1d(v_voi_nxs, v_pm_diff[v_voi_nxs], bounds_error=False,fill_value='extrapolate', kind='linear')
v_pm_sec_intrp = fun_intrp(np.arange(n_frms))
# Plot:
pl.figure(3)
pl.ioff()
pl.plot(v_voi_nxs, v_pm_diff[v_voi_nxs], '-b')
pl.plot(v_pm_diff, '-r')
pl.plot(v_pm_sec_intrp, '-g')
pl.show()
# To pm:
v_pm_smpls_rec = np.cumsum(v_pm_diff)
plot(np.diff(v_pm_sec_intrp))
return
'''
#==============================================================================
# From 'analysis_with_del_comp_and_ph_encoding_from_files'
# f0_type: 'f0', 'lf0'
def analysis_with_del_comp__ph_enc__f0_norm__from_files(wav_file, est_file, nFFT, mvf, f0_type='f0', b_ph_unv_zero=False, win_func=np.hanning):
m_spmgc, m_phs_mgc, m_phc_mgc, v_shift, v_voi, fs = analysis_with_del_comp_and_ph_encoding_from_files(wav_file, est_file, nFFT, mvf, b_ph_unv_zero=b_ph_unv_zero, win_func=win_func)
v_f0 = shift_to_f0(v_shift, v_voi, fs, out=f0_type)
return m_spmgc, m_phs_mgc, m_phc_mgc, v_shift, v_voi, v_f0, fs
#==============================================================================
def get_fft_params_from_complex_data(m_fft):
m_mag = np.absolute(m_fft)
m_real = m_fft.real / m_mag # = p_phc
m_imag = m_fft.imag / m_mag # = p_phs
return m_mag, m_real, m_imag
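# Note (added): since m_fft = m_mag * exp(1j*ph), the normalised parts are
# m_real == cos(ph) and m_imag == sin(ph), i.e. the same (phc, phs) encoding
# that ph_enc() produces from an explicit phase matrix.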
#=======================================================================================
def analysis_with_del_comp__ph_enc__f0_norm__from_files_raw(wav_file, est_file, nFFT, win_func=np.hanning):
'''
This function does not perform any Mel warping or data compression
'''
# Read wav file:-----------------------------------------------------------
v_in_sig, fs = sf.read(wav_file)
# Analysis:----------------------------------------------------------------
m_sp_dummy, m_ph_dummy, v_shift, v_voi, m_frms, m_fft = analysis_with_del_comp_from_est_file(v_in_sig, est_file, nFFT, fs, win_func=win_func)
# Get fft-params:----------------------------------------------------------
m_mag, m_real, m_imag = get_fft_params_from_complex_data(m_fft)
    return m_mag, m_real, m_imag, v_shift, v_voi, fs  # fs returned as well so callers can convert shifts to F0
#==============================================================================
# v2: New fft feats (mag, real, imag) in Mel-frequency scale.
# Selection of number of coeffs.
# mvf: Maximum voiced frequency for phase encoding
# After 'analysis_with_del_comp_and_ph_encoding'
# new: returns voi/unv decision.
# This function performs Mel Warping and vector cutting (for phase)
def analysis_with_del_comp__ph_enc__f0_norm__from_files2(wav_file, est_file, nFFT, mvf, f0_type='f0', win_func=np.hanning, mag_mel_nbins=60, cmplx_ph_mel_nbins=45):
    m_mag, m_real, m_imag, v_shift, v_voi, fs = analysis_with_del_comp__ph_enc__f0_norm__from_files_raw(wav_file, est_file, nFFT, win_func=win_func)
# Mel warp:----------------------------------------------------------------
m_mag_mel = la.sp_mel_warp(m_mag, mag_mel_nbins, alpha=0.77, in_type=3)
m_mag_mel_log = np.log(m_mag_mel)
# Phase:-------------------------------------------------------------------
m_imag_mel = la.sp_mel_warp(m_imag, mag_mel_nbins, alpha=0.77, in_type=2)
m_real_mel = la.sp_mel_warp(m_real, mag_mel_nbins, alpha=0.77, in_type=2)
# Cutting phase vectors:
m_imag_mel = m_imag_mel[:,:cmplx_ph_mel_nbins]
m_real_mel = m_real_mel[:,:cmplx_ph_mel_nbins]
m_real_mel = np.clip(m_real_mel, -1, 1)
m_imag_mel = np.clip(m_imag_mel, -1, 1)
# F0:----------------------------------------------------------------------
v_f0 = shift_to_f0(v_shift, v_voi, fs, out=f0_type)
return m_mag_mel_log, m_real_mel, m_imag_mel, v_shift, v_f0, fs
# mvf: Maximum voiced frequency for phase encoding=============================
# After 'analysis_with_del_comp_and_ph_encoding'
# new: returns voi/unv decision.
def analysis_with_del_comp_and_ph_encoding_from_files(wav_file, est_file, nFFT, mvf, b_ph_unv_zero=False, win_func=np.hanning):
# Read wav file:
v_in_sig, fs = sf.read(wav_file)
m_sp, m_ph, v_shift, v_voi, m_frms = analysis_with_del_comp_from_est_file(v_in_sig, est_file, nFFT, fs, b_ph_unv_zero=b_ph_unv_zero, win_func=win_func)
'''
# Debug:
fb = la.MelFBank(2049, 60, 0.77)
m_sp_mel = np.dot(m_sp, fb.m_warp)
m_sp_rec = np.dot(m_sp_mel, fb.m_unwarp)
holdon()
nx = 90# 90
plot(la.db(m_sp[nx,:]),'-b')
plot(la.db(m_sp_rec[nx,:]),'-r')
holdoff()
'''
# Phase encoding:
m_phs, m_phc = ph_enc(m_ph)
# Sp to MGC:
m_spmgc = la.sp_to_mcep(m_sp)
'''
# Debug:
fb = la.MelFBank(2049, 60, 0.77)
m_sp_mel = np.dot(m_spmgc, fb.m_trans.T)
m_spmgc2 = np.dot(np.log(m_sp), fb.m_trans)
#m_spmgc2 = np.dot(m_sp, fb.m_trans)
holdon()
nx = 90# 90
plot(m_spmgc[nx,:],'-b')
plot(m_spmgc2[nx,:],'-r')
holdoff()
holdon()
nx = 90# 90
plot(np.log(m_sp[nx,:]),'-b')
plot(m_sp_mel[nx,:],'-r')
holdoff()
'''
'''
# Debug:
m_sp_db = la.db(m_sp)
m_sp_lq_db = la.db(la.mcep_to_sp(m_spmgc, nFFT))
m_sp_diff_db = m_sp_db - m_sp_lq_db
m_sp_diff_db[m_sp_diff_db < 0] = 0
v_sp_diff_db_mean = np.mean(m_sp_diff_db, axis=0)
v_div = np.sum(m_sp_diff_db > 0, axis=0)
v_sp_diff_db_mean2 = np.sum(m_sp_diff_db, axis=0) / v_div
#plotm(m_sp_lq_db)
#plotm(m_sp_log)
'''
'''
nx = 81
holdon()
plot(m_sp_db[nx,:], '-b')
plot(m_sp_lq_db[nx,:], '-r')
plot(v_sp_diff_db_mean, '-k')
plot(v_sp_diff_db_mean2, '-g')
holdoff()
'''
# Ph to MGC up to MVF:
nFFT = 2*(np.size(m_sp,1) - 1)
mvf_bin = lu.round_to_int(mvf * nFFT / np.float(fs))
nFFThalf_ph = la.next_pow_of_two(mvf_bin) + 1
m_phs_shrt = m_phs[:,:mvf_bin]
m_phc_shrt = m_phc[:,:mvf_bin]
f_interps = interpolate.interp1d(np.arange(mvf_bin), m_phs_shrt, kind='cubic')
f_interpc = interpolate.interp1d(np.arange(mvf_bin), m_phc_shrt, kind='cubic')
m_phs_shrt_intrp = f_interps(np.linspace(0,mvf_bin-1,nFFThalf_ph))
m_phc_shrt_intrp = f_interpc(np.linspace(0,mvf_bin-1,nFFThalf_ph))
m_phs_mgc = la.sp_to_mcep(m_phs_shrt_intrp, in_type=1)
m_phc_mgc = la.sp_to_mcep(m_phc_shrt_intrp, in_type=1)
return m_spmgc, m_phs_mgc, m_phc_mgc, v_shift, v_voi, fs
# mvf: Maximum voiced frequency for phase encoding
def analysis_with_del_comp_and_ph_encoding(v_in_sig, nFFT, fs, mvf):
m_sp, m_ph, v_shift = analysis_with_del_comp(v_in_sig, nFFT, fs)
# Phase encoding:
m_phs, m_phc = ph_enc(m_ph)
# Sp to MGC:
m_spmgc = la.sp_to_mcep(m_sp)
# Ph to MGC up to MVF:
#mvf = 4500
nFFT = 2*(np.size(m_sp,1) - 1)
mvf_bin = lu.round_to_int(mvf * nFFT / np.float(fs))
nFFThalf_ph = la.next_pow_of_two(mvf_bin) + 1
m_phs_shrt = m_phs[:,:mvf_bin]
m_phc_shrt = m_phc[:,:mvf_bin]
f_interps = interpolate.interp1d(np.arange(mvf_bin), m_phs_shrt, kind='cubic')
f_interpc = interpolate.interp1d(np.arange(mvf_bin), m_phc_shrt, kind='cubic')
m_phs_shrt_intrp = f_interps(np.linspace(0,mvf_bin-1,nFFThalf_ph))
m_phc_shrt_intrp = f_interpc(np.linspace(0,mvf_bin-1,nFFThalf_ph))
m_phs_mgc = la.sp_to_mcep(m_phs_shrt_intrp, in_type=1)
m_phc_mgc = la.sp_to_mcep(m_phc_shrt_intrp, in_type=1)
return m_spmgc, m_phs_mgc, m_phc_mgc, v_shift
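# Note on the interpolation above (an assumption, not stated in the original):
# the phase halves are cut at mvf_bin and resampled onto a next_pow_of_two+1
# grid, presumably because sp_to_mcep expects an FFT-half length of 2^k + 1.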
#==============================================================================
def synth_only_with_noise(m_sp, v_shift, v_voi, nFFT, fs, mvf, func_win_ana=np.hanning, ph_type='minph'):
# Inputs for now:
#func_win_ana = la.cos_win
#func_win_ana = np.hanning
#func_win_syn = la.cos_win
# TD Noise Gen:
v_pm = la.shift_to_pm(v_shift)
sig_len = v_pm[-1] + (v_pm[-1] - v_pm[-2])
v_noise = np.random.uniform(-1, 1, sig_len)
# Extract frames:
l_frames, v_lens, v_pm_plus, v_shift_dummy, v_rights = windowing(v_noise, v_pm, win_func=func_win_ana)
# Frame-based processing:
nfrms = len(v_shift)
m_frm = np.zeros((nfrms,nFFT))
for i in xrange(nfrms):
v_sp = m_sp[i,:]
# Debug:
#v_sp = m_sp[30,:]
        m_frm[i,:] = la.stamp_mag_sp(l_frames[i], v_sp, v_shift[i], ph_type=ph_type)
# Debug:
#m_frm[i,:] = la.stamp_mag_sp(l_frames[i], v_sp, v_shift[i], ph_type='linph')
v_sig = la.ola(m_frm, v_pm)
# Debug:
'''
holdon()
max = 129000
plot(v_noise[:max],'-b')
plot(v_sig[:max],'-r')
holdoff()
'''
'''
plotm(m_frm[298:313,:100])
plotm(la.db(m_sp))
'''
return v_sig
#==============================================================================
# Input: f0, instead of shifts (v_shift).
# v_voi: Used to construct the ap mask:
# if v_voi[n] > 0, frame is voiced. If v_voi[n] == 0, frame is unvoiced.
# If v_voy=='estim', the mask is estimated from phase data.
def synthesis_with_del_comp__ph_enc__from_f0(m_spmgc, m_phs, m_phc, v_f0, nFFT, fs, mvf, ph_hf_gen, v_voi='estim'):
v_shift = f0_to_shift(v_f0, fs)
v_syn_sig = synthesis_with_del_comp_and_ph_encoding(m_spmgc, m_phs, m_phc, v_shift, nFFT, fs, mvf, ph_hf_gen, v_voi=v_voi)
# Debug:
#v_syn_sig = synthesis_with_del_comp_and_ph_encoding_voi_unv_separated(m_spmgc, m_phs, m_phc, v_shift, v_voi, nFFT, fs, mvf, ph_hf_gen)
return v_syn_sig
#==============================================================================
'''
#==============================================================================
# If ph_hf_gen=='rand', generates random numbers for the phase above mvf
# If ph_hf_gen=='template_mask', uses a phase template to fill the gaps given by the aperiodic mask.
# If ph_hf_gen=='rand_mask' The same as above, but it uses random numbers instead of a template.
# The aperiodic mask is computed (estimated) according to the total phase energy per frame.
# v_voi: Used to construct the ap mask:
# if v_voi[n] > 0, frame is voiced. If v_voi[n] == 0, frame is unvoiced.
# If v_voy=='estim', the mask is estimated from phase data.
def synthesis_with_del_comp_and_ph_encoding_voi_unv_separated(m_spmgc, m_phs_mgc, m_phc_mgc, v_shift, v_voi, nFFT, fs, mvf, ph_hf_gen="rand_mask"):
# 1.-Magnitude Spectrum:---------------------------------------------------
# MGC to SP:
m_sp_syn = la.mcep_to_sp(m_spmgc, nFFT)
# 2.-Deterministic Phase:--------------------------------------------------
# Ph and MVF:
mvf_bin = lu.round_to_int(mvf * nFFT / np.float(fs))
nFFThalf_ph = la.next_pow_of_two(mvf_bin) + 1
# MGC to Ph up to MVF:
m_phs_shrt_intrp_syn = la.mcep_to_sp(m_phs_mgc, 2*(nFFThalf_ph-1), out_type=0)
m_phc_shrt_intrp_syn = la.mcep_to_sp(m_phc_mgc, 2*(nFFThalf_ph-1), out_type=0)
f_interps_syn = interpolate.interp1d(np.arange(nFFThalf_ph), m_phs_shrt_intrp_syn, kind='cubic')
f_interpc_syn = interpolate.interp1d(np.arange(nFFThalf_ph), m_phc_shrt_intrp_syn, kind='cubic')
m_phs_shrt_syn = f_interps_syn(np.linspace(0,nFFThalf_ph-1,mvf_bin))
m_phc_shrt_syn = f_interpc_syn(np.linspace(0,nFFThalf_ph-1,mvf_bin))
# Generate phase up to Nyquist:
nfrms = np.size(m_phs_shrt_syn,0)
nFFThalf = nFFT / 2 + 1
m_phs_shrt_syn = np.clip(m_phs_shrt_syn, -1, 1)
m_phc_shrt_syn = np.clip(m_phc_shrt_syn, -1, 1)
# Phase decoding:
m_ph_deter = ph_dec(m_phs_shrt_syn, m_phc_shrt_syn, mode='angle')
m_ph_deter = np.hstack((m_ph_deter, np.zeros((nfrms,nFFThalf-mvf_bin))))
# 3.-Aperiodic Signal:-----------------------------------------------------
# Getting aperiodicity mask:
m_ph_ap_mask = get_ap_mask_from_uv_decision(v_voi, nFFT, fs, mvf)
# Gen aperiodic phase:
if ph_hf_gen is 'template_mask':
m_ap_ph = gen_rand_phase_by_template('../database/ph_template_1.npy',nfrms, nFFThalf)
elif ph_hf_gen is 'rand_mask':
m_ap_ph = np.random.uniform(-np.pi, np.pi, size=(nfrms,nFFThalf))
# Synth of Aperiodic Areas:------------------------------------------------
v_ap_win = np.zeros(nFFT)
mid_frm_nx = nFFT / 2
v_shift = lu.round_to_int(v_shift)
v_ap_sig = np.zeros(np.sum(v_shift[:-1]) + nFFT + 1)
strt = 0
for f in xrange(nfrms-1):
# From Phase to Time-Domain:
v_ap_ph = m_ap_ph[f,:]
v_ap_ph = la.add_hermitian_half(v_ap_ph[None,:], data_type='phase')[0]
v_sp_comp = np.exp(v_ap_ph * 1j)
v_ap_frm = np.fft.ifft(v_sp_comp).real
# Windowing:
v_ap_win[:] = 0 # reset
v_curr_win_shrt = la.gen_wider_window(np.hanning,v_shift[f], v_shift[f+1], 0.15) # 0.15: value obtained empirically
v_ap_win[(mid_frm_nx-v_shift[f]):(mid_frm_nx+v_shift[f+1])] = v_curr_win_shrt
v_ap_frm = v_ap_frm * v_ap_win
# To frequency domain - again:
v_sp_comp = np.fft.fft(v_ap_frm)
v_curr_ph = np.angle(v_sp_comp)
# Magnitude Spectrum Stamping:
v_targ_sp = m_sp_syn[f,:] * m_ph_ap_mask[f,:]
v_sp_comp = la.add_hermitian_half(v_targ_sp[None,:])[0] * np.exp(v_curr_ph * 1j)
v_ap_frm = np.fft.ifft(v_sp_comp).real
# Window again:
rms_prev = np.sqrt(np.mean(v_ap_frm**2))
v_ap_frm = v_ap_frm * v_ap_win
rms_after = np.sqrt(np.mean(v_ap_frm**2))
v_ap_frm = v_ap_frm * rms_prev / rms_after
# OLA:
v_ap_sig[strt:(strt+nFFT)] += v_ap_frm
strt += v_shift[f]
v_ap_sig = v_ap_sig[(nFFT/2 - v_shift[0]):]
# Deterministic Signal:----------------------------------------------------
m_ph_det_syn = m_ph_deter * (1 - m_ph_ap_mask)
m_sp_det_syn = m_sp_syn * (1 - m_ph_ap_mask)
# Final Synthesis:
v_det_sig = synthesis_with_del_comp(m_sp_det_syn, m_ph_det_syn, v_shift)
return v_det_sig + v_ap_sig
'''
#==============================================================================
# If ph_hf_gen=='rand', generates random numbers for the phase above mvf
# If ph_hf_gen=='template_mask', uses a phase template to fill the gaps given by the aperiodic mask.
# If ph_hf_gen=='rand_mask' The same as above, but it uses random numbers instead of a template.
# The aperiodic mask is computed (estimated) according to the total phase energy per frame.
# v_voi: Used to construct the ap mask:
# if v_voi[n] > 0, frame is voiced. If v_voi[n] == 0, frame is unvoiced.
# If v_voy=='estim', the mask is estimated from phase data.
def synthesis_with_del_comp_and_ph_encoding_voi_unv_separated(m_spmgc, m_phs_mgc, m_phc_mgc, v_shift, v_voi, nFFT, fs, mvf, ph_hf_gen="rand_mask"):
# 1.-Magnitude Spectrum:---------------------------------------------------
# MGC to SP:
m_sp_syn = la.mcep_to_sp(m_spmgc, nFFT)
# 2.-Deterministic Phase:--------------------------------------------------
# Ph and MVF:
mvf_bin = lu.round_to_int(mvf * nFFT / np.float(fs))
nFFThalf_ph = la.next_pow_of_two(mvf_bin) + 1
# MGC to Ph up to MVF:
m_phs_shrt_intrp_syn = la.mcep_to_sp(m_phs_mgc, 2*(nFFThalf_ph-1), out_type=0)
m_phc_shrt_intrp_syn = la.mcep_to_sp(m_phc_mgc, 2*(nFFThalf_ph-1), out_type=0)
f_interps_syn = interpolate.interp1d(np.arange(nFFThalf_ph), m_phs_shrt_intrp_syn, kind='cubic')
f_interpc_syn = interpolate.interp1d(np.arange(nFFThalf_ph), m_phc_shrt_intrp_syn, kind='cubic')
m_phs_shrt_syn = f_interps_syn(np.linspace(0,nFFThalf_ph-1,mvf_bin))
m_phc_shrt_syn = f_interpc_syn(np.linspace(0,nFFThalf_ph-1,mvf_bin))
# Generate phase up to Nyquist:
nfrms = np.size(m_phs_shrt_syn,0)
nFFThalf = nFFT / 2 + 1
m_phs_shrt_syn = np.clip(m_phs_shrt_syn, -1, 1)
m_phc_shrt_syn = np.clip(m_phc_shrt_syn, -1, 1)
# Phase decoding:
m_ph_deter = ph_dec(m_phs_shrt_syn, m_phc_shrt_syn, mode='angle')
m_ph_deter = np.hstack((m_ph_deter, np.zeros((nfrms,nFFThalf-mvf_bin))))
# 3.-Aperiodic Signal:-----------------------------------------------------
# Getting aperiodicity mask:
m_ph_ap_mask = get_ap_mask_from_uv_decision(v_voi, nFFT, fs, mvf, fade_len=1)
    # Apply ap mask (test):
m_sp_ap_syn = m_sp_syn * m_ph_ap_mask
#m_sp_ap_syn = m_sp_syn
# Synth of Aperiodic Areas:------------------------------------------------
v_ap_sig = synth_only_with_noise(m_sp_ap_syn, v_shift, v_voi, nFFT, fs, mvf, func_win_ana=la.cos_win)
# Deterministic Signal:----------------------------------------------------
m_ph_det_syn = m_ph_deter * (1 - m_ph_ap_mask)
m_sp_det_syn = m_sp_syn * (1 - m_ph_ap_mask)
# Final Synthesis:
v_det_sig = synthesis_with_del_comp(m_sp_det_syn, m_ph_det_syn, v_shift)
# Debug:
'''
play(v_ap_sig, fs)
play(v_ap_sig + v_det_sig, fs)
'''
return v_det_sig + v_ap_sig
#==============================================================================
# v2: Improved phase generation.
# v3: specific window handling for aperiodic spectrum in voiced segments.
# v4: Splitted window support
# v5: Works with new fft params: mag_mel_log, real_mel, and imag_mel
# If ph_hf_gen=='rand', generates random numbers for the phase above mvf
# If ph_hf_gen=='template_mask', uses a phase template to fill the gaps given by the aperiodic mask.
# If ph_hf_gen=='rand_mask' The same as above, but it uses random numbers instead of a template.
# The aperiodic mask is computed (estimated) according to the total phase energy per frame.
# v_voi: Used to construct the ap mask:
# if v_voi[n] > 0, frame is voiced. If v_voi[n] == 0, frame is unvoiced.
# If v_voy=='estim', the mask is estimated from phase data.
# hf_slope_coeff: 1=no slope, 2=finishing with twice the energy at highest frequency.
def synthesis_with_del_comp_and_ph_encoding5(m_mag_mel_log, m_real_mel, m_imag_mel, v_f0, nfft, fs, mvf, f0_type='lf0', hf_slope_coeff=1.0, b_use_ap_voi=True, b_voi_ap_win=True):
if f0_type=='lf0':
v_f0 = np.exp(v_f0)
# Debug:
'''
vb_voi = v_f0 > 1
v_f02 = np.zeros(len(v_f0))
#v_f02[vb_voi] = signal.medfilt(v_f0[vb_voi], kernel_size=37)
L = 20
v_win = np.hanning(L)
v_f02[vb_voi] = np.convolve(v_f0[vb_voi], v_win / np.sum(v_win), mode='same')
v_f0 = v_f02
#v_f02 = vb_voi * signal.medfilt(v_f0, kernel_size=11)
'''
'''
holdon()
nx = 9
plot(v_f0, '-b')
plot(v_f02, '-r')
holdoff()
'''
nfrms, ncoeffs_mag = m_mag_mel_log.shape
ncoeffs_comp = m_real_mel.shape[1]
nfft_half = nfft / 2 + 1
# Magnitude mel-unwarp:----------------------------------------------------
m_mag = np.exp(la.sp_mel_unwarp(m_mag_mel_log, nfft_half, alpha=0.77, in_type='log'))
# Complex mel-unwarp:------------------------------------------------------
f_intrp_real = interpolate.interp1d(np.arange(ncoeffs_comp), m_real_mel, kind='nearest', fill_value='extrapolate')
f_intrp_imag = interpolate.interp1d(np.arange(ncoeffs_comp), m_imag_mel, kind='nearest', fill_value='extrapolate')
m_real_mel = f_intrp_real(np.arange(ncoeffs_mag))
m_imag_mel = f_intrp_imag(np.arange(ncoeffs_mag))
m_real = la.sp_mel_unwarp(m_real_mel, nfft_half, alpha=0.77, in_type='log')
m_imag = la.sp_mel_unwarp(m_imag_mel, nfft_half, alpha=0.77, in_type='log')
# Noise Gen:---------------------------------------------------------------
v_shift = f0_to_shift(v_f0, fs, unv_frm_rate_ms=5).astype(int)
v_pm = la.shift_to_pm(v_shift)
ns_len = v_pm[-1] + (v_pm[-1] - v_pm[-2])
v_ns = np.random.uniform(-1, 1, ns_len)
# Noise Windowing:---------------------------------------------------------
l_ns_win_funcs = [ np.hanning ] * nfrms
vb_voi = v_f0 > 1 # case voiced (1 is used for safety)
if b_voi_ap_win:
for i in xrange(nfrms):
if vb_voi[i]:
l_ns_win_funcs[i] = voi_noise_window
l_frm_ns, v_lens, v_pm_plus, v_shift_dummy, v_rights = windowing(v_ns, v_pm, win_func=l_ns_win_funcs) # Check this!!
m_frm_ns = la.frm_list_to_matrix(l_frm_ns, v_shift, nfft)
m_frm_ns = np.fft.fftshift(m_frm_ns, axes=1)
m_ns_cmplx = la.remove_hermitian_half(np.fft.fft(m_frm_ns))
# AP-Mask:-----------------------------------------------------------------
cf = 5000 #5000
bw = 2000 #2000
# Norm gain:
m_ns_mag = np.absolute(m_ns_cmplx)
rms_noise = np.sqrt(np.mean(m_ns_mag**2)) # check this!!
m_ap_mask = np.ones(m_ns_mag.shape)
m_ap_mask = m_mag * m_ap_mask / rms_noise
m_zeros = np.zeros((nfrms, nfft_half))
m_ap_mask[vb_voi,:] = la.spectral_crossfade(m_zeros[vb_voi,:], m_ap_mask[vb_voi,:], cf, bw, fs, freq_scale='hz')
# HF - enhancement:
v_slope = np.linspace(1, hf_slope_coeff, num=nfft_half)
m_ap_mask[~vb_voi,:] = m_ap_mask[~vb_voi,:] * v_slope
# Det-Mask:----------------------------------------------------------------
m_det_mask = m_mag
m_det_mask[~vb_voi,:] = 0
m_det_mask[vb_voi,:] = la.spectral_crossfade(m_det_mask[vb_voi,:], m_zeros[vb_voi,:], cf, bw, fs, freq_scale='hz')
# Applying masks:----------------------------------------------------------
m_ap_cmplx = m_ap_mask * m_ns_cmplx
m_det_cmplx = m_real + m_imag * 1j
m_det_cmplx = m_det_mask * m_det_cmplx / np.absolute(m_det_cmplx)
# bin width: bw=11.71875 Hz
# Final synth:-------------------------------------------------------------
m_syn_cmplx = la.add_hermitian_half(m_ap_cmplx + m_det_cmplx, data_type='complex')
m_syn_td = np.fft.ifft(m_syn_cmplx).real
m_syn_td = np.fft.fftshift(m_syn_td, axes=1)
v_syn_sig = la.ola(m_syn_td, v_pm, win_func=None)
# HPF:---------------------------------------------------------------------
fc = 60
order = 4
fc_norm = fc / (fs / 2.0)
bc, ac = signal.ellip(order, 0.5, 80, fc_norm, btype='highpass')
v_syn_sig = signal.lfilter(bc, ac, v_syn_sig)
# Debug:
'''
holdon()
plot(la.db(m_mag[264,:]), '-b')
plot(la.db(m_mag_syn[264,:]), '-r')
plot(10 * m_ph[264,:], '-k')
plot(10 * m_ph_ns[264,:], '.-g')
holdoff()
holdon()
plot(la.db(m_mag[264,:]), '-b')
plot(la.db(m_mag_syn[264,:]), '-r')
plot(10 * m_ph[264,:], '-k')
plot(10 * m_ph_syn[264,:], '-g')
holdoff()
holdon()
plot(la.db(m_mag[264,:]), '-b')
plot(la.db(m_mag_syn[264,:]), '-r')
plot(la.db(m_mag[265,:]), '-k')
plot(la.db(m_mag_syn[265,:]), '-g')
holdoff()
holdon()
plot(la.db(m_mag[264,:]), '-b')
plot(la.db(m_mag[265,:]), '-r')
holdoff()
holdon()
plot(m_ph_syn[264,:], '-b')
plot(m_ph_syn[265,:], '-r')
holdoff()
'''
# la.write_audio_file(out_dir + '/' + filename + suffix + '.wav', v_sig_syn, fs)
return v_syn_sig
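# Note on la.spectral_crossfade (used heavily above). A minimal sketch of one
# plausible reading: a linear fade of width bw centred on cut_off, blending a
# low-band spectrum into a high-band one. Illustrative only; the actual
# library routine may differ in window shape and frequency-scale handling.
#   def _crossfade_sketch(m_low, m_high, cut_off, bw, fs, nbins):
#       v_hz = np.linspace(0.0, fs / 2.0, nbins)
#       v_w = np.clip((v_hz - (cut_off - bw / 2.0)) / float(bw), 0.0, 1.0)
#       return m_low * (1.0 - v_w) + m_high * v_w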
'''
def synthesis_with_del_comp_and_ph_encoding5(m_mag_mel_log, m_real_mel, m_imag_mel, v_f0, nfft, fs, mvf, f0_type='lf0', hf_slope_coeff=1.0, b_use_ap_voi=True, b_voi_ap_win=True):
if f0_type=='lf0':
v_f0 = np.exp(v_f0)
nfrms, ncoeffs_mag = m_mag_mel_log.shape
ncoeffs_comp = m_real_mel.shape[1]
nfft_half = nfft / 2 + 1
# Magnitude mel-unwarp:----------------------------------------------------
m_mag = np.exp(la.sp_mel_unwarp(m_mag_mel_log, nfft_half, alpha=0.77, in_type='log'))
# Complex mel-unwarp:------------------------------------------------------
f_intrp_real = interpolate.interp1d(np.arange(ncoeffs_comp), m_real_mel, kind='nearest', fill_value='extrapolate')
f_intrp_imag = interpolate.interp1d(np.arange(ncoeffs_comp), m_imag_mel, kind='nearest', fill_value='extrapolate')
m_real_mel = f_intrp_real(np.arange(ncoeffs_mag))
m_imag_mel = f_intrp_imag(np.arange(ncoeffs_mag))
m_real = la.sp_mel_unwarp(m_real_mel, nfft_half, alpha=0.77, in_type='log')
m_imag = la.sp_mel_unwarp(m_imag_mel, nfft_half, alpha=0.77, in_type='log')
# Noise Gen:---------------------------------------------------------------
v_shift = f0_to_shift(v_f0, fs, unv_frm_rate_ms=5).astype(int)
v_pm = la.shift_to_pm(v_shift)
ns_len = v_pm[-1] + (v_pm[-1] - v_pm[-2])
v_ns = np.random.uniform(-1, 1, ns_len)
# Noise Windowing:---------------------------------------------------------
l_ns_win_funcs = [ np.hanning ] * nfrms
vb_voi = v_f0 > 1 # case voiced (1 is used for safety)
if b_voi_ap_win:
for i in xrange(nfrms):
if vb_voi[i]:
l_ns_win_funcs[i] = voi_noise_window
l_frm_ns, v_lens, v_pm_plus, v_shift_dummy, v_rights = windowing(v_ns, v_pm, win_func=l_ns_win_funcs) # Check this!!
m_frm_ns = la.frm_list_to_matrix(l_frm_ns, v_shift, nfft)
m_frm_ns = np.fft.fftshift(m_frm_ns, axes=1)
m_ns_cmplx = la.remove_hermitian_half(np.fft.fft(m_frm_ns))
m_ns_mag, m_ns_real, m_ns_imag = get_fft_params_from_complex_data(m_ns_cmplx)
# Norm:
rms_noise = np.sqrt(np.mean(m_ns_mag**2)) # check this!!
m_ns_mag = m_ns_mag / rms_noise
# HF - enhancement:
v_slope = np.linspace(1, hf_slope_coeff, num=nfft_half)
m_ns_mag[~vb_voi,:] = m_ns_mag[~vb_voi,:] * v_slope
# Merge data:--------------------------------------------------------------
cf_mag = 5000 #5000
bw_mag = 2000 #2000
cf_cmpx = cf_mag #5000
bw_cmpx = bw_mag #2000
# Alloc:
m_mag_ap = np.zeros((nfrms, nfft_half))
m_mag_det = np.zeros((nfrms, nfft_half))
# Working:
m_ph = np.angle(m_real + m_imag *1j)
m_ph_ns = np.angle(m_ns_real + m_ns_imag *1j)
m_ph_ap = m_ph_ns
m_ph_det = m_ph
m_mag_zeros = np.zeros((nfrms, nfft_half))
if b_use_ap_voi:
# Mag - ap:
m_mag_ap[vb_voi,:] = la.spectral_crossfade(m_mag_zeros[vb_voi,:], m_mag[vb_voi,:] * m_ns_mag[vb_voi,:], cf_mag, bw_mag, fs, freq_scale='hz')
m_mag_ap[~vb_voi,:] = m_mag[~vb_voi,:] * m_ns_mag[~vb_voi,:]
#-------------------------------------------------------------------------------
# Mag - det:
m_mag_det[vb_voi,:] = la.spectral_crossfade(m_mag[vb_voi,:], m_mag_zeros[vb_voi,:], cf_mag, bw_mag, fs, freq_scale='hz')
else: # Check:
# Mag - ap:
m_mag_ap[~vb_voi,:] = m_mag[~vb_voi,:] * m_ns_mag[~vb_voi,:]
# Mag - det:
m_mag_det[vb_voi,:] = m_mag[vb_voi,:]
# Debug:
m_syn_cmplx = m_mag_ap * np.exp(m_ph_ap * 1j) + m_mag_det * np.exp(m_ph_det * 1j)
m_syn_cmplx = la.add_hermitian_half(m_syn_cmplx , data_type='complex')
# bin width: bw=11.71875 Hz
# Final synth:-------------------------------------------------------------
m_syn_td = np.fft.ifft(m_syn_cmplx).real
m_syn_td = np.fft.fftshift(m_syn_td, axes=1)
v_syn_sig = la.ola(m_syn_td, v_pm, win_func=None)
# HPF:---------------------------------------------------------------------
fc = 60
order = 4
fc_norm = fc / (fs / 2.0)
bc, ac = signal.ellip(order,0.5 , 80, fc_norm, btype='highpass')
v_syn_sig = signal.lfilter(bc, ac, v_syn_sig)
# la.write_audio_file(out_dir + '/' + filename + suffix + '.wav', v_sig_syn, fs)
return v_syn_sig
'''
'''
def synthesis_with_del_comp_and_ph_encoding5(m_mag_mel_log, m_real_mel, m_imag_mel, v_f0, nfft, fs, mvf, f0_type='lf0', hf_slope_coeff=1.0, b_use_ap_voi=True, b_voi_ap_win=True):
if f0_type=='lf0':
v_f0 = np.exp(v_f0)
nfrms, ncoeffs_mag = m_mag_mel_log.shape
ncoeffs_comp = m_real_mel.shape[1]
nfft_half = nfft / 2 + 1
# Magnitude mel-unwarp:----------------------------------------------------
m_mag = np.exp(la.sp_mel_unwarp(m_mag_mel_log, nfft_half, alpha=0.77, in_type='log'))
# Complex mel-unwarp:------------------------------------------------------
f_intrp_real = interpolate.interp1d(np.arange(ncoeffs_comp), m_real_mel, kind='nearest', fill_value='extrapolate')
f_intrp_imag = interpolate.interp1d(np.arange(ncoeffs_comp), m_imag_mel, kind='nearest', fill_value='extrapolate')
m_real_mel = f_intrp_real(np.arange(ncoeffs_mag))
m_imag_mel = f_intrp_imag(np.arange(ncoeffs_mag))
# Debug:-------------------------------------------------------------------
#m_real_mel = np.pad(m_real_mel, ((0,0),(0,ncoeffs_mag-ncoeffs_comp)), 'constant', constant_values=0)
#m_imag_mel = np.pad(m_imag_mel, ((0,0),(0,ncoeffs_mag-ncoeffs_comp)), 'constant', constant_values=0)
m_real = la.sp_mel_unwarp(m_real_mel, nfft_half, alpha=0.77, in_type='log')
m_imag = la.sp_mel_unwarp(m_imag_mel, nfft_half, alpha=0.77, in_type='log')
# Debug:-------------------------------------------------------------------
#m_cmpx_orig_mag = np.absolute(m_real + m_imag * 1j)
#m_real = m_real / m_cmpx_orig_mag
#m_imag = m_imag / m_cmpx_orig_mag
# Noise Gen:---------------------------------------------------------------
v_shift = f0_to_shift(v_f0, fs, unv_frm_rate_ms=5).astype(int)
v_pm = la.shift_to_pm(v_shift)
ns_len = v_pm[-1] + (v_pm[-1] - v_pm[-2])
v_ns = np.random.uniform(-1, 1, ns_len)
# Noise Windowing:---------------------------------------------------------
l_ns_win_funcs = [ np.hanning ] * nfrms
vb_voi = v_f0 > 1 # case voiced (1 is used for safety)
if b_voi_ap_win:
for i in xrange(nfrms):
if vb_voi[i]:
l_ns_win_funcs[i] = voi_noise_window
l_frm_ns, v_lens, v_pm_plus, v_shift_dummy, v_rights = windowing(v_ns, v_pm, win_func=l_ns_win_funcs) # Check this!!
m_frm_ns = la.frm_list_to_matrix(l_frm_ns, v_shift, nfft)
m_frm_ns = np.fft.fftshift(m_frm_ns, axes=1)
m_ns_cmplx = la.remove_hermitian_half(np.fft.fft(m_frm_ns))
m_ns_mag, m_ns_real, m_ns_imag = get_fft_params_from_complex_data(m_ns_cmplx)
# Norm:
rms_noise = np.sqrt(np.mean(m_ns_mag**2)) # check this!!
m_ns_mag = m_ns_mag / rms_noise
# HF - enhancement:
v_slope = np.linspace(1, hf_slope_coeff, num=nfft_half)
m_ns_mag[~vb_voi,:] = m_ns_mag[~vb_voi,:] * v_slope
# Merge data:--------------------------------------------------------------
#cf_mag = 5000 #5000
#bw_mag = 2000 #2000
cf_mag = 6000 #5000
bw_mag = 4000 #2000
cf_cmpx = cf_mag #5000
bw_cmpx = bw_mag #2000
# Alloc:
m_mag_syn = np.ones((nfrms, nfft_half))
m_real_syn = np.zeros((nfrms, nfft_half))
m_imag_syn = np.zeros((nfrms, nfft_half))
if b_use_ap_voi:
# Mag:
m_mag_syn[vb_voi,:] = la.spectral_crossfade(m_mag[vb_voi,:], m_mag[vb_voi,:] * m_ns_mag[vb_voi,:], cf_mag, bw_mag, fs, freq_scale='hz')
m_mag_syn[~vb_voi,:] = m_mag[~vb_voi,:] * m_ns_mag[~vb_voi,:]
#Compx - Voi:
m_real_syn[vb_voi,:] = la.spectral_crossfade(m_real[vb_voi,:], m_ns_real[vb_voi,:], cf_cmpx, bw_cmpx, fs, freq_scale='hz')
m_imag_syn[vb_voi,:] = la.spectral_crossfade(m_imag[vb_voi,:], m_ns_imag[vb_voi,:], cf_cmpx, bw_cmpx, fs, freq_scale='hz')
#Compx - Unv:
m_real_syn[~vb_voi,:] = m_ns_real[~vb_voi,:]
m_imag_syn[~vb_voi,:] = m_ns_imag[~vb_voi,:]
else:
# Mag:
m_mag_syn[vb_voi,:] = m_mag[vb_voi,:]
m_mag_syn[~vb_voi,:] = m_mag[~vb_voi,:] * m_ns_mag[~vb_voi,:]
# Compx - Voi:
m_real_syn[vb_voi,:] = m_real[vb_voi,:]
m_imag_syn[vb_voi,:] = m_imag[vb_voi,:]
# Compx - Unv:
m_real_syn[~vb_voi,:] = m_ns_real[~vb_voi,:]
m_imag_syn[~vb_voi,:] = m_ns_imag[~vb_voi,:]
# Final synth:-------------------------------------------------------------
# Debug:--------------------------------------------------
g = (m_mag_syn * m_real_syn + m_mag_syn * m_imag_syn * 1j) / m_cmpx_mag
m_g_mag = np.absolute(g)
m_g_ph = np.angle(g)
#m_ph = np.angle(m_real_syn + m_imag_syn *1j)
#m_syn = m_mag_syn * np.exp(m_ph * 1j)
#m_syn = la.add_hermitian_half(m_syn, data_type='complex')
#m_syn = la.add_hermitian_half(m_mag_syn * m_real_syn + m_mag_syn * m_imag_syn * 1j, data_type='complex')
#------------------------------------------------------------------------
m_cmpx_mag = np.absolute(m_real_syn + m_imag_syn * 1j)
m_syn = la.add_hermitian_half((m_mag_syn * m_real_syn + m_mag_syn * m_imag_syn * 1j) / m_cmpx_mag, data_type='complex')
m_syn = np.fft.ifft(m_syn).real
m_syn = np.fft.fftshift(m_syn, axes=1)
v_sig_syn = la.ola(m_syn, v_pm, win_func=None)
# HPF:---------------------------------------------------------------------
fc = 60
order = 4
fc_norm = fc / (fs / 2.0)
bc, ac = signal.ellip(order,0.5 , 80, fc_norm, btype='highpass')
v_sig_syn = signal.lfilter(bc, ac, v_sig_syn)
return v_sig_syn, m_syn, m_mag_syn, m_real_syn, m_imag_syn
'''
#==============================================================================
# v2: Improved phase generation.
# v3: specific window handling for aperiodic spectrum in voiced segments.
# v4: Split window support
# If ph_hf_gen=='rand', generates random numbers for the phase above mvf
# If ph_hf_gen=='template_mask', uses a phase template to fill the gaps given by the aperiodic mask.
# If ph_hf_gen=='rand_mask', the same as above, but it uses random numbers instead of a template.
# The aperiodic mask is computed (estimated) according to the total phase energy per frame.
# v_voi: Used to construct the ap mask:
# if v_voi[n] > 0, frame is voiced. If v_voi[n] == 0, frame is unvoiced.
# If v_voi=='estim', the mask is estimated from phase data.
def synthesis_with_del_comp_and_ph_encoding4(m_spmgc, m_phs_mgc, m_phc_mgc, v_shift, nFFT, fs, mvf, v_voi, b_medfilt=False, win_func=None):
#Protection:
v_shift = v_shift.astype(int)
# Ph and MVF:
mvf_bin = lu.round_to_int(mvf * nFFT / np.float(fs))
nFFThalf_ph = la.next_pow_of_two(mvf_bin) + 1
# MGC to Ph up to MVF:
m_phs_shrt_intrp_syn = la.mcep_to_sp_sptk(m_phs_mgc, 2*(nFFThalf_ph-1), out_type=0)
m_phc_shrt_intrp_syn = la.mcep_to_sp_sptk(m_phc_mgc, 2*(nFFThalf_ph-1), out_type=0)
f_interps_syn = interpolate.interp1d(np.arange(nFFThalf_ph), m_phs_shrt_intrp_syn, kind='cubic')
f_interpc_syn = interpolate.interp1d(np.arange(nFFThalf_ph), m_phc_shrt_intrp_syn, kind='cubic')
m_phs_shrt_syn = f_interps_syn(np.linspace(0,nFFThalf_ph-1,mvf_bin))
m_phc_shrt_syn = f_interpc_syn(np.linspace(0,nFFThalf_ph-1,mvf_bin))
# Deterministic Phase decoding:----------------------
nfrms = np.size(m_phs_shrt_syn,0)
nFFThalf = nFFT / 2 + 1
m_phs_shrt_syn = np.clip(m_phs_shrt_syn, -1, 1)
m_phc_shrt_syn = np.clip(m_phc_shrt_syn, -1, 1)
m_ph_deter = ph_dec(m_phs_shrt_syn, m_phc_shrt_syn, mode='angle')
#m_ph_deter = np.hstack((m_ph_deter, np.zeros((nfrms,nFFThalf-mvf_bin))))
# Debug:
f = interpolate.interp1d(np.arange(mvf_bin), m_ph_deter, kind='nearest', fill_value='extrapolate')
m_ph_deter = f(np.arange(nFFThalf))
# TD Noise Gen:---------------------------------------
v_pm = la.shift_to_pm(v_shift)
sig_len = v_pm[-1] + (v_pm[-1] - v_pm[-2])
v_noise = np.random.uniform(-1, 1, sig_len)
#v_noise = np.random.normal(size=sig_len)
# Extract noise magnitude and phase for unvoiced segments: (TODO: make it more efficient!)-------------------------------
win_func_unv = np.hanning
if win_func is la.cos_win:
win_func_unv = la.cos_win
l_frm_noise, v_lens, v_pm_plus, v_shift_dummy, v_rights = windowing(v_noise, v_pm, win_func=win_func_unv)
m_frm_noise = la.frm_list_to_matrix(l_frm_noise, v_shift, nFFT)
m_frm_noise = np.fft.fftshift(m_frm_noise, axes=1)
'''
# Debug - randomise sequence of noise frames (do not delete!):
v_new_nx = np.random.randint(nfrms, size=nfrms)
m_frm_noise = m_frm_noise[v_new_nx,:]
#------------------------------------------
'''
m_noise_sp = la.remove_hermitian_half(np.fft.fft(m_frm_noise))
m_noise_ph = np.angle(m_noise_sp)
m_noise_mag = np.absolute(m_noise_sp)
m_noise_mag_log = np.log(m_noise_mag)
# Noise amp-normalisation:
rms_noise = np.sqrt(np.mean(m_noise_mag**2))
m_noise_mag_log = m_noise_mag_log - np.log(rms_noise)
# Extract noise magnitude and phase for voiced segments: (TODO: make it more efficient!)-------------------------------------
l_frm_voi_noise, v_lens, v_pm_plus, v_shift_dummy, v_rights = windowing(v_noise, v_pm, win_func=voi_noise_window)
m_frm_voi_noise = la.frm_list_to_matrix(l_frm_voi_noise, v_shift, nFFT)
m_frm_voi_noise = np.fft.fftshift(m_frm_voi_noise, axes=1)
m_voi_noise_sp = la.remove_hermitian_half(np.fft.fft(m_frm_voi_noise))
m_voi_noise_ph = np.angle(m_voi_noise_sp)
m_voi_noise_mag = np.absolute(m_voi_noise_sp)
m_voi_noise_mag_log = np.log(m_voi_noise_mag)
# Noise amp-normalisation:
rms_voi_noise = np.sqrt(np.mean(m_voi_noise_mag**2))
m_voi_noise_mag_log = m_voi_noise_mag_log - np.log(rms_voi_noise)
#------------------------------------------------------------------------------------------------------------------------------
# ap mask:
v_voi_mask = np.clip(v_voi, 0, 1)
# target sp from mgc:
m_sp_targ = la.mcep_to_sp_sptk(m_spmgc, nFFT)
# medfilt:
if b_medfilt:
m_sp_targ = signal.medfilt(m_sp_targ, kernel_size=[3,1])
'''
# Debug - Minimum phase filter for ap signal (do not delete!):
m_sp_comp_mph = la.sp_to_min_phase(m_sp_targ, in_type='sp')
m_sp_ph_mph = np.angle(m_sp_comp_mph)
m_noise_ph = m_noise_ph + m_sp_ph_mph
m_voi_noise_ph = m_voi_noise_ph + m_sp_ph_mph
'''
# Alloc:
m_frm_syn = np.zeros((nfrms, nFFT))
m_mag_syn = np.zeros((nfrms, nFFThalf)) # just for debug
m_mag = np.zeros((nfrms, nFFThalf)) # just for debug
# Spectral crossfade constants (TODO: Improve this):
muf = 3500 # "minimum unvoiced freq."
bw = (mvf - muf) - 20 # values found empirically. assuming mvf > 4000
cut_off = (mvf + muf) / 2
v_zeros = np.zeros((1,nFFThalf))
# Iterates through frames:
for i in xrange(nfrms):
if v_voi_mask[i] == 1: # voiced case
# Magnitude:-----------------------------------------
v_mag_log = m_voi_noise_mag_log[i,:]
v_mag_log = la.spectral_crossfade(v_zeros, v_mag_log[None,:], cut_off, bw, fs, freq_scale='hz')[0]
# Debug:
v_mag_log = np.squeeze(v_zeros)
# Phase:--------------------------------------------
v_ph = la.spectral_crossfade(m_ph_deter[None, i,:], m_voi_noise_ph[None,i,:], cut_off, bw, fs, freq_scale='hz')[0]
# Debug:
v_ph_deters, v_ph_deterc = ph_enc(m_ph_deter[i,:])
v_voi_noise_phs, v_voi_noise_phc = ph_enc(m_voi_noise_ph[i,:])
v_phsA = la.spectral_crossfade(v_ph_deters[None,:], v_voi_noise_phs[None,:], 5000, 2000, fs, freq_scale='hz')[0]
v_phcA = la.spectral_crossfade(v_ph_deterc[None,:], v_voi_noise_phc[None,:], 5000, 2000, fs, freq_scale='hz')[0]
v_ph = ph_dec(v_phsA, v_phcA)
#v_ph = m_ph_deter[i,:]
'''
holdon()
plot(v_ph_deters, '.-b')
plot(v_voi_noise_phs, '.-r')
plot(v_phsA, '.-k')
holdoff()
'''
'''
holdon()
plot(m_ph_deter[None, i,:], '.-b')
plot(m_voi_noise_ph[None,i,:], '.-r')
plot(v_ph, '.-k')
holdoff()
'''
elif v_voi_mask[i] == 0: # unvoiced case
# Magnitude:---------------------------------------
v_mag_log = m_noise_mag_log[i,:]
# Debug:
v_mag_log = np.squeeze(v_zeros)
# Phase:--------------------------------------------
v_ph = m_noise_ph[i,:]
# To complex:
m_mag[i,:] = np.exp(v_mag_log) # just for debug
v_mag = np.exp(v_mag_log) * m_sp_targ[i,:]
v_sp = v_mag * np.exp(v_ph * 1j)
v_sp = la.add_hermitian_half(v_sp[None,:], data_type='complex')
'''
# Debug:
holdon()
plot(np.log(m_sp_targ[i,:]), '.-b')
plot(v_mag_log, '.-r')
plot(np.log(v_mag), '.-k')
plot(m_voi_noise_mag_log[i,:], '-b')
holdoff()
'''
# Save:
#print(i)
m_mag_syn[i,:] = v_mag # for inspection
m_frm_syn[i,:] = np.fft.fftshift(np.fft.ifft(v_sp).real)
v_sig_syn = la.ola(m_frm_syn, v_pm, win_func=win_func)
return v_sig_syn, m_frm_syn, m_mag_syn, m_sp_targ, m_frm_noise, m_frm_voi_noise, m_mag
#==============================================================================
# v3: specific window handling for aperiodic spectrum in voiced segments.
# v2: Improved phase generation.
# If ph_hf_gen=='rand', generates random numbers for the phase above mvf
# If ph_hf_gen=='template_mask', uses a phase template to fill the gaps given by the aperiodic mask.
# If ph_hf_gen=='rand_mask', the same as above, but it uses random numbers instead of a template.
# The aperiodic mask is computed (estimated) according to the total phase energy per frame.
# v_voi: Used to construct the ap mask:
# if v_voi[n] > 0, frame is voiced. If v_voi[n] == 0, frame is unvoiced.
# If v_voi=='estim', the mask is estimated from phase data.
def synthesis_with_del_comp_and_ph_encoding3(m_spmgc, m_phs_mgc, m_phc_mgc, v_shift, nFFT, fs, mvf, v_voi, b_medfilt=False):
#Protection:
v_shift = v_shift.astype(int)
# Ph and MVF:
mvf_bin = lu.round_to_int(mvf * nFFT / np.float(fs))
nFFThalf_ph = la.next_pow_of_two(mvf_bin) + 1
# MGC to Ph up to MVF:
m_phs_shrt_intrp_syn = la.mcep_to_sp(m_phs_mgc, 2*(nFFThalf_ph-1), out_type=0)
m_phc_shrt_intrp_syn = la.mcep_to_sp(m_phc_mgc, 2*(nFFThalf_ph-1), out_type=0)
f_interps_syn = interpolate.interp1d(np.arange(nFFThalf_ph), m_phs_shrt_intrp_syn, kind='cubic')
f_interpc_syn = interpolate.interp1d(np.arange(nFFThalf_ph), m_phc_shrt_intrp_syn, kind='cubic')
m_phs_shrt_syn = f_interps_syn(np.linspace(0,nFFThalf_ph-1,mvf_bin))
m_phc_shrt_syn = f_interpc_syn(np.linspace(0,nFFThalf_ph-1,mvf_bin))
# Deterministic Phase decoding:----------------------
nfrms = np.size(m_phs_shrt_syn,0)
nFFThalf = nFFT / 2 + 1
m_phs_shrt_syn = np.clip(m_phs_shrt_syn, -1, 1)
m_phc_shrt_syn = np.clip(m_phc_shrt_syn, -1, 1)
m_ph_deter = ph_dec(m_phs_shrt_syn, m_phc_shrt_syn, mode='angle')
m_ph_deter = np.hstack((m_ph_deter, np.zeros((nfrms,nFFThalf-mvf_bin))))
# TD Noise Gen:---------------------------------------
v_pm = la.shift_to_pm(v_shift)
sig_len = v_pm[-1] + (v_pm[-1] - v_pm[-2])
v_noise = np.random.uniform(-1, 1, sig_len)
#v_noise = np.random.normal(size=sig_len)
# Extract noise magnitude and phase for unvoiced segments: (TODO: make it more efficient!)-------------------------------
l_frm_noise, v_lens, v_pm_plus, v_shift_dummy, v_rights = windowing(v_noise, v_pm, win_func=np.hanning)
m_frm_noise = la.frm_list_to_matrix(l_frm_noise, v_shift, nFFT)
m_frm_noise = np.fft.fftshift(m_frm_noise, axes=1)
'''
# Debug - randomise sequence of noise frames (do not delete!):
v_new_nx = np.random.randint(nfrms, size=nfrms)
m_frm_noise = m_frm_noise[v_new_nx,:]
#------------------------------------------
'''
m_noise_sp = la.remove_hermitian_half(np.fft.fft(m_frm_noise))
m_noise_ph = np.angle(m_noise_sp)
m_noise_mag = np.absolute(m_noise_sp)
m_noise_mag_log = np.log(m_noise_mag)
# Noise amp-normalisation:
rms_noise = np.sqrt(np.mean(m_noise_mag**2))
m_noise_mag_log = m_noise_mag_log - np.log(rms_noise)
# Extract noise magnitude and phase for voiced segments: (TODO: make it more efficient!)-------------------------------------
l_frm_voi_noise, v_lens, v_pm_plus, v_shift_dummy, v_rights = windowing(v_noise, v_pm, win_func=voi_noise_window)
m_frm_voi_noise = la.frm_list_to_matrix(l_frm_voi_noise, v_shift, nFFT)
m_frm_voi_noise = np.fft.fftshift(m_frm_voi_noise, axes=1)
m_voi_noise_sp = la.remove_hermitian_half(np.fft.fft(m_frm_voi_noise))
m_voi_noise_ph = np.angle(m_voi_noise_sp)
m_voi_noise_mag = np.absolute(m_voi_noise_sp)
m_voi_noise_mag_log = np.log(m_voi_noise_mag)
# Noise amp-normalisation:
rms_voi_noise = np.sqrt(np.mean(m_voi_noise_mag**2))
m_voi_noise_mag_log = m_voi_noise_mag_log - np.log(rms_voi_noise)
#------------------------------------------------------------------------------------------------------------------------------
# ap mask:
v_voi_mask = np.clip(v_voi, 0, 1)
# target sp from mgc:
m_sp_targ = la.mcep_to_sp(m_spmgc, nFFT)
# medfilt:
if b_medfilt:
m_sp_targ = signal.medfilt(m_sp_targ, kernel_size=[3,1])
'''
# Debug - Minimum phase filter for ap signal (do not delete!):
m_sp_comp_mph = la.sp_to_min_phase(m_sp_targ, in_type='sp')
m_sp_ph_mph = np.angle(m_sp_comp_mph)
m_noise_ph = m_noise_ph + m_sp_ph_mph
m_voi_noise_ph = m_voi_noise_ph + m_sp_ph_mph
'''
# Alloc:
m_frm_syn = np.zeros((nfrms, nFFT))
m_mag_syn = np.zeros((nfrms, nFFThalf)) # just for debug
m_mag = np.zeros((nfrms, nFFThalf)) # just for debug
# Spectral crossfade constants (TODO: Improve this):
muf = 3500 # "minimum unvoiced freq."
bw = (mvf - muf) - 20 # values found empirically. assuming mvf > 4000
cut_off = (mvf + muf) / 2
v_zeros = np.zeros((1,nFFThalf))
# Iterates through frames:
for i in xrange(nfrms):
if v_voi_mask[i] == 1: # voiced case
# Magnitude:
v_mag_log = m_voi_noise_mag_log[i,:]
v_mag_log = la.spectral_crossfade(v_zeros, v_mag_log[None,:], cut_off, bw, fs, freq_scale='hz')[0]
# Phase:
#v_ph = la.spectral_crossfade(m_ph_deter[None, i,:], m_noise_ph[None,i,:], cut_off, bw, fs, freq_scale='hz')[0]
v_ph = la.spectral_crossfade(m_ph_deter[None, i,:], m_voi_noise_ph[None,i,:], cut_off, bw, fs, freq_scale='hz')[0]
elif v_voi_mask[i] == 0: # unvoiced case
# Magnitude:
v_mag_log = m_noise_mag_log[i,:]
# Phase:
v_ph = m_noise_ph[i,:]
# To complex:
m_mag[i,:] = np.exp(v_mag_log) # just for debug
v_mag = np.exp(v_mag_log) * m_sp_targ[i,:]
v_sp = v_mag * np.exp(v_ph * 1j)
v_sp = la.add_hermitian_half(v_sp[None,:], data_type='complex')
# Save:
#print(i)
m_mag_syn[i,:] = v_mag # for inspection
m_frm_syn[i,:] = np.fft.fftshift(np.fft.ifft(v_sp).real)
v_sig_syn = la.ola(m_frm_syn, v_pm)
return v_sig_syn, m_frm_syn, m_mag_syn, m_sp_targ, m_frm_noise, m_frm_voi_noise, m_mag
#==============================================================================
# v2: Improved phase generation.
# If ph_hf_gen=='rand', generates random numbers for the phase above mvf
# If ph_hf_gen=='template_mask', uses a phase template to fill the gaps given by the aperiodic mask.
# If ph_hf_gen=='rand_mask', the same as above, but it uses random numbers instead of a template.
# The aperiodic mask is computed (estimated) according to the total phase energy per frame.
# v_voi: Used to construct the ap mask:
# if v_voi[n] > 0, frame is voiced. If v_voi[n] == 0, frame is unvoiced.
# If v_voi=='estim', the mask is estimated from phase data.
def synthesis_with_del_comp_and_ph_encoding2(m_spmgc, m_phs_mgc, m_phc_mgc, v_shift, nFFT, fs, mvf, v_voi, win_func=np.hanning):
# Ph and MVF:
mvf_bin = lu.round_to_int(mvf * nFFT / np.float(fs))
nFFThalf_ph = la.next_pow_of_two(mvf_bin) + 1
# MGC to Ph up to MVF:
m_phs_shrt_intrp_syn = la.mcep_to_sp(m_phs_mgc, 2*(nFFThalf_ph-1), out_type=0)
m_phc_shrt_intrp_syn = la.mcep_to_sp(m_phc_mgc, 2*(nFFThalf_ph-1), out_type=0)
f_interps_syn = interpolate.interp1d(np.arange(nFFThalf_ph), m_phs_shrt_intrp_syn, kind='cubic')
f_interpc_syn = interpolate.interp1d(np.arange(nFFThalf_ph), m_phc_shrt_intrp_syn, kind='cubic')
m_phs_shrt_syn = f_interps_syn(np.linspace(0,nFFThalf_ph-1,mvf_bin))
m_phc_shrt_syn = f_interpc_syn(np.linspace(0,nFFThalf_ph-1,mvf_bin))
# Deterministic Phase decoding:----------------------
nfrms = np.size(m_phs_shrt_syn,0)
nFFThalf = nFFT / 2 + 1
m_phs_shrt_syn = np.clip(m_phs_shrt_syn, -1, 1)
m_phc_shrt_syn = np.clip(m_phc_shrt_syn, -1, 1)
m_ph_deter = ph_dec(m_phs_shrt_syn, m_phc_shrt_syn, mode='angle')
m_ph_deter = np.hstack((m_ph_deter, np.zeros((nfrms,nFFThalf-mvf_bin))))
# Estimating aperiodicity mask:-----------------------
#m_ph_ap_mask = get_ap_mask_from_uv_decision(v_voi, nFFT, fs, mvf)
# TD Noise Gen:---------------------------------------
v_pm = la.shift_to_pm(v_shift)
sig_len = v_pm[-1] + (v_pm[-1] - v_pm[-2])
v_noise = np.random.uniform(-1, 1, sig_len)
#v_noise = np.random.normal(size=sig_len)
# Extract noise magnitude and phase:
l_frm_noise, v_lens, v_pm_plus, v_shift_dummy, v_rights = windowing(v_noise, v_pm, win_func=win_func)
m_frm_noise = la.frm_list_to_matrix(l_frm_noise, v_shift, nFFT)
m_frm_noise = np.fft.fftshift(m_frm_noise, axes=1)
m_noise_sp = la.remove_hermitian_half(np.fft.fft(m_frm_noise))
m_noise_ph = np.angle(m_noise_sp)
m_noise_mag = np.absolute(m_noise_sp)
m_noise_mag_log = np.log(m_noise_mag)
# Debug:
'''
ga2 = np.fft.fftshift(m_frm_noise, axes=1)
nx = 114
holdon()
plot(m_frm_noise[nx,:], '-b')
plot(ga2[nx,:], '-r')
holdoff()
'''
# ap mask:
v_voi_mask = np.clip(v_voi, 0, 1)
# target sp from mgc:
m_sp_targ = la.mcep_to_sp(m_spmgc, nFFT)
# Debug:
#v_voi_mask[:] = 0
# m_noise_ph = gen_rand_phase_by_template('../database/ph_template_1.npy',nfrms, nFFThalf)
# Minimum phase filter for ap signal:
#m_sp_targ = np.tile(m_sp_targ[30,:], (nfrms,1))
m_sp_comp_mph = la.sp_to_min_phase(m_sp_targ, in_type='sp')
m_sp_ph_mph = np.angle(m_sp_comp_mph)
m_noise_ph = m_noise_ph + m_sp_ph_mph
# Alloc:
m_frm_syn = np.zeros((nfrms, nFFT))
m_mag_syn = np.zeros((nfrms, nFFThalf)) # just for debug
# Noise amp-normalisation:
'''
mag_ave = np.mean(m_noise_mag_log)
m_noise_mag_log -= mag_ave
'''
rms_noise = np.sqrt(np.mean(m_noise_mag**2))
m_noise_mag_log = m_noise_mag_log - np.log(rms_noise)
# Spectral crossfade constants (TODO: Improve this):
muf = 3500 # "minimum unvoiced freq."
bw = (mvf - muf) - 20 # values found empirically. assuming mvf > 4000
cut_off = (mvf + muf) / 2
v_zeros = np.zeros((1,nFFThalf))
# Iterates through frames:
for i in xrange(nfrms):
v_mag_log = m_noise_mag_log[i,:]
if v_voi_mask[i] == 1: # voiced case
# Magnitude:
#v_mag_log[:mvf_bin] = 0
v_mag_log = la.spectral_crossfade(v_zeros, v_mag_log[None,:], cut_off, bw, fs, freq_scale='hz')[0]
# Phase:
#v_ph = np.hstack((m_ph_deter[i,:], m_noise_ph[i,mvf_bin:]))
v_ph = la.spectral_crossfade(m_ph_deter[None, i,:], m_noise_ph[None,i,:], cut_off, bw, fs, freq_scale='hz')[0]
elif v_voi_mask[i] == 0: # unvoiced case
# Phase:
v_ph = m_noise_ph[i,:]
# To complex:
v_mag = np.exp(v_mag_log) * m_sp_targ[i,:]
#Debug:
#v_mag = np.exp(v_mag_log)
#v_mag = m_sp_targ[114,:]
v_sp = v_mag * np.exp(v_ph * 1j)
v_sp = la.add_hermitian_half(v_sp[None,:], data_type='complex')
# Save:
print(i)
m_mag_syn[i,:] = v_mag # for inspection
m_frm_syn[i,:] = np.fft.fftshift(np.fft.ifft(v_sp).real)
v_sig_syn = la.ola(m_frm_syn, v_pm)
# la.write_audio_file('hola.wav', v_sig, fs)
return v_sig_syn, m_frm_syn, m_mag_syn, m_sp_targ, m_frm_noise
#==============================================================================
#==============================================================================
# If ph_hf_gen=='rand', generates random numbers for the phase above mvf
# If ph_hf_gen=='template_mask', uses a phase template to fill the gaps given by the aperiodic mask.
# If ph_hf_gen=='rand_mask', the same as above, but it uses random numbers instead of a template.
# The aperiodic mask is computed (estimated) according to the total phase energy per frame.
# v_voi: Used to construct the ap mask:
# if v_voi[n] > 0, frame is voiced. If v_voi[n] == 0, frame is unvoiced.
# If v_voi=='estim', the mask is estimated from phase data.
def synthesis_with_del_comp_and_ph_encoding(m_spmgc, m_phs_mgc, m_phc_mgc, v_shift, nFFT, fs, mvf, ph_hf_gen="rand", v_voi='estim', win_func=np.hanning, win_flat_to_len=0.3):
# MGC to SP:
m_sp_syn = la.mcep_to_sp(m_spmgc, nFFT)
# Ph and MVF:
mvf_bin = lu.round_to_int(mvf * nFFT / np.float(fs))
nFFThalf_ph = la.next_pow_of_two(mvf_bin) + 1
# MGC to Ph up to MVF:
m_phs_shrt_intrp_syn = la.mcep_to_sp(m_phs_mgc, 2*(nFFThalf_ph-1), out_type=0)
m_phc_shrt_intrp_syn = la.mcep_to_sp(m_phc_mgc, 2*(nFFThalf_ph-1), out_type=0)
f_interps_syn = interpolate.interp1d(np.arange(nFFThalf_ph), m_phs_shrt_intrp_syn, kind='cubic')
f_interpc_syn = interpolate.interp1d(np.arange(nFFThalf_ph), m_phc_shrt_intrp_syn, kind='cubic')
m_phs_shrt_syn = f_interps_syn(np.linspace(0,nFFThalf_ph-1,mvf_bin))
m_phc_shrt_syn = f_interpc_syn(np.linspace(0,nFFThalf_ph-1,mvf_bin))
# Generate phase up to Nyquist:
nfrms = np.size(m_phs_shrt_syn,0)
nFFThalf = nFFT / 2 + 1
m_phs_shrt_syn = np.clip(m_phs_shrt_syn, -1, 1)
m_phc_shrt_syn = np.clip(m_phc_shrt_syn, -1, 1)
if ph_hf_gen == 'rand':
m_phs_syn = np.hstack((m_phs_shrt_syn, np.random.uniform(-1, 1, size=(nfrms,nFFThalf-mvf_bin))))
m_phc_syn = np.hstack((m_phc_shrt_syn, np.random.uniform(-1, 1, size=(nfrms,nFFThalf-mvf_bin))))
# Phase decoding:
m_ph_syn = ph_dec(m_phs_syn, m_phc_syn)
elif ph_hf_gen in ('template_mask', 'rand_mask'):
# Deterministic Phase decoding:----------------------
m_ph_deter = ph_dec(m_phs_shrt_syn, m_phc_shrt_syn, mode='angle')
m_ph_deter = np.hstack((m_ph_deter, np.zeros((nfrms,nFFThalf-mvf_bin))))
# Estimating aperiodicity mask:-----------------------
if isinstance(v_voi, str) and v_voi == 'estim':
m_ph_ap_mask = estim_ap_mask_from_ph_data(m_phs_shrt_syn, nFFT, fs, mvf)
elif type(v_voi) is np.ndarray:
# Debug:
#v_voi[:] = 0
m_ph_ap_mask = get_ap_mask_from_uv_decision(v_voi, nFFT, fs, mvf)
# Gen aperiodic phase:--------------------------------
if ph_hf_gen == 'template_mask':
m_ap_ph = gen_rand_phase_by_template('../database/ph_template_1.npy', nfrms, nFFThalf)
elif ph_hf_gen == 'rand_mask':
m_ap_ph = np.random.uniform(-np.pi, np.pi, size=(nfrms,nFFThalf))
# Mix:
m_ph_syn = m_ap_ph * m_ph_ap_mask + m_ph_deter * (1 - m_ph_ap_mask)
# Final Synthesis:
v_syn_sig = synthesis_with_del_comp(m_sp_syn, m_ph_syn, v_shift, win_func=win_func, win_flat_to_len=win_flat_to_len)
# Debug:
#v_syn_sig = synthesis_with_del_comp_2(m_sp_syn, m_ph_syn, m_ph_ap_mask, v_shift)
return v_syn_sig
#==============================================================================
def get_ap_mask_from_uv_decision(v_voi, nFFT, fs, mvf, fade_len=40):
# Body:-------------------------------------
v_ph_ap_mask = 1 - np.clip(v_voi, 0, 1)
mvf_bin = lu.round_to_int(mvf * nFFT / np.float(fs))
m_ph_ap_mask = np.tile(v_ph_ap_mask[:,None],[1,mvf_bin])
# Smoothing of the mask around mvf:
v_ramp = np.linspace(1,0,fade_len)
m_ph_ap_mask = 1 - m_ph_ap_mask
m_ph_ap_mask[:,-fade_len:] = m_ph_ap_mask[:,-fade_len:] * v_ramp
m_ph_ap_mask = 1 - m_ph_ap_mask
nfrms = len(v_voi)
nFFThalf = nFFT / 2 + 1
m_ph_ap_mask = np.hstack((m_ph_ap_mask, np.ones((nfrms,nFFThalf-mvf_bin))))
return m_ph_ap_mask
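# Minimal worked example for the mask above (hypothetical values, kept as a
# comment for illustration). With nFFT=2048, fs=48000 and mvf=4500:
# mvf_bin = round(4500*2048/48000) = 192 and nFFThalf = 2048/2 + 1 = 1025, so
#   m_mask = get_ap_mask_from_uv_decision(np.array([1.0, 1.0, 0.0]), 2048, 48000, 4500)
#   m_mask.shape  # -> (3, 1025); voiced rows ~0 below mvf, unvoiced rows all ones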
#==============================================================================
def estim_ap_mask_from_ph_data(m_mask_ref, nFFT, fs, mvf):
# Constants:
clip_range = [-28.1 , -10.3]
fade_len = 40
# Body:-------------------------------------------------
v_mask_ref = la.db(np.sqrt(np.mean(m_mask_ref**2,1)))
v_ph_ap_mask = -np.clip(v_mask_ref, clip_range[0], clip_range[1])
v_ph_ap_mask = (v_ph_ap_mask + clip_range[1]) / float(clip_range[1] - clip_range[0])
# Phase mask in 3D:
mvf_bin = lu.round_to_int(mvf * nFFT / np.float(fs))
m_ph_ap_mask = np.tile(v_ph_ap_mask[:,None],[1,mvf_bin])
# Smoothing of the mask around mvf:
v_ramp = np.linspace(1,0,fade_len)
m_ph_ap_mask = 1 - m_ph_ap_mask
m_ph_ap_mask[:,-fade_len:] = m_ph_ap_mask[:,-fade_len:] * v_ramp
m_ph_ap_mask = 1 - m_ph_ap_mask
nFFThalf = nFFT / 2 + 1
nfrms = np.size(m_mask_ref,0)
m_ph_ap_mask = np.hstack((m_ph_ap_mask, np.ones((nfrms,nFFThalf-mvf_bin))))
return m_ph_ap_mask
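# Worked example of the mapping above (values implied by clip_range): a frame
# whose RMS phase energy is -28.1 dB or lower maps to mask value 1 (fully
# aperiodic), -10.3 dB or higher maps to 0 (fully deterministic), and values
# in between scale linearly across the 17.8 dB range.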
#==============================================================================
# Transform data from pitch-synchronous to a constant frame rate given in ms.
def to_constant_rate(m_data, targ_shift_ms, v_shift, fs, interp_kind='linear'):
v_in_cntr_nxs = np.cumsum(v_shift)
in_est_sig_len = v_in_cntr_nxs[-1] + v_shift[-1] # Instead of using sig_len, it could be estimated like this
targ_shift_smpls = targ_shift_ms / 1000.0 * fs
v_targ_cntr_nxs = np.arange(targ_shift_smpls, in_est_sig_len, targ_shift_smpls) # check that the DNN code indexes the frames this way too!
v_targ_cntr_nxs = v_targ_cntr_nxs.astype(int)
# Interpolation:
f_interp = interpolate.interp1d(v_in_cntr_nxs, m_data, axis=0, fill_value='extrapolate', kind=interp_kind)
m_data = f_interp(v_targ_cntr_nxs)
return m_data
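# Worked example (hypothetical values): with targ_shift_ms=5 and fs=48000,
# targ_shift_smpls = 5/1000.0 * 48000 = 240.0, so the target frame centres
# fall at samples 240, 480, 720, ... and each output row is the input
# interpolated at one of those positions.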
'''
def to_pitch_sync(m_data, shift_ms, v_shift_cons, fs, interp_kind='linear'):
nInfrms = np.size(m_data,0)
shift_smpls = shift_ms / 1000.0 * fs
est_sig_len = nInfrms * shift_smpls
v_in_cntr_nxs = np.arange(shift_smpls, est_sig_len, shift_smpls)
v_targ_cntr_nxs = v_targ_cntr_nxs.astype(int)
v_in_cntr_nxs = np.cumsum(v_shift_5ms)
in_est_sig_len = v_in_cntr_nxs[-1] + v_shift[-1] # Instead of using sig_len, it could be estimated like this
# Interpolation:
f_interp = interpolate.interp1d(v_in_cntr_nxs, m_data, axis=0, fill_value='extrapolate', kind=interp_kind)
m_data = f_interp(v_targ_cntr_nxs)
return
'''
# NOT FINISHED!!!!
def const_shifts_to_pitch_sync(v_const_lefts, shift_ms, fs, interp_kind='linear'):
nConstFrms = len(v_const_lefts)
shift_smpls = shift_ms / 1000.0 * fs
v_const_cntr_nxs = np.arange(1, nConstFrms+1) * shift_smpls
f_interp = interpolate.interp1d(v_const_cntr_nxs, v_const_lefts, axis=0, fill_value='extrapolate', kind=interp_kind)
#m_data = f_interp(v_targ_cntr_nxs)
v_shift = np.zeros(nConstFrms * 2) # Twice should be enough, although maybe not, check!!!
for con_left in v_const_lefts:
g=1
return
#==============================================================================
# v2: allows fine frame state position (adds relative position within the state as decimal number).
# shift file in samples
def frame_to_state_mapping2(shift_file, state_lab_file, fs, states_per_phone=5, b_refine=True):
#Read files:
v_shift = lu.read_binfile(shift_file, dim=1)
v_pm = la.shift_to_pm(v_shift)
m_state_times = np.loadtxt(state_lab_file, usecols=(0,1))
# to milliseconds:
v_pm_ms = 1000 * v_pm / fs
m_state_times_ms = m_state_times / 10000.0
# Compare:
nfrms = len(v_pm_ms)
v_st = np.zeros(nfrms) - 1 # init
for f in xrange(nfrms):
vb_greater = (v_pm_ms[f] >= m_state_times_ms[:,0]) # * (v_pm_ms[f] < m_state_times_ms[:,1])
state_nx = np.where(vb_greater)[0][-1]
v_st[f] = np.remainder(state_nx, states_per_phone)
# Refining:
if b_refine:
state_len_ms = m_state_times_ms[state_nx,1] - m_state_times_ms[state_nx,0]
fine_pos = ( v_pm_ms[f] - m_state_times_ms[state_nx,0] ) / state_len_ms
v_st[f] += fine_pos
# Protection against wrong ended label files:
np.clip(v_st, 0, states_per_phone, out=v_st)
return v_st
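# Worked example (hypothetical values): with states_per_phone=5, a pitch mark
# that falls inside lab line index 7 gives a base state of remainder(7, 5) = 2;
# with b_refine=True, a mark located 30% of the way through that state's time
# span is returned as 2.3.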
#==============================================================================
def frame_to_state_mapping(shift_file, lab_file, fs, states_per_phone=5):
#Read files:
v_shift = lu.read_binfile(shift_file, dim=1)
v_pm = la.shift_to_pm(v_shift)
m_state_times = np.loadtxt(lab_file, usecols=(0,1))
# to milliseconds:
v_pm_ms = 1000 * v_pm / fs
m_state_times_ms = m_state_times / 10000.0
# Compare:
nfrms = len(v_pm_ms)
v_st = np.zeros(nfrms) - 1 # init
for f in xrange(nfrms):
vb_greater = (v_pm_ms[f] >= m_state_times_ms[:,0]) # * (v_pm_ms[f] < m_state_times_ms[:,1])
state_nx = np.where(vb_greater)[0][-1]
v_st[f] = np.remainder(state_nx, states_per_phone)
return v_st
#==============================================================================
def get_n_frms_per_unit(v_shifts, in_lab_state_al_file, fs, unit_type='phone', n_sts_x_ph=5):
raise ValueError('Deprecated. Use "get_num_of_frms_per_phon_unit" instead.')
return
#==============================================================================
# in_lab_aligned_file: in HTS format
# n_lines_x_unit: e.g., number of states per phoneme. (each state in one line)
# TODO: Change name of variables, e.g., states -> lines
# v_shift in samples.
# nfrms_tolerance: Maximum number of frames of difference allowed between the shifts and the lab file (sometimes the end of lab files is not accurately defined).
def get_num_of_frms_per_phon_unit(v_shift, in_lab_aligned_file, fs, n_lines_x_unit=5, nfrms_tolerance=1):
# Read lab file:
m_labs_state = np.loadtxt(in_lab_aligned_file, usecols=(0,1))
m_labs_state_ms = m_labs_state / 10000.0
# Epoch Indexes:
v_ep_nxs = np.cumsum(v_shift)
v_ep_nxs_ms = v_ep_nxs * 1000.0 / fs
# Get number of frames per state:
n_states = np.size(m_labs_state_ms,axis=0)
v_nfrms_x_state = np.zeros(n_states)
for st in xrange(n_states):
vb_to_right = (m_labs_state_ms[st,0] <= v_ep_nxs_ms)
vb_to_left = (v_ep_nxs_ms < m_labs_state_ms[st,1])
vb_inter = vb_to_right * vb_to_left
v_nfrms_x_state[st] = sum(vb_inter)
# Correct if there is only one frame of difference:
nfrms_diff = np.size(v_shift) - np.sum(v_nfrms_x_state)
if (nfrms_diff > 0) and (nfrms_diff <= nfrms_tolerance):
v_nfrms_x_state[-1] += nfrms_diff
# Checking number of frames:
if np.sum(v_nfrms_x_state) != np.size(v_shift):
raise ValueError('Total number of frames is different from the number of frames of the shifts.')
m_nfrms_x_ph = np.reshape(v_nfrms_x_state, (n_states/n_lines_x_unit, n_lines_x_unit) )
v_nfrms_x_ph = np.sum(m_nfrms_x_ph, axis=1)
# Checking that the number of frames per phoneme should be greater than 0:
if any(v_nfrms_x_ph == 0.0):
raise ValueError('There is at least one phoneme that does not contain any frame.')
return v_nfrms_x_ph
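# Worked example (hypothetical values): a state-aligned lab file with 15 lines
# and n_lines_x_unit=5 describes 3 phonemes, so the returned vector has length
# 3 and its entries sum to len(v_shift) (after the tolerance correction above).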
'''
def get_num_of_frms_per_phon_unit(v_shift, in_lab_aligned_file, fs, n_lines_x_unit=5):
# Read lab file:
m_labs_state = np.loadtxt(in_lab_aligned_file, usecols=(0,1))
m_labs_state_ms = m_labs_state / 10000.0
# Epoch Indexes:
v_ep_nxs = np.cumsum(v_shift)
v_ep_nxs_ms = v_ep_nxs * 1000.0 / fs
# Get number of frames per state:
n_states = np.size(m_labs_state_ms,axis=0)
v_nfrms_x_state = np.zeros(n_states)
for st in xrange(n_states):
vb_to_right = (m_labs_state_ms[st,0] <= v_ep_nxs_ms)
vb_to_left = (v_ep_nxs_ms < m_labs_state_ms[st,1])
vb_inter = vb_to_right * vb_to_left
v_nfrms_x_state[st] = sum(vb_inter)
# Correct if there is only one frame of difference:
if (np.sum(v_nfrms_x_state) + 1) == np.size(v_shift):
v_nfrms_x_state[-1] += 1
# Checking number of frames:
if np.sum(v_nfrms_x_state) != np.size(v_shift):
raise ValueError('Total number of frames is different to the number of frames of the shifts.')
m_nfrms_x_ph = np.reshape(v_nfrms_x_state, (n_states/n_lines_x_unit, n_lines_x_unit) )
v_nfrms_x_ph = np.sum(m_nfrms_x_ph, axis=1)
# Checking that the number of frames per phoneme should be greater than 0:
if any(v_nfrms_x_ph == 0.0):
raise ValueError('There is some phoneme(s) that do(es) not contain any frame.')
return v_nfrms_x_ph
'''
#==============================================================================
def gen_rand_phase_by_template(tmplt_file, nfrms, nBins):
# Read template:
m_ph_tmplt = np.load(tmplt_file)
# Mirror phase (check!):
m_ph_tile = np.vstack((m_ph_tmplt, m_ph_tmplt))
m_ph_tile = np.hstack((m_ph_tile, m_ph_tile))
#m_ph_mirror = np.hstack((m_ph_mirror, np.fliplr(m_ph_mirror))) # keep as a comment
# Size of mirrored phase:
nfrmsT, nBinsT = m_ph_tile.shape
# Tile phase:
times_frms = int(np.ceil(nfrms / float(nfrmsT)))
times_bins = int(np.ceil(nBins / float(nBinsT)))
m_ph_tile = np.tile(m_ph_tile,[times_frms, times_bins])
m_ph_tile = m_ph_tile[:nfrms, :nBins]
return m_ph_tile
def main_phase_template():
#==============================================================================
# INPUT
#==============================================================================
in_file = os.getenv("HOME") + "/Dropbox/Education/UoE/Projects/DirectFFTWaveformModelling/data/prue_nat/herald_182.wav"
mvf = 4500
nFFT = 2048 #128 #2048
shift_ms = 5
#==============================================================================
# BODY
#==============================================================================
v_in_sig, fs = sf.read(in_file)
m_sp, m_ph, v_shift = analysis_with_del_comp(v_in_sig, nFFT, fs)
m_ph_tmplate = m_ph[:50,50:820] # values found empirically
# Write template:
tmplt_file = '/home/s1373426/Dropbox/Education/UoE/Projects/DirectFFTWaveformModelling/database/ph_template_prue.npy'
np.save(tmplt_file, m_ph_tmplate)
return
def main_pitch_marks_analysis():
fs = 48000
v_in_sig = sf.read('/afs/inf.ed.ac.uk/group/cstr/projects/Felipe_Espic/Databases/Nick-Zhizheng_dnn_baseline_practice/data/wav/hvd_649.wav')[0]
v_pm = la.get_pitch_marks(v_in_sig, fs)
#v_pm_ms = v_pm * 1000
v_pm_smpls = v_pm * fs
holdon()
plot(v_in_sig, '-b')
stem(v_pm_smpls, np.ones(len(v_pm_smpls)), '-r')
holdoff()
plot(v_pm_smpls[1:] , np.diff(v_pm_smpls), '.-r')
return
'''
def pm_to_shift(v_pm):
v_shift = np.diff(np.hstack((0,v_pm)))
return v_shift
def shift_to_pm(v_shift):
v_pm = np.cumsum(v_shift)
return v_pm
'''
#==============================================================================
# out: 'f0' or 'lf0'
def shift_to_f0(v_shift, v_voi, fs, out='f0', b_filt=True):
v_f0 = v_voi * fs / v_shift.astype('float64')
if b_filt:
v_f0 = v_voi * signal.medfilt(v_f0)
if out == 'lf0':
v_f0 = la.f0_to_lf0(v_f0)
return v_f0
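# Worked example (hypothetical values): a voiced frame (v_voi=1) with a shift
# of 240 samples at fs=48000 gives f0 = 48000/240 = 200.0 Hz; with out='lf0'
# the result is passed through la.f0_to_lf0. Unvoiced frames (v_voi=0) give 0.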
#==============================================================================
def f0_to_shift(v_f0_in, fs, unv_frm_rate_ms=5.0):
v_f0 = v_f0_in.copy()
v_f0[v_f0 == 0] = 1000.0 / unv_frm_rate_ms
v_shift = fs / v_f0
return v_shift
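# Worked example (hypothetical values): unvoiced frames (f0 == 0) are mapped
# to 1000/5 = 200 Hz by default, so at fs=48000 both a 200 Hz voiced frame and
# an unvoiced frame get a shift of 48000/200 = 240 samples.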
#==============================================================================
def interp_from_variable_to_const_frm_rate(m_data, v_pm_smpls, const_rate_ms, fs, interp_type='linear') :
dur_total_smpls = v_pm_smpls[-1]
const_rate_smpls = fs * const_rate_ms / 1000
#cons_frm_rate_frm_len = 2 * frm_rate_smpls # This is assumed according to the Merlin code. E.g., frame_number = int((end_time - start_time)/50000)
v_c_rate_centrs_smpls = np.arange(const_rate_smpls, dur_total_smpls, const_rate_smpls)
# Interpolation m_spmgc:
f_interp = interpolate.interp1d(v_pm_smpls, m_data, axis=0, kind=interp_type)
m_data_const_rate = f_interp(v_c_rate_centrs_smpls)
return m_data_const_rate
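# Worked example (hypothetical values): with const_rate_ms=5 and fs=48000,
# const_rate_smpls = 240, so the pitch-synchronous data is resampled at sample
# positions 240, 480, ... up to the last pitch mark in v_pm_smpls.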
#==============================================================================
def interp_from_const_to_variable_rate(m_data, v_frm_locs_smpls, frm_rate_ms, fs, interp_type='linear'):
n_c_rate_frms = np.size(m_data,0)
frm_rate_smpls = fs * frm_rate_ms / 1000
v_c_rate_centrs_smpls = frm_rate_smpls * np.arange(1,n_c_rate_frms+1)
f_interp = interpolate.interp1d(v_c_rate_centrs_smpls, m_data, axis=0, kind=interp_type)
m_data_intrp = f_interp(v_frm_locs_smpls)
return m_data_intrp
#==============================================================================
# NOTE: "v_frm_locs_smpls" are the locations of the target frames (centres) in the constant rate data to sample from.
# This function should be used along with the function "interp_from_const_to_variable_rate"
def get_shifts_and_frm_locs_from_const_shifts(v_shift_c_rate, frm_rate_ms, fs, interp_type='linear'):
# Interpolation in reverse:
n_c_rate_frms = np.size(v_shift_c_rate,0)
frm_rate_smpls = fs * frm_rate_ms / 1000
v_c_rate_centrs_smpls = frm_rate_smpls * np.arange(1,n_c_rate_frms+1)
f_interp = interpolate.interp1d(v_c_rate_centrs_smpls, v_shift_c_rate, axis=0, kind=interp_type)
v_shift_vr = np.zeros(n_c_rate_frms * 2) # * 2 just in case. Improve this!
v_frm_locs_smpls = np.zeros(n_c_rate_frms * 2)
curr_pos_smpl = v_c_rate_centrs_smpls[-1]
for i_vr in xrange(len(v_shift_vr)-1,0, -1):
#print(i_vr)
v_frm_locs_smpls[i_vr] = curr_pos_smpl
try:
v_shift_vr[i_vr] = f_interp(curr_pos_smpl)
except ValueError:
v_frm_locs_smpls = v_frm_locs_smpls[i_vr+1:]
v_shift_vr = v_shift_vr[i_vr+1:]
break
curr_pos_smpl = curr_pos_smpl - v_shift_vr[i_vr]
return v_shift_vr, v_frm_locs_smpls
#==============================================================================
# MAIN (for Dev)
#==============================================================================
if __name__ == '__main__':
import libaudio as la
#==============================================================================
# INPUT
#==============================================================================
mvf = 4500
nFFT = 4096 #128 #2048
fs = 48000
data_dir = '/afs/inf.ed.ac.uk/group/cstr/projects/Felipe_Espic/Projects/DirectFFTWaveModelling/dnn/expers_nick/nick_fft_feats_new_expr_2_SLSTM_5_layers_state_feat_ref_60_45_unv_zero_learn_rate_0_002/gen/DNN_TANH_TANH_TANH_TANH_SLSTM_LINEAR__mag_lf0_real_imag_vuv_0_2400_597_454_5_1024_512'
filename = 'hvd_622'
#data_dir = '/afs/inf.ed.ac.uk/group/cstr/projects/Felipe_Espic/Projects/DirectFFTWaveModelling/dnn/expers_laura/laura_fft_feats_new_expr_2_SLSTM_5_layers_state_feat_ref_60_45_unv_zero_learn_rate_0_002/gen/DNN_TANH_TANH_TANH_TANH_SLSTM_LINEAR__mag_lf0_real_imag_vuv_0_4500_486_454_5_1024_512'
#filename = '1106_1' #'3589_1'#'3087_1'#'3589_1'
out_dir = '/home/s1373426/Dropbox/Education/UoE/Projects/DirectFFTWaveformModelling/data/out_dir_prue3'
#filename = 'hvd_618'
suffix = '_prue_new2'
hf_slope_coeff = 1.8 # def: 1.0
b_use_ap_voi = True # def: True
b_voi_ap_win = True # def: True
#==============================================================================
# BODY
#==============================================================================
m_mag_mel_log = la.read_mgc(data_dir + '/' + filename + '.mag_pf', 60)
m_real_mel = la.read_mgc(data_dir + '/' + filename + '.real', 45)
m_imag_mel = la.read_mgc(data_dir + '/' + filename + '.imag', 45)
v_f0 = la.read_f0(data_dir + '/' + filename + '.lf0', kind='lf0')
# Synth:
v_syn_sig = synthesis_with_del_comp_and_ph_encoding5(m_mag_mel_log, m_real_mel, m_imag_mel, v_f0, nFFT, fs, mvf, f0_type='f0', hf_slope_coeff=hf_slope_coeff, b_use_ap_voi=b_use_ap_voi, b_voi_ap_win=b_voi_ap_win)
play(v_syn_sig, fs)
la.write_audio_file(out_dir + '/' + filename + suffix + '.wav', v_syn_sig, fs)
dummy=1
| []
| []
| [
"HOME"
]
| [] | ["HOME"] | python | 1 | 0 | |
gox_test.go | package goxstream
import (
"log"
"os"
"testing"
)
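// Running this test requires a reachable Oracle XStream outbound server; the
// connection parameters are read from environment variables. Illustrative
// invocation (assumed values, adjust to your setup):
//   XSTREAM_USER=c##xout XSTREAM_PASSWORD=secret XSTREAM_DBNAME=ORCL \
//   XSTREAM_SERVER=xout_server go test -run TestM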
func TestM(t *testing.T) {
user := os.Getenv("XSTREAM_USER")
pwd := os.Getenv("XSTREAM_PASSWORD")
db := os.Getenv("XSTREAM_DBNAME")
server := os.Getenv("XSTREAM_SERVER")
conn, err := Open(user, pwd, db, server, 12)
if err != nil {
log.Panic(err)
}
//var lastScn scn.SCN
//go func() {
// for range time.NewTicker(10 * time.Second).C {
// if lastScn > 0 {
// log.Printf("scnlwm update to %v\n", lastScn)
// err := conn.SetSCNLwm(lastScn)
// if err != nil {
// panic(err)
// }
// }
// }
//}()
for {
msg, err := conn.GetRecord()
if err != nil {
log.Fatal(err)
}
log.Println(msg.String())
}
}
| [
"\"XSTREAM_USER\"",
"\"XSTREAM_PASSWORD\"",
"\"XSTREAM_DBNAME\"",
"\"XSTREAM_SERVER\""
]
| []
| [
"XSTREAM_DBNAME",
"XSTREAM_PASSWORD",
"XSTREAM_SERVER",
"XSTREAM_USER"
]
| [] | ["XSTREAM_DBNAME", "XSTREAM_PASSWORD", "XSTREAM_SERVER", "XSTREAM_USER"] | go | 4 | 0 | |
sharedsecret_test.go | package sharedsecret
import (
"crypto/rand"
"fmt"
"math/big"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// With the `New` function, a random secret is generated and distributed into shares. Both the
// secret and the shares are returned.
func Example_new() {
// Create 5 shares that 3 or more of them can recover the secret.
shares, secret := New(5, 3)
// Now we should distribute the shares to different parties and forget about the shares and
// secret. Once the original secret is needed, at least 3 shares should be used in order to
// recover it:
// We can't recover from only 2 shares:
wrong := Recover(shares[1], shares[3])
// We can recover from only 3 (or more) shares:
correct := Recover(shares[1], shares[3], shares[0])
fmt.Println(secret.Cmp(wrong) != 0, secret.Cmp(correct) == 0)
// Output: true true
}
// With the `Distribute` function, a given secret can be distributed to shares.
func Example_distribute() {
secret := big.NewInt(120398491412912873)
// Create 5 shares that 3 or more of them can recover the secret.
shares := Distribute(secret, 5, 3)
// We can recover from only 3 (or more) shares:
recovered := Recover(shares[1], shares[3], shares[0])
fmt.Println(recovered)
// Output: 120398491412912873
}
const (
testN = 10
testK = 4
)
func TestNewRecover_sanity(t *testing.T) {
t.Parallel()
// Create testN shares that testK or more of them can recover the secret.
shares, secret := New(testN, testK)
testSharesAndSecret(t, shares, secret)
}
func TestDistributeRecover_sanity(t *testing.T) {
t.Parallel()
// Create a secret and distribute it to testN shares that testK or more of them can recover the
// secret.
secret := big.NewInt(123456)
shares := Distribute(secret, testN, testK)
testSharesAndSecret(t, shares, secret)
}
func testSharesAndSecret(t *testing.T, shares []Share, secret *big.Int) {
t.Run("All shares should recover", func(t *testing.T) {
assert.Equal(t, secret, Recover(shares...))
})
t.Run("The minimum number of shares should recover", func(t *testing.T) {
assert.Equal(t, secret, Recover(shares[:testK]...))
assert.Equal(t, secret, Recover(shares[testK:]...))
})
t.Run("Less than the minimum number of shares should not recover", func(t *testing.T) {
assert.NotEqual(t, secret, Recover(shares[:testK-1]...))
assert.NotEqual(t, secret, Recover(shares[testN-testK+1:]...))
})
t.Run("Minimum number with a repeated share should not recover", func(t *testing.T) {
shares := shares[:testK-1]
shares = append(shares, shares[0])
assert.NotEqual(t, secret, Recover(shares...))
assert.NotEqual(t, secret, Recover(shares...))
})
}
func TestNew_panic(t *testing.T) {
t.Parallel()
secret := big.NewInt(123456)
secretTooBig := big.NewInt(2)
secretTooBig.Exp(secretTooBig, big.NewInt(127), nil)
assert.Panics(t, func() { New(1, 2) })
assert.Panics(t, func() { New(1, 0) })
assert.Panics(t, func() { Distribute(secret, 1, 2) })
assert.Panics(t, func() { Distribute(secret, 1, 0) })
assert.Panics(t, func() { Distribute(secretTooBig, testN, testK) })
}
func TestShareString(t *testing.T) {
t.Parallel()
s := Share{big.NewInt(0), big.NewInt(1)}
assert.Equal(t, "0,1", s.String())
}
func TestShareMarshalText_fuzz(t *testing.T) {
t.Parallel()
for i := 0; i < 10000; i++ {
x, err := rand.Int(rand.Reader, big.NewInt(10000))
require.NoError(t, err)
y, err := rand.Int(rand.Reader, big.NewInt(10000))
require.NoError(t, err)
want := Share{x: x, y: y}
text, err := want.MarshalText()
require.NoError(t, err)
var got Share
err = got.UnmarshalText(text)
require.NoError(t, err)
assert.True(t, want.x.Cmp(got.x) == 0)
assert.True(t, want.y.Cmp(got.y) == 0)
}
}
func TestShareUnMarshalText_errors(t *testing.T) {
t.Parallel()
var s Share
assert.Error(t, s.UnmarshalText([]byte("")))
assert.Error(t, s.UnmarshalText([]byte("1,2,3")))
assert.Error(t, s.UnmarshalText([]byte("a,1")))
assert.Error(t, s.UnmarshalText([]byte("1,a")))
}
| []
| []
| []
| [] | [] | go | null | null | null |
src/config/asgi.py | """
ASGI config for billing_gateway project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.local')
application = get_asgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
source/loaders/java_loader/bootstrap/lib/bootstrap.java | import java.io.File;
import javax.tools.*;
import java.util.*;
import java.util.jar.JarEntry;
import java.util.jar.JarFile;
import java.net.URL;
import java.net.URLClassLoader;
import java.lang.reflect.Array;
import java.lang.reflect.Constructor;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.nio.file.Path;
import java.nio.file.Paths;
public class bootstrap {
private static Set<String> executionPath = new HashSet<String>();
public static Class<?> FindClass(String name) {
System.out.println("Finding Class " + name);
try {
URL[] urlArr = new URL[executionPath.size()];
int i = 0;
for (String x : executionPath)
urlArr[i++] = new File(x).toURI().toURL();
Class<?> cls = Class.forName(name, true, new URLClassLoader(urlArr));
return cls;
} catch (Exception e) {
System.out.println("Find Class Error " + e);
}
return null;
}
public static int java_bootstrap_execution_path(String path) {
System.out.println("bootstrapping Execution path = " + path);
executionPath.add(path);
try {
URL execPathFile = new File(path).toURI().toURL();
String classpath = System.getProperty("java.class.path");
classpath = classpath + System.getProperty("path.separator") + execPathFile.toString();
System.setProperty("java.class.path", classpath);
return 0;
} catch (Exception e) {
System.out.println("Exec Error = " + e);
}
return 1;
}
public static Class<?>[] loadFromFile(String[] paths) {
Class<?>[] handleObject = new Class<?>[paths.length];
for (int i = 0; i < paths.length; i++) {
System.out.println("Path provided " + paths[i]);
try {
JavaCompiler compiler = ToolProvider.getSystemJavaCompiler();
DiagnosticCollector<JavaFileObject> ds = new DiagnosticCollector<>();
StandardJavaFileManager mgr = compiler.getStandardFileManager(ds, null, null);
Iterable<String> classOutputPath = Arrays.asList(new String[] { "-d", System.getenv("LOADER_SCRIPT_PATH") });
File pathFile = new File(paths[i]);
Iterable<? extends JavaFileObject> sources = mgr.getJavaFileObjectsFromFiles(Arrays.asList(pathFile));
JavaCompiler.CompilationTask task = compiler.getTask(null, mgr, ds, classOutputPath, null, sources);
Boolean call = task.call(); // main method to compile the file into class
if (call) {
System.out.println("Compilation Successful");
Path path = Paths.get(pathFile.getCanonicalPath());
String classname = path.getFileName().toString().split(".java")[0];
for (String curExecPath : executionPath) {
try {
File execPathFile = new File(curExecPath);
URLClassLoader clsLoader = new URLClassLoader(new URL[] { execPathFile.toURI().toURL() });
// handleObject.addClass(classname, clsLoader.loadClass(classname));
handleObject[i] = clsLoader.loadClass(classname);
clsLoader.close();
System.out.println(i + " -> " + classname + " loaded");
// handleArray.addClass(classname, Class.forName(classname));
System.out.println("Class Loading Successful");
break;
                    } catch (Exception e) {
                        // class not found on this execution path; try the next one
                    }
}
} else {
System.out.println("Compilation Failed");
}
for (Diagnostic<? extends JavaFileObject> d : ds.getDiagnostics()) { // diagnostic error printing
System.out.format("DIAGNOSTIC Line: %d, %s in %s", d.getLineNumber(), d.getMessage(null),
d.getSource().getName());
}
mgr.close();
System.out.print("\n");
} catch (Exception e) {
System.err.println("Load Function" + e);
}
}
return handleObject;
}
public static Class<?>[] load_from_package(String path) {
if (path.endsWith(".class")) {
Class<?>[] handleObject = new Class<?>[1];
System.out.println("bootstrap load from package " + path);
for (String curExecPath : executionPath) {
try {
File pathFile = new File(path);
Path canonical = Paths.get(pathFile.getCanonicalPath());
String classname = canonical.getFileName().toString().split(".class")[0];
File execPathFile = new File(curExecPath);
URLClassLoader clsLoader = new URLClassLoader(new URL[] { execPathFile.toURI().toURL() });
Class<?> c = clsLoader.loadClass(classname);
handleObject[0] = c;
clsLoader.close();
break;
} catch (Exception e) {
System.out.println("EXEPTION " + e);
}
}
return handleObject;
} else if (path.endsWith(".jar")) {
try {
for (String curExecPath : executionPath) {
ArrayList<Class<?>> handleList = new ArrayList<Class<?>>();
Path curJarPath = Paths.get(curExecPath, path);
JarFile jarFile = new JarFile(curJarPath.toString());
Enumeration<JarEntry> e = jarFile.entries();
Path jpath = Paths.get("jar:file:", curExecPath, path);
String jarPath = jpath.toString() + "!/";
Path epath = Paths.get(curExecPath, path);
executionPath.add(epath.toString());
URLClassLoader clsLoader = new URLClassLoader(new URL[] { new URL(jarPath) });
while (e.hasMoreElements()) {
JarEntry je = e.nextElement();
if (je.getName().endsWith(".class")) {
String className = je.getName().substring(0, je.getName().length() - 6);
                        className = className.replace('/', '.'); // jar entry names always use '/'
try {
Class<?> c = clsLoader.loadClass(className);
if (c != null) {
System.out.println("Got CLass " + c.getName());
handleList.add(c);
}
} catch (Exception ex) {
System.out.println(ex);
}
}
}
clsLoader.close();
Class<?>[] rtnClsArr = new Class<?>[handleList.size()];
rtnClsArr = handleList.toArray(rtnClsArr);
jarFile.close();
return rtnClsArr;
}
} catch (Exception e) {
System.out.println("EXCEPTION " + e);
}
}
return null;
}
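    // Builds a JVM-style method descriptor for m, e.g. (illustrative)
    // "(Ljava.lang.String;I)V" for void m(String s, int i); note it uses
    // '.' rather than '/' because it is derived from array class names.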
public static String getSignature(Method m) {
String sig;
try {
Field gSig = Method.class.getDeclaredField("signature");
gSig.setAccessible(true);
sig = (String) gSig.get(m);
if (sig != null)
return sig;
} catch (IllegalAccessException | NoSuchFieldException e) {
e.printStackTrace();
}
StringBuilder sb = new StringBuilder("(");
for (Class<?> c : m.getParameterTypes())
sb.append((sig = Array.newInstance(c, 0).toString()).substring(1, sig.indexOf('@')));
return sb.append(')').append(m.getReturnType() == void.class ? "V"
: (sig = Array.newInstance(m.getReturnType(), 0).toString()).substring(1, sig.indexOf('@'))).toString();
}
public static String get_Field_Type(Class<?> cls, String key) {
System.out.println("Getting field type bootstrap for " + key);
String valType = "";
try {
Field f = cls.getField(key);
valType = f.getType().getName();
} catch (Exception e) {
System.out.println("Finding field error" + e);
}
return valType;
}
public static String get_static_invoke_return_type(Class<?> cls, String key) {
String rtnType = "";
try {
System.out.println("ClassName: " + cls.getName() + " " + key);
Method[] methods = cls.getDeclaredMethods();
for (Method method : methods) {
System.out.println("Name of the method: " + method.getName());
}
Class<?>[] cArg = new Class[1];
cArg[0] = String.class;
Method m = cls.getDeclaredMethod(key, cArg);
System.out.println("OUR: " + m.getReturnType().getName());
} catch (Exception e) {
e.printStackTrace();
}
return rtnType;
}
public static String java_bootstrap_get_class_name(Class<?> cls) {
// Working test for getting function name and details
Constructor<?>[] constructors = cls.getDeclaredConstructors();
for (Constructor<?> cnstr : constructors) {
System.out.println("Name of the constructor: " + cnstr.getName());
}
Field[] fields = cls.getFields();
for (Field f : fields) {
System.out.println("Name of the fiekd: " + f.getName());
}
Method[] methods = cls.getDeclaredMethods();
for (Method method : methods) {
System.out.println("Name of the method: " + method.getName());
System.out.println("Signature " + getSignature(method));
}
return cls.getName();
}
// public static void DiscoverData(String classname) {
// Method[] methods = hClass.getDeclaredMethods();
// for (Method method : methods) {
// System.out.println("Name of the method: " + method.getName());
// Class<?>[] parameters = method.getParameterTypes();
// if (parameters.length == 0)
// System.out.println("\tparameter: none");
// for (Class<?> parameter : parameters) {
// System.out.println("\tparameter: " + parameter.getSimpleName());
// }
// System.out.println("\tReturn Type: " + method.getReturnType() + "\n");
// }
// }
}
| [
"\"LOADER_SCRIPT_PATH\""
]
| []
| [
"LOADER_SCRIPT_PATH"
]
| [] | ["LOADER_SCRIPT_PATH"] | java | 1 | 0 | |
tests/conftest.py | import os
import pytest
from etcetra import EtcdClient, HostPortPair
@pytest.fixture
def etcd_addr():
env_addr = os.environ.get('BACKEND_ETCD_ADDR')
if env_addr is not None:
return HostPortPair.parse(env_addr)
return HostPortPair.parse('localhost:2379')
@pytest.fixture
async def etcd(etcd_addr):
etcd = EtcdClient(etcd_addr)
try:
yield etcd
finally:
async with etcd.connect() as communicator:
await communicator.delete_prefix('/test')
del etcd
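# Illustrative (hypothetical) usage of the fixture above; assumes the etcetra
# communicator exposes `put` and `get` alongside the `delete_prefix` used here:
#
#   async def test_put_get(etcd):
#       async with etcd.connect() as communicator:
#           await communicator.put('/test/key', 'value')
#           assert await communicator.get('/test/key') == 'value'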
| []
| []
| [
"BACKEND_ETCD_ADDR"
]
| [] | ["BACKEND_ETCD_ADDR"] | python | 1 | 0 | |
python-sdk/workflows/train/tensorflow/mnist-distributed/job.py | # description: train tensorflow CNN model on mnist data distributed via tensorflow
# Train a distributed TensorFlow job using the `tf.distribute.Strategy` API on Azure ML.
#
# For more information on distributed training with TensorFlow, refer [here](https://www.tensorflow.org/guide/distributed_training).
# imports
import os
from pathlib import Path
from azureml.core import Workspace
from azureml.core import ScriptRunConfig, Experiment, Environment
from azureml.core.runconfig import TensorflowConfiguration
# get workspace
ws = Workspace.from_config()
# get root of git repo
prefix = Path(__file__).parent
# training script
source_dir = str(prefix.joinpath("src"))
script_name = "train.py"
# environment file
environment_file = str(prefix.joinpath("environment.yml"))
# azure ml settings
environment_name = "tf-gpu-example"
experiment_name = "tf-mnist-distributed-example"
compute_name = "gpu-K80-2"
# create environment
env = Environment.from_conda_specification(environment_name, environment_file)
# specify a GPU base image
env.docker.enabled = True
env.docker.base_image = (
"mcr.microsoft.com/azureml/openmpi3.1.2-cuda10.1-cudnn7-ubuntu18.04"
)
# Create a `ScriptRunConfig` to specify the training script & arguments, environment, and cluster to run on.
#
# The training script in this example utilizes multi-worker distributed training of a Keras model using the `tf.distribute.Strategy` API,
# specifically `tf.distribute.experimental.MultiWorkerMirroredStrategy`. To run a multi-worker TensorFlow job on Azure ML, create a
# `TensorflowConfiguration`. Specify a `worker_count` corresponding to the number of nodes for your training job.
#
# In TensorFlow, the `TF_CONFIG` environment variable is required for training on multiple machines.
# Azure ML will configure and set the `TF_CONFIG` variable appropriately for each worker before executing your training script.
# You can access `TF_CONFIG` from your training script if you need to via `os.environ['TF_CONFIG']`.
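# As a minimal, illustrative sketch (not part of this submission script), the
# training script could inspect `TF_CONFIG` roughly like this:
#
#   import json, os
#   tf_config = json.loads(os.environ.get("TF_CONFIG", "{}"))
#   task = tf_config.get("task", {})  # e.g. {"type": "worker", "index": 0}
#   print(f"running as {task.get('type')} #{task.get('index')}")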
# create distributed config
distr_config = TensorflowConfiguration(worker_count=2, parameter_server_count=0)
# create args
model_path = os.path.join("./outputs", "keras-model")
args = ["--epochs", 30, "--model-dir", model_path]
# create job config
src = ScriptRunConfig(
source_directory=source_dir,
script=script_name,
arguments=args,
compute_target=compute_name,
environment=env,
distributed_job_config=distr_config,
)
# submit job
run = Experiment(ws, experiment_name).submit(src)
run.wait_for_completion(show_output=True)
| []
| []
| [
"TF_CONFIG"
]
| [] | ["TF_CONFIG"] | python | 1 | 0 | |
code/UI/ClientExamples/Python/ExampleQuery_Operations_2.py | """ This example sends a simple set of DSL commands to the ARAX API.
"""
# Import minimal requirements
import requests
import json
import re
# Set the base URL for the ARAX reasoner and its endpoint
endpoint_url = 'https://arax.ncats.io/api/arax/v1.1'
# Create a dict of the request, includes a reference to a previous response and a filter action
request = { "message": {}, "operations": {
"message_uris": [ f"https://arax.ncats.io/api/arax/v1.1/response/9857" ],
"actions": [
"filter_results(action=limit_number_of_results, max_results=7)",
] } }
# Send the request to RTX and check the status
response_content = requests.post(endpoint_url + '/query', json=request, headers={'accept': 'application/json'})
status_code = response_content.status_code
if status_code != 200:
print("ERROR returned with status "+str(status_code))
print(response_content)
exit()
# Unpack the response content into a dict
response_dict = response_content.json()
# Display the information log
for message in response_dict['logs']:
if True or message['level'] != 'DEBUG':
print(f"{message['timestamp']}: {message['level']}: {message['message']}")
# Display the results
print(f"Results ({len(response_dict['message']['results'])}):")
for result in response_dict['message']['results']:
confidence = 0.0
if 'confidence' in result:
confidence = result['confidence']
if confidence is None:
confidence = 0.0
essence = '?'
if 'essence' in result:
essence = result['essence']
print(" -" + '{:6.3f}'.format(confidence) + f"\t{essence}")
# These URLs provide direct access to resulting data and GUI
print(f"Data: {response_dict['id']}")
if response_dict['id'] is not None:
match = re.search(r'(\d+)$', response_dict['id'])
if match:
print(f"GUI: https://arax.ncats.io/?r={match.group(1)}")
| []
| []
| []
| [] | [] | python | null | null | null |
java/src/test/java/net/razorvine/examples/EchoExample.java | package net.razorvine.examples;
import java.io.IOException;
import java.util.SortedMap;
import net.razorvine.pickle.PrettyPrint;
import net.razorvine.pyro.Config;
import net.razorvine.pyro.NameServerProxy;
import net.razorvine.pyro.PyroException;
import net.razorvine.pyro.PyroProxy;
import net.razorvine.pyro.PyroURI;
/**
* Simple example that shows the use of Pyro with the Pyro echo server.
*
* @author Irmen de Jong ([email protected])
*/
public class EchoExample {
static protected byte[] hmacKey = null; // "irmen".getBytes();
public static void main(String[] args) throws IOException {
System.out.println("Testing Pyro echo server (make sure it's running, with nameserver enabled)...");
System.out.println("Pyrolite version: "+Config.PYROLITE_VERSION);
setConfig();
// Config.SERIALIZER = Config.SerializerType.pickle;
NameServerProxy ns = NameServerProxy.locateNS(null, hmacKey);
PyroProxy p = new PyroProxy(ns.lookup("test.echoserver"));
p.pyroHmacKey = hmacKey;
p.pyroHandshake = "banana";
ns.close();
// PyroProxy p=new PyroProxy("localhost",9999,"test.echoserver");
Object x=42;
System.out.println("echo param:");
PrettyPrint.print(x);
Object result=p.call("echo", x);
System.out.println("return value:");
PrettyPrint.print(result);
String s="This string is way too long. This string is way too long. This string is way too long. This string is way too long. ";
s=s+s+s+s+s;
System.out.println("echo param:");
PrettyPrint.print(s);
result=p.call("echo", s);
System.out.println("return value:");
PrettyPrint.print(result);
// echo a pyro proxy and validate that all relevant attributes are also present on the proxy we got back.
System.out.println("proxy test.");
result = p.call("echo", p);
PyroProxy p2 = (PyroProxy) result;
System.out.println("response proxy: " + p2);
if(!p2.objectid.equals("test.echoserver")) throw new AssertionError("objectid");
if(!((String)p2.pyroHandshake).equals("banana")) throw new AssertionError("handshake");
if(!p2.pyroMethods.contains("echo")) throw new AssertionError("methods");
if(p2.pyroHmacKey!=null) {
String hmac2 = new String(p2.pyroHmacKey);
if(!hmac2.equals(new String(hmacKey))) throw new AssertionError("hmac");
}
System.out.println("remote iterator test.");
@SuppressWarnings("unchecked")
Iterable<String> iter = (Iterable<String>)p.call("generator");
for(String item: iter) {
System.out.println(" got item: "+item);
}
System.out.println("error test.");
try {
result=p.call("error");
} catch (PyroException e) {
System.out.println("Pyro Exception (expected)! "+e.getMessage());
System.out.println("Pyro Exception cause: "+e.getCause());
System.out.println("Pyro Exception remote traceback:\n>>>\n"+e._pyroTraceback+"<<<");
}
try {
result=p.call("error_with_text");
} catch (PyroException e) {
System.out.println("Pyro Exception (expected)! "+e.getMessage());
System.out.println("Pyro Exception cause: "+e.getCause());
System.out.println("Pyro Exception remote traceback:\n>>>\n"+e._pyroTraceback+"<<<");
}
// System.out.println("shutting down the test echo server.");
// p.call("shutdown");
// tidy up:
p.close();
}
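	/**
	 * Reads optional Pyro settings from Java system properties or environment
	 * variables, e.g. (illustrative) running with -DPYRO_SERIALIZER=pickle or
	 * with PYRO_TRACE_DIR=/tmp exported in the shell.
	 */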
static void setConfig() {
String tracedir=System.getenv("PYRO_TRACE_DIR");
if(System.getProperty("PYRO_TRACE_DIR")!=null) {
tracedir=System.getProperty("PYRO_TRACE_DIR");
}
String serializer=System.getenv("PYRO_SERIALIZER");
if(System.getProperty("PYRO_SERIALIZER")!=null) {
serializer=System.getProperty("PYRO_SERIALIZER");
}
if(serializer!=null) {
Config.SERIALIZER = Enum.valueOf(Config.SerializerType.class, serializer);
}
Config.MSG_TRACE_DIR=tracedir;
}
}
/**
* This custom proxy adds custom annotations to the pyro messages
*/
@SuppressWarnings("serial")
class CustomProxy extends PyroProxy
{
public CustomProxy(PyroURI uri) throws IOException
{
super(uri);
}
@Override
public SortedMap<String, byte[]> annotations()
{
SortedMap<String, byte[]> ann = super.annotations();
ann.put("XYZZ", "A custom annotation!".getBytes());
return ann;
}
} | [
"\"PYRO_TRACE_DIR\"",
"\"PYRO_SERIALIZER\""
]
| []
| [
"PYRO_SERIALIZER",
"PYRO_TRACE_DIR"
]
| [] | ["PYRO_SERIALIZER", "PYRO_TRACE_DIR"] | java | 2 | 0 | |
docs/dev/newbies/2.py | mo = {
"a":2,
"b":2,
"c":6
}
print(type(mo))
print(mo['a'])
import os
print(type(os.environ))
print(os.environ['PATH'])
| []
| []
| [
"PATH"
]
| [] | ["PATH"] | python | 1 | 0 | |
format/format.go | // Copyright (c) 2019, Daniel Martí <[email protected]>
// See LICENSE for licensing information
// Package format exposes gofumpt's formatting in an API similar to go/format.
// In general, the APIs are only guaranteed to work well when the input source
// is in canonical gofmt format.
package format
import (
"bytes"
"fmt"
"go/ast"
"go/format"
"go/parser"
"go/token"
"os"
"reflect"
"regexp"
"sort"
"strconv"
"strings"
"unicode"
"unicode/utf8"
"github.com/google/go-cmp/cmp"
"golang.org/x/mod/semver"
"golang.org/x/tools/go/ast/astutil"
"mvdan.cc/gofumpt/internal/version"
)
type Options struct {
// LangVersion corresponds to the Go language version a piece of code is
// written in. The version is used to decide whether to apply formatting
// rules which require new language features. When inside a Go module,
// LangVersion should generally be specified as the result of:
//
// go list -m -f {{.GoVersion}}
//
// LangVersion is treated as a semantic version, which might start with
// a "v" prefix. Like Go versions, it might also be incomplete; "1.14"
// is equivalent to "1.14.0". When empty, it is equivalent to "v1", to
// not use language features which could break programs.
LangVersion string
ExtraRules bool
}
// Source formats src in gofumpt's format, assuming that src holds a valid Go
// source file.
func Source(src []byte, opts Options) ([]byte, error) {
fset := token.NewFileSet()
// Ensure our parsed files never start with base 1,
// to ensure that using token.NoPos+1 will panic.
fset.AddFile("gofumpt_base.go", 1, 10)
file, err := parser.ParseFile(fset, "", src, parser.ParseComments)
if err != nil {
return nil, err
}
File(fset, file, opts)
var buf bytes.Buffer
if err := format.Node(&buf, fset, file); err != nil {
return nil, err
}
return buf.Bytes(), nil
}
var rxCodeGenerated = regexp.MustCompile(`^// Code generated .* DO NOT EDIT\.$`)
// File modifies a file and fset in place to follow gofumpt's format. The
// changes might include adding or removing newlines in fset,
// modifying the position of nodes, or modifying literal values.
func File(fset *token.FileSet, file *ast.File, opts Options) {
simplify(file)
for _, cg := range file.Comments {
if cg.Pos() > file.Package {
break
}
for _, line := range cg.List {
if rxCodeGenerated.MatchString(line.Text) {
return
}
}
}
if opts.LangVersion == "" {
opts.LangVersion = "v1"
} else if opts.LangVersion[0] != 'v' {
opts.LangVersion = "v" + opts.LangVersion
}
if !semver.IsValid(opts.LangVersion) {
panic(fmt.Sprintf("invalid semver string: %q", opts.LangVersion))
}
f := &fumpter{
File: fset.File(file.Pos()),
fset: fset,
astFile: file,
Options: opts,
minSplitFactor: 0.4,
}
var topFuncType *ast.FuncType
pre := func(c *astutil.Cursor) bool {
f.applyPre(c)
switch node := c.Node().(type) {
case *ast.FuncDecl:
topFuncType = node.Type
case *ast.FieldList:
ft, _ := c.Parent().(*ast.FuncType)
if ft == nil || ft != topFuncType {
break
}
// For top-level function declaration parameters,
// require the line split to be longer.
// This avoids func lines which are a bit too short,
// and allows func lines which are a bit longer.
//
// We don't just increase longLineLimit,
// as we still want splits at around the same place.
if ft.Params == node {
f.minSplitFactor = 0.6
}
// Don't split result parameters into multiple lines,
// as that can be easily confused for input parameters.
// TODO: consider the same for single-line func calls in
// if statements.
// TODO: perhaps just use a higher factor, like 0.8.
if ft.Results == node {
f.minSplitFactor = 1000
}
case *ast.BlockStmt:
f.blockLevel++
}
return true
}
post := func(c *astutil.Cursor) bool {
f.applyPost(c)
// Reset minSplitFactor and blockLevel.
switch node := c.Node().(type) {
case *ast.FuncType:
if node == topFuncType {
f.minSplitFactor = 0.4
}
case *ast.BlockStmt:
f.blockLevel--
}
return true
}
astutil.Apply(file, pre, post)
}
// Multiline nodes which could easily fit on a single line under this many bytes
// may be collapsed onto a single line.
const shortLineLimit = 60
// Single-line nodes which take over this many bytes, and could easily be split
// into two lines of at least its minSplitFactor factor, may be split.
const longLineLimit = 100
var rxOctalInteger = regexp.MustCompile(`\A0[0-7_]+\z`)
type fumpter struct {
Options
*token.File
fset *token.FileSet
astFile *ast.File
// blockLevel is the number of indentation blocks we're currently under.
// It is used to approximate the levels of indentation a line will end
// up with.
blockLevel int
minSplitFactor float64
}
func (f *fumpter) commentsBetween(p1, p2 token.Pos) []*ast.CommentGroup {
comments := f.astFile.Comments
i1 := sort.Search(len(comments), func(i int) bool {
return comments[i].Pos() >= p1
})
comments = comments[i1:]
i2 := sort.Search(len(comments), func(i int) bool {
return comments[i].Pos() >= p2
})
comments = comments[:i2]
return comments
}
func (f *fumpter) inlineComment(pos token.Pos) *ast.Comment {
comments := f.astFile.Comments
i := sort.Search(len(comments), func(i int) bool {
return comments[i].Pos() >= pos
})
if i >= len(comments) {
return nil
}
line := f.Line(pos)
for _, comment := range comments[i].List {
if f.Line(comment.Pos()) == line {
return comment
}
}
return nil
}
// addNewline is a hack to let us force a newline at a certain position.
func (f *fumpter) addNewline(at token.Pos) {
offset := f.Offset(at)
field := reflect.ValueOf(f.File).Elem().FieldByName("lines")
n := field.Len()
lines := make([]int, 0, n+1)
for i := 0; i < n; i++ {
cur := int(field.Index(i).Int())
if offset == cur {
// This newline already exists; do nothing. Duplicate
// newlines can't exist.
return
}
if offset >= 0 && offset < cur {
lines = append(lines, offset)
offset = -1
}
lines = append(lines, cur)
}
if offset >= 0 {
lines = append(lines, offset)
}
if !f.SetLines(lines) {
panic(fmt.Sprintf("could not set lines to %v", lines))
}
}
// removeLines removes all newlines between two positions, so that they end
// up on the same line.
func (f *fumpter) removeLines(fromLine, toLine int) {
for fromLine < toLine {
f.MergeLine(fromLine)
toLine--
}
}
// removeLinesBetween is like removeLines, but it leaves one newline between the
// two positions.
func (f *fumpter) removeLinesBetween(from, to token.Pos) {
f.removeLines(f.Line(from)+1, f.Line(to))
}
type byteCounter int
func (b *byteCounter) Write(p []byte) (n int, err error) {
*b += byteCounter(len(p))
return len(p), nil
}
func (f *fumpter) printLength(node ast.Node) int {
var count byteCounter
if err := format.Node(&count, f.fset, node); err != nil {
panic(fmt.Sprintf("unexpected print error: %v", err))
}
// Add the space taken by an inline comment.
if c := f.inlineComment(node.End()); c != nil {
fmt.Fprintf(&count, " %s", c.Text)
}
// Add an approximation of the indentation level. We can't know the
// number of tabs go/printer will add ahead of time. Trying to print the
// entire top-level declaration would tell us that, but then it's near
// impossible to reliably find our node again.
return int(count) + (f.blockLevel * 8)
}
func (f *fumpter) tabbedColumn(p token.Pos) int {
col := f.Position(p).Column
// Like in printLength, add an approximation of the indentation level.
// Since any existing tabs were already counted as one column, multiply
// the level by 7.
return col + (f.blockLevel * 7)
}
func (f *fumpter) lineEnd(line int) token.Pos {
if line < 1 {
panic("illegal line number")
}
total := f.LineCount()
if line > total {
panic("illegal line number")
}
if line == total {
return f.astFile.End()
}
return f.LineStart(line+1) - 1
}
// rxCommentDirective covers all common Go comment directives:
//
// //go: | standard Go directives, like go:noinline
// //some-words: | similar to the syntax above, like lint:ignore or go-sumtype:decl
// //line | inserted line information for cmd/compile
// //export | to mark cgo funcs for exporting
// //extern | C function declarations for gccgo
// //sys(nb)? | syscall function wrapper prototypes
// //nolint | nolint directive for golangci
// //noinspection | noinspection directive for GoLand and friends
//
// Note that the "some-words:" matching expects a letter afterward, such as
// "go:generate", to prevent matching false positives like "https://site".
var rxCommentDirective = regexp.MustCompile(`^([a-z-]+:[a-z]+|line\b|export\b|extern\b|sys(nb)?\b|no(lint|inspection)\b)`)
func (f *fumpter) applyPre(c *astutil.Cursor) {
f.splitLongLine(c)
switch node := c.Node().(type) {
case *ast.File:
// Join contiguous lone var/const/import lines.
// Abort if there are empty lines or comments in between,
// including a leading comment, which could be a directive.
newDecls := make([]ast.Decl, 0, len(node.Decls))
for i := 0; i < len(node.Decls); {
newDecls = append(newDecls, node.Decls[i])
start, ok := node.Decls[i].(*ast.GenDecl)
if !ok || isCgoImport(start) || start.Doc != nil {
i++
continue
}
lastPos := start.Pos()
for i++; i < len(node.Decls); {
cont, ok := node.Decls[i].(*ast.GenDecl)
if !ok || cont.Tok != start.Tok || cont.Lparen != token.NoPos ||
f.Line(lastPos) < f.Line(cont.Pos())-1 || isCgoImport(cont) {
break
}
start.Specs = append(start.Specs, cont.Specs...)
if c := f.inlineComment(cont.End()); c != nil {
// don't move an inline comment outside
start.Rparen = c.End()
} else {
// so the code below treats the joined
// decl group as multi-line
start.Rparen = cont.End()
}
lastPos = cont.Pos()
i++
}
}
node.Decls = newDecls
// Multiline top-level declarations should be separated by an
// empty line.
// Do this after the joining of lone declarations above,
// as joining single-line declarations makes then multi-line.
var lastMulti bool
var lastEnd token.Pos
for _, decl := range node.Decls {
pos := decl.Pos()
comments := f.commentsBetween(lastEnd, pos)
if len(comments) > 0 {
pos = comments[0].Pos()
}
multi := f.Line(pos) < f.Line(decl.End())
if multi && lastMulti && f.Line(lastEnd)+1 == f.Line(pos) {
f.addNewline(lastEnd)
}
lastMulti = multi
lastEnd = decl.End()
}
// Comments aren't nodes, so they're not walked by default.
groupLoop:
for _, group := range node.Comments {
for _, comment := range group.List {
if comment.Text == "//gofumpt:diagnose" || strings.HasPrefix(comment.Text, "//gofumpt:diagnose ") {
slc := []string{
"//gofumpt:diagnose",
version.String(),
"-lang=" + f.LangVersion,
}
if f.ExtraRules {
slc = append(slc, "-extra")
}
comment.Text = strings.Join(slc, " ")
}
body := strings.TrimPrefix(comment.Text, "//")
if body == comment.Text {
// /*-style comment
continue groupLoop
}
if rxCommentDirective.MatchString(body) {
// this line is a directive
continue groupLoop
}
r, _ := utf8.DecodeRuneInString(body)
if !unicode.IsLetter(r) && !unicode.IsNumber(r) && !unicode.IsSpace(r) {
// this line could be code like "//{"
continue groupLoop
}
}
// If none of the comment group's lines look like a
// directive or code, add spaces, if needed.
for _, comment := range group.List {
body := strings.TrimPrefix(comment.Text, "//")
r, _ := utf8.DecodeRuneInString(body)
if !unicode.IsSpace(r) {
comment.Text = "// " + body
}
}
}
case *ast.DeclStmt:
decl, ok := node.Decl.(*ast.GenDecl)
if !ok || decl.Tok != token.VAR || len(decl.Specs) != 1 {
break // e.g. const name = "value"
}
spec := decl.Specs[0].(*ast.ValueSpec)
if spec.Type != nil {
break // e.g. var name Type
}
tok := token.ASSIGN
names := make([]ast.Expr, len(spec.Names))
for i, name := range spec.Names {
names[i] = name
if name.Name != "_" {
tok = token.DEFINE
}
}
c.Replace(&ast.AssignStmt{
Lhs: names,
Tok: tok,
Rhs: spec.Values,
})
case *ast.GenDecl:
if node.Tok == token.IMPORT && node.Lparen.IsValid() {
f.joinStdImports(node)
}
// Single var declarations shouldn't use parentheses, unless
// there's a comment on the grouped declaration.
if node.Tok == token.VAR && len(node.Specs) == 1 &&
node.Lparen.IsValid() && node.Doc == nil {
specPos := node.Specs[0].Pos()
specEnd := node.Specs[0].End()
if len(f.commentsBetween(node.TokPos, specPos)) > 0 {
// If the single spec has any comment, it must
// go before the entire declaration now.
node.TokPos = specPos
} else {
f.removeLines(f.Line(node.TokPos), f.Line(specPos))
}
f.removeLines(f.Line(specEnd), f.Line(node.Rparen))
// Remove the parentheses. go/printer will automatically
// get rid of the newlines.
node.Lparen = token.NoPos
node.Rparen = token.NoPos
}
case *ast.InterfaceType:
if len(node.Methods.List) > 0 {
method := node.Methods.List[0]
removeToPos := method.Pos()
if comments := f.commentsBetween(node.Interface, method.Pos()); len(comments) > 0 {
// only remove leading line upto the first comment
removeToPos = comments[0].Pos()
}
// remove leading lines if they exist
f.removeLines(f.Line(node.Interface)+1, f.Line(removeToPos))
}
case *ast.BlockStmt:
f.stmts(node.List)
comments := f.commentsBetween(node.Lbrace, node.Rbrace)
if len(node.List) == 0 && len(comments) == 0 {
f.removeLinesBetween(node.Lbrace, node.Rbrace)
break
}
var sign *ast.FuncType
var cond ast.Expr
switch parent := c.Parent().(type) {
case *ast.FuncDecl:
sign = parent.Type
case *ast.FuncLit:
sign = parent.Type
case *ast.IfStmt:
cond = parent.Cond
case *ast.ForStmt:
cond = parent.Cond
}
if len(node.List) > 1 && sign == nil {
// only if we have a single statement, or if
// it's a func body.
break
}
var bodyPos, bodyEnd token.Pos
if len(node.List) > 0 {
bodyPos = node.List[0].Pos()
bodyEnd = node.List[len(node.List)-1].End()
}
if len(comments) > 0 {
if pos := comments[0].Pos(); !bodyPos.IsValid() || pos < bodyPos {
bodyPos = pos
}
if pos := comments[len(comments)-1].End(); !bodyPos.IsValid() || pos > bodyEnd {
bodyEnd = pos
}
}
f.removeLinesBetween(bodyEnd, node.Rbrace)
if cond != nil && f.Line(cond.Pos()) != f.Line(cond.End()) {
// The body is preceded by a multi-line condition, so an
// empty line can help readability.
return
}
if sign != nil {
endLine := f.Line(sign.End())
paramClosingIsFirstCharOnEndLine := sign.Params != nil &&
f.Position(sign.Params.Closing).Column == 1 &&
f.Line(sign.Params.Closing) == endLine
resultClosingIsFirstCharOnEndLine := sign.Results != nil &&
f.Position(sign.Results.Closing).Column == 1 &&
f.Line(sign.Results.Closing) == endLine
endLineIsIndented := !(paramClosingIsFirstCharOnEndLine || resultClosingIsFirstCharOnEndLine)
if f.Line(sign.Pos()) != endLine && endLineIsIndented {
// The body is preceded by a multi-line function
// signature, we move the `) {` to avoid the empty line.
switch {
case sign.Results != nil &&
!resultClosingIsFirstCharOnEndLine &&
sign.Results.Closing.IsValid(): // there may be no ")"
sign.Results.Closing += 1
f.addNewline(sign.Results.Closing)
case sign.Params != nil && !paramClosingIsFirstCharOnEndLine:
sign.Params.Closing += 1
f.addNewline(sign.Params.Closing)
}
}
}
f.removeLinesBetween(node.Lbrace, bodyPos)
case *ast.CaseClause:
f.stmts(node.Body)
openLine := f.Line(node.Case)
closeLine := f.Line(node.Colon)
if openLine == closeLine {
// nothing to do
break
}
if len(f.commentsBetween(node.Case, node.Colon)) > 0 {
// don't move comments
break
}
if f.printLength(node) > shortLineLimit {
// too long to collapse
break
}
f.removeLines(openLine, closeLine)
case *ast.CommClause:
f.stmts(node.Body)
case *ast.FieldList:
if node.NumFields() == 0 && len(f.commentsBetween(node.Pos(), node.End())) == 0 {
// Empty field lists should not contain a newline.
// Do not join the two lines if the first has an inline
// comment, as that can result in broken formatting.
openLine := f.Line(node.Pos())
closeLine := f.Line(node.End())
f.removeLines(openLine, closeLine)
}
// Merging adjacent fields (e.g. parameters) is disabled by default.
if !f.ExtraRules {
break
}
switch c.Parent().(type) {
case *ast.FuncDecl, *ast.FuncType, *ast.InterfaceType:
node.List = f.mergeAdjacentFields(node.List)
c.Replace(node)
case *ast.StructType:
// Do not merge adjacent fields in structs.
}
case *ast.BasicLit:
// Octal number literals were introduced in 1.13.
if semver.Compare(f.LangVersion, "v1.13") >= 0 {
if node.Kind == token.INT && rxOctalInteger.MatchString(node.Value) {
node.Value = "0o" + node.Value[1:]
c.Replace(node)
}
}
case *ast.AssignStmt:
// Only remove lines between the assignment token and the first right-hand side expression
f.removeLines(f.Line(node.TokPos), f.Line(node.Rhs[0].Pos()))
}
}
func (f *fumpter) applyPost(c *astutil.Cursor) {
switch node := c.Node().(type) {
// Adding newlines to composite literals happens as a "post" step, so
// that we can take into account whether "pre" steps added any newlines
// that would affect us here.
case *ast.CompositeLit:
if len(node.Elts) == 0 {
// doesn't have elements
break
}
openLine := f.Line(node.Lbrace)
closeLine := f.Line(node.Rbrace)
if openLine == closeLine {
// all in a single line
break
}
newlineAroundElems := false
newlineBetweenElems := false
lastEnd := node.Lbrace
lastLine := openLine
for i, elem := range node.Elts {
pos := elem.Pos()
comments := f.commentsBetween(lastEnd, pos)
if len(comments) > 0 {
pos = comments[0].Pos()
}
if curLine := f.Line(pos); curLine > lastLine {
if i == 0 {
newlineAroundElems = true
// remove leading lines if they exist
f.removeLines(openLine+1, curLine)
} else {
newlineBetweenElems = true
}
}
lastEnd = elem.End()
lastLine = f.Line(lastEnd)
}
if closeLine > lastLine {
newlineAroundElems = true
}
if newlineBetweenElems || newlineAroundElems {
first := node.Elts[0]
if openLine == f.Line(first.Pos()) {
// We want the newline right after the brace.
f.addNewline(node.Lbrace + 1)
closeLine = f.Line(node.Rbrace)
}
last := node.Elts[len(node.Elts)-1]
if closeLine == f.Line(last.End()) {
// We want the newline right before the brace.
f.addNewline(node.Rbrace)
}
}
// If there's a newline between any consecutive elements, there
// must be a newline between all composite literal elements.
if !newlineBetweenElems {
break
}
for i1, elem1 := range node.Elts {
i2 := i1 + 1
if i2 >= len(node.Elts) {
break
}
elem2 := node.Elts[i2]
// TODO: do we care about &{}?
_, ok1 := elem1.(*ast.CompositeLit)
_, ok2 := elem2.(*ast.CompositeLit)
if !ok1 && !ok2 {
continue
}
if f.Line(elem1.End()) == f.Line(elem2.Pos()) {
f.addNewline(elem1.End())
}
}
}
}
func (f *fumpter) splitLongLine(c *astutil.Cursor) {
if os.Getenv("GOFUMPT_SPLIT_LONG_LINES") != "on" {
// By default, this feature is turned off.
// Turn it on by setting GOFUMPT_SPLIT_LONG_LINES=on.
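		// Example invocation (assuming a POSIX shell):
		//   GOFUMPT_SPLIT_LONG_LINES=on gofumpt -w file.go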
return
}
node := c.Node()
if node == nil {
return
}
newlinePos := node.Pos()
start := f.Position(node.Pos())
end := f.Position(node.End())
// If the node is already split in multiple lines, there's nothing to do.
if start.Line != end.Line {
return
}
// Only split at the start of the current node if it's part of a list.
if _, ok := c.Parent().(*ast.BinaryExpr); ok {
// Chains of binary expressions are considered lists, too.
} else if c.Index() >= 0 {
// For the rest of the nodes, we're in a list if c.Index() >= 0.
} else {
return
}
// Like in printLength, add an approximation of the indentation level.
// Since any existing tabs were already counted as one column, multiply
// the level by 7.
startCol := start.Column + f.blockLevel*7
endCol := end.Column + f.blockLevel*7
// If this is a composite literal,
// and we were going to insert a newline before the entire literal,
// insert the newline before the first element instead.
// Since we'll add a newline after the last element too,
// this format is generally going to be nicer.
if comp := isComposite(node); comp != nil && len(comp.Elts) > 0 {
newlinePos = comp.Elts[0].Pos()
}
// If this is a function call,
// and we were to add a newline before the first argument,
// prefer adding the newline before the entire call.
// End-of-line parentheses aren't very nice, as we don't put their
// counterparts at the start of a line too.
// We do this by using the average of the two starting positions.
if call, _ := node.(*ast.CallExpr); call != nil && len(call.Args) > 0 {
first := f.Position(call.Args[0].Pos())
startCol += (first.Column - start.Column) / 2
}
// If the start position is too short, we definitely won't split the line.
if startCol <= shortLineLimit {
return
}
lineEnd := f.Position(f.lineEnd(start.Line))
// firstLength and secondLength are the split line lengths, excluding
// indentation.
firstLength := start.Column - f.blockLevel
if firstLength < 0 {
panic("negative length")
}
secondLength := lineEnd.Column - start.Column
if secondLength < 0 {
panic("negative length")
}
// If the line ends past the long line limit,
// and both splits are estimated to take at least minSplitFactor of the limit,
// then split the line.
minSplitLength := int(f.minSplitFactor * longLineLimit)
if endCol > longLineLimit &&
firstLength >= minSplitLength && secondLength >= minSplitLength {
f.addNewline(newlinePos)
}
}
func isComposite(node ast.Node) *ast.CompositeLit {
switch node := node.(type) {
case *ast.CompositeLit:
return node
case *ast.UnaryExpr:
return isComposite(node.X) // e.g. &T{}
default:
return nil
}
}
func (f *fumpter) stmts(list []ast.Stmt) {
for i, stmt := range list {
ifs, ok := stmt.(*ast.IfStmt)
if !ok || i < 1 {
continue // not an if following another statement
}
as, ok := list[i-1].(*ast.AssignStmt)
if !ok || as.Tok != token.DEFINE ||
!identEqual(as.Lhs[len(as.Lhs)-1], "err") {
continue // not "..., err := ..."
}
be, ok := ifs.Cond.(*ast.BinaryExpr)
if !ok || ifs.Init != nil || ifs.Else != nil {
continue // complex if
}
if be.Op != token.NEQ || !identEqual(be.X, "err") ||
!identEqual(be.Y, "nil") {
continue // not "err != nil"
}
f.removeLinesBetween(as.End(), ifs.Pos())
}
}
func identEqual(expr ast.Expr, name string) bool {
id, ok := expr.(*ast.Ident)
return ok && id.Name == name
}
// isCgoImport returns true if the declaration is simply:
//
// import "C"
//
// or the equivalent:
//
// import `C`
//
// Note that parentheses do not affect the result.
func isCgoImport(decl *ast.GenDecl) bool {
if decl.Tok != token.IMPORT || len(decl.Specs) != 1 {
return false
}
spec := decl.Specs[0].(*ast.ImportSpec)
v, err := strconv.Unquote(spec.Path.Value)
if err != nil {
panic(err) // should never error
}
return v == "C"
}
// joinStdImports ensures that all standard library imports are together and at
// the top of the imports list.
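//
// For example (illustrative), an import block listing "foo.com/bar" before
// "os" is rewritten so that "os" moves into a leading std-library group.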
func (f *fumpter) joinStdImports(d *ast.GenDecl) {
var std, other []ast.Spec
firstGroup := true
lastEnd := d.Pos()
needsSort := false
for i, spec := range d.Specs {
spec := spec.(*ast.ImportSpec)
if coms := f.commentsBetween(lastEnd, spec.Pos()); len(coms) > 0 {
lastEnd = coms[len(coms)-1].End()
}
if i > 0 && firstGroup && f.Line(spec.Pos()) > f.Line(lastEnd)+1 {
firstGroup = false
} else {
// We're still in the first group, update lastEnd.
lastEnd = spec.End()
}
path, _ := strconv.Unquote(spec.Path.Value)
switch {
// Imports with a period are definitely third party.
case strings.Contains(path, "."):
fallthrough
// "test" and "example" are reserved as per golang.org/issue/37641.
// "internal" is unreachable.
case strings.HasPrefix(path, "test/") ||
strings.HasPrefix(path, "example/") ||
strings.HasPrefix(path, "internal/"):
fallthrough
// To be conservative, if an import has a name or an inline
// comment, and isn't part of the top group, treat it as non-std.
case !firstGroup && (spec.Name != nil || spec.Comment != nil):
other = append(other, spec)
continue
}
// If we're moving this std import further up, reset its
// position, to avoid breaking comments.
if !firstGroup || len(other) > 0 {
setPos(reflect.ValueOf(spec), d.Pos())
needsSort = true
}
std = append(std, spec)
}
// Ensure there is an empty line between std imports and other imports.
if len(std) > 0 && len(other) > 0 && f.Line(std[len(std)-1].End())+1 >= f.Line(other[0].Pos()) {
// We add two newlines, as that's necessary in some edge cases.
// For example, if the std and non-std imports were together and
// without indentation, adding one newline isn't enough. Two
// empty lines will be printed as one by go/printer, anyway.
f.addNewline(other[0].Pos() - 1)
f.addNewline(other[0].Pos())
}
// Finally, join the imports, keeping std at the top.
d.Specs = append(std, other...)
// If we moved any std imports to the first group, we need to sort them
// again.
if needsSort {
ast.SortImports(f.fset, f.astFile)
}
}
// mergeAdjacentFields returns fields with adjacent fields merged if possible.
func (f *fumpter) mergeAdjacentFields(fields []*ast.Field) []*ast.Field {
// If there are less than two fields then there is nothing to merge.
if len(fields) < 2 {
return fields
}
// Otherwise, iterate over adjacent pairs of fields, merging if possible,
// and mutating fields. Elements of fields may be mutated (if merged with
// following fields), discarded (if merged with a preceding field), or left
// unchanged.
i := 0
for j := 1; j < len(fields); j++ {
if f.shouldMergeAdjacentFields(fields[i], fields[j]) {
fields[i].Names = append(fields[i].Names, fields[j].Names...)
} else {
i++
fields[i] = fields[j]
}
}
return fields[:i+1]
}
func (f *fumpter) shouldMergeAdjacentFields(f1, f2 *ast.Field) bool {
if len(f1.Names) == 0 || len(f2.Names) == 0 {
// Both must have names for the merge to work.
return false
}
if f.Line(f1.Pos()) != f.Line(f2.Pos()) {
// Trust the user if they used separate lines.
return false
}
// Only merge if the types are equal.
opt := cmp.Comparer(func(x, y token.Pos) bool { return true })
return cmp.Equal(f1.Type, f2.Type, opt)
}
var posType = reflect.TypeOf(token.NoPos)
// setPos recursively sets all position fields in the node v to pos.
func setPos(v reflect.Value, pos token.Pos) {
if v.Kind() == reflect.Ptr {
v = v.Elem()
}
if !v.IsValid() {
return
}
if v.Type() == posType {
v.Set(reflect.ValueOf(pos))
}
if v.Kind() == reflect.Struct {
for i := 0; i < v.NumField(); i++ {
setPos(v.Field(i), pos)
}
}
}
| [
"\"GOFUMPT_SPLIT_LONG_LINES\""
]
| []
| [
"GOFUMPT_SPLIT_LONG_LINES"
]
| [] | ["GOFUMPT_SPLIT_LONG_LINES"] | go | 1 | 0 | |
cm/base/BaseCM/cm_base.py | #!/usr/bin/env python3
"""Base module for the calculation modules.
"""
import inspect
import json
import logging
import os
import jsonschema
import requests
from celery import Celery, Task
from celery.worker import worker
CELERY_BROKER_URL = os.environ.get("CELERY_BROKER_URL")
CELERY_RESULT_BACKEND = os.environ.get("CELERY_RESULT_BACKEND")
API_URL = os.environ.get("API_URL")
def get_default_app(name):
"""Create default Celery application."""
app = Celery(name, broker=CELERY_BROKER_URL, backend=CELERY_RESULT_BACKEND)
app.conf.update(
task_serializer="json",
accept_content=["json"], # Ignore other content
result_serializer="json",
timezone="Europe/Zurich",
enable_utc=True,
task_default_queue=name,
)
return app
def get_default_schema_path():
"""return the schema.json relative to the caller.
We expect to find the schema.json file in the same directory as the worker.
"""
filename = inspect.stack()[1].filename
dir_path = os.path.dirname(os.path.abspath(filename))
schema_path = os.path.join(dir_path, "schema.json")
if not os.path.isfile(schema_path):
raise FileNotFoundError(
"Cannot find schema.json file under the path " + schema_path
)
return schema_path
def get_default_input_layers_path():
"""return the input_layers.json relative to the caller.
We expect to find the input_layers.json file in the same directory as the worker.
"""
filename = inspect.stack()[1].filename
dir_path = os.path.dirname(os.path.abspath(filename))
input_layers_path = os.path.join(dir_path, "input_layers.json")
if not os.path.isfile(input_layers_path):
raise FileNotFoundError(
"Cannot find input_layers.json file under the path " + input_layers_path
)
return input_layers_path
class CMBase(Task):
schema_path = ""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.queue = self.app.conf.task_default_queue
signature = inspect.signature(self.__wrapped__)
self.parameters = [p for p in signature.parameters]
self.pretty_name = CMBase.format_function(self.__wrapped__)
if self.schema_path:
with open(self.schema_path) as fd:
self.schema = json.load(fd)
else:
self.schema = {}
if self.input_layers_path:
with open(self.input_layers_path) as fd:
self.input_layers = json.load(fd)
else:
self.input_layers = []
@staticmethod
def format_function(function):
"""From a named callable extract its name then
format it to be human readable.
"""
raw_name = function.__name__
spaced_name = raw_name.replace("_", " ").replace("-", " ")
return spaced_name.capitalize()
def validate_params(self, params):
"""Validate the dict parameters based on the schema.json declaration.
        Raises a ValueError describing the validation failure.
"""
try:
jsonschema.validate(params, schema=self.schema)
except jsonschema.ValidationError as err:
raise ValueError(str(err))
@property
def cm_info(self):
"""Return worker information formatted as a json string"""
d = {}
d["parameters"] = self.parameters
d["schema"] = self.schema
d["doc"] = self.__doc__
d["pretty_name"] = self.pretty_name
d["name"] = self.name
d["queue"] = self.queue
d["input_layers"] = self.input_layers
return json.dumps(d)
def post_raster(self, raster_name, raster_fd):
"""Post a raster file to the api."""
files = {"file": (raster_name, raster_fd, "image/tiff")}
try:
resp = requests.post(
f"{API_URL}/cm/{self.name}/task/{self.request.id}/geofile/", files=files
)
return resp.status_code
        except requests.exceptions.ConnectionError as error:
            logging.error("Error while posting the file to the API.")
            raise ConnectionError(error)
def base_task(app, schema_path):
"""Wrapper for the app.task decoration"""
return app.task(base=CMBase, bind=True, schema_path=schema_path, queue=app.name)
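# A minimal, illustrative worker sketch (names here are hypothetical):
#
#   app = get_default_app("my_cm")
#
#   @base_task(app, get_default_schema_path())
#   def my_cm_task(self, params):
#       self.validate_params(params)
#       ...
#
#   start_app(app)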
def start_app(app):
"""Start the celery application passed as single parameter"""
logging.basicConfig(level=logging.ERROR)
w = worker.WorkController(app=app)
w.start()
| []
| []
| [
"API_URL",
"CELERY_RESULT_BACKEND",
"CELERY_BROKER_URL"
]
| [] | ["API_URL", "CELERY_RESULT_BACKEND", "CELERY_BROKER_URL"] | python | 3 | 0 | |
maps/ors.py | """This module defines all the ORS(https://openrouteservice.org/services/) commands."""
import os
import click
import openrouteservice as opnrs
import simplejson as json
from geojsonio import display as geo_display
from maps.exceptions import ApiKeyNotFoundError
from maps.utils import yield_subcommands
@click.group()
@click.pass_context
def ors(ctx):
"""ORS (https://openrouteservice.org/) provider."""
ctx.obj = {}
@ors.command()
def show():
"""show list of all sub commands."""
for sub in yield_subcommands(ors):
click.secho(sub, fg="green")
@ors.command(short_help="forward or reverse geocode for an address or coordinates.")
@click.argument("query", required=True)
@click.option("--apikey", help="Your ORS API key", type=str)
@click.option(
"--forward/--reverse",
default=True,
show_default=True,
help="Perform a forward or reverse geocode",
)
@click.option("--raw", is_flag=True)
@click.option("--display", help="Display result in browser", is_flag=True)
@click.pass_context
def geocoding(ctx, query, apikey, forward, raw, display):
"""
Open Route Service geocoding service.
\f
:param ctx: A context dictionary.
    :param query: An address string (forward) or a "lon,lat" coordinate pair (reverse).
:param apikey: An API key for authentication.
:param forward: A boolean flag for forward/reverse geocoding.
:param raw: A boolean flag to show api response as it is.
:param display: A boolean flag to show result in web browser.
:return: None.
"""
apikey = apikey or os.environ.get("ORS_APIKEY")
if apikey is None:
raise ApiKeyNotFoundError(
"Please pass Open Route Service API KEY as --apikey or set it as environment "
"variable in ORS_APIKEY "
)
ctx.obj["apikey"] = apikey
geolocator = opnrs.Client(key=ctx.obj["apikey"])
if forward:
geocode = geolocator.pelias_search(text=query)
if raw:
click.secho(json.dumps(geocode, indent=2), fg="green")
elif display:
geocode.pop("geocoding")
geo_display(json.dumps(geocode))
else:
for feature in geocode["features"]:
coords = feature["geometry"]["coordinates"]
result = {"lat": coords[1], "lon": coords[0]}
click.secho(json.dumps(result, indent=2), fg="green")
else:
coordinate = query.split(",")
reverse = geolocator.pelias_reverse(point=coordinate, validate=False)
if raw:
for result in reverse["features"]:
click.secho(json.dumps(result, indent=2), fg="green")
else:
for result in reverse["features"]:
click.secho(result["properties"]["label"], fg="green")
| []
| []
| [
"ORS_APIKEY"
]
| [] | ["ORS_APIKEY"] | python | 1 | 0 | |
examples/function/deploy/deployAFunction/main.go | package main
import (
"fmt"
"os"
"go.m3o.com/function"
)
// Deploy a group of functions
func main() {
functionService := function.NewFunctionService(os.Getenv("M3O_API_TOKEN"))
rsp, err := functionService.Deploy(&function.DeployRequest{
Branch: "main",
Entrypoint: "Helloworld",
Name: "helloworld",
Region: "europe-west1",
Repo: "https://github.com/m3o/m3o",
Runtime: "go116",
Subfolder: "examples/go-function",
})
fmt.Println(rsp, err)
}
| [
"\"M3O_API_TOKEN\""
]
| []
| [
"M3O_API_TOKEN"
]
| [] | ["M3O_API_TOKEN"] | go | 1 | 0 | |
util/chplenv/compiler_utils.py | """ Backend compiler utility functions for chplenv modules """
import os
import re
import sys
from collections import namedtuple
chplenv_dir = os.path.dirname(__file__)
sys.path.insert(0, os.path.abspath(chplenv_dir))
from utils import memoize, run_command
@memoize
def get_compiler_name(compiler):
if compiler_is_prgenv(compiler):
return 'cc'
elif compiler == 'aarch64-gnu':
return 'aarch64-unknown-linux-gnu-gcc'
elif 'gnu' in compiler:
return 'gcc'
elif compiler == 'clang':
return 'clang'
elif compiler == 'intel':
return 'icc'
elif compiler == 'pgi':
return 'pgcc'
return 'other'
@memoize
def get_compiler_version(compiler):
version_string = '0'
if 'gnu' in compiler:
        # Assuming the 'compiler' version matches the gcc version
# e.g., `mpicc -dumpversion == gcc -dumpversion`
version_string = run_command([get_compiler_name(compiler), '-dumpversion'])
elif 'cray-prgenv-cray' == compiler:
version_string = os.environ.get('CRAY_CC_VERSION', '0')
return CompVersion(version_string)
@memoize
def CompVersion(version_string):
"""
Takes a version string of the form 'major', 'major.minor',
    'major.minor.revision', or 'major.minor.revision.build' and returns the
named tuple (major, minor, revision, build). If minor, revision, or build
are not specified, 0 will be used for their value(s)
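    For example (illustrative), CompVersion('7.5') returns
    CompVersion(major=7, minor=5, revision=0, build=0).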
"""
CompVersionT = namedtuple('CompVersion', ['major', 'minor', 'revision', 'build'])
    match = re.search(r'(\d+)(\.(\d+))?(\.(\d+))?(\.(\d+))?', version_string)
if match:
major = int(match.group(1))
minor = int(match.group(3) or 0)
revision = int(match.group(5) or 0)
build = int(match.group(7) or 0)
return CompVersionT(major=major, minor=minor, revision=revision, build=build)
else:
raise ValueError("Could not convert version '{0}' to "
"a tuple".format(version_string))
@memoize
def compiler_is_prgenv(compiler_val):
return (compiler_val.startswith('cray-prgenv') or
os.environ.get('CHPL_ORIG_TARGET_COMPILER','').startswith('cray-prgenv'))
def strip_preprocessor_lines(lines):
lines = [line for line in lines if len(line.split('#')[0].strip()) > 0]
return lines
#
# Determine whether a given compiler's default compilation mode
# supports standard atomics by running the compiler and checking
# how it expands key feature-test macros.
#
# The assumption is that if standard atomics make it into the
# compiler's default compilation mode, then they actually work.
# If they are not available in the default mode, they probably
# have problems and we don't want to use them.
#
# Due to the command-line options required, this works for GCC,
# Clang, and the Intel compiler, but probably not others.
#
@memoize
def has_std_atomics(compiler_val):
try:
compiler_name = get_compiler_name(compiler_val)
if compiler_name == 'other':
return False
version_key='version'
atomics_key='atomics'
cmd_input = '{0}=__STDC_VERSION__\n{1}=__STDC_NO_ATOMICS__'.format(version_key, atomics_key)
cmd = [compiler_name, '-E', '-x', 'c', '-']
output = run_command(cmd, cmd_input=cmd_input)
output = strip_preprocessor_lines(output.splitlines())
output_dict = dict(line.split('=') for line in output)
version = output_dict[version_key].rstrip("L")
atomics = output_dict[atomics_key]
if version == "__STDC_VERSION__" or int(version) < 201112:
return False
# If the atomics macro was expanded, then we do not have support.
if atomics != "__STDC_NO_ATOMICS__":
return False
return True
except:
return False
| []
| []
| [
"CHPL_ORIG_TARGET_COMPILER",
"CRAY_CC_VERSION"
]
| [] | ["CHPL_ORIG_TARGET_COMPILER", "CRAY_CC_VERSION"] | python | 2 | 0 | |
modules/openapi-generator/src/main/java/org/openapitools/codegen/languages/DartDioClientCodegen.java | /*
* Copyright 2018 OpenAPI-Generator Contributors (https://openapi-generator.tech)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.openapitools.codegen.languages;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Sets;
import com.samskivert.mustache.Mustache;
import io.swagger.v3.oas.models.media.Schema;
import org.apache.commons.lang3.StringUtils;
import org.openapitools.codegen.*;
import org.openapitools.codegen.utils.ModelUtils;
import org.openapitools.codegen.utils.ProcessUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.util.*;
import static org.openapitools.codegen.utils.StringUtils.underscore;
public class DartDioClientCodegen extends DartClientCodegen {
private static final Logger LOGGER = LoggerFactory.getLogger(DartDioClientCodegen.class);
public static final String NULLABLE_FIELDS = "nullableFields";
public static final String DATE_LIBRARY = "dateLibrary";
private static final String CLIENT_NAME = "clientName";
private boolean nullableFields = true;
private String dateLibrary = "core";
public DartDioClientCodegen() {
super();
outputFolder = "generated-code/dart-dio";
embeddedTemplateDir = "dart-dio";
this.setTemplateDir(embeddedTemplateDir);
        cliOptions.add(new CliOption(NULLABLE_FIELDS, "Whether null fields should be included in the JSON payload"));
CliOption dateLibrary = new CliOption(DATE_LIBRARY, "Option. Date library to use").defaultValue(this.getDateLibrary());
Map<String, String> dateOptions = new HashMap<>();
dateOptions.put("core", "Dart core library (DateTime)");
dateOptions.put("timemachine", "Time Machine is date and time library for Flutter, Web, and Server with support for timezones, calendars, cultures, formatting and parsing.");
dateLibrary.setEnum(dateOptions);
cliOptions.add(dateLibrary);
typeMapping.put("Array", "BuiltList");
typeMapping.put("array", "BuiltList");
typeMapping.put("List", "BuiltList");
typeMapping.put("set", "BuiltSet");
typeMapping.put("map", "BuiltMap");
typeMapping.put("file", "Uint8List");
typeMapping.put("binary", "Uint8List");
typeMapping.put("object", "JsonObject");
typeMapping.put("AnyType", "JsonObject");
additionalReservedWords.addAll(Sets.newHashSet(
"EnumClass",
// The following are reserved dataTypes but can not be added to defaultIncludes
// as this would prevent them from being added to the imports.
"BuiltList",
"BuiltSet",
"BuiltMap",
"Uint8List",
"JsonObject"
));
importMapping.put("BuiltList", "package:built_collection/built_collection.dart");
importMapping.put("BuiltSet", "package:built_collection/built_collection.dart");
importMapping.put("BuiltMap", "package:built_collection/built_collection.dart");
importMapping.put("JsonObject", "package:built_value/json_object.dart");
importMapping.put("Uint8List", "dart:typed_data");
}
public String getDateLibrary() {
return dateLibrary;
}
public void setDateLibrary(String library) {
this.dateLibrary = library;
}
public boolean getNullableFields() {
return nullableFields;
}
public void setNullableFields(boolean nullableFields) {
this.nullableFields = nullableFields;
}
@Override
public String getName() {
return "dart-dio";
}
@Override
public String getHelp() {
return "Generates a Dart Dio client library.";
}
@Override
protected ImmutableMap.Builder<String, Mustache.Lambda> addMustacheLambdas() {
return super.addMustacheLambdas()
.put("escapeBuiltValueEnum", (fragment, writer) -> {
// Raw strings don't work correctly in built_value enum strings.
// Dollar signs need to be escaped in to make them work.
// @BuiltValueEnumConst(wireName: r'$') produces '$' in generated code.
// @BuiltValueEnumConst(wireName: r'\$') produces '\$' in generated code.
writer.write(fragment.execute().replace("$", "\\$"));
});
}
@Override
public String toDefaultValue(Schema schema) {
if (schema.getDefault() != null) {
if (ModelUtils.isStringSchema(schema)) {
return "'" + schema.getDefault().toString().replaceAll("'", "\\'") + "'";
}
return schema.getDefault().toString();
}
return null;
}
@Override
public void processOpts() {
defaultProcessOpts();
if (StringUtils.isEmpty(System.getenv("DART_POST_PROCESS_FILE"))) {
LOGGER.info("Environment variable DART_POST_PROCESS_FILE not defined so the Dart code may not be properly formatted. To define it, try `export DART_POST_PROCESS_FILE=\"/usr/local/bin/dartfmt -w\"` (Linux/Mac)");
LOGGER.info("NOTE: To enable file post-processing, 'enablePostProcessFile' must be set to `true` (--enable-post-process-file for CLI).");
}
if (additionalProperties.containsKey(NULLABLE_FIELDS)) {
this.setNullableFields(convertPropertyToBooleanAndWriteBack(NULLABLE_FIELDS));
} else {
//not set, use to be passed to template
additionalProperties.put(NULLABLE_FIELDS, nullableFields);
}
if (additionalProperties.containsKey(PUB_LIBRARY)) {
this.setPubLibrary((String) additionalProperties.get(PUB_LIBRARY));
} else {
//not set, use to be passed to template
additionalProperties.put(PUB_LIBRARY, pubLibrary);
}
if (additionalProperties.containsKey(PUB_NAME)) {
this.setPubName((String) additionalProperties.get(PUB_NAME));
} else {
            // not set, use the default value to be passed to the template
additionalProperties.put(PUB_NAME, pubName);
}
if (!additionalProperties.containsKey(CLIENT_NAME)) {
additionalProperties.put(CLIENT_NAME, org.openapitools.codegen.utils.StringUtils.camelize(pubName));
}
if (additionalProperties.containsKey(PUB_VERSION)) {
this.setPubVersion((String) additionalProperties.get(PUB_VERSION));
} else {
            // not set, use the default value to be passed to the template
additionalProperties.put(PUB_VERSION, pubVersion);
}
if (additionalProperties.containsKey(PUB_DESCRIPTION)) {
this.setPubDescription((String) additionalProperties.get(PUB_DESCRIPTION));
} else {
            // not set, use the default value to be passed to the template
additionalProperties.put(PUB_DESCRIPTION, pubDescription);
}
if (additionalProperties.containsKey(USE_ENUM_EXTENSION)) {
this.setUseEnumExtension(convertPropertyToBooleanAndWriteBack(USE_ENUM_EXTENSION));
} else {
            // Not set, use the default value to be passed to the template.
additionalProperties.put(USE_ENUM_EXTENSION, useEnumExtension);
}
if (additionalProperties.containsKey(CodegenConstants.SOURCE_FOLDER)) {
this.setSourceFolder((String) additionalProperties.get(CodegenConstants.SOURCE_FOLDER));
}
if (additionalProperties.containsKey(DATE_LIBRARY)) {
this.setDateLibrary(additionalProperties.get(DATE_LIBRARY).toString());
}
// make api and model doc path available in mustache template
additionalProperties.put("apiDocPath", apiDocPath);
additionalProperties.put("modelDocPath", modelDocPath);
final String libFolder = sourceFolder + File.separator + "lib";
supportingFiles.add(new SupportingFile("pubspec.mustache", "", "pubspec.yaml"));
supportingFiles.add(new SupportingFile("analysis_options.mustache", "", "analysis_options.yaml"));
supportingFiles.add(new SupportingFile("apilib.mustache", libFolder, "api.dart"));
supportingFiles.add(new SupportingFile("api_util.mustache", libFolder, "api_util.dart"));
supportingFiles.add(new SupportingFile("serializers.mustache", libFolder, "serializers.dart"));
supportingFiles.add(new SupportingFile("gitignore.mustache", "", ".gitignore"));
supportingFiles.add(new SupportingFile("README.mustache", "", "README.md"));
final String authFolder = libFolder + File.separator + "auth";
supportingFiles.add(new SupportingFile("auth/api_key_auth.mustache", authFolder, "api_key_auth.dart"));
supportingFiles.add(new SupportingFile("auth/basic_auth.mustache", authFolder, "basic_auth.dart"));
supportingFiles.add(new SupportingFile("auth/oauth.mustache", authFolder, "oauth.dart"));
supportingFiles.add(new SupportingFile("auth/auth.mustache", authFolder, "auth.dart"));
if ("core".equals(dateLibrary)) {
            // this option uses the same classes as the normal Dart generator
additionalProperties.put("core", "true");
} else if ("timemachine".equals(dateLibrary)) {
additionalProperties.put("timeMachine", "true");
typeMapping.put("date", "OffsetDate");
typeMapping.put("Date", "OffsetDate");
typeMapping.put("DateTime", "OffsetDateTime");
typeMapping.put("datetime", "OffsetDateTime");
additionalReservedWords.addAll(Sets.newHashSet("OffsetDate", "OffsetDateTime"));
importMapping.put("OffsetDate", "package:time_machine/time_machine.dart");
importMapping.put("OffsetDateTime", "package:time_machine/time_machine.dart");
supportingFiles.add(new SupportingFile("local_date_serializer.mustache", libFolder, "local_date_serializer.dart"));
}
}
@Override
public Map<String, Object> postProcessModels(Map<String, Object> objs) {
objs = super.postProcessModels(objs);
List<Object> models = (List<Object>) objs.get("models");
ProcessUtils.addIndexToProperties(models, 1);
for (Object _mo : models) {
Map<String, Object> mo = (Map<String, Object>) _mo;
Set<String> modelImports = new HashSet<>();
CodegenModel cm = (CodegenModel) mo.get("model");
for (String modelImport : cm.imports) {
if (needToImport(modelImport)) {
if (importMapping().containsKey(modelImport)) {
modelImports.add(importMapping().get(modelImport));
} else {
modelImports.add("package:" + pubName + "/model/" + underscore(modelImport) + ".dart");
}
}
}
cm.imports = modelImports;
boolean hasVars = cm.vars.size() > 0;
cm.vendorExtensions.put("x-has-vars", hasVars);
}
return objs;
}
@Override
public void postProcessModelProperty(CodegenModel model, CodegenProperty property) {
super.postProcessModelProperty(model, property);
if (nullableFields) {
property.isNullable = true;
}
if (property.isEnum) {
// enums are generated with built_value and make use of BuiltSet
model.imports.add("BuiltSet");
}
}
@Override
public Map<String, Object> postProcessOperationsWithModels(Map<String, Object> objs, List<Object> allModels) {
objs = super.postProcessOperationsWithModels(objs, allModels);
Map<String, Object> operations = (Map<String, Object>) objs.get("operations");
List<CodegenOperation> operationList = (List<CodegenOperation>) operations.get("operation");
Set<String> modelImports = new HashSet<>();
Set<String> fullImports = new HashSet<>();
for (CodegenOperation op : operationList) {
op.httpMethod = op.httpMethod.toLowerCase(Locale.ROOT);
boolean isJson = true; //default to JSON
boolean isForm = false;
boolean isMultipart = false;
if (op.consumes != null) {
for (Map<String, String> consume : op.consumes) {
if (consume.containsKey("mediaType")) {
String type = consume.get("mediaType");
isJson = type.equalsIgnoreCase("application/json");
isForm = type.equalsIgnoreCase("application/x-www-form-urlencoded");
isMultipart = type.equalsIgnoreCase("multipart/form-data");
break;
}
}
}
for (CodegenParameter param : op.bodyParams) {
if (param.baseType != null && param.baseType.equalsIgnoreCase("Uint8List") && isMultipart) {
param.baseType = "MultipartFile";
param.dataType = "MultipartFile";
}
}
op.vendorExtensions.put("x-is-json", isJson);
op.vendorExtensions.put("x-is-form", isForm);
op.vendorExtensions.put("x-is-multipart", isMultipart);
if (op.getHasFormParams()) {
fullImports.add("package:" + pubName + "/api_util.dart");
}
Set<String> imports = new HashSet<>();
for (String item : op.imports) {
if (needToImport(item)) {
if (importMapping().containsKey(item) && needToImport(item)) {
fullImports.add(importMapping().get(item));
} else {
imports.add(underscore(item));
}
}
}
modelImports.addAll(imports);
op.imports = imports;
}
objs.put("modelImports", modelImports);
objs.put("fullImports", fullImports);
return objs;
}
}
| [
"\"DART_POST_PROCESS_FILE\""
]
| []
| [
"DART_POST_PROCESS_FILE"
]
| [] | ["DART_POST_PROCESS_FILE"] | java | 1 | 0 | |
utility/vault/test_vault.go | // © Copyright IBM Corporation 2020. All rights reserved.
// SPDX-License-Identifier: Apache2.0
//
package vault
import (
"github.com/GFTN/gftn-services/utility/vault/api"
"github.com/GFTN/gftn-services/utility/vault/auth"
// "encoding/json"
// "github.com/GFTN/gftn-services/utility/vault/utils"
// "fmt"
// "net/http"
// "log"
//"fmt"
)
func main() {
var appId = "SSLcert"
var safeName = "ApiSafe"
//var newCredential = "test123"
s := auth.GetSession()
//create application and enable certificateSerialNumber
api.AddApplication(s, appId)
api.ListAuthentication(s, appId)
api.AddAuthentication(s, appId)
//create safe
api.AddSafe(s, safeName)
api.ListSafeMember(s, safeName)
// for AIM operation
// api.AddSafeMember(s, safeName, appId)
// api.AddSafeMember(s, safeName, "Prov_EC2AMAZ-TQ8PDII")
// api.AddSafeMember(s, safeName, "AIMWebService")
// api.ListSafeMember(s, safeName)
//add account in the safe
api.AddAccount(s, safeName, "IBM_Token_ACC_PUBLIC", "ie.one.payments.worldwire.io", "ibm-account-public-key", "")
api.AddAccount(s, safeName, "IBM_Token_ACC_PRIVATE", "ie.one.payments.worldwire.io", "ibm-account-private-key", "")
//get account id inside the safe
//accountId := api.GetAccount(s, safeName)
//api.GetAccountGroup(s, safeName)
//update password
// auth.GetPasswordValue(s, accountId)
// auth.RandomCredential(s, accountId)
// auth.GetPasswordValue(s, accountId)
// auth.SetCredential(s, accountId, newCredential)
// auth.GetPasswordValue(s, accountId)
//AIM
// body := auth.GetPassword(appId, safeName, "IBM_Token_ACC_PRIVATE")
// var secret utils.Secret
// if err := json.Unmarshal([]byte(body), &secret); err != nil{
// panic(err)
// }
	// this will not be printed since the output will be caught by eval() in env.sh
// fmt.Println("IBM_PRIVATE_LABEL=", secret.Content)
// auth.SetEnv()
// utils.GetEnv()
// fmt.Println(os.Getenv("test"))
}
| [
"\"test\""
]
| []
| [
"test"
]
| [] | ["test"] | go | 1 | 0 | |
maintner/maintnerd/maintnerd.go | // Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// The maintnerd command serves project maintainer data from Git,
// Github, and/or Gerrit.
package main
import (
"bytes"
"context"
"crypto/tls"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"time"
"cloud.google.com/go/compute/metadata"
"cloud.google.com/go/storage"
"golang.org/x/build/autocertcache"
"golang.org/x/build/gerrit"
"golang.org/x/build/internal/gitauth"
"golang.org/x/build/maintner"
"golang.org/x/build/maintner/godata"
"golang.org/x/build/maintner/maintnerd/apipb"
"golang.org/x/build/maintner/maintnerd/gcslog"
"golang.org/x/build/maintner/maintnerd/maintapi"
"golang.org/x/crypto/acme/autocert"
"golang.org/x/net/http2"
"golang.org/x/time/rate"
grpc "grpc.go4.org"
)
var (
listen = flag.String("listen", "localhost:6343", "listen address")
devTLSPort = flag.Int("dev-tls-port", 0, "if non-zero, port number to run localhost self-signed TLS server")
autocertDomain = flag.String("autocert", "", "if non-empty, listen on port 443 and serve a LetsEncrypt TLS cert on this domain")
autocertBucket = flag.String("autocert-bucket", "", "if non-empty, Google Cloud Storage bucket to store LetsEncrypt cache in")
syncQuit = flag.Bool("sync-and-quit", false, "sync once and quit; don't run a server")
initQuit = flag.Bool("init-and-quit", false, "load the mutation log and quit; don't run a server")
verbose = flag.Bool("verbose", false, "enable verbose debug output")
	genMut          = flag.Bool("generate-mutations", true, "whether this instance should read from upstream git/gerrit/github and generate new mutations to the end of the log. This requires network access and only one instance can be generating mutations")
watchGithub = flag.String("watch-github", "", "Comma-separated list of owner/repo pairs to slurp")
watchGerrit = flag.String("watch-gerrit", "", `Comma-separated list of Gerrit projects to watch, each of form "hostname/project" (e.g. "go.googlesource.com/go")`)
pubsub = flag.String("pubsub", "", "If non-empty, the golang.org/x/build/cmd/pubsubhelper URL scheme and hostname, without path")
config = flag.String("config", "", "If non-empty, the name of a pre-defined config. Valid options are 'go' to be the primary Go server; 'godata' to run the server locally using the godata package, and 'devgo' to act like 'go', but mirror from godata at start-up.")
dataDir = flag.String("data-dir", "", "Local directory to write protobuf files to (default $HOME/var/maintnerd)")
debug = flag.Bool("debug", false, "Print debug logging information")
githubRateLimit = flag.Int("github-rate", 10, "Rate to limit GitHub requests (in queries per second, 0 is treated as unlimited)")
bucket = flag.String("bucket", "", "if non-empty, Google Cloud Storage bucket to use for log storage")
migrateGCSFlag = flag.Bool("migrate-disk-to-gcs", false, "[dev] If true, migrate from disk-based logs to GCS logs on start-up, then quit.")
)
func init() {
flag.Usage = func() {
os.Stderr.WriteString(`Maintner mirrors, searches, syncs, and serves data from Gerrit, Github, and Git repos.
Maintner gathers data about projects that you want to watch and holds it all in
memory. This way it's easy and fast to search, and you don't have to worry about
retrieving that data from remote APIs.
Maintner is short for "maintainer."
`)
flag.PrintDefaults()
}
}
var autocertManager *autocert.Manager
func main() {
flag.Parse()
if *autocertDomain != "" {
if *autocertBucket == "" {
log.Fatalf("using --autocert requires --autocert-bucket.")
}
sc, err := storage.NewClient(context.Background())
if err != nil {
log.Fatalf("Creating autocert cache, storage.NewClient: %v", err)
}
autocertManager = &autocert.Manager{
Prompt: autocert.AcceptTOS,
HostPolicy: autocert.HostWhitelist(*autocertDomain),
Cache: autocertcache.NewGoogleCloudStorageCache(sc, *autocertBucket),
}
}
if *dataDir == "" {
*dataDir = filepath.Join(os.Getenv("HOME"), "var", "maintnerd")
if *bucket == "" {
if err := os.MkdirAll(*dataDir, 0755); err != nil {
log.Fatal(err)
}
log.Printf("Storing data in implicit directory %s", *dataDir)
}
}
if *migrateGCSFlag && *bucket == "" {
log.Fatalf("--bucket flag required with --migrate-disk-to-gcs")
}
type storage interface {
maintner.MutationSource
maintner.MutationLogger
}
var logger storage
corpus := new(maintner.Corpus)
switch *config {
case "":
// Nothing
case "devgo":
dir := godata.Dir()
if err := os.MkdirAll(dir, 0700); err != nil {
log.Fatal(err)
}
log.Printf("Syncing from https://maintner.golang.org/logs to %s", dir)
mutSrc := maintner.NewNetworkMutationSource("https://maintner.golang.org/logs", dir)
for evt := range mutSrc.GetMutations(context.Background()) {
if evt.Err != nil {
log.Fatal(evt.Err)
}
if evt.End {
break
}
}
syncProdToDevMutationLogs()
log.Printf("Synced from https://maintner.golang.org/logs.")
setGoConfig()
case "go":
if err := gitauth.Init(); err != nil {
log.Fatalf("gitauth: %v", err)
}
setGoConfig()
case "godata":
setGodataConfig()
var err error
log.Printf("Using godata corpus...")
corpus, err = godata.Get(context.Background())
if err != nil {
log.Fatal(err)
}
default:
log.Fatalf("unknown --config=%s", *config)
}
if *genMut {
if *bucket != "" {
ctx := context.Background()
gl, err := gcslog.NewGCSLog(ctx, *bucket)
if err != nil {
log.Fatalf("newGCSLog: %v", err)
}
gl.RegisterHandlers(http.DefaultServeMux)
if *migrateGCSFlag {
diskLog := maintner.NewDiskMutationLogger(*dataDir)
if err := gl.CopyFrom(diskLog); err != nil {
log.Fatalf("migrate: %v", err)
}
log.Printf("Success.")
return
}
logger = gl
} else {
logger = maintner.NewDiskMutationLogger(*dataDir)
}
corpus.EnableLeaderMode(logger, *dataDir)
}
if *debug {
corpus.SetDebug()
}
corpus.SetVerbose(*verbose)
if *watchGithub != "" {
if *githubRateLimit > 0 {
limit := rate.Every(time.Second / time.Duration(*githubRateLimit))
corpus.SetGitHubLimiter(rate.NewLimiter(limit, *githubRateLimit))
}
for _, pair := range strings.Split(*watchGithub, ",") {
splits := strings.SplitN(pair, "/", 2)
if len(splits) != 2 || splits[1] == "" {
log.Fatalf("Invalid github repo: %s. Should be 'owner/repo,owner2/repo2'", pair)
}
token, err := getGithubToken()
if err != nil {
log.Fatalf("getting github token: %v", err)
}
corpus.TrackGitHub(splits[0], splits[1], token)
}
}
if *watchGerrit != "" {
for _, project := range strings.Split(*watchGerrit, ",") {
// token may be empty, that's OK.
corpus.TrackGerrit(project)
}
}
var ln net.Listener
var err error
if !*syncQuit && !*initQuit {
ln, err = net.Listen("tcp", *listen)
if err != nil {
log.Fatal(err)
}
log.Printf("Listening on %v", ln.Addr())
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
t0 := time.Now()
if logger != nil {
if err := corpus.Initialize(ctx, logger); err != nil {
// TODO: if Initialize only partially syncs the data, we need to delete
// whatever files it created, since Github returns events newest first
// and we use the issue updated dates to check whether we need to keep
// syncing.
log.Fatal(err)
}
initDur := time.Since(t0)
runtime.GC()
var ms runtime.MemStats
runtime.ReadMemStats(&ms)
log.Printf("Loaded data in %v. Memory: %v MB (%v bytes)", initDur, ms.HeapAlloc>>20, ms.HeapAlloc)
}
if *initQuit {
return
}
if *syncQuit {
if err := corpus.Sync(ctx); err != nil {
log.Fatalf("corpus.Sync = %v", err)
}
if err := corpus.Check(); err != nil {
log.Fatalf("post-Sync Corpus.Check = %v", err)
}
return
}
if *pubsub != "" {
corpus.StartPubSubHelperSubscribe(*pubsub)
}
grpcServer := grpc.NewServer()
apipb.RegisterMaintnerServiceServer(grpcServer, maintapi.NewAPIService(corpus))
http.Handle("/apipb.MaintnerService/", grpcServer)
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
if strings.HasPrefix(r.Header.Get("Content-Type"), "application/grpc") {
grpcServer.ServeHTTP(w, r)
return
}
if r.URL.Path != "/" {
http.NotFound(w, r)
return
}
io.WriteString(w, `<html>
<body>
<p>
This is <a href='https://godoc.org/golang.org/x/build/maintner/maintnerd'>maintnerd</a>,
the <a href='https://godoc.org/golang.org/x/build/maintner'>maintner</a> server.
See the <a href='https://godoc.org/golang.org/x/build/maintner/godata'>godata package</a> for
a client.
</p>
<ul>
<li><a href='/logs'>/logs</a>
</ul>
</body></html>
`)
})
errc := make(chan error)
if *genMut {
go func() { errc <- fmt.Errorf("Corpus.SyncLoop = %v", corpus.SyncLoop(ctx)) }()
}
if ln != nil {
var handler http.Handler = http.DefaultServeMux
if autocertManager != nil {
handler = autocertManager.HTTPHandler(handler)
}
go func() { errc <- fmt.Errorf("http.Serve = %v", http.Serve(ln, handler)) }()
}
if *autocertDomain != "" {
go func() { errc <- serveAutocertTLS() }()
}
if *devTLSPort != 0 {
go func() { errc <- serveDevTLS(*devTLSPort) }()
}
log.Fatal(<-errc)
}
// Projects to watch when using the "go" config.
var goGitHubProjects = []string{
"golang/arch",
"golang/benchmarks",
"golang/blog",
"golang/build",
"golang/crypto",
"golang/debug",
"golang/dl",
"golang/example",
"golang/exp",
"golang/gddo",
"golang/go",
"golang/image",
"golang/lint",
"golang/mobile",
"golang/net",
"golang/oauth2",
"golang/perf",
"golang/playground",
"golang/proposal",
"golang/review",
"golang/scratch",
"golang/sublime-build",
"golang/sublime-config",
"golang/sync",
"golang/sys",
"golang/talks",
"golang/term",
"golang/text",
"golang/time",
"golang/tools",
"golang/tour",
"golang/vgo",
"golang/website",
}
func setGoConfig() {
if *watchGithub != "" {
log.Fatalf("can't set both --config and --watch-github")
}
if *watchGerrit != "" {
log.Fatalf("can't set both --config and --watch-gerrit")
}
*pubsub = "https://pubsubhelper.golang.org"
*watchGithub = strings.Join(goGitHubProjects, ",")
gerrc := gerrit.NewClient("https://go-review.googlesource.com/", gerrit.NoAuth)
projs, err := gerrc.ListProjects(context.Background())
if err != nil {
log.Fatalf("error listing Go's gerrit projects: %v", err)
}
var buf bytes.Buffer
buf.WriteString("code.googlesource.com/gocloud,code.googlesource.com/google-api-go-client")
for _, pi := range projs {
buf.WriteString(",go.googlesource.com/")
buf.WriteString(pi.ID)
}
*watchGerrit = buf.String()
}
func setGodataConfig() {
if *watchGithub != "" {
log.Fatalf("can't set both --config and --watch-github")
}
if *watchGerrit != "" {
log.Fatalf("can't set both --config and --watch-gerrit")
}
*genMut = false
}
func getGithubToken() (string, error) {
if metadata.OnGCE() {
token, err := metadata.ProjectAttributeValue("maintner-github-token")
if err == nil {
return token, nil
}
log.Printf("getting GCE metadata 'maintner-github-token': %v", err)
log.Printf("falling back to github token from file.")
}
tokenFile := filepath.Join(os.Getenv("HOME"), ".github-issue-token")
slurp, err := ioutil.ReadFile(tokenFile)
if err != nil {
return "", err
}
f := strings.SplitN(strings.TrimSpace(string(slurp)), ":", 2)
if len(f) != 2 || f[0] == "" || f[1] == "" {
return "", fmt.Errorf("Expected token file %s to be of form <username>:<token>", tokenFile)
}
token := f[1]
return token, nil
}
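// Illustrative note (not from the original file): the token file is expected to hold a
// single line of the form <username>:<token>, e.g. "octocat:0123456789abcdef" (the
// values here are made up for the example).
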
func serveDevTLS(port int) error {
ln, err := net.Listen("tcp", "localhost:"+strconv.Itoa(port))
if err != nil {
return err
}
defer ln.Close()
log.Printf("Serving self-signed TLS at https://%s", ln.Addr())
// Abuse httptest for its localhost TLS setup code:
ts := httptest.NewUnstartedServer(http.DefaultServeMux)
// Ditch the provided listener, replace with our own:
ts.Listener.Close()
ts.Listener = ln
ts.TLS = &tls.Config{
NextProtos: []string{"h2", "http/1.1"},
InsecureSkipVerify: true,
}
ts.StartTLS()
select {}
}
func serveAutocertTLS() error {
if *autocertBucket == "" {
return fmt.Errorf("using --autocert requires --autocert-bucket.")
}
ln, err := net.Listen("tcp", ":443")
if err != nil {
return err
}
defer ln.Close()
config := &tls.Config{
GetCertificate: autocertManager.GetCertificate,
NextProtos: []string{"h2", "http/1.1"},
}
tlsLn := tls.NewListener(tcpKeepAliveListener{ln.(*net.TCPListener)}, config)
server := &http.Server{
Addr: ln.Addr().String(),
}
if err := http2.ConfigureServer(server, nil); err != nil {
log.Fatalf("http2.ConfigureServer: %v", err)
}
return server.Serve(tlsLn)
}
type tcpKeepAliveListener struct {
*net.TCPListener
}
func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) {
tc, err := ln.AcceptTCP()
if err != nil {
return
}
tc.SetKeepAlive(true)
tc.SetKeepAlivePeriod(3 * time.Minute)
return tc, nil
}
func syncProdToDevMutationLogs() {
src := godata.Dir()
dst := *dataDir
want := map[string]int64{} // basename => size
srcFis, err := ioutil.ReadDir(src)
if err != nil {
log.Fatal(err)
}
dstFis, err := ioutil.ReadDir(dst)
if err != nil {
log.Fatal(err)
}
for _, fi := range srcFis {
name := fi.Name()
if !strings.HasSuffix(name, ".mutlog") {
continue
}
		// The DiskMutationLogger (as we'll use in the dst dir)
// prepends "maintner-". So prepend that here ahead
// of time, even though the network mutation source's
// cache doesn't.
want["maintner-"+name] = fi.Size()
}
for _, fi := range dstFis {
name := fi.Name()
if !strings.HasSuffix(name, ".mutlog") {
continue
}
if want[name] == fi.Size() {
delete(want, name)
continue
}
log.Printf("dst file %q unwanted", name)
if err := os.Remove(filepath.Join(dst, name)); err != nil {
log.Fatal(err)
}
}
for name := range want {
log.Printf("syncing %s from %s to %s", name, src, dst)
slurp, err := ioutil.ReadFile(filepath.Join(src, strings.TrimPrefix(name, "maintner-")))
if err != nil {
log.Fatal(err)
}
if err := ioutil.WriteFile(filepath.Join(dst, name), slurp, 0644); err != nil {
log.Fatal(err)
}
}
}
| [
"\"HOME\"",
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | go | 1 | 0 | |
docs/conf.py | # -- Project information -----------------------------------------------------
import os
project = "Sphinx Book Theme"
copyright = "2020"
author = "the Executable Book Project"
master_doc = "index"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"myst_nb",
"sphinx_copybutton",
"sphinx_togglebutton",
"sphinxcontrib.bibtex",
"sphinx_thebe",
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.viewcode",
"ablog",
"sphinxext.opengraph",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
intersphinx_mapping = {
"python": ("https://docs.python.org/3.8", None),
"sphinx": ("https://www.sphinx-doc.org/en/master", None),
}
nitpick_ignore = [
("py:class", "docutils.nodes.document"),
("py:class", "docutils.parsers.rst.directives.body.Sidebar"),
]
suppress_warnings = ["myst.domains", "ref.ref"]
numfig = True
myst_enable_extensions = [
"dollarmath",
# "amsmath",
"deflist",
# "html_admonition",
# "html_image",
"colon_fence",
# "smartquotes",
# "replacements",
# "linkify",
# "substitution",
]
myst_url_schemes = ("http", "https", "mailto")
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_book_theme"
html_logo = "_static/logo-wide.png"
html_title = "Sphinx Book Theme"
html_copy_source = True
html_sourcelink_suffix = ""
html_favicon = "_static/logo-square.png"
html_last_updated_fmt = ""
html_sidebars = {
"reference/blog/*": [
"sidebar-logo.html",
"search-field.html",
"postcard.html",
"recentposts.html",
"tagcloud.html",
"categories.html",
"archives.html",
"sbt-sidebar-nav.html",
"sbt-sidebar-footer.html",
]
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
jupyter_execute_notebooks = "cache"
thebe_config = {
"repository_url": "https://github.com/binder-examples/jupyter-stacks-datascience",
"repository_branch": "master",
}
html_theme_options = {
"theme_dev_mode": True,
"path_to_docs": "docs",
"repository_url": "https://github.com/executablebooks/sphinx-book-theme",
# "repository_branch": "gh-pages", # For testing
"launch_buttons": {
"binderhub_url": "https://mybinder.org",
# "jupyterhub_url": "https://datahub.berkeley.edu", # For testing
"colab_url": "https://colab.research.google.com/",
"notebook_interface": "jupyterlab",
"thebe": True,
},
"use_edit_page_button": True,
"use_issues_button": True,
"use_repository_button": True,
"use_download_button": True,
"logo_only": True,
# For testing
# "use_fullscreen_button": False,
# "home_page_in_toc": True,
# "single_page": True,
# "extra_footer": "<a href='https://google.com'>Test</a>", # DEPRECATED KEY
# "extra_navbar": "<a href='https://google.com'>Test</a>",
# "show_navbar_depth": 2,
}
# -- ABlog config -------------------------------------------------
blog_path = "reference/blog"
blog_post_pattern = "reference/blog/*.md"
blog_baseurl = "https://sphinx-book-theme.readthedocs.io"
fontawesome_included = True
post_auto_image = 1
post_auto_excerpt = 2
execution_show_tb = "READTHEDOCS" in os.environ
bibtex_bibfiles = ["references.bib"]
bibtex_reference_style = "author_year"
| []
| []
| []
| [] | [] | python | 0 | 0 | |
pgxpool/tx_test.go | package pgxpool_test
import (
"context"
"os"
"testing"
"github.com/matthewpi/pgx/v4/pgxpool"
"github.com/stretchr/testify/require"
)
func TestTxExec(t *testing.T) {
t.Parallel()
pool, err := pgxpool.Connect(context.Background(), os.Getenv("PGX_TEST_DATABASE"))
require.NoError(t, err)
defer pool.Close()
tx, err := pool.Begin(context.Background())
require.NoError(t, err)
defer tx.Rollback(context.Background())
testExec(t, tx)
}
func TestTxQuery(t *testing.T) {
t.Parallel()
pool, err := pgxpool.Connect(context.Background(), os.Getenv("PGX_TEST_DATABASE"))
require.NoError(t, err)
defer pool.Close()
tx, err := pool.Begin(context.Background())
require.NoError(t, err)
defer tx.Rollback(context.Background())
testQuery(t, tx)
}
func TestTxQueryRow(t *testing.T) {
t.Parallel()
pool, err := pgxpool.Connect(context.Background(), os.Getenv("PGX_TEST_DATABASE"))
require.NoError(t, err)
defer pool.Close()
tx, err := pool.Begin(context.Background())
require.NoError(t, err)
defer tx.Rollback(context.Background())
testQueryRow(t, tx)
}
func TestTxSendBatch(t *testing.T) {
t.Parallel()
pool, err := pgxpool.Connect(context.Background(), os.Getenv("PGX_TEST_DATABASE"))
require.NoError(t, err)
defer pool.Close()
tx, err := pool.Begin(context.Background())
require.NoError(t, err)
defer tx.Rollback(context.Background())
testSendBatch(t, tx)
}
func TestTxCopyFrom(t *testing.T) {
t.Parallel()
pool, err := pgxpool.Connect(context.Background(), os.Getenv("PGX_TEST_DATABASE"))
require.NoError(t, err)
defer pool.Close()
tx, err := pool.Begin(context.Background())
require.NoError(t, err)
defer tx.Rollback(context.Background())
testCopyFrom(t, tx)
}
| [
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\""
]
| []
| [
"PGX_TEST_DATABASE"
]
| [] | ["PGX_TEST_DATABASE"] | go | 1 | 0 | |
test/e2e/utils.go | package e2e
import (
"context"
"crypto/tls"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"os/exec"
"runtime"
"strconv"
"strings"
"testing"
"time"
osv1 "github.com/openshift/api/route/v1"
osv1sec "github.com/openshift/api/security/v1"
"github.com/opentracing/opentracing-go"
framework "github.com/operator-framework/operator-sdk/pkg/test"
"github.com/operator-framework/operator-sdk/pkg/test/e2eutil"
"github.com/prometheus/common/log"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/uber/jaeger-client-go/config"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbac "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/discovery"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"github.com/jaegertracing/jaeger-operator/pkg/apis"
v1 "github.com/jaegertracing/jaeger-operator/pkg/apis/jaegertracing/v1"
"github.com/jaegertracing/jaeger-operator/pkg/apis/kafka/v1beta2"
"github.com/jaegertracing/jaeger-operator/pkg/util"
)
var (
retryInterval = time.Second * 5
timeout = time.Duration(getIntEnv("TEST_TIMEOUT", 2)) * time.Minute
storageNamespace = os.Getenv("STORAGE_NAMESPACE")
kafkaNamespace = os.Getenv("KAFKA_NAMESPACE")
debugMode = getBoolEnv("DEBUG_MODE", false)
usingOLM = getBoolEnv("OLM", false)
usingJaegerViaOLM = getBoolEnv("JAEGER_OLM", false)
saveLogs = getBoolEnv("SAVE_LOGS", false)
skipCassandraTests = getBoolEnv("SKIP_CASSANDRA_TESTS", false)
skipESExternal = getBoolEnv("SKIP_ES_EXTERNAL", false)
esServerUrls = "http://elasticsearch." + storageNamespace + ".svc:9200"
cassandraServiceName = "cassandra." + storageNamespace + ".svc"
cassandraKeyspace = "jaeger_v1_datacenter1"
cassandraDatacenter = "datacenter1"
jaegerCollectorPort = 14268
vertxExampleImage = getStringEnv("VERTX_EXAMPLE_IMAGE", "jaegertracing/vertx-create-span:operator-e2e-tests")
vertxDelaySeconds = int32(getIntEnv("VERTX_DELAY_SECONDS", 1))
vertxTimeoutSeconds = int32(getIntEnv("VERTX_TIMEOUT_SECONDS", 1))
ctx *framework.TestCtx
fw *framework.Framework
namespace string
t *testing.T
)
func getBoolEnv(key string, defaultValue bool) bool {
if value, ok := os.LookupEnv(key); ok {
boolValue, err := strconv.ParseBool(value)
if err != nil {
logrus.Warnf("Error [%v] received converting environment variable [%s] using [%v]", err, key, boolValue)
}
return boolValue
}
return defaultValue
}
func getIntEnv(key string, defaultValue int) int {
if value, ok := os.LookupEnv(key); ok {
intValue, err := strconv.Atoi(value)
if err != nil {
logrus.Warnf("Error [%v] received converting environment variable [%s] using [%v]", err, key, value)
}
return intValue
}
return defaultValue
}
func getStringEnv(key, defaultValue string) string {
if value, ok := os.LookupEnv(key); ok {
return value
}
return defaultValue
}
// GetPod returns pod name
func GetPod(namespace, namePrefix, containsImage string, kubeclient kubernetes.Interface) corev1.Pod {
pods, err := kubeclient.CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{})
if err != nil {
printTestStackTrace()
require.NoError(t, err)
}
for _, pod := range pods.Items {
if strings.HasPrefix(pod.Name, namePrefix) {
for _, c := range pod.Spec.Containers {
if strings.Contains(c.Image, containsImage) {
return pod
}
}
}
}
errorMessage := fmt.Sprintf("could not find pod in namespace %s with prefix %s and image %s", namespace, namePrefix, containsImage)
require.FailNow(t, errorMessage)
// We should never get here, but go requires a return statement
emptyPod := &corev1.Pod{}
return *emptyPod
}
func prepare(t *testing.T) (*framework.Context, error) {
t.Logf("debug mode: %v", debugMode)
ctx := framework.NewContext(t)
// Install jaeger-operator unless we've installed it from OperatorHub
start := time.Now()
if !usingJaegerViaOLM {
err := ctx.InitializeClusterResources(&framework.CleanupOptions{TestContext: ctx, Timeout: 10 * time.Minute, RetryInterval: retryInterval})
if err != nil {
t.Errorf("failed to initialize cluster resources: %v", err)
}
} else {
// Hacky - as of Operator SDK 0.18.2 calling getOperatorNamespace is required to actually create the namespace
_, err := ctx.GetOperatorNamespace()
require.NoError(t, err)
}
namespace := ctx.GetID()
logrus.Infof("Using namespace %s", namespace)
ns, err := framework.Global.KubeClient.CoreV1().Namespaces().Get(context.Background(), namespace, metav1.GetOptions{})
if err != nil {
t.Errorf("failed to get the namespaces details: %v", err)
}
crb := &rbac.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: namespace + "jaeger-operator",
Namespace: namespace,
OwnerReferences: []metav1.OwnerReference{
{
Name: ns.Name,
Kind: "Namespace",
APIVersion: "v1",
UID: ns.UID,
},
},
},
Subjects: []rbac.Subject{{
Kind: "ServiceAccount",
Name: "jaeger-operator",
Namespace: namespace,
}},
RoleRef: rbac.RoleRef{Kind: "ClusterRole", Name: "jaeger-operator"},
}
if _, err := framework.Global.KubeClient.RbacV1().ClusterRoleBindings().Create(context.Background(), crb, metav1.CreateOptions{}); err != nil {
t.Errorf("failed to create cluster role binding: %v", err)
}
t.Logf("initialized cluster resources on namespace %s", namespace)
// get global framework variables
f := framework.Global
// wait for the operator to be ready
if !usingJaegerViaOLM {
err := e2eutil.WaitForDeployment(t, f.KubeClient, namespace, "jaeger-operator", 1, retryInterval, timeout)
if err != nil {
logrus.Errorf("WaitForDeployment returned error %v", err)
return nil, err
}
}
logrus.Infof("Creation of Jaeger Operator in namespace %s took %v", namespace, time.Since(start))
return ctx, nil
}
func getJaegerOperatorImages(kubeclient kubernetes.Interface, namespace string) (map[string]string, error) {
imageNamesMap := make(map[string]string)
deployment, err := kubeclient.AppsV1().Deployments(namespace).Get(context.Background(), "jaeger-operator", metav1.GetOptions{})
if err != nil {
if strings.HasSuffix(err.Error(), "not found") {
return imageNamesMap, nil
}
return imageNamesMap, err
}
containers := deployment.Spec.Template.Spec.Containers
for _, container := range containers {
if container.Name == "jaeger-operator" {
for _, env := range container.Env {
if env.Name == "WATCH_NAMESPACE" {
imageNamesMap[container.Image] = env.Value
}
}
}
}
return imageNamesMap, nil
}
func getJaegerOperatorNamespace() string {
if !usingJaegerViaOLM {
return namespace
}
namespaces, err := fw.KubeClient.CoreV1().Namespaces().List(context.Background(), metav1.ListOptions{})
require.NoError(t, err)
for _, namespace := range namespaces.Items {
deployments, err := fw.KubeClient.AppsV1().Deployments(namespace.Name).List(context.Background(), metav1.ListOptions{})
require.NoError(t, err)
for _, deployment := range deployments.Items {
if deployment.Name == "jaeger-operator" {
return namespace.Name
}
}
}
require.Fail(t, "Did not find a jaeger operator instance")
return "" // We'll never get here, but need this to keep go happy
}
func isOpenShift(t *testing.T) bool {
apiList, err := availableAPIs(framework.Global.KubeConfig)
if err != nil {
t.Logf("Error trying to find APIs: %v\n", err)
}
apiGroups := apiList.Groups
for _, group := range apiGroups {
if group.Name == "route.openshift.io" {
return true
}
}
return false
}
func availableAPIs(kubeconfig *rest.Config) (*metav1.APIGroupList, error) {
discoveryClient, err := discovery.NewDiscoveryClientForConfig(kubeconfig)
if err != nil {
return nil, err
}
apiList, err := discoveryClient.ServerGroups()
if err != nil {
return nil, err
}
return apiList, nil
}
func addToFrameworkSchemeForSmokeTests(t *testing.T) {
assert.NoError(t, framework.AddToFrameworkScheme(apis.AddToScheme, &v1.JaegerList{
TypeMeta: metav1.TypeMeta{
Kind: "Jaeger",
APIVersion: "jaegertracing.io/v1",
},
}))
if isOpenShift(t) {
assert.NoError(t, framework.AddToFrameworkScheme(osv1.AddToScheme, &osv1.Route{}))
assert.NoError(t, framework.AddToFrameworkScheme(osv1sec.AddToScheme, &osv1sec.SecurityContextConstraints{}))
}
}
// Print a stack trace to help analyze test failures. This is shorter and easier to read than debug.printstack()
func printTestStackTrace() {
i := 1
for {
_, filename, lineNumber, ok := runtime.Caller(i)
if !ok || !strings.Contains(filename, "jaeger-operator") {
break
}
fmt.Printf("\t%s#%d\n", filename, lineNumber)
i++
}
}
func undeployJaegerInstance(jaeger *v1.Jaeger) bool {
if saveLogs {
logFileNameBase := strings.ReplaceAll(t.Name(), "/", "-")
writePodLogsToFile(jaeger.Namespace, "app.kubernetes.io/part-of=jaeger", logFileNameBase)
}
	if !debugMode || !t.Failed() {
		if err := fw.Client.Delete(context.TODO(), jaeger); err != nil {
			return false
		}
		if err := e2eutil.WaitForDeletion(t, fw.Client.Client, jaeger, retryInterval, timeout); err != nil {
			return false
		}
		return true
	}
// Always return true, we don't care
return true
}
func writePodLogsToFile(namespace, labelSelector, logFileNameBase string) {
// Write logs for every container in every pod that we've matched.
logs := getLogsForNamespace(namespace, labelSelector, logFileNameBase)
for logFileName := range logs {
logString := logs[logFileName]
log := []byte(logString)
err := ioutil.WriteFile(logFileName, log, 0644)
if err != nil {
logrus.Warnf("Error writing log content to file %s: %v\n", logFileName, err)
}
logrus.Infof("Wrote %d bytes to logfile %s", len(log), logFileName)
}
}
func getLogsForNamespace(namespace, labelSelector, nameBase string) map[string]string {
pods, err := fw.KubeClient.CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{LabelSelector: labelSelector})
if err != nil {
logrus.Warnf("Got error listing pods in namespace %s with selector %s: %v", namespace, labelSelector, err)
return nil
}
// Write logs for every container in every pod that we've matched.
logs := make(map[string]string)
for _, pod := range pods.Items {
for _, container := range pod.Spec.Containers {
result := fw.KubeClient.CoreV1().Pods(namespace).GetLogs(pod.Name, &corev1.PodLogOptions{Container: container.Name}).Do(context.Background())
if result.Error() != nil {
logrus.Warnf("Error getting log content for pod %s, container %s in namespace %s: %v", pod.Name, container.Name, namespace, result.Error())
} else {
log, _ := result.Raw()
var logName string
if container.Name == "jaeger-operator" {
logName = fmt.Sprintf("%s.log", nameBase)
} else {
logName = fmt.Sprintf("%s-%s-%s.log", nameBase, pod.Name, container.Name)
}
logs[logName] = string(log)
}
}
}
return logs
}
func getJaegerInstance(name, namespace string) *v1.Jaeger {
jaegerInstance := &v1.Jaeger{}
key := types.NamespacedName{Name: name, Namespace: namespace}
err := fw.Client.Get(context.Background(), key, jaegerInstance)
require.NoError(t, err)
return jaegerInstance
}
// ValidateHTTPResponseFunc should determine whether the response contains the desired content
type ValidateHTTPResponseFunc func(response *http.Response) (done bool, err error)
// WaitAndPollForHTTPResponse will try the targetURL until it gets the desired response or times out
func WaitAndPollForHTTPResponse(targetURL string, condition ValidateHTTPResponseFunc) (err error) {
client := http.Client{Timeout: 5 * time.Second}
request, err := http.NewRequest(http.MethodGet, targetURL, nil)
require.NoError(t, err)
err = wait.Poll(retryInterval, timeout, func() (done bool, err error) {
response, err := client.Do(request)
require.NoError(t, err)
defer response.Body.Close()
return condition(response)
})
return err
}
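
// Usage sketch (illustrative, not part of the original file): a condition that keeps
// polling until the endpoint answers with HTTP 200. The URL below is an assumption.
//
//	ok200 := func(response *http.Response) (bool, error) {
//		return response.StatusCode == http.StatusOK, nil
//	}
//	err := WaitAndPollForHTTPResponse("http://localhost:16686/search", ok200)
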
func handleSuiteTearDown() {
logrus.Info("Entering TearDownSuite()")
if saveLogs && !usingJaegerViaOLM {
var logFileNameBase string
// Sometimes t.Name() returns just the suite name, other times it returns suite/lastTestRun.
// Here we just want the suite name
i := strings.Index(t.Name(), "/")
if i > 0 {
logFileNameBase = t.Name()[:i] + "-operator"
} else {
logFileNameBase = t.Name() + "-operator"
}
writePodLogsToFile(namespace, "name=jaeger-operator", logFileNameBase)
}
if !debugMode || !t.Failed() {
ctx.Cleanup()
}
}
func handleTestFailure() {
if t.Failed() {
logWarningEvents()
}
if debugMode && t.Failed() {
logrus.Errorf("Test %s failed\n", t.Name())
		// FIXME find a better way to terminate tests than os.Exit(1)
		os.Exit(1)
	}
}
type resp struct {
Data []trace `json:"data"`
}
type trace struct {
TraceID string `json:"traceID"`
Spans []span `json:"spans"`
}
type span struct {
TraceID string `json:"traceID"`
SpanID string `json:"spanID"`
}
type services struct {
	Data   []string    `json:"data"`
	Total  int         `json:"total"`
	Limit  int         `json:"limit"`
	Offset int         `json:"offset"`
	Errors interface{} `json:"errors"`
}
func createJaegerInstanceFromFile(name, filename string) *v1.Jaeger {
// #nosec G204: Subprocess launching should be audited
cmd := exec.Command("kubectl", "create", "--namespace", namespace, "--filename", filename)
output, err := cmd.CombinedOutput()
if err != nil && !strings.Contains(string(output), "AlreadyExists") {
require.NoError(t, err, "Failed creating Jaeger instance with: [%s]\n", string(output))
}
return getJaegerInstance(name, namespace)
}
func smokeTestAllInOneExample(name, yamlFileName string) {
smokeTestAllInOneExampleWithTimeout(name, yamlFileName, timeout+1*time.Minute)
}
func smokeTestAllInOneExampleWithTimeout(name, yamlFileName string, to time.Duration) {
jaegerInstance := createJaegerInstanceFromFile(name, yamlFileName)
defer undeployJaegerInstance(jaegerInstance)
err := WaitForDeployment(t, fw.KubeClient, namespace, name, 1, retryInterval, to)
require.NoErrorf(t, err, "Error waiting for %s to deploy", name)
AllInOneSmokeTest(name)
}
func smokeTestProductionExample(name, yamlFileName string) {
jaegerInstance := createJaegerInstanceFromFile(name, yamlFileName)
defer undeployJaegerInstance(jaegerInstance)
queryDeploymentName := name + "-query"
collectorDeploymentName := name + "-collector"
if jaegerInstance.Spec.Strategy == v1.DeploymentStrategyStreaming {
ingesterDeploymentName := name + "-ingester"
err := WaitForDeployment(t, fw.KubeClient, namespace, ingesterDeploymentName, 1, retryInterval, timeout)
require.NoErrorf(t, err, "Error waiting for %s to deploy", ingesterDeploymentName)
}
err := WaitForDeployment(t, fw.KubeClient, namespace, queryDeploymentName, 1, retryInterval, timeout)
require.NoErrorf(t, err, "Error waiting for %s to deploy", queryDeploymentName)
err = WaitForDeployment(t, fw.KubeClient, namespace, collectorDeploymentName, 1, retryInterval, timeout)
require.NoErrorf(t, err, "Error waiting for %s to deploy", collectorDeploymentName)
ProductionSmokeTest(name)
}
func findRoute(t *testing.T, f *framework.Framework, name, namespace string) *osv1.Route {
routeList := &osv1.RouteList{}
err := wait.Poll(retryInterval, timeout, func() (bool, error) {
if err := f.Client.List(context.Background(), routeList); err != nil {
return false, err
}
if len(routeList.Items) >= 1 {
return true, nil
}
return false, nil
})
if err != nil {
t.Fatalf("Failed waiting for route: %v", err)
}
// Truncate the namespace name and use that to find the route
target := util.DNSName(util.Truncate(name, 62-len(namespace)))
for _, r := range routeList.Items {
if r.Namespace == namespace && strings.HasPrefix(r.Spec.Host, target) {
return &r
}
}
t.Fatal("Could not find route")
return nil
}
func getQueryURL(jaegerInstanceName, namespace, urlPattern string) (url string) {
if isOpenShift(t) {
route := findRoute(t, fw, jaegerInstanceName, namespace)
require.Len(t, route.Status.Ingress, 1, "Wrong number of ingresses.")
url = fmt.Sprintf("https://"+urlPattern, route.Spec.Host)
} else {
ingress, err := WaitForIngress(t, fw.KubeClient, namespace, jaegerInstanceName+"-query", retryInterval, timeout)
require.NoError(t, err, "Failed waiting for ingress")
require.Len(t, ingress.Status.LoadBalancer.Ingress, 1, "Wrong number of ingresses.")
address := ingress.Status.LoadBalancer.Ingress[0].IP
url = fmt.Sprintf("http://"+urlPattern, address)
}
return url
}
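
// Usage sketch (illustrative): urlPattern carries a single %s placeholder that is filled
// with the route host (OpenShift) or the ingress IP (plain Kubernetes), for example:
//
//	url := getQueryURL("simple-prod", namespace, "%s/api/traces")
//
// The instance name above is an assumption made for this example.
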
func getHTTPClient(insecure bool) (httpClient http.Client) {
if isOpenShift(t) {
transport := &http.Transport{
// #nosec G402: TLS InsecureSkipVerify set true
TLSClientConfig: &tls.Config{InsecureSkipVerify: insecure},
}
httpClient = http.Client{Timeout: 30 * time.Second, Transport: transport}
} else {
httpClient = http.Client{Timeout: time.Second}
}
return httpClient
}
func getQueryURLAndHTTPClient(jaegerInstanceName, urlPattern string, insecure bool) (string, http.Client) {
url := getQueryURL(jaegerInstanceName, namespace, urlPattern)
	httpClient := getHTTPClient(insecure)
return url, httpClient
}
func createSecret(secretName, secretNamespace string, secretData map[string][]byte) *corev1.Secret {
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secretName,
Namespace: secretNamespace,
},
Data: secretData,
}
createdSecret, err := fw.KubeClient.CoreV1().Secrets(secretNamespace).Create(context.Background(), secret, metav1.CreateOptions{})
require.NoError(t, err)
WaitForSecret(secretName, secretNamespace)
return createdSecret
}
func deletePersistentVolumeClaims(namespace string) {
pvcs, err := fw.KubeClient.CoreV1().PersistentVolumeClaims(kafkaNamespace).List(context.Background(), metav1.ListOptions{})
require.NoError(t, err)
emptyDeleteOptions := metav1.DeleteOptions{}
for _, pvc := range pvcs.Items {
logrus.Infof("Deleting PVC %s from namespace %s", pvc.Name, namespace)
fw.KubeClient.CoreV1().PersistentVolumeClaims(kafkaNamespace).Delete(context.Background(), pvc.Name, emptyDeleteOptions)
}
}
// testContainerInPod is a general function to test whether the container exists in the pod,
// provided that the pod has an `app` label. It returns true if and only if the container
// exists and the user-defined function `predicate`, if given, returns true.
func testContainerInPod(namespace, appName, containerName string, predicate func(corev1.Container) bool) bool {
var pods *corev1.PodList
var pod corev1.Pod
// Sometimes the app gets redeployed twice and we can get three pods, wait till there are either 1 or 2
err := wait.Poll(retryInterval, timeout, func() (done bool, err error) {
pods, err = fw.KubeClient.CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "app=" + appName})
require.NoError(t, err)
if len(pods.Items) > 0 && len(pods.Items) < 3 {
return true, nil
}
return false, nil
})
require.NoError(t, err)
if len(pods.Items) == 1 {
logrus.Infof("Found 1 pod %s", pods.Items[0].Name)
pod = pods.Items[0]
} else {
for _, p := range pods.Items {
if p.DeletionTimestamp == nil {
logrus.Infof("Using pod %s", p.Name)
pod = p
} else {
logrus.Infof("Skipping pod %s with deletionTimestamp %v", p.Name, p.DeletionTimestamp)
}
}
}
containers := pod.Spec.Containers
for _, container := range containers {
if container.Name == containerName {
if predicate != nil {
return predicate(container)
}
return true
}
}
require.Failf(t, "Did not find container %s for pod with label{app=%s} in namespace %s", containerName, appName, namespace)
return false
}
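
// Usage sketch (illustrative, names assumed): verify that the injected agent container
// runs the expected image.
//
//	hasAgent := testContainerInPod(namespace, "vertx-create-span", "jaeger-agent",
//		func(c corev1.Container) bool { return strings.Contains(c.Image, "jaeger-agent") })
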
func logWarningEvents() {
eventList, err := fw.KubeClient.CoreV1().Events(namespace).List(context.Background(), metav1.ListOptions{})
require.NoError(t, err)
firstWarning := true
for _, event := range eventList.Items {
if event.Type != "Normal" {
if firstWarning {
logrus.Infof("Warning events for test %s", t.Name())
firstWarning = false
}
logrus.Warnf("Event Warning: Reason: %s Message: %s", event.Reason, event.Message)
}
}
}
func waitForKafkaInstance() {
kafkaInstance := &v1beta2.Kafka{}
err := WaitForStatefulset(t, fw.KubeClient, kafkaNamespace, "my-cluster-zookeeper", retryInterval, timeout+1*time.Minute)
require.NoError(t, err)
err = WaitForStatefulset(t, fw.KubeClient, kafkaNamespace, "my-cluster-kafka", retryInterval, timeout)
require.NoError(t, err)
err = wait.Poll(retryInterval, timeout, func() (done bool, err error) {
err = fw.Client.Get(context.Background(), types.NamespacedName{Name: "my-cluster", Namespace: kafkaNamespace}, kafkaInstance)
require.NoError(t, err)
for _, condition := range kafkaInstance.Status.Conditions {
if strings.EqualFold(condition.Type, "ready") && strings.EqualFold(condition.Status, "true") {
return true, nil
}
}
return false, nil
})
require.NoError(t, err)
}
func waitForElasticSearch() {
err := WaitForStatefulset(t, fw.KubeClient, storageNamespace, string(v1.JaegerESStorage), retryInterval, timeout)
require.NoError(t, err, "Error waiting for elasticsearch")
}
func createESSelfProvDeployment(jaegerInstance *v1.Jaeger, jaegerInstanceName, jaegerNamespace string) {
err := fw.Client.Create(context.TODO(), jaegerInstance, &framework.CleanupOptions{TestContext: ctx, Timeout: timeout, RetryInterval: retryInterval})
require.NoError(t, err, "Error deploying example Jaeger")
// Wait for all elasticsearch instances to appear
waitForESDeployment(jaegerInstance)
err = e2eutil.WaitForDeployment(t, fw.KubeClient, jaegerNamespace, jaegerInstanceName+"-collector", 1, retryInterval, timeout)
require.NoError(t, err, "Error waiting for collector deployment")
err = e2eutil.WaitForDeployment(t, fw.KubeClient, jaegerNamespace, jaegerInstanceName+"-query", 1, retryInterval, timeout)
require.NoError(t, err, "Error waiting for query deployment")
logrus.Infof("Jaeger instance %s finished deploying in %s", jaegerInstanceName, jaegerNamespace)
}
func createSimpleProdDeployment(jaegerInstance *v1.Jaeger, jaegerInstanceName, jaegerNamespace string) {
err := fw.Client.Create(context.TODO(), jaegerInstance, &framework.CleanupOptions{TestContext: ctx, Timeout: timeout, RetryInterval: retryInterval})
require.NoError(t, err, "Error deploying example Jaeger")
err = e2eutil.WaitForDeployment(t, fw.KubeClient, jaegerNamespace, jaegerInstanceName+"-collector", 1, retryInterval, timeout)
require.NoError(t, err, "Error waiting for collector deployment")
err = e2eutil.WaitForDeployment(t, fw.KubeClient, jaegerNamespace, jaegerInstanceName+"-query", 1, retryInterval, timeout)
require.NoError(t, err, "Error waiting for query deployment")
logrus.Infof("Jaeger instance %s finished deploying in %s", jaegerInstanceName, jaegerNamespace)
}
func createESKafkaSelfProvDeployment(jaegerInstance *v1.Jaeger) {
err := fw.Client.Create(context.TODO(), jaegerInstance, &framework.CleanupOptions{TestContext: ctx, Timeout: timeout, RetryInterval: retryInterval})
require.NoError(t, err, "Error deploying example Jaeger")
// Wait for the kafka instance to start
err = WaitForStatefulset(t, fw.KubeClient, namespace, jaegerInstance.Name+"-zookeeper", retryInterval, timeout+1*time.Minute)
require.NoError(t, err)
err = WaitForStatefulset(t, fw.KubeClient, namespace, jaegerInstance.Name+"-kafka", retryInterval, timeout)
require.NoError(t, err)
err = WaitForDeployment(t, fw.KubeClient, namespace, jaegerInstance.Name+"-entity-operator", 1, retryInterval, timeout)
require.NoError(t, err, "Error waiting for entity-operator deployment")
waitForESDeployment(jaegerInstance)
err = e2eutil.WaitForDeployment(t, fw.KubeClient, jaegerInstance.Namespace, jaegerInstance.Name+"-collector", 1, retryInterval, timeout)
require.NoError(t, err, "Error waiting for collector deployment")
err = e2eutil.WaitForDeployment(t, fw.KubeClient, jaegerInstance.Namespace, jaegerInstance.Name+"-query", 1, retryInterval, timeout)
require.NoError(t, err, "Error waiting for query deployment")
logrus.Infof("Jaeger instance %s finished deploying in %s", jaegerInstance.Name, jaegerInstance.Namespace)
}
func waitForESDeployment(jaegerInstance *v1.Jaeger) {
// Wait for all elasticsearch instances to appear
listOptions := &metav1.ListOptions{LabelSelector: "component=elasticsearch"}
var deployments []appsv1.Deployment
err := wait.Poll(retryInterval, timeout, func() (done bool, err error) {
esDeployments, err := fw.KubeClient.AppsV1().Deployments(jaegerInstance.Namespace).List(context.Background(), *listOptions)
if int32(len(esDeployments.Items)) == jaegerInstance.Spec.Storage.Elasticsearch.NodeCount {
deployments = esDeployments.Items
return true, nil
}
return false, nil
})
require.NoError(t, err, "Failed waiting for elasticsearch deployments to be available")
// And then wait for them to finish deploying
for _, deployment := range deployments {
logrus.Infof("Waiting for deployment of %s", deployment.Name)
err = e2eutil.WaitForDeployment(t, fw.KubeClient, jaegerInstance.Namespace, deployment.Name, 1, retryInterval, 5*time.Minute)
require.NoError(t, err, "Failed waiting for elasticsearch deployment(s) %s to start", deployment.Name)
}
}
func getJaegerSelfProvisionedESAndKafka(instanceName string) *v1.Jaeger {
ingressEnabled := true
jaegerInstance := &v1.Jaeger{
TypeMeta: metav1.TypeMeta{
Kind: "Jaeger",
APIVersion: "jaegertracing.io/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: instanceName,
Namespace: namespace,
},
Spec: v1.JaegerSpec{
Ingress: v1.JaegerIngressSpec{
Enabled: &ingressEnabled,
Security: v1.IngressSecurityNoneExplicit,
},
Strategy: v1.DeploymentStrategyStreaming,
Storage: v1.JaegerStorageSpec{
Type: v1.JaegerESStorage,
Elasticsearch: v1.ElasticsearchSpec{
NodeCount: 1,
Resources: &corev1.ResourceRequirements{
Limits: corev1.ResourceList{corev1.ResourceMemory: resource.MustParse("1Gi")},
Requests: corev1.ResourceList{corev1.ResourceMemory: resource.MustParse("1Gi")},
},
},
},
},
}
return jaegerInstance
}
func getTracingClientWithCollectorEndpoint(serviceName, collectorEndpoint string) (opentracing.Tracer, io.Closer, error) {
if collectorEndpoint == "" {
collectorEndpoint = fmt.Sprintf("http://localhost:%d/api/traces", jaegerCollectorPort)
}
cfg := config.Configuration{
Reporter: &config.ReporterConfig{CollectorEndpoint: collectorEndpoint},
Sampler: &config.SamplerConfig{Type: "const", Param: 1},
ServiceName: serviceName,
}
return cfg.NewTracer()
}
func waitForDeploymentAndUpdate(deploymentName, containerName string, update func(container *corev1.Container)) error {
return wait.Poll(retryInterval, timeout, func() (done bool, err error) {
deployment, err := fw.KubeClient.AppsV1().Deployments(namespace).Get(context.Background(), deploymentName, metav1.GetOptions{})
require.NoError(t, err)
containers := deployment.Spec.Template.Spec.Containers
for index, container := range containers {
if container.Name == containerName {
update(&deployment.Spec.Template.Spec.Containers[index])
updatedDeployment, err := fw.KubeClient.AppsV1().Deployments(namespace).Update(context.Background(), deployment, metav1.UpdateOptions{})
if err != nil {
log.Warnf("Error %v updating container, retrying", err)
return false, nil
}
log.Infof("Updated deployment %v", updatedDeployment.Name)
return true, nil
}
}
return false, fmt.Errorf("container %s in deployment %s not found", containerName, deploymentName)
})
}
func getBusinessAppCR() *os.File {
content, err := ioutil.ReadFile("../../examples/business-application-injected-sidecar.yaml")
require.NoError(t, err)
newContent := strings.Replace(string(content), "image: jaegertracing/vertx-create-span:operator-e2e-tests", "image: "+vertxExampleImage, 1)
file, err := ioutil.TempFile("", "vertx-example")
require.NoError(t, err)
err = ioutil.WriteFile(file.Name(), []byte(newContent), 0666)
require.NoError(t, err)
return file
}
| [
"\"STORAGE_NAMESPACE\"",
"\"KAFKA_NAMESPACE\""
]
| []
| [
"KAFKA_NAMESPACE",
"STORAGE_NAMESPACE"
]
| [] | ["KAFKA_NAMESPACE", "STORAGE_NAMESPACE"] | go | 2 | 0 | |
IAC-with-emoji.py | """
This program was written for educational purposes and may only be used as such!
Using it for real purposes violates the Instagram guidelines!
Possible consequences include the blocking of the Instagram account.
Please read Instagram's guidelines for more information.
DO NOT present parts of this program as your own work. ©2020 - 2021 by www.github.com/JueK3y/
"""
import sys
import time
import json
import shutil
import random
import os.path
import pathlib
import datetime
import webbrowser
import requests
import tkinter as tk
import subprocess as sp
from tkinter import *
from threading import *
from zipfile import ZipFile
from selenium import webdriver
from tkinter import messagebox, ttk
from ttkthemes import ThemedTk
from tkinter.filedialog import askopenfilename
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import WebDriverException, NoSuchElementException, NoSuchWindowException, \
InvalidSessionIdException, InvalidArgumentException
from urllib3 import HTTPConnectionPool
from urllib3.exceptions import MaxRetryError
class Colors:
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
BOLD = '\033[1m'
ENDC = '\033[0m'
def connected():
timeout = 10
try:
requests.get('https://instagram.com', timeout=timeout)
except (requests.ConnectionError, requests.Timeout):
try:
requests.get('https://google.com', timeout=timeout)
except (requests.ConnectionError, requests.Timeout):
messagebox.showerror("Internet is gone", "You don't have a working internet connection.")
try:
web.close(), web.quit()
return
except NoSuchWindowException:
print(Colors.WARNING, NoSuchWindowException, "for connected()", Colors.ENDC)
return
except NameError:
                print(Colors.WARNING, NameError, "for connected()", Colors.ENDC)
sys.exit(1)
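# line_count() counts usable comment lines (ignoring '!'-prefixed ones) and
# stores the count in settings.json for the duration estimate.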
def line_count():
with open('Resource/JSON/settings.json', 'r') as setfil:
data_json = setfil.read()
obj_json = json.loads(data_json)
# Time for commenting
line_coun = 0
try:
for line in open(str(obj_json['commentsPath'])):
lin = line.strip()
if not lin.startswith("#"):
line_coun += 1
except UnicodeDecodeError:
messagebox.showerror("No emojis", "Sorry, emojis aren't supported currently :(")
try:
b1_text.set("Run")
b1["command"] = threading_run
return
except RuntimeError:
sys.exit(1)
obj_json['Comment Lines'] = line_coun
with open('Resource/JSON/settings.json', 'w') as settfi:
json.dump(obj_json, settfi)
setfil.close()
settfi.close()
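# comment_time() estimates the total commenting duration in minutes from the
# counted lines and the configured maximum delay.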
def comment_time():
with open('Resource/JSON/settings.json', 'r') as settfi:
data_sett = settfi.read()
obj_sett = json.loads(data_sett)
ave_time = float(obj_sett['Max Y']) - 20
com_lines = obj_sett['Comment Lines']
obj_sett['Time'] = (com_lines * ave_time) / 60
with open('Resource/JSON/settings.json', 'w') as settfile:
json.dump(obj_sett, settfile)
settfi.close()
settfile.close()
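# ask_file() offers to create and/or open comments.txt in Notepad when no
# comment file exists yet.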
def ask_file():
comment = tk.messagebox.askyesno('No comments',
"You don't have any sentences to comment on Instagram." + '\n' + "Do you "
"want to "
"create "
"some now?",
icon='warning')
if comment and pathlib.Path("Resource/txt/comments.txt").exists():
os.system("notepad Resource/txt/comments.txt")
return
elif comment:
comment_txt = open("Resource/txt/comments.txt", "a")
comment_txt.write(
"! Write only one comment per line. Comments with '!' at the beginning will be ignored.")
comment_txt.close()
os.system("notepad Resource/txt/comments.txt")
return
else:
return
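# eula_file() makes sure the current Windows user has accepted the EULA,
# recording the acceptance in EULA.txt and firstRun.json.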
def eula_file():
disagree = False
with open('Resource/txt/EULA.txt') as f:
if not ('User ' + os.getenv('username') + ' agreed to the EULA on') in f.read():
disagree = True
with open('Resource/JSON/firstRun.json', 'r') as runfi:
run_data = runfi.read()
run_obj = json.loads(run_data)
if str(run_obj['First Run?']) == "Yes" and str(run_obj['Agree to EULA?']) == "No":
eula = messagebox.askokcancel("Agree EULA", "Do you agree to the end user license agreement (EULA)?" + '\n' +
"You can find the EULA here: juek3y.com/en/code/download/terms-of-service")
if eula:
print(Colors.OKGREEN, "Agreed to EULA", Colors.ENDC)
first_run = {
'First Run?': "Yes",
'Agree to EULA?': "Yes"
}
with open('Resource/JSON/firstRun.json', 'w') as runfi:
json.dump(first_run, runfi)
runfi.close()
with open('Resource/txt/EULA.txt', 'a') as file:
file.write(
'\n' + 'User ' + os.getenv('username') + ' agreed to the EULA on %s/%s/%s' % (
e.day, e.month, e.year) +
' at %s:%s:%s.' % (e.hour, e.minute, e.second))
file.close()
else:
print(Colors.FAIL, "Rejected the EULA", Colors.ENDC)
sys.exit(1)
elif str(run_obj['First Run?']) == "No" and str(run_obj['Agree to EULA?']) != "Yes":
eula = messagebox.askokcancel("Agree EULA", "Do you agree to the end user license agreement (EULA)?" + '\n' +
"You can find the EULA here: juek3y.com/en/code/download/terms-of-service")
if eula:
print(Colors.OKGREEN, "Agreed to EULA", Colors.ENDC)
first_run = {
'First Run?': "No",
'Agree to EULA?': "Yes"
}
with open('Resource/JSON/firstRun.json', 'w') as runfi:
json.dump(first_run, runfi)
runfi.close()
with open('Resource/txt/EULA.txt', 'a') as file:
file.write(
'User ' + os.getenv('username') + ' agreed to the EULA on %s/%s/%s' % (e.day, e.month, e.year) +
' at %s:%s:%s.' % (e.hour, e.minute, e.second))
file.close()
else:
print(Colors.FAIL, "Rejected the EULA", Colors.ENDC)
sys.exit(1)
elif disagree:
eula = messagebox.askokcancel("Agree EULA", "Do you agree to the end user license agreement (EULA)?" + '\n' +
"You can find the EULA here: juek3y.com/en/code/download/terms-of-service")
if eula:
print(Colors.OKGREEN, "Agreed to EULA", Colors.ENDC)
first_run = {
'First Run?': "No",
'Agree to EULA?': "Yes"
}
with open('Resource/JSON/firstRun.json', 'w') as runfi:
json.dump(first_run, runfi)
runfi.close()
with open("Resource/txt/EULA.txt", "a+") as file_object:
file_object.seek(0)
data_f = file_object.read(118)
if len(data_f) > 0:
file_object.write("\n")
file_object.write('User ' + os.getenv('username') + ' agreed to the EULA on %s/%s/%s' % (e.day, e.month,
e.year) +
' at %s:%s:%s.' % (e.hour, e.minute, e.second))
file_object.close()
else:
print(Colors.FAIL, "Rejected the EULA", Colors.ENDC)
sys.exit(1)
f.close()
runfi.close()
def threading_run():
t1 = Thread(target=run)
t1.start()
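# run() validates the form input, persists URL and login data, checks the EULA
# and the comment file, then hands over to check_comment().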
def run():
with open('Resource/JSON/settings.json', 'r') as settingfile:
data_sett = settingfile.read()
obj_setti = json.loads(data_sett)
if str(e1.get()) == "" or str(e2.get()) == "" or str(e4.get()) == "" or str(e2.get()) == "None":
messagebox.showerror("Missing input", "All fields must be filled in.")
settingfile.close()
elif len(str(e4.get())) < 6:
messagebox.showerror("Incorrect password", "Your password can't be that short.")
settingfile.close()
elif len(str(e1.get())) < 11:
messagebox.showerror("Wrong link", "The link have to lead to an instagram post.")
settingfile.close()
elif not pathlib.Path(str(obj_setti['commentsPath'])).exists():
ask_file()
settingfile.close()
else:
if str(obj_setti['Looping comments?']):
run.loop = True
else:
run.loop = False
settingfile.close()
# Save URL
safe_url = {
'Last URL': url_text.get(),
}
with open('Resource/JSON/URLhistory.json', 'w') as urlfi:
json.dump(safe_url, urlfi)
urlfi.close()
# Save user login
login = {
'Username': username_text.get(),
'Password': password_text.get()
}
with open('Resource/JSON/LogIn.json', 'w') as lginfi:
json.dump(login, lginfi)
lginfi.close()
eula_file()
line_count()
comment_time()
with open('Resource/JSON/settings.json', 'r') as settfi:
data_json = settfi.read()
obj_sett = json.loads(data_json)
if obj_sett['Comment Lines'] == 0:
ask_file()  # reuse the helper above instead of duplicating the dialog logic
return
if obj_sett['Comment Lines'] < 5:
msg = messagebox.askokcancel("Very few comments",
"There are less than 5 comments to post." + "\n" + "Do you want to "
"continue?", icon='warning')
if msg:
check_comment()
else:
if not run.loop:
msg = messagebox.askokcancel("Duration", "The commenting will take an average of " +
str(round(obj_sett['Time'], 2)) + " minutes.")
if msg:
check_comment()
else:
check_comment()
settfi.close()
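# check_comment() marks the first run as done in firstRun.json and starts the
# actual commenting session.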
def check_comment():
global web
connected()
with open('Resource/JSON/firstRun.json', 'r') as runfil:
run__data = runfil.read()
run__obj = json.loads(run__data)
if str(run__obj['First Run?']) == "Yes":
first__run = {
'First Run?': "No",
'Agree to EULA?': "Yes"
}
with open('Resource/JSON/firstRun.json', 'w') as runfile:
json.dump(first__run, runfile)
runfil.close()
runfile.close()
runfil.close()
print(Colors.BOLD, "First Run", Colors.ENDC)
auto_comment()
else:
print(Colors.OKGREEN, "Start commenting", Colors.ENDC)
auto_comment()
runfil.close()
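# auto_comment() launches the selected webdriver, logs in to Instagram, opens
# the target post and posts comments at randomized intervals.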
def auto_comment():
global web
try:
b1_text.set("Stop")
b1["command"] = stop
except RuntimeError:
sys.exit(1)
if browser_text.get() == 'Firefox':
print(Colors.BOLD, "Using Firefox")
try:
web = webdriver.Firefox(executable_path=os.getcwd() + '/Resource/driver/geckodriver.exe')
time.sleep(5)
web.maximize_window()
with open('Resource/JSON/Browser.json', 'r') as DriFi:
driv_data = DriFi.read()
driv_obj = json.loads(driv_data)
# Save preferred browser
driv_obj['Browser'] = "Firefox"
with open('Resource/JSON/Browser.json', 'w') as BrwFi:
json.dump(driv_obj, BrwFi)
BrwFi.close()
DriFi.close()
except WebDriverException:
print(Colors.WARNING, WebDriverException, "for auto_comment()", Colors.ENDC)
messagebox.showerror("Wrong browser", "Firefox couldn't be found. Please select another browser." + '\n' +
"It is also possible that Firefox was closed accidentally.")
try:
b1_text.set("Run")
b1["command"] = threading_run
return
except RuntimeError:
print(Colors.WARNING, RuntimeError, "for auto_comment()", Colors.ENDC)
elif browser_text.get() == 'Chrome 87':
print(Colors.BOLD, "Using Chrome", Colors.ENDC)
try:
chr_opt = webdriver.ChromeOptions()
chr_opt.add_argument("--incognito")
web = webdriver.Chrome(executable_path=os.getcwd() + '/Resource/driver/chromedriver_87.exe',
options=chr_opt)
time.sleep(5)
web.maximize_window()
with open('Resource/JSON/Browser.json', 'r') as DriFi:
driv_data = DriFi.read()
driv_obj = json.loads(driv_data)
# Save preferred browser
driv_obj['Browser'] = "Chrome 87"
with open('Resource/JSON/Browser.json', 'w') as BrwFi:
json.dump(driv_obj, BrwFi)
BrwFi.close()
DriFi.close()
except WebDriverException:
print(Colors.WARNING, WebDriverException, "for auto_comment()", Colors.ENDC)
messagebox.showerror("Wrong browser", "Chrome 87 couldn't be found. Please select another browser." + '\n' +
"It is also possible that Chrome was closed accidentally.")
try:
b1_text.set("Run")
b1["command"] = threading_run
return
except RuntimeError:
print(Colors.WARNING, RuntimeError, "for auto_comment()", Colors.ENDC)
elif browser_text.get() == 'Chrome 88':
print(Colors.BOLD, "Using Chrome", Colors.ENDC)
try:
chr_opt = webdriver.ChromeOptions()
chr_opt.add_argument("--incognito")
web = webdriver.Chrome(executable_path=os.getcwd() + '/Resource/driver/chromedriver_88.exe',
options=chr_opt)
time.sleep(5)
web.maximize_window()
with open('Resource/JSON/Browser.json', 'r') as DriFi:
driv_data = DriFi.read()
driv_obj = json.loads(driv_data)
# Save preferred browser
driv_obj['Browser'] = "Chrome 88"
with open('Resource/JSON/Browser.json', 'w') as BrwFi:
json.dump(driv_obj, BrwFi)
BrwFi.close()
DriFi.close()
except WebDriverException:
print(Colors.WARNING, WebDriverException, "for auto_comment()", Colors.ENDC)
messagebox.showerror("Wrong browser", "Chrome 88 couldn't be found. Please select another browser." + '\n' +
"It is also possible that Chrome was closed accidentally.")
try:
b1_text.set("Run")
b1["command"] = threading_run
return
except RuntimeError:
print(Colors.WARNING, RuntimeError, "for auto_comment()", Colors.ENDC)
elif browser_text.get() == 'Edge 88':
print(Colors.BOLD, "Using Edge", Colors.ENDC)
try:
web = webdriver.Edge(executable_path=os.getcwd() + '/Resource/driver/edgedriver-x64-88.exe')
time.sleep(5)
web.maximize_window()
with open('Resource/JSON/Browser.json', 'r') as DriFi:
driv_data = DriFi.read()
driv_obj = json.loads(driv_data)
# Save preferred browser
driv_obj['Browser'] = "Edge 88"
with open('Resource/JSON/Browser.json', 'w') as BrwFi:
json.dump(driv_obj, BrwFi)
BrwFi.close()
DriFi.close()
except WebDriverException:
print(Colors.WARNING, WebDriverException, "for auto_comment()", Colors.ENDC)
messagebox.showerror("Wrong browser", "Edge 88 couldn't be found. Please select another browser." + '\n' +
"It is also possible that Edge was closed accidentally.")
try:
b1_text.set("Run")
b1["command"] = threading_run
return
except RuntimeError:
print(Colors.WARNING, RuntimeError, "for auto_comment()", Colors.ENDC)
elif browser_text.get() == 'Edge 89':
print(Colors.BOLD, "Using Edge", Colors.ENDC)
try:
web = webdriver.Edge(executable_path=os.getcwd() + '/Resource/driver/edgedriver-x64-89.exe')
time.sleep(5)
web.maximize_window()
with open('Resource/JSON/Browser.json', 'r') as DriFi:
driv_data = DriFi.read()
driv_obj = json.loads(driv_data)
# Save preferred browser
driv_obj['Browser'] = "Edge 89"
with open('Resource/JSON/Browser.json', 'w') as BrwFi:
json.dump(driv_obj, BrwFi)
BrwFi.close()
DriFi.close()
except WebDriverException:
print(Colors.WARNING, WebDriverException, "for auto_comment()", Colors.ENDC)
messagebox.showerror("Wrong browser", "Edge 89 couldn't be found. Please select another browser." + '\n' +
"It is also possible that Edge was closed accidentally.")
try:
b1_text.set("Run")
b1["command"] = threading_run
return
except RuntimeError:
print(Colors.WARNING, RuntimeError, "for auto_comment()", Colors.ENDC)
elif browser_text.get() == 'Edge 90':
print(Colors.BOLD, "Using Edge", Colors.ENDC)
try:
web = webdriver.Edge(executable_path=os.getcwd() + '/Resource/driver/edgedriver-x64-90.exe')
time.sleep(5)
web.maximize_window()
with open('Resource/JSON/Browser.json', 'r') as DriFi:
driv_data = DriFi.read()
driv_obj = json.loads(driv_data)
# Save preferred browser
driv_obj['Browser'] = "Edge 90"
with open('Resource/JSON/Browser.json', 'w') as BrwFi:
json.dump(driv_obj, BrwFi)
BrwFi.close()
DriFi.close()
except WebDriverException:
print(Colors.WARNING, WebDriverException, "for auto_comment()", Colors.ENDC)
messagebox.showerror("Wrong browser", "Edge 90 couldn't be found. Please select another browser." + '\n' +
"It is also possible that Edge was closed accidentally.")
try:
b1_text.set("Run")
b1["command"] = threading_run
return
except RuntimeError:
print(Colors.WARNING, RuntimeError, "for auto_comment()", Colors.ENDC)
elif browser_text.get() == 'Own Browser':
try:
with open('Resource/JSON/Browser.json', 'r') as DriFi:
driv_data = DriFi.read()
driv_obj = json.loads(driv_data)
if str(driv_obj['Own Browser Name']) == "Chrome":
web = webdriver.Chrome(executable_path=str(driv_obj['Driver Path']))
time.sleep(5)
web.maximize_window()
elif str(driv_obj['Own Browser Name']) == "Edge":
web = webdriver.Edge(executable_path=str(driv_obj['Driver Path']))
time.sleep(5)
web.maximize_window()
elif str(driv_obj['Own Browser Name']) == "Firefox":
web = webdriver.Firefox(executable_path=str(driv_obj['Driver Path']))
time.sleep(5)
web.maximize_window()
elif str(driv_obj['Own Browser Name']) == "Internet Explorer":
web = webdriver.Ie(executable_path=str(driv_obj['Driver Path']))
time.sleep(5)
web.maximize_window()
elif str(driv_obj['Own Browser Name']) == "Opera":
web = webdriver.Opera(executable_path=str(driv_obj['Driver Path']))
time.sleep(5)
web.maximize_window()
elif str(driv_obj['Own Browser Name']) == "Safari":
web = webdriver.Safari(executable_path=str(driv_obj['Driver Path']))
time.sleep(5)
web.maximize_window()
DriFi.close()
except WebDriverException:
print(Colors.WARNING, WebDriverException, "for auto_comment() in 'own Browser'", Colors.ENDC)
messagebox.showerror("Wrong browser", "The driver you selcted couldn't be found. Please select another "
"browser." + '\n' + "It is also possible that the Driver was closed "
"accidentally.")
try:
b1_text.set("Run")
b1["command"] = threading_run
return
except RuntimeError:
print(Colors.WARNING, RuntimeError, "for auto_comment()", Colors.ENDC)
else:
messagebox.showerror("Error occurred", "An error occurred with the browser selection." + '\n' +
"Please report this issue with the title ACNBxMB and use another browser.")
try:
b1_text.set("Run")
b1["command"] = threading_run
return
except RuntimeError:
sys.exit(1)
connected()
try:
web.get(url_text.get())
except InvalidArgumentException:
print(Colors.WARNING, InvalidArgumentException, "for auto_comment()", Colors.ENDC)
messagebox.showerror("No valid URL", "The URL provided is not a real link." + '\n' + "Please copy the URL "
"from the post you want "
"to comment.")
web.close(), web.quit()
try:
b1_text.set("Run")
b1["command"] = threading_run
return
except RuntimeError:
print(Colors.WARNING, RuntimeError, "for auto_comment()", Colors.ENDC)
except NameError:
messagebox.showerror("Browser not found", "The selected browser couldn't be found.", icon='error')
except WebDriverException:
print(Colors.WARNING, WebDriverException, "for auto_comment()", Colors.ENDC)
messagebox.showerror("Browser closed", "Action cancelled by user.", icon='warning')
try:
b1_text.set("Run")
b1["command"] = threading_run
return
except RuntimeError:
print(Colors.WARNING, RuntimeError, "for auto_comment()", Colors.ENDC)
except RuntimeError:
print(Colors.WARNING, RuntimeError, "for auto_comment()", Colors.ENDC)
try:
web.close(), web.quit()
b1_text.set("Run")
b1["command"] = threading_run
return
except InvalidSessionIdException:
print(Colors.WARNING, InvalidSessionIdException, "for auto_comment()", Colors.ENDC)
except (RuntimeError, MaxRetryError):
print(Colors.WARNING, RuntimeError, "Nr. 2 for auto_comment()", Colors.ENDC)
sys.exit(1)
try:
cookies = web.find_element_by_xpath('/html/body/div[4]/div/div/button[1]')
cookies.click()
except NoSuchElementException:
print(Colors.WARNING, NoSuchElementException, "for auto_comment()", Colors.ENDC)
messagebox.showerror("Wrong link", "The link does not lead (directly) to any Instagram post.")
web.close(), web.quit()
try:
b1_text.set("Run")
b1["command"] = threading_run
return
except RuntimeError:
print(Colors.WARNING, RuntimeError, "for auto_comment()", Colors.ENDC)
except (NoSuchWindowException, WebDriverException):
print(Colors.WARNING, NoSuchWindowException, "or", WebDriverException, "for auto_comment()", Colors.ENDC)
messagebox.showerror("Browser closed", "Action cancelled by user.", icon='warning')
try:
b1_text.set("Run")
b1["command"] = threading_run
return
except RuntimeError:
print(Colors.WARNING, RuntimeError, "for auto_comment()", Colors.ENDC)
except InvalidSessionIdException:
print(Colors.WARNING, InvalidSessionIdException, "for auto_comment()", Colors.ENDC)
sys.exit(1)
# Search for LogIn Button
time.sleep(10)
try:
comment = web.find_element_by_xpath('/html/body/div[1]/section/nav/div[2]/div/div/div[3]/div/div/div/div/div[3]/div[1]/a')
comment.click()
except NoSuchElementException:
print(Colors.WARNING, NoSuchElementException, "for auto_comment()", Colors.ENDC)
messagebox.showerror("Error", "Something went wrong. Does the link lead to a picture?")
web.close(), web.quit()
try:
b1_text.set("Run")
b1["command"] = threading_run
return
except RuntimeError:
print(Colors.WARNING, RuntimeError, "for auto_comment()", Colors.ENDC)
except NoSuchWindowException:
print(Colors.WARNING, NoSuchWindowException, "for auto_comment()", Colors.ENDC)
messagebox.showerror("Browser closed", "Action cancelled by user.", icon='warning')
try:
b1_text.set("Run")
b1["command"] = threading_run
return
except RuntimeError:
print(Colors.WARNING, RuntimeError, "for auto_comment()", Colors.ENDC)
except InvalidSessionIdException:
print(Colors.WARNING, InvalidSessionIdException, "for auto_comment()", Colors.ENDC)
sys.exit(1)
# Enters LogIn Data
try:
time.sleep(1)
print(Colors.BOLD, "Searching for LogIn field.", Colors.BOLD)
alias = web.find_element_by_xpath('//*[@id="loginForm"]/div/div[1]/div/label/input')
alias.send_keys(username_text.get())
pw = web.find_element_by_xpath('//*[@id="loginForm"]/div/div[2]/div/label/input')
pw.send_keys(password_text.get())
connected()
login = web.find_element_by_xpath('//*[@id="loginForm"]/div/div[3]/button')
login.click()
time.sleep(5)
except NoSuchWindowException:
print(Colors.WARNING, NoSuchWindowException, "for auto_comment()", Colors.ENDC)
messagebox.showerror("Browser closed", "Action cancelled by user.", icon='warning')
try:
b1_text.set("Run")
b1["command"] = threading_run
return
except RuntimeError:
print(Colors.WARNING, RuntimeError, "for auto_comment()", Colors.ENDC)
except NoSuchElementException:
print(Colors.WARNING, NoSuchElementException, "for auto_comment()", Colors.ENDC)
messagebox.showerror("Error", "Something went wrong. Please restart the program.")
try:
b1_text.set("Run")
b1["command"] = threading_run
return
except RuntimeError:
print(Colors.WARNING, RuntimeError, "for auto_comment()", Colors.ENDC)
except InvalidSessionIdException:
print(Colors.WARNING, InvalidSessionIdException, "for auto_comment()", Colors.ENDC)
sys.exit(1)
try:
web.find_element_by_css_selector('#slfErrorAlert')
messagebox.showerror("Wrong information", "Your username and / or your password was wrong.")
web.close(), web.quit()
try:
b1_text.set("Run")
b1["command"] = threading_run
return
except RuntimeError:
print(Colors.WARNING, RuntimeError, "for auto_comment()", Colors.ENDC)
except InvalidSessionIdException:
print(Colors.WARNING, InvalidSessionIdException, "for auto_comment()", Colors.ENDC)
sys.exit(1)
except NoSuchWindowException:
print(Colors.WARNING, NoSuchWindowException, "for auto_comment()", Colors.ENDC)
messagebox.showerror("Browser closed", "Action cancelled by user.", icon='warning')
try:
b1_text.set("Run")
b1["command"] = threading_run
return
except RuntimeError:
print(Colors.WARNING, RuntimeError, "for auto_comment()", Colors.ENDC)
except NoSuchElementException:
web.get(url_text.get())
print(Colors.WARNING, NoSuchElementException, "for auto_comment()", Colors.ENDC)
web.find_element_by_css_selector('.sqdOP')
'''
try:
cookieClick = web.find_element_by_xpath('/html/body/div[4]/div/div/button[2]')
cookieClick.click()
except NoSuchElementException:
print(Colors.WARNING, NoSuchElementException, "for auto_comment() - Error when searching cookie field", Colors.ENDC)
messagebox.showerror("Error", "Something went wrong. Please restart the program.")
sys.exit(1)'''
# Search for comment field
print("Searching for comment field.")
try:
time.sleep(1)
svin = web.find_element_by_xpath('/html/body/div[1]/section/main/div/div/article/div[3]/section[3]/div/form/textarea')
print(Colors.OKGREEN, "Found target", Colors.ENDC)
svin.click()
time.sleep(1)
with open('Resource/JSON/settings.json', 'r') as setfil:
data_set = setfil.read()
obj_set = json.loads(data_set)
try:
comfi = open(str(obj_set['commentsPath'])).readlines()
except UnicodeDecodeError:
messagebox.showerror("No emojis", "Sorry, emojis aren't supported currently :( \n Please change your comments.")
try:
b1_text.set("Run")
b1["command"] = threading_run
return
except RuntimeError:
sys.exit(1)
setfil.close()
run.last_comment = 0
def comment():
try:
for _ in comfi:
line = random.choice(comfi)
if line.strip() == run.last_comment:
line = random.choice(comfi)
while line.startswith('!'):
line = random.choice(comfi)
if line.strip() == run.last_comment:
line = random.choice(comfi)
print(Colors.BOLD, "Posting comment: " + line.strip(), Colors.ENDC)
with open('Resource/JSON/settings.json', 'r') as settfi:
data_json = settfi.read()
obj_sett = json.loads(data_json)
zeit = random.randint(20, int(obj_sett['Max Y']))
print(Colors.BOLD, "Time for the next comment: " + str(zeit), Colors.ENDC)
try:
select = web.find_element_by_xpath(
'//*[@id="react-root"]/section/main/div/div[1]/article/div[3]/section['
'3]/div/form/textarea')
select.click()
time.sleep(1)
text = web.find_element_by_css_selector('.Ypffh')
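# NOTE: this "-with-emoji" variant sends the emoji itself; the randomly
# chosen line is only logged and remembered for duplicate avoidance.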
text.send_keys('😍')
run.last_comment = line.strip()
connected()
text.send_keys(Keys.ENTER)
time.sleep(zeit)
except InvalidSessionIdException:
print(Colors.WARNING, InvalidSessionIdException, "for auto_comment()", Colors.ENDC)
sys.exit(1)
except (MaxRetryError, TypeError):  # HTTPConnectionPool is not an exception class; MaxRetryError is the actual urllib3 error
print(Colors.WARNING, " Exception in auto_comment()", Colors.ENDC)
sys.exit(1)
if run.loop:
while True:
comment()
else:
comment()
web.close(), web.quit()
b1_text.set("Run")
b1["command"] = threading_run
messagebox.showinfo("Finished", "All comments are posted.")
except NoSuchElementException:
# web.close(), web.quit()
print(Colors.FAIL, NoSuchElementException, "for auto_comment()", Colors.ENDC)
print(Colors.FAIL, "Please report this error here: "
"https://github.com/JueK3y/Instagram-automated-commenting/issues/new. Error Code: "
"ERR_IG_COM_ISS", Colors.ENDC)
b1_text.set("Run")
b1["command"] = threading_run
messagebox.showerror("Slow internet connection", "Please retry! Make sure you have a good Internet "
"connection." + "\n" + "If the error occurs again, "
"please create an issue via "
"'Settings -> Help' with the title "
"ERR_IG_COM_ISS.")
def sel_bro(value):
driver_path = askopenfilename(filetypes=(("* .exe", "*.exe"), ("All Files", "*.*")))
if driver_path:
messagebox.showinfo("Success", "The Driver has been added." + "\n" + "The program will restart now.")
with open('Resource/JSON/Browser.json', 'r') as DriFi:
dri_data = DriFi.read()
dri_obj = json.loads(dri_data)
dri_obj['Own Browser Name'] = value
print(value)
dri_obj['Driver Path'] = driver_path
print(driver_path)
with open('Resource/JSON/Browser.json', 'w') as DrivFi:
json.dump(dri_obj, DrivFi)
DriFi.close()
DrivFi.close()
restart()
def threading_browser():
t1 = Thread(target=import_browser)
t1.start()
def import_browser():
msg = messagebox.askokcancel("Import webdriver",
"Here you can import the driver for your browser." + "\n" + "Use this only if you "
"have experience with "
"Selenium." + "\n" +
"Google 'Selenium web driver' for more information.", icon='info')
if msg:
browserWin = Toplevel(root)
browserWin.title("Browser selection | AC")
browserWin.geometry('200x120'), browserWin.wm_attributes("-topmost", 1), browserWin.resizable(False, False)
try:
browserWin.iconbitmap('Resource/IAC-Icon.ico')
except TclError:
check_content()
if light:
browserWin['background'] = '#F5F6F7'
elif dark:
browserWin['background'] = '#464646'
else:
print(Colors.FAIL, "Uhh, this wasn't supposed happen.", Colors.ENDC)
restart()
ttk.Label(browserWin, text="Select the Browser to import").place(x=25, y=10)
List = ["Choose...", "Chrome", "Edge", "Firefox", "IE", "Opera", "Safari"]
txt = StringVar()
ttk.OptionMenu(browserWin, txt, *List, command=sel_bro).place(x=45, y=40, width=110)
ttk.Button(browserWin, text="Back", command=browserWin.destroy).place(x=45, y=80, width=110)
def threading_settings():
t1 = Thread(target=settings)
t1.start()
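# settings() builds the settings window: appearance, HQ mode, comment timing,
# comment looping, comment editing/import and custom browser drivers.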
def settings():
settingsWin = Toplevel(root)
settingsWin.title("Settings | AC")
settingsWin.geometry('350x250'), settingsWin.wm_attributes("-topmost", 1), settingsWin.resizable(False, False)
try:
settingsWin.iconbitmap('Resource/IAC-Icon.ico')
except TclError:
check_content()
settings.scale = 0
if light:
settingsWin['background'] = '#F5F6F7'
elif dark:
settingsWin['background'] = '#464646'
else:
print(Colors.FAIL, "Uhh, this wasn't supposed happen.", Colors.ENDC)
restart()
def app_light():
with open('Resource/JSON/settings.json', 'r') as settingfile:
data_sett = settingfile.read()
obj_setti = json.loads(data_sett)
if str(obj_setti['lightMode']) == 'no':
msg = messagebox.askokcancel("Light Mode", "In order to activate the light mode," + '\n' + "the program "
"is restarted.")
if msg:
obj_setti['lightMode'] = 'yes'
obj_setti['darkMode'] = 'no'
print(Colors.OKGREEN, "Using light Mode", Colors.ENDC)
with open('Resource/JSON/settings.json', 'w') as settfile:
json.dump(obj_setti, settfile)
settingfile.close()
settfile.close()
settingsWin.destroy()
restart()
elif str(obj_setti['lightMode']) == 'yes':
msg = messagebox.askyesno("Light Mode", "The light mode has already been activated." + '\n' + "Do you want "
"to reapply "
"it?")
if msg:
msg = messagebox.askokcancel("Light Mode", "In order to activate the light mode," + '\n' + "the "
"program "
"is "
"restarted.")
if msg:
obj_setti['lightMode'] = 'yes'
obj_setti['darkMode'] = 'no'
print(Colors.OKGREEN, "Using light Mode", Colors.ENDC)
with open('Resource/JSON/settings.json', 'w') as settfile:
json.dump(obj_setti, settfile)
settingfile.close()
settfile.close()
settingsWin.destroy()
restart()
elif not msg:
return
else:
msg = messagebox.askyesno("File corrupted",
"Hm, the file for the light mode seems to be corrupted." + '\n' +
"Do you want to download it again?")
if msg:
shutil.rmtree("Resource/JSON")
mk_files()
settingfile.close()
def app_dark():
with open('Resource/JSON/settings.json', 'r') as settingfile:
data_sett = settingfile.read()
obj_setti = json.loads(data_sett)
if str(obj_setti['darkMode']) == 'no':
msg = messagebox.askokcancel("Dark Mode",
"In order to activate the dark mode," + '\n' + "the program is restarted.")
if msg:
obj_setti['lightMode'] = 'no'
obj_setti['darkMode'] = 'yes'
print(Colors.OKGREEN, "Using Dark Mode", Colors.ENDC)
with open('Resource/JSON/settings.json', 'w') as settfile:
json.dump(obj_setti, settfile)
settingfile.close()
settfile.close()
settingsWin.destroy()
restart()
elif str(obj_setti['darkMode']) == 'yes':
msg = messagebox.askyesno("Dark Mode", "The dark mode has already been activated." + '\n' + "Do you want "
"to reapply "
"it?")
if msg:
msg = messagebox.askokcancel("Dark Mode",
"In order to activate the dark mode," + '\n' + "the program is restarted.")
if msg:
obj_setti['lightMode'] = 'no'
obj_setti['darkMode'] = 'yes'
print(Colors.OKGREEN, "Using Dark Mode", Colors.ENDC)
with open('Resource/JSON/settings.json', 'w') as settfile:
json.dump(obj_setti, settfile)
settingfile.close()
settfile.close()
settingsWin.destroy()
restart()
elif not msg:
return
else:
msg = messagebox.askyesno("File corrupted", "Hm, the file for the dark mode seems to be corrupted." + '\n' +
"Do you want to download it again?")
if msg:
shutil.rmtree("Resource/JSON")
mk_files()
settingfile.close()
def hqm():
with open('Resource/JSON/settings.json', 'r') as settingfile:
data_sett = settingfile.read()
obj_setti = json.loads(data_sett)
if hqm_var == 1:
msg = messagebox.askokcancel("Activate HQ mode", "Use this function if the program is displayed blurred." +
'\n' + "Activation may cause elements to be moved." +
'\n' + "The program will restart itself.", icon="info")
if msg:
obj_setti['HQM'] = "Activated"
with open('Resource/JSON/settings.json', 'w') as settfil:
json.dump(obj_setti, settfil)
settingfile.close()
settfil.close()
restart()
elif hqm_var == 0:
msg = messagebox.askokcancel("De-activate HQ mode", "The function will be de-activated." +
'\n' + "The program will restart itself.", icon="info")
if msg:
obj_setti['HQM'] = ""
with open('Resource/JSON/settings.json', 'w') as settfil:
json.dump(obj_setti, settfil)
settingfile.close()
settfil.close()
restart()
def change_max_y(v):
try:
line_count()
with open('Resource/JSON/settings.json', 'r') as settingfile:
dat_json = settingfile.read()
obj_setting = json.loads(dat_json)
if str(obj_setting['Looping comments?']):
if not settings.scale:
messagebox.showinfo("", "The 'Looping Comments' mode has been activated." + "\n" +
"You can only set the average time when this mode is deactivated.")
settings.scale = 1
else:
max_y = int(15 * (float(v) + 1) + 21)
obj_setting['Max Y'] = str(max_y + 20)
average = (max_y / 60) * float(obj_setting['Comment Lines'])
la.config(text='Average duration: ' + str(round(average, 2)) + 'min')
la.place(x=24, y=67)
with open('Resource/JSON/settings.json', 'w') as settfile:
json.dump(obj_setting, settfile)
settingfile.close()
settfile.close()
return
except FileNotFoundError:
if not settings.scale:
ask_file()
settings.scale = 1
def loop_com():
with open('Resource/JSON/settings.json', 'r') as settingfile:
data_sett = settingfile.read()
obj_setti = json.loads(data_sett)
if loop_var == 1:
msg = messagebox.askokcancel("Looping comments", "This function repeats your sentences when the program "
"reaches the end of the file." +
'\n' + "You can stop commenting at any time by clicking 'Stop'." +
'\n' + "The program restarts itself now.", icon="info")
if msg:
obj_setti['Looping comments?'] = True
with open('Resource/JSON/settings.json', 'w') as settfil:
json.dump(obj_setti, settfil)
settingfile.close()
settfil.close()
restart()
elif loop_var == 0:
msg = messagebox.askokcancel("De-activate Repeating", "Your comments will no longer be repeated." +
'\n' + "The program restarts itself now.", icon="info")
if msg:
obj_setti['Looping comments?'] = ""
with open('Resource/JSON/settings.json', 'w') as settfil:
json.dump(obj_setti, settfil)
settingfile.close()
settfil.close()
restart()
def edit_com():
with open('Resource/JSON/settings.json', 'r') as settingfile:
data_sett = settingfile.read()
obj_setti = json.loads(data_sett)
f_comm = pathlib.Path(str(obj_setti['commentsPath']))
if not f_comm.exists():
if not pathlib.Path('Resource/txt/comments.txt').exists():
comment = tk.messagebox.askyesno('No comments', "You don't have any comments to edit." + '\n' +
"Do you want to create some now?", icon='info')
if comment:
obj_setti['commentsPath'] = 'Resource/txt/comments.txt'
comment_txt = open("Resource/txt/comments.txt", "a")
comment_txt.write("! Write only one comment per line. Comments with '!' at the beginning will be "
"ignored.")
comment_txt.close()
programName = "notepad.exe"
fileName = str(obj_setti['commentsPath'])
sp.Popen([programName, fileName])
settingsWin.update()
root.update()
return
else:
obj_setti['commentsPath'] = 'Resource/txt/comments.txt'
programName = "notepad.exe"
fileName = 'Resource/txt/comments.txt'
sp.Popen([programName, fileName])
settingsWin.update()
root.update()
return
else:
programName = "notepad.exe"
fileName = str(obj_setti['commentsPath'])
sp.Popen([programName, fileName])
settingsWin.update()
root.update()
return
settingfile.close()
def import_com():
commentspath = askopenfilename(filetypes=(("* .txt", "*.txt"), ("All Files", "*.*")))
if commentspath:
messagebox.showinfo("Success", "Your .txt file has been added to the comments.")
with open('Resource/JSON/settings.json', 'r') as settingfile:
data_sett = settingfile.read()
obj_setti = json.loads(data_sett)
obj_setti['commentsPath'] = commentspath
with open('Resource/JSON/settings.json', 'w') as settfile:
json.dump(obj_setti, settfile)
settingfile.close()
settfile.close()
def set_help():
webbrowser.open_new(r"https://github.com/JueK3y/Instagram-automated-commenting/wiki/Help")
# Content
# 1. line
ttk.Label(settingsWin, text="Appearance").place(x=59, y=5)
if light:
sw_appearance = tk.StringVar(value='lightMode')
else:
sw_appearance = tk.StringVar(value='darkMode')
ttk.Radiobutton(settingsWin, text="Light", variable=sw_appearance, value="lightMode", command=app_light). \
place(x=34, y=29, width=70)
ttk.Radiobutton(settingsWin, text="Dark", variable=sw_appearance, value="darkMode", command=app_dark). \
place(x=94, y=29, width=70)
ttk.Label(settingsWin, text="High quality mode").place(x=208, y=5)
with open('Resource/JSON/settings.json', 'r') as setfil:
data_json = setfil.read()
obj_sett = json.loads(data_json)
if str(obj_sett['HQM']) == "Activated":
ttk.Checkbutton(settingsWin, text="HQ Mode activated", variable=IntVar(value=1), command=hqm).place(x=203, y=30)
hqm_var = 0
else:
ttk.Checkbutton(settingsWin, text="Activate HQM", command=hqm).place(x=201, y=30)
hqm_var = 1
setfil.close()
# 2. Lines
try:
with open('Resource/JSON/settings.json', 'r') as setfil:
data_json = setfil.read()
obj_sett = json.loads(data_json)
if pathlib.Path(str(obj_sett['commentsPath'])).exists() and not str(obj_sett['Looping comments?']):
la = ttk.Label(settingsWin, text='Average duration: ' + str(round((((int(obj_sett['Max Y']) - 20) / 60) *
float(obj_sett['Comment Lines'])),
2)) + 'min')
la.place(x=24, y=67)
else:
la = ttk.Label(settingsWin, text='Average duration')
la.place(x=45, y=67)
setfil.close()
except KeyError:
la = ttk.Label(settingsWin, text='Average duration')
la.place(x=45, y=67)
ttk.Scale(settingsWin, orient=tk.HORIZONTAL, from_=0, to=4, length=110, command=change_max_y). \
place(x=39, y=98, width=110)
ttk.Label(settingsWin, text="Loop comments").place(x=214, y=67)
with open('Resource/JSON/settings.json', 'r') as setfil:
data_json = setfil.read()
obj_sett = json.loads(data_json)
if str(obj_sett['Looping comments?']):
ttk.Checkbutton(settingsWin, text="Looping comments", variable=IntVar(value=1), command=loop_com).place(x=201,
y=92)
loop_var = 0
else:
ttk.Checkbutton(settingsWin, text="Repeat comments", command=loop_com).place(x=201, y=92)
loop_var = 1
# 3. Line
ttk.Label(settingsWin, text="Comments").place(x=61, y=129)
ttk.Button(settingsWin, text="Edit", command=edit_com).place(x=36, y=151, width=50)
ttk.Button(settingsWin, text="Import", command=import_com).place(x=86, y=151, width=60)
ttk.Label(settingsWin, text="More Browser").place(x=221, y=129)
ttk.Button(settingsWin, text="Import", command=threading_browser).place(x=204, y=151, width=110)
# 4. Line
ttk.Button(settingsWin, text="Help", command=set_help).place(x=36, y=200, width=110)
ttk.Button(settingsWin, text="Back", command=settingsWin.destroy).place(x=204, y=200, width=110)
def check_alive():
try:
web.title
return True
except (InvalidSessionIdException, MaxRetryError, NameError):
return False
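# close() asks for confirmation, then tears down the GUI and any running
# webdriver session.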
def close():
if check_alive():
msg_box = tk.messagebox.askquestion('Exit Application', 'Are you sure you want to exit the application?',
icon='warning')
if msg_box == 'yes':
root.destroy()
try:
web.close(), web.quit()
sys.exit(1)
except (NameError, InvalidSessionIdException, WebDriverException, TclError):
print(Colors.WARNING, "An exception in close() occurred", Colors.ENDC)
sys.exit(1)
else:
return
else:
root.destroy()
sys.exit(1)
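# stop() aborts a running commenting session after confirmation and resets the
# Run button.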
def stop():
msg_box = tk.messagebox.askquestion('Stop Commenting', 'Are you sure you want to stop commenting?',
icon='warning')
if msg_box == 'yes':
try:
web.close(), web.quit()
try:
b1_text.set("Run")
b1["command"] = threading_run
return
except RuntimeError:
print(Colors.WARNING, RuntimeError, "for stop()", Colors.ENDC)
except (NameError, InvalidSessionIdException, WebDriverException):
print(Colors.WARNING, " An exception in stop() occurred", Colors.ENDC)
return
else:
return
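# restart() relaunches the script in a fresh interpreter process.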
def restart():
print(Colors.OKGREEN, "The program restarts now", Colors.ENDC)
root.destroy()
# Replace the current process with a fresh interpreter running this script;
# argv[0] must be the executable itself for os.execl.
os.execl(sys.executable, sys.executable, os.path.abspath(__file__), *sys.argv[1:])
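# check_content() verifies that all required folders, drivers and JSON files
# exist and re-downloads anything that is missing.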
def check_content():
d_Resource = pathlib.Path("Resource")
f_icon = pathlib.Path("Resource/IAC-Icon.ico")
d_txt = pathlib.Path("Resource/txt")
f_eula = pathlib.Path("Resource/txt/EULA.txt")
d_driver = pathlib.Path("Resource/driver")
f_gecko = pathlib.Path("Resource/driver/geckodriver.exe")
f_chrome_87 = pathlib.Path("Resource/driver/chromedriver_87.exe")
f_chrome_88 = pathlib.Path("Resource/driver/chromedriver_88.exe")
d_JSON = pathlib.Path("Resource/JSON")
f_login = pathlib.Path("Resource/JSON/LogIn.json")
f_url = pathlib.Path("Resource/JSON/URLhistory.json")
f_run = pathlib.Path("Resource/JSON/firstRun.json")
f_set = pathlib.Path("Resource/JSON/settings.json")
try:
if d_Resource.exists():
if d_driver.exists():
if d_JSON.exists():
if d_txt.exists() & f_run.exists() & f_login.exists() & f_url.exists() & f_set.exists() & \
f_gecko.exists() & f_chrome_87.exists() & f_chrome_88.exists() & f_eula.exists() & f_icon. \
exists():
print(Colors.OKGREEN, "All files are downloaded", Colors.ENDC)
else:
msg_box = messagebox.askokcancel("Creating files", "Some files are being downloaded. This will "
"take some time.")
if msg_box:
print(Colors.BOLD, "Downloading files...", Colors.ENDC)
shutil.rmtree("Resource")
mk_folder()
dow_driver()
exe_driver()
mk_files()
restart()
sys.exit(1)
else:
print(Colors.BOLD, "Download canceled by user", Colors.ENDC)
sys.exit()
else:
if f_gecko.exists() & f_chrome_87.exists() & f_chrome_88.exists() & f_eula.exists() & f_icon. \
exists():
msg_box = messagebox.askokcancel("Creating files",
"Some files are being created. This will take some time.")
if msg_box:
print(Colors.BOLD, "Creating JSON files...", Colors.ENDC)
try:
shutil.rmtree("Resource/JSON")
mk_folder_2()
mk_files()
restart()
sys.exit(1)
except FileNotFoundError:
mk_folder_2()
mk_files()
restart()
else:
print(Colors.BOLD, "Download canceled by user", Colors.ENDC)
sys.exit(1)
else:
msg_box = messagebox.askokcancel("Creating files",
"Some files are being downloaded. This will take some time.")
if msg_box:
print(Colors.BOLD, "Downloading files...", Colors.ENDC)
shutil.rmtree("Resource")
mk_folder()
dow_driver()
exe_driver()
mk_files()
restart()
sys.exit(1)
else:
print(Colors.BOLD, "Download canceled by user", Colors.ENDC)
sys.exit(1)
else:
msg_box = messagebox.askokcancel("Creating files",
"Some files are being downloaded. This will take some time.")
if msg_box:
print(Colors.BOLD, "Downloading files...", Colors.ENDC)
shutil.rmtree("Resource")
mk_folder()
dow_driver()
exe_driver()
mk_files()
restart()
sys.exit(1)
else:
print(Colors.BOLD, "Download canceled by user", Colors.ENDC)
sys.exit(1)
else:
msg_box = messagebox.askokcancel("Creating files",
"Some files are being downloaded. This will take some time.")
if msg_box:
print(Colors.BOLD, "Downloading files...", Colors.ENDC)
mk_folder()
dow_driver()
exe_driver()
mk_files()
restart()
else:
print(Colors.BOLD, "Download canceled by user", Colors.ENDC)
sys.exit(1)
except PermissionError:
messagebox.showerror("Permission Error",
"Restart the program with administrator rights." + "\n" +
"Reinstall the program if the error keeps occurring.")
def mk_folder():
# Make Resource folder
res_dir = os.getcwd()
directory_name = "Resource"
json_path = os.path.join(res_dir, directory_name)
os.mkdir(json_path)
# Make driver folder
driver_dir = os.getcwd() + '/Resource'
directory_name = "driver"
json_path = os.path.join(driver_dir, directory_name)
os.mkdir(json_path)
# Make JSON folder
json_dir = os.getcwd() + '/Resource'
directory_name = "JSON"
json_path = os.path.join(json_dir, directory_name)
os.mkdir(json_path)
# Make txt folder
txt_dir = os.getcwd() + '/Resource'
directory_name = "txt"
txt_path = os.path.join(txt_dir, directory_name)
os.mkdir(txt_path)
return
def mk_folder_2():
# Make JSON folder
json_dir = os.getcwd() + '/Resource'
directory_name = "JSON"
json_path = os.path.join(json_dir, directory_name)
os.mkdir(json_path)
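# dow_driver() downloads the webdriver archives, the EULA text and the
# program icon into the Resource tree.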
def dow_driver():
connected()
gecko = "https://github.com/mozilla/geckodriver/releases/download/v0.28.0/geckodriver-v0.28.0-win64.zip"
chr87 = "https://chromedriver.storage.googleapis.com/87.0.4280.88/chromedriver_win32.zip"
chr88 = "https://chromedriver.storage.googleapis.com/88.0.4324.27/chromedriver_win32.zip"
edg88 = "https://msedgedriver.azureedge.net/88.0.705.56/edgedriver_win64.zip"
edg89 = "https://msedgedriver.azureedge.net/89.0.774.18/edgedriver_win64.zip"
edg90 = "https://msedgedriver.azureedge.net/90.0.787.0/edgedriver_win64.zip"
EULA = "https://juek3y.com/src/download/txt/End%20User%20License%20Agreement%20for%20IAC.txt"
icon = "https://juek3y.com/src/download/img/IAC-Icon-Ver.-2.ico"
try:
tk.Label(root, text="Downloading files...").place(x=160, y=35)
root.update()
a = requests.get(gecko)
root.update()
b = requests.get(chr87)
root.update()
c = requests.get(chr88)
root.update()
d = requests.get(edg88)
root.update()
ea = requests.get(edg89)
root.update()
f = requests.get(edg90)
root.update()
g = requests.get(EULA)
root.update()
h = requests.get(icon)
root.update()
with open("Resource/driver/geckodriver.zip", 'wb') as gec:
gec.write(a.content)
gec.close()
with open("Resource/driver/chromedriver-87.zip", 'wb') as c87:
c87.write(b.content)
c87.close()
with open("Resource/driver/chromedriver-88.zip", 'wb') as c88:
c88.write(c.content)
c88.close()
with open("Resource/driver/edgedriver-x64-88.zip", 'wb') as edg88:
edg88.write(d.content)
edg88.close()
with open("Resource/driver/edgedriver-x64-89.zip", 'wb') as edg89:
edg89.write(ea.content)
edg89.close()
with open("Resource/driver/edgedriver-x64-90.zip", 'wb') as edg90:
edg90.write(f.content)
edg90.close()
with open("Resource/txt/EULA.txt", 'wb') as eul:
eul.write(g.content)
eul.close()
with open("Resource/IAC-Icon.ico", "wb") as ico:
ico.write(h.content)
root.iconbitmap('Resource/IAC-Icon.ico')
ico.close()
return
except (requests.exceptions.ConnectionError, requests.exceptions.ChunkedEncodingError):
print(Colors.FAIL, "An exception in dow_driver() occurred", Colors.ENDC)
print(Colors.FAIL, "This is an time-out error! Please restart the program and try it again.", Colors.ENDC)
messagebox.showerror("Time out", "Something went wrong when downloading the files. Please restart the program.")
def exe_driver():
# Firefox
with ZipFile('Resource/driver/geckodriver.zip', 'r') as zipObj:
zipObj.extractall('Resource/driver')
zipObj.close()
# Chrome
with ZipFile('Resource/driver/chromedriver-87.zip', 'r') as zipObj:
zipObj.extractall('Resource/driver')
old_file_name = "Resource/driver/chromedriver.exe"
new_file_name = "Resource/driver/chromedriver_87.exe"
os.rename(old_file_name, new_file_name)
zipObj.close()
with ZipFile('Resource/driver/chromedriver-88.zip', 'r') as zipObj:
zipObj.extractall('Resource/driver')
old_file_name = "Resource/driver/chromedriver.exe"
new_file_name = "Resource/driver/chromedriver_88.exe"
os.rename(old_file_name, new_file_name)
zipObj.close()
# Edge
with ZipFile('Resource/driver/edgedriver-x64-88.zip', 'r') as zipObj:
zipObj.extractall('Resource/driver')
old_file_name = "Resource/driver/msedgedriver.exe"
new_file_name = "Resource/driver/edgedriver-x64-88.exe"
os.rename(old_file_name, new_file_name)
zipObj.close()
with ZipFile('Resource/driver/edgedriver-x64-89.zip', 'r') as zipObj:
zipObj.extractall('Resource/driver')
old_file_name = "Resource/driver/msedgedriver.exe"
new_file_name = "Resource/driver/edgedriver-x64-89.exe"
os.rename(old_file_name, new_file_name)
zipObj.close()
with ZipFile('Resource/driver/edgedriver-x64-90.zip', 'r') as zipObj:
zipObj.extractall('Resource/driver')
old_file_name = "Resource/driver/msedgedriver.exe"
new_file_name = "Resource/driver/edgedriver-x64-90.exe"
os.rename(old_file_name, new_file_name)
zipObj.close()
os.remove("Resource/driver/geckodriver.zip")
os.remove("Resource/driver/chromedriver-87.zip")
os.remove("Resource/driver/chromedriver-88.zip")
os.remove("Resource/driver/edgedriver-x64-88.zip")
os.remove("Resource/driver/edgedriver-x64-89.zip")
os.remove("Resource/driver/edgedriver-x64-90.zip")
shutil.rmtree("Resource/driver/Driver_Notes")
return
# Make files inside JSON folder
def mk_files():
# Generating LogIn.json
login = {
'Username': "",
'Password': ""
}
with open('Resource/JSON/LogIn.json', 'w') as lginfi:
json.dump(login, lginfi)
lginfi.close()
# Generating URLhistory.json
safe_url = {
'Last URL': ""
}
with open('Resource/JSON/URLhistory.json', 'w') as urlfi:
json.dump(safe_url, urlfi)
urlfi.close()
# Generating firstRun.json
first__run = {
'First Run?': "Yes",
'Agree to EULA?': "No"
}
with open('Resource/JSON/firstRun.json', 'w') as runfil:
json.dump(first__run, runfil)
runfil.close()
# Generating Browser.json
browser = {
'Browser': "",
'Driver Path': "",
"Own Browser Name": ""
}
with open('Resource/JSON/Browser.json', 'w') as brofil:
json.dump(browser, brofil)
brofil.close()
# Generating Settings.json
sett = {
'commentsPath': "Resource/txt/comments.txt",
'lightMode': "yes",
'darkMode': "no",
'Max Y': 86,
'HQM': "",
"Looping comments?": ""
}
with open('Resource/JSON/settings.json', 'w') as setfil:
json.dump(sett, setfil)
setfil.close()
return
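# check_json() validates every JSON config file and rebuilds the Resource
# tree if any of them is missing or corrupted.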
def check_json():
try:
with open('Resource/JSON/firstRun.json', 'r') as json_file:
data_json = json_file.read()
obj_json = json.loads(data_json)
str(obj_json['First Run?'])
str(obj_json['Agree to EULA?'])
json_file.close()
except KeyError:
print(Colors.WARNING, "firstRun file error", Colors.ENDC)
shutil.rmtree("Resource/JSON")
check_content()
except FileNotFoundError:
check_content()
try:
with open('Resource/JSON/LogIn.json', 'r') as json_file:
data_json = json_file.read()
obj_json = json.loads(data_json)
str(obj_json['Username'])
str(obj_json['Password'])
json_file.close()
except KeyError:
print(Colors.WARNING, "LogIn file error", Colors.ENDC)
shutil.rmtree("Resource/JSON")
check_content()
except FileNotFoundError:
check_content()
try:
with open('Resource/JSON/Browser.json', 'r') as json_file:
data_json = json_file.read()
obj_json = json.loads(data_json)
str(obj_json['Browser'])
str(obj_json['Driver Path'])
str(obj_json['Own Browser Name'])
json_file.close()
except KeyError:
print(Colors.WARNING, "Browser file error", Colors.ENDC)
shutil.rmtree("Resource/JSON")
check_content()
except FileNotFoundError:
check_content()
try:
with open('Resource/JSON/settings.json', 'r') as json_file:
data_json = json_file.read()
obj_json = json.loads(data_json)
str(obj_json['commentsPath'])
str(obj_json['lightMode'])
str(obj_json['darkMode'])
str(obj_json['Max Y'])
str(obj_json['HQM'])
str(obj_json['Looping comments?'])
json_file.close()
except KeyError:
print(Colors.WARNING, "settings file error", Colors.ENDC)
shutil.rmtree("Resource/JSON")
check_content()
except FileNotFoundError:
check_content()
try:
with open('Resource/JSON/URLhistory.json', 'r') as json_file:
data_json = json_file.read()
obj_json = json.loads(data_json)
str(obj_json['Last URL'])
json_file.close()
except KeyError:
print(Colors.WARNING, "URLhistory file error", Colors.ENDC)
shutil.rmtree("Resource/JSON")
check_content()
except FileNotFoundError:
check_content()
# Start of the actual program
try:
with open('Resource/JSON/settings.json', 'r') as setfi:
data = setfi.read()
obj = json.loads(data)
if str(obj['lightMode']) == "yes":
light = True
dark = False
root = ThemedTk(theme="arc")
root['background'] = '#F5F6F7'
print(Colors.BOLD, "Using Light Mode", Colors.ENDC)
setfi.close()
elif str(obj['darkMode']) == "yes":
light = False
dark = True
root = ThemedTk(theme="equilux")
root['background'] = '#464646'
print(Colors.BOLD, "Using Dark Mode", Colors.ENDC)
setfi.close()
else:
root = ThemedTk(theme="yaru")
light = True
dark = False
root.title("Automated Commenting")
check_content()
if str(obj['HQM']) == "Activated":
import ctypes
ctypes.windll.shcore.SetProcessDpiAwareness(1)
print(Colors.OKGREEN, "Using HQ Mode", Colors.ENDC)
setfi.close()
except FileNotFoundError:
root = ThemedTk(theme="yaru")
root.geometry("440x105")
light = True
dark = False
root.title("Automated Commenting")
check_content()
except KeyError:
root = tk.Tk()
root.geometry("440x105")
light = True
dark = False
root.title("Automated Commenting")
shutil.rmtree("Resource/JSON")
check_content()
screen_width, screen_height = 440, 105
try:
root.geometry(str(screen_width) + "x" + str(screen_height))
except TclError:
print(Colors.WARNING, "This error occurs only during the first run. You can ignore it.", Colors.ENDC)
x_Left = int(root.winfo_screenwidth() / 2 - screen_width / 2)
y_Top = int(root.winfo_screenheight() / 2 - screen_height / 2)
root.geometry("+{}+{}".format(x_Left, y_Top))
root.title("Automated Commenting"), root.resizable(False, False)
try:
root.iconbitmap('Resource/IAC-Icon.ico')
except TclError:
check_content()
check_content()
check_json()
exit_program = False
e = datetime.datetime.now()
eula_file()
root.update()
add_msg = "\nIf something is not working correctly for you, please report it under Settings --> Help.\n"
messagebox.showwarning("Educational purpose only", "This program was written for educational purposes only.\nPlease "
"use it accordingly!" + '\n' + add_msg + "\n\n@2020 - %s" % e.year +
" by JueK3y")
# Label
li = ttk.Label(root, text="Post URL")
li.grid(row=0, column=0)
li = ttk.Label(root, text="Username")
li.grid(row=0, column=2)
li = ttk.Label(root, text="Browser")
li.grid(row=1, column=0)
li = ttk.Label(root, text="Password")
li.grid(row=1, column=2)
with open('Resource/JSON/URLhistory.json', 'r') as URLFi:
data = URLFi.read()
obj = json.loads(data)
# Input
url_text = StringVar()
e1 = ttk.Entry(root, textvariable=url_text)
e1.insert(0, str(obj['Last URL']))
e1.grid(row=0, column=1)
URLFi.close()
try:
with open('Resource/JSON/Browser.json', 'r') as BroFi:
data = BroFi.read()
obj_b = json.loads(data)
if str(obj_b['Own Browser Name']) != "":
OptionList = [str(obj_b['Own Browser Name'] + " (Own)"), str(obj_b['Own Browser Name'] + " (Own)"), "Firefox",
"Chrome 87", "Chrome 88", "Edge 88", "Edge 89", "Edge 90"]
else:
if str(obj_b['Browser']) == "Chrome 87":
OptionList = ["Chrome 87", "Chrome 87", "Chrome 88", "Edge 88", "Edge 89", "Edge 90", "Firefox"]
elif str(obj_b['Browser']) == "Chrome 87":
OptionList = ["Chrome 88", "Chrome 88", "Chrome 87", "Edge 90", "Edge 89", "Edge 88", "Firefox"]
elif str(obj_b['Browser']) == "Edge 88":
OptionList = ["Edge 88", "Edge 88", "Edge 89", "Edge 90", "Chrome 87", "Chrome 88", "Firefox"]
elif str(obj_b['Browser']) == "Edge 89":
OptionList = ["Edge 89", "Edge 89", "Edge 88", "Edge 90", "Chrome 87", "Chrome 88", "Firefox"]
elif str(obj_b['Browser']) == "Edge 90":
OptionList = ["Edge 90", "Edge 90", "Edge 89", "Edge 80", "Chrome 88", "Chrome 87", "Firefox"]
else:
OptionList = ["Firefox", "Firefox", "Chrome 87", "Chrome 88", "Edge 88", "Edge 89", "Edge 90"]
BroFi.close()
except FileNotFoundError:
OptionList = ["Firefox", "Firefox", "Chrome 87", "Chrome 88", "Edge 88", "Edge 89", "Edge 90"]
browser_text = StringVar()
e3 = ttk.OptionMenu(root, browser_text, *OptionList).place(x=48, y=23.5, width=110) # height=25
with open('Resource/JSON/LogIn.json', 'r') as LgInFi:
data = LgInFi.read()
obj = json.loads(data)
username_text = StringVar()
e2 = ttk.Entry(root, textvariable=username_text)
e2.insert(0, str(obj['Username']))
e2.grid(row=0, column=3)
password_text = StringVar()
e4 = ttk.Entry(root, textvariable=password_text, show='*')
e4.insert(0, str(obj['Password']))
e4.grid(row=1, column=3)
LgInFi.close()
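# password() toggles whether the password entry shows its characters.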
def password():
if var.get() == 1:
e4.configure(show="")
elif var.get() == 0:
e4.configure(show="*")
# Buttons
var = IntVar()
bp = ttk.Checkbutton(root, command=password, offvalue=0, onvalue=1, variable=var)
bp.grid(row=1, column=4)
b1_text = tk.StringVar()
b1 = ttk.Button(root, textvariable=b1_text, width=12,
command=threading_run) # .place(x=57.5, y=60, width=100) # height=25
b1_text.set("Run")
b1.grid(row=2, column=1, pady=(15, 10))
b2 = ttk.Button(root, text="Settings", width=12,
command=threading_settings) # .place(x=170, y=60, width=100) # height=25
b2.grid(row=2, column=2, pady=(15, 10))
b3 = ttk.Button(root, text="Exit", width=12, command=close)
b3.grid(row=2, column=3, pady=(15, 10))
root.mainloop()
| []
| []
| [
"username"
]
| [] | ["username"] | python | 1 | 0 | |
genv_test.go | package genv
import (
"os"
"testing"
)
func TestLoad(t *testing.T) {
os.Clearenv()
err := Load()
if err != nil {
t.Fatal(err)
}
if os.Getenv("KEY") != "1234567890" {
t.Fatal("No valid os.Getenv result.")
}
}
func TestLoadCustomFile(t *testing.T) {
os.Clearenv()
err := Load(".env-custom")
if err != nil {
t.Fatal(err)
}
if os.Getenv("KEY") != "0987654321" {
t.Fatal("No valid os.Getenv result.")
}
}
func TestGetFilenames(t *testing.T) {
if filenames := getFileNames([]string{}); filenames[0] != ".env" {
t.Fatal("Zero filenames lenght.")
}
}
| [
"\"KEY\"",
"\"KEY\""
]
| []
| [
"KEY"
]
| [] | ["KEY"] | go | 1 | 0 | |
examples/deltadebugging/test-dd-env-match-stderr.go | // +build example-main
package main
import (
"fmt"
"io/ioutil"
"os"
)
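// main reads the file named by TAVOR_DD_FILE, echoes each of its characters
// to stderr and exits 0, serving as a delta-debugging match target.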
func main() {
f := os.Getenv("TAVOR_DD_FILE")
if f == "" {
panic("No TAVOR_DD_FILE defined")
}
v, err := ioutil.ReadFile(f)
if err != nil {
panic(err)
}
s := string(v)
for _, c := range s {
fmt.Fprintf(os.Stderr, "Got %c\n", c)
}
os.Exit(0)
}
| [
"\"TAVOR_DD_FILE\""
]
| []
| [
"TAVOR_DD_FILE"
]
| [] | ["TAVOR_DD_FILE"] | go | 1 | 0 | |
pkg/harness/harness.go | package harness
import (
"context"
"io/ioutil"
"os"
"path/filepath"
"github.com/giantswarm/microerror"
"github.com/giantswarm/micrologger"
"github.com/spf13/afero"
yaml "gopkg.in/yaml.v2"
)
const (
defaultConfigFile = "config.yaml"
// DefaultKubeConfig is the file path of the testing cluster
// kubeconfig.
//
// NOTE: This value is used in its raw form in helmclient repo. When it
// changes we need to change it in helmclient too.
//
// See https://github.com/giantswarm/helmclient/pull/63
//
DefaultKubeConfig = "/workdir/.shipyard/config"
)
type Harness struct {
logger micrologger.Logger
fs afero.Fs
cfg Config
}
type Config struct {
ExistingCluster bool `yaml:"existingCluster"`
RemoteCluster bool `yaml:"remoteCluster"`
}
func New(logger micrologger.Logger, fs afero.Fs, cfg Config) *Harness {
return &Harness{
logger: logger,
fs: fs,
cfg: cfg,
}
}
// Init initializes the harness.
func (h *Harness) Init(ctx context.Context) error {
h.logger.Log("info", "starting harness initialization")
baseDir, err := BaseDir()
if err != nil {
return microerror.Mask(err)
}
workDir := filepath.Join(baseDir, "workdir")
err = h.fs.MkdirAll(workDir, 0777)
if err != nil {
return microerror.Mask(err)
}
shipyardDir := filepath.Join(workDir, ".shipyard")
err = h.fs.MkdirAll(shipyardDir, 0777)
if err != nil {
return microerror.Mask(err)
}
// circumvent umask settings by assigning the right
// permissions to workdir and its parents
for _, d := range []string{baseDir, workDir, shipyardDir} {
err = h.fs.Chmod(d, 0777)
if err != nil {
return microerror.Mask(err)
}
}
h.logger.Log("info", "finished harness initialization")
return nil
}
// WriteConfig is a Task that persists the current config to a file.
func (h *Harness) WriteConfig(ctx context.Context) error {
dir, err := BaseDir()
if err != nil {
return microerror.Mask(err)
}
content, err := yaml.Marshal(&h.cfg)
if err != nil {
return microerror.Mask(err)
}
err = ioutil.WriteFile(filepath.Join(dir, defaultConfigFile), []byte(content), 0644)
return microerror.Mask(err)
}
// ReadConfig populates a Config struct with data read
// from a default file location.
func (h *Harness) ReadConfig() (Config, error) {
dir, err := BaseDir()
if err != nil {
return Config{}, microerror.Mask(err)
}
afs := &afero.Afero{Fs: h.fs}
content, err := afs.ReadFile(filepath.Join(dir, defaultConfigFile))
if err != nil {
return Config{}, microerror.Mask(err)
}
c := &Config{}
if err := yaml.Unmarshal(content, c); err != nil {
return Config{}, microerror.Mask(err)
}
return *c, nil
}
func BaseDir() (string, error) {
dir, err := os.Getwd()
if err != nil {
return "", microerror.Mask(err)
}
return filepath.Join(dir, ".e2e-harness"), nil
}
func GetProjectName() string {
if os.Getenv("CIRCLE_PROJECT_REPONAME") != "" {
return os.Getenv("CIRCLE_PROJECT_REPONAME")
}
dir, err := os.Getwd()
if err != nil {
return "e2e-harness"
}
return filepath.Base(dir)
}
func GetProjectTag() string {
if os.Getenv("CIRCLE_SHA1") != "" {
return os.Getenv("CIRCLE_SHA1")
}
return "latest"
}
| [
"\"CIRCLE_PROJECT_REPONAME\"",
"\"CIRCLE_PROJECT_REPONAME\"",
"\"CIRCLE_SHA1\"",
"\"CIRCLE_SHA1\""
]
| []
| [
"CIRCLE_PROJECT_REPONAME",
"CIRCLE_SHA1"
]
| [] | ["CIRCLE_PROJECT_REPONAME", "CIRCLE_SHA1"] | go | 2 | 0 | |
toby.py | # The Core of Toby
from flask import Flask, request, jsonify, g
import os
import logging
from ax.log import trace_error
from ax.connection import DatabaseConnection
from ax.datetime import now
from ax.tools import load_function, get_uuid, decrypt
from ax.exception import InvalidToken
logger = logging.getLogger('werkzeug')
debug_flg = os.getenv('TOBY_DEBUG', 'True') == 'True'
token = os.environ['TOBY_TOKEN']
app = Flask('Toby')
# app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.logger.setLevel(logging.DEBUG if debug_flg else logging.INFO)
def get_db():
"""Opens a new database connection if there is none yet for the
current application context.
"""
if not hasattr(g, 'db'):
g.db = DatabaseConnection(os.getenv('TOBY_DB_USER', 'toby'), os.environ['TOBY_DB_PASSWORD'])
return g.db
@app.teardown_appcontext
def close_db(error):
"""Closes the database again at the end of the request."""
if hasattr(g, 'db'):
g.db.disconnect()
if error:
logger.error('Database connection closed because of :' + str(error))
@app.route("/")
def ping():
return "<h1 style='color:blue'>Hello There! This is Toby</h1>"
@app.route("/process")
def process():
request_id = None
try:
in_param = request.get_json(force=True, silent=False, cache=False)
if decrypt(in_param['request_token']) != token:
# verify token
raise InvalidToken(in_param)
if 'request_id' not in in_param:
request_id = get_uuid()
in_param['request_id'] = request_id
else:
request_id = in_param['request_id']
if 'request_timestamp' not in in_param:
in_param['request_timestamp'] = now()
in_param['logger'] = logger
in_param['get_db_connection'] = get_db
func = load_function(in_param)
resp = func()
except:
e = trace_error(logger)
resp = {'request_id': request_id, 'request_status': 'error', 'request_error': str(e[-1])}
return jsonify(resp)
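# Illustrative request sketch (not part of the original file): apart from
# request_token / request_id / request_timestamp, the payload fields are
# whatever ax.tools.load_function() expects, so the body below is a
# hypothetical example only.  decrypt(request_token) must equal the
# TOBY_TOKEN environment variable.  Note that the /process route is
# registered without methods=['POST'], so Flask serves it for GET only,
# while the JSON body is still read via request.get_json(force=True).
#
#   {"request_token": "<encrypted TOBY_TOKEN>",
#    "request_id": "optional, generated with get_uuid() when omitted",
#    "request_timestamp": "optional, defaults to now()"}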
if __name__ == "__main__":
app.run()
| []
| []
| [
"TOBY_DB_USER",
"TOBY_DB_PASSWORD",
"TOBY_DEBUG",
"TOBY_TOKEN"
]
| [] | ["TOBY_DB_USER", "TOBY_DB_PASSWORD", "TOBY_DEBUG", "TOBY_TOKEN"] | python | 4 | 0 | |
zmirror/zmirror.py | #!/usr/bin/env python3
# coding=utf-8
import os
import sys
import re
import copy
import zlib
import sched
import queue
import base64
import random
import traceback
import ipaddress
import threading
from fnmatch import fnmatch
from time import time, sleep, process_time
from html import escape as html_escape
from datetime import datetime, timedelta
from urllib.parse import urljoin, urlsplit, urlunsplit, quote_plus
import urllib.parse
import requests
from flask import Flask, request, make_response, Response, redirect
from . import CONSTS
try:
# for python 3.5+ Type Hint
from typing import Union, List, Any, Tuple
except:
pass
try:  # used to detect the text encoding of html; cchardet is a C implementation of chardet, and is very fast
from cchardet import detect as c_chardet
except:
cchardet_available = False
else:
cchardet_available = True
if os.path.abspath(os.getcwd()) != CONSTS.ZMIRROR_ROOT:
os.chdir(CONSTS.ZMIRROR_ROOT)
from .external_pkgs.ColorfulPyPrint import * # TODO: Migrate logging tools to the stdlib logging
if "ZMIRROR_UNITTEST" in os.environ:
    # the unittest_mode derived from the environment variable here will be
    # overridden by the one in config; config is simply not loaded yet at this
    # point, so we temporarily decide it from the env
unittest_mode = True
else:
unittest_mode = False
try:  # a C implementation of lru_cache, faster than Python's builtin one
    from fastcache import lru_cache  # lru_cache is used to cache function results
except:
from functools import lru_cache
warnprint('package fastcache not found, '
'fallback to stdlib lru_cache, '
              'no FUNCTION is affected, it may only be a bit slower. '
'Considering install it using "pip3 install fastcache"'
)
else:
if not unittest_mode:
infoprint('lru_cache loaded successfully from fastcache')
from .threadlocal import ZmirrorThreadLocal
if not unittest_mode:  # do not print these lines when running unittests
infoprint('zmirror version: {version} author: {author}'.format(version=CONSTS.__VERSION__, author=CONSTS.__AUTHOR__))
infoprint('Github: {site_url}'.format(site_url=CONSTS.__GITHUB_URL__))
try:  # load the default settings
from config_default import *
except: # coverage: exclude
    errprint('the config_default.py is missing, this program may not work normally\n'
             'a missing config_default.py also breaks backward compatibility of config files, '
             'please download a fresh copy of config_default.py')
    raise  # v0.23.1+ the program terminates when config_default.py does not exist
try:  # load the user's custom config file, overriding same-named settings in the default config
from config import *
except: # coverage: exclude
errprint(
        'the config.py is missing or contains errors, falling back to default configs (if we can), '
        'please COPY the config_default.py to config.py, and change its content, '
        'or use the configs in the more_configs folder'
    )
    raise  # v0.23.1+ the program terminates when the config file contains errors or does not exist
else:
target_domain = target_domain.strip("./ \t").replace("https://", "").replace("http://", "")
infoprint('config file found, mirroring: ', target_domain)
if unittest_mode:
import importlib
importlib.reload(importlib.import_module("zmirror.utils"))
importlib.reload(importlib.import_module("zmirror.connection_pool"))
from .utils import *
from .lru_dict import LRUDict
from . import connection_pool
if local_cache_enable:
try:
from .cache_system import FileCache, get_expire_from_mime
cache = FileCache()
except: # coverage: exclude
traceback.print_exc()
errprint('Can Not Create Local File Cache, local file cache is disabled automatically.')
local_cache_enable = False
else:
if not unittest_mode:
infoprint('Local file cache enabled')
# ########## Basic Init #############
# begin loading settings from the config file; when reading the code, you may skip this part and start from main_function()
ColorfulPyPrint_set_verbose_level(verbose_level)
if developer_enable_experimental_feature: # coverage: exclude
    # handle the experimental feature switches first
pass
my_host_name_no_port = my_host_name  # our own hostname, without the port number
if my_host_port is not None:
    my_host_name += ':' + str(my_host_port)  # our own hostname with the port number (standard ports carry no explicit port)
    my_host_name_urlencoded = quote_plus(my_host_name)  # the urlencoded form
else:
my_host_name_urlencoded = my_host_name
if external_domains is None:
external_domains = []
external_domains = list([d.strip("./ \t").replace("https://", "").replace("http://", "") for d in external_domains])
external_domains_set = set(external_domains or [])
allowed_domains_set = external_domains_set.copy()
allowed_domains_set.add(target_domain)
for _domain in external_domains:  # to support domains that carry a port
allowed_domains_set.add(urlsplit('http://' + _domain).hostname)
domain_alias_to_target_set = set()  # domains regarded as the main domain, e.g. www.google.com and google.com can both be treated as the main domain
domain_alias_to_target_set.add(target_domain)
domains_alias_to_target_domain = list(domains_alias_to_target_domain)
if domains_alias_to_target_domain:
for _domain in domains_alias_to_target_domain:
allowed_domains_set.add(_domain)
domain_alias_to_target_set.add(_domain)
domains_alias_to_target_domain.append(target_domain)
else:
domains_alias_to_target_domain = [target_domain]
my_host_scheme_escaped = my_host_scheme.replace('/', r'\/')
myurl_prefix = my_host_scheme + my_host_name  # http(s)://www.my-mirror-site.com  without a trailing slash
myurl_prefix_escaped = myurl_prefix.replace('/', r'\/')
cdn_domains_number = len(CDN_domains)
allowed_remote_response_headers = {
'content-type', 'date', 'expires', 'cache-control', 'last-modified', 'server', 'location',
'accept-ranges',
'access-control-allow-origin', 'access-control-allow-headers', 'access-control-allow-methods',
'access-control-expose-headers', 'access-control-max-age', 'access-control-allow-credentials',
'timing-allow-origin',
}
allowed_remote_response_headers.update(custom_allowed_remote_headers)
# ## Get Target Domain and MyHostName's Root Domain ##
target_domain_root = extract_root_domain(target_domain)[0] # type: str
my_host_name_root = extract_root_domain(my_host_name)[0]  # type: str
# ########## Handle dependencies #############
if not enable_stream_content_transfer:
steamed_mime_keywords = ()
if not url_custom_redirect_enable:
url_custom_redirect_list = {}
url_custom_redirect_regex = ()
shadow_url_redirect_regex = ()
plain_replace_domain_alias = []
if isinstance(plain_replace_domain_alias, tuple):
plain_replace_domain_alias = list(plain_replace_domain_alias)
if not enable_stream_content_transfer:
enable_stream_transfer_async_preload = False
if not enable_automatic_domains_whitelist:
domains_whitelist_auto_add_glob_list = tuple()
if not enable_individual_sites_isolation:
isolated_domains = set()
else:
for isolated_domain in isolated_domains:
if isolated_domain not in external_domains_set:
warnprint('An isolated domain:', isolated_domain,
'would not have effect because it did not appears in the `external_domains` list')
if enable_custom_access_cookie_generate_and_verify:
human_ip_verification_whitelist_from_cookies = False
if not is_use_proxy:
requests_proxies = None
if human_ip_verification_enabled:
buff = []
for network in human_ip_verification_default_whitelist_networks:
buff.append(ipaddress.ip_network(network, strict=False))
human_ip_verification_default_whitelist_networks = tuple(buff)
for question in human_ip_verification_questions:
human_ip_verification_answers_hash_str += question[1]
else:
identity_verify_required = False
human_ip_verification_whitelist_from_cookies = False
must_verify_cookies = False
if not human_ip_verification_whitelist_from_cookies and not enable_custom_access_cookie_generate_and_verify:
must_verify_cookies = False
# ########### Global Variables ###############
# similar in purpose to flask's request variable: stores some parsed request information, and is referenced frequently throughout the program
parse = ZmirrorThreadLocal()
# task_scheduler
task_scheduler = sched.scheduler(time, sleep)
# records some information about a URL, including whether it should use the CDN
url_to_use_cdn = LRUDict(40960)
# see the structure example below
url_to_use_cdn["www.fake-domain.com/folder/foo/bar.png"] = [
True, # Should this url use CDN
"image/png", # MIME
17031, # size, if size too small, will not redirect to cdn
]
# records the 100 most recently requested domains, used by domain_guess
# although this is a dict, only the keys matter; the values are unused and all set to True for now
recent_domains = LRUDict(100)
recent_domains[target_domain] = True
# known records for domain_guess
# known records are answered with a 307 redirect
domain_guess_cache = LRUDict(1000)
# the format is as follows:
domain_guess_cache[("example.com", "/path/no/query/string")] = "target.domain.com"
# ########### PreCompile Regex ###############
# possible values of a colon (:) are:
# : %3A %253A  (full list in tests.TestRegex.REGEX_POSSIBLE_COLON)
REGEX_COLON = r"""(?::|%(?:25)?3[Aa])"""
# possible values of a slash (/) are (case-insensitive):
# (full list in tests.TestRegex.REGEX_POSSIBLE_COLON)
# / \/ \\/ \\\(N backslashes)/ %2F %5C%2F %5C%5C(N times 5C)%2F %255C%252F %255C%255C%252F \x2F
REGEX_SLASH = r"""(?:\\*(?:/|x2[Ff])|%(?:(?:25)?5[Cc]%)*(?:25)?2[Ff])"""
# quotes: the full list of possible values is in tests.TestRegex.REGEX_POSSIBLE_QUOTE
# " ' \\(maybe N backslashes)' \\(maybe N backslashes)"
# %22 %27 %5C(maybe N times 5C)%22 %5C(maybe N times 5C)%27
# %2522 %2527 %255C%2522 %255C%2527
# "
REGEX_QUOTE = r"""(?:\\*["']|%(?:(?:25)?5[Cc]%)*2(?:52)?[27]|")"""
# regex representing this mirror's own domain
if my_host_port is not None:
REGEX_MY_HOST_NAME = r'(?:' + re.escape(my_host_name_no_port) + REGEX_COLON + re.escape(str(my_host_port)) \
+ r'|' + re.escape(my_host_name_no_port) + r')'
else:
REGEX_MY_HOST_NAME = re.escape(my_host_name)
# Advanced url rewriter, see function response_text_rewrite()
# #### this regex is the very core of the whole program: it extracts url-like strings from html/css/js ####
# if you need to read this expression, be sure to do so under an IDE's (e.g. PyCharm's) regex highlighting
# this regex does not guarantee a match really is a url; regex_url_reassemble() validates that further
regex_adv_url_rewriter = re.compile(
    # prefix, must be one of: 'action=' (form), 'href=' (link), 'src=', 'url(' (css), '@import' (css), '":' (js/json, "key":"value")
    # \s matches whitespace such as space and tab
    r"""(?P<prefix>\b(?:(?:src|href|action)\s*=|url\s*\(|@import\s*|"\s*:)\s*)""" +  # prefix, eg: src=
    # left quote, optional (url() allows unquoted values); anything other than url() must be quoted,
    # with equal quotes on both sides (checked in the rewrite function; encoding it in the regex hurts readability)
    r"""(?P<quote_left>["'])?""" +  # quote "'
    # domain and scheme header, optional: http:// https:// // http:\/\/ (json) https:\/\/ (json) \/\/ (json)
    r"""(?P<domain_and_scheme>(?P<scheme>(?:https?:)?\\?/\\?/)(?P<domain>(?:[-a-z0-9]+\.)+[a-z]+(?P<port>:\d{1,5})?))?""" +
    # url path, including parameters, optional
    r"""(?P<path>[^\s;+$?#'"\{}]*?""" +  # full path(with query string) /foo/bar.js?love=luciaZ
    # query string, optional
    r"""(?P<query_string>\?[^\s?#'"]*?)?)""" +  # query string ?love=luciaZ
    # right quote (may also be a closing parenthesis), required
r"""(?P<quote_right>["')])(?P<right_suffix>\W)""", # right quote "'
flags=re.IGNORECASE
)
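# Matching sketch for regex_adv_url_rewriter (illustrative, not part of the
# original source): given an html fragment like
#   <script src="https://external.com/js/a.js?v=1"></script>
# it captures prefix='src=', quote_left='"', domain='external.com',
# path='/js/a.js?v=1', and regex_url_reassemble() below rebuilds the mirror
# url from these groups.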
# Response Cookies Rewriter, see response_cookie_rewrite()
regex_cookie_rewriter = re.compile(r'\bdomain=(\.?([\w-]+\.)+\w+)\b', flags=re.IGNORECASE)
regex_cookie_path_rewriter = re.compile(r'(?P<prefix>[pP]ath)=(?P<path>[\w\._/-]+?;)')
# Request Domains Rewriter, see client_requests_text_rewrite()
# this regex matches things like the following:
# [[[http(s):]//]www.mydomain.com/]extdomains/(https-)target.com
# compatible with all kinds of urlencode/escape
#
# note: if you want to read the regex below, be sure to do so under PyCharm's regex highlighting,
# otherwise we take no responsibility for any dizziness or nausea
# after assembly, the regex below looks roughly like this (heavily simplified):
# assuming b.test.com is our own domain
# ((https?:/{2})?b\.test\.com/)?extdomains/(https-)?((?:[\w-]+\.)+\w+)\b
#
# for the corresponding unittest, see TestRegex.test__regex_request_rewriter_extdomains()
regex_request_rewriter_extdomains = re.compile(
r"""(?P<domain_prefix>""" +
( # [[[http(s):]//]www.mydomain.com/]
r"""(?P<scheme>""" +
( # [[http(s):]//]
( # [http(s):]
r"""(?:https?(?P<colon>{REGEX_COLON}))?""".format(REGEX_COLON=REGEX_COLON) # https?:
) +
r"""(?P<scheme_slash>%s)(?P=scheme_slash)""" % REGEX_SLASH # //
) +
r""")?""" +
        REGEX_MY_HOST_NAME +  # www.mydomain.com[:port]  (this part of the regex is assembled separately above)
r"""(?P<slash2>(?(scheme_slash)(?P=scheme_slash)|{REGEX_SLASH}))""".format(REGEX_SLASH=REGEX_SLASH) # # /
) +
r""")?""" +
r"""extdomains(?(slash2)(?P=slash2)|{REGEX_SLASH})(?P<is_https>https-)?""".format(
REGEX_SLASH=REGEX_SLASH) + # extdomains/(https-)
r"""(?P<real_domain>(?:[\w-]+\.)+\w+)\b""", # target.com
flags=re.IGNORECASE,
)
regex_request_rewriter_main_domain = re.compile(REGEX_MY_HOST_NAME)
# the regex below is the *experimental* replacement for response_text_basic_rewrite()
# it is used by the function response_text_basic_mirrorlization()
# in theory, with a large number of domains, it is faster than the existing brute-force
# string replacement, and in the future it can support more powerful domain wildcards
# added in v0.28.0, enabled by default since v0.28.3
def _regex_generate__basic_mirrorlization():
"""产生 regex_basic_mirrorlization
用一个函数包裹起来是因为在 try_match_and_add_domain_to_rewrite_white_list()
中需要动态修改 external_domains, 修改以后可能需要随之生成新的正则, 包裹一下比较容易调用
"""
from collections import Counter
    # count how often each suffix occurs, and sort in descending frequency, which helps regex efficiency
c = Counter(re.escape(x.split(".")[-1]) for x in allowed_domains_set)
regex_all_remote_tld = sorted(list(c.keys()), key=lambda x: c[x], reverse=True)
regex_all_remote_tld = "(?:" + "|".join(regex_all_remote_tld) + ")"
return re.compile(
r"""(?:""" +
( # [[http(s):]//] or [\?["']] or %27 %22 or "
r"""(?P<scheme>""" +
( # [[http(s):]//]
( # [http(s):]
r"""(?:https?(?P<colon>{REGEX_COLON}))?""".format(REGEX_COLON=REGEX_COLON) # https?:
) +
r"""(?P<scheme_slash>%s)(?P=scheme_slash)""" % REGEX_SLASH # //
) +
r""")""" +
r"""|""" +
# [\?["']] or %27 %22 or "
r"""(?P<quote>{REGEX_QUOTE})""".format(REGEX_QUOTE=REGEX_QUOTE)
) +
r""")""" +
# End prefix.
# Begin domain
r"""(?P<domain>([a-zA-Z0-9-]+\.){1,5}%s)\b""" % regex_all_remote_tld +
# Optional suffix slash
r"""(?P<suffix_slash>(?(scheme_slash)(?P=scheme_slash)|{SLASH}))?""".format(SLASH=REGEX_SLASH) +
# right quote (if we have left quote)
r"""(?(quote)(?P=quote))"""
)
regex_basic_mirrorlization = _regex_generate__basic_mirrorlization()
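# Rewrite sketch for the regex above (illustrative, not part of the original
# source); assuming my_host_name == "b.test.com" and external.com is an
# allowed external domain, response_text_basic_mirrorlization() defined below
# turns
#   src="//external.com/a.js"
# into
#   src="//b.test.com/extdomains/external.com/a.js"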
# used to strip parts like  zmirror_verify=75bf23086a541e1f;  out of cookies
regex_remove__zmirror_verify__header = re.compile(
r"""zmirror_verify=[a-zA-Z0-9]+\b;? ?"""
)
# iterate over custom_inject_content and compile the regexes in it
custom_inject_content = custom_inject_content or {}
for k, v in custom_inject_content.items():
if not v:
continue
for a in v:
if a.get("url_regex") is None:
continue
a["url_regex"] = re.compile(a["url_regex"], flags=re.I)
# ########## Flask app ###########
app = Flask( # type: Flask
__name__ if not unittest_mode
else 'unittest' + str(random.random()).replace('.', ''),
static_folder=None,
template_folder=None,
)
# ########## Begin Utils #############
def response_text_basic_mirrorlization(text):
"""
    the experimental upgraded version of response_text_basic_rewrite(), enabled by default
    *v0.28.1.dev*
        earlier versions matched every allowed domain in the regex; we now match every
        possibly-allowed TLD instead, which brings some performance gain and makes
        dynamic domain addition and wildcard support easier
    *v0.28.2*
        further regex optimization, about 47% faster (roughly 4.4x the speed of the traditional brute-force replacement)
    *v0.28.3*
        this feature has worked quite well so far, so it was promoted from experimental to official use;
        the old response_text_basic_rewrite() was removed, keeping only a backward-compatibility alias
    :param text: the remote response text
    :type text: str
    :return: the rewritten response text
    :rtype: str
"""
def regex_reassemble(m):
remote_domain = get_group("domain", m)
if remote_domain not in allowed_domains_set:
if not enable_automatic_domains_whitelist or \
not try_match_and_add_domain_to_rewrite_white_list(remote_domain):
return m.group()
suffix_slash = get_group("suffix_slash", m)
slash = get_group("scheme_slash", m) or suffix_slash or "/"
colon = get_group("colon", m) or guess_colon_from_slash(slash)
_my_host_name = my_host_name.replace(":", colon) if my_host_port else my_host_name
if remote_domain not in domain_alias_to_target_set:
            # external domain
core = _my_host_name + slash + "extdomains" + slash + remote_domain + suffix_slash
else:
            # main domain
core = _my_host_name + suffix_slash
quote = get_group("quote", m)
if quote: # "target.domain"
return quote + core + quote
else: # http(s)://target.domain //target.domain
if get_group("colon", m): # http(s)://target.domain
return my_host_scheme.replace(":", colon).replace("/", slash) + core
else: # //target.domain
return slash * 2 + core
return regex_basic_mirrorlization.sub(regex_reassemble, text)
def encoding_detect(byte_content):
"""
    try to detect and return the encoding of a byte string; return None on failure
    :param byte_content: the byte string to be decoded
    :type byte_content: bytes
    :return: the encoding name, or None
:rtype: Union[str, None]
"""
if force_decode_remote_using_encode is not None:
return force_decode_remote_using_encode
if possible_charsets:
for charset in possible_charsets:
try:
byte_content.decode(encoding=charset)
except:
pass
else:
return charset
if cchardet_available: # detect the encoding using cchardet (if we have)
return c_chardet(byte_content)['encoding']
return None
def cache_clean(is_force_flush=False):
"""
    clean up the garbage generated while the program runs; called automatically and periodically during runtime
    this covers the various rewrite caches, the file cache, etc.
    by default, only expired entries are cleaned
    :param is_force_flush: whether to ignore expiry times and flush all caches
:type is_force_flush: bool
"""
if enable_connection_keep_alive:
connection_pool.clear(force_flush=is_force_flush)
if local_cache_enable:
cache.check_all_expire(force_flush_all=is_force_flush)
if is_force_flush:
try:
url_to_use_cdn.clear()
is_domain_match_glob_whitelist.cache_clear()
is_mime_streamed.cache_clear()
extract_real_url_from_embedded_url.cache_clear()
embed_real_url_to_embedded_url.cache_clear()
check_global_ua_pass.cache_clear()
is_mime_represents_text.cache_clear()
extract_mime_from_content_type.cache_clear()
is_content_type_using_cdn.cache_clear()
is_ua_in_whitelist.cache_clear()
verify_ip_hash_cookie.cache_clear()
is_denied_because_of_spider.cache_clear()
is_ip_not_in_allow_range.cache_clear()
# client_requests_text_rewrite.cache_clear()
# extract_url_path_and_query.cache_clear()
except: # coverage: exclude
errprint('ErrorWhenCleaningFunctionLruCache')
traceback.print_exc()
def cron_task_container(task_dict, add_task_only=False):
"""
    cron task container: invokes the target function, and schedules the next run once it finishes
    :param task_dict: parameters of the cron task, a dict:
        { "target": the target function (a callable object, not a function-name string), required,
        "interval": the task delay in seconds, optional,
        "priority": the priority, optional,
        "name": an alias for the task, optional,
        "args": positional arguments (arg1, arg2), optional,
        "kwargs": keyword arguments {key: value,}, optional,
        }
    :param add_task_only: whether to only schedule the task without executing it
"""
global task_scheduler
if not add_task_only:
        # execute the task
try:
infoprint('CronTask:', task_dict.get('name', str(task_dict['target'])), 'Target:', str(task_dict['target']))
target_func = task_dict.get('target')
if target_func is None:
raise ValueError("target is not given in " + str(task_dict))
target_func(
                *(task_dict.get('args', ())),  # unpack the arguments before passing them
**(task_dict.get('kwargs', {}))
)
except: # coverage: exclude
errprint('ErrorWhenProcessingCronTasks', task_dict)
traceback.print_exc()
        # exit the thread automatically when the global switch is turned off
if not enable_cron_tasks:
if threading.current_thread() != threading.main_thread():
exit()
else:
return
    # schedule the next run of this task
task_scheduler.enter(
task_dict.get('interval', 300),
task_dict.get('priority', 999),
cron_task_container,
(task_dict,)
)
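# Usage sketch for cron_task_container() (illustrative, not part of the
# original source); cache_clean and its is_force_flush kwarg are defined above:
#
#   cron_task_container({
#       "target": cache_clean,                # required, must be callable
#       "interval": 300,                      # optional, seconds between runs
#       "name": "cache-clean-example",
#       "kwargs": {"is_force_flush": False},
#   }, add_task_only=True)  # only schedule it, do not run it immediately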
def cron_task_host():
"""定时任务宿主, 每分钟检查一次列表, 运行时间到了的定时任务"""
while True:
        # exit the thread automatically when the global switch is turned off
if not enable_cron_tasks:
if threading.current_thread() != threading.main_thread():
exit()
else:
return
sleep(60)
try:
task_scheduler.run()
except: # coverage: exclude
errprint('ErrorDuringExecutingCronTasks')
traceback.print_exc()
def add_temporary_domain_alias(source_domain, replaced_to_domain):
"""
    add an entry to the temporary domain-replacement list
    used for plain-text domain replacement, see the `plain_replace_domain_alias` option
    :param source_domain: the domain to be replaced
    :param replaced_to_domain: the domain to replace it with
:type source_domain: str
:type replaced_to_domain: str
"""
if parse.temporary_domain_alias is None:
parse.temporary_domain_alias = []
else:
parse.temporary_domain_alias = list(parse.temporary_domain_alias)
parse.temporary_domain_alias.append((source_domain, replaced_to_domain))
dbgprint('A domain', source_domain, 'to', replaced_to_domain, 'added to temporary_domain_alias',
parse.temporary_domain_alias)
def is_external_domain(domain):
"""是否是外部域名"""
return domain not in domains_alias_to_target_domain
# noinspection PyGlobalUndefined
def try_match_and_add_domain_to_rewrite_white_list(domain, force_add=False):
"""
    if the domain matches a wildcard in `domains_whitelist_auto_add_glob_list`, add it to the external_domains list
    domains added to the external_domains list have the rewrite mechanism applied to them
    used to add domains to external_domains dynamically while the program is running
    can also be used from external functions (custom_func.py)
    for a more detailed explanation of external_domains, see the corresponding docs in default_config.py
:type domain: str
:type force_add: bool
:rtype: bool
"""
global external_domains, external_domains_set, allowed_domains_set, prefix_buff
global regex_basic_mirrorlization
if domain is None or not domain:
return False
if domain in allowed_domains_set:
return True
if not force_add and not is_domain_match_glob_whitelist(domain):
return False
infoprint('A domain:', domain, 'was added to external_domains list')
    _buff = list(external_domains)  # external_domains is a tuple; convert it before appending
    _buff.append(domain)
    external_domains = tuple(_buff)  # convert back to a tuple; tuples have some performance advantages
external_domains_set.add(domain)
allowed_domains_set.add(domain)
prefix_buff[domain] = calc_domain_replace_prefix(domain)
    # regenerate the matching regex
regex_basic_mirrorlization = _regex_generate__basic_mirrorlization()
# write log
try:
with open(zmirror_root('automatic_domains_whitelist.log'), 'a', encoding='utf-8') as fp:
fp.write(domain + '\n')
except: # coverage: exclude
traceback.print_exc()
return True
def decode_mirror_url(mirror_url=None):
"""
    parse a mirror url (which may contain extdomains) and extract the original url information
    it does not have to be a full url; having the path part is enough (a query_string may also be present)
    if the argument is left empty, the url currently being requested by the user is used
    supports json (handles the \/ and \. escapes)
:rtype: dict[str, Union[str, bool]]
:return: {'domain':str, 'is_https':bool, 'path':str, 'path_query':str}
"""
_is_escaped_dot = False
_is_escaped_slash = False
result = {}
if mirror_url is None:
input_path_query = extract_url_path_and_query() # type: str
else:
        if r'\/' in mirror_url:  # if \/ appears in the url, unescape it first and re-escape after processing
_is_escaped_slash = True
mirror_url = mirror_url.replace(r'\/', '/')
        if r'\.' in mirror_url:  # if \. appears in the url, unescape it first and re-escape after processing
_is_escaped_dot = True
mirror_url = mirror_url.replace(r'\.', '.')
input_path_query = extract_url_path_and_query(mirror_url) # type: str
if input_path_query[:12] == '/extdomains/':
# 12 == len('/extdomains/')
split = urlsplit("//" + input_path_query[12:].lstrip("/")) # type: urllib.parse.SplitResult
real_domain = split.netloc
real_path_query = (split.path or "/") + (("?" + split.query) if split.query else "")
if real_domain[:6] == 'https-':
            # if the /extdomains/https-domain form was given explicitly (kept for old-version compatibility), use https
real_domain = real_domain[6:]
_is_https = True
else:
            # for the /extdomains/domain form without "https-", decide from the domain whether to use HTTPS
_is_https = is_target_domain_use_https(real_domain)
real_path_query = client_requests_text_rewrite(real_path_query)
if _is_escaped_dot: real_path_query = real_path_query.replace('.', r'\.')
if _is_escaped_slash: real_path_query = s_esc(real_path_query)
result['domain'] = real_domain
result['is_https'] = _is_https
result['path_query'] = real_path_query
result['path'] = urlsplit(result['path_query']).path
return result
input_path_query = client_requests_text_rewrite(input_path_query)
if _is_escaped_dot: input_path_query = input_path_query.replace('.', r'\.')
if _is_escaped_slash: input_path_query = s_esc(input_path_query)
result['domain'] = target_domain
result['is_https'] = (target_scheme == 'https://')
result['path_query'] = input_path_query
result['path'] = urlsplit(result['path_query']).path
return result
# function alias, for compatibility with config files from earlier versions
extract_from_url_may_have_extdomains = decode_mirror_url
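# Decoding sketch (illustrative, not part of the original source), assuming
# "external.com" appears in external_domains:
#
#   decode_mirror_url("/extdomains/external.com/foo?a=1")
#   -> {'domain': 'external.com',
#       'is_https': <as decided by is_target_domain_use_https()>,
#       'path_query': '/foo?a=1', 'path': '/foo'}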
# noinspection PyShadowingNames
def encode_mirror_url(raw_url_or_path, remote_domain=None, is_scheme=None, is_escape=False):
"""convert url from remote to mirror url
:type raw_url_or_path: str
:type remote_domain: str
:type is_scheme: bool
:type is_escape: bool
:rtype: str
"""
if is_escape:
        _raw_url_or_path = raw_url_or_path.replace(r'\/', '/')  # unescape \/ before processing
else:
_raw_url_or_path = raw_url_or_path
sp = urlsplit(_raw_url_or_path)
if '/extdomains/' == sp.path[:12]:
return raw_url_or_path
domain = remote_domain or sp.netloc or parse.remote_domain or target_domain
if domain not in allowed_domains_set:
return raw_url_or_path
if is_scheme is not False:
if _raw_url_or_path[:2] == '//':
our_prefix = '//' + my_host_name
elif is_scheme or sp.scheme:
our_prefix = myurl_prefix
else:
our_prefix = ''
else:
our_prefix = ''
if is_external_domain(domain):
middle_part = '/extdomains/' + domain
else:
middle_part = ''
result = urljoin(our_prefix + middle_part + '/',
extract_url_path_and_query(_raw_url_or_path).lstrip('/'))
if is_escape:
result = s_esc(result)
return result
# function alias, for compatibility with config files from earlier versions
convert_to_mirror_url = encode_mirror_url
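# Encoding sketch (illustrative, not part of the original source), the
# inverse of decode_mirror_url(); assuming my_host_name == "b.test.com" with
# "external.com" in external_domains:
#
#   encode_mirror_url("http://external.com/foo?a=1")
#   -> my_host_scheme + "b.test.com/extdomains/external.com/foo?a=1"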
def is_target_domain_use_https(domain):
"""请求目标域名时是否使用https"""
if force_https_domains == 'NONE':
return False
if force_https_domains == 'ALL':
return True
if domain in force_https_domains:
return True
else:
return False
def add_ssrf_allowed_domain(domain):
"""添加域名到ssrf白名单, 不支持通配符
:type domain: str
"""
global allowed_domains_set
allowed_domains_set.add(domain)
def dump_zmirror_snapshot(folder="error_dump", msg=None, our_response=None):
"""
    dump the current state to a file
    :param folder: the folder name
    :type folder: str
    :param our_response: the Flask response object, optional
    :type our_response: Response
    :param msg: extra information
    :type msg: str
    :return: the absolute path of the dumped file
:rtype: Union[str, None]
"""
import pickle
try:
if not os.path.exists(zmirror_root(folder)):
os.mkdir(zmirror_root(folder))
_time_str = datetime.now().strftime('snapshot_%Y-%m-%d_%H-%M-%S')
import config
snapshot = {
"time": datetime.now(),
"parse": parse.dump(),
"msg": msg,
"traceback": traceback.format_exc(),
"config": attributes(config, to_dict=True),
"FlaskRequest": attributes(request, to_dict=True),
}
if our_response is not None:
our_response.freeze()
snapshot["OurResponse"] = our_response
dump_file_path = os.path.abspath(os.path.join(zmirror_root(folder), _time_str + '.dump'))
with open(dump_file_path, 'wb') as fp:
pickle.dump(snapshot, fp, pickle.HIGHEST_PROTOCOL)
return dump_file_path
except:
return None
def generate_error_page(errormsg='Unknown Error', error_code=500, is_traceback=False, content_only=False):
"""
:type content_only: bool
    :type errormsg: Union[str, bytes]
:type error_code: int
:type is_traceback: bool
:rtype: Union[Response, str]
"""
if is_traceback:
traceback.print_exc()
errprint(errormsg)
if isinstance(errormsg, bytes):
errormsg = errormsg.decode()
dump_file_path = dump_zmirror_snapshot(msg=errormsg)
request_detail = ""
for attrib in filter(lambda x: x[0] != '_' and x[-2:] != '__', dir(parse)):
request_detail += "<tr><td>{attrib}</td><td>{value}</td></tr>" \
.format(attrib=attrib, value=html_escape(str(parse.__getattribute__(attrib))))
error_page = """<!doctype html><html lang="zh-CN"><head><meta charset="UTF-8">
<title>zmirror internal error</title>
<style>code{{background-color: #cccaca;}}</style>
</head>
<body>
<h1>zmirror internal error</h1>
A fatal error occurred inside the zmirror instance running on this server.<br>
<hr>
<h2>If you are visitor 如果你是访客</h2>
This site is temporary unavailable because some internal error<br>
Please contact your site admin. <br>
该镜像站暂时出现了临时的内部故障, 请联系网站管理员<br>
<hr>
<h2>If you are admin</h2>
You can find the full detailed log in your server's log files.<br>
For apache, typically at <code>/var/log/apache2/YOUR_SITE_NAME_error.log</code><br>
tips: you can use <code>tail -n 100 -f YOUR_SITE_NAME_error.log</code> to view real-time log<br>
<br>
If you can't solve it by yourself, here are some ways that may help:<br>
<ul>
<li>contact the developer by email: <a href="mailto:[email protected]" target="_blank">aploium <[email protected]></a></li>
<li>seeking for help in zmirror's <a href="https://gitter.im/zmirror/zmirror" target="_blank">online chat room</a></li>
<li>open an <a href="https://github.com/aploium/zmirror/issues" target="_blank">issue</a> (as an bug report) in github</li>
</ul>
<h3>Snapshot Dump</h3>
A snapshot has been dumped to <code>{dump_file_path}</code> <br>
You can load it using (Python3 code) <code>pickle.load(open(r"{dump_file_path}","rb"))</code><br>
The snapshot contains information which may be helpful for debugging
<h3>Detail</h3>
<table border="1"><tr><th>Attrib</th><th>Value</th></tr>
{request_detail}
</table>
<h3>Additional Information</h3>
<pre>{errormsg}</pre>
<h3>Traceback</h3>
<pre>{traceback_str}</pre>
<hr>
<div style="font-size: smaller">Powered by <em>zmirror {version}</em><br>
<a href="{official_site}" target="_blank">{official_site}</a></div>
</body></html>""".format(
errormsg=errormsg, request_detail=request_detail,
traceback_str=html_escape(traceback.format_exc()) if is_traceback else 'None or not displayed',
dump_file_path=dump_file_path,
version=CONSTS.__VERSION__, official_site=CONSTS.__GITHUB_URL__
)
if not content_only:
return make_response(error_page.encode(), error_code)
else:
return error_page
def generate_304_response(_content_type=None):
""":rtype Response"""
r = Response(content_type=_content_type, status=304)
r.headers.add('X-Cache', 'FileHit-304')
return r
def generate_ip_verify_hash(input_dict):
"""
    generate a hash that identifies the user
    used by the human_ip_verification feature
    the hash is 14 characters in total
    hash(first 7 chars + salt) = last 7 chars, which is how verification works
    :rtype: str
"""
strbuff = human_ip_verification_answers_hash_str
for key in input_dict:
strbuff += key + input_dict[key] + str(random.randint(0, 9000000))
input_key_hash = hex(zlib.adler32(strbuff.encode(encoding='utf-8')))[2:]
while len(input_key_hash) < 7:
input_key_hash += '0'
output_hash = hex(zlib.adler32((input_key_hash + human_ip_verification_answers_hash_str).encode(encoding='utf-8')))[2:]
while len(output_hash) < 7:
output_hash += '0'
return input_key_hash + output_hash
@lru_cache(maxsize=1024)
def verify_ip_hash_cookie(hash_cookie_value):
"""
    decide from the hash in the cookie whether the user is allowed to access
    used by the human_ip_verification feature
    the hash is 14 characters in total
    hash(first 7 chars + salt) = last 7 chars, which is how verification works
:type hash_cookie_value: str
:rtype: bool
"""
try:
input_key_hash = hash_cookie_value[:8]
output_hash = hash_cookie_value[8:]
calculated_hash = hex(zlib.adler32(
(input_key_hash + human_ip_verification_answers_hash_str).encode(encoding='utf-8')
))[2:]
if output_hash == calculated_hash:
return True
else:
return False
except:
return False
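# Round-trip sketch for the two functions above (illustrative, not part of
# the original source):
#
#   h = generate_ip_verify_hash({"q1": "some-answer"})
#   verify_ip_hash_cookie(h)   # -> True in the common case
#
# verify_ip_hash_cookie() splits the hash as h[:8] + h[8:] and re-checks that
# adler32(h[:8] + salt) reproduces h[8:], with the salt being
# human_ip_verification_answers_hash_str.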
def update_content_in_local_cache(url, content, method='GET'):
"""更新 local_cache 中缓存的资源, 追加content
在stream模式中使用"""
if local_cache_enable and method == 'GET' and cache.is_cached(url):
info_dict = cache.get_info(url)
resp = cache.get_obj(url)
resp.set_data(content)
        # when a stored resource has no complete content, without_content is set to true;
        # such a cache entry stays inactive and only takes effect once the content has been added
        # in stream mode the http headers arrive first and the body afterwards, so a header-only
        # state can occur; the program first puts the header-only response into the local cache,
        # then appends the content after it has actually been received in full
info_dict['without_content'] = False
if verbose_level >= 4: dbgprint('LocalCache_UpdateCache', url, content[:30], len(content))
cache.put_obj(
url,
resp,
obj_size=len(content),
expires=get_expire_from_mime(parse.mime),
last_modified=info_dict.get('last_modified'),
info_dict=info_dict,
)
def put_response_to_local_cache(url, _our_resp, without_content=False):
"""
put our response object(headers included) to local cache
:param without_content: for stream mode use
:param url: client request url
    :param _our_resp: our response (flask response object) to the client, which will be stored
:type url: str
:type _our_resp: Response
:type without_content: bool
"""
# Only cache GET method, and only when remote returns 200(OK) status
if parse.method != 'GET' or _our_resp.status_code != 200:
return
dbgprint('PuttingCache:', url, "without_content:", without_content)
if without_content:
our_resp = copy.copy(_our_resp)
our_resp.response = None # delete iterator
obj_size = 0
else:
our_resp = _our_resp
obj_size = len(parse.remote_response.content)
# requests' header are CaseInsensitive
last_modified = parse.remote_response.headers.get('Last-Modified', None)
cache.put_obj(
url,
our_resp,
expires=get_expire_from_mime(parse.mime),
obj_size=obj_size,
last_modified=last_modified,
info_dict={'without_content': without_content,
'last_modified': last_modified,
},
)
def try_get_cached_response(url, client_header=None):
"""
    try to fetch a response from the local cache
:param url: real url with query string
:type client_header: dict
:rtype: Union[Response, None]
"""
# Only use cache when client use GET
if local_cache_enable and parse.method == 'GET' and cache.is_cached(url):
if client_header is not None and 'if-modified-since' in client_header and \
cache.is_unchanged(url, client_header.get('if-modified-since', None)):
dbgprint('FileCacheHit-304', url)
return generate_304_response()
else:
cached_info = cache.get_info(url)
if cached_info.get('without_content', True):
                # for an explanation of without_content, see the update_content_in_local_cache() function
return None
# dbgprint('FileCacheHit-200')
resp = cache.get_obj(url)
assert isinstance(resp, Response)
parse.set_extra_resp_header('x-zmirror-cache', 'FileHit')
return resp
else:
return None
def regex_url_reassemble(match_obj):
"""
Reassemble url parts split by the regex.
:param match_obj: match object of stdlib re
    :return: the reassembled url string (including the prefix (url= etc.) and suffix)
:rtype: str
"""
prefix = get_group('prefix', match_obj)
quote_left = get_group('quote_left', match_obj)
quote_right = get_group('quote_right', match_obj)
path = get_group('path', match_obj)
match_domain = get_group('domain', match_obj)
scheme = get_group('scheme', match_obj)
whole_match_string = match_obj.group()
# dbgprint('prefix', prefix, 'quote_left', quote_left, 'quote_right', quote_right,
# 'path', path, 'match_domain', match_domain, 'scheme', scheme, 'whole', whole_match_string, v=5)
if r"\/" in path or r"\/" in scheme:
require_slash_escape = True
path = path.replace(r"\/", "/")
# domain_and_scheme = domain_and_scheme.replace(r"\/", "/")
else:
require_slash_escape = False
# path must be not blank
if (not path # path is blank
# only url(something) and @import are allowed to be unquoted
or ('url' not in prefix and 'import' not in prefix) and (not quote_left or quote_right == ')')
# for "key":"value" type replace, we must have at least one '/' in url path (for the value to be regard as url)
or (':' in prefix and '/' not in path)
# if we have quote_left, it must equals to the right
or (quote_left and quote_left != quote_right)
            # in javascript, paths that contain only one or two slashes should not be rewritten (to avoid potential errors)
# or (parse.mime == 'application/javascript' and path.count('/') < 2)
            # in javascript, we only rewrite those with an explicit scheme
# v0.21.10+ in "key":"value" format, we should ignore those path without scheme
or (not scheme and ('javascript' in parse.mime or '"' in prefix))
):
dbgprint('returned_un_touch', whole_match_string, v=5)
return whole_match_string
# v0.19.0+ Automatic Domains Whitelist (Experimental)
if enable_automatic_domains_whitelist:
try_match_and_add_domain_to_rewrite_white_list(match_domain)
# dbgprint(match_obj.groups(), v=5)
domain = match_domain or parse.remote_domain
# dbgprint('rewrite match_obj:', match_obj, 'domain:', domain, v=5)
# skip if the domain are not in our proxy list
if domain not in allowed_domains_set:
# dbgprint('return untouched because domain not match', domain, whole_match_string, v=5)
return match_obj.group() # return raw, do not change
# this resource's absolute url path to the domain root.
# dbgprint('match path', path, "remote path", parse.remote_path, v=5)
path = urljoin(parse.remote_path, path) # type: str
    # before Python 3.5, urljoin could not correctly handle cases like urljoin("/", "../233"), so handle that manually
if sys.version_info < (3, 5) and "/../" in path:
path = path.replace("/../", "/")
if not path.startswith("/"):
        # when the joined path does not start with /: if this is the main domain, do nothing; if it is an external domain, prepend a /
path = "/" + path
# dbgprint('middle path', path, v=5)
    if ':' not in parse.remote_domain:  # python's builtin urljoin has a bug: it cannot correctly join a domain that carries a port
url_no_scheme = urljoin(domain + '/', path.lstrip('/'))
else:
url_no_scheme = domain + '/' + path.lstrip('/')
# dbgprint('url_no_scheme', url_no_scheme, v=5)
# add extdomains prefix in path if need
if domain in external_domains_set:
path = '/extdomains/' + url_no_scheme
# dbgprint('final_path', path, v=5)
if enable_static_resource_CDN and url_no_scheme in url_to_use_cdn:
# dbgprint('We Know:', url_no_scheme, v=5)
_this_url_mime_cdn = url_to_use_cdn[url_no_scheme][0]
else:
# dbgprint('We Don\'t know:', url_no_scheme,v=5)
_this_url_mime_cdn = False
# Apply CDN domain
if _this_url_mime_cdn:
# pick an cdn domain due to the length of url path
# an advantage of choose like this (not randomly), is this can make higher CDN cache hit rate.
# CDN rewrite, rewrite static resources to cdn domains.
# A lot of cases included, the followings are just the most typical examples.
# http(s)://target.com/img/love_lucia.jpg --> http(s)://your.cdn.domains.com/img/love_lucia.jpg
# http://external.com/css/main.css --> http(s)://your.cdn.domains.com/extdomains/external.com/css/main.css
# http://external.pw/css/main.css --> http(s)://your.cdn.domains.com/extdomains/external.pw/css/main.css
replace_to_scheme_domain = my_host_scheme + CDN_domains[zlib.adler32(path.encode()) % cdn_domains_number]
# else: # parse.mime == 'application/javascript':
# replace_to_scheme_domain = '' # Do not use explicit url prefix in js, to prevent potential error
elif not scheme:
replace_to_scheme_domain = ''
elif 'http' not in scheme:
replace_to_scheme_domain = '//' + my_host_name
else:
replace_to_scheme_domain = myurl_prefix
reassembled_url = urljoin(replace_to_scheme_domain, path)
if _this_url_mime_cdn and cdn_redirect_encode_query_str_into_url:
reassembled_url = embed_real_url_to_embedded_url(
reassembled_url,
url_mime=url_to_use_cdn[url_no_scheme][1],
escape_slash=require_slash_escape
)
if require_slash_escape:
reassembled_url = s_esc(reassembled_url)
# reassemble!
# prefix: src= quote_left: "
# path: /extdomains/target.com/foo/bar.js?love=luciaZ
reassembled = prefix + quote_left + reassembled_url + quote_right + get_group('right_suffix', match_obj)
# dbgprint('---------------------', v=5)
return reassembled
@lru_cache(maxsize=256)
def is_ua_in_whitelist(ua_str):
"""
    when a bot's or spider's request gets banned, check whether it is on the allowed whitelist
    called by is_denied_because_of_spider()
:type ua_str: str
"""
ua_str = ua_str.lower()
if global_ua_white_name in ua_str:
return True
for allowed_ua in spider_ua_white_list:
if allowed_ua in ua_str:
return True
return False
@lru_cache(maxsize=256)
def is_denied_because_of_spider(ua_str):
"""检查user-agent是否因为是蜘蛛或机器人而需要ban掉"""
ua_str = ua_str.lower()
if 'spider' in ua_str or 'bot' in ua_str:
if is_ua_in_whitelist(ua_str):
infoprint("A Spider/Bot's access was granted", ua_str)
return False
infoprint('A Spider/Bot was denied, UA is:', ua_str)
return True
else:
return False
def load_ip_whitelist_file():
"""从文件加载ip白名单"""
set_buff = set()
if os.path.exists(zmirror_root(human_ip_verification_whitelist_file_path)):
with open(zmirror_root(human_ip_verification_whitelist_file_path), 'r', encoding='utf-8') as fp:
            for line in fp:  # one ip per line, as written by append_ip_whitelist_file()
                set_buff.add(line.strip())
return set_buff
def append_ip_whitelist_file(ip_to_allow):
"""写入ip白名单到文件"""
try:
with open(zmirror_root(human_ip_verification_whitelist_file_path), 'a', encoding='utf-8') as fp:
fp.write(ip_to_allow + '\n')
except: # coverage: exclude
errprint('Unable to write whitelist file')
traceback.print_exc()
def ip_whitelist_add(ip_to_allow, info_record_dict=None):
"""添加ip到白名单, 并写入文件"""
if ip_to_allow in single_ip_allowed_set:
return
dbgprint('ip white added', ip_to_allow, 'info:', info_record_dict)
single_ip_allowed_set.add(ip_to_allow)
is_ip_not_in_allow_range.cache_clear()
append_ip_whitelist_file(ip_to_allow)
# dbgprint(single_ip_allowed_set)
try:
with open(zmirror_root(human_ip_verification_whitelist_log), 'a', encoding='utf-8') as fp:
fp.write(datetime.now().strftime('%Y-%m-%d %H:%M:%S') + " " + ip_to_allow
+ " " + str(request.user_agent)
+ " " + repr(info_record_dict) + "\n")
except: # coverage: exclude
errprint('Unable to write log file', os.path.abspath(human_ip_verification_whitelist_log))
traceback.print_exc()
@lru_cache(maxsize=256)
def is_ip_not_in_allow_range(ip_address):
"""判断ip是否在白名单中"""
if ip_address in single_ip_allowed_set:
return False
ip_address_obj = ipaddress.ip_address(ip_address)
for allowed_network in human_ip_verification_default_whitelist_networks:
if ip_address_obj in allowed_network:
return False
return True
# ########## End utils ###############
# ################# Begin Server Response Handler #################
def preload_streamed_response_content_async(requests_response_obj, buffer_queue):
"""
    in stream mode, preload the content of the remote response
:param requests_response_obj:
:type buffer_queue: queue.Queue
"""
for particle_content in requests_response_obj.iter_content(stream_transfer_buffer_size):
try:
buffer_queue.put(particle_content, timeout=10)
except queue.Full: # coverage: exclude
traceback.print_exc()
exit()
if verbose_level >= 3: dbgprint('BufferSize', buffer_queue.qsize())
buffer_queue.put(None, timeout=10)
exit()
def iter_streamed_response_async():
"""异步, 一边读取远程响应, 一边发送给用户"""
total_size = 0
_start_time = time()
_content_buffer = b''
_disable_cache_temporary = False
buffer_queue = queue.Queue(maxsize=stream_transfer_async_preload_max_packages_size)
t = threading.Thread(
target=preload_streamed_response_content_async,
args=(parse.remote_response, buffer_queue),
daemon=True,
)
t.start()
while True:
try:
particle_content = buffer_queue.get(timeout=15)
except queue.Empty: # coverage: exclude
warnprint('WeGotAnSteamTimeout')
traceback.print_exc()
return
buffer_queue.task_done()
if particle_content is not None:
            # because of how streams work, the content gets consumed, so an extra copy has to be stored
if local_cache_enable and not _disable_cache_temporary:
if len(_content_buffer) > 8 * 1024 * 1024: # 8MB
_disable_cache_temporary = True
_content_buffer = None
else:
_content_buffer += particle_content
yield particle_content
else:
if parse.url_no_scheme in url_to_use_cdn:
                # update the recorded response length
url_to_use_cdn[parse.url_no_scheme][2] = len(_content_buffer)
if local_cache_enable and not _disable_cache_temporary:
update_content_in_local_cache(parse.remote_url, _content_buffer,
method=parse.remote_response.request.method)
return
if verbose_level >= 4:
total_size += len(particle_content)
dbgprint('total_size:', total_size, 'total_speed(KB/s):',
total_size / 1024 / (time() - _start_time + 0.000001))
def copy_response(is_streamed=False):
"""
Copy and parse remote server's response headers, generate our flask response object
:type is_streamed: bool
:return: flask response object
:rtype: Response
"""
if is_streamed:
parse.time["req_time_body"] = 0
        # transfer the content asynchronously without any rewriting; this returns a generator
content = iter_streamed_response_async()
else:
        # if not transferred asynchronously, (possibly) rewrite the content
content, parse.time["req_time_body"] = response_content_rewrite()
dbgprint('RemoteRespHeaders', parse.remote_response.headers)
    # create the base Response object
resp = Response(content, status=parse.remote_response.status_code)
    # ---------- filter/rewrite the remote response headers and copy them into our response ----------
    # remote response headers are whitelisted: only those listed in `allowed_remote_response_headers` are sent back to the browser
for header_key in parse.remote_response.headers:
header_key_lower = header_key.lower()
# Add necessary response headers from the origin site, drop other headers
if header_key_lower in allowed_remote_response_headers:
if header_key_lower == 'location':
                # rewrite the location header of redirects into a zmirror url
_location = parse.remote_response.headers[header_key]
if custom_text_rewriter_enable:
                    # the location header is also passed through the custom rewrite function, under a special MIME: mwm/headers-location
                    # this part may be split out into a dedicated custom rewrite function later
_location = custom_response_text_rewriter(_location, 'mwm/headers-location', parse.remote_url)
resp.headers[header_key] = encode_mirror_url(_location)
elif header_key_lower == 'content-type':
# force add utf-8 to content-type if it is text
if is_mime_represents_text(parse.mime) and 'utf-8' not in parse.content_type:
resp.headers[header_key] = parse.mime + '; charset=utf-8'
else:
resp.headers[header_key] = parse.remote_response.headers[header_key]
elif header_key_lower in ('access-control-allow-origin', 'timing-allow-origin'):
if custom_allowed_origin is None:
resp.headers[header_key] = myurl_prefix
elif custom_allowed_origin == '_*_': # coverage: exclude
_origin = request.headers.get('origin') or request.headers.get('Origin') or myurl_prefix
resp.headers[header_key] = _origin
else:
resp.headers[header_key] = custom_allowed_origin
else:
resp.headers[header_key] = parse.remote_response.headers[header_key]
# If we have the Set-Cookie header, we should extract the raw ones
# and then change the cookie domain to our domain
if header_key_lower == 'set-cookie':
for cookie_string in response_cookies_deep_copy():
resp.headers.add('Set-Cookie', response_cookie_rewrite(cookie_string))
dbgprint('OurRespHeaders:\n', resp.headers)
return resp
# noinspection PyProtectedMember
def response_cookies_deep_copy():
"""
    It's a BAD hack to get the RAW cookie headers, but so far we don't have a better way.
    We go DEEP inside urllib's private methods to get the raw headers
raw_headers example:
[('Cache-Control', 'private'),
('Content-Length', '48234'),
('Content-Type', 'text/html; Charset=utf-8'),
('Server', 'Microsoft-IIS/8.5'),
('Set-Cookie','BoardList=BoardID=Show; expires=Mon, 02-May-2016 16:00:00 GMT; path=/'),
('Set-Cookie','aspsky=abcefgh; expires=Sun, 24-Apr-2016 16:00:00 GMT; path=/; HttpOnly'),
('Set-Cookie', 'ASPSESSIONIDSCSSDSSQ=OGKMLAHDHBFDJCDMGBOAGOMJ; path=/'),
('X-Powered-By', 'ASP.NET'),
('Date', 'Tue, 26 Apr 2016 12:32:40 GMT')]
"""
raw_headers = parse.remote_response.raw._original_response.headers._headers
header_cookies_string_list = []
for name, value in raw_headers:
if name.lower() == 'set-cookie':
if my_host_scheme == 'http://':
value = value.replace('Secure;', '')
value = value.replace(';Secure', ';')
value = value.replace('; Secure', ';')
if 'httponly' in value.lower():
if enable_aggressive_cookies_path_rewrite:
                    # aggressive cookie path rewrite: rewrite every path to /
                    value = regex_cookie_path_rewriter.sub('path=/;', value)
                elif enable_aggressive_cookies_path_rewrite is not None:
                    # rewrite the path of HttpOnly cookies to live under the current url
                    # eg(/extdomains/a.foobar.com): path=/verify; -> path=/extdomains/a.foobar.com/verify
                    if parse.remote_domain not in domain_alias_to_target_set:  # do not rewrite main domains
                        value = regex_cookie_path_rewriter.sub(
                            r'\g<prefix>=/extdomains/' + parse.remote_domain + r'\g<path>', value)
header_cookies_string_list.append(value)
return header_cookies_string_list
def response_content_rewrite():
"""
    Rewrite the urls inside the requests response content. Binary content is skipped automatically (based on MIME).
:return: Tuple[bytes, float]
"""
_start_time = time()
_content = parse.remote_response.content
req_time_body = time() - _start_time
if not is_mime_represents_text(parse.mime):
# simply don't touch binary response content
dbgprint('Binary', parse.content_type)
return _content, req_time_body
# Do text rewrite if remote response is text-like (html, css, js, xml, etc..)
if verbose_level >= 3: dbgprint('Text-like', parse.content_type,
parse.remote_response.text[:15], _content[:15])
    # do our own encoding detection, because requests' builtin detection performs very poorly on GBK pages
encoding = encoding_detect(parse.remote_response.content)
if encoding is not None:
parse.remote_response.encoding = encoding
# simply copy the raw text, for custom rewriter function first.
resp_text = parse.remote_response.text
if developer_string_trace is not None and developer_string_trace in resp_text:
        # debug-only code, has no effect on normal operation
infoprint('StringTrace: appears in the RAW remote response text, code line no. ', current_line_number())
# try to apply custom rewrite function
if custom_text_rewriter_enable:
resp_text2 = custom_response_text_rewriter(resp_text, parse.mime, parse.remote_url)
if isinstance(resp_text2, str):
resp_text = resp_text2
elif isinstance(resp_text2, tuple) or isinstance(resp_text2, list):
resp_text, is_skip_builtin_rewrite = resp_text2
if is_skip_builtin_rewrite:
infoprint('Skip_builtin_rewrite', request.url)
return resp_text.encode(encoding='utf-8'), req_time_body
if developer_string_trace is not None and developer_string_trace in resp_text:
        # debug-only code, has no effect on normal operation
infoprint('StringTrace: appears after custom text rewrite, code line no. ', current_line_number())
# then do the normal rewrites
resp_text = response_text_rewrite(resp_text)
if developer_string_trace is not None and developer_string_trace in resp_text:
        # debug-only code, has no effect on normal operation
infoprint('StringTrace: appears after builtin rewrite, code line no. ', current_line_number())
    # inject custom content into the page
    # see the `Custom Content Injection` section of default_config.py for details
    if custom_inject_content and parse.mime == "text/html":
        for position, items in custom_inject_content.items():  # iterate over all positions in the settings
            for item in items:  # each entry at this position
                # skip if the regex does not match the current url
                r = item.get("url_regex")
                if r is not None and not r.match(parse.url_no_scheme):
                    continue
                # insert the content into the html
                resp_text = inject_content(position, resp_text, item["content"])
return resp_text.encode(encoding='utf-8'), req_time_body # return bytes
def response_text_basic_rewrite(*args, **kwargs): # coverage: exclude
"""本函数在v0.28.3被移除, 对本函数的调用会被映射出去
如果需要查看本函数代码, 请查看git历史到 v0.28.3 以前
"""
from warnings import warn
warn("This function is deprecated since v0.28.3, use response_text_basic_mirrorlization() instead", DeprecationWarning)
return response_text_basic_mirrorlization(*args, **kwargs)
def response_text_rewrite(resp_text):
"""
rewrite urls in text-like content (html,css,js)
:type resp_text: str
:rtype: str
"""
# v0.20.6+ plain replace domain alias, support json/urlencoded/json-urlencoded/plain
if url_custom_redirect_enable:
for before_replace, after_replace in (plain_replace_domain_alias + parse.temporary_domain_alias):
resp_text = resp_text.replace(before_replace, after_replace)
# v0.9.2+: advanced url rewrite engine
resp_text = regex_adv_url_rewriter.sub(regex_url_reassemble, resp_text)
if developer_string_trace is not None and developer_string_trace in resp_text:
        # debug-only code, has no effect on normal operation
infoprint('StringTrace: appears after advanced rewrite, code line no. ', current_line_number())
    # v0.28.0 experimental feature, enabled by default since v0.28.3
resp_text = response_text_basic_mirrorlization(resp_text)
if developer_string_trace is not None and developer_string_trace in resp_text:
        # debug-only code, has no effect on normal operation
infoprint('StringTrace: appears after basic mirrorlization, code line no. ', current_line_number())
# for cookies set string (in js) replace
# eg: ".twitter.com" --> "foo.com"
resp_text = resp_text.replace('\".' + target_domain_root + '\"', '\"' + my_host_name_no_port + '\"')
resp_text = resp_text.replace("\'." + target_domain_root + "\'", "\'" + my_host_name_no_port + "\'")
resp_text = resp_text.replace("domain=." + target_domain_root, "domain=" + my_host_name_no_port)
resp_text = resp_text.replace('\"' + target_domain_root + '\"', '\"' + my_host_name_no_port + '\"')
resp_text = resp_text.replace("\'" + target_domain_root + "\'", "\'" + my_host_name_no_port + "\'")
if developer_string_trace is not None and developer_string_trace in resp_text:
        # debug-only code, has no effect on normal operation
infoprint('StringTrace: appears after js cookies string rewrite, code line no. ', current_line_number())
# resp_text = resp_text.replace('lang="zh-Hans"', '', 1)
return resp_text
def response_cookie_rewrite(cookie_string):
"""
rewrite response cookie string's domain to `my_host_name`
:type cookie_string: str
"""
cookie_string = regex_cookie_rewriter.sub('domain=' + my_host_name_no_port, cookie_string)
return cookie_string
# ################# End Server Response Handler #################
# ################# Begin Client Request Handler #################
def assemble_remote_url():
"""
    assemble the target server URL, i.e. generate the value of parse.remote_url
:rtype: str
"""
if parse.is_external_domain:
        # the request is for an external domain (external domains)
scheme = 'https://' if parse.is_https else 'http://'
return urljoin(scheme + parse.remote_domain, parse.remote_path_query)
else:
        # the request is for the main domain, or a domain that can be treated as an alias of it
return urljoin(target_scheme + target_domain, parse.remote_path_query)
def ssrf_check_layer_1():
"""
    SSRF prevention, layer 1; called right at the start of a request, checks whether the domain is allowed
    :return: True if the request triggered the SSRF prevention
:rtype: bool
"""
# Only external in-zone domains are allowed (SSRF check layer 1)
if parse.remote_domain not in allowed_domains_set:
        if not try_match_and_add_domain_to_rewrite_white_list(parse.remote_domain):  # does the requested domain match a whitelist wildcard
            if developer_temporary_disable_ssrf_prevention:  # was SSRF prevention temporarily disabled in the settings
add_ssrf_allowed_domain(parse.remote_domain)
return False
else:
return True
return False
def extract_client_header():
"""
Extract necessary client header, filter out some.
    browser request headers are handled with a blacklist: blacklisted headers are dropped, all the rest are kept
    zmirror removes the host and content-length headers from the browser request,
    and rewrites the cookie header, replacing any occurrence of our own domain with the remote server's domain
    :return: the rewritten request headers
:rtype: dict
"""
rewrited_headers = {}
dbgprint('BrowserRequestHeaders:', request.headers)
for head_name, head_value in request.headers:
        head_name_l = head_name.lower()  # requests' request headers are case-sensitive; normalize to lowercase
        # ------------------ handling of special request headers -------------------
if head_name_l in ('host', 'content-length'):
            # drop these two browser headers; they are regenerated when zmirror makes its own request
continue
elif head_name_l == 'content-type' and head_value == '':
            # skip a blank content-type in the request headers
            # in flask's request, the content-type header always exists no matter whether the
            # browser actually sent it; an empty value means the header is really absent, so drop it
continue
elif head_name_l == 'accept-encoding' and ('br' in head_value or 'sdch' in head_value):
            # some modern browsers support the sdch and br encodings, which requests does not, so the sdch/br markers are stripped from the header
# For Firefox, they may send 'Accept-Encoding: gzip, deflate, br'
# For Chrome, they may send 'Accept-Encoding: gzip, deflate, sdch, br'
# however, requests cannot decode the br encode, so we have to remove it from the request header.
_str_buff = ''
if 'gzip' in head_value:
_str_buff += 'gzip, '
if 'deflate' in head_value:
_str_buff += 'deflate'
if _str_buff:
rewrited_headers[head_name_l] = _str_buff
continue
else:
            # ------------------ handling of the remaining request headers -------------------
            # all other headers are kept after one pass of content rewriting
rewrited_headers[head_name_l] = client_requests_text_rewrite(head_value)
            # strip zmirror_verify from the cookie
if head_name_l == "cookie":
rewrited_headers[head_name_l] = regex_remove__zmirror_verify__header.sub(
"",
rewrited_headers[head_name_l],
)
dbgprint('FilteredBrowserRequestHeaders:', rewrited_headers)
return rewrited_headers
# noinspection SpellCheckingInspection
def client_requests_text_rewrite(raw_text):
"""
Rewrite proxy domain to origin domain, extdomains supported.
Also Support urlencoded url.
This usually used in rewriting request params
eg. http://foo.bar/extdomains/accounts.google.com to http://accounts.google.com
eg2. foo.bar/foobar to www.google.com/foobar
eg3. http%3a%2f%2fg.zju.tools%2fextdomains%2Faccounts.google.com%2f233
to http%3a%2f%2faccounts.google.com%2f233
:type raw_text: str
:rtype: str
"""
def replace_to_real_domain(match_obj):
scheme = get_group("scheme", match_obj) # type: str
colon = match_obj.group("colon") # type: str
scheme_slash = get_group("scheme_slash", match_obj) # type: str
_is_https = bool(get_group("is_https", match_obj)) # type: bool
real_domain = match_obj.group("real_domain") # type: str
result = ""
if scheme:
if "http" in scheme:
if _is_https or is_target_domain_use_https(real_domain):
result += "https" + colon
else:
result += "http" + colon
result += scheme_slash * 2
result += real_domain
return result
    # substitute using a complex regex; after this pass, in theory, every extdomains occurrence has been stripped
    # see the top of this file for the regex_request_rewriter_extdomains definition itself
replaced = regex_request_rewriter_extdomains.sub(replace_to_real_domain, raw_text)
if developer_string_trace is not None and developer_string_trace in replaced:
        # debug-only code, has no effect on normal operation
        infoprint('StringTrace: appears in client_requests_text_rewrite, code line no. ', current_line_number())
    # regex-replace standalone occurrences of the main domain that carry no /extdomains/
replaced = regex_request_rewriter_main_domain.sub(target_domain, replaced)
    # just to be safe, do one more plain string replacement
replaced = replaced.replace(my_host_name, target_domain)
    dbgprint('ClientRequestedUrl: ', raw_text, '<- Has Been Rewritten To ->', replaced)
return replaced
def extract_url_path_and_query(full_url=None, no_query=False):
"""
Convert http://foo.bar.com/aaa/p.html?x=y to /aaa/p.html?x=y
:param no_query:
:type full_url: str
:param full_url: full url
:return: str
"""
if full_url is None:
full_url = request.url
split = urlsplit(full_url)
result = split.path or "/"
if not no_query and split.query:
result += '?' + split.query
return result
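# Usage sketch (illustrative, not part of the original source), taken straight
# from the docstring above:
#
#   extract_url_path_and_query("http://foo.bar.com/aaa/p.html?x=y")
#   -> "/aaa/p.html?x=y"
#   extract_url_path_and_query("http://foo.bar.com/aaa/p.html?x=y", no_query=True)
#   -> "/aaa/p.html"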
# ################# End Client Request Handler #################
# ################# Begin Middle Functions #################
def send_request(url, method='GET', headers=None, param_get=None, data=None):
"""实际发送请求到目标服务器, 对于重定向, 原样返回给用户
被request_remote_site_and_parse()调用"""
final_hostname = urlsplit(url).netloc
dbgprint('FinalRequestUrl', url, 'FinalHostname', final_hostname)
# Only external in-zone domains are allowed (SSRF check layer 2)
if final_hostname not in allowed_domains_set and not developer_temporary_disable_ssrf_prevention:
raise ConnectionAbortedError('Trying to access an OUT-OF-ZONE domain(SSRF Layer 2):', final_hostname)
# set zero data to None instead of b''
if not data:
data = None
prepped_req = requests.Request(
method,
url,
headers=headers,
params=param_get,
data=data,
).prepare()
# get session
if enable_connection_keep_alive:
_session = connection_pool.get_session(final_hostname)
else:
_session = requests.Session()
# Send real requests
parse.time["req_start_time"] = time()
r = _session.send(
prepped_req,
proxies=requests_proxies,
allow_redirects=False,
stream=enable_stream_content_transfer,
verify=not developer_do_not_verify_ssl,
)
# remote request time
parse.time["req_time_header"] = time() - parse.time["req_start_time"]
dbgprint('RequestTime:', parse.time["req_time_header"], v=4)
# Some debug output
# print(r.request.headers, r.headers)
if verbose_level >= 3:
dbgprint(r.request.method, "FinalSentToRemoteRequestUrl:", r.url, "\nRem Resp Stat: ", r.status_code)
dbgprint("RemoteRequestHeaders: ", r.request.headers)
if data:
dbgprint('RemoteRequestRawData: ', r.request.body)
dbgprint("RemoteResponseHeaders: ", r.headers)
return r
def prepare_client_request_data():
"""
    Parse the data sent by the visitor's browser.
    If it is text, rewrite its content and return it as str;
    if it is binary, return it untouched (bytes), without any processing.
    :rtype: Tuple[Union[str, bytes, None], Union[str, None]]
"""
data = request.get_data() # type: bytes
    # try to detect the encoding of whatever the browser sent
encoding = encoding_detect(data)
if encoding is not None:
try:
data = data.decode(encoding=encoding) # type: str
except:
            # decoding failed: data is binary or uses an unintelligible encoding,
            # so return it as-is without rewriting
encoding = None
pass
else:
        # data is text: rewrite it and return a str
data = client_requests_text_rewrite(data) # type: str
    # the following if-block is debug-only code and has no effect on normal operation
    if developer_string_trace:  # coverage: exclude
        if isinstance(data, str):
            data = data.encode(encoding=encoding)
        if developer_string_trace.encode(encoding=encoding or "utf-8") in data:
            infoprint('StringTrace: appears after client_requests_text_rewrite, code line no. ', current_line_number())
return data, encoding
def generate_our_response():
"""
    Build our response to the client
:rtype: Response
"""
# copy and parse remote response
resp = copy_response(is_streamed=parse.streamed_our_response)
if parse.time["req_time_header"] >= 0.00001:
parse.set_extra_resp_header('X-Header-Req-Time', "%.4f" % parse.time["req_time_header"])
if parse.time.get("start_time") is not None and not parse.streamed_our_response:
# remote request time should be excluded when calculating total time
parse.set_extra_resp_header('X-Body-Req-Time', "%.4f" % parse.time["req_time_body"])
parse.set_extra_resp_header('X-Compute-Time',
"%.4f" % (process_time() - parse.time["start_time"]))
parse.set_extra_resp_header('X-Powered-By', 'zmirror/%s' % CONSTS.__VERSION__)
if developer_dump_all_traffics and not parse.streamed_our_response:
dump_zmirror_snapshot("traffic")
return resp
def parse_remote_response():
"""处理远程服务器的响应"""
# extract response's mime to thread local var
parse.content_type = parse.remote_response.headers.get('Content-Type', '')
parse.mime = extract_mime_from_content_type(parse.content_type)
# only_serve_static_resources
if only_serve_static_resources and not is_content_type_using_cdn(parse.content_type):
return generate_simple_resp_page(b'This site is just for static resources.', error_code=403)
    # decide whether the response body should be transferred in streaming mode
    # for flask's streaming support, see the official docs: http://flask.pocoo.org/docs/0.11/patterns/streaming/
    # if streaming is enabled and the response's mime type is in the streamed set, streaming is used
    # for more on stream mode, see the `enable_stream_content_transfer` section of config_default.py
    # (in PyCharm you can Ctrl-click the variable below to jump to its definition)
parse.streamed_our_response = enable_stream_content_transfer and is_mime_streamed(parse.mime)
# extract cache control header, if not cache, we should disable local cache
parse.cache_control = parse.remote_response.headers.get('Cache-Control', '')
    # decide whether the response may be cached, using a rather conservative caching policy
parse.cacheable = 'no-store' not in parse.cache_control and 'must-revalidate' not in parse.cache_control \
and "max-age=0" not in parse.cache_control and "private" not in parse.cache_control \
and parse.remote_response.request.method == 'GET' and parse.remote_response.status_code == 200
if verbose_level >= 4:
dbgprint('Response Content-Type:', parse.content_type,
'IsStreamed:', parse.streamed_our_response,
'cacheable:', parse.cacheable,
'Line', current_line_number(), v=4)
# add url's MIME info to record, for MIME-based CDN rewrite,
    # next time we access this url, we will know its mime
if enable_static_resource_CDN and parse.cacheable:
# we should only cache GET method, and response code is 200
# noinspection PyUnboundLocalVariable
if parse.url_no_scheme not in url_to_use_cdn:
            # work out the length of the remote response
            if "Content-Length" in parse.remote_response.headers:
                # if the server supplied a length in the response headers, just read it
                length = parse.remote_response.headers.get("Content-Length")
            elif parse.streamed_our_response:
                # in streaming mode the body cannot be read immediately, so without a
                # server-supplied length there is no way to know how long it is; the actual
                # length is computed while the response is being read, and may be inaccurate
                length = -1
            else:
                # in non-streaming mode, requests fetches the whole body at once, so its length can be measured directly
                length = len(parse.remote_response.content)
            # record this URL's info
            url_to_use_cdn[parse.url_no_scheme] = [False, parse.mime, length]
if is_content_type_using_cdn(parse.mime):
                # mark it to use cdn, and record its url without scheme.
                # eg: if the SERVER's request url is http://example.com/2333?a=x, we record example.com/2333?a=x
                # because the same url SHOULD behave the same for http and https, and dropping the scheme improves performance
                url_to_use_cdn[parse.url_no_scheme][0] = True  # mark it to use the CDN
dbgprint('CDN enabled for:', parse.url_no_scheme)
else:
dbgprint('CDN disabled for:', parse.url_no_scheme)
def guess_correct_domain(depth=7):
"""
    Guess the correct domain for the requested url.
    When the response code is 404 or 500, the request was quite possibly sent to the wrong
    domain, and the domain it should have gone to is likely among those of recent requests.
    This function tries the most recently used domains; if one of them answers 200, that
    domain is taken to be the one this url belongs to, which amounts to an implicit url rewrite.
    * this function is likely to modify both parse and request
    :rtype: Union[Response, None]
"""
current_domain = parse.remote_domain
sp = list(urlsplit(parse.remote_url))
redirected = None
for i, domain in enumerate(recent_domains.keys()[:depth]):
if domain == current_domain:
continue
        sp[1] = domain  # set the domain
        try:
            # try sending the request; failure is tolerated
resp = send_request(
urlunsplit(sp),
method=request.method,
headers=parse.client_header,
data=parse.request_data_encoded,
)
except: # coverage: exclude
continue
if 400 <= resp.status_code <= 599: # 40x or 50x, eg:404 503 500
            # failed
dbgprint("Domain guess failed:", domain, v=4)
if i != depth - 1 or redirected is None:
continue
else:
                # when every attempt has failed, take the cached redirect response from an earlier domain, if any
resp, domain = redirected
elif 300 <= resp.status_code <= 399:
if i != depth - 1:
                # cache redirect responses for now; they are only taken out when every attempt fails
                if redirected is None:
                    # once a redirect result has been seen, later ones are discarded,
                    # because domains earlier in the list are more likely to be the real one
                    redirected = (resp, domain)
                continue
            elif redirected is not None:  # executed on the last round
                # a redirect result appeared earlier, so take it now
resp, domain = redirected
else:
continue
        # found it
dbgprint("domain guess successful, from", current_domain, "to", domain)
parse.set_extra_resp_header("X-Domain-Guess", domain)
        # implicitly rewrite the domain
        rewrited_url = encode_mirror_url(  # the rewritten url
parse.remote_path_query,
remote_domain=domain,
is_scheme=True,
)
dbgprint("Shadow rewriting, from", request.url, "to", rewrited_url)
request.url = rewrited_url
        # write to the cache
        domain_guess_cache[(current_domain, request.path)] = domain
        # write a log entry
try:
with open(zmirror_root("domain_guess.log"), "a", encoding="utf-8") as fw:
fw.write("{}\t{}\t{}\t-->\t{}\n".format(datetime.now(), current_domain, request.path, domain))
except: # coverage: exclude
pass
request.path = urlsplit(rewrited_url).path
        # regenerate the parse variable
assemble_parse()
return resp
    else:  # every attempt failed  # coverage: exclude
return None
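# Sketch of the effect with hypothetical domains: a request mirrored for a.example that
# answers 404 gets retried against the most recently used domains (b.example, c.example, ...);
# the first one answering 200 wins, the url is shadow-rewritten to it, and the
# (domain, path) pair is cached in domain_guess_cache for later requests.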
def request_remote_site():
"""
    Request the remote server (high-level); attempt a domain_guess when it returns 404/500
    """
    # request the mirrored site
    # note: zmirror does not follow redirects internally; redirect responses are returned to the browser as-is
parse.remote_response = send_request(
parse.remote_url,
method=request.method,
headers=parse.client_header,
data=parse.request_data_encoded,
)
if parse.remote_response.url != parse.remote_url:
warnprint("requests's remote url", parse.remote_response.url,
'does no equals our rewrited url', parse.remote_url)
if 400 <= parse.remote_response.status_code <= 599:
        # guess the correct domain for this url
dbgprint("Domain guessing for", request.url)
result = guess_correct_domain()
if result is not None:
parse.remote_response = result
def filter_client_request():
"""过滤用户请求, 视情况拒绝用户的访问
:rtype: Union[Response, None]
"""
dbgprint('Client Request Url: ', request.url)
# crossdomain.xml
if os.path.basename(request.path) == 'crossdomain.xml':
dbgprint('crossdomain.xml hit from', request.url)
return crossdomain_xml()
# Global whitelist ua
if check_global_ua_pass(str(request.user_agent)):
return None
if is_deny_spiders_by_403 and is_denied_because_of_spider(str(request.user_agent)):
return generate_simple_resp_page(b'Spiders Are Not Allowed To This Site', 403)
if human_ip_verification_enabled and (
((human_ip_verification_whitelist_from_cookies or enable_custom_access_cookie_generate_and_verify)
and must_verify_cookies)
or is_ip_not_in_allow_range(request.remote_addr)
):
dbgprint('ip', request.remote_addr, 'is verifying cookies')
if 'zmirror_verify' in request.cookies and \
((human_ip_verification_whitelist_from_cookies and verify_ip_hash_cookie(request.cookies.get('zmirror_verify')))
or (enable_custom_access_cookie_generate_and_verify and custom_verify_access_cookie(
request.cookies.get('zmirror_verify'), request))):
ip_whitelist_add(request.remote_addr, info_record_dict=request.cookies.get('zmirror_verify'))
dbgprint('add to ip_whitelist because cookies:', request.remote_addr)
else:
return redirect(
"/ip_ban_verify_page?origin=" + base64.urlsafe_b64encode(str(request.url).encode(encoding='utf-8')).decode(
encoding='utf-8'),
code=302)
return None
def prior_request_redirect():
"""对用户的请求进行按需重定向处理
与 rewrite_client_request() 不同, 使用301/307等进行外部重定向, 不改变服务器内部数据
遇到任意一个需要重定向的, 就跳出本函数
这是第一阶段重定向
第一阶段重定向, 是在 rewrite_client_request() 内部隐式重写 *之前* 的重定向
第二阶段重定向, 是在 rewrite_client_request() 内部隐式重写 *之后* 的重定向
如果 `custom_prior_request_redirect_enable` 启用, 则会调用 custom_func.custom_prior_redirect_func() 进行自定义重定向
:return: 如果不需要重定向, 则返回None, 否则返回重定向的 Response
:rtype: Union[Response, None]
"""
    # a non-external domain was mistakenly treated as external, so fix it with a redirect
if not parse.is_external_domain and '/extdomains/' == request.path[:12]:
dbgprint('Requesting main domain in extdomains, redirect back.')
return redirect(parse.remote_path_query, code=307)
    # mirror isolation: the referer tells us which mirror we are in; inside a sub-mirror,
    # requests whose url lacks /extdomains/ are corrected with a redirect
if enable_individual_sites_isolation and '/extdomains/' != request.path[:12] and request.headers.get('referer'):
reference_domain = decode_mirror_url(request.headers.get('referer'))['domain']
if reference_domain in isolated_domains:
return redirect(encode_mirror_url(parse.remote_path_query, reference_domain), code=307)
if url_custom_redirect_enable:
        # simple custom redirects, see config: url_custom_redirect_list
if request.path in url_custom_redirect_list:
redirect_to = request.url.replace(request.path, url_custom_redirect_list[request.path], 1)
dbgprint('Redirect from', request.url, 'to', redirect_to)
return redirect(redirect_to, code=307)
        # regex-based custom redirects, see config: url_custom_redirect_regex
for regex_match, regex_replace in url_custom_redirect_regex:
if re.match(regex_match, parse.remote_path_query, flags=re.IGNORECASE) is not None:
redirect_to = re.sub(regex_match, regex_replace, parse.remote_path_query, flags=re.IGNORECASE)
dbgprint('Redirect from', request.url, 'to', redirect_to)
return redirect(redirect_to, code=307)
if custom_prior_request_redirect_enable:
        # custom redirect hook
redirection = custom_prior_redirect_func(request, parse) # type: Union[Response, None]
if redirection is not None:
return redirection
def posterior_request_redirect():
"""
    This is the second redirect stage, performed *after* the implicit internal rewriting.
    Stage one happens *before* the implicit rewriting inside rewrite_client_request(),
    stage two happens *after* it.
    The function returns as soon as any rule requires a redirect.
    :return: None if no redirect is needed, otherwise the redirect Response
:rtype: Union[Response, None]
"""
    # CDN soft redirect
    # see the description of the cdn_redirect_code_if_cannot_hard_rewrite option in config for details
    if enable_static_resource_CDN:  # master switch for the CDN feature
        if (cdn_redirect_code_if_cannot_hard_rewrite  # switch for CDN soft (301/307) redirects
                # the resource behind this URL is known, i.e. it has been fetched successfully before
                and parse.url_no_scheme in url_to_use_cdn
                # and the resource has been judged suitable for the CDN
                and url_to_use_cdn[parse.url_no_scheme][0]
                # only resources requested with GET are handled this way
                and parse.method == 'GET'
                # only redirect when the size exceeds the configured minimum
                and int(url_to_use_cdn[parse.url_no_scheme][2]) > cdn_soft_redirect_minimum_size
                # if the requester's UA matches the CDN provider's crawler, serve the real resource instead
                and not is_ua_in_whitelist(str(request.user_agent))
):
            # this urljoin turns a url like https://foo.com/a.png?q=233 into the matching CDN URL https://cdn.com/a.png?q=233
            redirect_to_url = urljoin(
                my_host_scheme
                # pick a CDN domain from the url's adler32 checksum modulo the domain count;
                # using a hash instead of a random number guarantees the same URL always maps
                # to the same CDN domain, which improves CDN and cache hit rates
                + CDN_domains[zlib.adler32(parse.url_no_scheme.encode()) % cdn_domains_number],
                extract_url_path_and_query()  # yields the /a.png?q=233 part of the target url
)
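            # worked example with hypothetical values: with CDN_domains == ['cdn1.example', 'cdn2.example']
            # and cdn_domains_number == 2, zlib.adler32(b'foo.com/a.png?q=233') % 2 picks the same
            # index on every request, so that url is always served from the same CDN host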
if cdn_redirect_encode_query_str_into_url:
                # encode query strings like ?q=233 into the path; see the explanation in config
redirect_to_url = embed_real_url_to_embedded_url(
redirect_to_url, url_mime=url_to_use_cdn[parse.url_no_scheme][1])
return redirect(redirect_to_url, code=cdn_redirect_code_if_cannot_hard_rewrite)
    # if the local cache hits, return the cached response directly
if local_cache_enable:
resp = try_get_cached_response(parse.remote_url, parse.client_header)
if resp is not None:
dbgprint('CacheHit,Return')
if parse.time.get("start_time") is not None:
parse.set_extra_resp_header('X-Compute-Time', "%.4f" % (process_time() - parse.time["start_time"]))
return resp
    # redirect based on the domain_guess cache
if (parse.remote_domain, request.path) in domain_guess_cache:
domain = domain_guess_cache[(parse.remote_domain, request.path)]
        rewrited_url = encode_mirror_url(  # the rewritten url
parse.remote_path_query,
remote_domain=domain,
is_scheme=True,
)
dbgprint("Redirect via domain_guess_cache, from", request.url, "to", rewrited_url)
return redirect(rewrited_url, code=307)
def assemble_parse():
"""将用户请求的URL解析为对应的目标服务器URL"""
_temp = decode_mirror_url()
parse.remote_domain = _temp['domain'] # type: str
parse.is_https = _temp['is_https'] # type: bool
parse.remote_path = _temp['path'] # type: str
parse.remote_path_query = _temp['path_query'] # type: str
parse.is_external_domain = is_external_domain(parse.remote_domain)
parse.remote_url = assemble_remote_url() # type: str
parse.url_no_scheme = parse.remote_url[parse.remote_url.find('//') + 2:] # type: str
    recent_domains[parse.remote_domain] = True  # record the recently used domain
dbgprint('after assemble_parse, url:', parse.remote_url, ' path_query:', parse.remote_path_query)
def rewrite_client_request():
"""
    All rewriting here takes effect only inside the program and is invisible to the requester.
    Unlike the external 301/307 redirects of prior_request_redirect(),
    this function achieves redirection by changing internal program variables.
    Returns True if a rewrite happened and some settings need to be reloaded, False otherwise.
    After a rewrite the function does not exit but continues with the next rule, so the order of the rewrites matters.
"""
has_been_rewrited = False
    # ------------- request rewriting starts here ----------------
if cdn_redirect_encode_query_str_into_url:
real_url = extract_real_url_from_embedded_url(request.url)
if real_url is not None:
dbgprint("BeforeEmbeddedExtract:", request.url, " After:", real_url)
request.url = real_url
request.path = urlsplit(real_url).path
has_been_rewrited = True
if url_custom_redirect_enable and shadow_url_redirect_regex:
_path_query = extract_url_path_and_query()
_path_query_raw = _path_query
for before, after in shadow_url_redirect_regex:
_path_query = re.sub(before, after, _path_query)
if _path_query != _path_query_raw:
dbgprint('ShadowUrlRedirect:', _path_query_raw, 'to', _path_query)
request.url = myurl_prefix + _path_query
request.path = urlsplit(_path_query).path
has_been_rewrited = True
break
    # ------------- request rewriting ends here ----------------
    # if anything was rewritten, has_been_rewrited is True;
    # rewrite_client_request() changes request.url internally,
    # so everything has to be re-parsed at this point
if has_been_rewrited:
assemble_parse()
return has_been_rewrited
# ################# End Middle Functions #################
# ################# Begin Flask After Request ################
@app.after_request
def zmirror_after_request(response):
    # release the lock held in connection_pool
if enable_connection_keep_alive:
connection_pool.release_lock()
return response
# ################# End Flask After Request ################
# ################# Begin Flask #################
@app.route('/zmirror_stat')
def zmirror_status():
"""返回服务器的一些状态信息"""
if request.remote_addr and request.remote_addr != '127.0.0.1':
return generate_simple_resp_page(b'Only 127.0.0.1 are allowed', 403)
output = ""
output += strx('extract_real_url_from_embedded_url', extract_real_url_from_embedded_url.cache_info())
    output += strx('\nis_mime_streamed', is_mime_streamed.cache_info())
output += strx('\nembed_real_url_to_embedded_url', embed_real_url_to_embedded_url.cache_info())
output += strx('\ncheck_global_ua_pass', check_global_ua_pass.cache_info())
output += strx('\nextract_mime_from_content_type', extract_mime_from_content_type.cache_info())
output += strx('\nis_content_type_using_cdn', is_content_type_using_cdn.cache_info())
    output += strx('\nis_ua_in_whitelist', is_ua_in_whitelist.cache_info())
output += strx('\nis_mime_represents_text', is_mime_represents_text.cache_info())
output += strx('\nis_domain_match_glob_whitelist', is_domain_match_glob_whitelist.cache_info())
output += strx('\nverify_ip_hash_cookie', verify_ip_hash_cookie.cache_info())
output += strx('\nis_denied_because_of_spider', is_denied_because_of_spider.cache_info())
output += strx('\nis_ip_not_in_allow_range', is_ip_not_in_allow_range.cache_info())
output += strx('\n\ncurrent_threads_number', threading.active_count())
# output += strx('\nclient_requests_text_rewrite', client_requests_text_rewrite.cache_info())
# output += strx('\nextract_url_path_and_query', extract_url_path_and_query.cache_info())
output += strx('\n----------------\n')
output += strx('\ndomain_alias_to_target_set', domain_alias_to_target_set)
return "<pre>" + output + "</pre>\n"
@app.route('/ip_ban_verify_page', methods=['GET', 'POST'])
def ip_ban_verify_page():
"""生成一个身份验证页面"""
if request.method == 'GET':
dbgprint('Verifying IP:', request.remote_addr)
form_body = ''
for q_id, _question in enumerate(human_ip_verification_questions):
form_body += r"""%s <input type="text" name="%d" placeholder="%s" style="width: 190px;" /><br/>""" \
% (_question[0], q_id, (html_escape(_question[2]) if len(_question) >= 3 else ""))
for rec_explain_string, rec_name, input_type in human_ip_verification_identity_record:
form_body += r"""%s %s<input type="%s" name="%s" /><br/>""" % (
rec_explain_string,
                ('<span style="color: red;">(必填)</span> ' if human_ip_verification_answer_any_one_questions_is_ok else ""),
html_escape(input_type), html_escape(rec_name))
if 'origin' in request.args:
form_body += r"""<input type="hidden" name="origin" value="%s" style="width: 190px;" />""" % html_escape(
request.args.get('origin'))
return r"""<!doctype html>
<html lang="zh-CN">
<head>
<meta charset="UTF-8">
<title>%s</title>
</head>
<body>
<h1>%s</h1>
<p>这样的验证只会出现一次,通过后您会被加入白名单,之后相同IP的访问不会再需要验证。<br/>
提示: 由于手机和宽带IP经常会发生改变,您可能会多次看到这一页面。</p>
%s <br>
<pre style="border: 1px dashed;">%s</pre>
<form method='post'>%s<button type='submit'>递交</button>
</form>
</body>
</html>""" % (
html_escape(human_ip_verification_title), html_escape(human_ip_verification_title),
("只需要回答出以下<b>任意一个</b>问题即可" if human_ip_verification_answer_any_one_questions_is_ok
else "你需要回答出以下<b>所有问题</b>"),
human_ip_verification_description, form_body)
elif request.method == 'POST':
dbgprint('Verifying Request Form', request.form)
        # iterate over all the questions and check whether any were answered correctly
        for q_id, _question in enumerate(human_ip_verification_questions):
            submitted_answer = request.form.get(str(q_id), '')
            if submitted_answer == '':  # this question was not answered
                if human_ip_verification_answer_any_one_questions_is_ok:  # if one answer is enough, just skip it
                    continue
                else:  # if every question must be answered, report an error
                    return generate_simple_resp_page(b'Please answer question: ' + _question[0].encode(), 200)
            if submitted_answer != _question[1]:  # answered, but the answer is wrong
                return generate_simple_resp_page(b'Wrong answer in: ' + _question[0].encode(), 200)
            elif human_ip_verification_answer_any_one_questions_is_ok:
                break  # one correct answer is enough to pass
        else:  # this else only runs when the for loop completes without hitting break
            if human_ip_verification_answer_any_one_questions_is_ok:  # reaching here means not a single question was answered
                return generate_simple_resp_page(b'Please answer at least ONE question', 200)
record_dict = {}
for rec_explain_string, rec_name, form_type in human_ip_verification_identity_record:
if rec_name not in request.form or not request.form[rec_name]:
return generate_simple_resp_page(b'Param Missing or Blank: ' + rec_explain_string.encode(), 200)
else:
record_dict[rec_name] = request.form[rec_name]
origin = '/'
if 'origin' in request.form:
try:
origin = base64.urlsafe_b64decode(request.form.get('origin')).decode(encoding='utf-8')
except: # coverage: exclude
return generate_error_page(
"Unable to decode origin from value:" + html_escape(request.form.get('origin')), is_traceback=True)
else:
netloc = urlsplit(origin).netloc
if netloc and netloc != my_host_name:
origin = '/'
if identity_verify_required:
if not custom_identity_verify(record_dict):
return generate_simple_resp_page(b'Verification Failed, please check', 200)
resp = generate_html_redirect_page(origin, msg=human_ip_verification_success_msg)
if human_ip_verification_whitelist_from_cookies:
_hash = generate_ip_verify_hash(record_dict)
resp.set_cookie(
'zmirror_verify',
_hash,
expires=datetime.now() + timedelta(days=human_ip_verification_whitelist_cookies_expires_days),
max_age=human_ip_verification_whitelist_cookies_expires_days * 24 * 3600
# httponly=True,
# domain=my_host_name
)
record_dict['__zmirror_verify'] = _hash
elif enable_custom_access_cookie_generate_and_verify:
_hash = custom_generate_access_cookie(record_dict, request)
dbgprint('SelfGeneratedCookie:', _hash)
if _hash is None:
return generate_simple_resp_page(b'Verification Failed, please check', 200)
resp.set_cookie(
'zmirror_verify',
_hash,
expires=datetime.now() + timedelta(days=human_ip_verification_whitelist_cookies_expires_days),
max_age=human_ip_verification_whitelist_cookies_expires_days * 24 * 3600
# httponly=True,
# domain=my_host_name
)
record_dict['__zmirror_verify'] = _hash
ip_whitelist_add(request.remote_addr, info_record_dict=record_dict)
return resp
@app.route('/', methods=['GET', 'POST', 'OPTIONS', 'PUT', 'DELETE', 'HEAD', 'PATCH'])
@app.route('/<path:input_path>', methods=['GET', 'POST', 'OPTIONS', 'PUT', 'DELETE', 'HEAD', 'PATCH'])
def zmirror_enter(input_path='/'):
"""入口函数的壳, 只是包了一层异常处理, 实际是 main_function() """
try:
resp = main_function(input_path=input_path)
        # add the extra response headers
        for name, value in parse.extra_resp_headers.items():
            resp.headers.set(name, value)
        # add the extra cookies
for name, cookie_string in parse.extra_cookies.items():
resp.headers.add("Set-Cookie", cookie_string)
except: # coverage: exclude
return generate_error_page(is_traceback=True)
else:
return resp
# noinspection PyUnusedLocal
def main_function(input_path='/'):
"""本程序的实际入口函数
:rtype: Response
"""
dbgprint('-----BeginRequest-----')
    # parse is a zmirror-specific thread-local variable, analogous to flask's request;
    # it is no less important than request and is used throughout zmirror
    # for the meaning of its fields, see zmirror.threadlocal.ZmirrorThreadLocal
parse.init()
parse.method = request.method
parse.time["start_time"] = process_time() # to display compute time
    # resolve the URL requested by the user into the corresponding target-server URL
    assemble_parse()
    # inspect and filter the client request;
    # disallowed requests (e.g. crawlers) are terminated here
    # this function does not modify parse
    r = filter_client_request()
    if r is not None:  # a non-None return value means it should be sent back to the user
dbgprint('-----EndRequest(filtered out)-----')
return r
    # first-stage redirect of the client request (before the implicit rewriting)
    # this function does not modify parse
    # this redirect is visible to the user, as a 301/302/307
r = prior_request_redirect()
if r is not None:
        # a None return value means no redirect happened and processing continues as usual;
        # a flask Response object means a redirect is required, and the object is returned as-is
        # the same applies below
return r
    # perform the implicit rewrite/redirect of the request
    # implicit rewriting only takes effect inside zmirror and is transparent to the browser
    # the rewrite may modify flask's built-in request variable
    # and may modify parse
has_been_rewrited = rewrite_client_request()
    # first SSRF check layer, preventing requests to disallowed sites
if ssrf_check_layer_1():
return generate_simple_resp_page(b'SSRF Prevention! Your domain is NOT ALLOWED.', 403)
    # extract the browser request headers after the necessary rewriting
parse.client_header = extract_client_header() # type: dict
    # second-stage redirect of the client request (after the implicit rewriting)
    # like the first stage, these are 301/302/307 redirects
r = posterior_request_redirect()
if r is not None:
return r
    # parse and rewrite the data payload of the browser request
parse.request_data, parse.request_data_encoding = prepare_client_request_data()
    # request the real remote server,
    # attempting a domain_guess when it returns 404/500
    # see the comments in guess_correct_domain() for an explanation of domain_guess
    request_remote_site()
    # parse the remote server's response
    parse_remote_response()
    # build our own response
    resp = generate_our_response()
    # store our server's entire response (headers included)
if local_cache_enable and parse.cacheable:
put_response_to_local_cache(parse.remote_url, resp, without_content=parse.streamed_our_response)
dbgprint('-----EndRequest-----')
return resp
@app.route('/crossdomain.xml')
def crossdomain_xml():
return Response("""<?xml version="1.0"?>
<!DOCTYPE cross-domain-policy SYSTEM "http://www.macromedia.com/xml/dtds/cross-domain-policy.dtd">
<cross-domain-policy>
<allow-access-from domain="*"/>
<site-control permitted-cross-domain-policies="all"/>
<allow-http-request-headers-from domain="*" headers="*" secure="false"/>
</cross-domain-policy>""", content_type='text/x-cross-domain-policy')
@app.route('/about_zmirror')
def about_zmirror():
return Response("""zmirror
version: {version}
Author: {author}
Github: {github_url}
Note: Love Luciaz Forever!
Mirroring: {source_site}
This site: {my_domain}
""".format(version=CONSTS.__VERSION__, author=CONSTS.__AUTHOR__,
github_url=CONSTS.__GITHUB_URL__, source_site=target_domain,
my_domain=my_host_name),
content_type='text/plain')
# ################# End Flask #################
# ################# Begin Post (auto)Exec Section #################
# ########### domain replacer prefix string buff ###############
prefix_buff = {}
for _domain in allowed_domains_set:
prefix_buff[_domain] = calc_domain_replace_prefix(_domain)
if human_ip_verification_enabled:
single_ip_allowed_set = load_ip_whitelist_file()
else:
single_ip_allowed_set = set()
try:
if unittest_mode:
import importlib
        # under unittest, custom_func itself also imports zmirror,
        # which adds an extra reference count,
        # so every time zmirror is reloaded under unittest, custom_func must be reloaded as well
importlib.reload(importlib.import_module("custom_func"))
from custom_func import *
except: # coverage: exclude
pass
if custom_text_rewriter_enable:
try:
from custom_func import custom_response_text_rewriter
except: # coverage: exclude
        warnprint('Cannot import custom_response_text_rewriter from custom_func.py,'
                  ' `custom_text_rewriter` is now disabled (if it was enabled)')
raise
if identity_verify_required:
try:
from custom_func import custom_identity_verify
except: # coverage: exclude
identity_verify_required = False
warnprint('Cannot import custom_identity_verify from custom_func.py,'
' `identity_verify` is now disabled (if it was enabled)')
raise
if enable_custom_access_cookie_generate_and_verify:
try:
from custom_func import custom_generate_access_cookie, custom_verify_access_cookie
except: # coverage: exclude
enable_custom_access_cookie_generate_and_verify = False
        errprint('Cannot import custom_generate_access_cookie and custom_verify_access_cookie from custom_func.py,'
' `enable_custom_access_cookie_generate_and_verify` is now disabled (if it was enabled)')
raise
if enable_cron_tasks:
for _task_dict in cron_tasks_list:
try:
_task_dict['target'] = globals()[_task_dict['target']]
cron_task_container(_task_dict, add_task_only=True)
except Exception as e:
errprint('UnableToInitCronTask', e)
raise
th = threading.Thread(target=cron_task_host, daemon=True)
th.start()
# ################# End Post (auto)Exec Section #################
if __name__ == '__main__':
errprint('Please use `python3 wsgi.py` to run')
exit()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
infrastructure-provisioning/src/general/scripts/aws/dataengine_prepare.py | #!/usr/bin/python
# *****************************************************************************
#
# Copyright (c) 2016, EPAM SYSTEMS INC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ******************************************************************************
import json
import time
from fabric.api import *
from dlab.fab import *
from dlab.meta_lib import *
from dlab.actions_lib import *
import sys
import os
import uuid
import logging
import traceback
from Crypto.PublicKey import RSA
import multiprocessing
if __name__ == "__main__":
local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
os.environ['request_id'])
local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.INFO,
filename=local_log_filepath)
try:
create_aws_config_files()
edge_status = get_instance_status(os.environ['conf_service_base_name'] + '-Tag',
os.environ['conf_service_base_name'] + '-' + os.environ[
'edge_user_name'] + '-edge')
if edge_status != 'running':
logging.info('ERROR: Edge node is unavailable! Aborting...')
print('ERROR: Edge node is unavailable! Aborting...')
ssn_hostname = get_instance_hostname(os.environ['conf_service_base_name'] + '-Tag',
os.environ['conf_service_base_name'] + '-ssn')
put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'],
ssn_hostname)
append_result("Edge node is unavailable")
sys.exit(1)
print('Generating infrastructure names and tags')
data_engine = dict()
try:
data_engine['exploratory_name'] = os.environ['exploratory_name']
except:
data_engine['exploratory_name'] = ''
try:
data_engine['computational_name'] = os.environ['computational_name']
except:
data_engine['computational_name'] = ''
data_engine['service_base_name'] = os.environ['conf_service_base_name']
data_engine['tag_name'] = data_engine['service_base_name'] + '-Tag'
data_engine['key_name'] = os.environ['conf_key_name']
data_engine['region'] = os.environ['aws_region']
data_engine['cluster_name'] = data_engine['service_base_name'] + '-' + os.environ['edge_user_name'] + \
'-de-' + data_engine['exploratory_name'] + '-' + \
data_engine['computational_name']
data_engine['master_node_name'] = '{}-m'.format(data_engine['cluster_name'])
data_engine['slave_node_name'] = '{}-s'.format(data_engine['cluster_name'])
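        # e.g. with hypothetical values service_base_name='dlab', edge_user_name='alice',
        # exploratory_name='nb1' and computational_name='sp1', cluster_name becomes
        # 'dlab-alice-de-nb1-sp1' and the nodes 'dlab-alice-de-nb1-sp1-m' / 'dlab-alice-de-nb1-sp1-s<N>'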
data_engine['master_size'] = os.environ['aws_dataengine_master_shape']
data_engine['slave_size'] = os.environ['aws_dataengine_slave_shape']
data_engine['dataengine_master_security_group_name'] = '{}-{}-dataengine-master-sg' \
.format(data_engine['service_base_name'], os.environ['edge_user_name'])
data_engine['dataengine_slave_security_group_name'] = '{}-{}-dataengine-slave-sg' \
.format(data_engine['service_base_name'], os.environ['edge_user_name'])
data_engine['tag_name'] = '{}-Tag'.format(data_engine['service_base_name'])
tag = {"Key": data_engine['tag_name'],
"Value": "{}-{}-subnet".format(data_engine['service_base_name'], os.environ['edge_user_name'])}
data_engine['subnet_cidr'] = get_subnet_by_tag(tag)
data_engine['notebook_dataengine_role_profile_name'] = '{}-{}-nb-de-Profile' \
.format(data_engine['service_base_name'].lower().replace('-', '_'), os.environ['edge_user_name'])
data_engine['instance_count'] = int(os.environ['dataengine_instance_count'])
data_engine['cluster_nodes_tag'] = {"Key": "dataengine_notebook_name",
"Value": os.environ['notebook_instance_name']}
data_engine['cluster_nodes_resource_tag'] = {"Key": os.environ['conf_tag_resource_id'],
"Value": data_engine['service_base_name'] + ':' +
data_engine['cluster_name']}
data_engine['primary_disk_size'] = '30'
data_engine['instance_class'] = 'dataengine'
data_engine['expected_image_name'] = '{}-{}-notebook-image'.format(os.environ['conf_service_base_name'],
os.environ['application'])
data_engine['notebook_image_name'] = (lambda x: os.environ['notebook_image_name'] if x != 'None'
else data_engine['expected_image_name'])(str(os.environ.get('notebook_image_name')))
print('Searching pre-configured images')
data_engine['ami_id'] = get_ami_id(os.environ['aws_{}_image_name'.format(os.environ['conf_os_family'])])
image_id = get_ami_id_by_name(data_engine['notebook_image_name'], 'available')
if image_id != '' and os.environ['application'] in os.environ['dataengine_image_notebooks'].split(','):
data_engine['ami_id'] = image_id
print('Pre-configured image found. Using: {}'.format(data_engine['ami_id']))
else:
os.environ['notebook_image_name'] = os.environ['aws_{}_image_name'.format(os.environ['conf_os_family'])]
print('No pre-configured image found. Using default one: {}'.format(data_engine['ami_id']))
except Exception as err:
print("Failed to generate variables dictionary.")
append_result("Failed to generate variables dictionary. Exception:" + str(err))
sys.exit(1)
with open('/root/result.json', 'w') as f:
data = {"hostname": data_engine['cluster_name'], "error": ""}
json.dump(data, f)
try:
logging.info('[CREATE MASTER NODE]')
print('[CREATE MASTER NODE]')
data_engine['cluster_nodes_tag_type'] = {"Key": "Type", "Value": "master"}
params = "--node_name {} --ami_id {} --instance_type {} --key_name {} --security_group_ids {} --subnet_id {} --iam_profile {} --infra_tag_name {} --infra_tag_value {} --primary_disk_size {} --instance_class {}" \
.format(data_engine['master_node_name'], data_engine['ami_id'], data_engine['master_size'],
data_engine['key_name'],
get_security_group_by_name(data_engine['dataengine_master_security_group_name']),
get_subnet_by_cidr(data_engine['subnet_cidr']),
data_engine['notebook_dataengine_role_profile_name'], data_engine['tag_name'],
data_engine['master_node_name'], data_engine['primary_disk_size'], data_engine['instance_class'])
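        # the params string above expands into the CLI argument list consumed by the
        # ~/scripts/common_create_instance.py helper invoked below; every value comes
        # from the environment/config assembled earlier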
try:
local("~/scripts/{}.py {}".format('common_create_instance', params))
data_engine['master_id'] = get_instance_by_name(data_engine['tag_name'], data_engine['master_node_name'])
create_tag(data_engine['master_id'], data_engine['cluster_nodes_tag'], False)
create_tag(data_engine['master_id'], data_engine['cluster_nodes_resource_tag'], False)
create_tag(data_engine['master_id'], data_engine['cluster_nodes_tag_type'], False)
except:
traceback.print_exc()
raise Exception
except Exception as err:
append_result("Failed to create master instance.", str(err))
sys.exit(1)
try:
for i in range(data_engine['instance_count'] - 1):
logging.info('[CREATE SLAVE NODE {}]'.format(i + 1))
print('[CREATE SLAVE NODE {}]'.format(i + 1))
slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
data_engine['cluster_nodes_tag_type'] = {"Key": "Type", "Value": "slave"}
params = "--node_name {} --ami_id {} --instance_type {} --key_name {} --security_group_ids {} --subnet_id {} --iam_profile {} --infra_tag_name {} --infra_tag_value {} --primary_disk_size {} --instance_class {}" \
.format(slave_name, data_engine['ami_id'], data_engine['slave_size'],
data_engine['key_name'],
get_security_group_by_name(data_engine['dataengine_slave_security_group_name']),
get_subnet_by_cidr(data_engine['subnet_cidr']),
data_engine['notebook_dataengine_role_profile_name'], data_engine['tag_name'],
slave_name, data_engine['primary_disk_size'], data_engine['instance_class'])
try:
local("~/scripts/{}.py {}".format('common_create_instance', params))
data_engine['slave_id'] = get_instance_by_name(data_engine['tag_name'], slave_name)
create_tag(data_engine['slave_id'], data_engine['cluster_nodes_tag'], False)
create_tag(data_engine['slave_id'], data_engine['cluster_nodes_resource_tag'], False)
create_tag(data_engine['slave_id'], data_engine['cluster_nodes_tag_type'], False)
except:
traceback.print_exc()
raise Exception
except Exception as err:
remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
for i in range(data_engine['instance_count'] - 1):
slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
try:
remove_ec2(data_engine['tag_name'], slave_name)
except:
print("The slave instance {} hasn't been created.".format(slave_name))
append_result("Failed to create slave instances.", str(err))
sys.exit(1)
| []
| []
| [
"aws_dataengine_master_shape",
"application",
"conf_tag_resource_id",
"notebook_instance_name",
"edge_user_name",
"ssn_dlab_path",
"conf_service_base_name",
"computational_name",
"dataengine_image_notebooks",
"exploratory_name",
"notebook_image_name",
"request_id",
"conf_os_user",
"conf_resource",
"aws_{}_image_name'.format(os.environ['conf_os_family",
"conf_key_name",
"aws_dataengine_slave_shape",
"dataengine_instance_count",
"aws_region"
]
| [] | ["aws_dataengine_master_shape", "application", "conf_tag_resource_id", "notebook_instance_name", "edge_user_name", "ssn_dlab_path", "conf_service_base_name", "computational_name", "dataengine_image_notebooks", "exploratory_name", "notebook_image_name", "request_id", "conf_os_user", "conf_resource", "aws_{}_image_name'.format(os.environ['conf_os_family", "conf_key_name", "aws_dataengine_slave_shape", "dataengine_instance_count", "aws_region"] | python | 19 | 0 | |
application.py | import os
import sys
import json
import logging
from urllib.parse import urlparse
from flask import Flask, request, redirect, Response, render_template
from actingweb import config, aw_web_request, actor
import on_aw
from actingweb.handlers import callbacks, properties, meta, root, trust, devtest, \
subscription, resources, oauth, callback_oauth, bot, www, factory
# To debug in pycharm inside the Docker container, remember to uncomment import pydevd as well
# (and add to requirements.txt)
#import pydevd_pycharm
logging.basicConfig(stream=sys.stderr, level=os.getenv('LOG_LEVEL', "INFO"))
LOG = logging.getLogger()
LOG.setLevel(os.getenv('LOG_LEVEL', "INFO"))
# Prefix url_path here with path if app is deployed in non-root URL
app = Flask(__name__, static_url_path='/static')
# The on_aw object we will use to do app-specific processing
OBJ_ON_AW = on_aw.OnAWDemo()
def get_config():
# Having settrace here will make sure the process reconnects to the debug server on each request
# which makes it easier to keep in sync when doing code changes
# This is for pycharm
#pydevd_pycharm.settrace('docker.for.mac.localhost', port=3001, stdoutToServer=True, stderrToServer=True,
# suspend=False)
#
    # The greger.ngrok.io address will be overridden by env variables from serverless.yml
myurl = os.getenv('APP_HOST_FQDN', "greger.ngrok.io")
proto = os.getenv('APP_HOST_PROTOCOL', "https://")
# Replace with your URN here
aw_type = "urn:actingweb:actingweb.org:actingwebdemo"
bot_token = os.getenv('APP_BOT_TOKEN', "")
bot_email = os.getenv('APP_BOT_EMAIL', "")
bot_secret = os.getenv('APP_BOT_SECRET', "")
bot_admin_room = os.getenv('APP_BOT_ADMIN_ROOM', "")
oauth = {
'client_id': os.getenv('APP_OAUTH_ID', ""),
'client_secret': os.getenv('APP_OAUTH_KEY', ""),
'redirect_uri': proto + myurl + "/oauth",
'scope': "",
'auth_uri': "https://api.actingweb.net/v1/authorize",
'token_uri': "https://api.actingweb.net/v1/access_token",
'response_type': "code",
'grant_type': "authorization_code",
'refresh_type': "refresh_token",
# Example oauth_extras for google
#'oauth_extras': {
# 'access_type': 'offline',
# 'include_granted_scopes': 'false',
# 'login_hint': 'dynamic:creator',
# 'prompt': 'consent'
#}
}
actors = {
'myself': {
'type': aw_type,
'factory': proto + myurl + '/',
'relationship': 'friend', # associate, friend, partner, admin
}
}
return config.Config(
database='dynamodb',
fqdn=myurl,
proto=proto,
aw_type=aw_type,
desc="Actingwebdemo actor: ",
version="2.3",
devtest=True,
actors=actors,
force_email_prop_as_creator=False,
unique_creator=False,
# Use "oauth" here if authenticating against a service
www_auth="basic",
logLevel=os.getenv('LOG_LEVEL', "INFO"),
ui=True,
bot={
"token": bot_token,
"email": bot_email,
"secret": bot_secret,
"admin_room": bot_admin_room
},
oauth=oauth
)
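# A hypothetical local-development environment matching the os.getenv() calls above
# (names are the ones get_config() reads; values are examples only):
#   export APP_HOST_FQDN=localhost:5000
#   export APP_HOST_PROTOCOL=http://
#   export LOG_LEVEL=DEBUG
#   export APP_BOT_TOKEN=... APP_BOT_EMAIL=... APP_BOT_SECRET=... APP_BOT_ADMIN_ROOM=...
#   export APP_OAUTH_ID=...  APP_OAUTH_KEY=...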
class SimplifyRequest:
def __init__(self, req):
if isinstance(req, dict):
self._req = req
if isinstance(self._req['data'], str):
req['data'] = req['data'].encode('utf-8')
if 'method' not in self._req:
self._req['method'] = 'POST'
if 'path' not in req:
self._req['path'] = urlparse(req['url']).path
else:
cookies = {}
raw_cookies = req.headers.get("Cookie")
if raw_cookies:
for cookie in raw_cookies.split("; "):
                    name, value = cookie.split("=", 1)  # split on the first '=' only, so values may contain '='
cookies[name] = value
headers = {}
for k, v in req.headers.items():
headers[k] = v
params = {}
for k, v in req.values.items():
params[k] = v
self._req = {
'method': req.method,
'path': req.path,
'data': req.data,
'headers': headers,
'cookies': cookies,
'values': params,
'url': req.url
}
def __getattr__(self, key):
try:
return self._req[key]
except KeyError:
raise AttributeError(key)
class Handler:
def __init__(self, req):
req = SimplifyRequest(req)
self.handler = None
self.response = None
self.actor_id = None
self.path = req.path
self.method = req.method
        LOG.debug('Path: ' + req.url + ', params(' + json.dumps(req.values) + ')' + ', body (' +
                  json.dumps(req.data.decode('utf-8', errors='replace')) + ')')
self.webobj = aw_web_request.AWWebObj(
url=req.url,
params=req.values,
body=req.data,
headers=req.headers,
cookies=req.cookies
)
if not req or not self.path:
return
if self.path == '/':
self.handler = factory.RootFactoryHandler(
self.webobj, get_config(), on_aw=OBJ_ON_AW)
else:
path = self.path.split('/')
self.path = path
f = path[1]
if f == 'oauth':
self.handler = callback_oauth.CallbackOauthHandler(
self.webobj, get_config(), on_aw=OBJ_ON_AW)
elif f == 'bot':
self.handler = bot.BotHandler(
webobj=self.webobj, config=get_config(), on_aw=OBJ_ON_AW)
elif len(path) == 2:
self.handler = root.RootHandler(
self.webobj, get_config(), on_aw=OBJ_ON_AW)
else:
self.actor_id = f
f = path[2]
if f == 'meta':
# r'/<actor_id>/meta<:/?><path:(.*)>'
self.handler = meta.MetaHandler(
self.webobj, get_config(), on_aw=OBJ_ON_AW)
elif f == 'oauth':
# r'/<actor_id>/oauth<:/?><path:.*>'
self.handler = oauth.OauthHandler(
self.webobj, get_config(), on_aw=OBJ_ON_AW)
elif f == 'www':
# r'/<actor_id>/www<:/?><path:(.*)>'
self.handler = www.WwwHandler(
self.webobj, get_config(), on_aw=OBJ_ON_AW)
elif f == 'properties':
# r'/<actor_id>/properties<:/?><name:(.*)>'
self.handler = properties.PropertiesHandler(
self.webobj, get_config(), on_aw=OBJ_ON_AW)
elif f == 'trust':
# r'/<actor_id>/trust<:/?>'
# r'/<actor_id>/trust/<relationship><:/?>'
# r'/<actor_id>/trust/<relationship>/<peerid><:/?>'
if len(path) == 3:
self.handler = trust.TrustHandler(
self.webobj, get_config(), on_aw=OBJ_ON_AW)
elif len(path) == 4:
self.handler = trust.TrustRelationshipHandler(
self.webobj, get_config(), on_aw=OBJ_ON_AW)
elif len(path) >= 5:
self.handler = trust.TrustPeerHandler(
self.webobj, get_config(), on_aw=OBJ_ON_AW)
elif f == 'subscriptions':
# r'/<actor_id>/subscriptions<:/?>'
# r'/<actor_id>/subscriptions/<peerid><:/?>'
# r'/<actor_id>/subscriptions/<peerid>/<subid><:/?>'
# r'/<actor_id>/subscriptions/<peerid>/<subid>/<seqnr><:/?>'
if len(path) == 3:
self.handler = subscription.SubscriptionRootHandler(
self.webobj, get_config(), on_aw=OBJ_ON_AW)
elif len(path) == 4:
self.handler = subscription.SubscriptionRelationshipHandler(
self.webobj, get_config(), on_aw=OBJ_ON_AW)
elif len(path) == 5:
self.handler = subscription.SubscriptionHandler(
self.webobj, get_config(), on_aw=OBJ_ON_AW)
elif len(path) >= 6:
self.handler = subscription.SubscriptionDiffHandler(
self.webobj, get_config(), on_aw=OBJ_ON_AW)
elif f == 'callbacks':
# r'/<actor_id>/callbacks<:/?><name:(.*)>'
self.handler = callbacks.CallbacksHandler(
self.webobj, get_config(), on_aw=OBJ_ON_AW)
elif f == 'resources':
# r'/<actor_id>/resources<:/?><name:(.*)>'
self.handler = resources.ResourcesHandler(
self.webobj, get_config(), on_aw=OBJ_ON_AW)
elif f == 'devtest':
# r'/<actor_id>/devtest<:/?><path:(.*)>'
self.handler = devtest.DevtestHandler(
self.webobj, get_config(), on_aw=OBJ_ON_AW)
if not self.handler:
LOG.warning('Handler was not set with path: ' + req.url)
def process(self, **kwargs):
try:
if self.method == 'POST':
self.handler.post(**kwargs)
elif self.method == 'GET':
self.handler.get(**kwargs)
elif self.method == 'DELETE':
self.handler.delete(**kwargs)
elif self.method == 'PUT':
self.handler.put(**kwargs)
except AttributeError:
return False
if self.get_status() == 404:
return False
return True
def get_redirect(self):
if self.webobj.response.redirect:
return self.get_response()
return None
def get_response(self):
if self.webobj.response.redirect:
self.response = redirect(self.webobj.response.redirect, code=302)
else:
if self.webobj.response.status_code == 401:
                # For this demo, you may have tested with different users, so
                # force a new realm so that the browser will pop up the login window again
                self.webobj.response.headers['WWW-Authenticate'] = 'Basic realm="' + self.actor_id + '"'
self.response = Response(
response=self.webobj.response.body,
status=self.webobj.response.status_message,
headers=self.webobj.response.headers
)
self.response.status_code = self.webobj.response.status_code
if len(self.webobj.response.cookies) > 0:
for a in self.webobj.response.cookies:
self.response.set_cookie(a["name"], a["value"], max_age=a["max_age"], secure=a["secure"])
return self.response
def get_status(self):
return self.webobj.response.status_code
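# Typical usage of the Handler wrapper inside a flask view, mirroring the route functions below:
#   h = Handler(request)
#   if not h.process(actor_id=actor_id):
#       return Response(status=404)
#   return h.get_response()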
@app.route('/', methods=['GET', 'POST'])
def app_root():
h = Handler(request)
if not h.process():
return Response(status=404)
if h.get_status() == 400:
existing = actor.Actor(config=get_config())
existing.get_from_creator(request.values.get('creator'))
if existing.id:
return redirect(get_config().root + existing.id + '/www?refresh=true', 302)
else:
return render_template('aw-root-failed.html', **h.webobj.response.template_values)
if request.method == 'GET':
return render_template('aw-root-factory.html', **h.webobj.response.template_values)
if request.method == 'POST' and h.get_status() == 200:
return render_template('aw-root-created.html', **h.webobj.response.template_values)
return h.get_response()
@app.route('/<actor_id>', methods=['GET', 'POST', 'DELETE'], strict_slashes=False)
def app_actor_root(actor_id):
h = Handler(request)
if not h.process(actor_id=actor_id):
return Response(status=404)
return h.get_response()
@app.route('/<actor_id>/meta', methods=['GET'], strict_slashes=False)
@app.route('/<actor_id>/meta/<path:path>', methods=['GET'], strict_slashes=False)
def app_meta(actor_id, path=''):
h = Handler(request)
if not h.process(actor_id=actor_id, path=path):
return Response(status=404)
return h.get_response()
@app.route('/<actor_id>/oauth', methods=['GET'], strict_slashes=False)
@app.route('/<actor_id>/oauth/<path:path>', methods=['GET'], strict_slashes=False)
def app_oauth(actor_id, path=''):
h = Handler(request)
if not h.process(actor_id=actor_id, path=path):
return Response(status=404)
return h.get_response()
@app.route('/<actor_id>/www', methods=['GET', 'POST', 'DELETE'], strict_slashes=False)
@app.route('/<actor_id>/www/<path:path>', methods=['GET', 'POST', 'DELETE'], strict_slashes=False)
def app_www(actor_id, path=''):
h = Handler(request)
if not h.process(actor_id=actor_id, path=path):
return Response(status=404)
if h.get_redirect():
return h.get_redirect()
if h.webobj.response.status_code == 403:
return Response(status=403)
if request.method == 'GET' and h.get_status() == 200:
if not path or path == '':
return render_template('aw-actor-www-root.html', **h.webobj.response.template_values)
elif path == 'init':
return render_template('aw-actor-www-init.html', **h.webobj.response.template_values)
elif path == 'properties':
return render_template('aw-actor-www-properties.html', **h.webobj.response.template_values)
elif path == 'property':
return render_template('aw-actor-www-property.html', **h.webobj.response.template_values)
elif path == 'trust':
return render_template('aw-actor-www-trust.html', **h.webobj.response.template_values)
return h.get_response()
@app.route('/<actor_id>/properties', methods=['GET', 'POST', 'DELETE', 'PUT'], strict_slashes=False)
@app.route('/<actor_id>/properties/<path:name>', methods=['GET', 'POST', 'DELETE', 'PUT'], strict_slashes=False)
def app_properties(actor_id, name=''):
h = Handler(request)
if not h.process(actor_id=actor_id, name=name):
return Response(status=404)
return h.get_response()
@app.route('/<actor_id>/trust', methods=['GET', 'POST', 'DELETE', 'PUT'], strict_slashes=False)
@app.route('/<actor_id>/trust/<relationship>', methods=['GET', 'POST', 'DELETE', 'PUT'], strict_slashes=False)
@app.route('/<actor_id>/trust/<relationship>/<peerid>', methods=['GET', 'POST', 'DELETE', 'PUT'], strict_slashes=False)
def app_trust(actor_id, relationship=None, peerid=None):
h = Handler(request)
if peerid:
if not h.process(actor_id=actor_id, relationship=relationship, peerid=peerid):
return Response(status=404)
elif relationship:
if not h.process(actor_id=actor_id, relationship=relationship):
return Response(status=404)
else:
if not h.process(actor_id=actor_id):
return Response(status=404)
return h.get_response()
@app.route('/<actor_id>/subscriptions', methods=['GET', 'POST', 'DELETE', 'PUT'], strict_slashes=False)
@app.route('/<actor_id>/subscriptions/<peerid>', methods=['GET', 'POST', 'DELETE', 'PUT'], strict_slashes=False)
@app.route('/<actor_id>/subscriptions/<peerid>/<subid>', methods=['GET', 'POST', 'DELETE', 'PUT'], strict_slashes=False)
@app.route('/<actor_id>/subscriptions/<peerid>/<subid>/<int:seqnr>', methods=['GET'], strict_slashes=False)
def app_subscriptions(actor_id, peerid=None, subid=None, seqnr=None):
h = Handler(request)
if seqnr:
if not h.process(actor_id=actor_id, peerid=peerid, subid=subid, seqnr=seqnr):
return Response(status=404)
elif subid:
if not h.process(actor_id=actor_id, peerid=peerid, subid=subid):
return Response(status=404)
elif peerid:
if not h.process(actor_id=actor_id, peerid=peerid):
return Response(status=404)
else:
if not h.process(actor_id=actor_id):
return Response(status=404)
return h.get_response()
@app.route('/<actor_id>/resources', methods=['GET', 'POST', 'DELETE', 'PUT'], strict_slashes=False)
@app.route('/<actor_id>/resources/<path:name>', methods=['GET', 'POST', 'DELETE', 'PUT'], strict_slashes=False)
def app_resources(actor_id, name=''):
h = Handler(request)
if not h.process(actor_id=actor_id, name=name):
return Response(status=404)
return h.get_response()
@app.route('/<actor_id>/callbacks', methods=['GET', 'POST', 'DELETE', 'PUT'], strict_slashes=False)
@app.route('/<actor_id>/callbacks/<path:name>', methods=['GET', 'POST', 'DELETE', 'PUT'], strict_slashes=False)
def app_callbacks(actor_id, name=''):
h = Handler(request)
if not h.process(actor_id=actor_id, name=name):
return Response(status=404)
return h.get_response()
@app.route('/<actor_id>/devtest', methods=['GET', 'POST', 'DELETE', 'PUT'], strict_slashes=False)
@app.route('/<actor_id>/devtest/<path:path>', methods=['GET', 'POST', 'DELETE', 'PUT'], strict_slashes=False)
def app_devtest(actor_id, path=''):
h = Handler(request)
if not h.process(actor_id=actor_id, path=path):
return Response(status=404)
return h.get_response()
@app.route('/bot', methods=['POST'], strict_slashes=False)
def app_bot():
h = Handler(request)
if not h.process(path='/bot'):
return Response(status=404)
return h.get_response()
@app.route('/oauth', methods=['GET'], strict_slashes=False)
def app_oauth_callback():
h = Handler(request)
if not h.process():
return Response(status=404)
return h.get_response()
# Here is how you can add a google verification
#@app.route('/google123456.html', methods=['GET'], strict_slashes=False)
#def app_google_verify():
# return Response("google-site-verification: google123456.html")
if __name__ == "__main__":
logging.debug('Starting up the ActingWeb Demo ...')
# Only for debugging while developing
app.run(host='0.0.0.0', debug=True, port=5000)
| []
| []
| [
"APP_HOST_PROTOCOL",
"LOG_LEVEL",
"APP_BOT_EMAIL",
"APP_HOST_FQDN",
"APP_BOT_ADMIN_ROOM",
"APP_OAUTH_ID",
"APP_BOT_SECRET",
"APP_OAUTH_KEY",
"APP_BOT_TOKEN"
]
| [] | ["APP_HOST_PROTOCOL", "LOG_LEVEL", "APP_BOT_EMAIL", "APP_HOST_FQDN", "APP_BOT_ADMIN_ROOM", "APP_OAUTH_ID", "APP_BOT_SECRET", "APP_OAUTH_KEY", "APP_BOT_TOKEN"] | python | 9 | 0 | |
yt_dlp/utils.py | #!/usr/bin/env python3
# coding: utf-8
from __future__ import unicode_literals
import base64
import binascii
import calendar
import codecs
import collections
import contextlib
import ctypes
import datetime
import email.utils
import email.header
import errno
import functools
import gzip
import hashlib
import hmac
import importlib.util
import io
import itertools
import json
import locale
import math
import operator
import os
import platform
import random
import re
import socket
import ssl
import subprocess
import sys
import tempfile
import time
import traceback
import xml.etree.ElementTree
import zlib
from .compat import (
compat_HTMLParseError,
compat_HTMLParser,
compat_HTTPError,
compat_basestring,
compat_chr,
compat_cookiejar,
compat_ctypes_WINFUNCTYPE,
compat_etree_fromstring,
compat_expanduser,
compat_html_entities,
compat_html_entities_html5,
compat_http_client,
compat_integer_types,
compat_numeric_types,
compat_kwargs,
compat_os_name,
compat_parse_qs,
compat_shlex_quote,
compat_str,
compat_struct_pack,
compat_struct_unpack,
compat_urllib_error,
compat_urllib_parse,
compat_urllib_parse_urlencode,
compat_urllib_parse_urlparse,
compat_urllib_parse_urlunparse,
compat_urllib_parse_quote,
compat_urllib_parse_quote_plus,
compat_urllib_parse_unquote_plus,
compat_urllib_request,
compat_urlparse,
compat_xpath,
)
from .socks import (
ProxyType,
sockssocket,
)
def register_socks_protocols():
# "Register" SOCKS protocols
# In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904
# URLs with protocols not in urlparse.uses_netloc are not handled correctly
for scheme in ('socks', 'socks4', 'socks4a', 'socks5'):
if scheme not in compat_urlparse.uses_netloc:
compat_urlparse.uses_netloc.append(scheme)
# This is not clearly defined otherwise
compiled_regex_type = type(re.compile(''))
def random_user_agent():
_USER_AGENT_TPL = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/%s Safari/537.36'
_CHROME_VERSIONS = (
'74.0.3729.129',
'76.0.3780.3',
'76.0.3780.2',
'74.0.3729.128',
'76.0.3780.1',
'76.0.3780.0',
'75.0.3770.15',
'74.0.3729.127',
'74.0.3729.126',
'76.0.3779.1',
'76.0.3779.0',
'75.0.3770.14',
'74.0.3729.125',
'76.0.3778.1',
'76.0.3778.0',
'75.0.3770.13',
'74.0.3729.124',
'74.0.3729.123',
'73.0.3683.121',
'76.0.3777.1',
'76.0.3777.0',
'75.0.3770.12',
'74.0.3729.122',
'76.0.3776.4',
'75.0.3770.11',
'74.0.3729.121',
'76.0.3776.3',
'76.0.3776.2',
'73.0.3683.120',
'74.0.3729.120',
'74.0.3729.119',
'74.0.3729.118',
'76.0.3776.1',
'76.0.3776.0',
'76.0.3775.5',
'75.0.3770.10',
'74.0.3729.117',
'76.0.3775.4',
'76.0.3775.3',
'74.0.3729.116',
'75.0.3770.9',
'76.0.3775.2',
'76.0.3775.1',
'76.0.3775.0',
'75.0.3770.8',
'74.0.3729.115',
'74.0.3729.114',
'76.0.3774.1',
'76.0.3774.0',
'75.0.3770.7',
'74.0.3729.113',
'74.0.3729.112',
'74.0.3729.111',
'76.0.3773.1',
'76.0.3773.0',
'75.0.3770.6',
'74.0.3729.110',
'74.0.3729.109',
'76.0.3772.1',
'76.0.3772.0',
'75.0.3770.5',
'74.0.3729.108',
'74.0.3729.107',
'76.0.3771.1',
'76.0.3771.0',
'75.0.3770.4',
'74.0.3729.106',
'74.0.3729.105',
'75.0.3770.3',
'74.0.3729.104',
'74.0.3729.103',
'74.0.3729.102',
'75.0.3770.2',
'74.0.3729.101',
'75.0.3770.1',
'75.0.3770.0',
'74.0.3729.100',
'75.0.3769.5',
'75.0.3769.4',
'74.0.3729.99',
'75.0.3769.3',
'75.0.3769.2',
'75.0.3768.6',
'74.0.3729.98',
'75.0.3769.1',
'75.0.3769.0',
'74.0.3729.97',
'73.0.3683.119',
'73.0.3683.118',
'74.0.3729.96',
'75.0.3768.5',
'75.0.3768.4',
'75.0.3768.3',
'75.0.3768.2',
'74.0.3729.95',
'74.0.3729.94',
'75.0.3768.1',
'75.0.3768.0',
'74.0.3729.93',
'74.0.3729.92',
'73.0.3683.117',
'74.0.3729.91',
'75.0.3766.3',
'74.0.3729.90',
'75.0.3767.2',
'75.0.3767.1',
'75.0.3767.0',
'74.0.3729.89',
'73.0.3683.116',
'75.0.3766.2',
'74.0.3729.88',
'75.0.3766.1',
'75.0.3766.0',
'74.0.3729.87',
'73.0.3683.115',
'74.0.3729.86',
'75.0.3765.1',
'75.0.3765.0',
'74.0.3729.85',
'73.0.3683.114',
'74.0.3729.84',
'75.0.3764.1',
'75.0.3764.0',
'74.0.3729.83',
'73.0.3683.113',
'75.0.3763.2',
'75.0.3761.4',
'74.0.3729.82',
'75.0.3763.1',
'75.0.3763.0',
'74.0.3729.81',
'73.0.3683.112',
'75.0.3762.1',
'75.0.3762.0',
'74.0.3729.80',
'75.0.3761.3',
'74.0.3729.79',
'73.0.3683.111',
'75.0.3761.2',
'74.0.3729.78',
'74.0.3729.77',
'75.0.3761.1',
'75.0.3761.0',
'73.0.3683.110',
'74.0.3729.76',
'74.0.3729.75',
'75.0.3760.0',
'74.0.3729.74',
'75.0.3759.8',
'75.0.3759.7',
'75.0.3759.6',
'74.0.3729.73',
'75.0.3759.5',
'74.0.3729.72',
'73.0.3683.109',
'75.0.3759.4',
'75.0.3759.3',
'74.0.3729.71',
'75.0.3759.2',
'74.0.3729.70',
'73.0.3683.108',
'74.0.3729.69',
'75.0.3759.1',
'75.0.3759.0',
'74.0.3729.68',
'73.0.3683.107',
'74.0.3729.67',
'75.0.3758.1',
'75.0.3758.0',
'74.0.3729.66',
'73.0.3683.106',
'74.0.3729.65',
'75.0.3757.1',
'75.0.3757.0',
'74.0.3729.64',
'73.0.3683.105',
'74.0.3729.63',
'75.0.3756.1',
'75.0.3756.0',
'74.0.3729.62',
'73.0.3683.104',
'75.0.3755.3',
'75.0.3755.2',
'73.0.3683.103',
'75.0.3755.1',
'75.0.3755.0',
'74.0.3729.61',
'73.0.3683.102',
'74.0.3729.60',
'75.0.3754.2',
'74.0.3729.59',
'75.0.3753.4',
'74.0.3729.58',
'75.0.3754.1',
'75.0.3754.0',
'74.0.3729.57',
'73.0.3683.101',
'75.0.3753.3',
'75.0.3752.2',
'75.0.3753.2',
'74.0.3729.56',
'75.0.3753.1',
'75.0.3753.0',
'74.0.3729.55',
'73.0.3683.100',
'74.0.3729.54',
'75.0.3752.1',
'75.0.3752.0',
'74.0.3729.53',
'73.0.3683.99',
'74.0.3729.52',
'75.0.3751.1',
'75.0.3751.0',
'74.0.3729.51',
'73.0.3683.98',
'74.0.3729.50',
'75.0.3750.0',
'74.0.3729.49',
'74.0.3729.48',
'74.0.3729.47',
'75.0.3749.3',
'74.0.3729.46',
'73.0.3683.97',
'75.0.3749.2',
'74.0.3729.45',
'75.0.3749.1',
'75.0.3749.0',
'74.0.3729.44',
'73.0.3683.96',
'74.0.3729.43',
'74.0.3729.42',
'75.0.3748.1',
'75.0.3748.0',
'74.0.3729.41',
'75.0.3747.1',
'73.0.3683.95',
'75.0.3746.4',
'74.0.3729.40',
'74.0.3729.39',
'75.0.3747.0',
'75.0.3746.3',
'75.0.3746.2',
'74.0.3729.38',
'75.0.3746.1',
'75.0.3746.0',
'74.0.3729.37',
'73.0.3683.94',
'75.0.3745.5',
'75.0.3745.4',
'75.0.3745.3',
'75.0.3745.2',
'74.0.3729.36',
'75.0.3745.1',
'75.0.3745.0',
'75.0.3744.2',
'74.0.3729.35',
'73.0.3683.93',
'74.0.3729.34',
'75.0.3744.1',
'75.0.3744.0',
'74.0.3729.33',
'73.0.3683.92',
'74.0.3729.32',
'74.0.3729.31',
'73.0.3683.91',
'75.0.3741.2',
'75.0.3740.5',
'74.0.3729.30',
'75.0.3741.1',
'75.0.3741.0',
'74.0.3729.29',
'75.0.3740.4',
'73.0.3683.90',
'74.0.3729.28',
'75.0.3740.3',
'73.0.3683.89',
'75.0.3740.2',
'74.0.3729.27',
'75.0.3740.1',
'75.0.3740.0',
'74.0.3729.26',
'73.0.3683.88',
'73.0.3683.87',
'74.0.3729.25',
'75.0.3739.1',
'75.0.3739.0',
'73.0.3683.86',
'74.0.3729.24',
'73.0.3683.85',
'75.0.3738.4',
'75.0.3738.3',
'75.0.3738.2',
'75.0.3738.1',
'75.0.3738.0',
'74.0.3729.23',
'73.0.3683.84',
'74.0.3729.22',
'74.0.3729.21',
'75.0.3737.1',
'75.0.3737.0',
'74.0.3729.20',
'73.0.3683.83',
'74.0.3729.19',
'75.0.3736.1',
'75.0.3736.0',
'74.0.3729.18',
'73.0.3683.82',
'74.0.3729.17',
'75.0.3735.1',
'75.0.3735.0',
'74.0.3729.16',
'73.0.3683.81',
'75.0.3734.1',
'75.0.3734.0',
'74.0.3729.15',
'73.0.3683.80',
'74.0.3729.14',
'75.0.3733.1',
'75.0.3733.0',
'75.0.3732.1',
'74.0.3729.13',
'74.0.3729.12',
'73.0.3683.79',
'74.0.3729.11',
'75.0.3732.0',
'74.0.3729.10',
'73.0.3683.78',
'74.0.3729.9',
'74.0.3729.8',
'74.0.3729.7',
'75.0.3731.3',
'75.0.3731.2',
'75.0.3731.0',
'74.0.3729.6',
'73.0.3683.77',
'73.0.3683.76',
'75.0.3730.5',
'75.0.3730.4',
'73.0.3683.75',
'74.0.3729.5',
'73.0.3683.74',
'75.0.3730.3',
'75.0.3730.2',
'74.0.3729.4',
'73.0.3683.73',
'73.0.3683.72',
'75.0.3730.1',
'75.0.3730.0',
'74.0.3729.3',
'73.0.3683.71',
'74.0.3729.2',
'73.0.3683.70',
'74.0.3729.1',
'74.0.3729.0',
'74.0.3726.4',
'73.0.3683.69',
'74.0.3726.3',
'74.0.3728.0',
'74.0.3726.2',
'73.0.3683.68',
'74.0.3726.1',
'74.0.3726.0',
'74.0.3725.4',
'73.0.3683.67',
'73.0.3683.66',
'74.0.3725.3',
'74.0.3725.2',
'74.0.3725.1',
'74.0.3724.8',
'74.0.3725.0',
'73.0.3683.65',
'74.0.3724.7',
'74.0.3724.6',
'74.0.3724.5',
'74.0.3724.4',
'74.0.3724.3',
'74.0.3724.2',
'74.0.3724.1',
'74.0.3724.0',
'73.0.3683.64',
'74.0.3723.1',
'74.0.3723.0',
'73.0.3683.63',
'74.0.3722.1',
'74.0.3722.0',
'73.0.3683.62',
'74.0.3718.9',
'74.0.3702.3',
'74.0.3721.3',
'74.0.3721.2',
'74.0.3721.1',
'74.0.3721.0',
'74.0.3720.6',
'73.0.3683.61',
'72.0.3626.122',
'73.0.3683.60',
'74.0.3720.5',
'72.0.3626.121',
'74.0.3718.8',
'74.0.3720.4',
'74.0.3720.3',
'74.0.3718.7',
'74.0.3720.2',
'74.0.3720.1',
'74.0.3720.0',
'74.0.3718.6',
'74.0.3719.5',
'73.0.3683.59',
'74.0.3718.5',
'74.0.3718.4',
'74.0.3719.4',
'74.0.3719.3',
'74.0.3719.2',
'74.0.3719.1',
'73.0.3683.58',
'74.0.3719.0',
'73.0.3683.57',
'73.0.3683.56',
'74.0.3718.3',
'73.0.3683.55',
'74.0.3718.2',
'74.0.3718.1',
'74.0.3718.0',
'73.0.3683.54',
'74.0.3717.2',
'73.0.3683.53',
'74.0.3717.1',
'74.0.3717.0',
'73.0.3683.52',
'74.0.3716.1',
'74.0.3716.0',
'73.0.3683.51',
'74.0.3715.1',
'74.0.3715.0',
'73.0.3683.50',
'74.0.3711.2',
'74.0.3714.2',
'74.0.3713.3',
'74.0.3714.1',
'74.0.3714.0',
'73.0.3683.49',
'74.0.3713.1',
'74.0.3713.0',
'72.0.3626.120',
'73.0.3683.48',
'74.0.3712.2',
'74.0.3712.1',
'74.0.3712.0',
'73.0.3683.47',
'72.0.3626.119',
'73.0.3683.46',
'74.0.3710.2',
'72.0.3626.118',
'74.0.3711.1',
'74.0.3711.0',
'73.0.3683.45',
'72.0.3626.117',
'74.0.3710.1',
'74.0.3710.0',
'73.0.3683.44',
'72.0.3626.116',
'74.0.3709.1',
'74.0.3709.0',
'74.0.3704.9',
'73.0.3683.43',
'72.0.3626.115',
'74.0.3704.8',
'74.0.3704.7',
'74.0.3708.0',
'74.0.3706.7',
'74.0.3704.6',
'73.0.3683.42',
'72.0.3626.114',
'74.0.3706.6',
'72.0.3626.113',
'74.0.3704.5',
'74.0.3706.5',
'74.0.3706.4',
'74.0.3706.3',
'74.0.3706.2',
'74.0.3706.1',
'74.0.3706.0',
'73.0.3683.41',
'72.0.3626.112',
'74.0.3705.1',
'74.0.3705.0',
'73.0.3683.40',
'72.0.3626.111',
'73.0.3683.39',
'74.0.3704.4',
'73.0.3683.38',
'74.0.3704.3',
'74.0.3704.2',
'74.0.3704.1',
'74.0.3704.0',
'73.0.3683.37',
'72.0.3626.110',
'72.0.3626.109',
'74.0.3703.3',
'74.0.3703.2',
'73.0.3683.36',
'74.0.3703.1',
'74.0.3703.0',
'73.0.3683.35',
'72.0.3626.108',
'74.0.3702.2',
'74.0.3699.3',
'74.0.3702.1',
'74.0.3702.0',
'73.0.3683.34',
'72.0.3626.107',
'73.0.3683.33',
'74.0.3701.1',
'74.0.3701.0',
'73.0.3683.32',
'73.0.3683.31',
'72.0.3626.105',
'74.0.3700.1',
'74.0.3700.0',
'73.0.3683.29',
'72.0.3626.103',
'74.0.3699.2',
'74.0.3699.1',
'74.0.3699.0',
'73.0.3683.28',
'72.0.3626.102',
'73.0.3683.27',
'73.0.3683.26',
'74.0.3698.0',
'74.0.3696.2',
'72.0.3626.101',
'73.0.3683.25',
'74.0.3696.1',
'74.0.3696.0',
'74.0.3694.8',
'72.0.3626.100',
'74.0.3694.7',
'74.0.3694.6',
'74.0.3694.5',
'74.0.3694.4',
'72.0.3626.99',
'72.0.3626.98',
'74.0.3694.3',
'73.0.3683.24',
'72.0.3626.97',
'72.0.3626.96',
'72.0.3626.95',
'73.0.3683.23',
'72.0.3626.94',
'73.0.3683.22',
'73.0.3683.21',
'72.0.3626.93',
'74.0.3694.2',
'72.0.3626.92',
'74.0.3694.1',
'74.0.3694.0',
'74.0.3693.6',
'73.0.3683.20',
'72.0.3626.91',
'74.0.3693.5',
'74.0.3693.4',
'74.0.3693.3',
'74.0.3693.2',
'73.0.3683.19',
'74.0.3693.1',
'74.0.3693.0',
'73.0.3683.18',
'72.0.3626.90',
'74.0.3692.1',
'74.0.3692.0',
'73.0.3683.17',
'72.0.3626.89',
'74.0.3687.3',
'74.0.3691.1',
'74.0.3691.0',
'73.0.3683.16',
'72.0.3626.88',
'72.0.3626.87',
'73.0.3683.15',
'74.0.3690.1',
'74.0.3690.0',
'73.0.3683.14',
'72.0.3626.86',
'73.0.3683.13',
'73.0.3683.12',
'74.0.3689.1',
'74.0.3689.0',
'73.0.3683.11',
'72.0.3626.85',
'73.0.3683.10',
'72.0.3626.84',
'73.0.3683.9',
'74.0.3688.1',
'74.0.3688.0',
'73.0.3683.8',
'72.0.3626.83',
'74.0.3687.2',
'74.0.3687.1',
'74.0.3687.0',
'73.0.3683.7',
'72.0.3626.82',
'74.0.3686.4',
'72.0.3626.81',
'74.0.3686.3',
'74.0.3686.2',
'74.0.3686.1',
'74.0.3686.0',
'73.0.3683.6',
'72.0.3626.80',
'74.0.3685.1',
'74.0.3685.0',
'73.0.3683.5',
'72.0.3626.79',
'74.0.3684.1',
'74.0.3684.0',
'73.0.3683.4',
'72.0.3626.78',
'72.0.3626.77',
'73.0.3683.3',
'73.0.3683.2',
'72.0.3626.76',
'73.0.3683.1',
'73.0.3683.0',
'72.0.3626.75',
'71.0.3578.141',
'73.0.3682.1',
'73.0.3682.0',
'72.0.3626.74',
'71.0.3578.140',
'73.0.3681.4',
'73.0.3681.3',
'73.0.3681.2',
'73.0.3681.1',
'73.0.3681.0',
'72.0.3626.73',
'71.0.3578.139',
'72.0.3626.72',
'72.0.3626.71',
'73.0.3680.1',
'73.0.3680.0',
'72.0.3626.70',
'71.0.3578.138',
'73.0.3678.2',
'73.0.3679.1',
'73.0.3679.0',
'72.0.3626.69',
'71.0.3578.137',
'73.0.3678.1',
'73.0.3678.0',
'71.0.3578.136',
'73.0.3677.1',
'73.0.3677.0',
'72.0.3626.68',
'72.0.3626.67',
'71.0.3578.135',
'73.0.3676.1',
'73.0.3676.0',
'73.0.3674.2',
'72.0.3626.66',
'71.0.3578.134',
'73.0.3674.1',
'73.0.3674.0',
'72.0.3626.65',
'71.0.3578.133',
'73.0.3673.2',
'73.0.3673.1',
'73.0.3673.0',
'72.0.3626.64',
'71.0.3578.132',
'72.0.3626.63',
'72.0.3626.62',
'72.0.3626.61',
'72.0.3626.60',
'73.0.3672.1',
'73.0.3672.0',
'72.0.3626.59',
'71.0.3578.131',
'73.0.3671.3',
'73.0.3671.2',
'73.0.3671.1',
'73.0.3671.0',
'72.0.3626.58',
'71.0.3578.130',
'73.0.3670.1',
'73.0.3670.0',
'72.0.3626.57',
'71.0.3578.129',
'73.0.3669.1',
'73.0.3669.0',
'72.0.3626.56',
'71.0.3578.128',
'73.0.3668.2',
'73.0.3668.1',
'73.0.3668.0',
'72.0.3626.55',
'71.0.3578.127',
'73.0.3667.2',
'73.0.3667.1',
'73.0.3667.0',
'72.0.3626.54',
'71.0.3578.126',
'73.0.3666.1',
'73.0.3666.0',
'72.0.3626.53',
'71.0.3578.125',
'73.0.3665.4',
'73.0.3665.3',
'72.0.3626.52',
'73.0.3665.2',
'73.0.3664.4',
'73.0.3665.1',
'73.0.3665.0',
'72.0.3626.51',
'71.0.3578.124',
'72.0.3626.50',
'73.0.3664.3',
'73.0.3664.2',
'73.0.3664.1',
'73.0.3664.0',
'73.0.3663.2',
'72.0.3626.49',
'71.0.3578.123',
'73.0.3663.1',
'73.0.3663.0',
'72.0.3626.48',
'71.0.3578.122',
'73.0.3662.1',
'73.0.3662.0',
'72.0.3626.47',
'71.0.3578.121',
'73.0.3661.1',
'72.0.3626.46',
'73.0.3661.0',
'72.0.3626.45',
'71.0.3578.120',
'73.0.3660.2',
'73.0.3660.1',
'73.0.3660.0',
'72.0.3626.44',
'71.0.3578.119',
'73.0.3659.1',
'73.0.3659.0',
'72.0.3626.43',
'71.0.3578.118',
'73.0.3658.1',
'73.0.3658.0',
'72.0.3626.42',
'71.0.3578.117',
'73.0.3657.1',
'73.0.3657.0',
'72.0.3626.41',
'71.0.3578.116',
'73.0.3656.1',
'73.0.3656.0',
'72.0.3626.40',
'71.0.3578.115',
'73.0.3655.1',
'73.0.3655.0',
'72.0.3626.39',
'71.0.3578.114',
'73.0.3654.1',
'73.0.3654.0',
'72.0.3626.38',
'71.0.3578.113',
'73.0.3653.1',
'73.0.3653.0',
'72.0.3626.37',
'71.0.3578.112',
'73.0.3652.1',
'73.0.3652.0',
'72.0.3626.36',
'71.0.3578.111',
'73.0.3651.1',
'73.0.3651.0',
'72.0.3626.35',
'71.0.3578.110',
'73.0.3650.1',
'73.0.3650.0',
'72.0.3626.34',
'71.0.3578.109',
'73.0.3649.1',
'73.0.3649.0',
'72.0.3626.33',
'71.0.3578.108',
'73.0.3648.2',
'73.0.3648.1',
'73.0.3648.0',
'72.0.3626.32',
'71.0.3578.107',
'73.0.3647.2',
'73.0.3647.1',
'73.0.3647.0',
'72.0.3626.31',
'71.0.3578.106',
'73.0.3635.3',
'73.0.3646.2',
'73.0.3646.1',
'73.0.3646.0',
'72.0.3626.30',
'71.0.3578.105',
'72.0.3626.29',
'73.0.3645.2',
'73.0.3645.1',
'73.0.3645.0',
'72.0.3626.28',
'71.0.3578.104',
'72.0.3626.27',
'72.0.3626.26',
'72.0.3626.25',
'72.0.3626.24',
'73.0.3644.0',
'73.0.3643.2',
'72.0.3626.23',
'71.0.3578.103',
'73.0.3643.1',
'73.0.3643.0',
'72.0.3626.22',
'71.0.3578.102',
'73.0.3642.1',
'73.0.3642.0',
'72.0.3626.21',
'71.0.3578.101',
'73.0.3641.1',
'73.0.3641.0',
'72.0.3626.20',
'71.0.3578.100',
'72.0.3626.19',
'73.0.3640.1',
'73.0.3640.0',
'72.0.3626.18',
'73.0.3639.1',
'71.0.3578.99',
'73.0.3639.0',
'72.0.3626.17',
'73.0.3638.2',
'72.0.3626.16',
'73.0.3638.1',
'73.0.3638.0',
'72.0.3626.15',
'71.0.3578.98',
'73.0.3635.2',
'71.0.3578.97',
'73.0.3637.1',
'73.0.3637.0',
'72.0.3626.14',
'71.0.3578.96',
'71.0.3578.95',
'72.0.3626.13',
'71.0.3578.94',
'73.0.3636.2',
'71.0.3578.93',
'73.0.3636.1',
'73.0.3636.0',
'72.0.3626.12',
'71.0.3578.92',
'73.0.3635.1',
'73.0.3635.0',
'72.0.3626.11',
'71.0.3578.91',
'73.0.3634.2',
'73.0.3634.1',
'73.0.3634.0',
'72.0.3626.10',
'71.0.3578.90',
'71.0.3578.89',
'73.0.3633.2',
'73.0.3633.1',
'73.0.3633.0',
'72.0.3610.4',
'72.0.3626.9',
'71.0.3578.88',
'73.0.3632.5',
'73.0.3632.4',
'73.0.3632.3',
'73.0.3632.2',
'73.0.3632.1',
'73.0.3632.0',
'72.0.3626.8',
'71.0.3578.87',
'73.0.3631.2',
'73.0.3631.1',
'73.0.3631.0',
'72.0.3626.7',
'71.0.3578.86',
'72.0.3626.6',
'73.0.3630.1',
'73.0.3630.0',
'72.0.3626.5',
'71.0.3578.85',
'72.0.3626.4',
'73.0.3628.3',
'73.0.3628.2',
'73.0.3629.1',
'73.0.3629.0',
'72.0.3626.3',
'71.0.3578.84',
'73.0.3628.1',
'73.0.3628.0',
'71.0.3578.83',
'73.0.3627.1',
'73.0.3627.0',
'72.0.3626.2',
'71.0.3578.82',
'71.0.3578.81',
'71.0.3578.80',
'72.0.3626.1',
'72.0.3626.0',
'71.0.3578.79',
'70.0.3538.124',
'71.0.3578.78',
'72.0.3623.4',
'72.0.3625.2',
'72.0.3625.1',
'72.0.3625.0',
'71.0.3578.77',
'70.0.3538.123',
'72.0.3624.4',
'72.0.3624.3',
'72.0.3624.2',
'71.0.3578.76',
'72.0.3624.1',
'72.0.3624.0',
'72.0.3623.3',
'71.0.3578.75',
'70.0.3538.122',
'71.0.3578.74',
'72.0.3623.2',
'72.0.3610.3',
'72.0.3623.1',
'72.0.3623.0',
'72.0.3622.3',
'72.0.3622.2',
'71.0.3578.73',
'70.0.3538.121',
'72.0.3622.1',
'72.0.3622.0',
'71.0.3578.72',
'70.0.3538.120',
'72.0.3621.1',
'72.0.3621.0',
'71.0.3578.71',
'70.0.3538.119',
'72.0.3620.1',
'72.0.3620.0',
'71.0.3578.70',
'70.0.3538.118',
'71.0.3578.69',
'72.0.3619.1',
'72.0.3619.0',
'71.0.3578.68',
'70.0.3538.117',
'71.0.3578.67',
'72.0.3618.1',
'72.0.3618.0',
'71.0.3578.66',
'70.0.3538.116',
'72.0.3617.1',
'72.0.3617.0',
'71.0.3578.65',
'70.0.3538.115',
'72.0.3602.3',
'71.0.3578.64',
'72.0.3616.1',
'72.0.3616.0',
'71.0.3578.63',
'70.0.3538.114',
'71.0.3578.62',
'72.0.3615.1',
'72.0.3615.0',
'71.0.3578.61',
'70.0.3538.113',
'72.0.3614.1',
'72.0.3614.0',
'71.0.3578.60',
'70.0.3538.112',
'72.0.3613.1',
'72.0.3613.0',
'71.0.3578.59',
'70.0.3538.111',
'72.0.3612.2',
'72.0.3612.1',
'72.0.3612.0',
'70.0.3538.110',
'71.0.3578.58',
'70.0.3538.109',
'72.0.3611.2',
'72.0.3611.1',
'72.0.3611.0',
'71.0.3578.57',
'70.0.3538.108',
'72.0.3610.2',
'71.0.3578.56',
'71.0.3578.55',
'72.0.3610.1',
'72.0.3610.0',
'71.0.3578.54',
'70.0.3538.107',
'71.0.3578.53',
'72.0.3609.3',
'71.0.3578.52',
'72.0.3609.2',
'71.0.3578.51',
'72.0.3608.5',
'72.0.3609.1',
'72.0.3609.0',
'71.0.3578.50',
'70.0.3538.106',
'72.0.3608.4',
'72.0.3608.3',
'72.0.3608.2',
'71.0.3578.49',
'72.0.3608.1',
'72.0.3608.0',
'70.0.3538.105',
'71.0.3578.48',
'72.0.3607.1',
'72.0.3607.0',
'71.0.3578.47',
'70.0.3538.104',
'72.0.3606.2',
'72.0.3606.1',
'72.0.3606.0',
'71.0.3578.46',
'70.0.3538.103',
'70.0.3538.102',
'72.0.3605.3',
'72.0.3605.2',
'72.0.3605.1',
'72.0.3605.0',
'71.0.3578.45',
'70.0.3538.101',
'71.0.3578.44',
'71.0.3578.43',
'70.0.3538.100',
'70.0.3538.99',
'71.0.3578.42',
'72.0.3604.1',
'72.0.3604.0',
'71.0.3578.41',
'70.0.3538.98',
'71.0.3578.40',
'72.0.3603.2',
'72.0.3603.1',
'72.0.3603.0',
'71.0.3578.39',
'70.0.3538.97',
'72.0.3602.2',
'71.0.3578.38',
'71.0.3578.37',
'72.0.3602.1',
'72.0.3602.0',
'71.0.3578.36',
'70.0.3538.96',
'72.0.3601.1',
'72.0.3601.0',
'71.0.3578.35',
'70.0.3538.95',
'72.0.3600.1',
'72.0.3600.0',
'71.0.3578.34',
'70.0.3538.94',
'72.0.3599.3',
'72.0.3599.2',
'72.0.3599.1',
'72.0.3599.0',
'71.0.3578.33',
'70.0.3538.93',
'72.0.3598.1',
'72.0.3598.0',
'71.0.3578.32',
'70.0.3538.87',
'72.0.3597.1',
'72.0.3597.0',
'72.0.3596.2',
'71.0.3578.31',
'70.0.3538.86',
'71.0.3578.30',
'71.0.3578.29',
'72.0.3596.1',
'72.0.3596.0',
'71.0.3578.28',
'70.0.3538.85',
'72.0.3595.2',
'72.0.3591.3',
'72.0.3595.1',
'72.0.3595.0',
'71.0.3578.27',
'70.0.3538.84',
'72.0.3594.1',
'72.0.3594.0',
'71.0.3578.26',
'70.0.3538.83',
'72.0.3593.2',
'72.0.3593.1',
'72.0.3593.0',
'71.0.3578.25',
'70.0.3538.82',
'72.0.3589.3',
'72.0.3592.2',
'72.0.3592.1',
'72.0.3592.0',
'71.0.3578.24',
'72.0.3589.2',
'70.0.3538.81',
'70.0.3538.80',
'72.0.3591.2',
'72.0.3591.1',
'72.0.3591.0',
'71.0.3578.23',
'70.0.3538.79',
'71.0.3578.22',
'72.0.3590.1',
'72.0.3590.0',
'71.0.3578.21',
'70.0.3538.78',
'70.0.3538.77',
'72.0.3589.1',
'72.0.3589.0',
'71.0.3578.20',
'70.0.3538.76',
'71.0.3578.19',
'70.0.3538.75',
'72.0.3588.1',
'72.0.3588.0',
'71.0.3578.18',
'70.0.3538.74',
'72.0.3586.2',
'72.0.3587.0',
'71.0.3578.17',
'70.0.3538.73',
'72.0.3586.1',
'72.0.3586.0',
'71.0.3578.16',
'70.0.3538.72',
'72.0.3585.1',
'72.0.3585.0',
'71.0.3578.15',
'70.0.3538.71',
'71.0.3578.14',
'72.0.3584.1',
'72.0.3584.0',
'71.0.3578.13',
'70.0.3538.70',
'72.0.3583.2',
'71.0.3578.12',
'72.0.3583.1',
'72.0.3583.0',
'71.0.3578.11',
'70.0.3538.69',
'71.0.3578.10',
'72.0.3582.0',
'72.0.3581.4',
'71.0.3578.9',
'70.0.3538.67',
'72.0.3581.3',
'72.0.3581.2',
'72.0.3581.1',
'72.0.3581.0',
'71.0.3578.8',
'70.0.3538.66',
'72.0.3580.1',
'72.0.3580.0',
'71.0.3578.7',
'70.0.3538.65',
'71.0.3578.6',
'72.0.3579.1',
'72.0.3579.0',
'71.0.3578.5',
'70.0.3538.64',
'71.0.3578.4',
'71.0.3578.3',
'71.0.3578.2',
'71.0.3578.1',
'71.0.3578.0',
'70.0.3538.63',
'69.0.3497.128',
'70.0.3538.62',
'70.0.3538.61',
'70.0.3538.60',
'70.0.3538.59',
'71.0.3577.1',
'71.0.3577.0',
'70.0.3538.58',
'69.0.3497.127',
'71.0.3576.2',
'71.0.3576.1',
'71.0.3576.0',
'70.0.3538.57',
'70.0.3538.56',
'71.0.3575.2',
'70.0.3538.55',
'69.0.3497.126',
'70.0.3538.54',
'71.0.3575.1',
'71.0.3575.0',
'71.0.3574.1',
'71.0.3574.0',
'70.0.3538.53',
'69.0.3497.125',
'70.0.3538.52',
'71.0.3573.1',
'71.0.3573.0',
'70.0.3538.51',
'69.0.3497.124',
'71.0.3572.1',
'71.0.3572.0',
'70.0.3538.50',
'69.0.3497.123',
'71.0.3571.2',
'70.0.3538.49',
'69.0.3497.122',
'71.0.3571.1',
'71.0.3571.0',
'70.0.3538.48',
'69.0.3497.121',
'71.0.3570.1',
'71.0.3570.0',
'70.0.3538.47',
'69.0.3497.120',
'71.0.3568.2',
'71.0.3569.1',
'71.0.3569.0',
'70.0.3538.46',
'69.0.3497.119',
'70.0.3538.45',
'71.0.3568.1',
'71.0.3568.0',
'70.0.3538.44',
'69.0.3497.118',
'70.0.3538.43',
'70.0.3538.42',
'71.0.3567.1',
'71.0.3567.0',
'70.0.3538.41',
'69.0.3497.117',
'71.0.3566.1',
'71.0.3566.0',
'70.0.3538.40',
'69.0.3497.116',
'71.0.3565.1',
'71.0.3565.0',
'70.0.3538.39',
'69.0.3497.115',
'71.0.3564.1',
'71.0.3564.0',
'70.0.3538.38',
'69.0.3497.114',
'71.0.3563.0',
'71.0.3562.2',
'70.0.3538.37',
'69.0.3497.113',
'70.0.3538.36',
'70.0.3538.35',
'71.0.3562.1',
'71.0.3562.0',
'70.0.3538.34',
'69.0.3497.112',
'70.0.3538.33',
'71.0.3561.1',
'71.0.3561.0',
'70.0.3538.32',
'69.0.3497.111',
'71.0.3559.6',
'71.0.3560.1',
'71.0.3560.0',
'71.0.3559.5',
'71.0.3559.4',
'70.0.3538.31',
'69.0.3497.110',
'71.0.3559.3',
'70.0.3538.30',
'69.0.3497.109',
'71.0.3559.2',
'71.0.3559.1',
'71.0.3559.0',
'70.0.3538.29',
'69.0.3497.108',
'71.0.3558.2',
'71.0.3558.1',
'71.0.3558.0',
'70.0.3538.28',
'69.0.3497.107',
'71.0.3557.2',
'71.0.3557.1',
'71.0.3557.0',
'70.0.3538.27',
'69.0.3497.106',
'71.0.3554.4',
'70.0.3538.26',
'71.0.3556.1',
'71.0.3556.0',
'70.0.3538.25',
'71.0.3554.3',
'69.0.3497.105',
'71.0.3554.2',
'70.0.3538.24',
'69.0.3497.104',
'71.0.3555.2',
'70.0.3538.23',
'71.0.3555.1',
'71.0.3555.0',
'70.0.3538.22',
'69.0.3497.103',
'71.0.3554.1',
'71.0.3554.0',
'70.0.3538.21',
'69.0.3497.102',
'71.0.3553.3',
'70.0.3538.20',
'69.0.3497.101',
'71.0.3553.2',
'69.0.3497.100',
'71.0.3553.1',
'71.0.3553.0',
'70.0.3538.19',
'69.0.3497.99',
'69.0.3497.98',
'69.0.3497.97',
'71.0.3552.6',
'71.0.3552.5',
'71.0.3552.4',
'71.0.3552.3',
'71.0.3552.2',
'71.0.3552.1',
'71.0.3552.0',
'70.0.3538.18',
'69.0.3497.96',
'71.0.3551.3',
'71.0.3551.2',
'71.0.3551.1',
'71.0.3551.0',
'70.0.3538.17',
'69.0.3497.95',
'71.0.3550.3',
'71.0.3550.2',
'71.0.3550.1',
'71.0.3550.0',
'70.0.3538.16',
'69.0.3497.94',
'71.0.3549.1',
'71.0.3549.0',
'70.0.3538.15',
'69.0.3497.93',
'69.0.3497.92',
'71.0.3548.1',
'71.0.3548.0',
'70.0.3538.14',
'69.0.3497.91',
'71.0.3547.1',
'71.0.3547.0',
'70.0.3538.13',
'69.0.3497.90',
'71.0.3546.2',
'69.0.3497.89',
'71.0.3546.1',
'71.0.3546.0',
'70.0.3538.12',
'69.0.3497.88',
'71.0.3545.4',
'71.0.3545.3',
'71.0.3545.2',
'71.0.3545.1',
'71.0.3545.0',
'70.0.3538.11',
'69.0.3497.87',
'71.0.3544.5',
'71.0.3544.4',
'71.0.3544.3',
'71.0.3544.2',
'71.0.3544.1',
'71.0.3544.0',
'69.0.3497.86',
'70.0.3538.10',
'69.0.3497.85',
'70.0.3538.9',
'69.0.3497.84',
'71.0.3543.4',
'70.0.3538.8',
'71.0.3543.3',
'71.0.3543.2',
'71.0.3543.1',
'71.0.3543.0',
'70.0.3538.7',
'69.0.3497.83',
'71.0.3542.2',
'71.0.3542.1',
'71.0.3542.0',
'70.0.3538.6',
'69.0.3497.82',
'69.0.3497.81',
'71.0.3541.1',
'71.0.3541.0',
'70.0.3538.5',
'69.0.3497.80',
'71.0.3540.1',
'71.0.3540.0',
'70.0.3538.4',
'69.0.3497.79',
'70.0.3538.3',
'71.0.3539.1',
'71.0.3539.0',
'69.0.3497.78',
'68.0.3440.134',
'69.0.3497.77',
'70.0.3538.2',
'70.0.3538.1',
'70.0.3538.0',
'69.0.3497.76',
'68.0.3440.133',
'69.0.3497.75',
'70.0.3537.2',
'70.0.3537.1',
'70.0.3537.0',
'69.0.3497.74',
'68.0.3440.132',
'70.0.3536.0',
'70.0.3535.5',
'70.0.3535.4',
'70.0.3535.3',
'69.0.3497.73',
'68.0.3440.131',
'70.0.3532.8',
'70.0.3532.7',
'69.0.3497.72',
'69.0.3497.71',
'70.0.3535.2',
'70.0.3535.1',
'70.0.3535.0',
'69.0.3497.70',
'68.0.3440.130',
'69.0.3497.69',
'68.0.3440.129',
'70.0.3534.4',
'70.0.3534.3',
'70.0.3534.2',
'70.0.3534.1',
'70.0.3534.0',
'69.0.3497.68',
'68.0.3440.128',
'70.0.3533.2',
'70.0.3533.1',
'70.0.3533.0',
'69.0.3497.67',
'68.0.3440.127',
'70.0.3532.6',
'70.0.3532.5',
'70.0.3532.4',
'69.0.3497.66',
'68.0.3440.126',
'70.0.3532.3',
'70.0.3532.2',
'70.0.3532.1',
'69.0.3497.60',
'69.0.3497.65',
'69.0.3497.64',
'70.0.3532.0',
'70.0.3531.0',
'70.0.3530.4',
'70.0.3530.3',
'70.0.3530.2',
'69.0.3497.58',
'68.0.3440.125',
'69.0.3497.57',
'69.0.3497.56',
'69.0.3497.55',
'69.0.3497.54',
'70.0.3530.1',
'70.0.3530.0',
'69.0.3497.53',
'68.0.3440.124',
'69.0.3497.52',
'70.0.3529.3',
'70.0.3529.2',
'70.0.3529.1',
'70.0.3529.0',
'69.0.3497.51',
'70.0.3528.4',
'68.0.3440.123',
'70.0.3528.3',
'70.0.3528.2',
'70.0.3528.1',
'70.0.3528.0',
'69.0.3497.50',
'68.0.3440.122',
'70.0.3527.1',
'70.0.3527.0',
'69.0.3497.49',
'68.0.3440.121',
'70.0.3526.1',
'70.0.3526.0',
'68.0.3440.120',
'69.0.3497.48',
'69.0.3497.47',
'68.0.3440.119',
'68.0.3440.118',
'70.0.3525.5',
'70.0.3525.4',
'70.0.3525.3',
'68.0.3440.117',
'69.0.3497.46',
'70.0.3525.2',
'70.0.3525.1',
'70.0.3525.0',
'69.0.3497.45',
'68.0.3440.116',
'70.0.3524.4',
'70.0.3524.3',
'69.0.3497.44',
'70.0.3524.2',
'70.0.3524.1',
'70.0.3524.0',
'70.0.3523.2',
'69.0.3497.43',
'68.0.3440.115',
'70.0.3505.9',
'69.0.3497.42',
'70.0.3505.8',
'70.0.3523.1',
'70.0.3523.0',
'69.0.3497.41',
'68.0.3440.114',
'70.0.3505.7',
'69.0.3497.40',
'70.0.3522.1',
'70.0.3522.0',
'70.0.3521.2',
'69.0.3497.39',
'68.0.3440.113',
'70.0.3505.6',
'70.0.3521.1',
'70.0.3521.0',
'69.0.3497.38',
'68.0.3440.112',
'70.0.3520.1',
'70.0.3520.0',
'69.0.3497.37',
'68.0.3440.111',
'70.0.3519.3',
'70.0.3519.2',
'70.0.3519.1',
'70.0.3519.0',
'69.0.3497.36',
'68.0.3440.110',
'70.0.3518.1',
'70.0.3518.0',
'69.0.3497.35',
'69.0.3497.34',
'68.0.3440.109',
'70.0.3517.1',
'70.0.3517.0',
'69.0.3497.33',
'68.0.3440.108',
'69.0.3497.32',
'70.0.3516.3',
'70.0.3516.2',
'70.0.3516.1',
'70.0.3516.0',
'69.0.3497.31',
'68.0.3440.107',
'70.0.3515.4',
'68.0.3440.106',
'70.0.3515.3',
'70.0.3515.2',
'70.0.3515.1',
'70.0.3515.0',
'69.0.3497.30',
'68.0.3440.105',
'68.0.3440.104',
'70.0.3514.2',
'70.0.3514.1',
'70.0.3514.0',
'69.0.3497.29',
'68.0.3440.103',
'70.0.3513.1',
'70.0.3513.0',
'69.0.3497.28',
)
return _USER_AGENT_TPL % random.choice(_CHROME_VERSIONS)
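# Illustrative sketch (hypothetical helper, not part of the module's API):
# every UA produced above is a desktop Chrome string, so sites that vary
# markup by browser family see a consistent client.
def _example_random_user_agent():
    ua = random_user_agent()
    assert ua.startswith('Mozilla/5.0 (Windows NT 10.0; Win64; x64)')
    assert 'Chrome/' in ua and ua.endswith('Safari/537.36')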
std_headers = {
'User-Agent': random_user_agent(),
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-us,en;q=0.5',
}
USER_AGENTS = {
'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
}
NO_DEFAULT = object()
ENGLISH_MONTH_NAMES = [
'January', 'February', 'March', 'April', 'May', 'June',
'July', 'August', 'September', 'October', 'November', 'December']
MONTH_NAMES = {
'en': ENGLISH_MONTH_NAMES,
'fr': [
'janvier', 'février', 'mars', 'avril', 'mai', 'juin',
'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'],
}
KNOWN_EXTENSIONS = (
'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac',
'flv', 'f4v', 'f4a', 'f4b',
'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus',
'mkv', 'mka', 'mk3d',
'avi', 'divx',
'mov',
'asf', 'wmv', 'wma',
'3gp', '3g2',
'mp3',
'flac',
'ape',
'wav',
'f4f', 'f4m', 'm3u8', 'smil')
# needed for sanitizing filenames in restricted mode
ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ',
itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUY', ['TH', 'ss'],
'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuy', ['th'], 'y')))
DATE_FORMATS = (
'%d %B %Y',
'%d %b %Y',
'%B %d %Y',
'%B %dst %Y',
'%B %dnd %Y',
'%B %drd %Y',
'%B %dth %Y',
'%b %d %Y',
'%b %dst %Y',
'%b %dnd %Y',
'%b %drd %Y',
'%b %dth %Y',
'%b %dst %Y %I:%M',
'%b %dnd %Y %I:%M',
'%b %drd %Y %I:%M',
'%b %dth %Y %I:%M',
'%Y %m %d',
'%Y-%m-%d',
'%Y.%m.%d.',
'%Y/%m/%d',
'%Y/%m/%d %H:%M',
'%Y/%m/%d %H:%M:%S',
'%Y%m%d%H%M',
'%Y%m%d%H%M%S',
'%Y-%m-%d %H:%M',
'%Y-%m-%d %H:%M:%S',
'%Y-%m-%d %H:%M:%S.%f',
'%Y-%m-%d %H:%M:%S:%f',
'%d.%m.%Y %H:%M',
'%d.%m.%Y %H.%M',
'%Y-%m-%dT%H:%M:%SZ',
'%Y-%m-%dT%H:%M:%S.%fZ',
'%Y-%m-%dT%H:%M:%S.%f0Z',
'%Y-%m-%dT%H:%M:%S',
'%Y-%m-%dT%H:%M:%S.%f',
'%Y-%m-%dT%H:%M',
'%b %d %Y at %H:%M',
'%b %d %Y at %H:%M:%S',
'%B %d %Y at %H:%M',
'%B %d %Y at %H:%M:%S',
'%H:%M %d-%b-%Y',
)
DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS)
DATE_FORMATS_DAY_FIRST.extend([
'%d-%m-%Y',
'%d.%m.%Y',
'%d.%m.%y',
'%d/%m/%Y',
'%d/%m/%y',
'%d/%m/%Y %H:%M:%S',
])
DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS)
DATE_FORMATS_MONTH_FIRST.extend([
'%m-%d-%Y',
'%m.%d.%Y',
'%m/%d/%Y',
'%m/%d/%y',
'%m/%d/%Y %H:%M:%S',
])
PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)"
JSON_LD_RE = r'(?is)<script[^>]+type=(["\']?)application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>'
def preferredencoding():
"""Get preferred encoding.
Returns the best encoding scheme for the system, based on
locale.getpreferredencoding() and some further tweaks.
"""
try:
pref = locale.getpreferredencoding()
'TEST'.encode(pref)
except Exception:
pref = 'UTF-8'
return pref
def write_json_file(obj, fn):
""" Encode obj as JSON and write it to fn, atomically if possible """
fn = encodeFilename(fn)
if sys.version_info < (3, 0) and sys.platform != 'win32':
encoding = get_filesystem_encoding()
        # os.path.basename returns a bytes object, but NamedTemporaryFile
        # will fail if the filename contains non-ASCII characters unless we
        # use a unicode object
        path_basename = lambda f: os.path.basename(f).decode(encoding)
        # the same for os.path.dirname
        path_dirname = lambda f: os.path.dirname(f).decode(encoding)
else:
path_basename = os.path.basename
path_dirname = os.path.dirname
args = {
'suffix': '.tmp',
'prefix': path_basename(fn) + '.',
'dir': path_dirname(fn),
'delete': False,
}
# In Python 2.x, json.dump expects a bytestream.
# In Python 3.x, it writes to a character stream
if sys.version_info < (3, 0):
args['mode'] = 'wb'
else:
args.update({
'mode': 'w',
'encoding': 'utf-8',
})
tf = tempfile.NamedTemporaryFile(**compat_kwargs(args))
try:
with tf:
json.dump(obj, tf)
if sys.platform == 'win32':
# Need to remove existing file on Windows, else os.rename raises
# WindowsError or FileExistsError.
try:
os.unlink(fn)
except OSError:
pass
try:
mask = os.umask(0)
os.umask(mask)
os.chmod(tf.name, 0o666 & ~mask)
except OSError:
pass
os.rename(tf.name, fn)
except Exception:
try:
os.remove(tf.name)
except OSError:
pass
raise
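# Illustrative sketch (hypothetical helper, not part of the module's API):
# write_json_file() replaces the target atomically where the platform allows,
# so a concurrent reader never observes a half-written file.
def _example_write_json_file():
    path = os.path.join(tempfile.gettempdir(), 'ytdlp_example.json')
    write_json_file({'status': 'ok'}, path)
    with io.open(path, encoding='utf-8') as f:
        assert json.load(f) == {'status': 'ok'}
    os.remove(path)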
if sys.version_info >= (2, 7):
def find_xpath_attr(node, xpath, key, val=None):
""" Find the xpath xpath[@key=val] """
assert re.match(r'^[a-zA-Z_-]+$', key)
expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val))
return node.find(expr)
else:
def find_xpath_attr(node, xpath, key, val=None):
for f in node.findall(compat_xpath(xpath)):
if key not in f.attrib:
continue
if val is None or f.attrib.get(key) == val:
return f
return None
# On Python 2.6 the xml.etree.ElementTree.Element methods don't support
# the namespace parameter
def xpath_with_ns(path, ns_map):
components = [c.split(':') for c in path.split('/')]
replaced = []
for c in components:
if len(c) == 1:
replaced.append(c[0])
else:
ns, tag = c
replaced.append('{%s}%s' % (ns_map[ns], tag))
return '/'.join(replaced)
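# Illustrative sketch (hypothetical helper, not part of the module's API):
# a prefixed path is expanded into Clark notation ('{uri}tag') that
# ElementTree understands on all supported Python versions.
def _example_xpath_with_ns():
    path = xpath_with_ns('smil:video', {'smil': 'http://www.w3.org/2001/SMIL20/'})
    assert path == '{http://www.w3.org/2001/SMIL20/}video'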
def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
def _find_xpath(xpath):
return node.find(compat_xpath(xpath))
if isinstance(xpath, (str, compat_str)):
n = _find_xpath(xpath)
else:
for xp in xpath:
n = _find_xpath(xp)
if n is not None:
break
if n is None:
if default is not NO_DEFAULT:
return default
elif fatal:
name = xpath if name is None else name
raise ExtractorError('Could not find XML element %s' % name)
else:
return None
return n
def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
n = xpath_element(node, xpath, name, fatal=fatal, default=default)
if n is None or n == default:
return n
if n.text is None:
if default is not NO_DEFAULT:
return default
elif fatal:
name = xpath if name is None else name
raise ExtractorError('Could not find XML element\'s text %s' % name)
else:
return None
return n.text
def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT):
n = find_xpath_attr(node, xpath, key)
if n is None:
if default is not NO_DEFAULT:
return default
elif fatal:
name = '%s[@%s]' % (xpath, key) if name is None else name
raise ExtractorError('Could not find XML attribute %s' % name)
else:
return None
return n.attrib[key]
def get_element_by_id(id, html):
"""Return the content of the tag with the specified ID in the passed HTML document"""
return get_element_by_attribute('id', id, html)
def get_element_by_class(class_name, html):
"""Return the content of the first tag with the specified class in the passed HTML document"""
retval = get_elements_by_class(class_name, html)
return retval[0] if retval else None
def get_element_by_attribute(attribute, value, html, escape_value=True):
retval = get_elements_by_attribute(attribute, value, html, escape_value)
return retval[0] if retval else None
def get_elements_by_class(class_name, html):
"""Return the content of all tags with the specified class in the passed HTML document as a list"""
return get_elements_by_attribute(
'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name),
html, escape_value=False)
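# Illustrative sketch (hypothetical helper, not part of the module's API):
# the class is matched as a whole word, so 'title' is found inside a
# multi-class attribute such as class="title main".
def _example_get_element_by_class():
    assert get_element_by_class('title', '<div class="title main">Hello</div>') == 'Hello'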
def get_elements_by_attribute(attribute, value, html, escape_value=True):
"""Return the content of the tag with the specified attribute in the passed HTML document"""
value = re.escape(value) if escape_value else value
retlist = []
for m in re.finditer(r'''(?xs)
<([a-zA-Z0-9:._-]+)
(?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*?
\s+%s=['"]?%s['"]?
(?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*?
\s*>
(?P<content>.*?)
</\1>
''' % (re.escape(attribute), value), html):
res = m.group('content')
if res.startswith('"') or res.startswith("'"):
res = res[1:-1]
retlist.append(unescapeHTML(res))
return retlist
class HTMLAttributeParser(compat_HTMLParser):
"""Trivial HTML parser to gather the attributes for a single element"""
def __init__(self):
self.attrs = {}
compat_HTMLParser.__init__(self)
def handle_starttag(self, tag, attrs):
self.attrs = dict(attrs)
def extract_attributes(html_element):
"""Given a string for an HTML element such as
<el
a="foo" B="bar" c="&98;az" d=boz
empty= noval entity="&"
sq='"' dq="'"
>
Decode and return a dictionary of attributes.
{
        'a': 'foo', 'b': 'bar', 'c': 'baz', 'd': 'boz',
'empty': '', 'noval': None, 'entity': '&',
'sq': '"', 'dq': '\''
}.
NB HTMLParser is stricter in Python 2.6 & 3.2 than in later versions,
but the cases in the unit test will work for all of 2.6, 2.7, 3.2-3.5.
"""
parser = HTMLAttributeParser()
try:
parser.feed(html_element)
parser.close()
# Older Python may throw HTMLParseError in case of malformed HTML
except compat_HTMLParseError:
pass
return parser.attrs
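# Illustrative sketch (hypothetical helper, not part of the module's API):
# valueless attributes come back as None, quoted values are decoded.
def _example_extract_attributes():
    attrs = extract_attributes('<video src="test.mp4" controls>')
    assert attrs == {'src': 'test.mp4', 'controls': None}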
def clean_html(html):
"""Clean an HTML snippet into a readable string"""
if html is None: # Convenience for sanitizing descriptions etc.
return html
# Newline vs <br />
html = html.replace('\n', ' ')
html = re.sub(r'(?u)\s*<\s*br\s*/?\s*>\s*', '\n', html)
html = re.sub(r'(?u)<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html)
# Strip html tags
html = re.sub('<.*?>', '', html)
# Replace html entities
html = unescapeHTML(html)
return html.strip()
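# Illustrative sketch (hypothetical helper, not part of the module's API):
# tags are stripped while <br> and paragraph boundaries become newlines.
def _example_clean_html():
    assert clean_html('<p>foo<br/>bar</p>') == 'foo\nbar'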
def sanitize_open(filename, open_mode):
"""Try to open the given filename, and slightly tweak it if this fails.
Attempts to open the given filename. If this fails, it tries to change
the filename slightly, step by step, until it's either able to open it
or it fails and raises a final exception, like the standard open()
function.
It returns the tuple (stream, definitive_file_name).
"""
try:
if filename == '-':
if sys.platform == 'win32':
import msvcrt
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)
stream = open(encodeFilename(filename), open_mode)
return (stream, filename)
except (IOError, OSError) as err:
if err.errno in (errno.EACCES,):
raise
# In case of error, try to remove win32 forbidden chars
alt_filename = sanitize_path(filename)
if alt_filename == filename:
raise
else:
# An exception here should be caught in the caller
stream = open(encodeFilename(alt_filename), open_mode)
return (stream, alt_filename)
def timeconvert(timestr):
"""Convert RFC 2822 defined time string into system timestamp"""
timestamp = None
timetuple = email.utils.parsedate_tz(timestr)
if timetuple is not None:
timestamp = email.utils.mktime_tz(timetuple)
return timestamp
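# Illustrative sketch (hypothetical helper, not part of the module's API):
# an RFC 2822 date becomes a Unix timestamp; unparseable input yields None.
def _example_timeconvert():
    assert timeconvert('Sun, 06 Nov 1994 08:49:37 GMT') == 784111777
    assert timeconvert('not a date') is None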
def sanitize_filename(s, restricted=False, is_id=False):
"""Sanitizes a string so it could be used as part of a filename.
If restricted is set, use a stricter subset of allowed characters.
Set is_id if this is not an arbitrary string, but an ID that should be kept
if possible.
"""
def replace_insane(char):
if restricted and char in ACCENT_CHARS:
return ACCENT_CHARS[char]
elif not restricted and char == '\n':
return ' '
elif char == '?' or ord(char) < 32 or ord(char) == 127:
return ''
elif char == '"':
return '' if restricted else '\''
elif char == ':':
return '_-' if restricted else ' -'
elif char in '\\/|*<>':
return '_'
if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()):
return '_'
if restricted and ord(char) > 127:
return '_'
return char
if s == '':
return ''
# Handle timestamps
s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s)
result = ''.join(map(replace_insane, s))
if not is_id:
while '__' in result:
result = result.replace('__', '_')
result = result.strip('_')
# Common case of "Foreign band name - English song title"
if restricted and result.startswith('-_'):
result = result[2:]
if result.startswith('-'):
result = '_' + result[len('-'):]
result = result.lstrip('.')
if not result:
result = '_'
return result
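# Illustrative sketch (hypothetical helper, not part of the module's API):
# in restricted mode separators collapse to '_' and accented letters are
# transliterated via the ACCENT_CHARS table above.
def _example_sanitize_filename():
    assert sanitize_filename('Artist: Song? (live)*', restricted=True) == 'Artist_-_Song_live'
    assert sanitize_filename('Pépé', restricted=True) == 'Pepe'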
def sanitize_path(s, force=False):
"""Sanitizes and normalizes path on Windows"""
if sys.platform == 'win32':
force = False
drive_or_unc, _ = os.path.splitdrive(s)
if sys.version_info < (2, 7) and not drive_or_unc:
drive_or_unc, _ = os.path.splitunc(s)
elif force:
drive_or_unc = ''
else:
return s
norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep)
if drive_or_unc:
norm_path.pop(0)
sanitized_path = [
path_part if path_part in ['.', '..'] else re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part)
for path_part in norm_path]
if drive_or_unc:
sanitized_path.insert(0, drive_or_unc + os.path.sep)
elif force and s[0] == os.path.sep:
sanitized_path.insert(0, os.path.sep)
return os.path.join(*sanitized_path)
def sanitize_url(url):
    # Prepend protocol-less URLs with an `http:` scheme in order to reduce
    # the number of unwanted failures caused by a missing protocol
if url.startswith('//'):
return 'http:%s' % url
# Fix some common typos seen so far
COMMON_TYPOS = (
# https://github.com/ytdl-org/youtube-dl/issues/15649
(r'^httpss://', r'https://'),
# https://bx1.be/lives/direct-tv/
(r'^rmtp([es]?)://', r'rtmp\1://'),
)
for mistake, fixup in COMMON_TYPOS:
if re.match(mistake, url):
return re.sub(mistake, fixup, url)
return url
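# Illustrative sketch (hypothetical helper, not part of the module's API):
# protocol-relative URLs gain a scheme and the typo patterns above are fixed.
def _example_sanitize_url():
    assert sanitize_url('//example.com/video') == 'http://example.com/video'
    assert sanitize_url('httpss://example.com') == 'https://example.com'
    assert sanitize_url('rmtp://example.com/live') == 'rtmp://example.com/live'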
def extract_basic_auth(url):
parts = compat_urlparse.urlsplit(url)
if parts.username is None:
return url, None
url = compat_urlparse.urlunsplit(parts._replace(netloc=(
parts.hostname if parts.port is None
else '%s:%d' % (parts.hostname, parts.port))))
auth_payload = base64.b64encode(
('%s:%s' % (parts.username, parts.password or '')).encode('utf-8'))
return url, 'Basic ' + auth_payload.decode('utf-8')
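# Illustrative sketch (hypothetical helper, not part of the module's API):
# credentials embedded in the URL are stripped out and handed back as a
# ready-made Basic authorization header value.
def _example_extract_basic_auth():
    url, auth = extract_basic_auth('http://user:pass@example.com/feed')
    assert url == 'http://example.com/feed'
    assert auth == 'Basic ' + base64.b64encode(b'user:pass').decode('utf-8')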
def sanitized_Request(url, *args, **kwargs):
url, auth_header = extract_basic_auth(escape_url(sanitize_url(url)))
if auth_header is not None:
headers = args[1] if len(args) >= 2 else kwargs.setdefault('headers', {})
headers['Authorization'] = auth_header
return compat_urllib_request.Request(url, *args, **kwargs)
def expand_path(s):
"""Expand shell variables and ~"""
return os.path.expandvars(compat_expanduser(s))
def orderedSet(iterable):
""" Remove all duplicates from the input iterable """
res = []
for el in iterable:
if el not in res:
res.append(el)
return res
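# Illustrative sketch (hypothetical helper, not part of the module's API):
# unlike set(), orderedSet() keeps the first occurrence of each element.
def _example_ordered_set():
    assert orderedSet([1, 2, 1, 3, 2]) == [1, 2, 3]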
def _htmlentity_transform(entity_with_semicolon):
"""Transforms an HTML entity to a character."""
entity = entity_with_semicolon[:-1]
# Known non-numeric HTML entity
if entity in compat_html_entities.name2codepoint:
return compat_chr(compat_html_entities.name2codepoint[entity])
# TODO: HTML5 allows entities without a semicolon. For example,
    # '&Eacuteric' should be decoded as 'Éric'.
if entity_with_semicolon in compat_html_entities_html5:
return compat_html_entities_html5[entity_with_semicolon]
mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity)
if mobj is not None:
numstr = mobj.group(1)
if numstr.startswith('x'):
base = 16
numstr = '0%s' % numstr
else:
base = 10
# See https://github.com/ytdl-org/youtube-dl/issues/7518
try:
return compat_chr(int(numstr, base))
except ValueError:
pass
# Unknown entity in name, return its literal representation
return '&%s;' % entity
def unescapeHTML(s):
if s is None:
return None
assert type(s) == compat_str
return re.sub(
r'&([^&;]+;)', lambda m: _htmlentity_transform(m.group(1)), s)
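# Illustrative sketch (hypothetical helper, not part of the module's API):
# named, decimal and hexadecimal entities are all resolved, while unknown
# entities are left as literal text.
def _example_unescape_html():
    assert unescapeHTML('&amp; &#38; &#x26;') == '& & &'
    assert unescapeHTML('&nosuchentity;') == '&nosuchentity;'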
def escapeHTML(text):
    return (
        text
        .replace('&', '&amp;')
        .replace('<', '&lt;')
        .replace('>', '&gt;')
        .replace('"', '&quot;')
        .replace("'", '&#39;')
    )
def process_communicate_or_kill(p, *args, **kwargs):
try:
return p.communicate(*args, **kwargs)
except BaseException: # Including KeyboardInterrupt
p.kill()
p.wait()
raise
class Popen(subprocess.Popen):
if sys.platform == 'win32':
_startupinfo = subprocess.STARTUPINFO()
_startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
else:
_startupinfo = None
def __init__(self, *args, **kwargs):
super(Popen, self).__init__(*args, **kwargs, startupinfo=self._startupinfo)
def communicate_or_kill(self, *args, **kwargs):
return process_communicate_or_kill(self, *args, **kwargs)
def get_subprocess_encoding():
if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
# For subprocess calls, encode with locale encoding
# Refer to http://stackoverflow.com/a/9951851/35070
encoding = preferredencoding()
else:
encoding = sys.getfilesystemencoding()
if encoding is None:
encoding = 'utf-8'
return encoding
def encodeFilename(s, for_subprocess=False):
"""
@param s The name of the file
"""
assert type(s) == compat_str
# Python 3 has a Unicode API
if sys.version_info >= (3, 0):
return s
# Pass '' directly to use Unicode APIs on Windows 2000 and up
# (Detecting Windows NT 4 is tricky because 'major >= 4' would
# match Windows 9x series as well. Besides, NT 4 is obsolete.)
if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
return s
# Jython assumes filenames are Unicode strings though reported as Python 2.x compatible
if sys.platform.startswith('java'):
return s
return s.encode(get_subprocess_encoding(), 'ignore')
def decodeFilename(b, for_subprocess=False):
if sys.version_info >= (3, 0):
return b
if not isinstance(b, bytes):
return b
return b.decode(get_subprocess_encoding(), 'ignore')
def encodeArgument(s):
if not isinstance(s, compat_str):
# Legacy code that uses byte strings
# Uncomment the following line after fixing all post processors
# assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s))
s = s.decode('ascii')
return encodeFilename(s, True)
def decodeArgument(b):
return decodeFilename(b, True)
def decodeOption(optval):
if optval is None:
return optval
if isinstance(optval, bytes):
optval = optval.decode(preferredencoding())
assert isinstance(optval, compat_str)
return optval
_timetuple = collections.namedtuple('Time', ('hours', 'minutes', 'seconds', 'milliseconds'))
def timetuple_from_msec(msec):
secs, msec = divmod(msec, 1000)
mins, secs = divmod(secs, 60)
hrs, mins = divmod(mins, 60)
return _timetuple(hrs, mins, secs, msec)
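# Illustrative sketch (hypothetical helper, not part of the module's API):
# 3723004 ms splits into 1 h, 2 min, 3 s and 4 ms.
def _example_timetuple_from_msec():
    assert timetuple_from_msec(3723004) == (1, 2, 3, 4)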
def formatSeconds(secs, delim=':', msec=False):
time = timetuple_from_msec(secs * 1000)
if time.hours:
ret = '%d%s%02d%s%02d' % (time.hours, delim, time.minutes, delim, time.seconds)
elif time.minutes:
ret = '%d%s%02d' % (time.minutes, delim, time.seconds)
else:
ret = '%d' % time.seconds
return '%s.%03d' % (ret, time.milliseconds) if msec else ret
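# Illustrative sketch (hypothetical helper, not part of the module's API):
# the shortest sensible layout is chosen: H:MM:SS, M:SS or bare seconds.
def _example_format_seconds():
    assert formatSeconds(3723) == '1:02:03'
    assert formatSeconds(63) == '1:03'
    assert formatSeconds(5) == '5'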
def _ssl_load_windows_store_certs(ssl_context, storename):
# Code adapted from _load_windows_store_certs in https://github.com/python/cpython/blob/main/Lib/ssl.py
try:
certs = [cert for cert, encoding, trust in ssl.enum_certificates(storename)
if encoding == 'x509_asn' and (
trust is True or ssl.Purpose.SERVER_AUTH.oid in trust)]
except PermissionError:
return
for cert in certs:
try:
ssl_context.load_verify_locations(cadata=cert)
except ssl.SSLError:
pass
def make_HTTPS_handler(params, **kwargs):
opts_check_certificate = not params.get('nocheckcertificate')
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.check_hostname = opts_check_certificate
context.verify_mode = ssl.CERT_REQUIRED if opts_check_certificate else ssl.CERT_NONE
if opts_check_certificate:
try:
context.load_default_certs()
# Work around the issue in load_default_certs when there are bad certificates. See:
# https://github.com/yt-dlp/yt-dlp/issues/1060,
# https://bugs.python.org/issue35665, https://bugs.python.org/issue45312
except ssl.SSLError:
# enum_certificates is not present in mingw python. See https://github.com/yt-dlp/yt-dlp/issues/1151
if sys.platform == 'win32' and hasattr(ssl, 'enum_certificates'):
# Create a new context to discard any certificates that were already loaded
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.check_hostname, context.verify_mode = True, ssl.CERT_REQUIRED
for storename in ('CA', 'ROOT'):
_ssl_load_windows_store_certs(context, storename)
context.set_default_verify_paths()
return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
def bug_reports_message(before=';'):
if ytdl_is_updateable():
update_cmd = 'type yt-dlp -U to update'
else:
update_cmd = 'see https://github.com/yt-dlp/yt-dlp on how to update'
msg = 'please report this issue on https://github.com/yt-dlp/yt-dlp .'
msg += ' Make sure you are using the latest version; %s.' % update_cmd
msg += ' Be sure to call yt-dlp with the --verbose flag and include its complete output.'
before = before.rstrip()
if not before or before.endswith(('.', '!', '?')):
msg = msg[0].title() + msg[1:]
return (before + ' ' if before else '') + msg
class YoutubeDLError(Exception):
"""Base exception for YoutubeDL errors."""
pass
network_exceptions = [compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error]
if hasattr(ssl, 'CertificateError'):
network_exceptions.append(ssl.CertificateError)
network_exceptions = tuple(network_exceptions)
class ExtractorError(YoutubeDLError):
"""Error during info extraction."""
def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None, ie=None):
""" tb, if given, is the original traceback (so that it can be printed out).
If expected is set, this is a normal error message and most likely not a bug in yt-dlp.
"""
if sys.exc_info()[0] in network_exceptions:
expected = True
self.msg = str(msg)
self.traceback = tb
self.expected = expected
self.cause = cause
self.video_id = video_id
self.ie = ie
self.exc_info = sys.exc_info() # preserve original exception
super(ExtractorError, self).__init__(''.join((
format_field(ie, template='[%s] '),
format_field(video_id, template='%s: '),
self.msg,
format_field(cause, template=' (caused by %r)'),
'' if expected else bug_reports_message())))
def format_traceback(self):
if self.traceback is None:
return None
return ''.join(traceback.format_tb(self.traceback))
class UnsupportedError(ExtractorError):
def __init__(self, url):
super(UnsupportedError, self).__init__(
'Unsupported URL: %s' % url, expected=True)
self.url = url
class RegexNotFoundError(ExtractorError):
"""Error when a regex didn't match"""
pass
class GeoRestrictedError(ExtractorError):
"""Geographic restriction Error exception.
This exception may be thrown when a video is not available from your
geographic location due to geographic restrictions imposed by a website.
"""
def __init__(self, msg, countries=None, **kwargs):
kwargs['expected'] = True
super(GeoRestrictedError, self).__init__(msg, **kwargs)
self.countries = countries
class DownloadError(YoutubeDLError):
"""Download Error exception.
This exception may be thrown by FileDownloader objects if they are not
configured to continue on errors. They will contain the appropriate
error message.
"""
def __init__(self, msg, exc_info=None):
""" exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
super(DownloadError, self).__init__(msg)
self.exc_info = exc_info
class EntryNotInPlaylist(YoutubeDLError):
"""Entry not in playlist exception.
This exception will be thrown by YoutubeDL when a requested entry
is not found in the playlist info_dict
"""
pass
class SameFileError(YoutubeDLError):
"""Same File exception.
This exception will be thrown by FileDownloader objects if they detect
multiple files would have to be downloaded to the same file on disk.
"""
pass
class PostProcessingError(YoutubeDLError):
"""Post Processing exception.
This exception may be raised by PostProcessor's .run() method to
indicate an error in the postprocessing task.
"""
def __init__(self, msg):
super(PostProcessingError, self).__init__(msg)
self.msg = msg
class DownloadCancelled(YoutubeDLError):
""" Exception raised when the download queue should be interrupted """
msg = 'The download was cancelled'
def __init__(self, msg=None):
if msg is not None:
self.msg = msg
YoutubeDLError.__init__(self, self.msg)
class ExistingVideoReached(DownloadCancelled):
""" --break-on-existing triggered """
msg = 'Encountered a video that is already in the archive, stopping due to --break-on-existing'
class RejectedVideoReached(DownloadCancelled):
""" --break-on-reject triggered """
msg = 'Encountered a video that did not match filter, stopping due to --break-on-reject'
class MaxDownloadsReached(DownloadCancelled):
""" --max-downloads limit has been reached. """
msg = 'Maximum number of downloads reached, stopping due to --max-downloads'
class ThrottledDownload(YoutubeDLError):
""" Download speed below --throttled-rate. """
pass
class UnavailableVideoError(YoutubeDLError):
"""Unavailable Format exception.
This exception will be thrown when a video is requested
in a format that is not available for that video.
"""
pass
class ContentTooShortError(YoutubeDLError):
"""Content Too Short exception.
This exception may be raised by FileDownloader objects when a file they
download is too small for what the server announced first, indicating
the connection was probably interrupted.
"""
def __init__(self, downloaded, expected):
super(ContentTooShortError, self).__init__(
'Downloaded {0} bytes, expected {1} bytes'.format(downloaded, expected)
)
# Both in bytes
self.downloaded = downloaded
self.expected = expected
class XAttrMetadataError(YoutubeDLError):
def __init__(self, code=None, msg='Unknown error'):
super(XAttrMetadataError, self).__init__(msg)
self.code = code
self.msg = msg
# Parsing code and msg
if (self.code in (errno.ENOSPC, errno.EDQUOT)
or 'No space left' in self.msg or 'Disk quota exceeded' in self.msg):
self.reason = 'NO_SPACE'
elif self.code == errno.E2BIG or 'Argument list too long' in self.msg:
self.reason = 'VALUE_TOO_LONG'
else:
self.reason = 'NOT_SUPPORTED'
class XAttrUnavailableError(YoutubeDLError):
pass
def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
# Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting
# expected HTTP responses to meet HTTP/1.0 or later (see also
# https://github.com/ytdl-org/youtube-dl/issues/6727)
if sys.version_info < (3, 0):
kwargs['strict'] = True
hc = http_class(*args, **compat_kwargs(kwargs))
source_address = ydl_handler._params.get('source_address')
if source_address is not None:
        # This works around _create_connection() from the socket module, which
        # tries all address data returned by getaddrinfo(), including IPv6. The
        # getaddrinfo() result is filtered here based on the source_address value.
# This is based on the cpython socket.create_connection() function.
# https://github.com/python/cpython/blob/master/Lib/socket.py#L691
def _create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None):
host, port = address
err = None
addrs = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
af = socket.AF_INET if '.' in source_address[0] else socket.AF_INET6
ip_addrs = [addr for addr in addrs if addr[0] == af]
if addrs and not ip_addrs:
ip_version = 'v4' if af == socket.AF_INET else 'v6'
raise socket.error(
"No remote IP%s addresses available for connect, can't use '%s' as source address"
% (ip_version, source_address[0]))
for res in ip_addrs:
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket.socket(af, socktype, proto)
if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(timeout)
sock.bind(source_address)
sock.connect(sa)
err = None # Explicitly break reference cycle
return sock
except socket.error as _:
err = _
if sock is not None:
sock.close()
if err is not None:
raise err
else:
raise socket.error('getaddrinfo returns an empty list')
if hasattr(hc, '_create_connection'):
hc._create_connection = _create_connection
sa = (source_address, 0)
if hasattr(hc, 'source_address'): # Python 2.7+
hc.source_address = sa
else: # Python 2.6
def _hc_connect(self, *args, **kwargs):
sock = _create_connection(
(self.host, self.port), self.timeout, sa)
if is_https:
self.sock = ssl.wrap_socket(
sock, self.key_file, self.cert_file,
ssl_version=ssl.PROTOCOL_TLSv1)
else:
self.sock = sock
hc.connect = functools.partial(_hc_connect, hc)
return hc
def handle_youtubedl_headers(headers):
filtered_headers = headers
if 'Youtubedl-no-compression' in filtered_headers:
filtered_headers = dict((k, v) for k, v in filtered_headers.items() if k.lower() != 'accept-encoding')
del filtered_headers['Youtubedl-no-compression']
return filtered_headers
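# Illustrative sketch (hypothetical helper, not part of the module's API):
# the internal 'Youtubedl-no-compression' flag is consumed here and turns
# into the absence of an Accept-Encoding header.
def _example_handle_youtubedl_headers():
    headers = {'Accept-Encoding': 'gzip, deflate', 'Youtubedl-no-compression': 'True'}
    assert handle_youtubedl_headers(headers) == {}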
class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
"""Handler for HTTP requests and responses.
This class, when installed with an OpenerDirector, automatically adds
the standard headers to every HTTP request and handles gzipped and
deflated responses from web servers. If compression is to be avoided in
a particular request, the original request in the program code only has
to include the HTTP header "Youtubedl-no-compression", which will be
removed before making the real request.
Part of this code was copied from:
http://techknack.net/python-urllib2-handlers/
Andrew Rowls, the author of that code, agreed to release it to the
public domain.
"""
def __init__(self, params, *args, **kwargs):
compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs)
self._params = params
def http_open(self, req):
conn_class = compat_http_client.HTTPConnection
socks_proxy = req.headers.get('Ytdl-socks-proxy')
if socks_proxy:
conn_class = make_socks_conn_class(conn_class, socks_proxy)
del req.headers['Ytdl-socks-proxy']
return self.do_open(functools.partial(
_create_http_connection, self, conn_class, False),
req)
@staticmethod
def deflate(data):
if not data:
return data
try:
return zlib.decompress(data, -zlib.MAX_WBITS)
except zlib.error:
return zlib.decompress(data)
def http_request(self, req):
        # According to RFC 3986, URLs cannot contain non-ASCII characters; however, this is not
# always respected by websites, some tend to give out URLs with non percent-encoded
# non-ASCII characters (see telemb.py, ard.py [#3412])
# urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
# To work around aforementioned issue we will replace request's original URL with
# percent-encoded one
# Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09)
# the code of this workaround has been moved here from YoutubeDL.urlopen()
url = req.get_full_url()
url_escaped = escape_url(url)
# Substitute URL if any change after escaping
if url != url_escaped:
req = update_Request(req, url=url_escaped)
for h, v in std_headers.items():
            # Capitalization is needed because of Python bug 2275: http://bugs.python.org/issue2275
            # urllib capitalizes the dict keys because of this bug
if h.capitalize() not in req.headers:
req.add_header(h, v)
req.headers = handle_youtubedl_headers(req.headers)
if sys.version_info < (2, 7) and '#' in req.get_full_url():
# Python 2.6 is brain-dead when it comes to fragments
req._Request__original = req._Request__original.partition('#')[0]
req._Request__r_type = req._Request__r_type.partition('#')[0]
return req
def http_response(self, req, resp):
old_resp = resp
# gzip
if resp.headers.get('Content-encoding', '') == 'gzip':
content = resp.read()
gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb')
try:
uncompressed = io.BytesIO(gz.read())
except IOError as original_ioerror:
                # There may be junk at the end of the file
# See http://stackoverflow.com/q/4928560/35070 for details
for i in range(1, 1024):
try:
gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb')
uncompressed = io.BytesIO(gz.read())
except IOError:
continue
break
else:
raise original_ioerror
resp = compat_urllib_request.addinfourl(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
resp.msg = old_resp.msg
del resp.headers['Content-encoding']
# deflate
if resp.headers.get('Content-encoding', '') == 'deflate':
gz = io.BytesIO(self.deflate(resp.read()))
resp = compat_urllib_request.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)
resp.msg = old_resp.msg
del resp.headers['Content-encoding']
# Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
# https://github.com/ytdl-org/youtube-dl/issues/6457).
if 300 <= resp.code < 400:
location = resp.headers.get('Location')
if location:
# As of RFC 2616 default charset is iso-8859-1 that is respected by python 3
if sys.version_info >= (3, 0):
location = location.encode('iso-8859-1').decode('utf-8')
else:
location = location.decode('utf-8')
location_escaped = escape_url(location)
if location != location_escaped:
del resp.headers['Location']
if sys.version_info < (3, 0):
location_escaped = location_escaped.encode('utf-8')
resp.headers['Location'] = location_escaped
return resp
https_request = http_request
https_response = http_response
def make_socks_conn_class(base_class, socks_proxy):
assert issubclass(base_class, (
compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection))
url_components = compat_urlparse.urlparse(socks_proxy)
if url_components.scheme.lower() == 'socks5':
socks_type = ProxyType.SOCKS5
elif url_components.scheme.lower() in ('socks', 'socks4'):
socks_type = ProxyType.SOCKS4
    elif url_components.scheme.lower() == 'socks4a':
        socks_type = ProxyType.SOCKS4A
    else:
        # Fail fast on unsupported schemes instead of hitting a NameError below
        raise ValueError('Unsupported SOCKS proxy scheme: %s' % url_components.scheme)
def unquote_if_non_empty(s):
if not s:
return s
return compat_urllib_parse_unquote_plus(s)
proxy_args = (
socks_type,
url_components.hostname, url_components.port or 1080,
True, # Remote DNS
unquote_if_non_empty(url_components.username),
unquote_if_non_empty(url_components.password),
)
class SocksConnection(base_class):
def connect(self):
self.sock = sockssocket()
self.sock.setproxy(*proxy_args)
if type(self.timeout) in (int, float):
self.sock.settimeout(self.timeout)
self.sock.connect((self.host, self.port))
if isinstance(self, compat_http_client.HTTPSConnection):
if hasattr(self, '_context'): # Python > 2.6
self.sock = self._context.wrap_socket(
self.sock, server_hostname=self.host)
else:
self.sock = ssl.wrap_socket(self.sock)
return SocksConnection
class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler):
def __init__(self, params, https_conn_class=None, *args, **kwargs):
compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs)
self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection
self._params = params
def https_open(self, req):
kwargs = {}
conn_class = self._https_conn_class
if hasattr(self, '_context'): # python > 2.6
kwargs['context'] = self._context
if hasattr(self, '_check_hostname'): # python 3.x
kwargs['check_hostname'] = self._check_hostname
socks_proxy = req.headers.get('Ytdl-socks-proxy')
if socks_proxy:
conn_class = make_socks_conn_class(conn_class, socks_proxy)
del req.headers['Ytdl-socks-proxy']
return self.do_open(functools.partial(
_create_http_connection, self, conn_class, True),
req, **kwargs)
class YoutubeDLCookieJar(compat_cookiejar.MozillaCookieJar):
"""
See [1] for cookie file format.
1. https://curl.haxx.se/docs/http-cookies.html
"""
_HTTPONLY_PREFIX = '#HttpOnly_'
_ENTRY_LEN = 7
_HEADER = '''# Netscape HTTP Cookie File
# This file is generated by yt-dlp. Do not edit.
'''
_CookieFileEntry = collections.namedtuple(
'CookieFileEntry',
('domain_name', 'include_subdomains', 'path', 'https_only', 'expires_at', 'name', 'value'))
def save(self, filename=None, ignore_discard=False, ignore_expires=False):
"""
Save cookies to a file.
Most of the code is taken from CPython 3.8 and slightly adapted
to support cookie files with UTF-8 in both python 2 and 3.
"""
if filename is None:
if self.filename is not None:
filename = self.filename
else:
raise ValueError(compat_cookiejar.MISSING_FILENAME_TEXT)
# Store session cookies with `expires` set to 0 instead of an empty
# string
for cookie in self:
if cookie.expires is None:
cookie.expires = 0
with io.open(filename, 'w', encoding='utf-8') as f:
f.write(self._HEADER)
now = time.time()
for cookie in self:
if not ignore_discard and cookie.discard:
continue
if not ignore_expires and cookie.is_expired(now):
continue
if cookie.secure:
secure = 'TRUE'
else:
secure = 'FALSE'
if cookie.domain.startswith('.'):
initial_dot = 'TRUE'
else:
initial_dot = 'FALSE'
if cookie.expires is not None:
expires = compat_str(cookie.expires)
else:
expires = ''
if cookie.value is None:
# cookies.txt regards 'Set-Cookie: foo' as a cookie
# with no name, whereas http.cookiejar regards it as a
# cookie with no value.
name = ''
value = cookie.name
else:
name = cookie.name
value = cookie.value
f.write(
'\t'.join([cookie.domain, initial_dot, cookie.path,
secure, expires, name, value]) + '\n')
def load(self, filename=None, ignore_discard=False, ignore_expires=False):
"""Load cookies from a file."""
if filename is None:
if self.filename is not None:
filename = self.filename
else:
raise ValueError(compat_cookiejar.MISSING_FILENAME_TEXT)
def prepare_line(line):
if line.startswith(self._HTTPONLY_PREFIX):
line = line[len(self._HTTPONLY_PREFIX):]
# comments and empty lines are fine
if line.startswith('#') or not line.strip():
return line
cookie_list = line.split('\t')
if len(cookie_list) != self._ENTRY_LEN:
raise compat_cookiejar.LoadError('invalid length %d' % len(cookie_list))
cookie = self._CookieFileEntry(*cookie_list)
if cookie.expires_at and not cookie.expires_at.isdigit():
raise compat_cookiejar.LoadError('invalid expires at %s' % cookie.expires_at)
return line
cf = io.StringIO()
with io.open(filename, encoding='utf-8') as f:
for line in f:
try:
cf.write(prepare_line(line))
except compat_cookiejar.LoadError as e:
write_string(
'WARNING: skipping cookie file entry due to %s: %r\n'
% (e, line), sys.stderr)
continue
cf.seek(0)
self._really_load(cf, filename, ignore_discard, ignore_expires)
        # Session cookies are denoted by either `expires` field set to
        # an empty string or 0. MozillaCookieJar only recognizes the former
        # (see [1]). So we need to force the latter to be recognized as session
        # cookies on our own.
        # Session cookies may be important for cookies-based authentication,
        # e.g. usually, when a user does not check the 'Remember me' checkbox
        # while logging in on a site, some important cookies are stored as
        # session cookies, so failing to recognize them results in a failed login.
# 1. https://bugs.python.org/issue17164
for cookie in self:
# Treat `expires=0` cookies as session cookies
if cookie.expires == 0:
cookie.expires = None
cookie.discard = True
class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor):
def __init__(self, cookiejar=None):
compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar)
def http_response(self, request, response):
        # Python 2 will choke on the next HTTP request in a row if there are
        # non-ASCII characters in the Set-Cookie HTTP header of the last response
        # (see https://github.com/ytdl-org/youtube-dl/issues/6769).
        # In order to at least prevent crashing, the Set-Cookie header would be
        # percent-encoded before HTTPCookieProcessor starts processing it
        # (the Python 2 workaround below is kept for reference but disabled):
# if sys.version_info < (3, 0) and response.headers:
# for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'):
# set_cookie = response.headers.get(set_cookie_header)
# if set_cookie:
# set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ")
# if set_cookie != set_cookie_escaped:
# del response.headers[set_cookie_header]
# response.headers[set_cookie_header] = set_cookie_escaped
return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response)
https_request = compat_urllib_request.HTTPCookieProcessor.http_request
https_response = http_response
class YoutubeDLRedirectHandler(compat_urllib_request.HTTPRedirectHandler):
"""YoutubeDL redirect handler
The code is based on HTTPRedirectHandler implementation from CPython [1].
This redirect handler solves two issues:
- ensures redirect URL is always unicode under python 2
- introduces support for experimental HTTP response status code
308 Permanent Redirect [2] used by some sites [3]
1. https://github.com/python/cpython/blob/master/Lib/urllib/request.py
2. https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/308
3. https://github.com/ytdl-org/youtube-dl/issues/28768
"""
http_error_301 = http_error_303 = http_error_307 = http_error_308 = compat_urllib_request.HTTPRedirectHandler.http_error_302
def redirect_request(self, req, fp, code, msg, headers, newurl):
"""Return a Request or None in response to a redirect.
This is called by the http_error_30x methods when a
redirection response is received. If a redirection should
take place, return a new Request to allow http_error_30x to
perform the redirect. Otherwise, raise HTTPError if no-one
else should try to handle this url. Return None if you can't
but another Handler might.
"""
m = req.get_method()
if (not (code in (301, 302, 303, 307, 308) and m in ("GET", "HEAD")
or code in (301, 302, 303) and m == "POST")):
raise compat_HTTPError(req.full_url, code, msg, headers, fp)
# Strictly (according to RFC 2616), 301 or 302 in response to
# a POST MUST NOT cause a redirection without confirmation
# from the user (of urllib.request, in this case). In practice,
# essentially all clients do redirect in this case, so we do
# the same.
        # On Python 2 urlh.geturl() may sometimes return the redirect URL
        # as a byte string instead of unicode. This workaround forces it
        # to always return unicode.
if sys.version_info[0] < 3:
newurl = compat_str(newurl)
# Be conciliant with URIs containing a space. This is mainly
# redundant with the more complete encoding done in http_error_302(),
# but it is kept for compatibility with other callers.
newurl = newurl.replace(' ', '%20')
CONTENT_HEADERS = ("content-length", "content-type")
# NB: don't use dict comprehension for python 2.6 compatibility
newheaders = dict((k, v) for k, v in req.headers.items()
if k.lower() not in CONTENT_HEADERS)
return compat_urllib_request.Request(
newurl, headers=newheaders, origin_req_host=req.origin_req_host,
unverifiable=True)
def extract_timezone(date_str):
m = re.search(
r'''(?x)
^.{8,}? # >=8 char non-TZ prefix, if present
(?P<tz>Z| # just the UTC Z, or
(?:(?<=.\b\d{4}|\b\d{2}:\d\d)| # preceded by 4 digits or hh:mm or
(?<!.\b[a-zA-Z]{3}|[a-zA-Z]{4}|..\b\d\d)) # not preceded by 3 alpha word or >= 4 alpha or 2 digits
[ ]? # optional space
(?P<sign>\+|-) # +/-
(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2}) # hh[:]mm
$)
''', date_str)
if not m:
timezone = datetime.timedelta()
else:
date_str = date_str[:-len(m.group('tz'))]
if not m.group('sign'):
timezone = datetime.timedelta()
else:
sign = 1 if m.group('sign') == '+' else -1
timezone = datetime.timedelta(
hours=sign * int(m.group('hours')),
minutes=sign * int(m.group('minutes')))
return timezone, date_str
def parse_iso8601(date_str, delimiter='T', timezone=None):
""" Return a UNIX timestamp from the given date """
if date_str is None:
return None
date_str = re.sub(r'\.[0-9]+', '', date_str)
if timezone is None:
timezone, date_str = extract_timezone(date_str)
try:
date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
dt = datetime.datetime.strptime(date_str, date_format) - timezone
return calendar.timegm(dt.timetuple())
except ValueError:
pass
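# Illustrative behaviour of the two helpers above (a sketch; not executed):
#   extract_timezone('2014-03-23T23:04:26+0100')
#       -> (datetime.timedelta(hours=1), '2014-03-23T23:04:26')
#   parse_iso8601('2014-03-23T23:04:26+0100')
#       -> 1395612266  # UNIX timestamp of 2014-03-23 22:04:26 UTC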
def date_formats(day_first=True):
return DATE_FORMATS_DAY_FIRST if day_first else DATE_FORMATS_MONTH_FIRST
def unified_strdate(date_str, day_first=True):
"""Return a string with the date in the format YYYYMMDD"""
if date_str is None:
return None
upload_date = None
# Replace commas
date_str = date_str.replace(',', ' ')
# Remove AM/PM + timezone
date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
_, date_str = extract_timezone(date_str)
for expression in date_formats(day_first):
try:
upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
except ValueError:
pass
if upload_date is None:
timetuple = email.utils.parsedate_tz(date_str)
if timetuple:
try:
upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
except ValueError:
pass
if upload_date is not None:
return compat_str(upload_date)
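# Illustrative examples (a sketch; assumes the usual patterns in the
# DATE_FORMATS* tables defined earlier in this module):
#   unified_strdate('December 21, 2010') -> '20101221'
#   unified_strdate('8/7/2009') -> '20090708'  # day_first=True by default
#   unified_strdate('8/7/2009', day_first=False) -> '20090807'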
def unified_timestamp(date_str, day_first=True):
if date_str is None:
return None
date_str = re.sub(r'[,|]', '', date_str)
pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0
timezone, date_str = extract_timezone(date_str)
# Remove AM/PM + timezone
date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
# Remove unrecognized timezones from ISO 8601 alike timestamps
m = re.search(r'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str)
if m:
date_str = date_str[:-len(m.group('tz'))]
# Python only supports microseconds, so remove nanoseconds
m = re.search(r'^([0-9]{4,}-[0-9]{1,2}-[0-9]{1,2}T[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}\.[0-9]{6})[0-9]+$', date_str)
if m:
date_str = m.group(1)
for expression in date_formats(day_first):
try:
dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta)
return calendar.timegm(dt.timetuple())
except ValueError:
pass
timetuple = email.utils.parsedate_tz(date_str)
if timetuple:
return calendar.timegm(timetuple) + pm_delta * 3600
def determine_ext(url, default_ext='unknown_video'):
if url is None or '.' not in url:
return default_ext
guess = url.partition('?')[0].rpartition('.')[2]
if re.match(r'^[A-Za-z0-9]+$', guess):
return guess
    # Try to extract ext from URLs like http://example.com/foo/bar.mp4/?download
elif guess.rstrip('/') in KNOWN_EXTENSIONS:
return guess.rstrip('/')
else:
return default_ext
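# Illustrative examples (a sketch; assumes 'mp4' is in KNOWN_EXTENSIONS):
#   determine_ext('http://example.com/video.mp4?x=1') -> 'mp4'
#   determine_ext('http://example.com/foo/bar.mp4/?download') -> 'mp4'
#   determine_ext('http://example.com/download') -> 'unknown_video'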
def subtitles_filename(filename, sub_lang, sub_format, expected_real_ext=None):
return replace_extension(filename, sub_lang + '.' + sub_format, expected_real_ext)
def datetime_from_str(date_str, precision='auto', format='%Y%m%d'):
"""
Return a datetime object from a string in the format YYYYMMDD or
(now|today|date)[+-][0-9](microsecond|second|minute|hour|day|week|month|year)(s)?
    format: string date format used to parse date_str
precision: round the time portion of a datetime object.
auto|microsecond|second|minute|hour|day.
auto: round to the unit provided in date_str (if applicable).
"""
auto_precision = False
if precision == 'auto':
auto_precision = True
precision = 'microsecond'
today = datetime_round(datetime.datetime.now(), precision)
if date_str in ('now', 'today'):
return today
if date_str == 'yesterday':
return today - datetime.timedelta(days=1)
match = re.match(
r'(?P<start>.+)(?P<sign>[+-])(?P<time>\d+)(?P<unit>microsecond|second|minute|hour|day|week|month|year)(s)?',
date_str)
if match is not None:
start_time = datetime_from_str(match.group('start'), precision, format)
time = int(match.group('time')) * (-1 if match.group('sign') == '-' else 1)
unit = match.group('unit')
if unit == 'month' or unit == 'year':
new_date = datetime_add_months(start_time, time * 12 if unit == 'year' else time)
unit = 'day'
else:
if unit == 'week':
unit = 'day'
time *= 7
delta = datetime.timedelta(**{unit + 's': time})
new_date = start_time + delta
if auto_precision:
return datetime_round(new_date, unit)
return new_date
return datetime_round(datetime.datetime.strptime(date_str, format), precision)
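# Illustrative examples (a sketch; 'now'-relative results depend on the current time):
#   datetime_from_str('20210101') -> datetime.datetime(2021, 1, 1, 0, 0)
#   datetime_from_str('now-1week')
#       -> the current time minus 7 days, rounded to day precision ('auto')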
def date_from_str(date_str, format='%Y%m%d'):
"""
Return a datetime object from a string in the format YYYYMMDD or
(now|today|date)[+-][0-9](microsecond|second|minute|hour|day|week|month|year)(s)?
    format: string date format used to parse date_str
"""
return datetime_from_str(date_str, precision='microsecond', format=format).date()
def datetime_add_months(dt, months):
"""Increment/Decrement a datetime object by months."""
month = dt.month + months - 1
year = dt.year + month // 12
month = month % 12 + 1
day = min(dt.day, calendar.monthrange(year, month)[1])
return dt.replace(year, month, day)
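# Illustrative examples (a sketch): the day is clamped to the length of the
# target month, so adding one month to Jan 31 yields Feb 28 (29 in leap years):
#   datetime_add_months(datetime.datetime(2021, 1, 31), 1)
#       -> datetime.datetime(2021, 2, 28, 0, 0)
#   datetime_add_months(datetime.datetime(2021, 12, 15), 1)
#       -> datetime.datetime(2022, 1, 15, 0, 0)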
def datetime_round(dt, precision='day'):
"""
Round a datetime object's time to a specific precision
"""
if precision == 'microsecond':
return dt
unit_seconds = {
'day': 86400,
'hour': 3600,
'minute': 60,
'second': 1,
}
roundto = lambda x, n: ((x + n / 2) // n) * n
timestamp = calendar.timegm(dt.timetuple())
return datetime.datetime.utcfromtimestamp(roundto(timestamp, unit_seconds[precision]))
def hyphenate_date(date_str):
"""
Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
if match is not None:
return '-'.join(match.groups())
else:
return date_str
class DateRange(object):
"""Represents a time interval between two dates"""
def __init__(self, start=None, end=None):
"""start and end must be strings in the format accepted by date"""
if start is not None:
self.start = date_from_str(start)
else:
self.start = datetime.datetime.min.date()
if end is not None:
self.end = date_from_str(end)
else:
self.end = datetime.datetime.max.date()
if self.start > self.end:
            raise ValueError('Date range: "%s", the start date must be before the end date' % self)
@classmethod
def day(cls, day):
"""Returns a range that only contains the given day"""
return cls(day, day)
def __contains__(self, date):
"""Check if the date is in the range"""
if not isinstance(date, datetime.date):
date = date_from_str(date)
return self.start <= date <= self.end
def __str__(self):
return '%s - %s' % (self.start.isoformat(), self.end.isoformat())
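# Illustrative usage of DateRange (a sketch):
#   '20200315' in DateRange('20200101', '20200630') -> True
#   DateRange.day('20200101')  # a range containing only that single day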
def platform_name():
""" Returns the platform name as a compat_str """
res = platform.platform()
if isinstance(res, bytes):
res = res.decode(preferredencoding())
assert isinstance(res, compat_str)
return res
def get_windows_version():
''' Get Windows version. None if it's not running on Windows '''
if compat_os_name == 'nt':
return version_tuple(platform.win32_ver()[1])
else:
return None
def _windows_write_string(s, out):
""" Returns True if the string was written using special methods,
False if it has yet to be written out."""
# Adapted from http://stackoverflow.com/a/3259271/35070
import ctypes
import ctypes.wintypes
WIN_OUTPUT_IDS = {
1: -11,
2: -12,
}
try:
fileno = out.fileno()
except AttributeError:
# If the output stream doesn't have a fileno, it's virtual
return False
except io.UnsupportedOperation:
# Some strange Windows pseudo files?
return False
if fileno not in WIN_OUTPUT_IDS:
return False
GetStdHandle = compat_ctypes_WINFUNCTYPE(
ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)(
('GetStdHandle', ctypes.windll.kernel32))
h = GetStdHandle(WIN_OUTPUT_IDS[fileno])
WriteConsoleW = compat_ctypes_WINFUNCTYPE(
ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR,
ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),
ctypes.wintypes.LPVOID)(('WriteConsoleW', ctypes.windll.kernel32))
written = ctypes.wintypes.DWORD(0)
GetFileType = compat_ctypes_WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)(('GetFileType', ctypes.windll.kernel32))
FILE_TYPE_CHAR = 0x0002
FILE_TYPE_REMOTE = 0x8000
GetConsoleMode = compat_ctypes_WINFUNCTYPE(
ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE,
ctypes.POINTER(ctypes.wintypes.DWORD))(
('GetConsoleMode', ctypes.windll.kernel32))
INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value
def not_a_console(handle):
if handle == INVALID_HANDLE_VALUE or handle is None:
return True
return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR
or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0)
if not_a_console(h):
return False
def next_nonbmp_pos(s):
try:
return next(i for i, c in enumerate(s) if ord(c) > 0xffff)
except StopIteration:
return len(s)
while s:
count = min(next_nonbmp_pos(s), 1024)
ret = WriteConsoleW(
h, s, count if count else 2, ctypes.byref(written), None)
if ret == 0:
raise OSError('Failed to write string')
if not count: # We just wrote a non-BMP character
assert written.value == 2
s = s[1:]
else:
assert written.value > 0
s = s[written.value:]
return True
def write_string(s, out=None, encoding=None):
if out is None:
out = sys.stderr
assert type(s) == compat_str
if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'):
if _windows_write_string(s, out):
return
if ('b' in getattr(out, 'mode', '')
or sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr
byt = s.encode(encoding or preferredencoding(), 'ignore')
out.write(byt)
elif hasattr(out, 'buffer'):
enc = encoding or getattr(out, 'encoding', None) or preferredencoding()
byt = s.encode(enc, 'ignore')
out.buffer.write(byt)
else:
out.write(s)
out.flush()
def bytes_to_intlist(bs):
if not bs:
return []
if isinstance(bs[0], int): # Python 3
return list(bs)
else:
return [ord(c) for c in bs]
def intlist_to_bytes(xs):
if not xs:
return b''
return compat_struct_pack('%dB' % len(xs), *xs)
# Cross-platform file locking
if sys.platform == 'win32':
import ctypes.wintypes
import msvcrt
class OVERLAPPED(ctypes.Structure):
_fields_ = [
('Internal', ctypes.wintypes.LPVOID),
('InternalHigh', ctypes.wintypes.LPVOID),
('Offset', ctypes.wintypes.DWORD),
('OffsetHigh', ctypes.wintypes.DWORD),
('hEvent', ctypes.wintypes.HANDLE),
]
kernel32 = ctypes.windll.kernel32
LockFileEx = kernel32.LockFileEx
LockFileEx.argtypes = [
ctypes.wintypes.HANDLE, # hFile
ctypes.wintypes.DWORD, # dwFlags
ctypes.wintypes.DWORD, # dwReserved
ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow
ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh
ctypes.POINTER(OVERLAPPED) # Overlapped
]
LockFileEx.restype = ctypes.wintypes.BOOL
UnlockFileEx = kernel32.UnlockFileEx
UnlockFileEx.argtypes = [
ctypes.wintypes.HANDLE, # hFile
ctypes.wintypes.DWORD, # dwReserved
ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow
ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh
ctypes.POINTER(OVERLAPPED) # Overlapped
]
UnlockFileEx.restype = ctypes.wintypes.BOOL
whole_low = 0xffffffff
whole_high = 0x7fffffff
def _lock_file(f, exclusive):
overlapped = OVERLAPPED()
overlapped.Offset = 0
overlapped.OffsetHigh = 0
overlapped.hEvent = 0
f._lock_file_overlapped_p = ctypes.pointer(overlapped)
handle = msvcrt.get_osfhandle(f.fileno())
if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0,
whole_low, whole_high, f._lock_file_overlapped_p):
raise OSError('Locking file failed: %r' % ctypes.FormatError())
def _unlock_file(f):
assert f._lock_file_overlapped_p
handle = msvcrt.get_osfhandle(f.fileno())
if not UnlockFileEx(handle, 0,
whole_low, whole_high, f._lock_file_overlapped_p):
raise OSError('Unlocking file failed: %r' % ctypes.FormatError())
else:
    # Some platforms, such as Jython, are missing fcntl
try:
import fcntl
def _lock_file(f, exclusive):
fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)
def _unlock_file(f):
fcntl.flock(f, fcntl.LOCK_UN)
except ImportError:
UNSUPPORTED_MSG = 'file locking is not supported on this platform'
def _lock_file(f, exclusive):
raise IOError(UNSUPPORTED_MSG)
def _unlock_file(f):
raise IOError(UNSUPPORTED_MSG)
class locked_file(object):
def __init__(self, filename, mode, encoding=None):
assert mode in ['r', 'a', 'w']
self.f = io.open(filename, mode, encoding=encoding)
self.mode = mode
def __enter__(self):
exclusive = self.mode != 'r'
try:
_lock_file(self.f, exclusive)
except IOError:
self.f.close()
raise
return self
def __exit__(self, etype, value, traceback):
try:
_unlock_file(self.f)
finally:
self.f.close()
def __iter__(self):
return iter(self.f)
def write(self, *args):
return self.f.write(*args)
def read(self, *args):
return self.f.read(*args)
def get_filesystem_encoding():
encoding = sys.getfilesystemencoding()
return encoding if encoding is not None else 'utf-8'
def shell_quote(args):
quoted_args = []
encoding = get_filesystem_encoding()
for a in args:
if isinstance(a, bytes):
# We may get a filename encoded with 'encodeFilename'
a = a.decode(encoding)
quoted_args.append(compat_shlex_quote(a))
return ' '.join(quoted_args)
def smuggle_url(url, data):
""" Pass additional data in a URL for internal use. """
url, idata = unsmuggle_url(url, {})
data.update(idata)
sdata = compat_urllib_parse_urlencode(
{'__youtubedl_smuggle': json.dumps(data)})
return url + '#' + sdata
def unsmuggle_url(smug_url, default=None):
if '#__youtubedl_smuggle' not in smug_url:
return smug_url, default
url, _, sdata = smug_url.rpartition('#')
jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0]
data = json.loads(jsond)
return url, data
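# Illustrative round-trip (a sketch): the payload is JSON-encoded into the URL
# fragment by smuggle_url and recovered by unsmuggle_url:
#   url = smuggle_url('http://example.com/video', {'source': 'playlist'})
#   unsmuggle_url(url) -> ('http://example.com/video', {'source': 'playlist'})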
def format_bytes(bytes):
if bytes is None:
return 'N/A'
if type(bytes) is str:
bytes = float(bytes)
if bytes == 0.0:
exponent = 0
else:
exponent = int(math.log(bytes, 1024.0))
suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent]
converted = float(bytes) / float(1024 ** exponent)
return '%.2f%s' % (converted, suffix)
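# Illustrative examples (a sketch; binary units, two decimal places):
#   format_bytes(None) -> 'N/A'
#   format_bytes(1024) -> '1.00KiB'
#   format_bytes(1536) -> '1.50KiB'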
def lookup_unit_table(unit_table, s):
units_re = '|'.join(re.escape(u) for u in unit_table)
m = re.match(
r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s)
if not m:
return None
num_str = m.group('num').replace(',', '.')
mult = unit_table[m.group('unit')]
return int(float(num_str) * mult)
def parse_filesize(s):
if s is None:
return None
# The lower-case forms are of course incorrect and unofficial,
# but we support those too
_UNIT_TABLE = {
'B': 1,
'b': 1,
'bytes': 1,
'KiB': 1024,
'KB': 1000,
'kB': 1024,
'Kb': 1000,
'kb': 1000,
'kilobytes': 1000,
'kibibytes': 1024,
'MiB': 1024 ** 2,
'MB': 1000 ** 2,
'mB': 1024 ** 2,
'Mb': 1000 ** 2,
'mb': 1000 ** 2,
'megabytes': 1000 ** 2,
'mebibytes': 1024 ** 2,
'GiB': 1024 ** 3,
'GB': 1000 ** 3,
'gB': 1024 ** 3,
'Gb': 1000 ** 3,
'gb': 1000 ** 3,
'gigabytes': 1000 ** 3,
'gibibytes': 1024 ** 3,
'TiB': 1024 ** 4,
'TB': 1000 ** 4,
'tB': 1024 ** 4,
'Tb': 1000 ** 4,
'tb': 1000 ** 4,
'terabytes': 1000 ** 4,
'tebibytes': 1024 ** 4,
'PiB': 1024 ** 5,
'PB': 1000 ** 5,
'pB': 1024 ** 5,
'Pb': 1000 ** 5,
'pb': 1000 ** 5,
'petabytes': 1000 ** 5,
'pebibytes': 1024 ** 5,
'EiB': 1024 ** 6,
'EB': 1000 ** 6,
'eB': 1024 ** 6,
'Eb': 1000 ** 6,
'eb': 1000 ** 6,
'exabytes': 1000 ** 6,
'exbibytes': 1024 ** 6,
'ZiB': 1024 ** 7,
'ZB': 1000 ** 7,
'zB': 1024 ** 7,
'Zb': 1000 ** 7,
'zb': 1000 ** 7,
'zettabytes': 1000 ** 7,
'zebibytes': 1024 ** 7,
'YiB': 1024 ** 8,
'YB': 1000 ** 8,
'yB': 1024 ** 8,
'Yb': 1000 ** 8,
'yb': 1000 ** 8,
'yottabytes': 1000 ** 8,
'yobibytes': 1024 ** 8,
}
return lookup_unit_table(_UNIT_TABLE, s)
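# Illustrative examples (a sketch): decimal and binary prefixes are distinguished:
#   parse_filesize('5 GB') -> 5000000000
#   parse_filesize('1.2MiB') -> 1258291
#   parse_filesize('foo') -> None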
def parse_count(s):
if s is None:
return None
s = s.strip()
if re.match(r'^[\d,.]+$', s):
return str_to_int(s)
_UNIT_TABLE = {
'k': 1000,
'K': 1000,
'm': 1000 ** 2,
'M': 1000 ** 2,
'kk': 1000 ** 2,
'KK': 1000 ** 2,
}
return lookup_unit_table(_UNIT_TABLE, s)
def parse_resolution(s):
if s is None:
return {}
mobj = re.search(r'(?<![a-zA-Z0-9])(?P<w>\d+)\s*[xX×,]\s*(?P<h>\d+)(?![a-zA-Z0-9])', s)
if mobj:
return {
'width': int(mobj.group('w')),
'height': int(mobj.group('h')),
}
mobj = re.search(r'(?<![a-zA-Z0-9])(\d+)[pPiI](?![a-zA-Z0-9])', s)
if mobj:
return {'height': int(mobj.group(1))}
mobj = re.search(r'\b([48])[kK]\b', s)
if mobj:
return {'height': int(mobj.group(1)) * 540}
return {}
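# Illustrative examples (a sketch):
#   parse_resolution('1920x1080') -> {'width': 1920, 'height': 1080}
#   parse_resolution('720p') -> {'height': 720}
#   parse_resolution('4k') -> {'height': 2160}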
def parse_bitrate(s):
if not isinstance(s, compat_str):
return
mobj = re.search(r'\b(\d+)\s*kbps', s)
if mobj:
return int(mobj.group(1))
def month_by_name(name, lang='en'):
""" Return the number of a month by (locale-independently) English name """
month_names = MONTH_NAMES.get(lang, MONTH_NAMES['en'])
try:
return month_names.index(name) + 1
except ValueError:
return None
def month_by_abbreviation(abbrev):
""" Return the number of a month by (locale-independently) English
abbreviations """
try:
return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1
except ValueError:
return None
def fix_xml_ampersands(xml_str):
    """Replace all the '&' by '&amp;' in XML"""
    return re.sub(
        r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
        '&amp;',
        xml_str)
def setproctitle(title):
assert isinstance(title, compat_str)
# ctypes in Jython is not complete
# http://bugs.jython.org/issue2148
if sys.platform.startswith('java'):
return
try:
libc = ctypes.cdll.LoadLibrary('libc.so.6')
except OSError:
return
except TypeError:
# LoadLibrary in Windows Python 2.7.13 only expects
# a bytestring, but since unicode_literals turns
# every string into a unicode string, it fails.
return
title_bytes = title.encode('utf-8')
buf = ctypes.create_string_buffer(len(title_bytes))
buf.value = title_bytes
try:
libc.prctl(15, buf, 0, 0, 0)
except AttributeError:
return # Strange libc, just skip this
def remove_start(s, start):
return s[len(start):] if s is not None and s.startswith(start) else s
def remove_end(s, end):
return s[:-len(end)] if s is not None and s.endswith(end) else s
def remove_quotes(s):
if s is None or len(s) < 2:
return s
for quote in ('"', "'", ):
if s[0] == quote and s[-1] == quote:
return s[1:-1]
return s
def get_domain(url):
domain = re.match(r'(?:https?:\/\/)?(?:www\.)?(?P<domain>[^\n\/]+\.[^\n\/]+)(?:\/(.*))?', url)
return domain.group('domain') if domain else None
def url_basename(url):
path = compat_urlparse.urlparse(url).path
return path.strip('/').split('/')[-1]
def base_url(url):
return re.match(r'https?://[^?#&]+/', url).group()
def urljoin(base, path):
if isinstance(path, bytes):
path = path.decode('utf-8')
if not isinstance(path, compat_str) or not path:
return None
if re.match(r'^(?:[a-zA-Z][a-zA-Z0-9+-.]*:)?//', path):
return path
if isinstance(base, bytes):
base = base.decode('utf-8')
if not isinstance(base, compat_str) or not re.match(
r'^(?:https?:)?//', base):
return None
return compat_urlparse.urljoin(base, path)
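# Illustrative examples (a sketch): invalid inputs yield None instead of raising:
#   urljoin('https://example.com/a/b', 'c') -> 'https://example.com/a/c'
#   urljoin('https://example.com/a/', '/c') -> 'https://example.com/c'
#   urljoin(None, 'c') -> None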
class HEADRequest(compat_urllib_request.Request):
def get_method(self):
return 'HEAD'
class PUTRequest(compat_urllib_request.Request):
def get_method(self):
return 'PUT'
def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
if get_attr:
if v is not None:
v = getattr(v, get_attr, None)
if v == '':
v = None
if v is None:
return default
try:
return int(v) * invscale // scale
except (ValueError, TypeError):
return default
def str_or_none(v, default=None):
return default if v is None else compat_str(v)
def str_to_int(int_str):
""" A more relaxed version of int_or_none """
if isinstance(int_str, compat_integer_types):
return int_str
elif isinstance(int_str, compat_str):
int_str = re.sub(r'[,\.\+]', '', int_str)
return int_or_none(int_str)
def float_or_none(v, scale=1, invscale=1, default=None):
if v is None:
return default
try:
return float(v) * invscale / scale
except (ValueError, TypeError):
return default
def bool_or_none(v, default=None):
return v if isinstance(v, bool) else default
def strip_or_none(v, default=None):
return v.strip() if isinstance(v, compat_str) else default
def url_or_none(url):
if not url or not isinstance(url, compat_str):
return None
url = url.strip()
return url if re.match(r'^(?:(?:https?|rt(?:m(?:pt?[es]?|fp)|sp[su]?)|mms|ftps?):)?//', url) else None
def strftime_or_none(timestamp, date_format, default=None):
datetime_object = None
try:
if isinstance(timestamp, compat_numeric_types): # unix timestamp
datetime_object = datetime.datetime.utcfromtimestamp(timestamp)
elif isinstance(timestamp, compat_str): # assume YYYYMMDD
datetime_object = datetime.datetime.strptime(timestamp, '%Y%m%d')
return datetime_object.strftime(date_format)
except (ValueError, TypeError, AttributeError):
return default
def parse_duration(s):
if not isinstance(s, compat_basestring):
return None
s = s.strip()
days, hours, mins, secs, ms = [None] * 5
m = re.match(r'(?:(?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?Z?$', s)
if m:
days, hours, mins, secs, ms = m.groups()
else:
m = re.match(
r'''(?ix)(?:P?
(?:
[0-9]+\s*y(?:ears?)?\s*
)?
(?:
[0-9]+\s*m(?:onths?)?\s*
)?
(?:
[0-9]+\s*w(?:eeks?)?\s*
)?
(?:
(?P<days>[0-9]+)\s*d(?:ays?)?\s*
)?
T)?
(?:
(?P<hours>[0-9]+)\s*h(?:ours?)?\s*
)?
(?:
(?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?\s*
)?
(?:
(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s*
)?Z?$''', s)
if m:
days, hours, mins, secs, ms = m.groups()
else:
m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s)
if m:
hours, mins = m.groups()
else:
return None
duration = 0
if secs:
duration += float(secs)
if mins:
duration += float(mins) * 60
if hours:
duration += float(hours) * 60 * 60
if days:
duration += float(days) * 24 * 60 * 60
if ms:
duration += float(ms)
return duration
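# Illustrative examples (a sketch): clock-style, verbose and ISO 8601-style
# durations are all accepted, and seconds are returned as a float:
#   parse_duration('1:30') -> 90.0
#   parse_duration('01:02:03.05') -> 3723.05
#   parse_duration('PT1H10M') -> 4200.0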
def prepend_extension(filename, ext, expected_real_ext=None):
name, real_ext = os.path.splitext(filename)
return (
'{0}.{1}{2}'.format(name, ext, real_ext)
if not expected_real_ext or real_ext[1:] == expected_real_ext
else '{0}.{1}'.format(filename, ext))
def replace_extension(filename, ext, expected_real_ext=None):
name, real_ext = os.path.splitext(filename)
return '{0}.{1}'.format(
name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename,
ext)
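# Illustrative examples (a sketch): expected_real_ext guards against filenames
# whose real extension is not the one the caller expects:
#   prepend_extension('video.mp4', 'temp') -> 'video.temp.mp4'
#   prepend_extension('video.unexpected_ext', 'temp', 'mp4') -> 'video.unexpected_ext.temp'
#   replace_extension('video.mp4', 'mkv') -> 'video.mkv'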
def check_executable(exe, args=[]):
""" Checks if the given binary is installed somewhere in PATH, and returns its name.
args can be a list of arguments for a short output (like -version) """
try:
Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate_or_kill()
except OSError:
return False
return exe
def get_exe_version(exe, args=['--version'],
version_re=None, unrecognized='present'):
""" Returns the version of the specified executable,
or False if the executable is not present """
try:
# STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers
# SIGTTOU if yt-dlp is run in the background.
# See https://github.com/ytdl-org/youtube-dl/issues/955#issuecomment-209789656
out, _ = Popen(
[encodeArgument(exe)] + args, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate_or_kill()
except OSError:
return False
if isinstance(out, bytes): # Python 2.x
out = out.decode('ascii', 'ignore')
return detect_exe_version(out, version_re, unrecognized)
def detect_exe_version(output, version_re=None, unrecognized='present'):
assert isinstance(output, compat_str)
if version_re is None:
version_re = r'version\s+([-0-9._a-zA-Z]+)'
m = re.search(version_re, output)
if m:
return m.group(1)
else:
return unrecognized
class LazyList(collections.abc.Sequence):
''' Lazy immutable list from an iterable
Note that slices of a LazyList are lists and not LazyList'''
class IndexError(IndexError):
pass
def __init__(self, iterable):
self.__iterable = iter(iterable)
self.__cache = []
self.__reversed = False
def __iter__(self):
if self.__reversed:
# We need to consume the entire iterable to iterate in reverse
yield from self.exhaust()
return
yield from self.__cache
for item in self.__iterable:
self.__cache.append(item)
yield item
def __exhaust(self):
self.__cache.extend(self.__iterable)
# Discard the emptied iterable to make it pickle-able
self.__iterable = []
return self.__cache
def exhaust(self):
''' Evaluate the entire iterable '''
return self.__exhaust()[::-1 if self.__reversed else 1]
@staticmethod
def __reverse_index(x):
return None if x is None else -(x + 1)
def __getitem__(self, idx):
if isinstance(idx, slice):
if self.__reversed:
idx = slice(self.__reverse_index(idx.start), self.__reverse_index(idx.stop), -(idx.step or 1))
start, stop, step = idx.start, idx.stop, idx.step or 1
elif isinstance(idx, int):
if self.__reversed:
idx = self.__reverse_index(idx)
start, stop, step = idx, idx, 0
else:
raise TypeError('indices must be integers or slices')
if ((start or 0) < 0 or (stop or 0) < 0
or (start is None and step < 0)
or (stop is None and step > 0)):
# We need to consume the entire iterable to be able to slice from the end
# Obviously, never use this with infinite iterables
self.__exhaust()
try:
return self.__cache[idx]
except IndexError as e:
raise self.IndexError(e) from e
n = max(start or 0, stop or 0) - len(self.__cache) + 1
if n > 0:
self.__cache.extend(itertools.islice(self.__iterable, n))
try:
return self.__cache[idx]
except IndexError as e:
raise self.IndexError(e) from e
def __bool__(self):
try:
self[-1] if self.__reversed else self[0]
except self.IndexError:
return False
return True
def __len__(self):
self.__exhaust()
return len(self.__cache)
def reverse(self):
self.__reversed = not self.__reversed
return self
def __repr__(self):
# repr and str should mimic a list. So we exhaust the iterable
return repr(self.exhaust())
def __str__(self):
return repr(self.exhaust())
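# Illustrative usage of LazyList (a sketch): items are pulled from the iterable
# only as far as needed and cached for later accesses:
#   l = LazyList(x * 2 for x in range(5))
#   l[1] -> 2            # consumes the generator up to index 1 only
#   l.reverse()[0] -> 8  # indexing from the end exhausts the iterable on demand
#   list(l) -> [8, 6, 4, 2, 0]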
class PagedList:
def __len__(self):
# This is only useful for tests
return len(self.getslice())
def __init__(self, pagefunc, pagesize, use_cache=True):
self._pagefunc = pagefunc
self._pagesize = pagesize
self._use_cache = use_cache
self._cache = {}
def getpage(self, pagenum):
page_results = self._cache.get(pagenum) or list(self._pagefunc(pagenum))
if self._use_cache:
self._cache[pagenum] = page_results
return page_results
def getslice(self, start=0, end=None):
return list(self._getslice(start, end))
def _getslice(self, start, end):
raise NotImplementedError('This method must be implemented by subclasses')
def __getitem__(self, idx):
# NOTE: cache must be enabled if this is used
if not isinstance(idx, int) or idx < 0:
raise TypeError('indices must be non-negative integers')
entries = self.getslice(idx, idx + 1)
return entries[0] if entries else None
class OnDemandPagedList(PagedList):
def _getslice(self, start, end):
for pagenum in itertools.count(start // self._pagesize):
firstid = pagenum * self._pagesize
nextfirstid = pagenum * self._pagesize + self._pagesize
if start >= nextfirstid:
continue
startv = (
start % self._pagesize
if firstid <= start < nextfirstid
else 0)
endv = (
((end - 1) % self._pagesize) + 1
if (end is not None and firstid <= end <= nextfirstid)
else None)
page_results = self.getpage(pagenum)
if startv != 0 or endv is not None:
page_results = page_results[startv:endv]
yield from page_results
            # A little optimization: if the current page is not "full", i.e. does
            # not contain page_size videos, then we can assume that this page
            # is the last one - there are no more ids on further pages -
            # so there is no need to query again.
if len(page_results) + startv < self._pagesize:
break
# If we got the whole page, but the next page is not interesting,
# break out early as well
if end == nextfirstid:
break
class InAdvancePagedList(PagedList):
def __init__(self, pagefunc, pagecount, pagesize):
self._pagecount = pagecount
PagedList.__init__(self, pagefunc, pagesize, True)
def _getslice(self, start, end):
start_page = start // self._pagesize
end_page = (
self._pagecount if end is None else (end // self._pagesize + 1))
skip_elems = start - start_page * self._pagesize
only_more = None if end is None else end - start
for pagenum in range(start_page, end_page):
page_results = self.getpage(pagenum)
if skip_elems:
page_results = page_results[skip_elems:]
skip_elems = None
if only_more is not None:
if len(page_results) < only_more:
only_more -= len(page_results)
else:
yield from page_results[:only_more]
break
yield from page_results
def uppercase_escape(s):
unicode_escape = codecs.getdecoder('unicode_escape')
return re.sub(
r'\\U[0-9a-fA-F]{8}',
lambda m: unicode_escape(m.group(0))[0],
s)
def lowercase_escape(s):
unicode_escape = codecs.getdecoder('unicode_escape')
return re.sub(
r'\\u[0-9a-fA-F]{4}',
lambda m: unicode_escape(m.group(0))[0],
s)
def escape_rfc3986(s):
"""Escape non-ASCII characters as suggested by RFC 3986"""
if sys.version_info < (3, 0) and isinstance(s, compat_str):
s = s.encode('utf-8')
return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")
def escape_url(url):
"""Escape URL as suggested by RFC 3986"""
url_parsed = compat_urllib_parse_urlparse(url)
return url_parsed._replace(
netloc=url_parsed.netloc.encode('idna').decode('ascii'),
path=escape_rfc3986(url_parsed.path),
params=escape_rfc3986(url_parsed.params),
query=escape_rfc3986(url_parsed.query),
fragment=escape_rfc3986(url_parsed.fragment)
).geturl()
def parse_qs(url):
return compat_parse_qs(compat_urllib_parse_urlparse(url).query)
def read_batch_urls(batch_fd):
def fixup(url):
if not isinstance(url, compat_str):
url = url.decode('utf-8', 'replace')
BOM_UTF8 = ('\xef\xbb\xbf', '\ufeff')
for bom in BOM_UTF8:
if url.startswith(bom):
url = url[len(bom):]
url = url.lstrip()
if not url or url.startswith(('#', ';', ']')):
return False
# "#" cannot be stripped out since it is part of the URI
# However, it can be safely stipped out if follwing a whitespace
return re.split(r'\s#', url, 1)[0].rstrip()
with contextlib.closing(batch_fd) as fd:
return [url for url in map(fixup, fd) if url]
def urlencode_postdata(*args, **kargs):
return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii')
def update_url_query(url, query):
if not query:
return url
parsed_url = compat_urlparse.urlparse(url)
qs = compat_parse_qs(parsed_url.query)
qs.update(query)
return compat_urlparse.urlunparse(parsed_url._replace(
query=compat_urllib_parse_urlencode(qs, True)))
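# Illustrative example (a sketch):
#   update_url_query('http://example.com/path', {'quality': ['HD'], 'format': 'mp4'})
#       -> 'http://example.com/path?quality=HD&format=mp4'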
def update_Request(req, url=None, data=None, headers={}, query={}):
req_headers = req.headers.copy()
req_headers.update(headers)
req_data = data or req.data
req_url = update_url_query(url or req.get_full_url(), query)
req_get_method = req.get_method()
if req_get_method == 'HEAD':
req_type = HEADRequest
elif req_get_method == 'PUT':
req_type = PUTRequest
else:
req_type = compat_urllib_request.Request
new_req = req_type(
req_url, data=req_data, headers=req_headers,
origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
if hasattr(req, 'timeout'):
new_req.timeout = req.timeout
return new_req
def _multipart_encode_impl(data, boundary):
content_type = 'multipart/form-data; boundary=%s' % boundary
out = b''
for k, v in data.items():
out += b'--' + boundary.encode('ascii') + b'\r\n'
if isinstance(k, compat_str):
k = k.encode('utf-8')
if isinstance(v, compat_str):
v = v.encode('utf-8')
# RFC 2047 requires non-ASCII field names to be encoded, while RFC 7578
# suggests sending UTF-8 directly. Firefox sends UTF-8, too
content = b'Content-Disposition: form-data; name="' + k + b'"\r\n\r\n' + v + b'\r\n'
if boundary.encode('ascii') in content:
raise ValueError('Boundary overlaps with data')
out += content
out += b'--' + boundary.encode('ascii') + b'--\r\n'
return out, content_type
def multipart_encode(data, boundary=None):
'''
Encode a dict to RFC 7578-compliant form-data
data:
A dict where keys and values can be either Unicode or bytes-like
objects.
boundary:
        If specified, it must be a Unicode object, which is used as the boundary. Otherwise
a random boundary is generated.
Reference: https://tools.ietf.org/html/rfc7578
'''
has_specified_boundary = boundary is not None
while True:
if boundary is None:
boundary = '---------------' + str(random.randrange(0x0fffffff, 0xffffffff))
try:
out, content_type = _multipart_encode_impl(data, boundary)
break
except ValueError:
if has_specified_boundary:
raise
boundary = None
return out, content_type
def dict_get(d, key_or_keys, default=None, skip_false_values=True):
if isinstance(key_or_keys, (list, tuple)):
for key in key_or_keys:
if key not in d or d[key] is None or skip_false_values and not d[key]:
continue
return d[key]
return default
return d.get(key_or_keys, default)
def try_get(src, getter, expected_type=None):
for get in variadic(getter):
try:
v = get(src)
except (AttributeError, KeyError, TypeError, IndexError):
pass
else:
if expected_type is None or isinstance(v, expected_type):
return v
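# Illustrative examples (a sketch):
#   d = {'a': None, 'b': 0, 'c': 3}
#   dict_get(d, ('a', 'b', 'c')) -> 3   # None and falsy values are skipped
#   dict_get(d, ('a', 'b'), skip_false_values=False) -> 0
#   try_get(d, lambda x: x['missing'], int) -> None  # the KeyError is swallowed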
def merge_dicts(*dicts):
merged = {}
for a_dict in dicts:
for k, v in a_dict.items():
if v is None:
continue
if (k not in merged
or (isinstance(v, compat_str) and v
and isinstance(merged[k], compat_str)
and not merged[k])):
merged[k] = v
return merged
def encode_compat_str(string, encoding=preferredencoding(), errors='strict'):
return string if isinstance(string, compat_str) else compat_str(string, encoding, errors)
US_RATINGS = {
'G': 0,
'PG': 10,
'PG-13': 13,
'R': 16,
'NC': 18,
}
TV_PARENTAL_GUIDELINES = {
'TV-Y': 0,
'TV-Y7': 7,
'TV-G': 0,
'TV-PG': 0,
'TV-14': 14,
'TV-MA': 17,
}
def parse_age_limit(s):
if type(s) == int:
return s if 0 <= s <= 21 else None
if not isinstance(s, compat_basestring):
return None
m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
if m:
return int(m.group('age'))
s = s.upper()
if s in US_RATINGS:
return US_RATINGS[s]
m = re.match(r'^TV[_-]?(%s)$' % '|'.join(k[3:] for k in TV_PARENTAL_GUIDELINES), s)
if m:
return TV_PARENTAL_GUIDELINES['TV-' + m.group(1)]
return None
def strip_jsonp(code):
return re.sub(
r'''(?sx)^
(?:window\.)?(?P<func_name>[a-zA-Z0-9_.$]*)
(?:\s*&&\s*(?P=func_name))?
\s*\(\s*(?P<callback_data>.*)\);?
\s*?(?://[^\n]*)*$''',
r'\g<callback_data>', code)
def js_to_json(code, vars={}):
# vars is a dict of var, val pairs to substitute
COMMENT_RE = r'/\*(?:(?!\*/).)*?\*/|//[^\n]*\n'
SKIP_RE = r'\s*(?:{comment})?\s*'.format(comment=COMMENT_RE)
INTEGER_TABLE = (
(r'(?s)^(0[xX][0-9a-fA-F]+){skip}:?$'.format(skip=SKIP_RE), 16),
(r'(?s)^(0+[0-7]+){skip}:?$'.format(skip=SKIP_RE), 8),
)
def fix_kv(m):
v = m.group(0)
if v in ('true', 'false', 'null'):
return v
elif v in ('undefined', 'void 0'):
return 'null'
elif v.startswith('/*') or v.startswith('//') or v.startswith('!') or v == ',':
return ""
if v[0] in ("'", '"'):
v = re.sub(r'(?s)\\.|"', lambda m: {
'"': '\\"',
"\\'": "'",
'\\\n': '',
'\\x': '\\u00',
}.get(m.group(0), m.group(0)), v[1:-1])
else:
for regex, base in INTEGER_TABLE:
im = re.match(regex, v)
if im:
i = int(im.group(1), base)
return '"%d":' % i if v.endswith(':') else '%d' % i
if v in vars:
return vars[v]
return '"%s"' % v
return re.sub(r'''(?sx)
"(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"|
'(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'|
{comment}|,(?={skip}[\]}}])|
void\s0|(?:(?<![0-9])[eE]|[a-df-zA-DF-Z_$])[.a-zA-Z_$0-9]*|
\b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{skip}:)?|
[0-9]+(?={skip}:)|
!+
'''.format(comment=COMMENT_RE, skip=SKIP_RE), fix_kv, code)
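# Illustrative examples (a sketch): bare keys are quoted and JS-only literals
# are mapped to their JSON equivalents:
#   js_to_json("{abc: true, 'def': undefined}") -> '{"abc": true, "def": null}'
#   js_to_json('{"id": 0x1f}') -> '{"id": 31}'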
def qualities(quality_ids):
""" Get a numeric quality value out of a list of possible values """
def q(qid):
try:
return quality_ids.index(qid)
except ValueError:
return -1
return q
DEFAULT_OUTTMPL = {
'default': '%(title)s [%(id)s].%(ext)s',
'chapter': '%(title)s - %(section_number)03d %(section_title)s [%(id)s].%(ext)s',
}
OUTTMPL_TYPES = {
'chapter': None,
'subtitle': None,
'thumbnail': None,
'description': 'description',
'annotation': 'annotations.xml',
'infojson': 'info.json',
'link': None,
'pl_thumbnail': None,
'pl_description': 'description',
'pl_infojson': 'info.json',
}
# As per [1], the format syntax is:
# %[mapping_key][conversion_flags][minimum_width][.precision][length_modifier]type
# 1. https://docs.python.org/2/library/stdtypes.html#string-formatting
STR_FORMAT_RE_TMPL = r'''(?x)
(?<!%)(?P<prefix>(?:%%)*)
%
(?P<has_key>\((?P<key>{0})\))?
(?P<format>
(?P<conversion>[#0\-+ ]+)?
(?P<min_width>\d+)?
(?P<precision>\.\d+)?
(?P<len_mod>[hlL])? # unused in python
{1} # conversion type
)
'''
STR_FORMAT_TYPES = 'diouxXeEfFgGcrs'
def limit_length(s, length):
""" Add ellipses to overly long strings """
if s is None:
return None
ELLIPSES = '...'
if len(s) > length:
return s[:length - len(ELLIPSES)] + ELLIPSES
return s
def version_tuple(v):
return tuple(int(e) for e in re.split(r'[-.]', v))
def is_outdated_version(version, limit, assume_new=True):
if not version:
return not assume_new
try:
return version_tuple(version) < version_tuple(limit)
except ValueError:
return not assume_new
def ytdl_is_updateable():
""" Returns if yt-dlp can be updated with -U """
from .update import is_non_updateable
return not is_non_updateable()
def args_to_str(args):
# Get a short string representation for a subprocess command
return ' '.join(compat_shlex_quote(a) for a in args)
def error_to_compat_str(err):
err_str = str(err)
    # On Python 2 an error byte string must be decoded with the proper
    # encoding rather than ascii
if sys.version_info[0] < 3:
err_str = err_str.decode(preferredencoding())
return err_str
def mimetype2ext(mt):
if mt is None:
return None
mt, _, params = mt.partition(';')
mt = mt.strip()
FULL_MAP = {
'audio/mp4': 'm4a',
# Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. Here use .mp3 as
# it's the most popular one
'audio/mpeg': 'mp3',
'audio/x-wav': 'wav',
'audio/wav': 'wav',
'audio/wave': 'wav',
}
ext = FULL_MAP.get(mt)
if ext is not None:
return ext
SUBTYPE_MAP = {
'3gpp': '3gp',
'smptett+xml': 'tt',
'ttaf+xml': 'dfxp',
'ttml+xml': 'ttml',
'x-flv': 'flv',
'x-mp4-fragmented': 'mp4',
'x-ms-sami': 'sami',
'x-ms-wmv': 'wmv',
'mpegurl': 'm3u8',
'x-mpegurl': 'm3u8',
'vnd.apple.mpegurl': 'm3u8',
'dash+xml': 'mpd',
'f4m+xml': 'f4m',
'hds+xml': 'f4m',
'vnd.ms-sstr+xml': 'ism',
'quicktime': 'mov',
'mp2t': 'ts',
'x-wav': 'wav',
'filmstrip+json': 'fs',
'svg+xml': 'svg',
}
_, _, subtype = mt.rpartition('/')
ext = SUBTYPE_MAP.get(subtype.lower())
if ext is not None:
return ext
SUFFIX_MAP = {
'json': 'json',
'xml': 'xml',
'zip': 'zip',
'gzip': 'gz',
}
_, _, suffix = subtype.partition('+')
ext = SUFFIX_MAP.get(suffix)
if ext is not None:
return ext
return subtype.replace('+', '.')
def parse_codecs(codecs_str):
# http://tools.ietf.org/html/rfc6381
if not codecs_str:
return {}
split_codecs = list(filter(None, map(
str.strip, codecs_str.strip().strip(',').split(','))))
vcodec, acodec, hdr = None, None, None
for full_codec in split_codecs:
codec = full_codec.split('.')[0]
if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2', 'h263', 'h264', 'mp4v', 'hvc1', 'av01', 'theora', 'dvh1', 'dvhe'):
if not vcodec:
vcodec = full_codec
if codec in ('dvh1', 'dvhe'):
hdr = 'DV'
elif codec == 'vp9' and vcodec.startswith('vp9.2'):
hdr = 'HDR10'
elif codec == 'av01':
parts = full_codec.split('.')
if len(parts) > 3 and parts[3] == '10':
hdr = 'HDR10'
vcodec = '.'.join(parts[:4])
elif codec in ('mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl'):
if not acodec:
acodec = full_codec
else:
write_string('WARNING: Unknown codec %s\n' % full_codec, sys.stderr)
if not vcodec and not acodec:
if len(split_codecs) == 2:
return {
'vcodec': split_codecs[0],
'acodec': split_codecs[1],
}
else:
return {
'vcodec': vcodec or 'none',
'acodec': acodec or 'none',
'dynamic_range': hdr,
}
return {}
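# Illustrative examples (a sketch):
#   parse_codecs('avc1.64001f, mp4a.40.2')
#       -> {'vcodec': 'avc1.64001f', 'acodec': 'mp4a.40.2', 'dynamic_range': None}
#   parse_codecs('dvh1.05.01')
#       -> {'vcodec': 'dvh1.05.01', 'acodec': 'none', 'dynamic_range': 'DV'}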
def urlhandle_detect_ext(url_handle):
getheader = url_handle.headers.get
cd = getheader('Content-Disposition')
if cd:
m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
if m:
e = determine_ext(m.group('filename'), default_ext=None)
if e:
return e
return mimetype2ext(getheader('Content-Type'))
def encode_data_uri(data, mime_type):
return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii'))
def age_restricted(content_limit, age_limit):
""" Returns True iff the content should be blocked """
if age_limit is None: # No limit set
return False
if content_limit is None:
return False # Content available for everyone
return age_limit < content_limit
def is_html(first_bytes):
""" Detect whether a file contains HTML by examining its first bytes. """
BOMS = [
(b'\xef\xbb\xbf', 'utf-8'),
(b'\x00\x00\xfe\xff', 'utf-32-be'),
(b'\xff\xfe\x00\x00', 'utf-32-le'),
(b'\xff\xfe', 'utf-16-le'),
(b'\xfe\xff', 'utf-16-be'),
]
for bom, enc in BOMS:
if first_bytes.startswith(bom):
s = first_bytes[len(bom):].decode(enc, 'replace')
break
else:
s = first_bytes.decode('utf-8', 'replace')
return re.match(r'^\s*<', s)
def determine_protocol(info_dict):
protocol = info_dict.get('protocol')
if protocol is not None:
return protocol
url = sanitize_url(info_dict['url'])
if url.startswith('rtmp'):
return 'rtmp'
elif url.startswith('mms'):
return 'mms'
elif url.startswith('rtsp'):
return 'rtsp'
ext = determine_ext(url)
if ext == 'm3u8':
return 'm3u8'
elif ext == 'f4m':
return 'f4m'
return compat_urllib_parse_urlparse(url).scheme
def render_table(header_row, data, delim=False, extraGap=0, hideEmpty=False):
""" Render a list of rows, each as a list of values """
def width(string):
return len(remove_terminal_sequences(string))
def get_max_lens(table):
return [max(width(str(v)) for v in col) for col in zip(*table)]
def filter_using_list(row, filterArray):
return [col for (take, col) in zip(filterArray, row) if take]
if hideEmpty:
max_lens = get_max_lens(data)
header_row = filter_using_list(header_row, max_lens)
data = [filter_using_list(row, max_lens) for row in data]
table = [header_row] + data
max_lens = get_max_lens(table)
extraGap += 1
if delim:
table = [header_row] + [[delim * (ml + extraGap) for ml in max_lens]] + data
max_lens[-1] = 0
for row in table:
for pos, text in enumerate(map(str, row)):
row[pos] = text + (' ' * (max_lens[pos] - width(text) + extraGap))
ret = '\n'.join(''.join(row) for row in table)
return ret
def _match_one(filter_part, dct, incomplete):
# TODO: Generalize code with YoutubeDL._build_format_filter
STRING_OPERATORS = {
'*=': operator.contains,
'^=': lambda attr, value: attr.startswith(value),
'$=': lambda attr, value: attr.endswith(value),
'~=': lambda attr, value: re.search(value, attr),
}
COMPARISON_OPERATORS = {
**STRING_OPERATORS,
'<=': operator.le, # "<=" must be defined above "<"
'<': operator.lt,
'>=': operator.ge,
'>': operator.gt,
'=': operator.eq,
}
operator_rex = re.compile(r'''(?x)\s*
(?P<key>[a-z_]+)
\s*(?P<negation>!\s*)?(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
(?:
(?P<quote>["\'])(?P<quotedstrval>.+?)(?P=quote)|
(?P<strval>.+?)
)
\s*$
''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys())))
m = operator_rex.search(filter_part)
if m:
m = m.groupdict()
unnegated_op = COMPARISON_OPERATORS[m['op']]
if m['negation']:
op = lambda attr, value: not unnegated_op(attr, value)
else:
op = unnegated_op
        # NB: the regex above defines no 'intval' group, so only the two string
        # alternatives can have matched here
        comparison_value = m['quotedstrval'] or m['strval']
if m['quote']:
comparison_value = comparison_value.replace(r'\%s' % m['quote'], m['quote'])
actual_value = dct.get(m['key'])
numeric_comparison = None
if isinstance(actual_value, compat_numeric_types):
            # If the original field is a string and the matching comparison value
            # is a number, we should respect the origin of the original field
            # and process the comparison value as a string (see
            # https://github.com/ytdl-org/youtube-dl/issues/11082)
try:
numeric_comparison = int(comparison_value)
except ValueError:
numeric_comparison = parse_filesize(comparison_value)
if numeric_comparison is None:
numeric_comparison = parse_filesize(f'{comparison_value}B')
if numeric_comparison is None:
numeric_comparison = parse_duration(comparison_value)
if numeric_comparison is not None and m['op'] in STRING_OPERATORS:
raise ValueError('Operator %s only supports string values!' % m['op'])
if actual_value is None:
return incomplete or m['none_inclusive']
return op(actual_value, comparison_value if numeric_comparison is None else numeric_comparison)
UNARY_OPERATORS = {
'': lambda v: (v is True) if isinstance(v, bool) else (v is not None),
'!': lambda v: (v is False) if isinstance(v, bool) else (v is None),
}
operator_rex = re.compile(r'''(?x)\s*
(?P<op>%s)\s*(?P<key>[a-z_]+)
\s*$
''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys())))
m = operator_rex.search(filter_part)
if m:
op = UNARY_OPERATORS[m.group('op')]
actual_value = dct.get(m.group('key'))
if incomplete and actual_value is None:
return True
return op(actual_value)
raise ValueError('Invalid filter part %r' % filter_part)
def match_str(filter_str, dct, incomplete=False):
""" Filter a dictionary with a simple string syntax. Returns True (=passes filter) or false
When incomplete, all conditions passes on missing fields
"""
return all(
_match_one(filter_part.replace(r'\&', '&'), dct, incomplete)
for filter_part in re.split(r'(?<!\\)&', filter_str))
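# Illustrative examples (a sketch):
#   match_str('like_count > 100 & title*=foo',
#             {'like_count': 190, 'title': 'foobar'}) -> True
#   match_str('dislike_count <? 50', {}) -> True  # '?' lets missing fields pass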
def match_filter_func(filter_str):
def _match_func(info_dict, *args, **kwargs):
if match_str(filter_str, info_dict, *args, **kwargs):
return None
else:
video_title = info_dict.get('title', info_dict.get('id', 'video'))
return '%s does not pass filter %s, skipping ..' % (video_title, filter_str)
return _match_func
def parse_dfxp_time_expr(time_expr):
if not time_expr:
return
mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr)
if mobj:
return float(mobj.group('time_offset'))
mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr)
if mobj:
return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.'))
def srt_subtitles_timecode(seconds):
return '%02d:%02d:%02d,%03d' % timetuple_from_msec(seconds * 1000)
def ass_subtitles_timecode(seconds):
time = timetuple_from_msec(seconds * 1000)
return '%01d:%02d:%02d.%02d' % (*time[:-1], time.milliseconds / 10)
def dfxp2srt(dfxp_data):
'''
@param dfxp_data A bytes-like object containing DFXP data
@returns A unicode object containing converted SRT data
'''
LEGACY_NAMESPACES = (
(b'http://www.w3.org/ns/ttml', [
b'http://www.w3.org/2004/11/ttaf1',
b'http://www.w3.org/2006/04/ttaf1',
b'http://www.w3.org/2006/10/ttaf1',
]),
(b'http://www.w3.org/ns/ttml#styling', [
b'http://www.w3.org/ns/ttml#style',
]),
)
SUPPORTED_STYLING = [
'color',
'fontFamily',
'fontSize',
'fontStyle',
'fontWeight',
'textDecoration'
]
_x = functools.partial(xpath_with_ns, ns_map={
'xml': 'http://www.w3.org/XML/1998/namespace',
'ttml': 'http://www.w3.org/ns/ttml',
'tts': 'http://www.w3.org/ns/ttml#styling',
})
styles = {}
default_style = {}
class TTMLPElementParser(object):
_out = ''
_unclosed_elements = []
_applied_styles = []
def start(self, tag, attrib):
if tag in (_x('ttml:br'), 'br'):
self._out += '\n'
else:
unclosed_elements = []
style = {}
element_style_id = attrib.get('style')
if default_style:
style.update(default_style)
if element_style_id:
style.update(styles.get(element_style_id, {}))
for prop in SUPPORTED_STYLING:
prop_val = attrib.get(_x('tts:' + prop))
if prop_val:
style[prop] = prop_val
if style:
font = ''
for k, v in sorted(style.items()):
if self._applied_styles and self._applied_styles[-1].get(k) == v:
continue
if k == 'color':
font += ' color="%s"' % v
elif k == 'fontSize':
font += ' size="%s"' % v
elif k == 'fontFamily':
font += ' face="%s"' % v
elif k == 'fontWeight' and v == 'bold':
self._out += '<b>'
unclosed_elements.append('b')
elif k == 'fontStyle' and v == 'italic':
self._out += '<i>'
unclosed_elements.append('i')
elif k == 'textDecoration' and v == 'underline':
self._out += '<u>'
unclosed_elements.append('u')
if font:
self._out += '<font' + font + '>'
unclosed_elements.append('font')
applied_style = {}
if self._applied_styles:
applied_style.update(self._applied_styles[-1])
applied_style.update(style)
self._applied_styles.append(applied_style)
self._unclosed_elements.append(unclosed_elements)
def end(self, tag):
if tag not in (_x('ttml:br'), 'br'):
unclosed_elements = self._unclosed_elements.pop()
for element in reversed(unclosed_elements):
self._out += '</%s>' % element
if unclosed_elements and self._applied_styles:
self._applied_styles.pop()
def data(self, data):
self._out += data
def close(self):
return self._out.strip()
def parse_node(node):
target = TTMLPElementParser()
parser = xml.etree.ElementTree.XMLParser(target=target)
parser.feed(xml.etree.ElementTree.tostring(node))
return parser.close()
for k, v in LEGACY_NAMESPACES:
for ns in v:
dfxp_data = dfxp_data.replace(ns, k)
dfxp = compat_etree_fromstring(dfxp_data)
out = []
paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall('.//p')
if not paras:
raise ValueError('Invalid dfxp/TTML subtitle')
repeat = False
while True:
for style in dfxp.findall(_x('.//ttml:style')):
style_id = style.get('id') or style.get(_x('xml:id'))
if not style_id:
continue
parent_style_id = style.get('style')
if parent_style_id:
if parent_style_id not in styles:
repeat = True
continue
styles[style_id] = styles[parent_style_id].copy()
for prop in SUPPORTED_STYLING:
prop_val = style.get(_x('tts:' + prop))
if prop_val:
styles.setdefault(style_id, {})[prop] = prop_val
if repeat:
repeat = False
else:
break
for p in ('body', 'div'):
ele = xpath_element(dfxp, [_x('.//ttml:' + p), './/' + p])
if ele is None:
continue
style = styles.get(ele.get('style'))
if not style:
continue
default_style.update(style)
for para, index in zip(paras, itertools.count(1)):
begin_time = parse_dfxp_time_expr(para.attrib.get('begin'))
end_time = parse_dfxp_time_expr(para.attrib.get('end'))
dur = parse_dfxp_time_expr(para.attrib.get('dur'))
if begin_time is None:
continue
if not end_time:
if not dur:
continue
end_time = begin_time + dur
out.append('%d\n%s --> %s\n%s\n\n' % (
index,
srt_subtitles_timecode(begin_time),
srt_subtitles_timecode(end_time),
parse_node(para)))
return ''.join(out)
def cli_option(params, command_option, param):
param = params.get(param)
if param:
param = compat_str(param)
return [command_option, param] if param is not None else []
def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
param = params.get(param)
if param is None:
return []
assert isinstance(param, bool)
if separator:
return [command_option + separator + (true_value if param else false_value)]
return [command_option, true_value if param else false_value]
def cli_valueless_option(params, command_option, param, expected_value=True):
param = params.get(param)
return [command_option] if param == expected_value else []
def cli_configuration_args(argdict, keys, default=[], use_compat=True):
if isinstance(argdict, (list, tuple)): # for backward compatibility
if use_compat:
return argdict
else:
argdict = None
if argdict is None:
return default
assert isinstance(argdict, dict)
assert isinstance(keys, (list, tuple))
for key_list in keys:
arg_list = list(filter(
lambda x: x is not None,
[argdict.get(key.lower()) for key in variadic(key_list)]))
if arg_list:
return [arg for args in arg_list for arg in args]
return default
def _configuration_args(main_key, argdict, exe, keys=None, default=[], use_compat=True):
main_key, exe = main_key.lower(), exe.lower()
root_key = exe if main_key == exe else f'{main_key}+{exe}'
keys = [f'{root_key}{k}' for k in (keys or [''])]
if root_key in keys:
if main_key != exe:
keys.append((main_key, exe))
keys.append('default')
else:
use_compat = False
return cli_configuration_args(argdict, keys, default, use_compat)
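# Illustrative lookup order (a sketch, assuming a postprocessor named 'merger'
# running 'ffmpeg'): keys are tried most-specific first, so arguments under
# 'merger+ffmpeg' win over 'merger'/'ffmpeg', which win over 'default'.
#   >>> _configuration_args('merger', {'default': ['-v']}, 'ffmpeg')
#   ['-v']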
class ISO639Utils(object):
# See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
_lang_map = {
'aa': 'aar',
'ab': 'abk',
'ae': 'ave',
'af': 'afr',
'ak': 'aka',
'am': 'amh',
'an': 'arg',
'ar': 'ara',
'as': 'asm',
'av': 'ava',
'ay': 'aym',
'az': 'aze',
'ba': 'bak',
'be': 'bel',
'bg': 'bul',
'bh': 'bih',
'bi': 'bis',
'bm': 'bam',
'bn': 'ben',
'bo': 'bod',
'br': 'bre',
'bs': 'bos',
'ca': 'cat',
'ce': 'che',
'ch': 'cha',
'co': 'cos',
'cr': 'cre',
'cs': 'ces',
'cu': 'chu',
'cv': 'chv',
'cy': 'cym',
'da': 'dan',
'de': 'deu',
'dv': 'div',
'dz': 'dzo',
'ee': 'ewe',
'el': 'ell',
'en': 'eng',
'eo': 'epo',
'es': 'spa',
'et': 'est',
'eu': 'eus',
'fa': 'fas',
'ff': 'ful',
'fi': 'fin',
'fj': 'fij',
'fo': 'fao',
'fr': 'fra',
'fy': 'fry',
'ga': 'gle',
'gd': 'gla',
'gl': 'glg',
'gn': 'grn',
'gu': 'guj',
'gv': 'glv',
'ha': 'hau',
'he': 'heb',
'iw': 'heb', # Replaced by he in 1989 revision
'hi': 'hin',
'ho': 'hmo',
'hr': 'hrv',
'ht': 'hat',
'hu': 'hun',
'hy': 'hye',
'hz': 'her',
'ia': 'ina',
'id': 'ind',
'in': 'ind', # Replaced by id in 1989 revision
'ie': 'ile',
'ig': 'ibo',
'ii': 'iii',
'ik': 'ipk',
'io': 'ido',
'is': 'isl',
'it': 'ita',
'iu': 'iku',
'ja': 'jpn',
'jv': 'jav',
'ka': 'kat',
'kg': 'kon',
'ki': 'kik',
'kj': 'kua',
'kk': 'kaz',
'kl': 'kal',
'km': 'khm',
'kn': 'kan',
'ko': 'kor',
'kr': 'kau',
'ks': 'kas',
'ku': 'kur',
'kv': 'kom',
'kw': 'cor',
'ky': 'kir',
'la': 'lat',
'lb': 'ltz',
'lg': 'lug',
'li': 'lim',
'ln': 'lin',
'lo': 'lao',
'lt': 'lit',
'lu': 'lub',
'lv': 'lav',
'mg': 'mlg',
'mh': 'mah',
'mi': 'mri',
'mk': 'mkd',
'ml': 'mal',
'mn': 'mon',
'mr': 'mar',
'ms': 'msa',
'mt': 'mlt',
'my': 'mya',
'na': 'nau',
'nb': 'nob',
'nd': 'nde',
'ne': 'nep',
'ng': 'ndo',
'nl': 'nld',
'nn': 'nno',
'no': 'nor',
'nr': 'nbl',
'nv': 'nav',
'ny': 'nya',
'oc': 'oci',
'oj': 'oji',
'om': 'orm',
'or': 'ori',
'os': 'oss',
'pa': 'pan',
'pi': 'pli',
'pl': 'pol',
'ps': 'pus',
'pt': 'por',
'qu': 'que',
'rm': 'roh',
'rn': 'run',
'ro': 'ron',
'ru': 'rus',
'rw': 'kin',
'sa': 'san',
'sc': 'srd',
'sd': 'snd',
'se': 'sme',
'sg': 'sag',
'si': 'sin',
'sk': 'slk',
'sl': 'slv',
'sm': 'smo',
'sn': 'sna',
'so': 'som',
'sq': 'sqi',
'sr': 'srp',
'ss': 'ssw',
'st': 'sot',
'su': 'sun',
'sv': 'swe',
'sw': 'swa',
'ta': 'tam',
'te': 'tel',
'tg': 'tgk',
'th': 'tha',
'ti': 'tir',
'tk': 'tuk',
'tl': 'tgl',
'tn': 'tsn',
'to': 'ton',
'tr': 'tur',
'ts': 'tso',
'tt': 'tat',
'tw': 'twi',
'ty': 'tah',
'ug': 'uig',
'uk': 'ukr',
'ur': 'urd',
'uz': 'uzb',
've': 'ven',
'vi': 'vie',
'vo': 'vol',
'wa': 'wln',
'wo': 'wol',
'xh': 'xho',
'yi': 'yid',
'ji': 'yid', # Replaced by yi in 1989 revision
'yo': 'yor',
'za': 'zha',
'zh': 'zho',
'zu': 'zul',
}
@classmethod
def short2long(cls, code):
"""Convert language code from ISO 639-1 to ISO 639-2/T"""
return cls._lang_map.get(code[:2])
@classmethod
def long2short(cls, code):
"""Convert language code from ISO 639-2/T to ISO 639-1"""
for short_name, long_name in cls._lang_map.items():
if long_name == code:
return short_name
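# Illustrative usage (not part of the original module):
#   >>> ISO639Utils.short2long('en')
#   'eng'
#   >>> ISO639Utils.long2short('fra')
#   'fr'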
class ISO3166Utils(object):
# From http://data.okfn.org/data/core/country-list
_country_map = {
'AF': 'Afghanistan',
'AX': 'Åland Islands',
'AL': 'Albania',
'DZ': 'Algeria',
'AS': 'American Samoa',
'AD': 'Andorra',
'AO': 'Angola',
'AI': 'Anguilla',
'AQ': 'Antarctica',
'AG': 'Antigua and Barbuda',
'AR': 'Argentina',
'AM': 'Armenia',
'AW': 'Aruba',
'AU': 'Australia',
'AT': 'Austria',
'AZ': 'Azerbaijan',
'BS': 'Bahamas',
'BH': 'Bahrain',
'BD': 'Bangladesh',
'BB': 'Barbados',
'BY': 'Belarus',
'BE': 'Belgium',
'BZ': 'Belize',
'BJ': 'Benin',
'BM': 'Bermuda',
'BT': 'Bhutan',
'BO': 'Bolivia, Plurinational State of',
'BQ': 'Bonaire, Sint Eustatius and Saba',
'BA': 'Bosnia and Herzegovina',
'BW': 'Botswana',
'BV': 'Bouvet Island',
'BR': 'Brazil',
'IO': 'British Indian Ocean Territory',
'BN': 'Brunei Darussalam',
'BG': 'Bulgaria',
'BF': 'Burkina Faso',
'BI': 'Burundi',
'KH': 'Cambodia',
'CM': 'Cameroon',
'CA': 'Canada',
'CV': 'Cape Verde',
'KY': 'Cayman Islands',
'CF': 'Central African Republic',
'TD': 'Chad',
'CL': 'Chile',
'CN': 'China',
'CX': 'Christmas Island',
'CC': 'Cocos (Keeling) Islands',
'CO': 'Colombia',
'KM': 'Comoros',
'CG': 'Congo',
'CD': 'Congo, the Democratic Republic of the',
'CK': 'Cook Islands',
'CR': 'Costa Rica',
'CI': 'Côte d\'Ivoire',
'HR': 'Croatia',
'CU': 'Cuba',
'CW': 'Curaçao',
'CY': 'Cyprus',
'CZ': 'Czech Republic',
'DK': 'Denmark',
'DJ': 'Djibouti',
'DM': 'Dominica',
'DO': 'Dominican Republic',
'EC': 'Ecuador',
'EG': 'Egypt',
'SV': 'El Salvador',
'GQ': 'Equatorial Guinea',
'ER': 'Eritrea',
'EE': 'Estonia',
'ET': 'Ethiopia',
'FK': 'Falkland Islands (Malvinas)',
'FO': 'Faroe Islands',
'FJ': 'Fiji',
'FI': 'Finland',
'FR': 'France',
'GF': 'French Guiana',
'PF': 'French Polynesia',
'TF': 'French Southern Territories',
'GA': 'Gabon',
'GM': 'Gambia',
'GE': 'Georgia',
'DE': 'Germany',
'GH': 'Ghana',
'GI': 'Gibraltar',
'GR': 'Greece',
'GL': 'Greenland',
'GD': 'Grenada',
'GP': 'Guadeloupe',
'GU': 'Guam',
'GT': 'Guatemala',
'GG': 'Guernsey',
'GN': 'Guinea',
'GW': 'Guinea-Bissau',
'GY': 'Guyana',
'HT': 'Haiti',
'HM': 'Heard Island and McDonald Islands',
'VA': 'Holy See (Vatican City State)',
'HN': 'Honduras',
'HK': 'Hong Kong',
'HU': 'Hungary',
'IS': 'Iceland',
'IN': 'India',
'ID': 'Indonesia',
'IR': 'Iran, Islamic Republic of',
'IQ': 'Iraq',
'IE': 'Ireland',
'IM': 'Isle of Man',
'IL': 'Israel',
'IT': 'Italy',
'JM': 'Jamaica',
'JP': 'Japan',
'JE': 'Jersey',
'JO': 'Jordan',
'KZ': 'Kazakhstan',
'KE': 'Kenya',
'KI': 'Kiribati',
'KP': 'Korea, Democratic People\'s Republic of',
'KR': 'Korea, Republic of',
'KW': 'Kuwait',
'KG': 'Kyrgyzstan',
'LA': 'Lao People\'s Democratic Republic',
'LV': 'Latvia',
'LB': 'Lebanon',
'LS': 'Lesotho',
'LR': 'Liberia',
'LY': 'Libya',
'LI': 'Liechtenstein',
'LT': 'Lithuania',
'LU': 'Luxembourg',
'MO': 'Macao',
'MK': 'Macedonia, the Former Yugoslav Republic of',
'MG': 'Madagascar',
'MW': 'Malawi',
'MY': 'Malaysia',
'MV': 'Maldives',
'ML': 'Mali',
'MT': 'Malta',
'MH': 'Marshall Islands',
'MQ': 'Martinique',
'MR': 'Mauritania',
'MU': 'Mauritius',
'YT': 'Mayotte',
'MX': 'Mexico',
'FM': 'Micronesia, Federated States of',
'MD': 'Moldova, Republic of',
'MC': 'Monaco',
'MN': 'Mongolia',
'ME': 'Montenegro',
'MS': 'Montserrat',
'MA': 'Morocco',
'MZ': 'Mozambique',
'MM': 'Myanmar',
'NA': 'Namibia',
'NR': 'Nauru',
'NP': 'Nepal',
'NL': 'Netherlands',
'NC': 'New Caledonia',
'NZ': 'New Zealand',
'NI': 'Nicaragua',
'NE': 'Niger',
'NG': 'Nigeria',
'NU': 'Niue',
'NF': 'Norfolk Island',
'MP': 'Northern Mariana Islands',
'NO': 'Norway',
'OM': 'Oman',
'PK': 'Pakistan',
'PW': 'Palau',
'PS': 'Palestine, State of',
'PA': 'Panama',
'PG': 'Papua New Guinea',
'PY': 'Paraguay',
'PE': 'Peru',
'PH': 'Philippines',
'PN': 'Pitcairn',
'PL': 'Poland',
'PT': 'Portugal',
'PR': 'Puerto Rico',
'QA': 'Qatar',
'RE': 'Réunion',
'RO': 'Romania',
'RU': 'Russian Federation',
'RW': 'Rwanda',
'BL': 'Saint Barthélemy',
'SH': 'Saint Helena, Ascension and Tristan da Cunha',
'KN': 'Saint Kitts and Nevis',
'LC': 'Saint Lucia',
'MF': 'Saint Martin (French part)',
'PM': 'Saint Pierre and Miquelon',
'VC': 'Saint Vincent and the Grenadines',
'WS': 'Samoa',
'SM': 'San Marino',
'ST': 'Sao Tome and Principe',
'SA': 'Saudi Arabia',
'SN': 'Senegal',
'RS': 'Serbia',
'SC': 'Seychelles',
'SL': 'Sierra Leone',
'SG': 'Singapore',
'SX': 'Sint Maarten (Dutch part)',
'SK': 'Slovakia',
'SI': 'Slovenia',
'SB': 'Solomon Islands',
'SO': 'Somalia',
'ZA': 'South Africa',
'GS': 'South Georgia and the South Sandwich Islands',
'SS': 'South Sudan',
'ES': 'Spain',
'LK': 'Sri Lanka',
'SD': 'Sudan',
'SR': 'Suriname',
'SJ': 'Svalbard and Jan Mayen',
'SZ': 'Swaziland',
'SE': 'Sweden',
'CH': 'Switzerland',
'SY': 'Syrian Arab Republic',
'TW': 'Taiwan, Province of China',
'TJ': 'Tajikistan',
'TZ': 'Tanzania, United Republic of',
'TH': 'Thailand',
'TL': 'Timor-Leste',
'TG': 'Togo',
'TK': 'Tokelau',
'TO': 'Tonga',
'TT': 'Trinidad and Tobago',
'TN': 'Tunisia',
'TR': 'Turkey',
'TM': 'Turkmenistan',
'TC': 'Turks and Caicos Islands',
'TV': 'Tuvalu',
'UG': 'Uganda',
'UA': 'Ukraine',
'AE': 'United Arab Emirates',
'GB': 'United Kingdom',
'US': 'United States',
'UM': 'United States Minor Outlying Islands',
'UY': 'Uruguay',
'UZ': 'Uzbekistan',
'VU': 'Vanuatu',
'VE': 'Venezuela, Bolivarian Republic of',
'VN': 'Viet Nam',
'VG': 'Virgin Islands, British',
'VI': 'Virgin Islands, U.S.',
'WF': 'Wallis and Futuna',
'EH': 'Western Sahara',
'YE': 'Yemen',
'ZM': 'Zambia',
'ZW': 'Zimbabwe',
}
@classmethod
def short2full(cls, code):
"""Convert an ISO 3166-2 country code to the corresponding full name"""
return cls._country_map.get(code.upper())
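# Illustrative usage (not part of the original module):
#   >>> ISO3166Utils.short2full('jp')
#   'Japan'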
class GeoUtils(object):
# Major IPv4 address blocks per country
_country_ip_map = {
'AD': '46.172.224.0/19',
'AE': '94.200.0.0/13',
'AF': '149.54.0.0/17',
'AG': '209.59.64.0/18',
'AI': '204.14.248.0/21',
'AL': '46.99.0.0/16',
'AM': '46.70.0.0/15',
'AO': '105.168.0.0/13',
'AP': '182.50.184.0/21',
'AQ': '23.154.160.0/24',
'AR': '181.0.0.0/12',
'AS': '202.70.112.0/20',
'AT': '77.116.0.0/14',
'AU': '1.128.0.0/11',
'AW': '181.41.0.0/18',
'AX': '185.217.4.0/22',
'AZ': '5.197.0.0/16',
'BA': '31.176.128.0/17',
'BB': '65.48.128.0/17',
'BD': '114.130.0.0/16',
'BE': '57.0.0.0/8',
'BF': '102.178.0.0/15',
'BG': '95.42.0.0/15',
'BH': '37.131.0.0/17',
'BI': '154.117.192.0/18',
'BJ': '137.255.0.0/16',
'BL': '185.212.72.0/23',
'BM': '196.12.64.0/18',
'BN': '156.31.0.0/16',
'BO': '161.56.0.0/16',
'BQ': '161.0.80.0/20',
'BR': '191.128.0.0/12',
'BS': '24.51.64.0/18',
'BT': '119.2.96.0/19',
'BW': '168.167.0.0/16',
'BY': '178.120.0.0/13',
'BZ': '179.42.192.0/18',
'CA': '99.224.0.0/11',
'CD': '41.243.0.0/16',
'CF': '197.242.176.0/21',
'CG': '160.113.0.0/16',
'CH': '85.0.0.0/13',
'CI': '102.136.0.0/14',
'CK': '202.65.32.0/19',
'CL': '152.172.0.0/14',
'CM': '102.244.0.0/14',
'CN': '36.128.0.0/10',
'CO': '181.240.0.0/12',
'CR': '201.192.0.0/12',
'CU': '152.206.0.0/15',
'CV': '165.90.96.0/19',
'CW': '190.88.128.0/17',
'CY': '31.153.0.0/16',
'CZ': '88.100.0.0/14',
'DE': '53.0.0.0/8',
'DJ': '197.241.0.0/17',
'DK': '87.48.0.0/12',
'DM': '192.243.48.0/20',
'DO': '152.166.0.0/15',
'DZ': '41.96.0.0/12',
'EC': '186.68.0.0/15',
'EE': '90.190.0.0/15',
'EG': '156.160.0.0/11',
'ER': '196.200.96.0/20',
'ES': '88.0.0.0/11',
'ET': '196.188.0.0/14',
'EU': '2.16.0.0/13',
'FI': '91.152.0.0/13',
'FJ': '144.120.0.0/16',
'FK': '80.73.208.0/21',
'FM': '119.252.112.0/20',
'FO': '88.85.32.0/19',
'FR': '90.0.0.0/9',
'GA': '41.158.0.0/15',
'GB': '25.0.0.0/8',
'GD': '74.122.88.0/21',
'GE': '31.146.0.0/16',
'GF': '161.22.64.0/18',
'GG': '62.68.160.0/19',
'GH': '154.160.0.0/12',
'GI': '95.164.0.0/16',
'GL': '88.83.0.0/19',
'GM': '160.182.0.0/15',
'GN': '197.149.192.0/18',
'GP': '104.250.0.0/19',
'GQ': '105.235.224.0/20',
'GR': '94.64.0.0/13',
'GT': '168.234.0.0/16',
'GU': '168.123.0.0/16',
'GW': '197.214.80.0/20',
'GY': '181.41.64.0/18',
'HK': '113.252.0.0/14',
'HN': '181.210.0.0/16',
'HR': '93.136.0.0/13',
'HT': '148.102.128.0/17',
'HU': '84.0.0.0/14',
'ID': '39.192.0.0/10',
'IE': '87.32.0.0/12',
'IL': '79.176.0.0/13',
'IM': '5.62.80.0/20',
'IN': '117.192.0.0/10',
'IO': '203.83.48.0/21',
'IQ': '37.236.0.0/14',
'IR': '2.176.0.0/12',
'IS': '82.221.0.0/16',
'IT': '79.0.0.0/10',
'JE': '87.244.64.0/18',
'JM': '72.27.0.0/17',
'JO': '176.29.0.0/16',
'JP': '133.0.0.0/8',
'KE': '105.48.0.0/12',
'KG': '158.181.128.0/17',
'KH': '36.37.128.0/17',
'KI': '103.25.140.0/22',
'KM': '197.255.224.0/20',
'KN': '198.167.192.0/19',
'KP': '175.45.176.0/22',
'KR': '175.192.0.0/10',
'KW': '37.36.0.0/14',
'KY': '64.96.0.0/15',
'KZ': '2.72.0.0/13',
'LA': '115.84.64.0/18',
'LB': '178.135.0.0/16',
'LC': '24.92.144.0/20',
'LI': '82.117.0.0/19',
'LK': '112.134.0.0/15',
'LR': '102.183.0.0/16',
'LS': '129.232.0.0/17',
'LT': '78.56.0.0/13',
'LU': '188.42.0.0/16',
'LV': '46.109.0.0/16',
'LY': '41.252.0.0/14',
'MA': '105.128.0.0/11',
'MC': '88.209.64.0/18',
'MD': '37.246.0.0/16',
'ME': '178.175.0.0/17',
'MF': '74.112.232.0/21',
'MG': '154.126.0.0/17',
'MH': '117.103.88.0/21',
'MK': '77.28.0.0/15',
'ML': '154.118.128.0/18',
'MM': '37.111.0.0/17',
'MN': '49.0.128.0/17',
'MO': '60.246.0.0/16',
'MP': '202.88.64.0/20',
'MQ': '109.203.224.0/19',
'MR': '41.188.64.0/18',
'MS': '208.90.112.0/22',
'MT': '46.11.0.0/16',
'MU': '105.16.0.0/12',
'MV': '27.114.128.0/18',
'MW': '102.70.0.0/15',
'MX': '187.192.0.0/11',
'MY': '175.136.0.0/13',
'MZ': '197.218.0.0/15',
'NA': '41.182.0.0/16',
'NC': '101.101.0.0/18',
'NE': '197.214.0.0/18',
'NF': '203.17.240.0/22',
'NG': '105.112.0.0/12',
'NI': '186.76.0.0/15',
'NL': '145.96.0.0/11',
'NO': '84.208.0.0/13',
'NP': '36.252.0.0/15',
'NR': '203.98.224.0/19',
'NU': '49.156.48.0/22',
'NZ': '49.224.0.0/14',
'OM': '5.36.0.0/15',
'PA': '186.72.0.0/15',
'PE': '186.160.0.0/14',
'PF': '123.50.64.0/18',
'PG': '124.240.192.0/19',
'PH': '49.144.0.0/13',
'PK': '39.32.0.0/11',
'PL': '83.0.0.0/11',
'PM': '70.36.0.0/20',
'PR': '66.50.0.0/16',
'PS': '188.161.0.0/16',
'PT': '85.240.0.0/13',
'PW': '202.124.224.0/20',
'PY': '181.120.0.0/14',
'QA': '37.210.0.0/15',
'RE': '102.35.0.0/16',
'RO': '79.112.0.0/13',
'RS': '93.86.0.0/15',
'RU': '5.136.0.0/13',
'RW': '41.186.0.0/16',
'SA': '188.48.0.0/13',
'SB': '202.1.160.0/19',
'SC': '154.192.0.0/11',
'SD': '102.120.0.0/13',
'SE': '78.64.0.0/12',
'SG': '8.128.0.0/10',
'SI': '188.196.0.0/14',
'SK': '78.98.0.0/15',
'SL': '102.143.0.0/17',
'SM': '89.186.32.0/19',
'SN': '41.82.0.0/15',
'SO': '154.115.192.0/18',
'SR': '186.179.128.0/17',
'SS': '105.235.208.0/21',
'ST': '197.159.160.0/19',
'SV': '168.243.0.0/16',
'SX': '190.102.0.0/20',
'SY': '5.0.0.0/16',
'SZ': '41.84.224.0/19',
'TC': '65.255.48.0/20',
'TD': '154.68.128.0/19',
'TG': '196.168.0.0/14',
'TH': '171.96.0.0/13',
'TJ': '85.9.128.0/18',
'TK': '27.96.24.0/21',
'TL': '180.189.160.0/20',
'TM': '95.85.96.0/19',
'TN': '197.0.0.0/11',
'TO': '175.176.144.0/21',
'TR': '78.160.0.0/11',
'TT': '186.44.0.0/15',
'TV': '202.2.96.0/19',
'TW': '120.96.0.0/11',
'TZ': '156.156.0.0/14',
'UA': '37.52.0.0/14',
'UG': '102.80.0.0/13',
'US': '6.0.0.0/8',
'UY': '167.56.0.0/13',
'UZ': '84.54.64.0/18',
'VA': '212.77.0.0/19',
'VC': '207.191.240.0/21',
'VE': '186.88.0.0/13',
'VG': '66.81.192.0/20',
'VI': '146.226.0.0/16',
'VN': '14.160.0.0/11',
'VU': '202.80.32.0/20',
'WF': '117.20.32.0/21',
'WS': '202.4.32.0/19',
'YE': '134.35.0.0/16',
'YT': '41.242.116.0/22',
'ZA': '41.0.0.0/11',
'ZM': '102.144.0.0/13',
'ZW': '102.177.192.0/18',
}
@classmethod
def random_ipv4(cls, code_or_block):
if len(code_or_block) == 2:
block = cls._country_ip_map.get(code_or_block.upper())
if not block:
return None
else:
block = code_or_block
addr, preflen = block.split('/')
addr_min = compat_struct_unpack('!L', socket.inet_aton(addr))[0]
addr_max = addr_min | (0xffffffff >> int(preflen))
return compat_str(socket.inet_ntoa(
compat_struct_pack('!L', random.randint(addr_min, addr_max))))
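# Illustrative usage (not part of the original module; the result is random,
# so only its membership in the selected block is stable):
#   >>> GeoUtils.random_ipv4('US')             # some address inside 6.0.0.0/8
#   >>> GeoUtils.random_ipv4('192.168.0.0/16')  # a CIDR block also works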
class PerRequestProxyHandler(compat_urllib_request.ProxyHandler):
def __init__(self, proxies=None):
# Set default handlers
for type in ('http', 'https'):
setattr(self, '%s_open' % type,
lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
meth(r, proxy, type))
compat_urllib_request.ProxyHandler.__init__(self, proxies)
def proxy_open(self, req, proxy, type):
req_proxy = req.headers.get('Ytdl-request-proxy')
if req_proxy is not None:
proxy = req_proxy
del req.headers['Ytdl-request-proxy']
if proxy == '__noproxy__':
return None # No Proxy
if compat_urlparse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'):
req.add_header('Ytdl-socks-proxy', proxy)
# yt-dlp's http/https handlers do the actual wrapping of the socket with SOCKS
return None
return compat_urllib_request.ProxyHandler.proxy_open(
self, req, proxy, type)
# Both long_to_bytes and bytes_to_long are adapted from PyCrypto, which is
# released into Public Domain
# https://github.com/dlitz/pycrypto/blob/master/lib/Crypto/Util/number.py#L387
def long_to_bytes(n, blocksize=0):
"""long_to_bytes(n:long, blocksize:int) : string
Convert a long integer to a byte string.
If optional blocksize is given and greater than zero, pad the front of the
byte string with binary zeros so that the length is a multiple of
blocksize.
"""
# after much testing, this algorithm was deemed to be the fastest
s = b''
n = int(n)
while n > 0:
s = compat_struct_pack('>I', n & 0xffffffff) + s
n = n >> 32
# strip off leading zeros
for i in range(len(s)):
if s[i] != b'\000'[0]:
break
else:
# only happens when n == 0
s = b'\000'
i = 0
s = s[i:]
# add back some pad bytes. this could be done more efficiently w.r.t. the
# de-padding being done above, but sigh...
if blocksize > 0 and len(s) % blocksize:
s = (blocksize - len(s) % blocksize) * b'\000' + s
return s
def bytes_to_long(s):
"""bytes_to_long(string) : long
Convert a byte string to a long integer.
This is (essentially) the inverse of long_to_bytes().
"""
acc = 0
length = len(s)
if length % 4:
extra = (4 - length % 4)
s = b'\000' * extra + s
length = length + extra
for i in range(0, length, 4):
acc = (acc << 32) + compat_struct_unpack('>I', s[i:i + 4])[0]
return acc
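# Illustrative round trip (not part of the original module):
#   >>> bytes_to_long(b'\x01\x00\x01')
#   65537
#   >>> long_to_bytes(65537)
#   b'\x01\x00\x01'
#   >>> long_to_bytes(65537, blocksize=4)
#   b'\x00\x01\x00\x01'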
def ohdave_rsa_encrypt(data, exponent, modulus):
'''
Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/
Input:
data: data to encrypt, bytes-like object
exponent, modulus: parameter e and N of RSA algorithm, both integer
Output: hex string of encrypted data
Limitation: supports one block encryption only
'''
payload = int(binascii.hexlify(data[::-1]), 16)
encrypted = pow(payload, exponent, modulus)
return '%x' % encrypted
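# Illustrative usage (a sketch with hypothetical toy parameters, not a real
# key; real call sites pass the site-provided public exponent and modulus):
#   >>> ohdave_rsa_encrypt(b'hi', 65537, 3233)  # returns the ciphertext as a lowercase hex string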
def pkcs1pad(data, length):
"""
Padding input data with PKCS#1 scheme
@param {int[]} data input data
@param {int} length target length
@returns {int[]} padded data
"""
if len(data) > length - 11:
raise ValueError('Input data too long for PKCS#1 padding')
# padding bytes must be nonzero, otherwise the zero delimiter below is ambiguous
pseudo_random = [random.randint(1, 254) for _ in range(length - len(data) - 3)]
return [0, 2] + pseudo_random + [0] + data
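# Illustrative shape (not part of the original module; padding bytes are random):
#   >>> pkcs1pad([1, 2, 3], 16)
#   [0, 2, <10 random nonzero bytes>, 0, 1, 2, 3]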
def encode_base_n(num, n, table=None):
FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
if not table:
table = FULL_TABLE[:n]
if n > len(table):
raise ValueError('base %d exceeds table length %d' % (n, len(table)))
if num == 0:
return table[0]
ret = ''
while num:
ret = table[num % n] + ret
num = num // n
return ret
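# Illustrative usage (not part of the original module):
#   >>> encode_base_n(1000, 16)
#   '3e8'
#   >>> encode_base_n(255, 2)
#   '11111111'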
def decode_packed_codes(code):
mobj = re.search(PACKED_CODES_RE, code)
obfuscated_code, base, count, symbols = mobj.groups()
base = int(base)
count = int(count)
symbols = symbols.split('|')
symbol_table = {}
while count:
count -= 1
base_n_count = encode_base_n(count, base)
symbol_table[base_n_count] = symbols[count] or base_n_count
return re.sub(
r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)],
obfuscated_code)
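# PACKED_CODES_RE (defined earlier in this module) matches Dean Edwards'
# p.a.c.k.e.r.-style eval(function(p,a,c,k,e,d)...) obfuscation; the loop above
# rebuilds the symbol table and substitutes the original tokens back in.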
def caesar(s, alphabet, shift):
if shift == 0:
return s
l = len(alphabet)
return ''.join(
alphabet[(alphabet.index(c) + shift) % l] if c in alphabet else c
for c in s)
def rot47(s):
return caesar(s, r'''!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~''', 47)
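# Illustrative usage (not part of the original module):
#   >>> caesar('hello', 'abcdefghijklmnopqrstuvwxyz', 1)
#   'ifmmp'
# rot47 is its own inverse over the printable ASCII range it shifts:
#   >>> rot47(rot47('yt-dlp utils'))
#   'yt-dlp utils'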
def parse_m3u8_attributes(attrib):
info = {}
for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib):
if val.startswith('"'):
val = val[1:-1]
info[key] = val
return info
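# Illustrative usage (not part of the original module):
#   >>> parse_m3u8_attributes('BANDWIDTH=1280000,CODECS="mp4a.40.2,avc1.64001f"')
#   {'BANDWIDTH': '1280000', 'CODECS': 'mp4a.40.2,avc1.64001f'}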
def urshift(val, n):
return val >> n if val >= 0 else (val + 0x100000000) >> n
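# Emulates JavaScript's unsigned right shift (>>>) on 32-bit values:
#   >>> urshift(-1, 0)
#   4294967295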
# Based on png2str() written by @gdkchan and improved by @yokrysty
# Originally posted at https://github.com/ytdl-org/youtube-dl/issues/9706
def decode_png(png_data):
# Reference: https://www.w3.org/TR/PNG/
header = png_data[8:]
if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR':
raise IOError('Not a valid PNG file.')
int_map = {1: '>B', 2: '>H', 4: '>I'}
unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)], x)[0]
chunks = []
while header:
length = unpack_integer(header[:4])
header = header[4:]
chunk_type = header[:4]
header = header[4:]
chunk_data = header[:length]
header = header[length:]
header = header[4:] # Skip CRC
chunks.append({
'type': chunk_type,
'length': length,
'data': chunk_data
})
ihdr = chunks[0]['data']
width = unpack_integer(ihdr[:4])
height = unpack_integer(ihdr[4:8])
idat = b''
for chunk in chunks:
if chunk['type'] == b'IDAT':
idat += chunk['data']
if not idat:
raise IOError('Unable to read PNG data.')
decompressed_data = bytearray(zlib.decompress(idat))
stride = width * 3
pixels = []
def _get_pixel(idx):
x = idx % stride
y = idx // stride
return pixels[y][x]
for y in range(height):
basePos = y * (1 + stride)
filter_type = decompressed_data[basePos]
current_row = []
pixels.append(current_row)
for x in range(stride):
color = decompressed_data[1 + basePos + x]
basex = y * stride + x
left = 0
up = 0
if x > 2:
left = _get_pixel(basex - 3)
if y > 0:
up = _get_pixel(basex - stride)
if filter_type == 1: # Sub
color = (color + left) & 0xff
elif filter_type == 2: # Up
color = (color + up) & 0xff
elif filter_type == 3: # Average
color = (color + ((left + up) >> 1)) & 0xff
elif filter_type == 4: # Paeth
a = left
b = up
c = 0
if x > 2 and y > 0:
c = _get_pixel(basex - stride - 3)
p = a + b - c
pa = abs(p - a)
pb = abs(p - b)
pc = abs(p - c)
if pa <= pb and pa <= pc:
color = (color + a) & 0xff
elif pb <= pc:
color = (color + b) & 0xff
else:
color = (color + c) & 0xff
current_row.append(color)
return width, height, pixels
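# Illustrative usage (a sketch; 'thumb.png' is a hypothetical file). The decoder
# above assumes 8-bit truecolor, so each row holds width * 3 RGB byte values:
#   >>> width, height, pixels = decode_png(open('thumb.png', 'rb').read())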
def write_xattr(path, key, value):
# This mess below finds the best xattr tool for the job
try:
# try the pyxattr module...
import xattr
if hasattr(xattr, 'set'): # pyxattr
# Unicode arguments are not supported in python-pyxattr until
# version 0.5.0
# See https://github.com/ytdl-org/youtube-dl/issues/5498
pyxattr_required_version = '0.5.0'
if version_tuple(xattr.__version__) < version_tuple(pyxattr_required_version):
# TODO: fallback to CLI tools
raise XAttrUnavailableError(
'python-pyxattr is detected but is too old. '
'yt-dlp requires %s or above while your version is %s. '
'Falling back to other xattr implementations' % (
pyxattr_required_version, xattr.__version__))
setxattr = xattr.set
else: # xattr
setxattr = xattr.setxattr
try:
setxattr(path, key, value)
except EnvironmentError as e:
raise XAttrMetadataError(e.errno, e.strerror)
except ImportError:
if compat_os_name == 'nt':
# Write xattrs to NTFS Alternate Data Streams:
# http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29
assert ':' not in key
assert os.path.exists(path)
ads_fn = path + ':' + key
try:
with open(ads_fn, 'wb') as f:
f.write(value)
except EnvironmentError as e:
raise XAttrMetadataError(e.errno, e.strerror)
else:
user_has_setfattr = check_executable('setfattr', ['--version'])
user_has_xattr = check_executable('xattr', ['-h'])
if user_has_setfattr or user_has_xattr:
value = value.decode('utf-8')
if user_has_setfattr:
executable = 'setfattr'
opts = ['-n', key, '-v', value]
elif user_has_xattr:
executable = 'xattr'
opts = ['-w', key, value]
cmd = ([encodeFilename(executable, True)]
+ [encodeArgument(o) for o in opts]
+ [encodeFilename(path, True)])
try:
p = Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
except EnvironmentError as e:
raise XAttrMetadataError(e.errno, e.strerror)
stdout, stderr = p.communicate_or_kill()
stderr = stderr.decode('utf-8', 'replace')
if p.returncode != 0:
raise XAttrMetadataError(p.returncode, stderr)
else:
# On Unix, and can't find pyxattr, setfattr, or xattr.
if sys.platform.startswith('linux'):
raise XAttrUnavailableError(
"Couldn't find a tool to set the xattrs. "
"Install either the python 'pyxattr' or 'xattr' "
"modules, or the GNU 'attr' package "
"(which contains the 'setfattr' tool).")
else:
raise XAttrUnavailableError(
"Couldn't find a tool to set the xattrs. "
"Install either the python 'xattr' module, "
"or the 'xattr' binary.")
def random_birthday(year_field, month_field, day_field):
start_date = datetime.date(1950, 1, 1)
end_date = datetime.date(1995, 12, 31)
offset = random.randint(0, (end_date - start_date).days)
random_date = start_date + datetime.timedelta(offset)
return {
year_field: str(random_date.year),
month_field: str(random_date.month),
day_field: str(random_date.day),
}
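# Illustrative usage (not part of the original module; the date is random):
#   >>> random_birthday('year', 'month', 'day')
#   {'year': '1987', 'month': '6', 'day': '4'}  # values vary per call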
# Templates for internet shortcut files, which are plain text files.
DOT_URL_LINK_TEMPLATE = '''
[InternetShortcut]
URL=%(url)s
'''.lstrip()
DOT_WEBLOC_LINK_TEMPLATE = '''
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
\t<key>URL</key>
\t<string>%(url)s</string>
</dict>
</plist>
'''.lstrip()
DOT_DESKTOP_LINK_TEMPLATE = '''
[Desktop Entry]
Encoding=UTF-8
Name=%(filename)s
Type=Link
URL=%(url)s
Icon=text-html
'''.lstrip()
LINK_TEMPLATES = {
'url': DOT_URL_LINK_TEMPLATE,
'desktop': DOT_DESKTOP_LINK_TEMPLATE,
'webloc': DOT_WEBLOC_LINK_TEMPLATE,
}
def iri_to_uri(iri):
"""
Converts an IRI (Internationalized Resource Identifier, allowing Unicode characters) to a URI (Uniform Resource Identifier, ASCII-only).
The function doesn't add an additional layer of escaping; e.g., it doesn't escape `%3C` as `%253C`. Instead, it percent-escapes characters with an underlying UTF-8 encoding *besides* those already escaped, leaving the URI intact.
"""
iri_parts = compat_urllib_parse_urlparse(iri)
if '[' in iri_parts.netloc:
raise ValueError('IPv6 URIs are not, yet, supported.')
# Querying `.netloc`, when there's only one bracket, also raises a ValueError.
# The `safe` argument values, that the following code uses, contain the characters that should not be percent-encoded. Everything else but letters, digits and '_.-' will be percent-encoded with an underlying UTF-8 encoding. Everything already percent-encoded will be left as is.
net_location = ''
if iri_parts.username:
net_location += compat_urllib_parse_quote(iri_parts.username, safe=r"!$%&'()*+,~")
if iri_parts.password is not None:
net_location += ':' + compat_urllib_parse_quote(iri_parts.password, safe=r"!$%&'()*+,~")
net_location += '@'
net_location += iri_parts.hostname.encode('idna').decode('utf-8') # Punycode for Unicode hostnames.
# The 'idna' encoding produces ASCII text.
if iri_parts.port is not None and iri_parts.port != 80:
net_location += ':' + str(iri_parts.port)
return compat_urllib_parse_urlunparse(
(iri_parts.scheme,
net_location,
compat_urllib_parse_quote_plus(iri_parts.path, safe=r"!$%&'()*+,/:;=@|~"),
# Unsure about the `safe` argument, since this is a legacy way of handling parameters.
compat_urllib_parse_quote_plus(iri_parts.params, safe=r"!$%&'()*+,/:;=@|~"),
# Not totally sure about the `safe` argument, since the source does not explicitly mention the query URI component.
compat_urllib_parse_quote_plus(iri_parts.query, safe=r"!$%&'()*+,/:;=?@{|}~"),
compat_urllib_parse_quote_plus(iri_parts.fragment, safe=r"!#$%&'()*+,/:;=?@{|}~")))
# Source for `safe` arguments: https://url.spec.whatwg.org/#percent-encoded-bytes.
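# Illustrative usage (not part of the original module):
#   >>> iri_to_uri('https://example.com/föö')
#   'https://example.com/f%C3%B6%C3%B6'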
def to_high_limit_path(path):
if sys.platform in ['win32', 'cygwin']:
# Work around MAX_PATH limitation on Windows. The maximum allowed length for the individual path segments may still be quite limited.
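# The trailing-space-plus-rstrip dance yields the '\\?\' prefix, since a raw
# string literal cannot end in a backslash.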
return r'\\?\ '.rstrip() + os.path.abspath(path)
return path
def format_field(obj, field=None, template='%s', ignore=(None, ''), default='', func=None):
if field is None:
val = obj if obj is not None else default
else:
val = obj.get(field, default)
if func and val not in ignore:
val = func(val)
return template % val if val not in ignore else default
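# Illustrative usage (not part of the original module):
#   >>> format_field({'width': 1920}, 'width', '%dpx')
#   '1920px'
#   >>> format_field({}, 'width', '%dpx')
#   ''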
def clean_podcast_url(url):
return re.sub(r'''(?x)
(?:
(?:
chtbl\.com/track|
media\.blubrry\.com| # https://create.blubrry.com/resources/podcast-media-download-statistics/getting-started/
play\.podtrac\.com
)/[^/]+|
(?:dts|www)\.podtrac\.com/(?:pts/)?redirect\.[0-9a-z]{3,4}| # http://analytics.podtrac.com/how-to-measure
flex\.acast\.com|
pd(?:
cn\.co| # https://podcorn.com/analytics-prefix/
st\.fm # https://podsights.com/docs/
)/e
)/''', '', url)
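# Illustrative usage (not part of the original module; the host is hypothetical):
#   >>> clean_podcast_url('https://chtbl.com/track/12345/traffic.example.com/ep.mp3')
#   'https://traffic.example.com/ep.mp3'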
_HEX_TABLE = '0123456789abcdef'
def random_uuidv4():
return re.sub(r'[xy]', lambda x: _HEX_TABLE[random.randint(0, 15)], 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx')
def make_dir(path, to_screen=None):
try:
dn = os.path.dirname(path)
if dn and not os.path.exists(dn):
os.makedirs(dn)
return True
except (OSError, IOError) as err:
if callable(to_screen):
to_screen('unable to create directory ' + error_to_compat_str(err))
return False
def get_executable_path():
from zipimport import zipimporter
if hasattr(sys, 'frozen'): # Running from PyInstaller
path = os.path.dirname(sys.executable)
elif isinstance(globals().get('__loader__'), zipimporter): # Running from ZIP
path = os.path.join(os.path.dirname(__file__), '../..')
else:
path = os.path.join(os.path.dirname(__file__), '..')
return os.path.abspath(path)
def load_plugins(name, suffix, namespace):
classes = {}
try:
plugins_spec = importlib.util.spec_from_file_location(
name, os.path.join(get_executable_path(), 'ytdlp_plugins', name, '__init__.py'))
plugins = importlib.util.module_from_spec(plugins_spec)
sys.modules[plugins_spec.name] = plugins
plugins_spec.loader.exec_module(plugins)
for name in dir(plugins):
if name in namespace:
continue
if not name.endswith(suffix):
continue
klass = getattr(plugins, name)
classes[name] = namespace[name] = klass
except FileNotFoundError:
pass
return classes
def traverse_obj(
obj, *path_list, default=None, expected_type=None, get_all=True,
casesense=True, is_user_input=False, traverse_string=False):
''' Traverse nested list/dict/tuple
@param path_list A list of paths which are checked one by one.
Each path is a list of keys where each key is a string,
a function, a tuple of strings or "...".
When a function is given, it takes the key as argument and
returns whether the key matches or not. When a tuple is given,
all the keys given in the tuple are traversed, and
"..." traverses all the keys in the object
@param default Default value to return
@param expected_type Only accept final value of this type (Can also be any callable)
@param get_all Return all the values obtained from a path or only the first one
@param casesense Whether to consider dictionary keys as case sensitive
@param is_user_input Whether the keys are generated from user input. If True,
strings are converted to int/slice if necessary
@param traverse_string Whether to traverse inside strings. If True, any
non-compatible object will also be converted into a string
# TODO: Write tests
'''
if not casesense:
_lower = lambda k: (k.lower() if isinstance(k, str) else k)
path_list = (map(_lower, variadic(path)) for path in path_list)
def _traverse_obj(obj, path, _current_depth=0):
nonlocal depth
if obj is None:
return None
path = tuple(variadic(path))
for i, key in enumerate(path):
if isinstance(key, (list, tuple)):
obj = [_traverse_obj(obj, sub_key, _current_depth) for sub_key in key]
key = ...
if key is ...:
obj = (obj.values() if isinstance(obj, dict)
else obj if isinstance(obj, (list, tuple, LazyList))
else str(obj) if traverse_string else [])
_current_depth += 1
depth = max(depth, _current_depth)
return [_traverse_obj(inner_obj, path[i + 1:], _current_depth) for inner_obj in obj]
elif callable(key):
if isinstance(obj, (list, tuple, LazyList)):
obj = enumerate(obj)
elif isinstance(obj, dict):
obj = obj.items()
else:
if not traverse_string:
return None
obj = str(obj)
_current_depth += 1
depth = max(depth, _current_depth)
return [_traverse_obj(v, path[i + 1:], _current_depth) for k, v in obj if key(k)]
elif isinstance(obj, dict) and not (is_user_input and key == ':'):
obj = (obj.get(key) if casesense or (key in obj)
else next((v for k, v in obj.items() if _lower(k) == key), None))
else:
if is_user_input:
key = (int_or_none(key) if ':' not in key
else slice(*map(int_or_none, key.split(':'))))
if key == slice(None):
return _traverse_obj(obj, (..., *path[i + 1:]), _current_depth)
if not isinstance(key, (int, slice)):
return None
if not isinstance(obj, (list, tuple, LazyList)):
if not traverse_string:
return None
obj = str(obj)
try:
obj = obj[key]
except IndexError:
return None
return obj
if isinstance(expected_type, type):
type_test = lambda val: val if isinstance(val, expected_type) else None
elif expected_type is not None:
type_test = expected_type
else:
type_test = lambda val: val
for path in path_list:
depth = 0
val = _traverse_obj(obj, path)
if val is not None:
if depth:
for _ in range(depth - 1):
val = itertools.chain.from_iterable(v for v in val if v is not None)
val = [v for v in map(type_test, val) if v is not None]
if val:
return val if get_all else val[0]
else:
val = type_test(val)
if val is not None:
return val
return default
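# Illustrative usage (not part of the original module):
#   >>> traverse_obj({'a': [{'b': 1}, {'b': 2}]}, ('a', ..., 'b'))
#   [1, 2]
#   >>> traverse_obj({'a': {}}, ('a', 'b'), default='missing')
#   'missing'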
def traverse_dict(dictn, keys, casesense=True):
''' For backward compatibility. Do not use '''
return traverse_obj(dictn, keys, casesense=casesense,
is_user_input=True, traverse_string=True)
def variadic(x, allowed_types=(str, bytes)):
return x if isinstance(x, collections.abc.Iterable) and not isinstance(x, allowed_types) else (x,)
# create a JSON Web Signature (jws) with HS256 algorithm
# the resulting format is in JWS Compact Serialization
# implemented following JWT https://www.rfc-editor.org/rfc/rfc7519.html
# implemented following JWS https://www.rfc-editor.org/rfc/rfc7515.html
def jwt_encode_hs256(payload_data, key, headers={}):
header_data = {
'alg': 'HS256',
'typ': 'JWT',
}
if headers:
header_data.update(headers)
header_b64 = base64.b64encode(json.dumps(header_data).encode('utf-8'))
payload_b64 = base64.b64encode(json.dumps(payload_data).encode('utf-8'))
h = hmac.new(key.encode('utf-8'), header_b64 + b'.' + payload_b64, hashlib.sha256)
signature_b64 = base64.b64encode(h.digest())
token = header_b64 + b'.' + payload_b64 + b'.' + signature_b64
return token
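# Illustrative shape (not part of the original module): the token is bytes in
# the form b'<header>.<payload>.<signature>', each part standard-base64 encoded
# (note: RFC 7515 specifies base64url without padding, which this does not use).
#   >>> jwt_encode_hs256({'sub': 'user'}, 'secret')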
# can be extended in the future to verify the signature and parse the header, returning the algorithm used if it's not HS256
def jwt_decode_hs256(jwt):
header_b64, payload_b64, signature_b64 = jwt.split('.')
payload_data = json.loads(base64.urlsafe_b64decode(payload_b64))
return payload_data
def supports_terminal_sequences(stream):
if compat_os_name == 'nt':
if get_windows_version() < (10, 0, 10586):
return False
elif not os.getenv('TERM'):
return False
try:
return stream.isatty()
except BaseException:
return False
_terminal_sequences_re = re.compile('\033\\[[^m]+m')
def remove_terminal_sequences(string):
return _terminal_sequences_re.sub('', string)
def number_of_digits(number):
return len('%d' % number)
| []
| []
| [
"TERM"
]
| [] | ["TERM"] | python | 1 | 0 | |
selfdrive/controls/controlsd.py | #!/usr/bin/env python3
import os
import math
from numbers import Number
from cereal import car, log
from common.numpy_fast import clip
from common.realtime import sec_since_boot, config_realtime_process, Priority, Ratekeeper, DT_CTRL
from common.profiler import Profiler
from common.params import Params, put_nonblocking
import cereal.messaging as messaging
from selfdrive.config import Conversions as CV
from selfdrive.swaglog import cloudlog
from selfdrive.boardd.boardd import can_list_to_can_capnp
from selfdrive.car.car_helpers import get_car, get_startup_event, get_one_can
from selfdrive.controls.lib.lane_planner import CAMERA_OFFSET
from selfdrive.controls.lib.drive_helpers import update_v_cruise, initialize_v_cruise
from selfdrive.controls.lib.drive_helpers import get_lag_adjusted_curvature
from selfdrive.controls.lib.longcontrol import LongControl
from selfdrive.controls.lib.latcontrol_pid import LatControlPID
from selfdrive.controls.lib.latcontrol_indi import LatControlINDI
from selfdrive.controls.lib.latcontrol_lqr import LatControlLQR
from selfdrive.controls.lib.latcontrol_angle import LatControlAngle
from selfdrive.controls.lib.events import Events, ET
from selfdrive.controls.lib.alertmanager import AlertManager, set_offroad_alert
from selfdrive.controls.lib.vehicle_model import VehicleModel
from selfdrive.locationd.calibrationd import Calibration
from selfdrive.hardware import HARDWARE, TICI, EON
from selfdrive.manager.process_config import managed_processes
SOFT_DISABLE_TIME = 3 # seconds
LDW_MIN_SPEED = 31 * CV.MPH_TO_MS
LANE_DEPARTURE_THRESHOLD = 0.1
STEER_ANGLE_SATURATION_TIMEOUT = 1.0 / DT_CTRL
STEER_ANGLE_SATURATION_THRESHOLD = 2.5 # Degrees
REPLAY = "REPLAY" in os.environ
SIMULATION = "SIMULATION" in os.environ
NOSENSOR = "NOSENSOR" in os.environ
IGNORE_PROCESSES = {"rtshield", "uploader", "deleter", "loggerd", "logmessaged", "tombstoned",
"logcatd", "proclogd", "clocksd", "updated", "timezoned", "manage_athenad"} | \
{k for k, v in managed_processes.items() if not v.enabled}
ACTUATOR_FIELDS = set(car.CarControl.Actuators.schema.fields.keys())
ThermalStatus = log.DeviceState.ThermalStatus
State = log.ControlsState.OpenpilotState
PandaType = log.PandaState.PandaType
Desire = log.LateralPlan.Desire
LaneChangeState = log.LateralPlan.LaneChangeState
LaneChangeDirection = log.LateralPlan.LaneChangeDirection
EventName = car.CarEvent.EventName
ButtonEvent = car.CarState.ButtonEvent
SafetyModel = car.CarParams.SafetyModel
IGNORED_SAFETY_MODES = [SafetyModel.silent, SafetyModel.noOutput]
class Controls:
def __init__(self, sm=None, pm=None, can_sock=None):
config_realtime_process(4 if TICI else 3, Priority.CTRL_HIGH)
# Setup sockets
self.pm = pm
if self.pm is None:
self.pm = messaging.PubMaster(['sendcan', 'controlsState', 'carState',
'carControl', 'carEvents', 'carParams'])
self.camera_packets = ["roadCameraState", "driverCameraState"]
if TICI:
self.camera_packets.append("wideRoadCameraState")
params = Params()
self.joystick_mode = params.get_bool("JoystickDebugMode")
joystick_packet = ['testJoystick'] if self.joystick_mode else []
self.sm = sm
if self.sm is None:
ignore = ['driverCameraState', 'managerState'] if SIMULATION else None
self.sm = messaging.SubMaster(['deviceState', 'pandaStates', 'peripheralState', 'modelV2', 'liveCalibration',
'driverMonitoringState', 'longitudinalPlan', 'lateralPlan', 'liveLocationKalman',
'managerState', 'liveParameters', 'radarState'] + self.camera_packets + joystick_packet,
ignore_alive=ignore, ignore_avg_freq=['radarState', 'longitudinalPlan'])
self.can_sock = can_sock
if can_sock is None:
can_timeout = None if os.environ.get('NO_CAN_TIMEOUT', False) else 100
self.can_sock = messaging.sub_sock('can', timeout=can_timeout)
if TICI:
self.log_sock = messaging.sub_sock('androidLog')
# wait for one pandaState and one CAN packet
print("Waiting for CAN messages...")
get_one_can(self.can_sock)
self.CI, self.CP = get_car(self.can_sock, self.pm.sock['sendcan'])
# read params
self.is_metric = params.get_bool("IsMetric")
self.is_ldw_enabled = params.get_bool("IsLdwEnabled")
community_feature_toggle = params.get_bool("CommunityFeaturesToggle")
openpilot_enabled_toggle = params.get_bool("OpenpilotEnabledToggle")
passive = params.get_bool("Passive") or not openpilot_enabled_toggle
# detect sound card presence and ensure successful init
sounds_available = HARDWARE.get_sound_card_online()
car_recognized = self.CP.carName != 'mock'
controller_available = self.CI.CC is not None and not passive and not self.CP.dashcamOnly
community_feature = self.CP.communityFeature or \
self.CP.fingerprintSource == car.CarParams.FingerprintSource.can
community_feature_disallowed = community_feature and (not community_feature_toggle)
self.read_only = not car_recognized or not controller_available or \
self.CP.dashcamOnly or community_feature_disallowed
if self.read_only:
safety_config = car.CarParams.SafetyConfig.new_message()
safety_config.safetyModel = car.CarParams.SafetyModel.noOutput
self.CP.safetyConfigs = [safety_config]
# Write CarParams for radard
cp_bytes = self.CP.to_bytes()
params.put("CarParams", cp_bytes)
put_nonblocking("CarParamsCache", cp_bytes)
self.CC = car.CarControl.new_message()
self.AM = AlertManager()
self.events = Events()
self.LoC = LongControl(self.CP)
self.VM = VehicleModel(self.CP)
if self.CP.steerControlType == car.CarParams.SteerControlType.angle:
self.LaC = LatControlAngle(self.CP)
elif self.CP.lateralTuning.which() == 'pid':
self.LaC = LatControlPID(self.CP, self.CI)
elif self.CP.lateralTuning.which() == 'indi':
self.LaC = LatControlINDI(self.CP)
elif self.CP.lateralTuning.which() == 'lqr':
self.LaC = LatControlLQR(self.CP)
self.initialized = False
self.state = State.disabled
self.enabled = False
self.active = False
self.can_rcv_error = False
self.soft_disable_timer = 0
self.v_cruise_kph = 255
self.v_cruise_kph_last = 0
self.mismatch_counter = 0
self.cruise_mismatch_counter = 0
self.can_rcv_error_counter = 0
self.last_blinker_frame = 0
self.saturated_count = 0
self.distance_traveled = 0
self.last_functional_fan_frame = 0
self.events_prev = []
self.current_alert_types = [ET.PERMANENT]
self.logged_comm_issue = False
self.button_timers = {ButtonEvent.Type.decelCruise: 0, ButtonEvent.Type.accelCruise: 0}
self.last_actuators = car.CarControl.Actuators.new_message()
# TODO: no longer necessary, aside from process replay
self.sm['liveParameters'].valid = True
self.startup_event = get_startup_event(car_recognized, controller_available, len(self.CP.carFw) > 0)
if not sounds_available:
self.events.add(EventName.soundsUnavailable, static=True)
if community_feature_disallowed and car_recognized and not self.CP.dashcamOnly:
self.events.add(EventName.communityFeatureDisallowed, static=True)
if not car_recognized:
self.events.add(EventName.carUnrecognized, static=True)
if len(self.CP.carFw) > 0:
set_offroad_alert("Offroad_CarUnrecognized", True)
else:
set_offroad_alert("Offroad_NoFirmware", True)
elif self.read_only:
self.events.add(EventName.dashcamMode, static=True)
elif self.joystick_mode:
self.events.add(EventName.joystickDebug, static=True)
self.startup_event = None
# controlsd is driven by can recv, expected at 100Hz
self.rk = Ratekeeper(100, print_delay_threshold=None)
self.prof = Profiler(False) # off by default
def update_events(self, CS):
"""Compute carEvents from carState"""
self.events.clear()
self.events.add_from_msg(CS.events)
self.events.add_from_msg(self.sm['driverMonitoringState'].events)
# Handle startup event
if self.startup_event is not None:
self.events.add(self.startup_event)
self.startup_event = None
# Don't add any more events if not initialized
if not self.initialized:
self.events.add(EventName.controlsInitializing)
return
# Create events for battery, temperature, disk space, and memory
if EON and (self.sm['peripheralState'].pandaType != PandaType.uno) and \
self.sm['deviceState'].batteryPercent < 1 and self.sm['deviceState'].chargingError:
# at zero percent battery, while discharging, openpilot should not be allowed to engage
self.events.add(EventName.lowBattery)
if self.sm['deviceState'].thermalStatus >= ThermalStatus.red:
self.events.add(EventName.overheat)
if self.sm['deviceState'].freeSpacePercent < 7 and not SIMULATION:
# with under 7% free space, engagement is not allowed
self.events.add(EventName.outOfSpace)
# TODO: make tici threshold the same
if self.sm['deviceState'].memoryUsagePercent > (90 if TICI else 65) and not SIMULATION:
self.events.add(EventName.lowMemory)
# TODO: enable this once loggerd CPU usage is more reasonable
#cpus = list(self.sm['deviceState'].cpuUsagePercent)[:(-1 if EON else None)]
#if max(cpus, default=0) > 95 and not SIMULATION:
# self.events.add(EventName.highCpuUsage)
# Alert if fan isn't spinning for 5 seconds
if self.sm['peripheralState'].pandaType in [PandaType.uno, PandaType.dos]:
if self.sm['peripheralState'].fanSpeedRpm == 0 and self.sm['deviceState'].fanSpeedPercentDesired > 50:
if (self.sm.frame - self.last_functional_fan_frame) * DT_CTRL > 5.0:
self.events.add(EventName.fanMalfunction)
else:
self.last_functional_fan_frame = self.sm.frame
# Handle calibration status
cal_status = self.sm['liveCalibration'].calStatus
if cal_status != Calibration.CALIBRATED:
if cal_status == Calibration.UNCALIBRATED:
self.events.add(EventName.calibrationIncomplete)
else:
self.events.add(EventName.calibrationInvalid)
# Handle lane change
if self.sm['lateralPlan'].laneChangeState == LaneChangeState.preLaneChange:
direction = self.sm['lateralPlan'].laneChangeDirection
if (CS.leftBlindspot and direction == LaneChangeDirection.left) or \
(CS.rightBlindspot and direction == LaneChangeDirection.right):
self.events.add(EventName.laneChangeBlocked)
else:
if direction == LaneChangeDirection.left:
self.events.add(EventName.preLaneChangeLeft)
else:
self.events.add(EventName.preLaneChangeRight)
elif self.sm['lateralPlan'].laneChangeState in [LaneChangeState.laneChangeStarting,
LaneChangeState.laneChangeFinishing]:
self.events.add(EventName.laneChange)
if not CS.canValid:
self.events.add(EventName.canError)
for i, pandaState in enumerate(self.sm['pandaStates']):
# All pandas must match the list of safetyConfigs, and if outside this list, must be silent or noOutput
if i < len(self.CP.safetyConfigs):
safety_mismatch = pandaState.safetyModel != self.CP.safetyConfigs[i].safetyModel or pandaState.safetyParam != self.CP.safetyConfigs[i].safetyParam
else:
safety_mismatch = pandaState.safetyModel not in IGNORED_SAFETY_MODES
if safety_mismatch or self.mismatch_counter >= 200:
self.events.add(EventName.controlsMismatch)
if log.PandaState.FaultType.relayMalfunction in pandaState.faults:
self.events.add(EventName.relayMalfunction)
# Check for HW or system issues
if len(self.sm['radarState'].radarErrors):
self.events.add(EventName.radarFault)
elif not self.sm.valid["pandaStates"]:
self.events.add(EventName.usbError)
elif not self.sm.all_alive_and_valid() or self.can_rcv_error:
self.events.add(EventName.commIssue)
if not self.logged_comm_issue:
invalid = [s for s, valid in self.sm.valid.items() if not valid]
not_alive = [s for s, alive in self.sm.alive.items() if not alive]
cloudlog.event("commIssue", invalid=invalid, not_alive=not_alive)
self.logged_comm_issue = True
else:
self.logged_comm_issue = False
if not self.sm['liveParameters'].valid:
self.events.add(EventName.vehicleModelInvalid)
if not self.sm['lateralPlan'].mpcSolutionValid:
self.events.add(EventName.plannerError)
if not self.sm['liveLocationKalman'].sensorsOK and not NOSENSOR:
if self.sm.frame > 5 / DT_CTRL: # Give locationd some time to receive all the inputs
self.events.add(EventName.sensorDataInvalid)
if not self.sm['liveLocationKalman'].posenetOK:
self.events.add(EventName.posenetInvalid)
if not self.sm['liveLocationKalman'].deviceStable:
self.events.add(EventName.deviceFalling)
for pandaState in self.sm['pandaStates']:
if log.PandaState.FaultType.relayMalfunction in pandaState.faults:
self.events.add(EventName.relayMalfunction)
if not REPLAY:
# Check for mismatch between openpilot and car's PCM
cruise_mismatch = CS.cruiseState.enabled and (not self.enabled or not self.CP.pcmCruise)
self.cruise_mismatch_counter = self.cruise_mismatch_counter + 1 if cruise_mismatch else 0
if self.cruise_mismatch_counter > int(3. / DT_CTRL):
self.events.add(EventName.cruiseMismatch)
# Check for FCW
stock_long_is_braking = self.enabled and not self.CP.openpilotLongitudinalControl and CS.aEgo < -1.5
model_fcw = self.sm['modelV2'].meta.hardBrakePredicted and not CS.brakePressed and not stock_long_is_braking
planner_fcw = self.sm['longitudinalPlan'].fcw and self.enabled
if planner_fcw or model_fcw:
self.events.add(EventName.fcw)
if TICI:
logs = messaging.drain_sock(self.log_sock, wait_for_one=False)
messages = []
for m in logs:
try:
messages.append(m.androidLog.message)
except UnicodeDecodeError:
pass
for err in ["ERROR_CRC", "ERROR_ECC", "ERROR_STREAM_UNDERFLOW", "APPLY FAILED"]:
for m in messages:
if err not in m:
continue
csid = m.split("CSID:")[-1].split(" ")[0]
evt = {"0": EventName.roadCameraError, "1": EventName.wideRoadCameraError,
"2": EventName.driverCameraError}.get(csid, None)
if evt is not None:
self.events.add(evt)
# TODO: fix simulator
if not SIMULATION:
if not NOSENSOR:
if not self.sm['liveLocationKalman'].gpsOK and (self.distance_traveled > 1000):
# Not shown in the first 1 km to allow for driving out of a garage. This event shows after 5 minutes
self.events.add(EventName.noGps)
if not self.sm.all_alive(self.camera_packets):
self.events.add(EventName.cameraMalfunction)
if self.sm['modelV2'].frameDropPerc > 20:
self.events.add(EventName.modeldLagging)
if self.sm['liveLocationKalman'].excessiveResets:
self.events.add(EventName.localizerMalfunction)
# Check if all manager processes are running
not_running = {p.name for p in self.sm['managerState'].processes if not p.running}
if self.sm.rcv_frame['managerState'] and (not_running - IGNORE_PROCESSES):
self.events.add(EventName.processNotRunning)
# Only allow engagement with brake pressed when stopped behind another stopped car
speeds = self.sm['longitudinalPlan'].speeds
if len(speeds) > 1:
v_future = speeds[-1]
else:
v_future = 100.0
if CS.brakePressed and v_future >= self.CP.vEgoStarting \
and self.CP.openpilotLongitudinalControl and CS.vEgo < 0.3:
self.events.add(EventName.noTarget)
def data_sample(self):
"""Receive data from sockets and update carState"""
# Update carState from CAN
can_strs = messaging.drain_sock_raw(self.can_sock, wait_for_one=True)
CS = self.CI.update(self.CC, can_strs)
self.sm.update(0)
all_valid = CS.canValid and self.sm.all_alive_and_valid()
if not self.initialized and (all_valid or self.sm.frame * DT_CTRL > 3.5 or SIMULATION):
if not self.read_only:
self.CI.init(self.CP, self.can_sock, self.pm.sock['sendcan'])
self.initialized = True
Params().put_bool("ControlsReady", True)
# Check for CAN timeout
if not can_strs:
self.can_rcv_error_counter += 1
self.can_rcv_error = True
else:
self.can_rcv_error = False
# When the panda and controlsd disagree on controls_allowed
# we want to disengage openpilot. However, the panda's status arrives over
# a different socket than the CAN messages, so one can arrive earlier than the other.
# Therefore we allow a mismatch for two samples before triggering the disengagement.
if not self.enabled:
self.mismatch_counter = 0
# All pandas not in silent mode must have controlsAllowed when openpilot is enabled
if any(not ps.controlsAllowed and self.enabled for ps in self.sm['pandaStates']
if ps.safetyModel not in IGNORED_SAFETY_MODES):
self.mismatch_counter += 1
self.distance_traveled += CS.vEgo * DT_CTRL
return CS
def state_transition(self, CS):
"""Compute conditional state transitions and execute actions on state transitions"""
self.v_cruise_kph_last = self.v_cruise_kph
# if stock cruise is completely disabled, then we can use our own set speed logic
if not self.CP.pcmCruise:
self.v_cruise_kph = update_v_cruise(self.v_cruise_kph, CS.buttonEvents, self.button_timers, self.enabled, self.is_metric)
elif self.CP.pcmCruise and CS.cruiseState.enabled:
self.v_cruise_kph = CS.cruiseState.speed * CV.MS_TO_KPH
# decrement the soft disable timer at every step, as it's reset on
# entrance in SOFT_DISABLING state
self.soft_disable_timer = max(0, self.soft_disable_timer - 1)
self.current_alert_types = [ET.PERMANENT]
# ENABLED, PRE ENABLING, SOFT DISABLING
if self.state != State.disabled:
# user and immediate disable always have priority in a non-disabled state
if self.events.any(ET.USER_DISABLE):
self.state = State.disabled
self.current_alert_types.append(ET.USER_DISABLE)
elif self.events.any(ET.IMMEDIATE_DISABLE):
self.state = State.disabled
self.current_alert_types.append(ET.IMMEDIATE_DISABLE)
else:
# ENABLED
if self.state == State.enabled:
if self.events.any(ET.SOFT_DISABLE):
self.state = State.softDisabling
self.soft_disable_timer = int(SOFT_DISABLE_TIME / DT_CTRL)
self.current_alert_types.append(ET.SOFT_DISABLE)
# SOFT DISABLING
elif self.state == State.softDisabling:
if not self.events.any(ET.SOFT_DISABLE):
# no more soft disabling condition, so go back to ENABLED
self.state = State.enabled
elif self.events.any(ET.SOFT_DISABLE) and self.soft_disable_timer > 0:
self.current_alert_types.append(ET.SOFT_DISABLE)
elif self.soft_disable_timer <= 0:
self.state = State.disabled
# PRE ENABLING
elif self.state == State.preEnabled:
if not self.events.any(ET.PRE_ENABLE):
self.state = State.enabled
else:
self.current_alert_types.append(ET.PRE_ENABLE)
# DISABLED
elif self.state == State.disabled:
if self.events.any(ET.ENABLE):
if self.events.any(ET.NO_ENTRY):
self.current_alert_types.append(ET.NO_ENTRY)
else:
if self.events.any(ET.PRE_ENABLE):
self.state = State.preEnabled
else:
self.state = State.enabled
self.current_alert_types.append(ET.ENABLE)
self.v_cruise_kph = initialize_v_cruise(CS.vEgo, CS.buttonEvents, self.v_cruise_kph_last)
# Check if actuators are enabled
self.active = self.state == State.enabled or self.state == State.softDisabling
if self.active:
self.current_alert_types.append(ET.WARNING)
# Check if openpilot is engaged
self.enabled = self.active or self.state == State.preEnabled
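# State machine summary (a descriptive note derived from the logic above):
#   disabled      -> enabled / preEnabled  on ENABLE (blocked by NO_ENTRY)
#   enabled       -> softDisabling         on SOFT_DISABLE
#   softDisabling -> enabled               when the SOFT_DISABLE condition clears
#   softDisabling -> disabled              when soft_disable_timer runs out
#   preEnabled    -> enabled               when PRE_ENABLE events clear
#   any non-disabled state -> disabled     on USER_DISABLE or IMMEDIATE_DISABLE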
def state_control(self, CS):
"""Given the state, this function returns an actuators packet"""
# Update VehicleModel
params = self.sm['liveParameters']
x = max(params.stiffnessFactor, 0.1)
sr = max(params.steerRatio, 0.1)
self.VM.update_params(x, sr)
lat_plan = self.sm['lateralPlan']
long_plan = self.sm['longitudinalPlan']
actuators = car.CarControl.Actuators.new_message()
actuators.longControlState = self.LoC.long_control_state
if CS.leftBlinker or CS.rightBlinker:
self.last_blinker_frame = self.sm.frame
# State specific actions
if not self.active:
self.LaC.reset()
self.LoC.reset(v_pid=CS.vEgo)
if not self.joystick_mode:
# accel PID loop
pid_accel_limits = self.CI.get_pid_accel_limits(self.CP, CS.vEgo, self.v_cruise_kph * CV.KPH_TO_MS)
actuators.accel = self.LoC.update(self.active, CS, self.CP, long_plan, pid_accel_limits)
# Steering PID loop and lateral MPC
lat_active = self.active and not CS.steerWarning and not CS.steerError and CS.vEgo > self.CP.minSteerSpeed
desired_curvature, desired_curvature_rate = get_lag_adjusted_curvature(self.CP, CS.vEgo,
lat_plan.psis,
lat_plan.curvatures,
lat_plan.curvatureRates)
actuators.steer, actuators.steeringAngleDeg, lac_log = self.LaC.update(lat_active, CS, self.CP, self.VM, params, self.last_actuators,
desired_curvature, desired_curvature_rate)
else:
lac_log = log.ControlsState.LateralDebugState.new_message()
if self.sm.rcv_frame['testJoystick'] > 0 and self.active:
actuators.accel = 4.0*clip(self.sm['testJoystick'].axes[0], -1, 1)
steer = clip(self.sm['testJoystick'].axes[1], -1, 1)
# max angle is 45 for angle-based cars
actuators.steer, actuators.steeringAngleDeg = steer, steer * 45.
lac_log.active = True
lac_log.steeringAngleDeg = CS.steeringAngleDeg
lac_log.output = steer
lac_log.saturated = abs(steer) >= 0.9
# Check for difference between desired angle and angle for angle based control
angle_control_saturated = self.CP.steerControlType == car.CarParams.SteerControlType.angle and \
abs(actuators.steeringAngleDeg - CS.steeringAngleDeg) > STEER_ANGLE_SATURATION_THRESHOLD
if angle_control_saturated and not CS.steeringPressed and self.active:
self.saturated_count += 1
else:
self.saturated_count = 0
# Send a "steering required alert" if saturation count has reached the limit
if (lac_log.saturated and not CS.steeringPressed) or \
(self.saturated_count > STEER_ANGLE_SATURATION_TIMEOUT):
if len(lat_plan.dPathPoints):
# Check if we deviated from the path
# TODO use desired vs actual curvature
left_deviation = actuators.steer > 0 and lat_plan.dPathPoints[0] < -0.20
right_deviation = actuators.steer < 0 and lat_plan.dPathPoints[0] > 0.20
if left_deviation or right_deviation:
self.events.add(EventName.steerSaturated)
# Ensure no NaNs/Infs
for p in ACTUATOR_FIELDS:
attr = getattr(actuators, p)
if not isinstance(attr, Number):
continue
if not math.isfinite(attr):
cloudlog.error(f"actuators.{p} not finite {actuators.to_dict()}")
setattr(actuators, p, 0.0)
return actuators, lac_log
def update_button_timers(self, buttonEvents):
# increment timer for buttons still pressed
for k in self.button_timers.keys():
if self.button_timers[k] > 0:
self.button_timers[k] += 1
for b in buttonEvents:
if b.type.raw in self.button_timers:
self.button_timers[b.type.raw] = 1 if b.pressed else 0
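# Descriptive note (not original code): a timer value of 0 means the button is
# released; a positive value counts 100Hz control frames since the press, which
# update_v_cruise uses to detect long presses for continuous speed changes.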
def publish_logs(self, CS, start_time, actuators, lac_log):
"""Send actuators and hud commands to the car, send controlsstate and MPC logging"""
CC = car.CarControl.new_message()
CC.enabled = self.enabled
CC.active = self.active
CC.actuators = actuators
orientation_value = self.sm['liveLocationKalman'].orientationNED.value
if len(orientation_value) > 2:
CC.roll = orientation_value[0]
CC.pitch = orientation_value[1]
CC.cruiseControl.cancel = CS.cruiseState.enabled and (not self.enabled or not self.CP.pcmCruise)
if self.joystick_mode and self.sm.rcv_frame['testJoystick'] > 0 and self.sm['testJoystick'].buttons[0]:
CC.cruiseControl.cancel = True
hudControl = CC.hudControl
hudControl.setSpeed = float(self.v_cruise_kph * CV.KPH_TO_MS)
hudControl.speedVisible = self.enabled
hudControl.lanesVisible = self.enabled
hudControl.leadVisible = self.sm['longitudinalPlan'].hasLead
hudControl.rightLaneVisible = True
hudControl.leftLaneVisible = True
recent_blinker = (self.sm.frame - self.last_blinker_frame) * DT_CTRL < 5.0 # 5s blinker cooldown
ldw_allowed = self.is_ldw_enabled and CS.vEgo > LDW_MIN_SPEED and not recent_blinker \
and not self.active and self.sm['liveCalibration'].calStatus == Calibration.CALIBRATED
model_v2 = self.sm['modelV2']
desire_prediction = model_v2.meta.desirePrediction
if len(desire_prediction) and ldw_allowed:
right_lane_visible = self.sm['lateralPlan'].rProb > 0.5
left_lane_visible = self.sm['lateralPlan'].lProb > 0.5
l_lane_change_prob = desire_prediction[Desire.laneChangeLeft - 1]
r_lane_change_prob = desire_prediction[Desire.laneChangeRight - 1]
lane_lines = model_v2.laneLines
l_lane_close = left_lane_visible and (lane_lines[1].y[0] > -(1.08 + CAMERA_OFFSET))
r_lane_close = right_lane_visible and (lane_lines[2].y[0] < (1.08 - CAMERA_OFFSET))
hudControl.leftLaneDepart = bool(l_lane_change_prob > LANE_DEPARTURE_THRESHOLD and l_lane_close)
hudControl.rightLaneDepart = bool(r_lane_change_prob > LANE_DEPARTURE_THRESHOLD and r_lane_close)
if hudControl.rightLaneDepart or hudControl.leftLaneDepart:
self.events.add(EventName.ldw)
clear_event = ET.WARNING if ET.WARNING not in self.current_alert_types else None
alerts = self.events.create_alerts(self.current_alert_types, [self.CP, self.sm, self.is_metric, self.soft_disable_timer])
self.AM.add_many(self.sm.frame, alerts)
self.AM.process_alerts(self.sm.frame, clear_event)
hudControl.visualAlert = self.AM.visual_alert
if not self.read_only and self.initialized:
# send car controls over can
self.last_actuators, can_sends = self.CI.apply(CC)
self.pm.send('sendcan', can_list_to_can_capnp(can_sends, msgtype='sendcan', valid=CS.canValid))
CC.actuatorsOutput = self.last_actuators
force_decel = (self.sm['driverMonitoringState'].awarenessStatus < 0.) or \
(self.state == State.softDisabling)
# Curvature & Steering angle
params = self.sm['liveParameters']
steer_angle_without_offset = math.radians(CS.steeringAngleDeg - params.angleOffsetDeg)
curvature = -self.VM.calc_curvature(steer_angle_without_offset, CS.vEgo, params.roll)
# controlsState
dat = messaging.new_message('controlsState')
dat.valid = CS.canValid
controlsState = dat.controlsState
controlsState.alertText1 = self.AM.alert_text_1
controlsState.alertText2 = self.AM.alert_text_2
controlsState.alertSize = self.AM.alert_size
controlsState.alertStatus = self.AM.alert_status
controlsState.alertBlinkingRate = self.AM.alert_rate
controlsState.alertType = self.AM.alert_type
controlsState.alertSound = self.AM.audible_alert
controlsState.canMonoTimes = list(CS.canMonoTimes)
controlsState.longitudinalPlanMonoTime = self.sm.logMonoTime['longitudinalPlan']
controlsState.lateralPlanMonoTime = self.sm.logMonoTime['lateralPlan']
controlsState.enabled = self.enabled
controlsState.active = self.active
controlsState.curvature = curvature
controlsState.state = self.state
controlsState.engageable = not self.events.any(ET.NO_ENTRY)
controlsState.longControlState = self.LoC.long_control_state
controlsState.vPid = float(self.LoC.v_pid)
controlsState.vCruise = float(self.v_cruise_kph)
controlsState.upAccelCmd = float(self.LoC.pid.p)
controlsState.uiAccelCmd = float(self.LoC.pid.i)
controlsState.ufAccelCmd = float(self.LoC.pid.f)
controlsState.cumLagMs = -self.rk.remaining * 1000.
controlsState.startMonoTime = int(start_time * 1e9)
controlsState.forceDecel = bool(force_decel)
controlsState.canErrorCounter = self.can_rcv_error_counter
lat_tuning = self.CP.lateralTuning.which()
if self.joystick_mode:
controlsState.lateralControlState.debugState = lac_log
elif self.CP.steerControlType == car.CarParams.SteerControlType.angle:
controlsState.lateralControlState.angleState = lac_log
elif lat_tuning == 'pid':
controlsState.lateralControlState.pidState = lac_log
elif lat_tuning == 'lqr':
controlsState.lateralControlState.lqrState = lac_log
elif lat_tuning == 'indi':
controlsState.lateralControlState.indiState = lac_log
self.pm.send('controlsState', dat)
# carState
car_events = self.events.to_msg()
cs_send = messaging.new_message('carState')
cs_send.valid = CS.canValid
cs_send.carState = CS
cs_send.carState.events = car_events
self.pm.send('carState', cs_send)
# carEvents - logged every second or on change
if (self.sm.frame % int(1. / DT_CTRL) == 0) or (self.events.names != self.events_prev):
ce_send = messaging.new_message('carEvents', len(self.events))
ce_send.carEvents = car_events
self.pm.send('carEvents', ce_send)
self.events_prev = self.events.names.copy()
# carParams - logged every 50 seconds (> 1 per segment)
if (self.sm.frame % int(50. / DT_CTRL) == 0):
cp_send = messaging.new_message('carParams')
cp_send.carParams = self.CP
self.pm.send('carParams', cp_send)
# carControl
cc_send = messaging.new_message('carControl')
cc_send.valid = CS.canValid
cc_send.carControl = CC
self.pm.send('carControl', cc_send)
# copy CarControl to pass to CarInterface on the next iteration
self.CC = CC
def step(self):
start_time = sec_since_boot()
self.prof.checkpoint("Ratekeeper", ignore=True)
# Sample data from sockets and get a carState
CS = self.data_sample()
self.prof.checkpoint("Sample")
self.update_events(CS)
if not self.read_only and self.initialized:
# Update control state
self.state_transition(CS)
self.prof.checkpoint("State transition")
# Compute actuators (runs PID loops and lateral MPC)
actuators, lac_log = self.state_control(CS)
self.prof.checkpoint("State Control")
# Publish data
self.publish_logs(CS, start_time, actuators, lac_log)
self.prof.checkpoint("Sent")
self.update_button_timers(CS.buttonEvents)
def controlsd_thread(self):
while True:
self.step()
self.rk.monitor_time()
self.prof.display()
def main(sm=None, pm=None, logcan=None):
controls = Controls(sm, pm, logcan)
controls.controlsd_thread()
if __name__ == "__main__":
main()
| []
| []
| [
"NO_CAN_TIMEOUT"
]
| [] | ["NO_CAN_TIMEOUT"] | python | 1 | 0 | |
check/check_suite_test.go | package main_test
import (
"os"
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gexec"
)
var checkPath string
var useInstanceProfile = os.Getenv("SEMVER_TESTING_USE_INSTANCE_PROFILE")
var accessKeyID = os.Getenv("SEMVER_TESTING_ACCESS_KEY_ID")
var secretAccessKey = os.Getenv("SEMVER_TESTING_SECRET_ACCESS_KEY")
var bucketName = os.Getenv("SEMVER_TESTING_BUCKET")
var regionName = os.Getenv("SEMVER_TESTING_REGION")
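// The suite is driven entirely by environment variables; an illustrative
// invocation (values are placeholders, not real credentials) might be:
//
//   SEMVER_TESTING_ACCESS_KEY_ID=AKIA... \
//   SEMVER_TESTING_SECRET_ACCESS_KEY=... \
//   SEMVER_TESTING_BUCKET=my-test-bucket \
//   SEMVER_TESTING_REGION=us-east-1 \
//   go test ./check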
var _ = BeforeSuite(func() {
var err error
if useInstanceProfile == "" {
Expect(accessKeyID).ShouldNot(BeEmpty(), "must specify $SEMVER_TESTING_ACCESS_KEY_ID or set $SEMVER_TESTING_USE_INSTANCE_PROFILE=true")
Expect(secretAccessKey).ShouldNot(BeEmpty(), "must specify $SEMVER_TESTING_SECRET_ACCESS_KEY or set $SEMVER_TESTING_USE_INSTANCE_PROFILE=true")
}
Expect(bucketName).ShouldNot(BeEmpty(), "must specify $SEMVER_TESTING_BUCKET")
Expect(regionName).ShouldNot(BeEmpty(), "must specify $SEMVER_TESTING_REGION")
checkPath, err = gexec.Build("github.com/concourse/semver-resource/check")
Expect(err).NotTo(HaveOccurred())
})
var _ = AfterSuite(func() {
gexec.CleanupBuildArtifacts()
})
func TestCheck(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Check Suite")
}
| [
"\"SEMVER_TESTING_USE_INSTANCE_PROFILE\"",
"\"SEMVER_TESTING_ACCESS_KEY_ID\"",
"\"SEMVER_TESTING_SECRET_ACCESS_KEY\"",
"\"SEMVER_TESTING_BUCKET\"",
"\"SEMVER_TESTING_REGION\""
]
| []
| [
"SEMVER_TESTING_SECRET_ACCESS_KEY",
"SEMVER_TESTING_REGION",
"SEMVER_TESTING_BUCKET",
"SEMVER_TESTING_ACCESS_KEY_ID",
"SEMVER_TESTING_USE_INSTANCE_PROFILE"
]
| [] | ["SEMVER_TESTING_SECRET_ACCESS_KEY", "SEMVER_TESTING_REGION", "SEMVER_TESTING_BUCKET", "SEMVER_TESTING_ACCESS_KEY_ID", "SEMVER_TESTING_USE_INSTANCE_PROFILE"] | go | 5 | 0 | |
cmd/licenser.go | package cmd
/*
Copyright © 2020 Tyk Technology https://tyk.io
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import (
"io/ioutil"
"net/http"
"os"
"strings"
"github.com/TykTechnologies/gromit/licenser"
"github.com/TykTechnologies/gromit/util"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
)
// token is the endpoint auth, defaults to GROMIT_LICENSER_TOKEN
var token string
// baseURL is the product agnostic part of the endpoint
var baseURL string
// licenserCmd represents the client command
var licenserCmd = &cobra.Command{
Use: "licenser [flags] <mdcb-trial|dash-trial> <path>",
Short: "Get a trial license and writes it to path, overwriting it",
Long: `Uses the Tyk gateway in the internal k8s cluster. This is the same endpoint that the /*-trial commands use and needs the auth token in GROMIT_LICENSER_TOKEN
Supports:
- dashboard
- mdcb`,
Args: cobra.MaximumNArgs(2),
Run: func(cmd *cobra.Command, args []string) {
product := args[0]
opPath := args[1]
l := licenser.Licenser{
Client: http.DefaultClient,
}
license, err := l.Fetch(baseURL, product, token)
if err != nil {
log.Fatal().Err(err).Str("baseURL", baseURL).Msg("could not fetch license")
}
aws, _ := cmd.Flags().GetBool("aws")
license = strings.TrimSuffix(license, "\n")
if aws {
err = util.UpdateSecret(opPath, license)
} else {
err = ioutil.WriteFile(opPath, []byte(license), 0444)
}
if err != nil {
log.Error().Err(err).Str("opFile", opPath).Msg("could not write")
}
},
}
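// Illustrative invocations (assuming the binary is named gromit; the token is
// read from GROMIT_LICENSER_TOKEN unless overridden with --token, and the
// secret name below is a placeholder):
//
//   gromit licenser dash-trial /tmp/dash.license
//   gromit licenser --aws mdcb-trial my/aws/secret-name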
func init() {
rootCmd.AddCommand(licenserCmd)
licenserCmd.PersistentFlags().StringVarP(&baseURL, "baseurl", "b", "https://bots.cluster.internal.tyk.technology/license-bot/", "base url for the licenser endpoint")
licenserCmd.PersistentFlags().StringVarP(&token, "token", "t", os.Getenv("GROMIT_LICENSER_TOKEN"), "Auth token")
licenserCmd.Flags().BoolP("aws", "a", false, "The path is the AWS secret name to store the secret in")
}
| [
"\"GROMIT_LICENSER_TOKEN\""
]
| []
| [
"GROMIT_LICENSER_TOKEN"
]
| [] | ["GROMIT_LICENSER_TOKEN"] | go | 1 | 0 | |
hackerrank/JumpingClounds.java | import java.io.*;
import java.math.*;
import java.security.*;
import java.text.*;
import java.util.*;
import java.util.concurrent.*;
import java.util.regex.*;
public class Solution {
// Complete the jumpingOnClouds function below.
static int jumpingOnClouds(int[] c) {
// Greedy walk: at every step take a 2-jump when the cloud two ahead is a
// safe cumulus (c[i] == 0), otherwise take a 1-jump. Each loop iteration
// performs exactly one jump.
int jumpCount = 0;
int currentPosition = 0;
while (currentPosition < c.length - 1) {
// Fewer than two clouds remain ahead, so a single jump reaches the end.
if (currentPosition + 2 >= c.length) {
jumpCount++;
break;
}
// The cloud two ahead is safe: advance an extra position (2-jump).
if (c[currentPosition + 2] == 0) {
currentPosition++;
}
jumpCount++;
currentPosition++;
}
return jumpCount;
}
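// Worked example (illustrative): for c = {0, 0, 1, 0, 0, 1, 0} the walk is
// 0 -> 1 (cloud 2 is thunder), 1 -> 3, 3 -> 4 (cloud 5 is thunder), 4 -> 6,
// so jumpingOnClouds returns 4.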
private static final Scanner scanner = new Scanner(System.in);
public static void main(String[] args) throws IOException {
BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(System.getenv("OUTPUT_PATH")));
int n = scanner.nextInt();
scanner.skip("(\r\n|[\n\r\u2028\u2029\u0085])?");
int[] c = new int[n];
String[] cItems = scanner.nextLine().split(" ");
scanner.skip("(\r\n|[\n\r\u2028\u2029\u0085])?");
for (int i = 0; i < n; i++) {
int cItem = Integer.parseInt(cItems[i]);
c[i] = cItem;
}
int result = jumpingOnClouds(c);
bufferedWriter.write(String.valueOf(result));
bufferedWriter.newLine();
bufferedWriter.close();
scanner.close();
}
}
| [
"\"OUTPUT_PATH\""
]
| []
| [
"OUTPUT_PATH"
]
| [] | ["OUTPUT_PATH"] | java | 1 | 0 | |
travisconfig.go | package travisci
import (
"io/ioutil"
"os"
"os/user"
"path"
"github.com/go-yaml/yaml"
"github.com/pkg/errors"
)
// Structure of ~/.travis/config.yml
type config struct {
Endpoints map[string]configEndpoint
DefaultEndpoint string `yaml:"default_endpoint"`
}
type configEndpoint struct {
AccessToken string `yaml:"access_token"`
}
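// A typical ~/.travis/config.yml matching these structs looks like
// (illustrative values):
//
//   default_endpoint: https://api.travis-ci.com/
//   endpoints:
//     https://api.travis-ci.com/:
//       access_token: abc123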
func configPath() (string, error) {
if path := os.Getenv("TRAVIS_CONFIG_PATH"); path != "" {
return path, nil
}
currentUser, err := user.Current()
if err != nil {
return "", err
}
return path.Join(currentUser.HomeDir, ".travis"), nil
}
func readTravisConfig() (config, error) {
configPath, err := configPath()
if err != nil {
return config{}, err
}
name := path.Join(configPath, "config.yml")
f, err := os.Open(name)
if os.IsNotExist(err) {
return config{}, nil
} else if err != nil {
return config{}, errors.Wrapf(err, "opening %v", name)
}
bytes, err := ioutil.ReadAll(f)
if err != nil {
return config{}, errors.Wrapf(err, "reading %v", name)
}
var result config
err = yaml.Unmarshal(bytes, &result)
if err != nil {
return config{}, errors.Wrapf(err, "parsing %v as yaml", name)
}
return result, nil
}
| [
"\"TRAVIS_CONFIG_PATH\""
]
| []
| [
"TRAVIS_CONFIG_PATH"
]
| [] | ["TRAVIS_CONFIG_PATH"] | go | 1 | 0 | |
cmd/dns-operator/main.go | package main
import (
"os"
"github.com/openshift/cluster-dns-operator/pkg/operator"
operatorconfig "github.com/openshift/cluster-dns-operator/pkg/operator/config"
"github.com/openshift/cluster-dns-operator/pkg/operator/controller"
"github.com/sirupsen/logrus"
"sigs.k8s.io/controller-runtime/pkg/metrics"
"sigs.k8s.io/controller-runtime/pkg/runtime/signals"
)
func main() {
metrics.DefaultBindAddress = ":60000"
// Collect operator configuration.
releaseVersion := os.Getenv("RELEASE_VERSION")
if len(releaseVersion) == 0 {
releaseVersion = controller.UnknownVersionValue
logrus.Infof("RELEASE_VERSION environment variable is missing, defaulting to %q", controller.UnknownVersionValue)
}
coreDNSImage := os.Getenv("IMAGE")
if len(coreDNSImage) == 0 {
logrus.Fatalf("IMAGE environment variable is required")
}
cliImage := os.Getenv("OPENSHIFT_CLI_IMAGE")
if len(cliImage) == 0 {
logrus.Fatalf("OPENSHIFT_CLI_IMAGE environment variable is required")
}
operatorConfig := operatorconfig.Config{
OperatorReleaseVersion: releaseVersion,
CoreDNSImage: coreDNSImage,
OpenshiftCLIImage: cliImage,
}
// Set up and start the operator.
op, err := operator.New(operatorConfig)
if err != nil {
logrus.Fatalf("failed to create operator: %v", err)
}
if err := op.Start(signals.SetupSignalHandler()); err != nil {
logrus.Fatalf("failed to start operator: %v", err)
}
}
| [
"\"RELEASE_VERSION\"",
"\"IMAGE\"",
"\"OPENSHIFT_CLI_IMAGE\""
]
| []
| [
"OPENSHIFT_CLI_IMAGE",
"RELEASE_VERSION",
"IMAGE"
]
| [] | ["OPENSHIFT_CLI_IMAGE", "RELEASE_VERSION", "IMAGE"] | go | 3 | 0 | |
python/tvm/autotvm/measure/measure_methods.py | # pylint: disable=invalid-name,too-many-function-args,too-many-nested-blocks
"""
Functions that run on executor for measurement.
These functions are responsible for building the tvm module, uploading it to
remote devices, recording the running time costs, and checking the correctness of the output.
"""
import logging
import shutil
import os
import threading
import time
from random import getrandbits
from collections import namedtuple
import tempfile
import numpy as np
from ... import ir_pass, build, build_config, nd, TVMError, register_func, \
rpc as _rpc, target as _target
from ...contrib import nvcc, ndk
from ..util import get_const_tuple
from ..env import AutotvmGlobalScope
from ..task.space import InstantiationError
from .measure import MeasureResult, MeasureErrorNo, Builder, Runner
from .local_executor import LocalExecutor
logger = logging.getLogger('autotvm')
class BuildResult(namedtuple("BuildResult", ('filename', 'arg_info', 'error', 'time_cost'))):
"""
Stores all the necessary inputs for a measurement.
Parameters
----------
filename : str
The filename of generated library
arg_info : Tuple
The shape and dtype information of tvm tensor arguments
error : Exception
The error happens during compilation.
time_cost : float
The time cost of building
"""
class LocalBuilder(Builder):
"""Run compilation on local machine
Parameters
----------
timeout: float
The timeout of a compilation
n_parallel: int
The number of tasks run in parallel. "None" will use all cpu cores
build_func: callable or str
If is 'default', use default build function
If is 'ndk', use function for android ndk
If is callable, use it as custom build function
"""
def __init__(self, timeout=10, n_parallel=None, build_func='default'):
super(LocalBuilder, self).__init__(timeout, n_parallel)
if isinstance(build_func, str):
if build_func == 'default':
build_func = default_build_func
elif build_func == 'ndk':
build_func = android_ndk_build_func
else:
raise ValueError("Invalid build_func: " + build_func)
self.build_func = build_func
self.executor = LocalExecutor(timeout=timeout)
self.tmp_dir = tempfile.mkdtemp()
def build(self, measure_inputs):
results = []
shutil.rmtree(self.tmp_dir)
self.tmp_dir = tempfile.mkdtemp()
for i in range(0, len(measure_inputs), self.n_parallel):
futures = []
for inp in measure_inputs[i:i + self.n_parallel]:
ret = self.executor.submit(self.build_func,
inp,
self.tmp_dir,
**self.build_kwargs)
futures.append(ret)
for future in futures:
res = future.get()
if isinstance(res, Exception):
# timeout or fleet error, return MeasureResult directly
results.append(MeasureResult((res,), MeasureErrorNo.BUILD_TIMEOUT,
self.timeout, time.time()))
elif res.error is not None:
# instantiation error
if isinstance(res.error, InstantiationError):
results.append(MeasureResult((res.error,),
MeasureErrorNo.INSTANTIATION_ERROR,
res.time_cost, time.time()))
else:
if "InstantiationError" in str(res.error):
msg = str(res.error)
try:
msg = msg.split('\n')[-2].split(": ")[1]
except Exception: # pylint: disable=broad-except
pass
results.append(MeasureResult((InstantiationError(msg),),
MeasureErrorNo.INSTANTIATION_ERROR,
res.time_cost, time.time()))
else: # tvm error
results.append(MeasureResult((res.error,),
MeasureErrorNo.COMPILE_HOST,
res.time_cost, time.time()))
else:
# return BuildResult
results.append(res)
return results
class RPCRunner(Runner):
"""Run generated code on remove devices.
This function will ask a RPC Tracker to get device for measurement.
Parameters
----------
timeout: float
The timeout of a compilation
n_parallel: int
The number of tasks run in parallel. "None" will use all cpu cores
key: str
The key of the device registered in the tracker
host: str
The host address of RPC Tracker
port: int
The port of RPC Tracker
number : int, optional
Number of times to do measurement for tasking average
repeat : int, optional
Number of times to repeat the measurement.
In total, the generated code will be run (1 + number x repeat) times,
where the first one is warm up. The returned result contains `repeat` costs,
each of which is the average of `number` test runs.
min_repeat_ms : float, optional
Minimum duration of a timer measurement in milliseconds.
When the run time of a measurement trial falls below this time, the
`number` parameter will be automatically increased.
Set this to improve the accuracy of perf measurement, e.g., when timers
are not precise enough to capture short-running tasks. This parameter is
also critical when devices need a certain minimum running time to "warm
up," such as GPUs that need time to reach a performance power state.
cooldown_interval: float, optional
The cool down interval between two measurements.
check_correctness: bool, optional
Whether check correctness after measurement. This will use llvm cpu target to
call your template and get the reference output.
This can work for TOPI templates, but may not work for your custom template.
"""
def __init__(self,
key, host, port, priority=1,
timeout=10, n_parallel=None,
number=4, repeat=3, min_repeat_ms=0, cooldown_interval=0.1,
check_correctness=False):
super(RPCRunner, self).__init__(timeout, n_parallel)
self.key = key
self.host = host
self.port = port
self.priority = priority
self.timeout = timeout
self.number = number
self.repeat = repeat
self.min_repeat_ms = min_repeat_ms
self.cur_number = number
self.ref_input = None
self.ref_output = None
self.check_correctness = check_correctness
self.cooldown_interval = cooldown_interval
self.executor = LocalExecutor()
def set_task(self, task):
self.task = task
self.cur_number = self.number
if check_remote(task.target, self.key, self.host, self.port):
logger.info("Get devices for measurement successfully!")
else:
raise RuntimeError("Cannot get remote devices from the tracker. "
"Please check the status of tracker by "
"'python -m tvm.exec.query_rpc_tracker --port [THE PORT YOU USE]' "
"and make sure you have free devices on the queue status.")
if self.check_correctness:
# use llvm cpu to generate a reference input/output
# this option works for tuning topi, but might not work for your custom op
with _target.create("llvm"):
s, arg_bufs = task.instantiate(task.config_space.get(0))
self.ref_input = [np.random.uniform(size=get_const_tuple(x.shape)).astype(x.dtype)
for x in arg_bufs]
func = build(s, arg_bufs, "llvm")
tvm_buf = [nd.array(x) for x in self.ref_input]
func(*tvm_buf)
self.ref_output = [x.asnumpy() for x in tvm_buf]
def get_build_kwargs(self):
kwargs = {}
if 'cuda' in self.task.target.keys or 'opencl' in self.task.target.keys:
remote = request_remote(self.key, self.host, self.port)
ctx = remote.context(str(self.task.target), 0)
max_dims = ctx.max_thread_dimensions
kwargs['check_gpu'] = {
'max_shared_memory_per_block': ctx.max_shared_memory_per_block,
'max_threads_per_block': ctx.max_threads_per_block,
'max_thread_x': max_dims[0],
'max_thread_y': max_dims[1],
'max_thread_z': max_dims[2],
}
if 'cuda' in self.task.target.keys:
kwargs["cuda_arch"] = "sm_" + "".join(ctx.compute_version.split('.'))
return kwargs
def run(self, measure_inputs, build_results):
results = []
remote_args = (self.key, self.host, self.port, self.priority, self.timeout)
for i in range(0, len(measure_inputs), self.n_parallel):
futures = []
for measure_inp, build_res in zip(measure_inputs[i:i+self.n_parallel],
build_results[i:i+self.n_parallel]):
ret = self.executor.submit(run_through_rpc,
measure_inp,
build_res,
self.cur_number,
self.repeat,
self.cooldown_interval,
remote_args,
self.ref_input,
self.ref_output)
futures.append(ret)
for future in futures:
res = future.get()
if isinstance(res, Exception): # executor error or timeout
results.append(MeasureResult((str(res),), MeasureErrorNo.RUN_TIMEOUT,
self.timeout, time.time()))
else:
results.append(res)
# If some runs were too fast, do remeasure for them
# to meet the requirement of `min_repeat_ms`
remeasure = np.zeros((len(measure_inputs),), dtype=np.bool)
pre_number = next_number = self.cur_number
min_repeat_duration = self.min_repeat_ms / 1000.0
for i, res in enumerate(results):
if res.error_no == MeasureErrorNo.NO_ERROR:
if np.mean(res.costs) * pre_number <= min_repeat_duration:
next_number = max(next_number,
int(np.ceil(min_repeat_duration / np.mean(res.costs))))
remeasure[i] = True
if pre_number != next_number:
self.cur_number = next_number
msg = "increasing number to %d" % self.cur_number
logger.info(msg)
re_measure_inputs = [x for i, x in enumerate(measure_inputs) if remeasure[i]]
re_build_results = [x for i, x in enumerate(build_results) if remeasure[i]]
re_res = self.run(re_measure_inputs, re_build_results)
ct = 0
for i, rerun in enumerate(remeasure):
if rerun:
results[i] = re_res[ct]
ct += 1
return results
class LocalRunner(RPCRunner):
"""Run generated code on local devices.
Parameters
----------
timeout: float
The timeout of a compilation
number : int, optional
Number of times to do measurement for tasking average
repeat : int, optional
Number of times to repeat the measurement.
In total, the generated code will be run (1 + number x repeat) times,
where the first one is warm up. The returned result contains `repeat` costs,
each of which is the average of `number` test runs.
min_repeat_ms : float, optional
Minimum duration of a timer measurement in milliseconds.
When the run time of a measurement trial falls below this time, the
`number` parameter will be automatically increased.
Set this to improve the accuracy of perf measurement, e.g., when timers
are not precise enough to capture short-running tasks. This parameter is
also critical when devices need a certain minimum running time to "warm
up," such as GPUs that need time to reach a performance power state.
cooldown_interval: float, optional
The cool down interval between two measurements.
check_correctness: bool, optional
Whether check correctness after measurement. This will use llvm cpu target to
call your template and get the reference output.
This can work for TOPI templates, but may not work for your custom template.
Note
----
This is a "fake" local mode. We start a silent rpc tracker and rpc server
for the user. In this way we reuse timeout/isolation mechanism in RPC infrastructure.
"""
def __init__(self,
timeout=10,
number=4, repeat=3, min_repeat_ms=0, cooldown_interval=0.1,
check_correctness=False):
super(LocalRunner, self).__init__('', None, None, 0,
timeout=timeout, n_parallel=1,
number=number, repeat=repeat,
min_repeat_ms=min_repeat_ms,
cooldown_interval=cooldown_interval,
check_correctness=check_correctness)
self.tracker = None
self.server = None
def set_task(self, task):
self.task = task
from ...rpc.tracker import Tracker
from ...rpc.server import Server
tracker = Tracker('localhost', port=9000, port_end=10000, silent=True)
device_key = '$local$device$%d' % tracker.port
server = Server('localhost', port=9000, port_end=10000,
key=device_key,
use_popen=True, silent=True,
tracker_addr=(tracker.host, tracker.port))
self.key = device_key
self.host = tracker.host
self.port = tracker.port
super(LocalRunner, self).set_task(task)
return server, tracker
def _build_func_common(measure_input, check_gpu=None, cuda_arch=None, build_option=None):
"""Common part for building a configuration"""
target, task, config = measure_input
with target:
s, args = task.instantiate(config)
# check invalidity of template and code hash consistency
if not config.valid():
raise InstantiationError(config.errors)
opts = build_option or {}
if check_gpu: # Add verify pass to filter out invalid configs in advance.
opts["add_lower_pass"] = [(2, gpu_verify_pass(**check_gpu))]
if cuda_arch:
set_cuda_target_arch(cuda_arch)
with build_config(**opts):
func = build(s, args, target_host=task.target_host)
return func, tuple((get_const_tuple(x.shape), x.dtype) for x in args)
def default_build_func(measure_input, tmp_dir, **kwargs):
"""
Default build func. This can work for cuda, opencl, llvm backend
Parameters
----------
measure_input: MeasureInput
The input of measurement
tmp_dir: str
The path of temporary directory to export generated library
"""
tic = time.time()
try:
filename = os.path.join(tmp_dir, "tmp_func_%0x.tar" % getrandbits(64))
func, arg_info = _build_func_common(measure_input, **kwargs)
func.export_library(filename)
except Exception as e: # pylint: disable=broad-except
return BuildResult(None, None, e, time.time() - tic)
return BuildResult(filename, arg_info, None, time.time() - tic)
def android_ndk_build_func(measure_input, tmp_dir, **kwargs):
"""
Build function for android device using ndk.
Parameters
----------
measure_input: MeasureInput
The input of measurement
tmp_dir: str
The path of temporary directory to export generated library
"""
tic = time.time()
try:
filename = os.path.join(tmp_dir, "tmp_func_%0x.so" % getrandbits(64))
func, arg_info = _build_func_common(measure_input, **kwargs)
func.export_library(filename, ndk.create_shared)
except Exception as e: # pylint: disable=broad-except
return BuildResult(None, None, e, time.time() - tic)
return BuildResult(filename, arg_info, None, time.time() - tic)
def run_through_rpc(measure_input, build_result,
number, repeat, cooldown_interval,
remote_args, ref_input=None, ref_output=None):
"""Run a generated library through rpc
Parameters
----------
measure_input: MeasureInput
The raw measure input
build_result: BuildResult
The result returned from Builder. This contains the path to the generated library.
number : int, optional
Number of times to do measurement for tasking average
repeat : int, optional
Number of times to repeat the measurement.
In total, the generated code will be run (1 + number x repeat) times,
where the first one is warm up. The returned result contains `repeat` costs,
each of which is the average of `number` test runs.
cooldown_interval: float
The cool down interval between two measurements
remote_args: Tuple
The argument for request_remote
ref_input: List of np.ndarray
The reference input used for checking correctness
ref_output: List of np.ndarray
The reference output used for checking correctness
"""
if isinstance(build_result, MeasureResult):
return build_result
tic = time.time()
errno = MeasureErrorNo.NO_ERROR
try:
# upload built module
remote = request_remote(*remote_args)
remote.upload(build_result.filename)
func = remote.load_module(os.path.split(build_result.filename)[1])
ctx = remote.context(str(measure_input.target), 0)
time_f = func.time_evaluator(
func.entry_name, ctx, number=number, repeat=repeat)
# set input
if ref_input:
args = [nd.array(x, ctx=ctx) for x in ref_input]
else:
# create empty arrays on the remote device and copy them once.
# This can avoid some memory issues that make the measurement results unreliable.
args = [nd.empty(x[0], dtype=x[1], ctx=ctx) for x in build_result.arg_info]
args = [nd.array(x, ctx=ctx) for x in args]
ctx.sync()
costs = time_f(*args).results
# clean up remote files
remote.remove(build_result.filename)
remote.remove(os.path.splitext(build_result.filename)[0] + '.so')
remote.remove('')
if len(costs) > 2: # remove largest and smallest value to reduce variance
costs = list(costs)
costs.sort()
costs = tuple(costs[1:-1])
# check correctness of output
if ref_output:
for expected, real in zip(ref_output, args):
if not np.allclose(expected, real.asnumpy(), rtol=1e-4):
logger.warning("Wrong Answer!")
errno = MeasureErrorNo.WRONG_ANSWER
except TVMError as exc:
msg = str(exc)
if "Stack trace returned" in msg:
msg = msg[:msg.index("Stack trace returned")]
if "CUDA Source" in msg:
msg = msg[:msg.index("CUDA Source")]
costs = (RuntimeError(msg[:1024]),)
errno = MeasureErrorNo.RUNTIME_DEVICE
tstamp = time.time()
time.sleep(cooldown_interval)
return MeasureResult(costs, errno, tstamp - tic + build_result.time_cost, tstamp)
def request_remote(device_key, host=None, port=None, priority=1, timeout=60):
"""Request a remote session
Parameters
----------
device_key: string
The device key of registered device in tracker
host: host, optional
The host address of rpc tracker.
If is none, will use environment variable "TVM_TRACKER_HOST"
port: int, optional
The port of rpc tracker.
If is none, will use environment variable "TVM_TRACKER_PORT"
priority: int, optional
The priority of this request, larger is more prior
timeout: float, optional
The timeout of this session (units: second)
Returns
-------
session: RPCSession
"""
# connect to the tracker
host = host or os.environ['TVM_TRACKER_HOST']
port = port or int(os.environ['TVM_TRACKER_PORT'])
tracker = _rpc.connect_tracker(host, port)
remote = tracker.request(device_key, priority=priority,
session_timeout=timeout)
return remote
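# Minimal usage sketch (illustrative; assumes a tracker is reachable and a
# device is registered under the key "android"; host/port otherwise come
# from the TVM_TRACKER_HOST / TVM_TRACKER_PORT environment variables):
#
#   remote = request_remote("android", host="0.0.0.0", port=9190, timeout=60)
#   ctx = remote.context("opencl")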
def check_remote(target, device_key, host=None, port=None, priority=100, timeout=10):
"""
Check the availability of a remote device
Parameters
----------
target: Target
The wanted compilation target
device_key: string
device key of registered device in tracker
host: host, optional
The host address of rpc tracker.
If is none, will use environment variable "TVM_TRACKER_HOST"
port: int, optional
The port address of rpc tracker.
If is none, will use environment variable "TVM_TRACKER_PORT"
priority: int, optional
The priority of this request, larger is more prior
timeout: float, optional
The timeout of this check (units: seconds).
Returns
-------
available: bool
True if can find available device
"""
def _check():
remote = request_remote(device_key, host, port, priority)
ctx = remote.context(str(target))
while not ctx.exist: # wait until we get an available device
pass
t = threading.Thread(target=_check,)
t.start()
t.join(timeout)
return not t.is_alive()
@register_func
def tvm_callback_cuda_compile(code):
"""use nvcc to generate ptx code for better optimization"""
ptx = nvcc.compile_cuda(code, target="ptx", arch=AutotvmGlobalScope.current.cuda_target_arch)
return ptx
def set_cuda_target_arch(arch):
"""set target architecture of nvcc compiler
Parameters
----------
arch: str
The argument of nvcc -arch. (e.g. "sm_51", "sm_62")
"""
AutotvmGlobalScope.current.cuda_target_arch = arch
def gpu_verify_pass(**kwargs):
"""Verify the validity of a gpu kernel.
This pass will check memory usage and number of threads per block.
"""
def verify_pass(stmt):
valid = ir_pass.VerifyGPUCode(stmt, kwargs)
if not valid:
raise InstantiationError("Skipped because of invalid gpu kernel")
return stmt
return verify_pass
| []
| []
| [
"TVM_TRACKER_HOST",
"TVM_TRACKER_PORT"
]
| [] | ["TVM_TRACKER_HOST", "TVM_TRACKER_PORT"] | python | 2 | 0 | |
setup.py | # -*- coding: utf-8 -*-
# python std lib
import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
# If you are using Vagrant, just delete os.link directly; the hard link
# only saves a little disk space, so you should not care.
if os.getenv('USER', '').lower() == 'vagrant':
del os.link
with open('README.md') as f:
readme = f.read()
with open(os.path.join('docs', 'release-notes.rst')) as f:
history = f.read()
setup(
name="redis-py-cluster",
version="2.1.0",
description="Library for communicating with Redis Clusters. Built on top of redis-py lib",
long_description=readme + '\n\n' + history,
long_description_content_type="text/markdown",
author="Johan Andersson",
author_email="[email protected]",
maintainer='Johan Andersson',
maintainer_email='[email protected]',
packages=["rediscluster"],
url='http://github.com/grokzen/redis-py-cluster',
license='MIT',
install_requires=[
'redis>=3.0.0,<3.4.0'
],
python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*",
extras_require={
'hiredis': [
"hiredis>=0.1.3",
],
},
keywords=[
'redis',
'redis cluster',
],
classifiers=[
# As from https://pypi.python.org/pypi?%3Aaction=list_classifiers
# 'Development Status :: 1 - Planning',
# 'Development Status :: 2 - Pre-Alpha',
# 'Development Status :: 3 - Alpha',
# 'Development Status :: 4 - Beta',
'Development Status :: 5 - Production/Stable',
# 'Development Status :: 6 - Mature',
# 'Development Status :: 7 - Inactive',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Environment :: Web Environment',
'Operating System :: POSIX',
'License :: OSI Approved :: MIT License',
]
)
| []
| []
| [
"USER"
]
| [] | ["USER"] | python | 1 | 0 | |
src/main/java/fr/polytechnice/templateapi/TemplateApi.java | package fr.polytechnice.templateapi;
import org.apache.commons.lang3.StringUtils;
import org.glassfish.grizzly.http.server.HttpServer;
import org.glassfish.jersey.grizzly2.httpserver.GrizzlyHttpServerFactory;
import org.glassfish.jersey.jackson.internal.jackson.jaxrs.json.JacksonJaxbJsonProvider;
import org.glassfish.jersey.logging.LoggingFeature;
import org.glassfish.jersey.server.ResourceConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.bridge.SLF4JBridgeHandler;
import javax.ws.rs.core.UriBuilder;
import javax.ws.rs.ext.MessageBodyReader;
import javax.ws.rs.ext.MessageBodyWriter;
import java.io.IOException;
import java.net.URI;
import java.util.concurrent.ExecutionException;
import java.util.logging.Level;
import java.util.logging.LogManager;
public class TemplateApi {
private static final Logger logger = LoggerFactory.getLogger(TemplateApi.class);
private final int port;
private final HttpServer server;
public static void main(String[] args) {
LogManager.getLogManager().reset();
SLF4JBridgeHandler.install();
try {
int port = StringUtils.isNotEmpty(System.getenv("PORT")) ? Integer.parseInt(System.getenv("PORT")) : 8080;
TemplateApi api = new TemplateApi(port);
api.start();
} catch (Throwable e) {
logger.error("Couldn't start server: {}", e.getMessage(), e);
System.exit(1);
}
}
public TemplateApi(int port) {
URI baseUri = UriBuilder.fromUri("http://0.0.0.0/").port(port).build();
ResourceConfig config = new ResourceConfig();
config.register(new LoggingFeature(java.util.logging.Logger.getLogger(this.getClass().getName()), Level.INFO, LoggingFeature.Verbosity.PAYLOAD_TEXT, 8192));
config.register(JacksonJaxbJsonProvider.class, MessageBodyReader.class, MessageBodyWriter.class);
config.packages(this.getClass().getPackage().getName() + ".providers");
config.packages(this.getClass().getPackage().getName() + ".resources");
this.port = port;
this.server = GrizzlyHttpServerFactory.createHttpServer(baseUri, config, false);
}
public void start() throws IOException {
server.start();
Runtime.getRuntime().addShutdownHook(new Thread(() -> {
try {
server.shutdown().get();
} catch (ExecutionException e) {
logger.error("Error while shutting down server: {}", e.getCause().getMessage(), e.getCause());
} catch (InterruptedException e) { /* noop */ }
}));
logger.info("Server started on port {}", port);
}
}
| [
"\"PORT\"",
"\"PORT\""
]
| []
| [
"PORT"
]
| [] | ["PORT"] | java | 1 | 0 | |
HackerRank/Algorithm/Implementation/ServiceLane.java | import java.io.*;
import java.math.*;
import java.security.*;
import java.text.*;
import java.util.*;
import java.util.concurrent.*;
import java.util.regex.*;
public class Solution {
// Complete the serviceLane function below.
static int[] serviceLane(int[] width, int[][] cases) {
// The widest vehicle that can pass a segment is the minimum lane width
// between the entry and exit indices; a linear scan finds it without
// copying and sorting a sub-array.
int n = cases.length;
int[] point = new int[n];
for (int i = 0; i < n; i++) {
int start = cases[i][0];
int exit = cases[i][1];
int min = width[start];
for (int j = start + 1; j <= exit; j++) {
min = Math.min(min, width[j]);
}
point[i] = min;
}
return point;
}
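// Worked example (illustrative): width = {2, 3, 1, 2, 3, 2, 3, 3} and the
// case (0, 3) covers widths {2, 3, 1, 2}, so the widest vehicle is 1.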
private static final Scanner scanner = new Scanner(System.in);
public static void main(String[] args) throws IOException {
BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(System.getenv("OUTPUT_PATH")));
String[] nt = scanner.nextLine().split(" ");
int n = Integer.parseInt(nt[0]);
int t = Integer.parseInt(nt[1]);
int[] width = new int[n];
String[] widthItems = scanner.nextLine().split(" ");
scanner.skip("(\r\n|[\n\r\u2028\u2029\u0085])?");
for (int i = 0; i < n; i++) {
int widthItem = Integer.parseInt(widthItems[i]);
width[i] = widthItem;
}
int[][] cases = new int[t][2];
for (int i = 0; i < t; i++) {
String[] casesRowItems = scanner.nextLine().split(" ");
scanner.skip("(\r\n|[\n\r\u2028\u2029\u0085])?");
for (int j = 0; j < 2; j++) {
int casesItem = Integer.parseInt(casesRowItems[j]);
cases[i][j] = casesItem;
}
}
int[] result = serviceLane(width, cases);
for (int i = 0; i < result.length; i++) {
bufferedWriter.write(String.valueOf(result[i]));
if (i != result.length - 1) {
bufferedWriter.write("\n");
}
}
bufferedWriter.newLine();
bufferedWriter.close();
scanner.close();
}
}
| [
"\"OUTPUT_PATH\""
]
| []
| [
"OUTPUT_PATH"
]
| [] | ["OUTPUT_PATH"] | java | 1 | 0 | |
temp.py | import ssl
from notifly import tf_notifier
import tensorflow as tf
from dotenv import load_dotenv
import os
load_dotenv()
# Disable TLS certificate verification so the Fashion-MNIST download succeeds
# on machines with a broken certificate store (a blunt workaround; avoid in production).
ssl._create_default_https_context = ssl._create_unverified_context
token = os.getenv('TOKEN')
notifier = tf_notifier.TfNotifier(token=token, platform='discord')
class TestCallback(tf.keras.callbacks.Callback):
@notifier.notify_on_epoch_begin(epoch_interval=1, graph_interval=10)
def on_epoch_begin(self, epoch, logs=None):
pass
@notifier.notify_on_epoch_end(epoch_interval=1, graph_interval=10)
def on_epoch_end(self, epoch, logs=None):
pass
@notifier.notify_on_train_begin()
def on_train_begin(self, logs=None):
pass
@notifier.notify_on_train_end()
def on_train_end(self, logs=None):
pass
fashion_mnist = tf.keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
model = tf.keras.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(5, activation='relu'),
tf.keras.layers.Dense(10)
])
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=5, callbacks=[TestCallback()])
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print('\nTest accuracy:', test_acc)
| []
| []
| [
"TOKEN"
]
| [] | ["TOKEN"] | python | 1 | 0 | |
nz_snow_tools/util/generate_mask.py | """
code to generate catchment masks for modelling
"""
from __future__ import division
import numpy as np
from nz_snow_tools.util.utils import create_mask_from_shpfile, setup_nztm_dem
# from nz_snow_tools.met.interp_met_data_hourly_vcsn_data import
import os
os.environ['PROJ_LIB'] = r'C:\miniconda\envs\nz_snow27\Library\share'
catchments = ['Wilkin']
# catchments = ['Clutha','Wilkin','Wanaka northern inflows','Upper Dart','Rees', 'Shotover', 'Teviot','Taieri','Upper Matukituki','Roaring Meg','Pomahaka','Motutapu',\
# 'Moonlight Creek','Matukituki', 'Manuherikia','Luggate Creek', 'Lochy','Lindis',\
# 'Kawarau','Greenstone','Hawea','Fraser','Clutha above Clyde Dam','Cardrona','Arrow' ,'Bannockburn Creek', 'Nevis'] # string identifying the catchment to run. must match the naming of the catchment shapefile
output_dem = 'nztm250m' # identifier for output dem
dem_folder = '' #'Z:/GIS_DATA/Topography/DEM_NZSOS/'
dem = 'modis_si_dem_250m'
mask_dem = True # boolean to set whether or not to mask the output dem
mask_created = False # boolean to set whether or not the mask has already been created
mask_folder = r'C:\Users\conwayjp\OneDrive - NIWA\projects\DSC Snow\MODIS\masks' # location of numpy catchment mask. must be writeable if mask_created == False
# shapefile containing polyline or polygon of catchment in WGS84
catchment_shp_folder = r'C:\Users\conwayjp\OneDrive - NIWA\projects\DSC Snow\MODIS\catchments'
# calculate model grid etc:
# output DEM
if dem == 'clutha_dem_250m':
dem_file = dem_folder + dem + '.tif'
nztm_dem, x_centres, y_centres, lat_array, lon_array = setup_nztm_dem(dem_file)
if dem == 'si_dem_250m':
dem_file = dem_folder + dem + '.tif'
nztm_dem, x_centres, y_centres, lat_array, lon_array = setup_nztm_dem(dem_file, extent_w=1.08e6, extent_e=1.72e6, extent_n=5.52e6,
extent_s=4.82e6,
resolution=250)
if dem == 'modis_si_dem_250m':
nztm_dem, x_centres, y_centres, lat_array, lon_array = setup_nztm_dem(None, extent_w=1.085e6, extent_e=1.72e6, extent_n=5.52e6, extent_s=4.82e6,
resolution=250)
for catchment in catchments:
mask_shpfile = catchment_shp_folder + '/{}.shp'.format(catchment)
mask = create_mask_from_shpfile(lat_array, lon_array, mask_shpfile)
np.save(mask_folder + '/{}_{}.npy'.format(catchment, dem), mask)
| []
| []
| [
"PROJ_LIB"
]
| [] | ["PROJ_LIB"] | python | 1 | 0 | |
caps/wsgi.py | """
WSGI config for caps project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'caps.settings')
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
multi_harvest_zoo/environment/game/game.py | import os
from multi_harvest_zoo.environment.game import graphic_pipeline
from multi_harvest_zoo.misc.game.utils import *
import pygame
import os.path
from collections import defaultdict
from datetime import datetime
from time import sleep
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = 'hide'
class Game:
def __init__(self, env, num_humans, ai_policies, max_steps=100, render=False):
self._running = True
self.env = env
self.play = bool(num_humans)
self.render = render or self.play
# Visual parameters
self.graphics_pipeline = graphic_pipeline.GraphicPipeline(env, self.render)
self.save_dir = 'misc/game/screenshots'
self.store = defaultdict(list)
self.num_humans = num_humans
self.ai_policies = ai_policies
self.max_steps = max_steps
self.current_step = 0
self.last_obs = env.reset()
self.step_done = False
self.yielding_action_dict = {}
assert len(ai_policies) == len(env.unwrapped.world.agents) - num_humans
if not os.path.exists(self.save_dir):
os.makedirs(self.save_dir)
def on_init(self):
pygame.init()
self.graphics_pipeline.on_init()
return True
def on_event(self, event):
self.step_done = False
if event.type == pygame.QUIT:
self._running = False
elif event.type == pygame.KEYDOWN:
# exit the game
if event.key == pygame.K_ESCAPE:
self._running = False
# Save current image
if event.key == pygame.K_RETURN:
image_name = '{}_{}.png'.format(self.env.unwrapped.filename, datetime.now().strftime('%m-%d-%y_%H-%M-%S'))
pygame.image.save(self.graphics_pipeline.screen, '{}/{}'.format(self.save_dir, image_name))
print('Saved image {} to {}'.format(image_name, self.save_dir))
return
# Control current human agent
if event.key in KeyToTuple_human1 and self.num_humans > 0:
store_action_dict = {}
action = KeyToTuple_human1[event.key]
self.env.unwrapped.world.agents[0].action = action
store_action_dict[self.env.unwrapped.world.agents[0]] = action
self.store["observation"].append(self.last_obs)
self.store["agent_states"].append([agent.location for agent in self.env.unwrapped.world.agents])
for idx, agent in enumerate(self.env.unwrapped.world.agents):
if idx >= self.num_humans:
ai_policy = self.ai_policies[idx - self.num_humans]
env_agent = self.env.unwrapped.world_agent_to_env_agent_mapping[agent]
last_obs_raw = self.last_obs[env_agent]
ai_action = ai_policy.get_action(last_obs_raw)
store_action_dict[agent] = ai_action
self.env.unwrapped.world.agents[idx].action = ai_action
self.yielding_action_dict = {agent: self.env.unwrapped.world_agent_mapping[agent].action
for agent in self.env.agents}
observations, rewards, dones, infos = self.env.step(self.yielding_action_dict)
self.store["actions"].append(store_action_dict)
self.store["info"].append(infos)
self.store["rewards"].append(rewards)
self.store["done"].append(dones)
print(rewards)
if all(dones.values()):
self._running = False
self.last_obs = observations
self.step_done = True
def ai_only_event(self):
self.step_done = False
store_action_dict = {}
self.store["observation"].append(self.last_obs)
self.store["agent_states"].append([agent.location for agent in self.env.unwrapped.world.agents])
for idx, agent in enumerate(self.env.unwrapped.world.agents):
if idx >= self.num_humans:
ai_policy = self.ai_policies[idx - self.num_humans]
env_agent = self.env.unwrapped.world_agent_to_env_agent_mapping[agent]
last_obs_raw = self.last_obs[env_agent]
ai_action = ai_policy.get_action(last_obs_raw)
store_action_dict[agent] = ai_action
self.env.unwrapped.world.agents[idx].action = ai_action
self.yielding_action_dict = {agent: self.env.unwrapped.world_agent_mapping[agent].action
for agent in self.env.agents}
observations, rewards, dones, infos = self.env.step(self.yielding_action_dict)
self.store["actions"].append(store_action_dict)
self.store["info"].append(infos)
self.store["rewards"].append(rewards)
self.store["done"].append(dones)
if all(dones.values()):
self._running = False
self.last_obs = observations
self.step_done = True
def on_execute(self):
self._running = self.on_init()
while self._running:
for event in pygame.event.get():
self.on_event(event)
self.on_render()
self.on_cleanup()
return self.store
def on_execute_yielding(self):
self._running = self.on_init()
while self._running:
for event in pygame.event.get():
self.on_event(event)
self.on_render()
if self.step_done:
self.step_done = False
yield self.store["observation"][-1], self.store["done"][-1], self.store["info"][-1], \
self.store["rewards"][-1], self.yielding_action_dict
self.on_cleanup()
def on_execute_ai_only_with_delay(self):
self._running = self.on_init()
while self._running:
sleep(0.2)
self.ai_only_event()
self.on_render()
self.on_cleanup()
return self.store
def on_render(self):
self.graphics_pipeline.on_render()
@staticmethod
def on_cleanup():
# pygame.display.quit()
pygame.quit()
def get_image_obs(self):
return self.graphics_pipeline.get_image_obs()
def save_image_obs(self, t):
self.graphics_pipeline.save_image_obs(t)
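# Minimal usage sketch (illustrative; assumes `env` follows the expected
# multi-agent API and every AI policy object exposes get_action(obs)):
#
#   game = Game(env, num_humans=1, ai_policies=[policy], render=True)
#   store = game.on_execute()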
| []
| []
| [
"PYGAME_HIDE_SUPPORT_PROMPT"
]
| [] | ["PYGAME_HIDE_SUPPORT_PROMPT"] | python | 1 | 0 | |
w3c-examples/w3c-junit5/src/test/java/JUnit5W3CFirefoxTest.java | import org.junit.jupiter.api.*;
import org.openqa.selenium.JavascriptExecutor;
import org.openqa.selenium.MutableCapabilities;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.firefox.FirefoxOptions;
import org.openqa.selenium.remote.DesiredCapabilities;
import org.openqa.selenium.remote.RemoteWebDriver;
import java.net.MalformedURLException;
import java.net.URL;
public class JUnit5W3CFirefoxTest {
protected WebDriver driver;
public Boolean result;
/**
* @BeforeEach is a JUnit 5 annotation that defines specific prerequisite test method behaviors.
In the example below we:
- Define Environment Variables for Sauce Credentials ("SAUCE_USERNAME" and "SAUCE_ACCESS_KEY")
- Define Firefox Options such as W3C protocol
- Define the "sauce:options" capabilities, indicated by the "sauceOpts" MutableCapability object
- Define the WebDriver capabilities, indicated by the "caps" DesiredCapabilities object
- Define the service URL for communicating with SauceLabs.com indicated by "sauceURL" string
- Set the URL to sauceURl
- Set the driver instance to a RemoteWebDriver
- Pass "url" and "caps" as parameters of the RemoteWebDriver
For more information visit the docs: https://junit.org/junit5/docs/5.0.2/api/org/junit/jupiter/api/BeforeEach.html
*/
@BeforeEach
public void setup(TestInfo testInfo) throws MalformedURLException {
String username = System.getenv("SAUCE_USERNAME");
String accessKey = System.getenv("SAUCE_ACCESS_KEY");
String methodName = testInfo.getDisplayName();
/** FirefoxOptions allows us to set browser-specific behavior such as profile settings, headless capabilities, insecure tls certs,
and in this example--the W3C protocol
For more information see: https://seleniumhq.github.io/selenium/docs/api/java/org/openqa/selenium/firefox/FirefoxOptions.html */
FirefoxOptions foxOpts = new FirefoxOptions();
foxOpts.setCapability("w3c", true);
/** The MutableCapabilities class came into existence with Selenium 3.6.0 and acts as the parent class for
all browser implementations--including the FirefoxOptions class extension.
For more information see: https://seleniumhq.github.io/selenium/docs/api/java/org/openqa/selenium/MutableCapabilities.html */
MutableCapabilities sauceOpts = new MutableCapabilities();
sauceOpts.setCapability("name", methodName);
sauceOpts.setCapability("build", "Java-W3C-Examples");
sauceOpts.setCapability("seleniumVersion", "3.141.59");
sauceOpts.setCapability("username", username);
sauceOpts.setCapability("accessKey", accessKey);
sauceOpts.setCapability("tags", testInfo.getTags());
/** Below we see the use of our other capability objects, 'foxOpts' and 'sauceOpts',
defined in 'moz:firefoxOptions' and sauce:options respectively.
*/
DesiredCapabilities caps = new DesiredCapabilities();
caps.setCapability("moz:firefoxOptions", foxOpts);
caps.setCapability("sauce:options", sauceOpts);
caps.setCapability("browserName", "firefox");
caps.setCapability("browserVersion", "latest");
caps.setCapability("platformName", "windows 10");
/** Finally, we pass our DesiredCapabilities object 'caps' as a parameter of our RemoteWebDriver instance */
String sauceUrl = "https://ondemand.saucelabs.com:443/wd/hub";
URL url = new URL(sauceUrl);
driver = new RemoteWebDriver(url, caps);
}
/**
* @Tag is a JUnit 5 annotation that defines test method tags that aid in reporting and filtering tests.
For more information visit the docs: https://junit.org/junit5/docs/5.0.2/api/org/junit/jupiter/api/Tag.html
*/
@Tag("w3c-firefox-tests")
/**
* @DisplayName is a JUnit 5 annotation that defines test case name.
For more information visit the docs: https://junit.org/junit5/docs/5.0.2/api/org/junit/jupiter/api/DisplayName.html
*/
@DisplayName("JUnit5W3CFirefoxTest()")
/**
* @Test is a JUnit 5 annotation that defines the actual test case, along with the test execution commands.
In the example below we:
- Navigate to our SUT (site under test), 'https://www.saucedemo.com'
- Store the current page title in a String called 'getTitle'
- Record whether the title matches in the Boolean 'result' (used by teardown)
- Assert that the page title equals "Swag Labs"
For more information visit the docs: https://junit.org/junit5/docs/5.0.2/api/org/junit/jupiter/api/Test.html
*/
@Test
public void Junit5w3cFirefoxTest() throws AssertionError {
driver.navigate().to("https://www.saucedemo.com");
String getTitle = driver.getTitle();
// Record the outcome before asserting so teardown() can still report a
// result to Sauce Labs when the assertion fails (otherwise `result` would
// remain null and the ternary in teardown() would throw).
result = getTitle.equals("Swag Labs");
// JUnit's assertEquals signature is (expected, actual).
Assertions.assertEquals("Swag Labs", getTitle);
}
/**
* @AfterEach is a JUnit 5 annotation that defines any postrequisite test method tasks .
In the example below we:
- Use the JavascriptExecutor class to send our test results to Sauce Labs with a "passed" flag
if the test was successful, or a"failed" flag if the test was unsuccessful.
- Teardown the RemoteWebDriver session with a 'driver.quit()' command so that the test VM doesn't hang.
For more information visit the docs: https://junit.org/junit5/docs/5.0.2/api/org/junit/jupiter/api/AfterEach.html
*/
@AfterEach
public void teardown() {
((JavascriptExecutor) driver).executeScript("sauce:job-result=" + (result ? "passed" : "failed"));
driver.quit();
}
}
| [
"\"SAUCE_USERNAME\"",
"\"SAUCE_ACCESS_KEY\""
]
| []
| [
"SAUCE_USERNAME",
"SAUCE_ACCESS_KEY"
]
| [] | ["SAUCE_USERNAME", "SAUCE_ACCESS_KEY"] | java | 2 | 0 | |
profiles/common.go | package profiles
import (
"encoding/csv"
"fmt"
"os"
"strconv"
"strings"
"github.com/cloudspannerecosystem/harbourbridge/common/utils"
go_ora "github.com/sijms/go-ora/v2"
)
// Parses input string `s` as a map of key-value pairs. It's expected that the
// input string is of the form "key1=value1,key2=value2,..." etc. Return error
// otherwise.
func parseProfile(s string) (map[string]string, error) {
params := make(map[string]string)
if len(s) == 0 {
return params, nil
}
// We use CSV reader to parse key=value pairs separated by a comma to
// handle the case where a value may contain a comma within a quote. We
// expect exactly one record to be returned.
r := csv.NewReader(strings.NewReader(s))
r.Comma = ','
r.TrimLeadingSpace = true
records, err := r.ReadAll()
if err != nil {
return params, err
}
if len(records) > 1 {
return params, fmt.Errorf("contains invalid newline characters")
}
for _, kv := range records[0] {
s := strings.Split(strings.TrimSpace(kv), "=")
if len(s) != 2 {
return params, fmt.Errorf("invalid key=value pair (expected format: key1=value1): %v", kv)
}
if _, ok := params[s[0]]; ok {
return params, fmt.Errorf("duplicate key found: %v", s[0])
}
params[s[0]] = s[1]
}
return params, nil
}
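// Illustrative behaviour (a fully quoted pair keeps commas inside the quotes):
//
//   parseProfile("host=localhost,port=3306") // => map[host:localhost port:3306]
//   parseProfile("host=a,host=b")            // => error: duplicate key found
//   parseProfile(`"user=a,b"`)               // => map[user:a,b]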
func GetSQLConnectionStr(sourceProfile SourceProfile) string {
sqlConnectionStr := ""
if sourceProfile.Ty == SourceProfileTypeConnection {
switch sourceProfile.Conn.Ty {
case SourceProfileConnectionTypeMySQL:
connParams := sourceProfile.Conn.Mysql
return getMYSQLConnectionStr(connParams.Host, connParams.Port, connParams.User, connParams.Pwd, connParams.Db)
case SourceProfileConnectionTypePostgreSQL:
connParams := sourceProfile.Conn.Pg
return getPGSQLConnectionStr(connParams.Host, connParams.Port, connParams.User, connParams.Pwd, connParams.Db)
case SourceProfileConnectionTypeDynamoDB:
// For DynamoDB, client provided by aws-sdk reads connection credentials from env variables only.
// Thus, there is no need to create sqlConnectionStr for the same. We instead set the env variables
// programmatically if not set.
return ""
case SourceProfileConnectionTypeSqlServer:
connParams := sourceProfile.Conn.SqlServer
return getSQLSERVERConnectionStr(connParams.Host, connParams.Port, connParams.User, connParams.Pwd, connParams.Db)
case SourceProfileConnectionTypeOracle:
connParams := sourceProfile.Conn.Oracle
return getORACLEConnectionStr(connParams.Host, connParams.Port, connParams.User, connParams.Pwd, connParams.Db)
}
}
return sqlConnectionStr
}
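// For reference, the shapes produced by the helpers below (all values
// illustrative):
//
//   MySQL:      user:pwd@tcp(host:3306)/db
//   PostgreSQL: host=host port=5432 user=user password=pwd dbname=db sslmode=disable
//   SQL Server: sqlserver://user:pwd@host:1433?database=db
//   Oracle:     built via go_ora.BuildUrl(host, port, service, user, pwd, nil)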
func GeneratePGSQLConnectionStr() (string, error) {
server := os.Getenv("PGHOST")
port := os.Getenv("PGPORT")
user := os.Getenv("PGUSER")
dbname := os.Getenv("PGDATABASE")
if server == "" || port == "" || user == "" || dbname == "" {
fmt.Printf("Please specify host, port, user and database using PGHOST, PGPORT, PGUSER and PGDATABASE environment variables\n")
return "", fmt.Errorf("could not connect to source database")
}
password := os.Getenv("PGPASSWORD")
if password == "" {
password = utils.GetPassword()
}
return getPGSQLConnectionStr(server, port, user, password, dbname), nil
}
func getPGSQLConnectionStr(server, port, user, password, dbname string) string {
return fmt.Sprintf("host=%s port=%s user=%s password=%s dbname=%s sslmode=disable", server, port, user, password, dbname)
}
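// For reference (illustrative, not part of the original file), the DSN built
// above for host=localhost, port=5432, user=alice, password=secret, dbname=mydb is:
//
//   host=localhost port=5432 user=alice password=secret dbname=mydb sslmode=disable
//
// Note that sslmode=disable is hard-coded, so TLS is never negotiated here.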
func GenerateMYSQLConnectionStr() (string, error) {
server := os.Getenv("MYSQLHOST")
port := os.Getenv("MYSQLPORT")
user := os.Getenv("MYSQLUSER")
dbname := os.Getenv("MYSQLDATABASE")
if server == "" || port == "" || user == "" || dbname == "" {
fmt.Printf("Please specify host, port, user and database using MYSQLHOST, MYSQLPORT, MYSQLUSER and MYSQLDATABASE environment variables\n")
return "", fmt.Errorf("could not connect to source database")
}
password := os.Getenv("MYSQLPWD")
if password == "" {
password = utils.GetPassword()
}
return getMYSQLConnectionStr(server, port, user, password, dbname), nil
}
func getMYSQLConnectionStr(server, port, user, password, dbname string) string {
return fmt.Sprintf("%s:%s@tcp(%s:%s)/%s", user, password, server, port, dbname)
}
func getSQLSERVERConnectionStr(server, port, user, password, dbname string) string {
return fmt.Sprintf(`sqlserver://%s:%s@%s:%s?database=%s`, user, password, server, port, dbname)
}
func GetSchemaSampleSize(sourceProfile SourceProfile) int64 {
schemaSampleSize := int64(100000)
if sourceProfile.Ty == SourceProfileTypeConnection {
if sourceProfile.Conn.Ty == SourceProfileConnectionTypeDynamoDB {
if sourceProfile.Conn.Dydb.SchemaSampleSize != 0 {
schemaSampleSize = sourceProfile.Conn.Dydb.SchemaSampleSize
}
}
}
return schemaSampleSize
}
func getORACLEConnectionStr(server, port, user, password, dbname string) string {
// Note: the Atoi error is ignored here; an unparsable port yields portNumber == 0.
portNumber, _ := strconv.Atoi(port)
return go_ora.BuildUrl(server, portNumber, dbname, user, password, nil)
}
| [
"\"PGHOST\"",
"\"PGPORT\"",
"\"PGUSER\"",
"\"PGDATABASE\"",
"\"PGPASSWORD\"",
"\"MYSQLHOST\"",
"\"MYSQLPORT\"",
"\"MYSQLUSER\"",
"\"MYSQLDATABASE\"",
"\"MYSQLPWD\""
]
| []
| [
"MYSQLPORT",
"PGPORT",
"PGDATABASE",
"PGUSER",
"MYSQLDATABASE",
"PGHOST",
"PGPASSWORD",
"MYSQLPWD",
"MYSQLUSER",
"MYSQLHOST"
]
| [] | ["MYSQLPORT", "PGPORT", "PGDATABASE", "PGUSER", "MYSQLDATABASE", "PGHOST", "PGPASSWORD", "MYSQLPWD", "MYSQLUSER", "MYSQLHOST"] | go | 10 | 0 | |
qa/rpc-tests/test_framework/test_framework.py | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Base class for RPC testing
import logging
import optparse
import os
import sys
import shutil
import tempfile
import traceback
from .util import (
initialize_chain,
start_nodes,
connect_nodes_bi,
sync_blocks,
sync_mempools,
stop_nodes,
stop_node,
enable_coverage,
check_json_precision,
initialize_chain_clean,
PortSeed,
)
from .authproxy import JSONRPCException
class BitcoinTestFramework(object):
def __init__(self):
self.num_nodes = 4
self.setup_clean_chain = False
self.nodes = None
def run_test(self):
raise NotImplementedError
def add_options(self, parser):
pass
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
if self.setup_clean_chain:
initialize_chain_clean(self.options.tmpdir, self.num_nodes)
else:
initialize_chain(self.options.tmpdir, self.num_nodes, self.options.cachedir)
def stop_node(self, num_node):
stop_node(self.nodes[num_node], num_node)
def setup_nodes(self):
return start_nodes(self.num_nodes, self.options.tmpdir)
    def setup_network(self, split=False):
self.nodes = self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
# If we joined network halves, connect the nodes from the joint
# on outward. This ensures that chains are properly reorganised.
if not split:
connect_nodes_bi(self.nodes, 1, 2)
sync_blocks(self.nodes[1:3])
sync_mempools(self.nodes[1:3])
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 2, 3)
self.is_network_split = split
self.sync_all()
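    # Illustrative topology sketch (not part of the original file): with the
    # default four nodes the "chain" is 0 <-> 1 <-> 2 <-> 3, and split_network()
    # below cuts the 1 <-> 2 link to yield the halves (0, 1) and (2, 3).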
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
assert not self.is_network_split
stop_nodes(self.nodes)
self.setup_network(True)
def sync_all(self):
if self.is_network_split:
sync_blocks(self.nodes[:2])
sync_blocks(self.nodes[2:])
sync_mempools(self.nodes[:2])
sync_mempools(self.nodes[2:])
else:
sync_blocks(self.nodes)
sync_mempools(self.nodes)
def join_network(self):
"""
Join the (previously split) network halves together.
"""
assert self.is_network_split
stop_nodes(self.nodes)
self.setup_network(False)
def main(self):
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave eboostds and test.* datadir on exit or error")
parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop eboostds after the test execution")
parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../../src"),
help="Source directory containing eboostd/eboost-cli (default: %default)")
parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../cache"),
help="Directory for caching pregenerated datadirs")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_option("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
# backup dir variable for removal at cleanup
self.options.root, self.options.tmpdir = self.options.tmpdir, self.options.tmpdir + '/' + str(self.options.port_seed)
if self.options.trace_rpc:
logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
if self.options.coveragedir:
enable_coverage(self.options.coveragedir)
PortSeed.n = self.options.port_seed
os.environ['PATH'] = self.options.srcdir+":"+self.options.srcdir+"/qt:"+os.environ['PATH']
check_json_precision()
success = False
try:
os.makedirs(self.options.tmpdir, exist_ok=False)
self.setup_chain()
self.setup_network()
self.run_test()
success = True
except JSONRPCException as e:
print("JSONRPC error: "+e.error['message'])
traceback.print_tb(sys.exc_info()[2])
except AssertionError as e:
print("Assertion failed: " + str(e))
traceback.print_tb(sys.exc_info()[2])
except KeyError as e:
print("key not found: "+ str(e))
traceback.print_tb(sys.exc_info()[2])
except Exception as e:
print("Unexpected exception caught during testing: " + repr(e))
traceback.print_tb(sys.exc_info()[2])
except KeyboardInterrupt as e:
print("Exiting after " + repr(e))
if not self.options.noshutdown:
print("Stopping nodes")
stop_nodes(self.nodes)
else:
print("Note: eboostds were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown and success:
print("Cleaning up")
shutil.rmtree(self.options.tmpdir)
if not os.listdir(self.options.root):
os.rmdir(self.options.root)
else:
print("Not cleaning up dir %s" % self.options.tmpdir)
if os.getenv("PYTHON_DEBUG", ""):
# Dump the end of the debug logs, to aid in debugging rare
# travis failures.
import glob
filenames = glob.glob(self.options.tmpdir + "/node*/regtest/debug.log")
MAX_LINES_TO_PRINT = 1000
for f in filenames:
print("From" , f, ":")
from collections import deque
print("".join(deque(open(f), MAX_LINES_TO_PRINT)))
if success:
print("Tests successful")
sys.exit(0)
else:
print("Failed")
sys.exit(1)
# Test framework for doing p2p comparison testing, which sets up some bitcoind
# binaries:
# 1 binary: test binary
# 2 binaries: 1 test binary, 1 ref binary
# n>2 binaries: 1 test binary, n-1 ref binaries
class ComparisonTestFramework(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 2
self.setup_clean_chain = True
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("EBOOSTD", "eboostd"),
help="eboostd binary to test")
parser.add_option("--refbinary", dest="refbinary",
default=os.getenv("EBOOSTD", "eboostd"),
help="eboostd binary to use for reference nodes (if any)")
def setup_network(self):
self.nodes = start_nodes(
self.num_nodes, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1']] * self.num_nodes,
binary=[self.options.testbinary] +
[self.options.refbinary]*(self.num_nodes-1))
| []
| []
| [
"PYTHON_DEBUG",
"EBOOSTD",
"PATH"
]
| [] | ["PYTHON_DEBUG", "EBOOSTD", "PATH"] | python | 3 | 0 | |
patches/sitecustomize.py | import os
kaggle_proxy_token = os.getenv("KAGGLE_DATA_PROXY_TOKEN")
bq_user_jwt = os.getenv("KAGGLE_BQ_USER_JWT")
if kaggle_proxy_token or bq_user_jwt:
from google.auth import credentials
from google.cloud import bigquery
from google.cloud.bigquery._http import Connection
# TODO: Update this to the correct kaggle.gcp path once we no longer inject modules
# from the worker.
from kaggle_gcp import PublicBigqueryClient
def monkeypatch_bq(bq_client, *args, **kwargs):
data_proxy_project = os.getenv("KAGGLE_DATA_PROXY_PROJECT")
specified_project = kwargs.get('project')
specified_credentials = kwargs.get('credentials')
if specified_project is None and specified_credentials is None:
print("Using Kaggle's public dataset BigQuery integration.")
return PublicBigqueryClient(*args, **kwargs)
else:
return bq_client(*args, **kwargs)
# Monkey patches BigQuery client creation to use proxy or user-connected GCP account.
# Deprecated in favor of Kaggle.DataProxyClient().
    # TODO: Remove this once users have migrated to that new interface.
bq_client = bigquery.Client
bigquery.Client = lambda *args, **kwargs: monkeypatch_bq(
bq_client, *args, **kwargs)
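# Illustrative sketch (not part of the original file) of the effect of the
# monkey patch above, assuming KAGGLE_DATA_PROXY_TOKEN is set:
#
#   from google.cloud import bigquery
#   client = bigquery.Client()  # no project/credentials supplied
#   # -> prints "Using Kaggle's public dataset BigQuery integration."
#   # -> returns a PublicBigqueryClient rather than a stock bigquery.Client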
| []
| []
| [
"KAGGLE_BQ_USER_JWT",
"KAGGLE_DATA_PROXY_PROJECT",
"KAGGLE_DATA_PROXY_TOKEN"
]
| [] | ["KAGGLE_BQ_USER_JWT", "KAGGLE_DATA_PROXY_PROJECT", "KAGGLE_DATA_PROXY_TOKEN"] | python | 3 | 0 | |
pkg/issuer/acme/dns/digitalocean/digitalocean.go | /*
Copyright 2020 The cert-manager Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package digitalocean implements a DNS provider for solving the DNS-01
// challenge using digitalocean DNS.
package digitalocean
import (
"context"
"fmt"
"os"
"strings"
"github.com/digitalocean/godo"
"golang.org/x/oauth2"
"github.com/cert-manager/cert-manager/pkg/issuer/acme/dns/util"
)
// DNSProvider is an implementation of the acme.ChallengeProvider interface
type DNSProvider struct {
dns01Nameservers []string
client *godo.Client
}
// NewDNSProvider returns a DNSProvider instance configured for digitalocean.
// The access token must be passed in the environment variable DIGITALOCEAN_TOKEN
func NewDNSProvider(dns01Nameservers []string) (*DNSProvider, error) {
token := os.Getenv("DIGITALOCEAN_TOKEN")
return NewDNSProviderCredentials(token, dns01Nameservers)
}
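// Illustrative usage sketch (not part of the original file); the nameserver
// below is a placeholder and error handling is elided:
//
//   // export DIGITALOCEAN_TOKEN=... beforehand
//   provider, _ := NewDNSProvider([]string{"8.8.8.8:53"})
//   _ = provider.Present("example.com", "_acme-challenge.example.com.", "txt-challenge-value")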
// NewDNSProviderCredentials uses the supplied credentials to return a
// DNSProvider instance configured for digitalocean.
func NewDNSProviderCredentials(token string, dns01Nameservers []string) (*DNSProvider, error) {
if token == "" {
return nil, fmt.Errorf("DigitalOcean token missing")
}
c := oauth2.NewClient(
context.Background(),
oauth2.StaticTokenSource(&oauth2.Token{AccessToken: token}),
)
return &DNSProvider{
dns01Nameservers: dns01Nameservers,
client: godo.NewClient(c),
}, nil
}
// Present creates a TXT record to fulfil the dns-01 challenge
func (c *DNSProvider) Present(domain, fqdn, value string) error {
// if DigitalOcean does not have this zone then we will find out later
zoneName, err := util.FindZoneByFqdn(fqdn, c.dns01Nameservers)
if err != nil {
return err
}
// check if the record has already been created
records, err := c.findTxtRecord(fqdn)
if err != nil {
return err
}
for _, record := range records {
if record.Type == "TXT" && record.Data == value {
return nil
}
}
createRequest := &godo.DomainRecordEditRequest{
Type: "TXT",
Name: fqdn,
Data: value,
TTL: 60,
}
_, _, err = c.client.Domains.CreateRecord(
context.Background(),
util.UnFqdn(zoneName),
createRequest,
)
if err != nil {
return err
}
return nil
}
// CleanUp removes the TXT record matching the specified parameters
func (c *DNSProvider) CleanUp(domain, fqdn, value string) error {
zoneName, err := util.FindZoneByFqdn(fqdn, c.dns01Nameservers)
if err != nil {
return err
}
records, err := c.findTxtRecord(fqdn)
if err != nil {
return err
}
for _, record := range records {
_, err = c.client.Domains.DeleteRecord(context.Background(), util.UnFqdn(zoneName), record.ID)
if err != nil {
return err
}
}
return nil
}
func (c *DNSProvider) findTxtRecord(fqdn string) ([]godo.DomainRecord, error) {
zoneName, err := util.FindZoneByFqdn(fqdn, c.dns01Nameservers)
if err != nil {
return nil, err
}
allRecords, _, err := c.client.Domains.Records(
context.Background(),
util.UnFqdn(zoneName),
nil,
)
if err != nil {
return nil, err
}
var records []godo.DomainRecord
// The record Name doesn't contain the zoneName, so strip the zone suffix
// from the fqdn before filtering the records.
targetName := fqdn
if strings.HasSuffix(fqdn, zoneName) {
targetName = fqdn[:len(fqdn)-len(zoneName)]
}
for _, record := range allRecords {
if util.ToFqdn(record.Name) == targetName {
records = append(records, record)
}
}
return records, nil
}
| [
"\"DIGITALOCEAN_TOKEN\""
]
| []
| [
"DIGITALOCEAN_TOKEN"
]
| [] | ["DIGITALOCEAN_TOKEN"] | go | 1 | 0 | |
pkg/codegen/internal/test/helpers.go | // Copyright 2016-2020, Pulumi Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package test
import (
"encoding/json"
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/pulumi/pulumi/pkg/v3/codegen/schema"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// GenPkgSignature corresponds to the shape of the codegen GeneratePackage functions.
type GenPkgSignature func(string, *schema.Package, map[string][]byte) (map[string][]byte, error)
// GeneratePackageFilesFromSchema loads a schema and generates files using the provided GeneratePackage function.
func GeneratePackageFilesFromSchema(schemaPath string, genPackageFunc GenPkgSignature) (map[string][]byte, error) {
// Read in, decode, and import the schema.
schemaBytes, err := ioutil.ReadFile(schemaPath)
if err != nil {
return nil, err
}
var pkgSpec schema.PackageSpec
err = json.Unmarshal(schemaBytes, &pkgSpec)
if err != nil {
return nil, err
}
pkg, err := schema.ImportSpec(pkgSpec, nil)
if err != nil {
return nil, err
}
return genPackageFunc("test", pkg, nil)
}
// LoadFiles loads the provided list of files from a directory.
func LoadFiles(dir, lang string, files []string) (map[string][]byte, error) {
result := map[string][]byte{}
for _, file := range files {
fileBytes, err := ioutil.ReadFile(filepath.Join(dir, lang, file))
if err != nil {
return nil, err
}
result[file] = fileBytes
}
return result, nil
}
func loadDirectory(fs map[string][]byte, root, path string) error {
entries, err := os.ReadDir(path)
if err != nil {
return err
}
for _, e := range entries {
entryPath := filepath.Join(path, e.Name())
if e.IsDir() {
if err = loadDirectory(fs, root, entryPath); err != nil {
return err
}
} else {
contents, err := os.ReadFile(entryPath)
if err != nil {
return err
}
name := filepath.ToSlash(entryPath[len(root)+1:])
fs[name] = contents
}
}
return nil
}
// LoadBaseline loads the contents of the given baseline directory.
func LoadBaseline(dir, lang string) (map[string][]byte, error) {
dir = filepath.Join(dir, lang)
fs := map[string][]byte{}
if err := loadDirectory(fs, dir, dir); err != nil {
return nil, err
}
return fs, nil
}
// ValidateFileEquality compares maps of files for equality.
func ValidateFileEquality(t *testing.T, actual, expected map[string][]byte) {
for name, file := range expected {
assert.Contains(t, actual, name)
assert.Equal(t, string(file), string(actual[name]), name)
}
for name := range actual {
if _, ok := expected[name]; !ok {
t.Logf("missing data for %s", name)
}
}
}
// If PULUMI_ACCEPT is set, writes out actual output to the expected
// file set, so we can continue enjoying golden tests without manually
// modifying the expected output.
func RewriteFilesWhenPulumiAccept(t *testing.T, dir, lang string, actual map[string][]byte) bool {
if os.Getenv("PULUMI_ACCEPT") == "" {
return false
}
baseline := filepath.Join(dir, lang)
// Remove the baseline directory's current contents.
entries, err := os.ReadDir(baseline)
switch {
case err == nil:
for _, e := range entries {
err = os.RemoveAll(filepath.Join(baseline, e.Name()))
require.NoError(t, err)
}
case os.IsNotExist(err):
// OK
default:
require.NoError(t, err)
}
for file, bytes := range actual {
relPath := filepath.FromSlash(file)
path := filepath.Join(dir, lang, relPath)
if err = os.MkdirAll(filepath.Dir(path), 0755); err != nil && !os.IsExist(err) {
require.NoError(t, err)
}
err = ioutil.WriteFile(path, bytes, 0600)
require.NoError(t, err)
}
return true
}
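// Illustrative workflow sketch (not part of the original file) for the
// golden-test flow described above; the package path is a placeholder:
//
//   PULUMI_ACCEPT=true go test ./pkg/codegen/...  // rewrite the expected baselines
//   go test ./pkg/codegen/...                     // verify output against baselines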
// CheckAllFilesGenerated ensures that the set of expected and actual files generated
// are exactly equivalent.
func CheckAllFilesGenerated(t *testing.T, actual, expected map[string][]byte) {
seen := map[string]bool{}
for x := range expected {
seen[x] = true
}
for a := range actual {
assert.Contains(t, seen, a, "Unexpected file generated: %s", a)
if seen[a] {
delete(seen, a)
}
}
for s := range seen {
assert.Fail(t, "No content generated for expected file %s", s)
}
}
| [
"\"PULUMI_ACCEPT\""
]
| []
| [
"PULUMI_ACCEPT"
]
| [] | ["PULUMI_ACCEPT"] | go | 1 | 0 | |
src/main/java/com/twilio/Twilio.java | package com.twilio;
import com.twilio.exception.ApiException;
import com.twilio.exception.AuthenticationException;
import com.twilio.exception.CertificateValidationException;
import com.twilio.http.HttpMethod;
import com.twilio.http.NetworkHttpClient;
import com.twilio.http.Request;
import com.twilio.http.Response;
import com.twilio.http.TwilioRestClient;
import java.util.Objects;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
/**
* Singleton class to initialize Twilio environment.
*/
public class Twilio {
public static final String VERSION = "8.1.0";
public static final String JAVA_VERSION = System.getProperty("java.version");
private static String username = System.getenv("TWILIO_ACCOUNT_SID");
private static String password = System.getenv("TWILIO_AUTH_TOKEN");
private static String accountSid; // username used if this is null
private static String region = System.getenv("TWILIO_REGION");
private static String edge = System.getenv("TWILIO_EDGE");
private static volatile TwilioRestClient restClient;
private static volatile ExecutorService executorService;
private Twilio() {
}
/*
* Ensures that the ExecutorService is shutdown when the JVM exits.
*/
static {
Runtime.getRuntime().addShutdownHook(new Thread() {
@Override
public void run() {
if (executorService != null) {
executorService.shutdownNow();
}
}
});
}
/**
* Initialize the Twilio environment.
*
* @param username account to use
* @param password auth token for the account
*/
public static synchronized void init(final String username, final String password) {
Twilio.setUsername(username);
Twilio.setPassword(password);
}
/**
* Initialize the Twilio environment.
*
* @param username account to use
* @param password auth token for the account
* @param accountSid account sid to use
*/
public static synchronized void init(final String username, final String password, final String accountSid) {
Twilio.setUsername(username);
Twilio.setPassword(password);
Twilio.setAccountSid(accountSid);
}
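    // Illustrative usage sketch (not part of the original file); the
    // credentials below are placeholders read from the environment:
    //
    //   Twilio.init(System.getenv("TWILIO_ACCOUNT_SID"),
    //               System.getenv("TWILIO_AUTH_TOKEN"));
    //   TwilioRestClient client = Twilio.getRestClient();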
/**
* Set the username.
*
* @param username account to use
* @throws AuthenticationException if username is null
*/
public static synchronized void setUsername(final String username) {
if (username == null) {
throw new AuthenticationException("Username can not be null");
}
if (!username.equals(Twilio.username)) {
Twilio.invalidate();
}
Twilio.username = username;
}
/**
* Set the auth token.
*
* @param password auth token to use
* @throws AuthenticationException if password is null
*/
public static synchronized void setPassword(final String password) {
if (password == null) {
throw new AuthenticationException("Password can not be null");
}
if (!password.equals(Twilio.password)) {
Twilio.invalidate();
}
Twilio.password = password;
}
/**
* Set the account sid.
*
* @param accountSid account sid to use
* @throws AuthenticationException if account sid is null
*/
public static synchronized void setAccountSid(final String accountSid) {
if (accountSid == null) {
throw new AuthenticationException("AccountSid can not be null");
}
if (!accountSid.equals(Twilio.accountSid)) {
Twilio.invalidate();
}
Twilio.accountSid = accountSid;
}
/**
* Set the region.
*
* @param region region to make request
*/
public static synchronized void setRegion(final String region) {
if (!Objects.equals(region, Twilio.region)) {
Twilio.invalidate();
}
Twilio.region = region;
}
/**
* Set the edge.
*
* @param edge edge to make request
*/
public static synchronized void setEdge(final String edge) {
if (!Objects.equals(edge, Twilio.edge)) {
Twilio.invalidate();
}
Twilio.edge = edge;
}
/**
* Returns (and initializes if not initialized) the Twilio Rest Client.
*
* @return the Twilio Rest Client
* @throws AuthenticationException if initialization required and either accountSid or authToken is null
*/
public static TwilioRestClient getRestClient() {
if (Twilio.restClient == null) {
synchronized (Twilio.class) {
if (Twilio.restClient == null) {
Twilio.restClient = buildRestClient();
}
}
}
return Twilio.restClient;
}
private static TwilioRestClient buildRestClient() {
if (Twilio.username == null || Twilio.password == null) {
throw new AuthenticationException(
"TwilioRestClient was used before AccountSid and AuthToken were set, please call Twilio.init()"
);
}
TwilioRestClient.Builder builder = new TwilioRestClient.Builder(Twilio.username, Twilio.password);
if (Twilio.accountSid != null) {
builder.accountSid(Twilio.accountSid);
}
builder.region(Twilio.region);
builder.edge(Twilio.edge);
return builder.build();
}
/**
* Use a custom rest client.
*
* @param restClient rest client to use
*/
public static void setRestClient(final TwilioRestClient restClient) {
synchronized (Twilio.class) {
Twilio.restClient = restClient;
}
}
/**
* Returns the Twilio executor service.
*
* @return the Twilio executor service
*/
public static ExecutorService getExecutorService() {
if (Twilio.executorService == null) {
synchronized (Twilio.class) {
if (Twilio.executorService == null) {
Twilio.executorService = Executors.newCachedThreadPool();
}
}
}
return Twilio.executorService;
}
/**
* Use a custom executor service.
*
* @param executorService executor service to use
*/
public static void setExecutorService(final ExecutorService executorService) {
synchronized (Twilio.class) {
Twilio.executorService = executorService;
}
}
/**
* Validate that we can connect to the new SSL certificate posted on api.twilio.com.
*
* @throws CertificateValidationException if the connection fails
*/
public static void validateSslCertificate() {
final NetworkHttpClient client = new NetworkHttpClient();
final Request request = new Request(HttpMethod.GET, "https://api.twilio.com:8443");
try {
final Response response = client.makeRequest(request);
if (!TwilioRestClient.SUCCESS.test(response.getStatusCode())) {
throw new CertificateValidationException(
"Unexpected response from certificate endpoint", request, response
);
}
} catch (final ApiException e) {
throw new CertificateValidationException("Could not get response from certificate endpoint", request);
}
}
/**
* Invalidates the volatile state held in the Twilio singleton.
*/
private static void invalidate() {
Twilio.restClient = null;
}
/**
* Attempts to gracefully shutdown the ExecutorService if it is present.
*/
public static synchronized void destroy() {
if (executorService != null) {
executorService.shutdown();
}
}
}
| [
"\"TWILIO_ACCOUNT_SID\"",
"\"TWILIO_AUTH_TOKEN\"",
"\"TWILIO_REGION\"",
"\"TWILIO_EDGE\""
]
| []
| [
"TWILIO_EDGE",
"TWILIO_REGION",
"TWILIO_AUTH_TOKEN",
"TWILIO_ACCOUNT_SID"
]
| [] | ["TWILIO_EDGE", "TWILIO_REGION", "TWILIO_AUTH_TOKEN", "TWILIO_ACCOUNT_SID"] | java | 4 | 0 | |
mpcf.go | package main
import (
"crypto/md5"
"database/sql"
"flag"
"fmt"
_ "github.com/mattn/go-sqlite3"
"io"
"io/ioutil"
"log"
"os"
"strings"
)
var (
verp = flag.Bool("version", false, "Show version info")
facetp = flag.Bool("facets", false, "List facets")
scanp = flag.Bool("scan", false, "Check for new/modified files and sweep db for orphan records")
cleanp = flag.Bool("cleandb", false, "Just clean db (no file scan)")
tagp = flag.Bool("tag", false, "Tag [dir] with [facet]")
getp = flag.Bool("get", false, "Get filenames for tracks tagged with [facet]")
mdflag = flag.String("musicdir", "", "Set location of your mpd music directory")
musicdir = ""
seen int64
touched int64
)
func init() {
flag.Parse()
config := os.Getenv("HOME") + "/.mpcf"
if *mdflag != "" {
err := ioutil.WriteFile(config, []byte(*mdflag), 0644)
if err != nil {
log.Fatal(err)
}
musicdir = *mdflag
} else {
mdbytes, err := ioutil.ReadFile(config)
if err != nil {
log.Fatal("Please run 'mpcf -musicdir /path/to/music' to set your musicdir path.")
}
musicdir = string(mdbytes)
}
}
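// Illustrative CLI usage (not part of the original file); paths and facet
// names below are placeholders:
//
//   mpcf -musicdir /home/user/music  // remember the music directory
//   mpcf -scan                       // index new/changed files and prune orphans
//   mpcf -tag ambient/eno chill      // tag a directory with a facet
//   mpcf -get chill                  // print filenames tagged "chill"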
func main() {
db, err := sql.Open("sqlite3", musicdir+"/.mpcf.db")
if err != nil {
log.Fatal(err)
}
defer db.Close()
// create db if needed
var tracks int
res := db.QueryRow("select count(id) from tracks")
err = res.Scan(&tracks)
if err != nil {
db.Exec("PRAGMA synchronous=0")
log.Println("Creating db")
createdb(db)
log.Println("Updating track list")
scandir("", db)
}
if *verp {
fmt.Println("This is mpcf v0.5.3")
os.Exit(0)
}
if *scanp {
db.Exec("PRAGMA synchronous=0")
scandir("", db)
cleandb(db)
os.Exit(0)
}
if *cleanp {
cleandb(db)
os.Exit(0)
}
if *tagp {
tagdir(flag.Args(), db)
os.Exit(0)
}
if *getp {
getfacettracks(flag.Args(), db)
os.Exit(0)
}
if *facetp {
lsfacets(db)
os.Exit(0)
}
var taggedtracks, tags, facets int
db.QueryRow("select count(tid) from t2f").Scan(&tags)
db.QueryRow("select count(distinct tid) from t2f").Scan(&taggedtracks)
db.QueryRow("select count(id) from facets").Scan(&facets)
fmt.Printf("%v tracks (%v tagged)\n%v tags\n%v facets\n", tracks, taggedtracks, tags, facets)
}
func lsfacets(db *sql.DB) {
rows, err := db.Query("SELECT facet FROM facets ORDER BY facet")
if err != nil {
log.Fatal(err)
}
defer rows.Close()
var f string
for rows.Next() {
if err := rows.Scan(&f); err != nil {
log.Fatal(err)
}
fmt.Println(f)
}
if err := rows.Err(); err != nil {
log.Fatal(err)
}
}
func getfacettracks(args []string, db *sql.DB) {
if len(args) != 1 {
log.Fatal("Too many/few arguments to -get; need a facet name")
}
var fid int
db.QueryRow("select id from facets where facet = ?", args[0]).Scan(&fid)
if fid == 0 {
return
}
rows, err := db.Query("SELECT filename FROM tracks WHERE id IN (SELECT DISTINCT tid FROM t2f WHERE fid = ?)", fid)
if err != nil {
log.Fatal(err)
}
defer rows.Close()
var name string
for rows.Next() {
if err := rows.Scan(&name); err != nil {
log.Fatal(err)
}
fmt.Println(name)
}
if err := rows.Err(); err != nil {
log.Fatal(err)
}
}
func tagdir(args []string, db *sql.DB) {
if len(args) != 2 {
log.Fatal("Too many/few arguments to -tag; need a directory and a facet")
}
// create the tag if it doesn't exist
var fid int
db.QueryRow("select id from facets where facet = ?", args[1]).Scan(&fid)
if fid == 0 {
db.Exec("insert into facets (facet) values (?)", args[1])
db.QueryRow("select id from facets where facet = ?", args[1]).Scan(&fid)
}
// now actually tag tracks under this dir
args[0] = strings.TrimRight(args[0], "/")
// Note: the original used strings.TrimLeft, which trims a character set rather
// than a prefix; strings.TrimPrefix is what is intended here.
args[0] = strings.TrimPrefix(args[0], "./")
args[0] = strings.TrimPrefix(args[0], musicdir)
args[0] = strings.TrimPrefix(args[0], "/")
tagdir2(args[0], fid, db)
}
func tagdir2(dir string, fid int, db *sql.DB) {
err := os.Chdir(musicdir + "/" + dir)
if err != nil {
log.Fatalf("Can't chdir to %v", dir)
}
ls, err := ioutil.ReadDir(".")
if err != nil {
log.Fatal(err)
}
for _, direntry := range ls {
name := dir + "/" + direntry.Name()
if direntry.IsDir() {
tagdir2(name, fid, db)
} else {
var tid, fcnt int
db.QueryRow("select id from tracks where filename = ?", name).Scan(&tid)
db.QueryRow("select count(tid) from t2f where tid = ? and fid = ?", tid, fid).Scan(&fcnt)
if fcnt > 0 {
continue
}
db.Exec("insert into t2f (tid, fid) values (?, ?)", tid, fid)
}
}
}
func createdb(db *sql.DB) {
var err error
var stmts = []string{
"create table tracks (id integer primary key, filename text unique, hash text unique)",
"create table facets (id integer primary key, facet text)",
"create table t2f (tid integer, fid integer)",
"create index fididx on t2f(fid)",
"create table config (key text, value text)",
"insert into config (key, value) values('mpdconf', '/etc/mpd.conf')",
}
for _, stmt := range stmts {
if err != nil {
break
}
_, err = db.Exec(stmt)
}
if err != nil {
log.Fatal(err)
}
}
func scandir(dir string, db *sql.DB) {
os.Chdir(musicdir + "/" + dir)
ls, err := ioutil.ReadDir(".")
if err != nil {
log.Fatal(err, dir)
}
for _, direntry := range ls {
if direntry.IsDir() {
if dir == "" {
scandir(direntry.Name(), db)
} else {
scandir(dir+"/"+direntry.Name(), db)
}
} else {
seen++
if seen%100 == 0 {
log.Printf("Processed %v tracks; updated %v\n", seen, touched)
}
name := dir + "/" + direntry.Name()
md5 := fmt.Sprintf("%x", calcMD5(direntry.Name()))
// _, err := db.Exec("INSERT OR REPLACE INTO tracks (filename, hash) VALUES(COALESCE((SELECT filename FROM tracks WHERE filename = ?),?), COALESCE((SELECT hash FROM tracks WHERE hash = ?), ?))", name, name, md5, md5)
r, err := db.Exec("INSERT OR IGNORE INTO tracks (filename, hash) VALUES(?, ?)", name, md5)
if err != nil {
log.Fatal(err)
}
touch, _ := r.RowsAffected()
touched += touch
//r, err = db.Exec("UPDATE tracks SET filename = ?, hash = ? WHERE filename = ?", name, md5, name)
//if err != nil {
// log.Fatal(err)
//}
}
}
}
func cleandb(db *sql.DB) {
log.Printf("Scanning db for orphaned records")
rows, err := db.Query("SELECT id, filename FROM tracks")
if err != nil {
log.Fatal(err)
}
defer rows.Close()
var id int64
var name string
for rows.Next() {
if err := rows.Scan(&id, &name); err != nil {
log.Fatal(err)
}
_, err = os.Stat(musicdir + "/" + name)
if err == nil {
continue
}
// remove track entry
_, err = db.Exec("delete from tracks where id = ?", id)
if err != nil {
log.Fatal(err)
}
// remove tag links
_, err = db.Exec("delete from t2f where tid = ?", id)
if err != nil {
log.Fatal(err)
}
log.Printf("Removed orphan record for %v\n", name)
}
if err := rows.Err(); err != nil {
log.Fatal(err)
}
_, err = db.Exec("vacuum")
}
func calcMD5(filename string) []byte {
file, err := os.Open(filename)
if err != nil {
log.Fatal(err)
}
defer file.Close()
hash := md5.New()
if _, err := io.CopyN(hash, file, 524288); err != nil && err != io.EOF {
log.Fatal(err)
}
return hash.Sum(nil)
}
| [
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | go | 1 | 0 | |
docs/examples/proxy/bot.go | package main
import (
"context"
"net"
"net/http"
"os"
"golang.org/x/net/proxy"
"github.com/andersfylling/disgord"
)
// In the event that the Discord connections need to be routed
// through a proxy, you can do so by using this approach. In
// this example we will be using SOCKS5, but any custom
// implementation can be used. You just configure your own http client.
//
// For REST methods, only the Do method is required, so any configuration or
// library that implements Do is good enough.
//
// For the websocket connection you must specify the WebsocketHttpClient config
// option. Currently there is an issue when specifying http.Client timeouts for
// websockets, which is why you have the option to specify both. When a
// WebsocketHttpClient is not specified, a default config is used.
func main() {
p, err := proxy.SOCKS5("tcp", "localhost:8080", nil, proxy.Direct)
if err != nil {
panic(err)
}
httpClient := &http.Client{
Transport: &http.Transport{
DialContext: func(ctx context.Context, network, addr string) (conn net.Conn, e error) {
return p.Dial(network, addr)
},
},
}
client := disgord.New(disgord.Config{
BotToken: os.Getenv("DISCORD_TOKEN"),
HttpClient: httpClient, // REST requests with proxy support
WebsocketHttpClient: httpClient, // Websocket setup with proxy support
})
defer client.Gateway().StayConnectedUntilInterrupted()
}
| [
"\"DISCORD_TOKEN\""
]
| []
| [
"DISCORD_TOKEN"
]
| [] | ["DISCORD_TOKEN"] | go | 1 | 0 | |
src/transformers/trainer_tf.py | """Tensorflow trainer class."""
import datetime
import logging
import math
import os
import sys
import warnings
from typing import Callable, Dict, Optional, Tuple
import numpy as np
import tensorflow as tf
from packaging.version import parse
from .modeling_tf_utils import TFPreTrainedModel
from .optimization_tf import GradientAccumulator, create_optimizer
from .trainer_utils import PREFIX_CHECKPOINT_DIR, EvalPrediction, PredictionOutput, is_wandb_available, set_seed
from .training_args_tf import TFTrainingArguments
if is_wandb_available():
import wandb
logger = logging.getLogger(__name__)
if parse(tf.__version__).release < (2, 2, 0):
logger.info(
"You need to run the TensorFlow trainer with at least the version 2.2.0, your version is {}".format(
tf.__version__
)
)
sys.exit(1)
class TFTrainer:
"""
TFTrainer is a simple but feature-complete training and eval loop for TensorFlow,
optimized for 🤗 Transformers.
Args:
model (:class:`~transformers.TFPreTrainedModel`):
The model to train, evaluate or use for predictions.
args (:class:`~transformers.TFTrainingArguments`):
The arguments to tweak training.
train_dataset (:class:`~tf.data.Dataset`, `optional`):
The dataset to use for training.
eval_dataset (:class:`~tf.data.Dataset`, `optional`):
The dataset to use for evaluation.
compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):
The function that will be used to compute metrics at evaluation. Must take a
:class:`~transformers.EvalPrediction` and return a dictionary string to metric values.
prediction_loss_only (:obj:`bool`, `optional`, defaults to `False`):
When performing evaluation and predictions, only returns the loss.
tb_writer (:obj:`tf.summary.SummaryWriter`, `optional`):
Object to write to TensorBoard.
optimizers (:obj:`Tuple[tf.keras.optimizers.Optimizer, tf.keras.optimizers.schedules.LearningRateSchedule]`, `optional`):
A tuple containing the optimizer and the scheduler to use. The optimizer default to an instance of
:class:`tf.keras.optimizers.Adam` if :obj:`args.weight_decay_rate` is 0 else an instance of
:class:`~transformers.AdamWeightDecay`. The scheduler will default to an instance of
:class:`tf.keras.optimizers.schedules.PolynomialDecay` if :obj:`args.num_warmup_steps` is 0 else
an instance of :class:`~transformers.WarmUp`.
"""
def __init__(
self,
model: TFPreTrainedModel,
args: TFTrainingArguments,
train_dataset: Optional[tf.data.Dataset] = None,
eval_dataset: Optional[tf.data.Dataset] = None,
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
prediction_loss_only=False,
tb_writer: Optional[tf.summary.SummaryWriter] = None,
optimizers: Tuple[tf.keras.optimizers.Optimizer, tf.keras.optimizers.schedules.LearningRateSchedule] = (
None,
None,
),
):
self.model = model
self.args = args
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.compute_metrics = compute_metrics
self.prediction_loss_only = prediction_loss_only
self.optimizer, self.lr_scheduler = optimizers
self.gradient_accumulator = GradientAccumulator()
self.global_step = 0
self.epoch_logging = 0
if tb_writer is not None:
self.tb_writer = tb_writer
else:
self.tb_writer = tf.summary.create_file_writer(self.args.logging_dir)
if is_wandb_available():
self.setup_wandb()
elif os.environ.get("WANDB_DISABLED") != "true":
logger.info(
"You are instantiating a Trainer but W&B is not installed. To use wandb logging, "
"run `pip install wandb; wandb login` see https://docs.wandb.com/huggingface."
)
set_seed(self.args.seed)
def get_train_tfdataset(self) -> tf.data.Dataset:
"""
Returns the training :class:`~tf.data.Dataset`.
Subclass and override this method if you want to inject some custom behavior.
"""
if self.train_dataset is None:
raise ValueError("Trainer: training requires a train_dataset.")
self.total_train_batch_size = self.args.train_batch_size * self.args.gradient_accumulation_steps
self.num_train_examples = tf.data.experimental.cardinality(self.train_dataset).numpy()
if self.num_train_examples < 0:
raise ValueError("The training dataset must have an asserted cardinality")
ds = (
self.train_dataset.repeat()
.shuffle(self.num_train_examples, seed=self.args.seed)
.batch(self.total_train_batch_size, drop_remainder=self.args.dataloader_drop_last)
.prefetch(tf.data.experimental.AUTOTUNE)
)
return self.args.strategy.experimental_distribute_dataset(ds)
def get_eval_tfdataset(self, eval_dataset: Optional[tf.data.Dataset] = None) -> tf.data.Dataset:
"""
Returns the evaluation :class:`~tf.data.Dataset`.
Args:
eval_dataset (:class:`~tf.data.Dataset`, `optional`):
If provided, will override `self.eval_dataset`.
Subclass and override this method if you want to inject some custom behavior.
"""
if eval_dataset is None and self.eval_dataset is None:
raise ValueError("Trainer: evaluation requires an eval_dataset.")
eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
num_examples = tf.data.experimental.cardinality(eval_dataset).numpy()
if num_examples < 0:
raise ValueError("The training dataset must have an asserted cardinality")
approx = math.floor if self.args.dataloader_drop_last else math.ceil
steps = approx(num_examples / self.args.eval_batch_size)
ds = (
eval_dataset.repeat()
.batch(self.args.eval_batch_size, drop_remainder=self.args.dataloader_drop_last)
.prefetch(tf.data.experimental.AUTOTUNE)
)
return self.args.strategy.experimental_distribute_dataset(ds), steps, num_examples
def get_test_tfdataset(self, test_dataset: tf.data.Dataset) -> tf.data.Dataset:
"""
Returns a test :class:`~tf.data.Dataset`.
Args:
test_dataset (:class:`~tf.data.Dataset`): The dataset to use.
Subclass and override this method if you want to inject some custom behavior.
"""
num_examples = tf.data.experimental.cardinality(test_dataset).numpy()
if num_examples < 0:
raise ValueError("The training dataset must have an asserted cardinality")
approx = math.floor if self.args.dataloader_drop_last else math.ceil
steps = approx(num_examples / self.args.eval_batch_size)
ds = (
test_dataset.repeat()
.batch(self.args.eval_batch_size, drop_remainder=self.args.dataloader_drop_last)
.prefetch(tf.data.experimental.AUTOTUNE)
)
return self.args.strategy.experimental_distribute_dataset(ds), steps, num_examples
def create_optimizer_and_scheduler(self, num_training_steps: int):
"""
Setup the optimizer and the learning rate scheduler.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
TFTrainer's init through :obj:`optimizers`, or subclass and override this method.
"""
if not self.optimizer and not self.lr_scheduler:
self.optimizer, self.lr_scheduler = create_optimizer(
self.args.learning_rate,
num_training_steps,
self.args.warmup_steps,
adam_beta1=self.args.adam_beta1,
adam_beta2=self.args.adam_beta2,
adam_epsilon=self.args.adam_epsilon,
weight_decay_rate=self.args.weight_decay,
)
def setup_wandb(self):
"""
Setup the optional Weights & Biases (`wandb`) integration.
One can subclass and override this method to customize the setup if needed. Find more information
`here <https://docs.wandb.com/huggingface>`__. You can also override the following environment variables:
Environment:
WANDB_PROJECT:
(Optional): str - "huggingface" by default, set this to a custom string to store results in a different project
WANDB_DISABLED:
(Optional): boolean - defaults to false, set to "true" to disable wandb entirely
"""
if hasattr(self, "_setup_wandb"):
warnings.warn(
"The `_setup_wandb` method is deprecated and won't be called in a future version, define `setup_wandb` in your subclass.",
FutureWarning,
)
return self._setup_wandb()
logger.info('Automatic Weights & Biases logging enabled, to disable set os.environ["WANDB_DISABLED"] = "true"')
wandb.init(project=os.getenv("WANDB_PROJECT", "huggingface"), config=vars(self.args))
def prediction_loop(
self,
dataset: tf.data.Dataset,
steps: int,
num_examples: int,
description: str,
prediction_loss_only: Optional[bool] = None,
) -> PredictionOutput:
"""
Prediction/evaluation loop, shared by :func:`~transformers.TFTrainer.evaluate` and
:func:`~transformers.TFTrainer.predict`.
Works both with or without labels.
"""
if hasattr(self, "_prediction_loop"):
warnings.warn(
"The `_prediction_loop` method is deprecated and won't be called in a future version, define `prediction_loop` in your subclass.",
FutureWarning,
)
return self._prediction_loop(
dataset, steps, num_examples, description, prediction_loss_only=prediction_loss_only
)
prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else self.prediction_loss_only
logger.info("***** Running %s *****", description)
logger.info(" Num examples = %d", num_examples)
logger.info(" Batch size = %d", self.args.eval_batch_size)
label_ids: np.ndarray = None
preds: np.ndarray = None
self.eval_loss = tf.keras.metrics.Sum()
# Reset the past mems state at the beginning of the evaluation if necessary.
if self.args.past_index >= 0:
self._past = None
for step, batch in enumerate(dataset):
logits = self.distributed_prediction_steps(batch)
_, labels = batch
if not prediction_loss_only:
if isinstance(logits, tuple):
logits = logits[0]
if isinstance(labels, tuple):
labels = labels[0]
if self.args.n_replicas > 1:
for val in logits.values:
if preds is None:
preds = val.numpy()
else:
preds = np.append(preds, val.numpy(), axis=0)
for val in labels.values:
if label_ids is None:
label_ids = val.numpy()
else:
label_ids = np.append(label_ids, val.numpy(), axis=0)
else:
if preds is None:
preds = logits.numpy()
else:
preds = np.append(preds, logits.numpy(), axis=0)
if label_ids is None:
label_ids = labels.numpy()
else:
label_ids = np.append(label_ids, labels.numpy(), axis=0)
if step == steps:
break
if self.compute_metrics is not None and preds is not None and label_ids is not None:
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
metrics["eval_loss"] = self.eval_loss.result().numpy() / (steps * self.args.eval_batch_size)
for key in list(metrics.keys()):
if not key.startswith("eval_"):
metrics[f"eval_{key}"] = metrics.pop(key)
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of training
delattr(self, "_past")
return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
def log(self, logs: Dict[str, float]) -> None:
"""
Log :obj:`logs` on the various objects watching training.
Subclass and override this method to inject custom behavior.
Args:
logs (:obj:`Dict[str, float]`):
The values to log.
"""
if hasattr(self, "_log"):
warnings.warn(
"The `_log` method is deprecated and won't be called in a future version, define `log` in your subclass.",
FutureWarning,
)
return self._log(logs)
logs["epoch"] = self.epoch_logging
if self.tb_writer:
with self.tb_writer.as_default():
for k, v in logs.items():
tf.summary.scalar(k, v, step=self.global_step)
self.tb_writer.flush()
if is_wandb_available():
wandb.log(logs, step=self.global_step)
output = {**logs, **{"step": self.global_step}}
logger.info(output)
def evaluate(self, eval_dataset: Optional[tf.data.Dataset] = None) -> Dict[str, float]:
"""
Run evaluation and returns metrics.
The calling script will be responsible for providing a method to compute metrics, as they are
task-dependent (pass it to the init :obj:`compute_metrics` argument).
Args:
eval_dataset (:class:`~tf.data.Dataset`, `optional`):
Pass a dataset if you wish to override :obj:`self.eval_dataset`.
Returns:
A dictionary containing the evaluation loss and the potential metrics computed from the predictions.
"""
eval_ds, steps, num_examples = self.get_eval_tfdataset(eval_dataset)
        output = self.prediction_loop(eval_ds, steps, num_examples, description="Evaluation")
logs = {**output.metrics}
logs["epoch"] = self.epoch_logging
self.log(logs)
return output.metrics
def prediction_step(self, features: tf.Tensor, labels: tf.Tensor) -> tf.Tensor:
"""
Compute the prediction on features and update the loss with labels.
Subclass and override to inject some custom behavior.
"""
per_example_loss, logits = self.run_model(features, labels, False)
self.eval_loss.update_state(per_example_loss)
return logits
@tf.function
def distributed_prediction_steps(self, batch):
logits = self.args.strategy.run(self.prediction_step, batch)
return logits
def train(self) -> None:
"""
Train method to train the model.
"""
train_ds = self.get_train_tfdataset()
if self.args.debug:
tf.summary.trace_on(graph=True, profiler=True)
self.gradient_accumulator.reset()
if self.args.max_steps > 0:
t_total = self.args.max_steps
self.steps_per_epoch = self.args.max_steps
else:
approx = math.floor if self.args.dataloader_drop_last else math.ceil
self.steps_per_epoch = approx(self.num_train_examples / self.total_train_batch_size)
t_total = self.steps_per_epoch * self.args.num_train_epochs
with self.args.strategy.scope():
self.create_optimizer_and_scheduler(num_training_steps=t_total)
iterations = self.optimizer.iterations
self.global_step = iterations.numpy()
folder = os.path.join(self.args.output_dir, PREFIX_CHECKPOINT_DIR)
ckpt = tf.train.Checkpoint(optimizer=self.optimizer, model=self.model)
self.model.ckpt_manager = tf.train.CheckpointManager(ckpt, folder, max_to_keep=self.args.save_total_limit)
if self.model.ckpt_manager.latest_checkpoint:
epochs_trained = self.global_step // (self.num_train_examples // self.args.gradient_accumulation_steps)
steps_trained_in_current_epoch = self.global_step % (
self.num_train_examples // self.args.gradient_accumulation_steps
)
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", self.global_step)
logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
logger.info(
"Checkpoint file %s found and restoring from checkpoint", self.model.ckpt_manager.latest_checkpoint
)
ckpt.restore(self.model.ckpt_manager.latest_checkpoint).expect_partial()
else:
epochs_trained = 1
tf.summary.experimental.set_step(iterations)
epochs = 1 if self.args.max_steps > 0 else self.args.num_train_epochs
if self.args.fp16:
policy = tf.keras.mixed_precision.experimental.Policy("mixed_float16")
tf.keras.mixed_precision.experimental.set_policy(policy)
with self.tb_writer.as_default():
tf.summary.text("args", self.args.to_json_string())
self.tb_writer.flush()
logger.info("***** Running training *****")
logger.info(" Num examples = %d", self.num_train_examples)
logger.info(" Num Epochs = %d", epochs)
logger.info(" Instantaneous batch size per device = %d", self.args.per_device_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d", self.total_train_batch_size
)
logger.info(" Gradient Accumulation steps = %d", self.args.gradient_accumulation_steps)
logger.info(" Steps per epoch = %d", self.steps_per_epoch)
logger.info(" Total optimization steps = %d", t_total)
self.train_loss = tf.keras.metrics.Sum()
start_time = datetime.datetime.now()
for epoch_iter in range(epochs_trained, int(epochs + 1)):
# Reset the past mems state at the beginning of each epoch if necessary.
if self.args.past_index >= 0:
self._past = None
for step, batch in enumerate(train_ds):
self.global_step = iterations.numpy()
self.epoch_logging = epoch_iter - 1 + (step + 1) / self.steps_per_epoch
self.distributed_training_steps(batch)
training_loss = self.train_loss.result() / ((step + 1) * self.total_train_batch_size)
if self.args.debug:
logs = {}
logs["loss"] = training_loss.numpy()
logs["epoch"] = self.epoch_logging
self.log(logs)
if self.global_step == 1 and self.args.debug:
with self.tb_writer.as_default():
tf.summary.trace_export(
name="training", step=self.global_step, profiler_outdir=self.args.logging_dir
)
if (
self.global_step > 0
and self.args.evaluate_during_training
and self.global_step % self.args.eval_steps == 0
):
self.evaluate()
if (self.global_step > 0 and self.global_step % self.args.logging_steps == 0) or (
self.global_step == 1 and self.args.logging_first_step
):
logs = {}
logs["loss"] = training_loss.numpy()
logs["learning_rate"] = self.lr_scheduler(self.global_step).numpy()
logs["epoch"] = self.epoch_logging
self.log(logs)
if self.global_step > 0 and self.global_step % self.args.save_steps == 0:
ckpt_save_path = self.model.ckpt_manager.save()
logger.info("Saving checkpoint for step {} at {}".format(self.global_step, ckpt_save_path))
if self.global_step > 0 and self.global_step % self.steps_per_epoch == 0:
break
self.train_loss.reset_states()
end_time = datetime.datetime.now()
logger.info("Training took: {}".format(str(end_time - start_time)))
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of training
delattr(self, "_past")
def training_step(self, features, labels):
"""
Perform a training step on features and labels.
Subclass and override to inject some custom behavior.
"""
per_example_loss, _ = self.run_model(features, labels, True)
scaled_loss = per_example_loss / self.total_train_batch_size
gradients = tf.gradients(scaled_loss, self.model.trainable_variables)
gradients = [
g if g is not None else tf.zeros_like(v) for g, v in zip(gradients, self.model.trainable_variables)
]
if self.args.gradient_accumulation_steps > 1:
self.gradient_accumulator(gradients)
self.train_loss.update_state(per_example_loss)
if self.args.gradient_accumulation_steps == 1:
return gradients
def apply_gradients(self, features, labels):
if self.args.gradient_accumulation_steps == 1:
gradients = self.training_step(features, labels)
self.optimizer.apply_gradients(list(zip(gradients, self.model.trainable_variables)))
else:
for _ in tf.range(self.args.gradient_accumulation_steps):
reduced_features = features[: self.args.train_batch_size / self.args.n_replicas]
reduced_labels = labels[: self.args.train_batch_size / self.args.n_replicas]
self.training_step(reduced_features, reduced_labels)
features = tf.concat(
[features[self.args.train_batch_size / self.args.n_replicas :], reduced_features], axis=0
)
gradients = self.gradient_accumulator.gradients
gradients = [
(tf.clip_by_value(grad, -self.args.max_grad_norm, self.args.max_grad_norm)) for grad in gradients
]
self.optimizer.apply_gradients(list(zip(gradients, self.model.trainable_variables)))
self.gradient_accumulator.reset()
@tf.function
def distributed_training_steps(self, batch):
with self.args.strategy.scope():
self.args.strategy.run(self.apply_gradients, batch)
def run_model(self, features, labels, training):
"""
Computes the loss of the given features and labels pair.
Subclass and override this method if you want to inject some custom behavior.
Args:
features (:obj:`tf.Tensor`): A batch of input features.
labels (:obj:`tf.Tensor`): A batch of labels.
training (:obj:`bool`): Whether or not to run the model in training mode.
Returns:
A tuple of two :obj:`tf.Tensor`: The loss and logits.
"""
if hasattr(self, "_run_model"):
warnings.warn(
"The `_run_model` method is deprecated and won't be called in a future version, define `run_model` in your subclass.",
FutureWarning,
)
return self._run_model(features, labels, training)
if self.args.past_index >= 0 and getattr(self, "_past", None) is not None:
features["mems"] = self._past
if isinstance(labels, (dict)):
outputs = self.model(features, training=training, **labels)[:2]
else:
outputs = self.model(features, labels=labels, training=training)[:2]
loss, logits = outputs[:2]
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index]
return loss, logits
def predict(self, test_dataset: tf.data.Dataset) -> PredictionOutput:
"""
Run prediction and returns predictions and potential metrics.
Depending on the dataset and your use case, your test dataset may contain labels.
In that case, this method will also return metrics, like in :obj:`evaluate()`.
Args:
test_dataset (:class:`~tf.data.Dataset`):
Dataset to run the predictions on.
Returns:
`NamedTuple`:
predictions (:obj:`np.ndarray`):
The predictions on :obj:`test_dataset`.
label_ids (:obj:`np.ndarray`, `optional`):
The labels (if the dataset contained some).
metrics (:obj:`Dict[str, float]`, `optional`):
The potential dictionary of metrics (if the dataset contained labels).
"""
test_ds, steps, num_examples = self.get_test_tfdataset(test_dataset)
return self.prediction_loop(test_ds, steps, num_examples, description="Prediction")
def save_model(self, output_dir: Optional[str] = None):
"""
Will save the model, so you can reload it using :obj:`from_pretrained()`.
"""
output_dir = output_dir if output_dir is not None else self.args.output_dir
logger.info("Saving model in {}".format(output_dir))
if not isinstance(self.model, TFPreTrainedModel):
raise ValueError("Trainer.model appears to not be a PreTrainedModel")
self.model.save_pretrained(output_dir)
| []
| []
| [
"WANDB_DISABLED",
"WANDB_PROJECT"
]
| [] | ["WANDB_DISABLED", "WANDB_PROJECT"] | python | 2 | 0 | |
app/Request.py | from fhirclient.models import (
bundle as bdl
)
class Request:
def __init__(self, method, url):
self.method = method
self.url = url
def to_fhir(self):
request = bdl.BundleEntryRequest()
method = self.method
request.method = method
url = self.url
request.url = url
        return request
| []
| []
| []
| [] | [] | python | null | null | null |
apps/dash-vtk-tutorials/explorer.py | from importlib import import_module
from inspect import getsource
from copy import deepcopy
import json
import os
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import dash_bootstrap_components as dbc
def prepend_recursive(component, prefix: str) -> None:
    """Recursively prepend ``prefix`` to component ids (in-place modification)."""
if hasattr(component, "id"):
if type(component.id) == str:
component.id = prefix + component.id
elif type(component.id) == dict:
key = "type"
if key in component.id:
component.id[key] = prefix + component.id[key]
if hasattr(component, "children") and component.children is not None:
for child in component.children:
prepend_recursive(child, prefix)
def prepend_list_of_dict(ls: list, prefix: str) -> list:
new_ls = []
for di in ls:
di = deepcopy(di)
try: # is a dictionary
di_id = json.loads(di["id"])
key = "type"
if key in di_id:
di_id[key] = prefix + di_id[key]
di["id"] = json.dumps(di_id).replace(" ", "")
except ValueError: # is a string
di["id"] = prefix + di["id"]
new_ls.append(di)
return new_ls
def prepend_callback_map(di: dict, prefix: str) -> dict:
new_di = {}
for k, v in di.items():
v = deepcopy(v)
v["inputs"] = prepend_list_of_dict(v["inputs"], prefix)
v["state"] = prepend_list_of_dict(v["state"], prefix)
new_di[prefix + k] = v
return new_di
def prepend_callback_list(ls: list, prefix: str) -> list:
new_ls = []
for di in ls:
di = deepcopy(di)
if type(di["output"]) == list:
di["output"] = prepend_list_of_dict(di["output"], prefix)
else:
di["output"] = prefix + di["output"]
di["inputs"] = prepend_list_of_dict(di["inputs"], prefix)
di["state"] = prepend_list_of_dict(di["state"], prefix)
new_ls.append(di)
return new_ls
def name_to_label(x):
return (
x.replace("_", " ")
.replace("t0", "Tutorial #")
.replace("s0", "Webinar Demo #")
.title()
.replace("Vtk", "VTK")
)
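# Illustrative examples (not part of the original file) of name_to_label:
#
#   name_to_label("t01_basic_demo")  -> "Tutorial #1 Basic Demo"
#   name_to_label("s02_vtk_intro")   -> "Webinar Demo #2 VTK Intro"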
def Header(name, app):
title = html.H2(name, style={"display": "inline-flex"})
logo = html.Img(
src=app.get_asset_url("dash-logo.png"),
style={
"height": 60,
"display": "inline-flex",
"margin-top": "-10px",
"margin-right": "5px",
},
)
link = html.A(logo, href="https://plotly.com/dash/", target="_blank")
return html.Div([link, title])
def display_demo(name, layout, code):
download_btn = html.A(
html.Button(
"Download",
style={
"width": "90px",
"margin": "auto",
"padding": "0px",
"font-size": "10px",
"height": "35px",
"border-radius": "2px",
},
),
href=app.get_asset_url(name + ".py"),
download="app.py",
style={"position": "absolute", "top": "1.5em", "right": "1.5em"},
)
return html.Div(
[
html.Div(
[download_btn, dcc.Markdown(f"```\n{code}\n```"),],
style={
"float": "left",
"width": "49%",
"height": "85vh",
"overflow-y": "auto",
"position": "relative",
"background-color": "#F7FAFC",
"border": "1px solid #A1ACC3",
"border-right": "none",
},
),
html.Div(
layout,
style={
"float": "left",
"width": "48%",
"padding": "5px 1% 5px 1%",
"height": "calc(85vh - 10px)",
"overflow-y": "auto",
"border": "1px solid #A1ACC3",
},
),
]
)
prefix_ignored = []
ignored_pages = ["data", "requirements.txt"]
app = dash.Dash(
__name__,
suppress_callback_exceptions=True,
external_stylesheets=[dbc.themes.COSMO],
)
server = app.server
app_subdomain = os.getenv("APP_SUBDOMAIN", "dash-vtk-tutorials")
pages = [
p.replace(".py", "")
for p in sorted(os.listdir("demos"))
if p not in ignored_pages and p.endswith(".py")
]
print(pages)
modules = {p: import_module(f"demos.{p}") for p in pages}
apps = {p: m.app for p, m in modules.items()}
source_codes = {p: getsource(m) for p, m in modules.items()}
notfound_404 = html.Div(
[
html.H1("404"),
"Webpage not found. Please contact us if a page is supposed to be here.",
]
)
app.layout = dbc.Container(
children=[
dbc.Row(
style={"height": "10%", "align-items": "center"},
children=[
dbc.Col([Header("VTK Tutorials", app),], width=8,),
dbc.Col(
dbc.Spinner(
dbc.Select(
id="app-choice",
placeholder="Please select an app...",
style={"width": "100%"},
options=[
{"label": name_to_label(x), "value": x} for x in pages
],
),
),
width=4,
),
],
),
html.Div(id="display", style={"height": "90%"}),
dcc.Location(id="url", refresh=True),
],
style={"height": "calc(100vh - 15px)"},
fluid=True,
)
for k in apps:
new_callback_map = apps[k].callback_map
new_callback_list = apps[k]._callback_list
# Prepend to layout IDs recursively in-place
# if k in prefix_ignored:
# new_callback_map = apps[k].callback_map
# new_callback_list = apps[k]._callback_list
# else:
# prepend_recursive(apps[k].layout, prefix=k + "-")
# new_callback_map = prepend_callback_map(apps[k].callback_map, prefix=k + "-")
# new_callback_list = prepend_callback_list(apps[k]._callback_list, prefix=k + "-")
app.callback_map.update(new_callback_map)
app._callback_list.extend(new_callback_list)
@app.callback(
[Output("url", "pathname"), Output("url", "refresh")], Input("app-choice", "value")
)
def update_url(name):
if name is None:
return dash.no_update, dash.no_update
return f"/{app_subdomain}/{name}", True
@app.callback(
[Output("display", "children"), Output("app-choice", "options")],
[Input("url", "pathname")],
)
def display_content(pathname):
if app_subdomain in pathname:
name = pathname.split("/")[-1]
if name == "":
return html.P("Please select an app from the dropdown"), dash.no_update
elif name in pages:
# return display_demo(
# name=name, layout=apps[name].layout, code=source_codes[name]
# )
return apps[name].layout.children, dash.no_update
else:
return notfound_404, dash.no_update
return dash.no_update, dash.no_update
if __name__ == "__main__":
app.run_server(debug=True)
| []
| []
| [
"APP_SUBDOMAIN"
]
| [] | ["APP_SUBDOMAIN"] | python | 1 | 0 | |
Fire_detect/HostFrame.py | import sys
import numpy as np
import cv2
import math
from PyQt5 import Qt
from PyQt5 import QtCore
from PyQt5.QtGui import QImage, QPixmap,QFont,QIcon
from PyQt5.QtWidgets import (QApplication,QDialog, QFileDialog, QGridLayout,
QLabel, QPushButton,QHBoxLayout,QFrame,QWidget,QLineEdit)
import time
import os
import FireDetect  # load the FireDetect flame-detection module
# import os
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
class MainWindow(QWidget):
def __init__(self):
        # initialize img, used to store the current image frame
self.img = None
self.timer_camera = QtCore.QTimer()
self.cap = cv2.VideoCapture()
self.CAM_NUM = 0
self.detect_flag = 0
super().__init__()
self.initUI()
def initUI(self):
self.setWindowIcon(QIcon('opencv.png'))
        self.resize(800, 600)  # main window size
        # ----------- title setup ----------- #
        self.title = QLabel('Change this to your own title', self)
self.title.move(10, 20)
self.title.setFont(QFont("楷体",36))
        # ----------- main preview window ----------- #
self.label = QLabel(self)
self.label.resize(640,480)
self.label.move(10,105)
self.label.setFrameStyle(QFrame.Box | QFrame.Plain)
self.label.setScaledContents(True)
        # ----------- button setup ----------- #
        self.btnOpenCam = QPushButton('Camera Detection', self)
self.btnOpenCam.setGeometry(QtCore.QRect(660,105,130,45))
self.btnOpenCam.setFont(QFont("微软雅黑",13))
        self.btnOpenVideo = QPushButton('Video Detection', self)
self.btnOpenVideo.setGeometry(QtCore.QRect(660,105+60,130,45))
self.btnOpenVideo.setFont(QFont("微软雅黑",13))
        self.btnClose = QPushButton('Stop Detection', self)
self.btnClose.setGeometry(QtCore.QRect(660,105+120,130,45))
self.btnClose.setFont(QFont("微软雅黑",13))
        self.btnImgTest = QPushButton('Image Detection', self)
self.btnImgTest.setGeometry(QtCore.QRect(660,105+180,130,45))
self.btnImgTest.setFont(QFont("微软雅黑",13))
        self.btnQuit = QPushButton('Quit', self)
self.btnQuit.setGeometry(QtCore.QRect(660,105+240,130,45))
self.btnQuit.setFont(QFont("微软雅黑",13))
self.btnClose.setEnabled(False)
        # connect signals to slots; as in Qt5, PyQt5 signals can be bound to ordinary member functions
self.btnOpenCam.clicked.connect(self.OpenCam)
self.btnOpenVideo.clicked.connect(self.OpenVideo)
self.btnImgTest.clicked.connect(self.ImgTest)
self.btnClose.clicked.connect(self.CloseTest)
self.btnQuit.clicked.connect(self.QuitApp)
#self.timer_camera.timeout.connect(self.show_camera)
self.video_flag = 1
self.flag = 0
def QuitApp(self):
self.close()
def show_camera(self):
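        # Dispatch on detect_flag: 0 = plain preview, 1 = gesture recognition,
        # 2 = gesture tracking (branches below).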
flag, self.img = self.cap.read()
if self.img is not None:
            if self.detect_flag == 0:  # only show the raw video frame
                self.showViewImg(self.label, self.img)
            elif self.detect_flag == 1:  # gesture recognition
                self.GestureRecognition(self.img)
            elif self.detect_flag == 2:  # gesture tracking
                self.GestureTrack(self.img)

    # flame detection from the camera
def OpenCam(self):
print("摄像头火焰检测")
self.flag = 0
self.video_flag = 1
        self.ccd = FireDetect.Work(self)  # instantiate the FireDetect.Work thread class
        self.ccd.setDaemon(True)
        self.ccd.start()  # start the worker thread
self.video_path = ''
self.btnOpenCam.setEnabled(False)
self.btnOpenVideo.setEnabled(False)
self.btnImgTest.setEnabled(False)
self.btnClose.setEnabled(True)
self.btnQuit.setEnabled(False)
    # flame detection from a video file
def OpenVideo(self):
print("视频文件火焰检测")
self.flag = 0
self.video_flag = 2
#self.video_path = "./1.avi"
path,_ =QFileDialog.getOpenFileName(self,'OpenFile',
"./","Video files (*.mp4 *.avi)")
if path == "":
return
self.video_path = path
        # start the flame-detection worker thread
self.ccd=FireDetect.Work(self)
self.ccd.setDaemon(True)
self.ccd.start()
self.btnOpenCam.setEnabled(False)
self.btnOpenVideo.setEnabled(False)
self.btnImgTest.setEnabled(False)
self.btnClose.setEnabled(True)
self.btnQuit.setEnabled(False)
def ImgTest(self):
path,_ =QFileDialog.getOpenFileName(self,'OpenFile',"./",
"Image files (*.jpg *.bmp *.png)")
img = cv2.imread(path)
if img is None:
print("Open Image Failed!")
return
WorkClass=FireDetect.Work(self)
WorkClass.img_detect(img)
def CloseTest(self):
self.flag = 1
self.btnOpenCam.setEnabled(True)
self.btnOpenVideo.setEnabled(True)
self.btnImgTest.setEnabled(True)
self.btnClose.setEnabled(False)
self.btnQuit.setEnabled(True)
def closeSlot(self):
#self.number.setText("")
self.label.clear()
#self.label2.clear()
self.detect_flag = 0
self.timer_camera.stop()
self.cap.release()
#self.btnOpen.setEnabled(True)
self.btnClose.setEnabled(False)
self.btnQuit.setEnabled(True)
if __name__ == '__main__':
a = QApplication(sys.argv)
w = MainWindow()
w.show()
    w.setWindowTitle('OpenCV Flame Detection System')
sys.exit(a.exec_())
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
vendor/github.com/getlantern/golog/golog.go | // Package golog implements logging functions that log errors to stderr and
// debug messages to stdout. Trace logging is also supported.
// Trace logs go to stdout as well, but they are only written if the program
// is run with environment variable "TRACE=true".
// A stack dump will be printed after the message if "PRINT_STACK=true".
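//
// A minimal, illustrative usage sketch:
//
//	log := golog.LoggerFor("example")
//	log.Debugf("hello %s", "world")
//	err := log.Errorf("op %s failed", "sync") // logged and returned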
package golog
import (
"bufio"
"bytes"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"github.com/getlantern/errors"
"github.com/getlantern/hidden"
"github.com/getlantern/ops"
"github.com/oxtoacart/bpool"
)
const (
// ERROR is an error Severity
ERROR = 500
// FATAL is an error Severity
FATAL = 600
)
var (
outs atomic.Value
prepender atomic.Value
reporters []ErrorReporter
reportersMutex sync.RWMutex
bufferPool = bpool.NewBufferPool(200)
onFatal atomic.Value
)
// Severity is a level of error (higher values are more severe)
type Severity int
func (s Severity) String() string {
switch s {
case ERROR:
return "ERROR"
case FATAL:
return "FATAL"
default:
return "UNKNOWN"
}
}
func init() {
DefaultOnFatal()
ResetOutputs()
ResetPrepender()
}
// SetPrepender sets a function to write something, e.g., the timestamp, before
// each line of the log.
func SetPrepender(p func(io.Writer)) {
prepender.Store(p)
}
func ResetPrepender() {
SetPrepender(func(io.Writer) {})
}
func GetPrepender() func(io.Writer) {
return prepender.Load().(func(io.Writer))
}
// SetOutputs sets the outputs for error and debug logs to use the given writers.
// Returns a function that resets outputs to their original values prior to calling SetOutputs.
func SetOutputs(errorOut io.Writer, debugOut io.Writer) (reset func()) {
oldOuts := outs.Load()
outs.Store(&outputs{
ErrorOut: errorOut,
DebugOut: debugOut,
})
return func() {
outs.Store(oldOuts)
}
}
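
// Example (illustrative) of temporarily redirecting output, e.g. in tests:
//
//	var errBuf, debugBuf bytes.Buffer
//	reset := SetOutputs(&errBuf, &debugBuf)
//	defer reset()
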
// Deprecated: instead of calling ResetOutputs, use the reset function returned by SetOutputs.
func ResetOutputs() {
SetOutputs(os.Stderr, os.Stdout)
}
func GetOutputs() *outputs {
return outs.Load().(*outputs)
}
// RegisterReporter registers the given ErrorReporter. All logged Errors are
// sent to this reporter.
func RegisterReporter(reporter ErrorReporter) {
reportersMutex.Lock()
reporters = append(reporters, reporter)
reportersMutex.Unlock()
}
// OnFatal configures golog to call the given function on any FATAL error. By
// default, golog calls os.Exit(1) on any FATAL error.
func OnFatal(fn func(err error)) {
onFatal.Store(fn)
}
// DefaultOnFatal enables the default behavior for OnFatal
func DefaultOnFatal() {
onFatal.Store(func(err error) {
os.Exit(1)
})
}
type outputs struct {
ErrorOut io.Writer
DebugOut io.Writer
}
// MultiLine is an interface for arguments that support multi-line output.
type MultiLine interface {
// MultiLinePrinter returns a function that can be used to print the
// multi-line output. The returned function writes one line to the buffer and
// returns true if there are more lines to write. This function does not need
// to take care of trailing carriage returns, golog handles that
// automatically.
MultiLinePrinter() func(buf *bytes.Buffer) bool
}
// ErrorReporter is a function to which the logger will report errors.
// It receives the given error and corresponding message along with associated ops
// context. This should return quickly as it executes on the critical code
// path. The recommended approach is to buffer as much as possible and discard
// new reports if the buffer becomes saturated.
type ErrorReporter func(err error, severity Severity, ctx map[string]interface{})
type Logger interface {
// Debug logs to stdout
Debug(arg interface{})
// Debugf logs to stdout
Debugf(message string, args ...interface{})
// Error logs to stderr
Error(arg interface{}) error
// Errorf logs to stderr. It returns the first argument that's an error, or
// a new error built using fmt.Errorf if none of the arguments are errors.
Errorf(message string, args ...interface{}) error
// Fatal logs to stderr and then exits with status 1
Fatal(arg interface{})
// Fatalf logs to stderr and then exits with status 1
Fatalf(message string, args ...interface{})
	// Trace logs to stdout only if TRACE=true
Trace(arg interface{})
	// Tracef logs to stdout only if TRACE=true
Tracef(message string, args ...interface{})
// TraceOut provides access to an io.Writer to which trace information can
	// be streamed. If running with environment variable "TRACE=true", writes
	// to TraceOut will be logged via the debug output; otherwise they are
	// discarded (ioutil.Discard).
// Each line of trace information will be prefixed with this Logger's
// prefix.
TraceOut() io.Writer
// IsTraceEnabled() indicates whether or not tracing is enabled for this
// logger.
IsTraceEnabled() bool
	// AsStdLogger returns a standard logger
AsStdLogger() *log.Logger
}
func LoggerFor(prefix string) Logger {
l := &logger{
prefix: prefix + ": ",
pc: make([]uintptr, 10),
}
trace := os.Getenv("TRACE")
l.traceOn, _ = strconv.ParseBool(trace)
if !l.traceOn {
prefixes := strings.Split(trace, ",")
for _, p := range prefixes {
if prefix == strings.Trim(p, " ") {
l.traceOn = true
break
}
}
}
if l.traceOn {
l.traceOut = l.newTraceWriter()
} else {
l.traceOut = ioutil.Discard
}
printStack := os.Getenv("PRINT_STACK")
l.printStack, _ = strconv.ParseBool(printStack)
return l
}
type logger struct {
prefix string
traceOn bool
traceOut io.Writer
printStack bool
outs atomic.Value
pc []uintptr
funcForPc *runtime.Func
}
// attaches the file and line number corresponding to
// the log message
func (l *logger) linePrefix(skipFrames int) string {
runtime.Callers(skipFrames, l.pc)
funcForPc := runtime.FuncForPC(l.pc[0])
file, line := funcForPc.FileLine(l.pc[0] - 1)
return fmt.Sprintf("%s%s:%d ", l.prefix, filepath.Base(file), line)
}
func (l *logger) print(out io.Writer, skipFrames int, severity string, arg interface{}) {
buf := bufferPool.Get()
defer bufferPool.Put(buf)
GetPrepender()(buf)
linePrefix := l.linePrefix(skipFrames)
writeHeader := func() {
buf.WriteString(severity)
buf.WriteString(" ")
buf.WriteString(linePrefix)
}
if arg != nil {
ml, isMultiline := arg.(MultiLine)
if !isMultiline {
writeHeader()
fmt.Fprintf(buf, "%v", arg)
printContext(buf, arg)
buf.WriteByte('\n')
} else {
mlp := ml.MultiLinePrinter()
first := true
for {
writeHeader()
more := mlp(buf)
if first {
printContext(buf, arg)
first = false
}
buf.WriteByte('\n')
if !more {
break
}
}
}
}
b := []byte(hidden.Clean(buf.String()))
_, err := out.Write(b)
if err != nil {
errorOnLogging(err)
}
if l.printStack {
l.doPrintStack()
}
}
func (l *logger) printf(out io.Writer, skipFrames int, severity string, err error, message string, args ...interface{}) {
buf := bufferPool.Get()
defer bufferPool.Put(buf)
GetPrepender()(buf)
linePrefix := l.linePrefix(skipFrames)
buf.WriteString(severity)
buf.WriteString(" ")
buf.WriteString(linePrefix)
fmt.Fprintf(buf, message, args...)
printContext(buf, err)
buf.WriteByte('\n')
b := []byte(hidden.Clean(buf.String()))
_, err2 := out.Write(b)
if err2 != nil {
errorOnLogging(err2)
}
if l.printStack {
l.doPrintStack()
}
}
func (l *logger) Debug(arg interface{}) {
l.print(GetOutputs().DebugOut, 4, "DEBUG", arg)
}
func (l *logger) Debugf(message string, args ...interface{}) {
l.printf(GetOutputs().DebugOut, 4, "DEBUG", nil, message, args...)
}
func (l *logger) Error(arg interface{}) error {
return l.errorSkipFrames(arg, 1, ERROR)
}
func (l *logger) Errorf(message string, args ...interface{}) error {
return l.errorSkipFrames(errors.NewOffset(1, message, args...), 1, ERROR)
}
func (l *logger) Fatal(arg interface{}) {
fatal(l.errorSkipFrames(arg, 1, FATAL))
}
func (l *logger) Fatalf(message string, args ...interface{}) {
fatal(l.errorSkipFrames(errors.NewOffset(1, message, args...), 1, FATAL))
}
func fatal(err error) {
fn := onFatal.Load().(func(err error))
fn(err)
}
func (l *logger) errorSkipFrames(arg interface{}, skipFrames int, severity Severity) error {
var err error
switch e := arg.(type) {
case error:
err = e
default:
err = fmt.Errorf("%v", e)
}
l.print(GetOutputs().ErrorOut, skipFrames+4, severity.String(), err)
return report(err, severity)
}
func (l *logger) Trace(arg interface{}) {
if l.traceOn {
l.print(GetOutputs().DebugOut, 4, "TRACE", arg)
}
}
func (l *logger) Tracef(message string, args ...interface{}) {
if l.traceOn {
l.printf(GetOutputs().DebugOut, 4, "TRACE", nil, message, args...)
}
}
func (l *logger) TraceOut() io.Writer {
return l.traceOut
}
func (l *logger) IsTraceEnabled() bool {
return l.traceOn
}
func (l *logger) newTraceWriter() io.Writer {
pr, pw := io.Pipe()
br := bufio.NewReader(pr)
if !l.traceOn {
return pw
}
go func() {
defer func() {
if err := pr.Close(); err != nil {
errorOnLogging(err)
}
}()
defer func() {
if err := pw.Close(); err != nil {
errorOnLogging(err)
}
}()
for {
line, err := br.ReadString('\n')
if err == nil {
// Log the line (minus the trailing newline)
l.print(GetOutputs().DebugOut, 6, "TRACE", line[:len(line)-1])
} else {
l.printf(GetOutputs().DebugOut, 6, "TRACE", nil, "TraceWriter closed due to unexpected error: %v", err)
return
}
}
}()
return pw
}
type errorWriter struct {
l *logger
}
// Write implements method of io.Writer, due to different call depth,
// it will not log correct file and line prefix
func (w *errorWriter) Write(p []byte) (n int, err error) {
s := string(p)
if s[len(s)-1] == '\n' {
s = s[:len(s)-1]
}
w.l.print(GetOutputs().ErrorOut, 6, "ERROR", s)
return len(p), nil
}
func (l *logger) AsStdLogger() *log.Logger {
return log.New(&errorWriter{l}, "", 0)
}
func (l *logger) doPrintStack() {
var b []byte
buf := bytes.NewBuffer(b)
for _, pc := range l.pc {
funcForPc := runtime.FuncForPC(pc)
if funcForPc == nil {
break
}
name := funcForPc.Name()
if strings.HasPrefix(name, "runtime.") {
break
}
file, line := funcForPc.FileLine(pc)
fmt.Fprintf(buf, "\t%s\t%s: %d\n", name, file, line)
}
if _, err := buf.WriteTo(os.Stderr); err != nil {
errorOnLogging(err)
}
}
func errorOnLogging(err error) {
fmt.Fprintf(os.Stderr, "Unable to log: %v\n", err)
}
func printContext(buf *bytes.Buffer, err interface{}) {
// Note - we don't include globals when printing in order to avoid polluting the text log
values := ops.AsMap(err, false)
if len(values) == 0 {
return
}
buf.WriteString(" [")
var keys []string
for key := range values {
keys = append(keys, key)
}
sort.Strings(keys)
for i, key := range keys {
value := values[key]
if i > 0 {
buf.WriteString(" ")
}
buf.WriteString(key)
buf.WriteString("=")
fmt.Fprintf(buf, "%v", value)
}
buf.WriteByte(']')
}
func report(err error, severity Severity) error {
var reportersCopy []ErrorReporter
reportersMutex.RLock()
if len(reporters) > 0 {
reportersCopy = make([]ErrorReporter, len(reporters))
copy(reportersCopy, reporters)
}
reportersMutex.RUnlock()
if len(reportersCopy) > 0 {
ctx := ops.AsMap(err, true)
ctx["severity"] = severity.String()
for _, reporter := range reportersCopy {
// We include globals when reporting
reporter(err, severity, ctx)
}
}
return err
}
| [
"\"TRACE\"",
"\"PRINT_STACK\""
]
| []
| [
"TRACE",
"PRINT_STACK"
]
| [] | ["TRACE", "PRINT_STACK"] | go | 2 | 0 | |
pkg/authn/backends/ldap/authenticator.go | // Copyright 2022 Paul Greenberg [email protected]
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ldap
import (
"crypto/tls"
"crypto/x509"
"fmt"
ldap "github.com/go-ldap/ldap/v3"
"github.com/greenpau/go-authcrunch/pkg/errors"
"github.com/greenpau/go-authcrunch/pkg/requests"
"go.uber.org/zap"
"io/ioutil"
"net"
"net/url"
"os"
"strings"
"sync"
"time"
)
// Authenticator represents database connector.
type Authenticator struct {
mux sync.Mutex
realm string
servers []*AuthServer
username string
password string
searchBaseDN string
searchUserFilter string
searchGroupFilter string
userAttributes UserAttributes
rootCAs *x509.CertPool
groups []*UserGroup
logger *zap.Logger
}
// NewAuthenticator returns an instance of Authenticator.
func NewAuthenticator() *Authenticator {
return &Authenticator{
servers: []*AuthServer{},
groups: []*UserGroup{},
}
}
// ConfigureRealm configures a domain name (realm) associated with
// the instance of authenticator.
func (sa *Authenticator) ConfigureRealm(cfg *Config) error {
sa.mux.Lock()
defer sa.mux.Unlock()
if cfg.Realm == "" {
return fmt.Errorf("no realm found")
}
sa.realm = cfg.Realm
sa.logger.Info(
"LDAP plugin configuration",
zap.String("phase", "realm"),
zap.String("realm", cfg.Realm),
)
return nil
}
// ConfigureServers configures the addresses of LDAP servers.
func (sa *Authenticator) ConfigureServers(cfg *Config) error {
sa.mux.Lock()
defer sa.mux.Unlock()
if len(cfg.Servers) == 0 {
return fmt.Errorf("no authentication servers found")
}
for _, entry := range cfg.Servers {
if !strings.HasPrefix(entry.Address, "ldaps://") && !strings.HasPrefix(entry.Address, "ldap://") {
return fmt.Errorf("the server address does not have neither ldaps:// nor ldap:// prefix, address: %s", entry.Address)
}
if entry.Timeout == 0 {
entry.Timeout = 5
}
if entry.Timeout > 10 {
return fmt.Errorf("invalid timeout value: %d, cannot exceed 10 seconds", entry.Timeout)
}
server := &AuthServer{
Address: entry.Address,
IgnoreCertErrors: entry.IgnoreCertErrors,
Timeout: entry.Timeout,
PosixGroups: entry.PosixGroups,
}
url, err := url.Parse(entry.Address)
if err != nil {
return fmt.Errorf("failed parsing LDAP server address: %s, %s", entry.Address, err)
}
server.URL = url
switch {
case strings.HasPrefix(entry.Address, "ldaps://"):
server.Port = "636"
server.Encrypted = true
case strings.HasPrefix(entry.Address, "ldap://"):
server.Port = "389"
}
if server.URL.Port() != "" {
server.Port = server.URL.Port()
}
sa.logger.Info(
"LDAP plugin configuration",
zap.String("phase", "servers"),
zap.String("address", server.Address),
zap.String("url", server.URL.String()),
zap.String("port", server.Port),
zap.Bool("ignore_cert_errors", server.IgnoreCertErrors),
zap.Bool("posix_groups", server.PosixGroups),
zap.Int("timeout", server.Timeout),
)
sa.servers = append(sa.servers, server)
}
return nil
}
// ConfigureBindCredentials configures user credentials for LDAP binding.
func (sa *Authenticator) ConfigureBindCredentials(cfg *Config) error {
username := cfg.BindUsername
password := cfg.BindPassword
sa.mux.Lock()
defer sa.mux.Unlock()
if username == "" {
return fmt.Errorf("no username found")
}
if password == "" {
password = os.Getenv("LDAP_USER_SECRET")
if password == "" {
return fmt.Errorf("no password found")
}
}
if strings.HasPrefix(password, "file:") {
secretFile := strings.TrimPrefix(password, "file:")
sa.logger.Info(
"LDAP plugin configuration",
zap.String("phase", "bind_credentials"),
zap.String("password_file", secretFile),
)
fileContent, err := ioutil.ReadFile(secretFile)
if err != nil {
return fmt.Errorf("failed reading password file: %s, %s", secretFile, err)
}
password = strings.TrimSpace(string(fileContent))
if password == "" {
return fmt.Errorf("no password found in file: %s", secretFile)
}
}
sa.username = username
sa.password = password
sa.logger.Info(
"LDAP plugin configuration",
zap.String("phase", "bind_credentials"),
zap.String("username", sa.username),
)
return nil
}
// ConfigureSearch configures base DN, search filter, attributes for LDAP queries.
func (sa *Authenticator) ConfigureSearch(cfg *Config) error {
attr := cfg.Attributes
searchBaseDN := cfg.SearchBaseDN
searchUserFilter := cfg.SearchUserFilter
searchGroupFilter := cfg.SearchGroupFilter
sa.mux.Lock()
defer sa.mux.Unlock()
if searchBaseDN == "" {
return fmt.Errorf("no search_base_dn found")
}
if searchUserFilter == "" {
searchUserFilter = "(&(|(sAMAccountName=%s)(mail=%s))(objectclass=user))"
}
if searchGroupFilter == "" {
searchGroupFilter = "(&(uniqueMember=%s)(objectClass=groupOfUniqueNames))"
}
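	// The %s placeholders in the filters above are substituted at query time:
	// the user filter with the supplied username, the group filter with the
	// matched user DN (see AuthenticateUser and findUser).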
if attr.Name == "" {
attr.Name = "givenName"
}
if attr.Surname == "" {
attr.Surname = "sn"
}
if attr.Username == "" {
attr.Username = "sAMAccountName"
}
if attr.MemberOf == "" {
attr.MemberOf = "memberOf"
}
if attr.Email == "" {
attr.Email = "mail"
}
sa.logger.Info(
"LDAP plugin configuration",
zap.String("phase", "search"),
zap.String("search_base_dn", searchBaseDN),
zap.String("search_user_filter", searchUserFilter),
zap.String("search_group_filter", searchGroupFilter),
zap.String("attr.name", attr.Name),
zap.String("attr.surname", attr.Surname),
zap.String("attr.username", attr.Username),
zap.String("attr.member_of", attr.MemberOf),
zap.String("attr.email", attr.Email),
)
sa.searchBaseDN = searchBaseDN
sa.searchUserFilter = searchUserFilter
sa.searchGroupFilter = searchGroupFilter
sa.userAttributes = attr
return nil
}
// ConfigureUserGroups configures user group bindings for LDAP searching.
func (sa *Authenticator) ConfigureUserGroups(cfg *Config) error {
groups := cfg.Groups
if len(groups) == 0 {
return fmt.Errorf("no groups found")
}
for i, group := range groups {
if group.GroupDN == "" {
return fmt.Errorf("Base DN for group %d is empty", i)
}
if len(group.Roles) == 0 {
return fmt.Errorf("Role assignments for group %d is empty", i)
}
for j, role := range group.Roles {
if role == "" {
return fmt.Errorf("Role assignment %d for group %d is empty", j, i)
}
}
saGroup := &UserGroup{
GroupDN: group.GroupDN,
Roles: group.Roles,
}
sa.logger.Info(
"LDAP plugin configuration",
zap.String("phase", "user_groups"),
zap.String("roles", strings.Join(saGroup.Roles, ", ")),
zap.String("dn", saGroup.GroupDN),
)
sa.groups = append(sa.groups, saGroup)
}
return nil
}
// IdentifyUser returns user challenges.
func (sa *Authenticator) IdentifyUser(r *requests.Request) error {
sa.mux.Lock()
defer sa.mux.Unlock()
for _, server := range sa.servers {
conn, err := sa.dial(server)
if err != nil {
continue
}
defer conn.Close()
if err := sa.findUser(conn, server, r); err != nil {
if err.Error() == errors.ErrBackendLdapAuthFailed.WithArgs("user not found").Error() {
r.User.Username = "nobody"
r.User.Email = "nobody@localhost"
r.User.Challenges = []string{"password"}
return nil
}
r.Response.Code = 401
return err
}
return nil
}
r.Response.Code = 500
return errors.ErrBackendLdapAuthFailed.WithArgs("LDAP servers are unavailable")
}
// AuthenticateUser checks the database for the presence of a username/email
// and password and returns user claims.
func (sa *Authenticator) AuthenticateUser(r *requests.Request) error {
sa.mux.Lock()
defer sa.mux.Unlock()
for _, server := range sa.servers {
ldapConnection, err := sa.dial(server)
if err != nil {
continue
}
defer ldapConnection.Close()
searchUserFilter := strings.ReplaceAll(sa.searchUserFilter, "%s", r.User.Username)
req := ldap.NewSearchRequest(
// group.GroupDN,
sa.searchBaseDN,
ldap.ScopeWholeSubtree,
ldap.NeverDerefAliases,
0,
server.Timeout,
false,
searchUserFilter,
[]string{
sa.userAttributes.Email,
},
nil, // Controls
)
if req == nil {
sa.logger.Error(
"LDAP request building failed, request is nil",
zap.String("server", server.Address),
zap.String("search_base_dn", sa.searchBaseDN),
zap.String("search_user_filter", searchUserFilter),
)
continue
}
resp, err := ldapConnection.Search(req)
if err != nil {
sa.logger.Error(
"LDAP search failed",
zap.String("server", server.Address),
zap.String("search_base_dn", sa.searchBaseDN),
zap.String("search_user_filter", searchUserFilter),
zap.String("error", err.Error()),
)
continue
}
switch len(resp.Entries) {
case 1:
case 0:
return errors.ErrBackendLdapAuthFailed.WithArgs("user not found")
default:
return errors.ErrBackendLdapAuthFailed.WithArgs("multiple users matched")
}
user := resp.Entries[0]
// Use the provided password to make an LDAP connection.
if err := ldapConnection.Bind(user.DN, r.User.Password); err != nil {
sa.logger.Error(
"LDAP auth binding failed",
zap.String("server", server.Address),
zap.String("dn", user.DN),
zap.String("username", r.User.Username),
zap.String("error", err.Error()),
)
return errors.ErrBackendLdapAuthFailed.WithArgs(err)
}
sa.logger.Debug(
"LDAP auth succeeded",
zap.String("server", server.Address),
zap.String("dn", user.DN),
zap.String("username", r.User.Username),
)
return nil
}
return errors.ErrBackendLdapAuthFailed.WithArgs("LDAP servers are unavailable")
}
// ConfigureTrustedAuthorities configured trusted certificate authorities, if any.
func (sa *Authenticator) ConfigureTrustedAuthorities(cfg *Config) error {
authorities := cfg.TrustedAuthorities
if len(authorities) == 0 {
return nil
}
for _, authority := range authorities {
pemCerts, err := ioutil.ReadFile(authority)
if err != nil {
return fmt.Errorf("failed reading trusted authority file: %s, %s", authority, err)
}
if sa.rootCAs == nil {
sa.rootCAs = x509.NewCertPool()
}
if ok := sa.rootCAs.AppendCertsFromPEM(pemCerts); !ok {
return fmt.Errorf("failed added trusted authority file contents to Root CA pool: %s", authority)
}
sa.logger.Debug(
"added trusted authority",
zap.String("pem_file", authority),
)
}
return nil
}
func (sa *Authenticator) searchGroups(conn *ldap.Conn, reqData map[string]interface{}, roles map[string]bool) error {
if roles == nil {
roles = make(map[string]bool)
}
req := ldap.NewSearchRequest(reqData["base_dn"].(string), ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0,
reqData["timeout"].(int), false, reqData["search_group_filter"].(string), []string{"dn"}, nil,
)
if req == nil {
return fmt.Errorf("failed building group search LDAP request")
}
resp, err := conn.Search(req)
if err != nil {
return err
}
if len(resp.Entries) < 1 {
return fmt.Errorf("no groups found for %s", reqData["user_dn"].(string))
}
for _, entry := range resp.Entries {
for _, g := range sa.groups {
if g.GroupDN != entry.DN {
continue
}
for _, role := range g.Roles {
if role == "" {
continue
}
roles[role] = true
}
}
}
return nil
}
func (sa *Authenticator) dial(server *AuthServer) (*ldap.Conn, error) {
var ldapDialer net.Conn
var err error
timeout := time.Duration(server.Timeout) * time.Second
if server.Encrypted {
// Handle LDAPS servers.
tlsConfig := &tls.Config{
InsecureSkipVerify: server.IgnoreCertErrors,
}
if sa.rootCAs != nil {
tlsConfig.RootCAs = sa.rootCAs
}
ldapDialer, err = tls.DialWithDialer(
&net.Dialer{
Timeout: timeout,
},
"tcp",
net.JoinHostPort(server.URL.Hostname(), server.Port),
tlsConfig,
)
if err != nil {
sa.logger.Error(
"LDAP TLS dialer failed",
zap.String("server", server.Address),
zap.Error(err),
)
return nil, err
}
sa.logger.Debug(
"LDAP TLS dialer setup succeeded",
zap.String("server", server.Address),
)
} else {
// Handle LDAP servers.
ldapDialer, err = net.DialTimeout("tcp", net.JoinHostPort(server.URL.Hostname(), server.Port), timeout)
if err != nil {
sa.logger.Error(
"LDAP dialer failed",
zap.String("server", server.Address),
zap.Error(err),
)
return nil, err
}
sa.logger.Debug(
"LDAP dialer setup succeeded",
zap.String("server", server.Address),
)
}
ldapConnection := ldap.NewConn(ldapDialer, server.Encrypted)
if ldapConnection == nil {
err = fmt.Errorf("ldap connection is nil")
sa.logger.Error(
"LDAP connection failed",
zap.String("server", server.Address),
zap.Error(err),
)
return nil, err
}
if server.Encrypted {
tlsState, ok := ldapConnection.TLSConnectionState()
if !ok {
err = fmt.Errorf("TLSConnectionState is not ok")
sa.logger.Error(
"LDAP connection TLS state polling failed",
zap.String("server", server.Address),
zap.Error(err),
)
return nil, err
}
sa.logger.Debug(
"LDAP connection TLS state polling succeeded",
zap.String("server", server.Address),
zap.String("server_name", tlsState.ServerName),
zap.Bool("handshake_complete", tlsState.HandshakeComplete),
zap.String("version", fmt.Sprintf("%d", tlsState.Version)),
zap.String("negotiated_protocol", tlsState.NegotiatedProtocol),
)
}
ldapConnection.Start()
if err := ldapConnection.Bind(sa.username, sa.password); err != nil {
sa.logger.Error(
"LDAP connection binding failed",
zap.String("server", server.Address),
zap.String("username", sa.username),
zap.String("error", err.Error()),
)
return nil, err
}
sa.logger.Debug(
"LDAP binding succeeded",
zap.String("server", server.Address),
)
return ldapConnection, nil
}
func (sa *Authenticator) findUser(ldapConnection *ldap.Conn, server *AuthServer, r *requests.Request) error {
searchUserFilter := strings.ReplaceAll(sa.searchUserFilter, "%s", r.User.Username)
req := ldap.NewSearchRequest(
// group.GroupDN,
sa.searchBaseDN,
ldap.ScopeWholeSubtree,
ldap.NeverDerefAliases,
0,
server.Timeout,
false,
searchUserFilter,
[]string{
sa.userAttributes.Name,
sa.userAttributes.Surname,
sa.userAttributes.Username,
sa.userAttributes.MemberOf,
sa.userAttributes.Email,
},
nil, // Controls
)
if req == nil {
sa.logger.Error(
"LDAP request building failed, request is nil",
zap.String("server", server.Address),
zap.String("search_base_dn", sa.searchBaseDN),
zap.String("search_user_filter", searchUserFilter),
)
return errors.ErrBackendLdapAuthFailed.WithArgs("LDAP request building failed, request is nil")
}
resp, err := ldapConnection.Search(req)
if err != nil {
sa.logger.Error(
"LDAP search failed",
zap.String("server", server.Address),
zap.String("search_base_dn", sa.searchBaseDN),
zap.String("search_user_filter", searchUserFilter),
zap.String("error", err.Error()),
)
return errors.ErrBackendLdapAuthFailed.WithArgs("LDAP search failed")
}
sa.logger.Debug(
"LDAP search succeeded",
zap.String("server", server.Address),
zap.Int("entry_count", len(resp.Entries)),
zap.String("search_base_dn", sa.searchBaseDN),
zap.String("search_user_filter", searchUserFilter),
zap.Any("users", resp.Entries),
)
switch len(resp.Entries) {
case 1:
case 0:
return errors.ErrBackendLdapAuthFailed.WithArgs("user not found")
default:
return errors.ErrBackendLdapAuthFailed.WithArgs("multiple users matched")
}
user := resp.Entries[0]
var userFullName, userLastName, userFirstName, userAccountName, userMail string
userRoles := make(map[string]bool)
if server.PosixGroups {
// Handle POSIX group memberships.
searchGroupRequest := map[string]interface{}{
"user_dn": user.DN,
"base_dn": sa.searchBaseDN,
"search_group_filter": strings.ReplaceAll(sa.searchGroupFilter, "%s", user.DN),
"timeout": server.Timeout,
}
if err := sa.searchGroups(ldapConnection, searchGroupRequest, userRoles); err != nil {
sa.logger.Error(
"LDAP group search failed, request",
zap.String("server", server.Address),
zap.String("base_dn", sa.searchBaseDN),
zap.String("search_group_filter", sa.searchGroupFilter),
zap.Error(err),
)
return err
}
}
for _, attr := range user.Attributes {
if len(attr.Values) < 1 {
continue
}
if attr.Name == sa.userAttributes.Name {
userFirstName = attr.Values[0]
}
if attr.Name == sa.userAttributes.Surname {
userLastName = attr.Values[0]
}
if attr.Name == sa.userAttributes.Username {
userAccountName = attr.Values[0]
}
if attr.Name == sa.userAttributes.MemberOf {
for _, v := range attr.Values {
for _, g := range sa.groups {
if g.GroupDN != v {
continue
}
for _, role := range g.Roles {
if role == "" {
continue
}
userRoles[role] = true
}
}
}
}
if attr.Name == sa.userAttributes.Email {
userMail = attr.Values[0]
}
}
if userFirstName != "" {
userFullName = userFirstName
}
if userLastName != "" {
if userFullName == "" {
userFullName = userLastName
} else {
userFullName = userFullName + " " + userLastName
}
}
if len(userRoles) == 0 {
return errors.ErrBackendLdapAuthFailed.WithArgs("no matched groups")
}
sa.logger.Debug(
"LDAP user match",
zap.String("server", server.Address),
zap.String("name", userFullName),
zap.String("username", userAccountName),
zap.String("email", userMail),
zap.Any("roles", userRoles),
)
r.User.Username = userAccountName
r.User.Email = userMail
r.User.FullName = userFullName
for role := range userRoles {
r.User.Roles = append(r.User.Roles, role)
}
r.User.Challenges = []string{"password"}
r.Response.Code = 200
return nil
}
| [
"\"LDAP_USER_SECRET\""
]
| []
| [
"LDAP_USER_SECRET"
]
| [] | ["LDAP_USER_SECRET"] | go | 1 | 0 | |
bot_tests/EventBotTest.py | import os
import random
import discord
from dotenv import load_dotenv
from discord.ext import commands
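
# Note: this bot targets discord.py 1.x. On discord.py 2.x you would also need
# to enable the message-content intent (discord.Intents with
# message_content=True) for on_message to receive message text.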
class DiscordClient(discord.Client):
def __init__(self, **options):
super().__init__(**options)
load_dotenv()
self.token = os.getenv('DISCORD_TOKEN')
async def on_ready(self):
        print(f'{self.user} has connected to Discord!')
async def on_message(self, message):
        if message.author == self.user:
return
brooklyn_99_quotes = [
'I\'m the human form of the 💯 emoji.',
'Bingpot!',
(
'Cool. Cool cool cool cool cool cool cool, '
'no doubt no doubt no doubt no doubt.'
),
]
if message.content == '99!':
response = random.choice(brooklyn_99_quotes)
await message.channel.send(response)
if __name__ == '__main__':
client = DiscordClient()
client.run(client.token)
| []
| []
| [
"DISCORD_TOKEN"
]
| [] | ["DISCORD_TOKEN"] | python | 1 | 0 | |
abci/tests/test_app/main.go | package main
import (
"fmt"
"log"
"os"
"os/exec"
"time"
"github.com/tendermint/tendermint/abci/example/code"
"github.com/tendermint/tendermint/abci/types"
)
var abciType string
func init() {
abciType = os.Getenv("ABCI")
if abciType == "" {
abciType = "socket"
}
}
func main() {
testCounter()
}
const (
maxABCIConnectTries = 10
)
func ensureABCIIsUp(typ string, n int) error {
var err error
cmdString := "abci-cli echo hello"
if typ == "grpc" {
cmdString = "abci-cli --abci grpc echo hello"
}
for i := 0; i < n; i++ {
cmd := exec.Command("bash", "-c", cmdString)
_, err = cmd.CombinedOutput()
if err == nil {
break
}
<-time.After(500 * time.Millisecond)
}
return err
}
func testCounter() {
abciApp := os.Getenv("ABCI_APP")
if abciApp == "" {
panic("No ABCI_APP specified")
}
fmt.Printf("Running %s test with abci=%s\n", abciApp, abciType)
subCommand := fmt.Sprintf("abci-cli %s", abciApp)
cmd := exec.Command("bash", "-c", subCommand)
cmd.Stdout = os.Stdout
if err := cmd.Start(); err != nil {
log.Fatalf("starting %q err: %v", abciApp, err)
}
defer func() {
if err := cmd.Process.Kill(); err != nil {
log.Printf("error on process kill: %v", err)
}
if err := cmd.Wait(); err != nil {
log.Printf("error while waiting for cmd to exit: %v", err)
}
}()
if err := ensureABCIIsUp(abciType, maxABCIConnectTries); err != nil {
log.Fatalf("echo failed: %v", err) //nolint:gocritic
}
client := startClient(abciType)
defer func() {
if err := client.Stop(); err != nil {
log.Printf("error trying client stop: %v", err)
}
}()
setOption(client, "serial", "on")
commit(client, nil)
deliverTx(client, []byte("abc"), code.CodeTypeBadNonce, nil)
commit(client, nil)
deliverTx(client, []byte{0x00}, types.CodeTypeOK, nil)
commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 1})
deliverTx(client, []byte{0x00}, code.CodeTypeBadNonce, nil)
deliverTx(client, []byte{0x01}, types.CodeTypeOK, nil)
deliverTx(client, []byte{0x00, 0x02}, types.CodeTypeOK, nil)
deliverTx(client, []byte{0x00, 0x03}, types.CodeTypeOK, nil)
deliverTx(client, []byte{0x00, 0x00, 0x04}, types.CodeTypeOK, nil)
deliverTx(client, []byte{0x00, 0x00, 0x06}, code.CodeTypeBadNonce, nil)
commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 5})
}
| [
"\"ABCI\"",
"\"ABCI_APP\""
]
| []
| [
"ABCI_APP",
"ABCI"
]
| [] | ["ABCI_APP", "ABCI"] | go | 2 | 0 | |
object.go | package dfon
import (
"bytes"
"fmt"
"strconv"
"strings"
)
type Object struct {
Enabled bool `json:"enabled"`
Type string `json:"type"`
Values []string `json:"values"`
Children []*Object `json:"children"`
Traits []*Object `json:"traits"`
}
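// IsFlag reports whether the object carries no values (a bare flag token).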
func (o *Object) IsFlag() bool {
return len(o.Values) == 0
}
func (o *Object) String() string {
buf := bytes.Buffer{}
if o.Enabled {
buf.WriteString("[")
} else {
buf.WriteString("╳")
}
buf.WriteString(o.Type)
if !o.IsFlag() {
buf.WriteString(":")
buf.WriteString(strings.Join(o.Values, ":"))
}
if o.Enabled {
buf.WriteString("]")
} else {
buf.WriteString("╳")
}
for i := range o.Traits {
buf.WriteString(o.Traits[i].String())
}
return buf.String()
}
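// EnableState sets the enabled state of the object and, when traits is true,
// of all of its traits as well.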
func (o *Object) EnableState(state, traits bool) {
o.Enabled = state
if traits {
for i := range o.Traits {
o.Traits[i].Enabled = state
}
}
}
func (o *Object) GetById(id string) []*Object {
return getById(id, o.Children)
}
func (o *Object) AsBool() bool {
if len(o.Values) > 0 && o.Values[0] == "YES" {
return true
}
return false
}
func (o *Object) SetBool(value bool) {
if len(o.Values) == 0 {
if value {
o.Values = []string{"YES"}
} else {
o.Values = []string{"NO"}
}
} else {
if value {
o.Values[0] = "YES"
} else {
o.Values[0] = "NO"
}
}
}
func (o *Object) AsInt(index int) int {
if index >= len(o.Values) {
return -1
}
n, _ := strconv.Atoi(o.Values[index])
return n
}
func (o *Object) SetInt(index int, value int) {
if index < len(o.Values) {
o.Values[index] = fmt.Sprint(value)
}
}
| []
| []
| []
| [] | [] | go | null | null | null |
src/segmentpy/_taskManager/ActViewer_logic.py | from PySide2.QtWidgets import QApplication, QWidget, QMessageBox, QListWidget
from PySide2.QtGui import QPixmap, QImage
from PySide2.QtCore import Qt
from segmentpy._taskManager.ActViewer_design import Ui_actViewer
from segmentpy._taskManager.nodes_list_logic import node_list_logic
from segmentpy._taskManager.file_dialog import file_dialog
from segmentpy.tf114.util import print_nodes_name, check_N_mkdir
from segmentpy.tf114.analytic import partialRlt_and_diff, visualize_weights
from PIL import Image
import re
import sys
import os
import numpy as np
import subprocess
import tensorflow as tf
import matplotlib.pyplot as plt
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
from operator import add
# logging
import logging
from segmentpy.tf114 import log
logger = log.setup_custom_logger(__name__)
logger.setLevel(logging.DEBUG)
class actViewer_logic(QWidget, Ui_actViewer):
def __init__(self, *args, **kwargs):
"""order: set_ckpt() = set_input() > load_graph() > get_nodes() > load_activations()"""
super().__init__()
self.setupUi(self)
self.actList.setSelectionMode(QListWidget.MultiSelection)
self.ckptButton.clicked.connect(self.ckptFileDialog)
self.inputButton.clicked.connect(self.inputFileDialog)
self.load.clicked.connect(self.load_activations)
self.saveButton.clicked.connect(self.save_selected_activations)
self.cancelButton.clicked.connect(self.exit)
self.ckptPathLine.editingFinished.connect(self.set_ckpt)
self.inputPathLine.editingFinished.connect(self.set_input)
self.corrector.editingFinished.connect(self.setCorrector)
self.actList.doubleClicked.connect(self.set_focused_layer)
self.actSlider.valueChanged.connect(self.display)
self.weightSlider.valueChanged.connect(self.displayWeight)
# variables
self.ckpt = None
self.input = None
self.layer_name = None
self.layer = None
self.correction = None
def log_window(self, title: str, Msg: str):
msg = QMessageBox()
msg.setIcon(QMessageBox.Critical)
msg.setText(Msg)
msg.setWindowTitle(title)
msg.exec_()
def ckptFileDialog(self):
tmp = file_dialog(title='choose .meta file').openFileNameDialog()
if tmp:
self.ckptPathLine.setText(tmp)
self.set_ckpt()
def inputFileDialog(self):
tmp = file_dialog(title='choose .tif for input', type='.tif').openFileNameDialog()
if tmp:
self.inputPathLine.setText(tmp)
self.set_input()
def setCorrector(self):
self.correction = float(self.corrector.text())
self.hyperparams['normalization'] = self.correction
def set_ckpt(self):
self.ckpt = self.ckptPathLine.text()
# hit Enter or close file dialog load automatically the model
# prepare
if self.ckpt:
_re = re.search('(.+)ckpt/step(\d+)\.meta', self.ckpt)
self.step = _re.group(2)
self.graph_def_dir = _re.group(1)
self.paths = {
'step': self.step,
'working_dir': self.graph_def_dir,
'ckpt_dir': self.graph_def_dir + 'ckpt/',
'ckpt_path': self.graph_def_dir + 'ckpt/step{}'.format(self.step),
'save_pb_dir': self.graph_def_dir + 'pb/',
'save_pb_path': self.graph_def_dir + 'pb/step{}.pb'.format(self.step),
'data_dir': self.input,
}
model = re.search('mdl_([A-Za-z]*\d*)', self.ckpt).group(1)
self.hyperparams = {
'model': model,
'window_size': int(re.search('ps(\d+)', self.ckpt).group(1)),
'batch_size': int(re.search('bs(\d+)', self.ckpt).group(1)),
# 'stride': args.stride,
'device_option': 'cpu',
'mode': 'classification', # todo:
'batch_normalization': False,
'feature_map': True if model in ['LRCS8', 'LRCS9', 'LRCS10', 'Unet3'] else False,
'correction': self.correction
}
# get node and set the listViewWidget
self.get_nodes()
def set_input(self):
self.input = self.inputPathLine.text()
self.paths['data_dir'] = self.input
def get_nodes(self):
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# restore from ckpt the nodes
tf.reset_default_graph()
self.actList.clear()
_ = tf.train.import_meta_graph(
self.ckpt,
clear_devices=True,
)
# get_nodes
graph = tf.get_default_graph().as_graph_def()
nodes = print_nodes_name(graph)
options = []
for node in nodes:
tmp = re.search('(^[a-zA-Z]+\d*\/).*(leaky|relu|sigmoid|tanh|logits\/identity|up\d+\/Reshape\_4|concat\d+\/concat)$', # concat\d+\/concat for uniquely Unet
node)
if tmp is not None:
tmp = tmp.string
options.append(tmp)
self.actList.addItems([n for n in options])
def set_focused_layer(self, list_number=None):
self.layer_name = self.actList.item(list_number.row()).text()
self.layer = list_number.row()
self.display(0)
def display(self, nth=0):
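        # Render the nth feature map of the focused layer in the preview label,
        # then load the matching kernel slice into the weight viewer.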
logger.debug(self.layer_name)
logger.debug(self.layer)
if not hasattr(self, 'activations'):
self.get_nodes()
self.load_activations()
else:
act = self.activations[self.layer_name][0]
weight = self.kernels[self.layer]
logger.debug('weight matrix shape: {}'.format(weight.shape))
logger.debug('activations list len: {}'.format(len(self.activations[self.layer_name])))
self.actSlider.setMaximum(act.shape[-1] - 1) # -1 as starts with 0
# 1D dnn output
if 'dnn' in self.layer_name:
ceiling = int(np.ceil(np.sqrt(act.size)))
tmp = np.zeros((ceiling ** 2), np.float32).ravel()
tmp[:act.size] = act
act = tmp.reshape(ceiling, ceiling)
else:
logger.debug('act shape: {}'.format(act.shape))
logger.debug('weight shape: {}'.format(weight.shape))
act = act[:, :, nth]
act = (act - np.min(act)) / (np.max(act) - np.min(act)) * 255
act = np.asarray(Image.fromarray(act).convert('RGB'))
act = act.copy()
# imshow
self.q = QImage(act,
act.shape[1],
act.shape[0],
act.shape[1] * 3, QImage.Format_RGB888)
self.p = QPixmap(self.q)
self.p.scaled(self.width(), self.height(), Qt.KeepAspectRatio, Qt.SmoothTransformation)
self.Images.setScaledContents(True)
self.Images.setPixmap(self.p)
self.Images.update()
self.Images.repaint()
# get weight
weight = weight[:, :, :, nth]
logger.debug('weightSlide maxi: {}'.format(weight.shape[2]))
self.weightSlider.setMaximum(weight.shape[2] - 1)
weight = (weight - np.min(weight)) / (np.max(weight) - np.min(weight)) * 255
self.weight = weight.copy()
self.displayWeight(0)
def displayWeight(self, slide=None):
# get weight
fig_weight = plt.figure(figsize=(1.2, 1.2))
fig_weight.clear()
ax = fig_weight.add_subplot(111)
img = np.squeeze(self.weight[:, :, slide])
ax.imshow(img, interpolation='none', aspect='auto')
for (y, x), z in np.ndenumerate(np.squeeze(img)):
ax.text(x, y, '%.2f' % z, fontsize=5, ha='center', va='center',)
ax.axis('off')
fig_weight.canvas.draw()
        # np.fromstring is deprecated; frombuffer + copy() keeps a writable array
        data = np.frombuffer(fig_weight.canvas.tostring_rgb(), dtype=np.uint8).copy()
logger.debug('img shape: {}'.format(data.shape))
logger.debug(fig_weight.canvas.get_width_height())
logger.debug(fig_weight.canvas.get_width_height()[::-1])
data = data.reshape(tuple(map(add, fig_weight.canvas.get_width_height()[::-1],
fig_weight.canvas.get_width_height())[::-1]) + (3,))
# plt.imshow(data)
# plt.show()
logger.debug('img shape: {}'.format(data.shape))
del fig_weight
logger.debug(slide)
# plot weight
self.wt = QImage(data,
data.shape[1],
data.shape[0],
data.shape[1] * 3, QImage.Format_RGB888)
self.pw = QPixmap(self.wt)
self.pw.scaled(self.width(), self.height(),
Qt.KeepAspectRatio,
Qt.SmoothTransformation
)
self.weightLabel.setScaledContents(False)
self.weightLabel.setPixmap(self.pw)
self.weightLabel.update()
self.weightLabel.repaint()
def load_activations(self):
if not self.input:
            self.log_window(title='Error!', Msg='Please indicate an input image')
elif not self.correction:
            self.log_window(title='Error!', Msg='Please set the corrector value first')
else:
self.activations = partialRlt_and_diff(paths=self.paths, hyperparams=self.hyperparams,
conserve_nodes=[self.actList.item(i).text() for i in range(self.actList.count())],
write_rlt=False)
logger.debug(self.activations)
# todo: display the weight the input and output too
self.kern_name, self.kernels = visualize_weights(params=self.paths, write_rlt=False)
logger.debug(self.kern_name)
def save_selected_activations(self):
if not self.input:
            self.log_window(title='Error!', Msg='Please indicate an input image')
else:
save_act_path = file_dialog(title='choose a folder to save the images', type='/').openFolderDialog()
selected_idx = self.actList.selectionModel().selectedIndexes()
for idx in selected_idx:
layer_name = self.actList.item(idx.row()).text()
rlt = np.squeeze(self.activations[layer_name])
if rlt.ndim == 3:
for i in range(rlt.shape[-1]):
check_N_mkdir(save_act_path+layer_name.replace('/','_'))
Image.fromarray(rlt[:, :, i]).save(save_act_path+layer_name.replace('/','_')+'/{}.tif'.format(i))
elif rlt.ndim == 2:
check_N_mkdir(save_act_path+layer_name.replace('/','_'))
Image.fromarray(rlt[:, :]).save(save_act_path + layer_name.replace('/','_') + '/act.tif')
else:
logger.debug('got an unexpected ndim of the activations: {}'.format(rlt.ndim))
def exit(self):
self.close()
def test():
app = QApplication(sys.argv)
# set ui
ui = actViewer_logic()
ui.show()
sys.exit(app.exec_())
if __name__ == '__main__':
test() | []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
test/testResourceLoader.py | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
import tempfile
import shutil
import os
from ferenda.compat import unittest
from ferenda.errors import ResourceNotFound
from ferenda import DocumentEntry # just used for test_loadpath
from ferenda import DocumentRepository
from ferenda import util
# SUT
from ferenda import ResourceLoader
# this class mainly exists so that we can try out make_loadpath
class SubTestCase(unittest.TestCase):
pass
class Main(SubTestCase, DocumentEntry):
def setUp(self):
self.tempdir = tempfile.mkdtemp()
loadpath = [self.tempdir + "/primary", self.tempdir + "/secondary"]
util.writefile(loadpath[0]+os.sep+"primaryresource.txt", "Hello")
util.writefile(loadpath[1]+os.sep+"secondaryresource.txt", "World")
self.resourceloader = ResourceLoader(*loadpath)
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_loadpath(self):
self.assertEqual(ResourceLoader.make_loadpath(self),
["test/res", # from test.testResourceLoader.SubTestCase
"ferenda/res" # from ferenda.compat.unittest.TestCase
])
def test_exists(self):
self.assertTrue(self.resourceloader.exists("primaryresource.txt"))
self.assertTrue(self.resourceloader.exists("secondaryresource.txt"))
self.assertTrue(self.resourceloader.exists("robots.txt"))
self.assertFalse(self.resourceloader.exists("nonexistent.txt"))
def test_open(self):
with self.resourceloader.open("primaryresource.txt") as fp:
self.assertEqual("Hello", fp.read())
with self.resourceloader.open("secondaryresource.txt") as fp:
self.assertEqual("World", fp.read())
# should be available through the pkg_resources API
with self.resourceloader.open("robots.txt") as fp:
self.assertIn("# robotstxt.org/", fp.read())
with self.assertRaises(ResourceNotFound):
with self.resourceloader.open("nonexistent.txt") as fp:
fp.read()
def test_openfp(self):
fp = self.resourceloader.openfp("primaryresource.txt")
self.assertEqual("Hello", fp.read())
fp.close()
fp = self.resourceloader.openfp("secondaryresource.txt")
self.assertEqual("World", fp.read())
fp.close()
fp = self.resourceloader.openfp("robots.txt")
self.assertIn("# robotstxt.org/", fp.read())
fp.close()
with self.assertRaises(ResourceNotFound):
fp = self.resourceloader.openfp("nonexistent.txt")
def test_read(self):
self.assertEqual("Hello",
self.resourceloader.load("primaryresource.txt"))
self.assertEqual("World",
self.resourceloader.load("secondaryresource.txt"))
self.assertIn("# robotstxt.org/",
self.resourceloader.load("robots.txt"))
with self.assertRaises(ResourceNotFound):
self.resourceloader.load("nonexistent.txt")
def test_filename(self):
self.assertEqual(self.tempdir + "/primary/primaryresource.txt",
self.resourceloader.filename("primaryresource.txt"))
self.assertEqual(self.tempdir + "/secondary/secondaryresource.txt",
self.resourceloader.filename("secondaryresource.txt"))
self.assertEqual("ferenda/res/robots.txt",
self.resourceloader.filename("robots.txt"))
with self.assertRaises(ResourceNotFound):
self.resourceloader.filename("nonexistent.txt")
def test_extractdir(self):
dest = self.tempdir + os.sep + "dest"
os.mkdir(dest)
self.resourceloader.extractdir(None, dest)
self.assertEqual(set(os.listdir(dest)),
set(["primaryresource.txt", "secondaryresource.txt",
"robots.txt", "humans.txt"]))
class RepoResourceLoader(unittest.TestCase):
expected = set(["analytics-tracker.xsl", "annotations-panel.xsl", "atom.xsl",
"base.xsl", "error.xsl", "frontpage.xsl", "generic.xsl",
"grit-grddl.xsl", "keyword.xsl", "nav-search-form.xsl", "paged.xsl",
"rdfxml-grit.xsl", "search.xsl", "simplify-ooxml.xsl",
"toc.xsl", "tune-width.xsl", "statusreport.xsl"])
def setUp(self):
self.tempdir = tempfile.mkdtemp()
self.repo = DocumentRepository()
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_extractdir_repo(self):
dest = self.tempdir + os.sep + "dest"
os.mkdir(dest)
self.repo.resourceloader.extractdir("xsl", dest)
extracted = [x[len(dest)+1:] for x in util.list_dirs(dest)]
self.assertEqual(self.expected, set(extracted))
def test_extractdir_newwd(self):
dest = self.tempdir + os.sep + "dest"
os.mkdir(dest)
prevdir = os.getcwd()
os.chdir(self.tempdir)
if "FERENDA_HOME" not in os.environ:
os.environ["FERENDA_HOME"] = prevdir
try:
self.repo.resourceloader.extractdir("xsl", dest)
extracted = [x[len(dest)+1:] for x in util.list_dirs(dest)]
self.assertEqual(self.expected, set(extracted))
finally:
os.chdir(prevdir)
| []
| []
| [
"FERENDA_HOME"
]
| [] | ["FERENDA_HOME"] | python | 1 | 0 | |
resolve/config.go | package resolve
import (
"log"
"os"
"strconv"
)
type Logger interface {
Printf(fmt string, args ...interface{})
}
type Config struct {
Log Logger
Verbose bool
Debug bool
IgnoreMap map[string]bool
}
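
// DefaultConfig returns a Config that logs to stderr and seeds Verbose and
// Debug from the VERBOSE and DEBUG environment variables.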
func DefaultConfig() *Config {
verbose := false
if v, err := strconv.ParseBool(os.Getenv("VERBOSE")); err == nil {
verbose = v
}
debug := false
if v, err := strconv.ParseBool(os.Getenv("DEBUG")); err == nil {
debug = v
}
return &Config{
Log: log.New(os.Stderr, "", 0),
Verbose: verbose,
Debug: debug,
IgnoreMap: map[string]bool{"context.Context": true},
}
}
| [
"\"VERBOSE\"",
"\"DEBUG\""
]
| []
| [
"DEBUG",
"VERBOSE"
]
| [] | ["DEBUG", "VERBOSE"] | go | 2 | 0 | |
integration_test.go | // Copyright (c) 2012, Sean Treadway, SoundCloud Ltd.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Source code and contact info at http://github.com/Noobygames/amqp
// +build integration
package amqp
import (
"bytes"
devrand "crypto/rand"
"encoding/binary"
"fmt"
"hash/crc32"
"io"
"math/rand"
"net"
"os"
"reflect"
"strconv"
"sync"
"testing"
"testing/quick"
"time"
)
func TestIntegrationOpenClose(t *testing.T) {
if c := integrationConnection(t, "open-close"); c != nil {
t.Logf("have connection, calling connection close")
if err := c.Close(); err != nil {
t.Fatalf("connection close: %s", err)
}
t.Logf("connection close OK")
}
}
func TestIntegrationOpenCloseChannel(t *testing.T) {
if c := integrationConnection(t, "channel"); c != nil {
defer c.Close()
ch, err := c.Channel()
if err != nil {
t.Fatalf("create channel 1: %s", err)
}
ch.Close()
}
}
func TestIntegrationHighChannelChurnInTightLoop(t *testing.T) {
if c := integrationConnection(t, "channel churn"); c != nil {
defer c.Close()
for i := 0; i < 1000; i++ {
ch, err := c.Channel()
if err != nil {
t.Fatalf("create channel 1: %s", err)
}
ch.Close()
}
}
}
func TestIntegrationOpenConfig(t *testing.T) {
config := Config{}
c, err := DialConfig(integrationURLFromEnv(), config)
if err != nil {
t.Fatalf("expected to dial with config %+v integration server: %s", config, err)
}
if _, err := c.Channel(); err != nil {
t.Errorf("expected to open channel: %s", err)
}
if err := c.Close(); err != nil {
t.Errorf("expected to close the connection: %s", err)
}
}
func TestIntegrationOpenConfigWithNetDial(t *testing.T) {
config := Config{Dial: net.Dial}
c, err := DialConfig(integrationURLFromEnv(), config)
if err != nil {
t.Fatalf("expected to dial with config %+v integration server: %s", config, err)
}
if _, err := c.Channel(); err != nil {
t.Errorf("expected to open channel: %s", err)
}
if err := c.Close(); err != nil {
t.Errorf("expected to close the connection: %s", err)
}
}
func TestIntegrationLocalAddr(t *testing.T) {
config := Config{}
c, err := DialConfig(integrationURLFromEnv(), config)
if err != nil {
t.Fatalf("expected to dial with config %+v integration server: %s", config, err)
}
defer c.Close()
a := c.LocalAddr()
_, portString, err := net.SplitHostPort(a.String())
if err != nil {
t.Errorf("expected to get a local network address with config %+v integration server: %s", config, a.String())
}
port, err := strconv.Atoi(portString)
if err != nil {
t.Errorf("expected to get a TCP port number with config %+v integration server: %s", config, err)
}
t.Logf("Connected to port %d\n", port)
}
// https://github.com/Noobygames/amqp/issues/94
func TestExchangePassiveOnMissingExchangeShouldError(t *testing.T) {
c := integrationConnection(t, "exch")
if c != nil {
defer c.Close()
ch, err := c.Channel()
if err != nil {
t.Fatalf("create channel 1: %s", err)
}
defer ch.Close()
if err := ch.ExchangeDeclarePassive(
"test-integration-missing-passive-exchange",
"direct", // type
			false,    // durable
true, // auto-delete
false, // internal
false, // nowait
nil, // args
); err == nil {
t.Fatal("ExchangeDeclarePassive of a missing exchange should return error")
}
}
}
// https://github.com/Noobygames/amqp/issues/94
func TestIntegrationExchangeDeclarePassiveOnDeclaredShouldNotError(t *testing.T) {
c := integrationConnection(t, "exch")
if c != nil {
defer c.Close()
exchange := "test-integration-declared-passive-exchange"
ch, err := c.Channel()
if err != nil {
t.Fatalf("create channel: %s", err)
}
defer ch.Close()
if err := ch.ExchangeDeclare(
exchange, // name
"direct", // type
false, // durable
true, // auto-delete
false, // internal
false, // nowait
nil, // args
); err != nil {
t.Fatalf("declare exchange: %s", err)
}
if err := ch.ExchangeDeclarePassive(
exchange, // name
"direct", // type
false, // durable
true, // auto-delete
false, // internal
false, // nowait
nil, // args
); err != nil {
t.Fatalf("ExchangeDeclarePassive on a declared exchange should not error, got: %q", err)
}
}
}
func TestIntegrationExchange(t *testing.T) {
c := integrationConnection(t, "exch")
if c != nil {
defer c.Close()
channel, err := c.Channel()
if err != nil {
t.Fatalf("create channel: %s", err)
}
t.Logf("create channel OK")
exchange := "test-integration-exchange"
if err := channel.ExchangeDeclare(
exchange, // name
"direct", // type
			false,    // durable
true, // auto-delete
false, // internal
false, // nowait
nil, // args
); err != nil {
t.Fatalf("declare exchange: %s", err)
}
t.Logf("declare exchange OK")
if err := channel.ExchangeDelete(exchange, false, false); err != nil {
t.Fatalf("delete exchange: %s", err)
}
t.Logf("delete exchange OK")
if err := channel.Close(); err != nil {
t.Fatalf("close channel: %s", err)
}
t.Logf("close channel OK")
}
}
// https://github.com/Noobygames/amqp/issues/94
func TestIntegrationQueueDeclarePassiveOnMissingExchangeShouldError(t *testing.T) {
c := integrationConnection(t, "queue")
if c != nil {
defer c.Close()
ch, err := c.Channel()
if err != nil {
t.Fatalf("create channel1: %s", err)
}
defer ch.Close()
if _, err := ch.QueueDeclarePassive(
"test-integration-missing-passive-queue", // name
			false, // durable
true, // auto-delete
false, // exclusive
false, // noWait
nil, // arguments
); err == nil {
t.Fatal("QueueDeclarePassive of a missing queue should error")
}
}
}
// https://github.com/Noobygames/amqp/issues/94
func TestIntegrationPassiveQueue(t *testing.T) {
c := integrationConnection(t, "queue")
if c != nil {
defer c.Close()
name := "test-integration-declared-passive-queue"
ch, err := c.Channel()
if err != nil {
t.Fatalf("create channel1: %s", err)
}
defer ch.Close()
if _, err := ch.QueueDeclare(
name, // name
false, // durable
true, // auto-delete
false, // exclusive
false, // noWait
nil, // arguments
); err != nil {
t.Fatalf("queue declare: %s", err)
}
if _, err := ch.QueueDeclarePassive(
name, // name
false, // durable
true, // auto-delete
false, // exclusive
false, // noWait
nil, // arguments
); err != nil {
t.Fatalf("QueueDeclarePassive on declared queue should not error, got: %q", err)
}
if _, err := ch.QueueDeclarePassive(
name, // name
true, // durable (note: differs)
true, // auto-delete
false, // exclusive
false, // noWait
nil, // arguments
		); err == nil {
t.Fatalf("QueueDeclarePassive on declared queue with different flags should error")
}
}
}
func TestIntegrationBasicQueueOperations(t *testing.T) {
c := integrationConnection(t, "queue")
if c != nil {
defer c.Close()
channel, err := c.Channel()
if err != nil {
t.Fatalf("create channel: %s", err)
}
t.Logf("create channel OK")
exchangeName := "test-basic-ops-exchange"
queueName := "test-basic-ops-queue"
deleteQueueFirstOptions := []bool{true, false}
for _, deleteQueueFirst := range deleteQueueFirstOptions {
if err := channel.ExchangeDeclare(
exchangeName, // name
"direct", // type
				true,         // durable
false, // auto-delete
false, // internal
false, // nowait
nil, // args
); err != nil {
t.Fatalf("declare exchange: %s", err)
}
t.Logf("declare exchange OK")
if _, err := channel.QueueDeclare(
queueName, // name
				true,      // durable
false, // auto-delete
false, // exclusive
false, // noWait
nil, // arguments
); err != nil {
t.Fatalf("queue declare: %s", err)
}
t.Logf("declare queue OK")
if err := channel.QueueBind(
queueName, // name
"", // routingKey
exchangeName, // sourceExchange
false, // noWait
nil, // arguments
); err != nil {
t.Fatalf("queue bind: %s", err)
}
t.Logf("queue bind OK")
if deleteQueueFirst {
if _, err := channel.QueueDelete(
queueName, // name
false, // ifUnused (false=be aggressive)
false, // ifEmpty (false=be aggressive)
false, // noWait
); err != nil {
t.Fatalf("delete queue (first): %s", err)
}
t.Logf("delete queue (first) OK")
if err := channel.ExchangeDelete(exchangeName, false, false); err != nil {
t.Fatalf("delete exchange (after delete queue): %s", err)
}
t.Logf("delete exchange (after delete queue) OK")
} else { // deleteExchangeFirst
if err := channel.ExchangeDelete(exchangeName, false, false); err != nil {
t.Fatalf("delete exchange (first): %s", err)
}
t.Logf("delete exchange (first) OK")
if _, err := channel.QueueInspect(queueName); err != nil {
t.Fatalf("inspect queue state after deleting exchange: %s", err)
}
t.Logf("queue properly remains after exchange is deleted")
if _, err := channel.QueueDelete(
queueName,
false, // ifUnused
false, // ifEmpty
false, // noWait
); err != nil {
t.Fatalf("delete queue (after delete exchange): %s", err)
}
t.Logf("delete queue (after delete exchange) OK")
}
}
if err := channel.Close(); err != nil {
t.Fatalf("close channel: %s", err)
}
t.Logf("close channel OK")
}
}
func TestIntegrationConnectionNegotiatesMaxChannels(t *testing.T) {
config := Config{ChannelMax: 0}
c, err := DialConfig(integrationURLFromEnv(), config)
if err != nil {
t.Fatalf("expected to dial with config %+v integration server: %s", config, err)
}
defer c.Close()
if want, got := defaultChannelMax, c.Config.ChannelMax; want != got {
t.Errorf("expected connection to negotiate uint16 (%d) channels, got: %d", want, got)
}
}
func TestIntegrationConnectionNegotiatesClientMaxChannels(t *testing.T) {
config := Config{ChannelMax: 16}
c, err := DialConfig(integrationURLFromEnv(), config)
if err != nil {
t.Fatalf("expected to dial with config %+v integration server: %s", config, err)
}
defer c.Close()
if want, got := config.ChannelMax, c.Config.ChannelMax; want != got {
t.Errorf("expected client specified channel limit after handshake %d, got: %d", want, got)
}
}
func TestIntegrationChannelIDsExhausted(t *testing.T) {
config := Config{ChannelMax: 16}
c, err := DialConfig(integrationURLFromEnv(), config)
if err != nil {
t.Fatalf("expected to dial with config %+v integration server: %s", config, err)
}
defer c.Close()
for i := 1; i <= c.Config.ChannelMax; i++ {
if _, err := c.Channel(); err != nil {
t.Fatalf("expected allocating all channel ids to succed, failed on %d with %v", i, err)
}
}
if _, err := c.Channel(); err != ErrChannelMax {
t.Fatalf("expected allocating all channels to produce the client side error %#v, got: %#v", ErrChannelMax, err)
}
}
func TestIntegrationChannelClosing(t *testing.T) {
c := integrationConnection(t, "closings")
if c != nil {
defer c.Close()
		// This function is run on every channel after it is successfully
		// opened. It can run a quick assertion against the open channel.
		// It should be quick; many channels may be opened!
f := func(t *testing.T, c *Channel) {
return
}
// open and close
channel, err := c.Channel()
if err != nil {
t.Fatalf("basic create channel: %s", err)
}
t.Logf("basic create channel OK")
if err := channel.Close(); err != nil {
t.Fatalf("basic close channel: %s", err)
}
t.Logf("basic close channel OK")
// deferred close
signal := make(chan bool)
go func() {
channel, err := c.Channel()
if err != nil {
t.Fatalf("second create channel: %s", err)
}
t.Logf("second create channel OK")
<-signal // a bit of synchronization
f(t, channel)
defer func() {
if err := channel.Close(); err != nil {
t.Fatalf("deferred close channel: %s", err)
}
t.Logf("deferred close channel OK")
signal <- true
}()
}()
signal <- true
select {
case <-signal:
t.Logf("(got close signal OK)")
break
case <-time.After(250 * time.Millisecond):
t.Fatalf("deferred close: timeout")
}
// multiple channels
for _, n := range []int{2, 4, 8, 16, 32, 64, 128, 256} {
channels := make([]*Channel, n)
for i := 0; i < n; i++ {
var err error
if channels[i], err = c.Channel(); err != nil {
t.Fatalf("create channel %d/%d: %s", i+1, n, err)
}
}
f(t, channel)
for i, channel := range channels {
if err := channel.Close(); err != nil {
t.Fatalf("close channel %d/%d: %s", i+1, n, err)
}
}
t.Logf("created/closed %d channels OK", n)
}
}
}
func TestIntegrationMeaningfulChannelErrors(t *testing.T) {
c := integrationConnection(t, "pub")
if c != nil {
defer c.Close()
ch, err := c.Channel()
if err != nil {
t.Fatalf("Could not create channel")
}
queue := "test.integration.channel.error"
_, err = ch.QueueDeclare(queue, false, true, false, false, nil)
if err != nil {
t.Fatalf("Could not declare")
}
_, err = ch.QueueDeclare(queue, true, false, false, false, nil)
if err == nil {
t.Fatalf("Expected error, got nil")
}
e, ok := err.(*Error)
if !ok {
t.Fatalf("Expected type Error response, got %T", err)
}
if e.Code != PreconditionFailed {
t.Fatalf("Expected PreconditionFailed, got: %+v", e)
}
_, err = ch.QueueDeclare(queue, false, true, false, false, nil)
if err != ErrClosed {
t.Fatalf("Expected channel to be closed, got: %T", err)
}
}
}
// https://github.com/Noobygames/amqp/issues/6
func TestIntegrationNonBlockingClose(t *testing.T) {
c := integrationConnection(t, "#6")
if c != nil {
defer c.Close()
ch, err := c.Channel()
if err != nil {
t.Fatalf("Could not create channel")
}
queue := "test.integration.blocking.close"
_, err = ch.QueueDeclare(queue, false, true, false, false, nil)
if err != nil {
t.Fatalf("Could not declare")
}
msgs, err := ch.Consume(queue, "", false, false, false, false, nil)
if err != nil {
t.Fatalf("Could not consume")
}
// Simulate a consumer
go func() {
for range msgs {
t.Logf("Oh my, received message on an empty queue")
}
}()
succeed := make(chan bool)
go func() {
if err = ch.Close(); err != nil {
t.Fatalf("Close produced an error when it shouldn't")
}
succeed <- true
}()
select {
case <-succeed:
break
case <-time.After(1 * time.Second):
t.Fatalf("Close timed out after 1s")
}
}
}
func TestIntegrationPublishConsume(t *testing.T) {
queue := "test.integration.publish.consume"
c1 := integrationConnection(t, "pub")
c2 := integrationConnection(t, "sub")
if c1 != nil && c2 != nil {
defer c1.Close()
defer c2.Close()
pub, _ := c1.Channel()
sub, _ := c2.Channel()
pub.QueueDeclare(queue, false, true, false, false, nil)
sub.QueueDeclare(queue, false, true, false, false, nil)
defer pub.QueueDelete(queue, false, false, false)
messages, _ := sub.Consume(queue, "", false, false, false, false, nil)
pub.Publish("", queue, false, false, Publishing{Body: []byte("pub 1")})
pub.Publish("", queue, false, false, Publishing{Body: []byte("pub 2")})
pub.Publish("", queue, false, false, Publishing{Body: []byte("pub 3")})
assertConsumeBody(t, messages, []byte("pub 1"))
assertConsumeBody(t, messages, []byte("pub 2"))
assertConsumeBody(t, messages, []byte("pub 3"))
}
}
func TestIntegrationConsumeFlow(t *testing.T) {
queue := "test.integration.consumer-flow"
c1 := integrationConnection(t, "pub-flow")
c2 := integrationConnection(t, "sub-flow")
if c1 != nil && c2 != nil {
defer c1.Close()
defer c2.Close()
pub, _ := c1.Channel()
sub, _ := c2.Channel()
pub.QueueDeclare(queue, false, true, false, false, nil)
sub.QueueDeclare(queue, false, true, false, false, nil)
defer pub.QueueDelete(queue, false, false, false)
sub.Qos(1, 0, false)
messages, _ := sub.Consume(queue, "", false, false, false, false, nil)
pub.Publish("", queue, false, false, Publishing{Body: []byte("pub 1")})
pub.Publish("", queue, false, false, Publishing{Body: []byte("pub 2")})
msg := assertConsumeBody(t, messages, []byte("pub 1"))
		if err := sub.Flow(false); err != nil {
			if e, ok := err.(*Error); ok && e.Code == NotImplemented {
				t.Log("flow control is not supported on this version of rabbitmq")
				return
			}
		}
msg.Ack(false)
select {
case <-messages:
t.Fatalf("message was delivered when flow was not active")
default:
}
sub.Flow(true)
msg = assertConsumeBody(t, messages, []byte("pub 2"))
msg.Ack(false)
}
}
func TestIntegrationRecoverNotImplemented(t *testing.T) {
queue := "test.recover"
if c, ch := integrationQueue(t, queue); c != nil {
if product, ok := c.Properties["product"]; ok && product.(string) == "RabbitMQ" {
defer c.Close()
err := ch.Recover(false)
if ex, ok := err.(*Error); !ok || ex.Code != 540 {
t.Fatalf("Expected NOT IMPLEMENTED got: %v", ex)
}
}
}
}
// This test is driven by a private API to simulate the server sending a channelFlow message
func TestIntegrationPublishFlow(t *testing.T) {
// TODO - no idea how to test without affecting the server or mucking internal APIs
// i'd like to make sure the RW lock can be held by multiple publisher threads
// and that multiple channelFlow messages do not block the dispatch thread
}
func TestIntegrationConsumeCancel(t *testing.T) {
queue := "test.integration.consume-cancel"
c := integrationConnection(t, "pub")
if c != nil {
defer c.Close()
ch, _ := c.Channel()
ch.QueueDeclare(queue, false, true, false, false, nil)
defer ch.QueueDelete(queue, false, false, false)
messages, _ := ch.Consume(queue, "integration-tag", false, false, false, false, nil)
ch.Publish("", queue, false, false, Publishing{Body: []byte("1")})
assertConsumeBody(t, messages, []byte("1"))
err := ch.Cancel("integration-tag", false)
if err != nil {
t.Fatalf("error cancelling the consumer: %v", err)
}
ch.Publish("", queue, false, false, Publishing{Body: []byte("2")})
select {
case <-time.After(100 * time.Millisecond):
t.Fatalf("Timeout on Close")
case _, ok := <-messages:
if ok {
t.Fatalf("Extra message on consumer when consumer should have been closed")
}
}
}
}
func (c *Connection) Generate(r *rand.Rand, _ int) reflect.Value {
urlStr := os.Getenv("AMQP_URL")
if urlStr == "" {
return reflect.ValueOf(nil)
}
conn, err := Dial(urlStr)
if err != nil {
return reflect.ValueOf(nil)
}
return reflect.ValueOf(conn)
}
func (c Publishing) Generate(r *rand.Rand, _ int) reflect.Value {
var ok bool
var t reflect.Value
p := Publishing{}
//p.DeliveryMode = uint8(r.Intn(3))
//p.Priority = uint8(r.Intn(8))
if r.Intn(2) > 0 {
p.ContentType = "application/octet-stream"
}
if r.Intn(2) > 0 {
p.ContentEncoding = "gzip"
}
if r.Intn(2) > 0 {
p.CorrelationId = fmt.Sprintf("%d", r.Int())
}
if r.Intn(2) > 0 {
p.ReplyTo = fmt.Sprintf("%d", r.Int())
}
if r.Intn(2) > 0 {
p.MessageId = fmt.Sprintf("%d", r.Int())
}
if r.Intn(2) > 0 {
p.Type = fmt.Sprintf("%d", r.Int())
}
if r.Intn(2) > 0 {
p.AppId = fmt.Sprintf("%d", r.Int())
}
if r.Intn(2) > 0 {
p.Timestamp = time.Unix(r.Int63(), r.Int63())
}
if t, ok = quick.Value(reflect.TypeOf(p.Body), r); ok {
p.Body = t.Bytes()
}
return reflect.ValueOf(p)
}
func TestQuickPublishOnly(t *testing.T) {
if c := integrationConnection(t, "quick"); c != nil {
defer c.Close()
pub, err := c.Channel()
queue := "test-publish"
if _, err = pub.QueueDeclare(queue, false, true, false, false, nil); err != nil {
t.Errorf("Failed to declare: %s", err)
return
}
defer pub.QueueDelete(queue, false, false, false)
quick.Check(func(msg Publishing) bool {
return pub.Publish("", queue, false, false, msg) == nil
}, nil)
}
}
func TestPublishEmptyBody(t *testing.T) {
c := integrationConnection(t, "empty")
if c != nil {
defer c.Close()
ch, err := c.Channel()
if err != nil {
t.Errorf("Failed to create channel")
return
}
queue := "test-TestPublishEmptyBody"
if _, err := ch.QueueDeclare(queue, false, true, false, false, nil); err != nil {
t.Fatalf("Could not declare")
}
messages, err := ch.Consume(queue, "", false, false, false, false, nil)
if err != nil {
t.Fatalf("Could not consume")
}
err = ch.Publish("", queue, false, false, Publishing{})
if err != nil {
t.Fatalf("Could not publish")
}
select {
case msg := <-messages:
if len(msg.Body) != 0 {
t.Errorf("Received non empty body")
}
case <-time.After(200 * time.Millisecond):
t.Errorf("Timeout on receive")
}
}
}
func TestPublishEmptyBodyWithHeadersIssue67(t *testing.T) {
c := integrationConnection(t, "issue67")
if c != nil {
defer c.Close()
ch, err := c.Channel()
if err != nil {
t.Errorf("Failed to create channel")
return
}
queue := "test-TestPublishEmptyBodyWithHeaders"
if _, err := ch.QueueDeclare(queue, false, true, false, false, nil); err != nil {
t.Fatalf("Could not declare")
}
messages, err := ch.Consume(queue, "", false, false, false, false, nil)
if err != nil {
t.Fatalf("Could not consume")
}
headers := Table{
"ham": "spam",
}
err = ch.Publish("", queue, false, false, Publishing{Headers: headers})
if err != nil {
t.Fatalf("Could not publish")
}
select {
case msg := <-messages:
if msg.Headers["ham"] == nil {
t.Fatalf("Headers aren't sent")
}
if msg.Headers["ham"] != "spam" {
t.Fatalf("Headers are wrong")
}
case <-time.After(200 * time.Millisecond):
t.Errorf("Timeout on receive")
}
}
}
func TestQuickPublishConsumeOnly(t *testing.T) {
c1 := integrationConnection(t, "quick-pub")
c2 := integrationConnection(t, "quick-sub")
if c1 != nil && c2 != nil {
defer c1.Close()
defer c2.Close()
pub, err := c1.Channel()
sub, err := c2.Channel()
queue := "TestPublishConsumeOnly"
if _, err = pub.QueueDeclare(queue, false, true, false, false, nil); err != nil {
t.Errorf("Failed to declare: %s", err)
return
}
if _, err = sub.QueueDeclare(queue, false, true, false, false, nil); err != nil {
t.Errorf("Failed to declare: %s", err)
return
}
defer sub.QueueDelete(queue, false, false, false)
ch, err := sub.Consume(queue, "", false, false, false, false, nil)
if err != nil {
t.Errorf("Could not sub: %s", err)
}
quick.CheckEqual(
func(msg Publishing) []byte {
empty := Publishing{Body: msg.Body}
if pub.Publish("", queue, false, false, empty) != nil {
return []byte{'X'}
}
return msg.Body
},
func(msg Publishing) []byte {
out := <-ch
out.Ack(false)
return out.Body
},
nil)
}
}
func TestQuickPublishConsumeBigBody(t *testing.T) {
c1 := integrationConnection(t, "big-pub")
c2 := integrationConnection(t, "big-sub")
if c1 != nil && c2 != nil {
defer c1.Close()
defer c2.Close()
pub, err := c1.Channel()
sub, err := c2.Channel()
queue := "test-pubsub"
if _, err = sub.QueueDeclare(queue, false, true, false, false, nil); err != nil {
t.Errorf("Failed to declare: %s", err)
return
}
ch, err := sub.Consume(queue, "", false, false, false, false, nil)
if err != nil {
t.Errorf("Could not sub: %s", err)
}
fixture := Publishing{
Body: make([]byte, 1e4+1000),
}
if _, err = pub.QueueDeclare(queue, false, true, false, false, nil); err != nil {
t.Errorf("Failed to declare: %s", err)
return
}
err = pub.Publish("", queue, false, false, fixture)
if err != nil {
t.Errorf("Could not publish big body")
}
select {
case msg := <-ch:
			if !bytes.Equal(msg.Body, fixture.Body) {
t.Errorf("Consumed big body didn't match")
}
case <-time.After(200 * time.Millisecond):
t.Errorf("Timeout on receive")
}
}
}
func TestIntegrationGetOk(t *testing.T) {
if c := integrationConnection(t, "getok"); c != nil {
defer c.Close()
queue := "test.get-ok"
ch, _ := c.Channel()
ch.QueueDeclare(queue, false, true, false, false, nil)
ch.Publish("", queue, false, false, Publishing{Body: []byte("ok")})
msg, ok, err := ch.Get(queue, false)
if err != nil {
t.Fatalf("Failed get: %v", err)
}
if !ok {
t.Fatalf("Get on a queued message did not find the message")
}
if string(msg.Body) != "ok" {
t.Fatalf("Get did not get the correct message")
}
}
}
func TestIntegrationGetEmpty(t *testing.T) {
if c := integrationConnection(t, "getok"); c != nil {
defer c.Close()
queue := "test.get-ok"
ch, _ := c.Channel()
ch.QueueDeclare(queue, false, true, false, false, nil)
_, ok, err := ch.Get(queue, false)
if err != nil {
t.Fatalf("Failed get: %v", err)
}
		if ok {
			t.Fatalf("Get on an empty queue retrieved a message when it shouldn't have")
		}
}
}
func TestIntegrationTxCommit(t *testing.T) {
if c := integrationConnection(t, "txcommit"); c != nil {
defer c.Close()
queue := "test.tx.commit"
ch, _ := c.Channel()
ch.QueueDeclare(queue, false, true, false, false, nil)
if err := ch.Tx(); err != nil {
t.Fatalf("tx.select failed")
}
ch.Publish("", queue, false, false, Publishing{Body: []byte("ok")})
if err := ch.TxCommit(); err != nil {
t.Fatalf("tx.commit failed")
}
msg, ok, err := ch.Get(queue, false)
if err != nil || !ok {
t.Fatalf("Failed get: %v", err)
}
if string(msg.Body) != "ok" {
t.Fatalf("Get did not get the correct message from the transaction")
}
}
}
func TestIntegrationTxRollback(t *testing.T) {
if c := integrationConnection(t, "txrollback"); c != nil {
defer c.Close()
queue := "test.tx.rollback"
ch, _ := c.Channel()
ch.QueueDeclare(queue, false, true, false, false, nil)
if err := ch.Tx(); err != nil {
t.Fatalf("tx.select failed")
}
ch.Publish("", queue, false, false, Publishing{Body: []byte("ok")})
if err := ch.TxRollback(); err != nil {
t.Fatalf("tx.rollback failed")
}
_, ok, err := ch.Get(queue, false)
if err != nil {
t.Fatalf("Failed get: %v", err)
}
if ok {
t.Fatalf("message was published when it should have been rolled back")
}
}
}
func TestIntegrationReturn(t *testing.T) {
if c, ch := integrationQueue(t, "return"); c != nil {
defer c.Close()
ret := make(chan Return, 1)
ch.NotifyReturn(ret)
// mandatory publish to an exchange without a binding should be returned
ch.Publish("", "return-without-binding", true, false, Publishing{Body: []byte("mandatory")})
select {
case res := <-ret:
if string(res.Body) != "mandatory" {
t.Fatalf("expected return of the same message")
}
if res.ReplyCode != NoRoute {
t.Fatalf("expected no consumers reply code on the Return result, got: %v", res.ReplyCode)
}
case <-time.After(200 * time.Millisecond):
t.Fatalf("no return was received within 200ms")
}
}
}
func TestIntegrationCancel(t *testing.T) {
queue := "cancel"
consumerTag := "test.cancel"
if c, ch := integrationQueue(t, queue); c != nil {
defer c.Close()
cancels := ch.NotifyCancel(make(chan string, 1))
go func() {
if _, err := ch.Consume(queue, consumerTag, false, false, false, false, nil); err != nil {
t.Fatalf("cannot consume from %q to test NotifyCancel: %v", queue, err)
}
if _, err := ch.QueueDelete(queue, false, false, false); err != nil {
t.Fatalf("cannot delete integration queue: %v", err)
}
}()
select {
case tag := <-cancels:
if want, got := consumerTag, tag; want != got {
t.Fatalf("expected to be notified of deleted queue with consumer tag, got: %q", got)
}
case <-time.After(200 * time.Millisecond):
t.Fatalf("expected to be notified of deleted queue with 200ms")
}
}
}
func TestIntegrationConfirm(t *testing.T) {
if c, ch := integrationQueue(t, "confirm"); c != nil {
defer c.Close()
confirms := ch.NotifyPublish(make(chan Confirmation, 1))
if err := ch.Confirm(false); err != nil {
t.Fatalf("could not confirm")
}
ch.Publish("", "confirm", false, false, Publishing{Body: []byte("confirm")})
select {
case confirmed := <-confirms:
if confirmed.DeliveryTag != 1 {
t.Fatalf("expected ack starting with delivery tag of 1")
}
case <-time.After(200 * time.Millisecond):
t.Fatalf("no ack was received within 200ms")
}
}
}
// https://github.com/Noobygames/amqp/issues/61
func TestRoundTripAllFieldValueTypes61(t *testing.T) {
if conn := integrationConnection(t, "issue61"); conn != nil {
defer conn.Close()
timestamp := time.Unix(100000000, 0)
headers := Table{
"A": []interface{}{
[]interface{}{"nested array", int32(3)},
Decimal{2, 1},
Table{"S": "nested table in array"},
int32(2 << 20),
string("array string"),
timestamp,
nil,
byte(2),
float64(2.64),
float32(2.32),
int64(2 << 60),
int16(2 << 10),
bool(true),
[]byte{'b', '2'},
},
"D": Decimal{1, 1},
"F": Table{"S": "nested table in table"},
"I": int32(1 << 20),
"S": string("string"),
"T": timestamp,
"V": nil,
"b": byte(1),
"d": float64(1.64),
"f": float32(1.32),
"l": int64(1 << 60),
"s": int16(1 << 10),
"t": bool(true),
"x": []byte{'b', '1'},
}
queue := "test.issue61-roundtrip"
ch, _ := conn.Channel()
if _, err := ch.QueueDeclare(queue, false, true, false, false, nil); err != nil {
t.Fatalf("Could not declare")
}
msgs, err := ch.Consume(queue, "", false, false, false, false, nil)
if err != nil {
t.Fatalf("Could not consume")
}
err = ch.Publish("", queue, false, false, Publishing{Body: []byte("ignored"), Headers: headers})
if err != nil {
t.Fatalf("Could not publish: %v", err)
}
msg, ok := <-msgs
if !ok {
t.Fatalf("Channel closed prematurely likely due to publish exception")
}
for k, v := range headers {
if !reflect.DeepEqual(v, msg.Headers[k]) {
t.Errorf("Round trip header not the same for key %q: expected: %#v, got %#v", k, v, msg.Headers[k])
}
}
}
}
// Declares a queue with the x-message-ttl extension to exercise integer
// serialization.
//
// Relates to https://github.com/Noobygames/amqp/issues/60
//
func TestDeclareArgsXMessageTTL(t *testing.T) {
if conn := integrationConnection(t, "declareTTL"); conn != nil {
defer conn.Close()
ch, _ := conn.Channel()
args := Table{"x-message-ttl": int32(9000000)}
// should not drop the connection
if _, err := ch.QueueDeclare("declareWithTTL", false, true, false, false, args); err != nil {
t.Fatalf("cannot declare with TTL: got: %v", err)
}
}
}
// Sets up the topology where rejected messages will be forwarded
// to a fanout exchange, with a single queue bound.
//
// Relates to https://github.com/Noobygames/amqp/issues/56
//
func TestDeclareArgsRejectToDeadLetterQueue(t *testing.T) {
if conn := integrationConnection(t, "declareArgs"); conn != nil {
defer conn.Close()
ex, q := "declareArgs", "declareArgs-deliveries"
dlex, dlq := ex+"-dead-letter", q+"-dead-letter"
ch, _ := conn.Channel()
if err := ch.ExchangeDeclare(ex, "fanout", false, true, false, false, nil); err != nil {
t.Fatalf("cannot declare %v: got: %v", ex, err)
}
if err := ch.ExchangeDeclare(dlex, "fanout", false, true, false, false, nil); err != nil {
t.Fatalf("cannot declare %v: got: %v", dlex, err)
}
if _, err := ch.QueueDeclare(dlq, false, true, false, false, nil); err != nil {
t.Fatalf("cannot declare %v: got: %v", dlq, err)
}
if err := ch.QueueBind(dlq, "#", dlex, false, nil); err != nil {
t.Fatalf("cannot bind %v to %v: got: %v", dlq, dlex, err)
}
if _, err := ch.QueueDeclare(q, false, true, false, false, Table{
"x-dead-letter-exchange": dlex,
}); err != nil {
t.Fatalf("cannot declare %v with dlq %v: got: %v", q, dlex, err)
}
if err := ch.QueueBind(q, "#", ex, false, nil); err != nil {
t.Fatalf("cannot bind %v: got: %v", ex, err)
}
fails, err := ch.Consume(q, "", false, false, false, false, nil)
if err != nil {
t.Fatalf("cannot consume %v: got: %v", q, err)
}
// Reject everything consumed
go func() {
for d := range fails {
d.Reject(false)
}
}()
// Publish the 'poison'
if err := ch.Publish(ex, q, true, false, Publishing{Body: []byte("ignored")}); err != nil {
t.Fatalf("publishing failed")
}
// spin-get until message arrives on the dead-letter queue with a
// synchronous parse to exercise the array field (x-death) set by the
// server relating to issue-56
for i := 0; i < 10; i++ {
d, got, err := ch.Get(dlq, false)
if !got && err == nil {
continue
} else if err != nil {
t.Fatalf("expected success in parsing reject, got: %v", err)
} else {
// pass if we've parsed an array
if v, ok := d.Headers["x-death"]; ok {
if _, ok := v.([]interface{}); ok {
return
}
}
t.Fatalf("array field x-death expected in the headers, got: %v (%T)", d.Headers, d.Headers["x-death"])
}
}
t.Fatalf("expectd dead-letter after 10 get attempts")
}
}
// https://github.com/Noobygames/amqp/issues/48
func TestDeadlockConsumerIssue48(t *testing.T) {
if conn := integrationConnection(t, "issue48"); conn != nil {
defer conn.Close()
deadline := make(chan bool)
go func() {
select {
case <-time.After(5 * time.Second):
panic("expected to receive 2 deliveries while in an RPC, got a deadlock")
case <-deadline:
// pass
}
}()
ch, err := conn.Channel()
if err != nil {
t.Fatalf("got error on channel.open: %v", err)
}
queue := "test-issue48"
if _, err := ch.QueueDeclare(queue, false, true, false, false, nil); err != nil {
t.Fatalf("expected to declare a queue: %v", err)
}
if err := ch.Confirm(false); err != nil {
t.Fatalf("got error on confirm: %v", err)
}
confirms := ch.NotifyPublish(make(chan Confirmation, 2))
for i := 0; i < cap(confirms); i++ {
// Fill the queue with some new or remaining publishings
ch.Publish("", queue, false, false, Publishing{Body: []byte("")})
}
for i := 0; i < cap(confirms); i++ {
// Wait for them to land on the queue so they'll be delivered on consume
<-confirms
}
// Consuming should send them all on the wire
msgs, err := ch.Consume(queue, "", false, false, false, false, nil)
if err != nil {
t.Fatalf("got error on consume: %v", err)
}
// We pop one off the chan, the other is on the wire
<-msgs
// Opening a new channel (any RPC) while another delivery is on the wire
if _, err := conn.Channel(); err != nil {
t.Fatalf("got error on consume: %v", err)
}
// We pop the next off the chan
<-msgs
deadline <- true
}
}
// https://github.com/Noobygames/amqp/issues/46
func TestRepeatedChannelExceptionWithPublishAndMaxProcsIssue46(t *testing.T) {
conn := integrationConnection(t, "issue46")
if conn != nil {
for i := 0; i < 100; i++ {
ch, err := conn.Channel()
if err != nil {
t.Fatalf("expected error only on publish, got error on channel.open: %v", err)
}
for j := 0; j < 10; j++ {
err = ch.Publish("not-existing-exchange", "some-key", false, false, Publishing{Body: []byte("some-data")})
if err, ok := err.(Error); ok {
if err.Code != 504 {
t.Fatalf("expected channel only exception, got: %v", err)
}
}
}
}
}
}
// https://github.com/Noobygames/amqp/issues/43
func TestChannelExceptionWithCloseIssue43(t *testing.T) {
conn := integrationConnection(t, "issue43")
if conn != nil {
go func() {
for err := range conn.NotifyClose(make(chan *Error)) {
t.Log(err.Error())
}
}()
c1, err := conn.Channel()
if err != nil {
panic(err)
}
go func() {
for err := range c1.NotifyClose(make(chan *Error)) {
t.Log("Channel1 Close: " + err.Error())
}
}()
c2, err := conn.Channel()
if err != nil {
panic(err)
}
go func() {
for err := range c2.NotifyClose(make(chan *Error)) {
t.Log("Channel2 Close: " + err.Error())
}
}()
// Cause an asynchronous channel exception causing the server
// to send a "channel.close" method either before or after the next
// asynchronous method.
err = c1.Publish("nonexisting-exchange", "", false, false, Publishing{})
if err != nil {
panic(err)
}
// Receive or send the channel close method, the channel shuts down
// but this expects a channel.close-ok to be received.
c1.Close()
// This ensures that the 2nd channel is unaffected by the channel exception
// on channel 1.
err = c2.ExchangeDeclare("test-channel-still-exists", "direct", false, true, false, false, nil)
if err != nil {
panic(err)
}
}
}
// https://github.com/Noobygames/amqp/issues/7
func TestCorruptedMessageIssue7(t *testing.T) {
messageCount := 1024
c1 := integrationConnection(t, "")
c2 := integrationConnection(t, "")
if c1 != nil && c2 != nil {
defer c1.Close()
defer c2.Close()
pub, err := c1.Channel()
if err != nil {
t.Fatalf("Cannot create Channel")
}
sub, err := c2.Channel()
if err != nil {
t.Fatalf("Cannot create Channel")
}
queue := "test-corrupted-message-regression"
if _, err := pub.QueueDeclare(queue, false, true, false, false, nil); err != nil {
t.Fatalf("Cannot declare")
}
if _, err := sub.QueueDeclare(queue, false, true, false, false, nil); err != nil {
t.Fatalf("Cannot declare")
}
msgs, err := sub.Consume(queue, "", false, false, false, false, nil)
if err != nil {
t.Fatalf("Cannot consume")
}
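		// Publish bodies of many different sizes (0, 7, 14, ... payload bytes);
		// each one carries its own size+CRC header so corruption can be
		// detected on the consuming side.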
for i := 0; i < messageCount; i++ {
err := pub.Publish("", queue, false, false, Publishing{
Body: generateCrc32Random(7 * i),
})
if err != nil {
t.Fatalf("Failed to publish")
}
}
for i := 0; i < messageCount; i++ {
select {
case msg := <-msgs:
assertMessageCrc32(t, msg.Body, fmt.Sprintf("missed match at %d", i))
case <-time.After(200 * time.Millisecond):
t.Fatalf("Timeout on recv")
}
}
}
}
// https://github.com/Noobygames/amqp/issues/136
func TestChannelCounterShouldNotPanicIssue136(t *testing.T) {
if c := integrationConnection(t, "issue136"); c != nil {
defer c.Close()
var wg sync.WaitGroup
// exceeds 65535 channels
for i := 0; i < 8; i++ {
wg.Add(1)
go func(i int) {
for j := 0; j < 10000; j++ {
ch, err := c.Channel()
if err != nil {
t.Fatalf("failed to create channel %d:%d, got: %v", i, j, err)
}
if err := ch.Close(); err != nil {
t.Fatalf("failed to close channel %d:%d, got: %v", i, j, err)
}
}
wg.Done()
}(i)
}
wg.Wait()
}
}
func TestExchangeDeclarePrecondition(t *testing.T) {
c1 := integrationConnection(t, "exchange-double-declare")
c2 := integrationConnection(t, "exchange-double-declare-cleanup")
if c1 != nil && c2 != nil {
defer c1.Close()
defer c2.Close()
ch, err := c1.Channel()
if err != nil {
t.Fatalf("Create channel")
}
exchange := "test-mismatched-redeclare"
err = ch.ExchangeDeclare(
exchange,
"direct", // exchangeType
false, // durable
true, // auto-delete
false, // internal
false, // noWait
nil, // arguments
)
if err != nil {
t.Fatalf("Could not initially declare exchange")
}
err = ch.ExchangeDeclare(
exchange,
"direct",
true, // different durability
true,
false,
false,
nil,
)
if err == nil {
t.Fatalf("Expected to fail a redeclare with different durability, didn't receive an error")
}
if err, ok := err.(Error); ok {
if err.Code != PreconditionFailed {
t.Fatalf("Expected precondition error")
}
if !err.Recover {
t.Fatalf("Expected to be able to recover")
}
}
ch2, _ := c2.Channel()
if err = ch2.ExchangeDelete(exchange, false, false); err != nil {
t.Fatalf("Could not delete exchange: %v", err)
}
}
}
func TestRabbitMQQueueTTLGet(t *testing.T) {
if c := integrationRabbitMQ(t, "ttl"); c != nil {
defer c.Close()
queue := "test.rabbitmq-message-ttl"
channel, err := c.Channel()
if err != nil {
t.Fatalf("channel: %v", err)
}
if _, err = channel.QueueDeclare(
queue,
false,
true,
false,
false,
Table{"x-message-ttl": int32(100)}, // in ms
); err != nil {
t.Fatalf("queue declare: %s", err)
}
channel.Publish("", queue, false, false, Publishing{Body: []byte("ttl")})
time.Sleep(200 * time.Millisecond)
_, ok, err := channel.Get(queue, false)
if ok {
t.Fatalf("Expected the message to expire in 100ms, it didn't expire after 200ms")
}
if err != nil {
t.Fatalf("Failed to get on ttl queue")
}
}
}
func TestRabbitMQQueueNackMultipleRequeue(t *testing.T) {
if c := integrationRabbitMQ(t, "nack"); c != nil {
defer c.Close()
if c.isCapable("basic.nack") {
queue := "test.rabbitmq-basic-nack"
channel, err := c.Channel()
if err != nil {
t.Fatalf("channel: %v", err)
}
if _, err = channel.QueueDeclare(queue, false, true, false, false, nil); err != nil {
t.Fatalf("queue declare: %s", err)
}
channel.Publish("", queue, false, false, Publishing{Body: []byte("1")})
channel.Publish("", queue, false, false, Publishing{Body: []byte("2")})
m1, ok, err := channel.Get(queue, false)
if !ok || err != nil || m1.Body[0] != '1' {
t.Fatalf("could not get message %v", m1)
}
m2, ok, err := channel.Get(queue, false)
if !ok || err != nil || m2.Body[0] != '2' {
t.Fatalf("could not get message %v", m2)
}
m2.Nack(true, true)
m1, ok, err = channel.Get(queue, false)
if !ok || err != nil || m1.Body[0] != '1' {
t.Fatalf("could not get message %v", m1)
}
m2, ok, err = channel.Get(queue, false)
if !ok || err != nil || m2.Body[0] != '2' {
t.Fatalf("could not get message %v", m2)
}
}
}
}
func TestConsumerCancelNotification(t *testing.T) {
c := integrationConnection(t, "consumer cancel notification")
if c != nil {
defer c.Close()
ch, err := c.Channel()
if err != nil {
t.Fatalf("got error on channel.open: %v", err)
}
queue := "test-consumer-cancel-notification"
if _, err := ch.QueueDeclare(queue, false, true, false, false, nil); err != nil {
t.Fatalf("expected to declare a queue: %v", err)
}
if _, err := ch.Consume(queue, "", false, false, false, false, nil); err != nil {
t.Fatalf("basic.consume failed")
}
// consumer cancel notification channel
ccnChan := make(chan string, 1)
ch.NotifyCancel(ccnChan)
if _, err := ch.QueueDelete(queue, false, false, true); err != nil {
t.Fatalf("queue.delete failed: %s", err)
}
select {
case <-ccnChan:
// do nothing
case <-time.After(time.Second * 10):
t.Errorf("basic.cancel wasn't received")
}
// we don't close ccnChan because channel shutdown
// does it
}
}
func TestConcurrentChannelAndConnectionClose(t *testing.T) {
c := integrationConnection(t, "concurrent channel and connection test")
if c != nil {
ch, err := c.Channel()
if err != nil {
t.Fatalf("got error on channel.open: %v", err)
}
var wg sync.WaitGroup
wg.Add(2)
starter := make(chan struct{})
go func() {
defer wg.Done()
<-starter
c.Close()
}()
go func() {
defer wg.Done()
<-starter
ch.Close()
}()
close(starter)
wg.Wait()
}
}
/*
* Support for integration tests
*/
func integrationURLFromEnv() string {
url := os.Getenv("AMQP_URL")
if url == "" {
url = "amqp://"
}
return url
}
func loggedConnection(t *testing.T, conn *Connection, name string) *Connection {
if name != "" {
conn.conn = &logIO{t, name, conn.conn}
}
return conn
}
// Returns a connection to the AMQP if the AMQP_URL environment
// variable is set and a connection can be established.
func integrationConnection(t *testing.T, name string) *Connection {
conn, err := Dial(integrationURLFromEnv())
if err != nil {
t.Fatalf("cannot dial integration server. Is the rabbitmq-server service running? %s", err)
return nil
}
return loggedConnection(t, conn, name)
}
// Returns a connection, channel and declares a queue when the AMQP_URL is in the environment
func integrationQueue(t *testing.T, name string) (*Connection, *Channel) {
if conn := integrationConnection(t, name); conn != nil {
if channel, err := conn.Channel(); err == nil {
if _, err = channel.QueueDeclare(name, false, true, false, false, nil); err == nil {
return conn, channel
}
}
}
return nil, nil
}
// Delegates to integrationConnection and only returns a connection if the
// product is RabbitMQ
func integrationRabbitMQ(t *testing.T, name string) *Connection {
if conn := integrationConnection(t, "connect"); conn != nil {
if server, ok := conn.Properties["product"]; ok && server == "RabbitMQ" {
return conn
}
}
return nil
}
func assertConsumeBody(t *testing.T, messages <-chan Delivery, want []byte) (msg *Delivery) {
select {
case got := <-messages:
		if !bytes.Equal(want, got.Body) {
t.Fatalf("Message body does not match want: %v, got: %v, for: %+v", want, got.Body, got)
}
msg = &got
case <-time.After(200 * time.Millisecond):
t.Fatalf("Timeout waiting for %v", want)
}
return msg
}
// Pulls out the CRC and verifies the remaining content against the CRC
func assertMessageCrc32(t *testing.T, msg []byte, assert string) {
size := binary.BigEndian.Uint32(msg[:4])
crc := crc32.NewIEEE()
crc.Write(msg[8:])
if binary.BigEndian.Uint32(msg[4:8]) != crc.Sum32() {
t.Fatalf("Message does not match CRC: %s", assert)
}
if int(size) != len(msg)-8 {
t.Fatalf("Message does not match size, should=%d, is=%d: %s", size, len(msg)-8, assert)
}
}
// Creates a random body size with a leading 32-bit CRC in network byte order
// that verifies the remaining slice
func generateCrc32Random(size int) []byte {
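	// layout: bytes [0:4] payload size, [4:8] IEEE CRC-32 of the payload,
	// [8:] the payload itself; all integers in network (big-endian) byte order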
msg := make([]byte, size+8)
if _, err := io.ReadFull(devrand.Reader, msg); err != nil {
panic(err)
}
crc := crc32.NewIEEE()
crc.Write(msg[8:])
binary.BigEndian.PutUint32(msg[0:4], uint32(size))
binary.BigEndian.PutUint32(msg[4:8], crc.Sum32())
return msg
}
| [
"\"AMQP_URL\"",
"\"AMQP_URL\""
]
| []
| [
"AMQP_URL"
]
| [] | ["AMQP_URL"] | go | 1 | 0 | |
vendor/github.com/golangci/golangci-lint/pkg/commands/root.go | package commands
import (
"fmt"
"os"
"runtime"
"runtime/pprof"
"github.com/golangci/golangci-lint/pkg/config"
"github.com/golangci/golangci-lint/pkg/logutils"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
func (e *Executor) persistentPreRun(cmd *cobra.Command, args []string) {
if e.cfg.Run.PrintVersion {
fmt.Fprintf(logutils.StdOut, "golangci-lint has version %s built from %s on %s\n", e.version, e.commit, e.date)
os.Exit(0)
}
runtime.GOMAXPROCS(e.cfg.Run.Concurrency)
if e.cfg.Run.CPUProfilePath != "" {
f, err := os.Create(e.cfg.Run.CPUProfilePath)
if err != nil {
e.log.Fatalf("Can't create file %s: %s", e.cfg.Run.CPUProfilePath, err)
}
if err := pprof.StartCPUProfile(f); err != nil {
e.log.Fatalf("Can't start CPU profiling: %s", err)
}
}
}
func (e *Executor) persistentPostRun(cmd *cobra.Command, args []string) {
if e.cfg.Run.CPUProfilePath != "" {
pprof.StopCPUProfile()
}
if e.cfg.Run.MemProfilePath != "" {
f, err := os.Create(e.cfg.Run.MemProfilePath)
if err != nil {
e.log.Fatalf("Can't create file %s: %s", e.cfg.Run.MemProfilePath, err)
}
runtime.GC() // get up-to-date statistics
if err := pprof.WriteHeapProfile(f); err != nil {
e.log.Fatalf("Can't write heap profile: %s", err)
}
}
os.Exit(e.exitCode)
}
func getDefaultConcurrency() int {
if os.Getenv("HELP_RUN") == "1" {
return 8 // to make stable concurrency for README help generating builds
}
return runtime.NumCPU()
}
func (e *Executor) initRoot() {
rootCmd := &cobra.Command{
Use: "golangci-lint",
Short: "golangci-lint is a smart linters runner.",
Long: `Smart, fast linters runner. Run it in cloud for every GitHub pull request on https://golangci.com`,
Run: func(cmd *cobra.Command, args []string) {
if err := cmd.Help(); err != nil {
e.log.Fatalf("Can't run help: %s", err)
}
},
PersistentPreRun: e.persistentPreRun,
PersistentPostRun: e.persistentPostRun,
}
initRootFlagSet(rootCmd.PersistentFlags(), e.cfg, e.needVersionOption())
e.rootCmd = rootCmd
}
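// needVersionOption reports whether build metadata was injected (presumably
// via -ldflags setting e.date at build time); only then is --version exposed.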
func (e *Executor) needVersionOption() bool {
return e.date != ""
}
func initRootFlagSet(fs *pflag.FlagSet, cfg *config.Config, needVersionOption bool) {
fs.BoolVarP(&cfg.Run.IsVerbose, "verbose", "v", false, wh("verbose output"))
var silent bool
fs.BoolVarP(&silent, "silent", "s", false, wh("disables congrats outputs"))
if err := fs.MarkHidden("silent"); err != nil {
panic(err)
}
err := fs.MarkDeprecated("silent",
"now golangci-lint by default is silent: it doesn't print Congrats message")
if err != nil {
panic(err)
}
fs.StringVar(&cfg.Run.CPUProfilePath, "cpu-profile-path", "", wh("Path to CPU profile output file"))
fs.StringVar(&cfg.Run.MemProfilePath, "mem-profile-path", "", wh("Path to memory profile output file"))
fs.IntVarP(&cfg.Run.Concurrency, "concurrency", "j", getDefaultConcurrency(), wh("Concurrency (default NumCPU)"))
if needVersionOption {
fs.BoolVar(&cfg.Run.PrintVersion, "version", false, wh("Print version"))
}
}
| [
"\"HELP_RUN\""
]
| []
| [
"HELP_RUN"
]
| [] | ["HELP_RUN"] | go | 1 | 0 | |
oso/settings/base.py | """
Django settings for oso project.
Generated by 'django-admin startproject' using Django 1.10.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
from .social_config import *
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ['SECRET_KEY']
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'social_django',
'base.apps.BaseConfig',
'inventory.apps.InventoryConfig',
'tosp_auth.apps.TospAuthConfig'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'oso.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'social_django.context_processors.backends',
'social_django.context_processors.login_redirect',
],
},
},
]
WSGI_APPLICATION = 'oso.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
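# Travis CI provisions a local PostgreSQL service reachable as the
# passwordless "postgres" superuser, so no credentials are read from the
# environment there.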
if 'TRAVIS' in os.environ:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'travis_ci_test',
'USER': 'postgres',
'PASSWORD': '',
'HOST': 'localhost',
'PORT': '',
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.environ['DB_NAME'],
'USER': os.environ['DB_USER'],
'PASSWORD': os.environ['DB_PASSWORD'],
'HOST': os.environ['DB_HOST'],
'PORT': os.environ['DB_PORT'],
}
}
AUTHENTICATION_BACKENDS = (
'social_core.backends.facebook.FacebookOAuth2',
'social_core.backends.google.GoogleOAuth2',
'django.contrib.auth.backends.ModelBackend',
)
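# Backends are tried in this order; keeping ModelBackend last preserves
# ordinary username/password logins alongside the social providers.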
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'es'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
# LOGIN REDIRECT URL
LOGIN_REDIRECT_URL = '/'
| []
| []
| [
"DB_PASSWORD",
"DB_HOST",
"DB_PORT",
"DB_NAME",
"SECRET_KEY",
"DB_USER"
]
| [] | ["DB_PASSWORD", "DB_HOST", "DB_PORT", "DB_NAME", "SECRET_KEY", "DB_USER"] | python | 6 | 0 | |
pam.go | package main
import (
"os"
)
type PAMEvent struct {
Username string
RemoteHost string
SessionType string
}
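// NewPAMEvent captures the session details that the PAM stack exports to
// helper processes (e.g. via pam_exec): the user, remote host and session type.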
func NewPAMEvent() PAMEvent {
var p PAMEvent
p.Username = os.Getenv("PAM_USER")
p.RemoteHost = os.Getenv("PAM_RHOST")
p.SessionType = os.Getenv("PAM_TYPE")
return p
}
| [
"\"PAM_USER\"",
"\"PAM_RHOST\"",
"\"PAM_TYPE\""
]
| []
| [
"PAM_RHOST",
"PAM_USER",
"PAM_TYPE"
]
| [] | ["PAM_RHOST", "PAM_USER", "PAM_TYPE"] | go | 3 | 0 | |
reinforcement_learning/common/sagemaker_rl/coach_launcher.py | from rl_coach.agents.clipped_ppo_agent import ClippedPPOAgentParameters
from rl_coach.agents.policy_gradients_agent import PolicyGradientsAgentParameters
from rl_coach.graph_managers.basic_rl_graph_manager import BasicRLGraphManager
from rl_coach.graph_managers.graph_manager import ScheduleParameters
from rl_coach.base_parameters import VisualizationParameters, TaskParameters, Frameworks
from rl_coach.utils import short_dynamic_import
from rl_coach.core_types import SelectedPhaseOnlyDumpFilter, MaxDumpFilter, RunPhase
import rl_coach.core_types
from rl_coach import logger
from rl_coach.logger import screen
import argparse
import copy
import logging
import os
import sys
import shutil
import glob
import re
from .configuration_list import ConfigurationList
from rl_coach.coach import CoachLauncher
screen.set_use_colors(False) # Simple text logging so it looks good in CloudWatch
class CoachConfigurationList(ConfigurationList):
"""Helper Object for converting CLI arguments (or SageMaker hyperparameters)
into Coach configuration.
"""
# Being security-paranoid and not instantiating any arbitrary string the customer passes in
ALLOWED_TYPES = {
'Frames': rl_coach.core_types.Frames,
'EnvironmentSteps': rl_coach.core_types.EnvironmentSteps,
'EnvironmentEpisodes': rl_coach.core_types.EnvironmentEpisodes,
'TrainingSteps': rl_coach.core_types.TrainingSteps,
'Time': rl_coach.core_types.Time,
}
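    # A value such as "EnvironmentSteps(10000)" can then be instantiated
    # safely (presumably when ConfigurationList parses it), since only these
    # schedule types are eligible for construction.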
class SageMakerCoachPresetLauncher(CoachLauncher):
"""Base class for training RL tasks using RL-Coach.
Customers subclass this to define specific kinds of workloads, overriding these methods as needed.
"""
def __init__(self):
super().__init__()
self.hyperparams = None
def get_config_args(self, parser: argparse.ArgumentParser) -> argparse.Namespace:
"""Overrides the default CLI parsing.
Sets the configuration parameters for what a SageMaker run should do.
Note, this does not support the "play" mode.
"""
# first, convert the parser to a Namespace object with all default values.
empty_arg_list = []
args, _ = parser.parse_known_args(args=empty_arg_list)
parser = self.sagemaker_argparser()
sage_args, unknown = parser.parse_known_args()
# Now fill in the args that we care about.
sagemaker_job_name = os.environ.get("sagemaker_job_name", "sagemaker-experiment")
args.experiment_name = logger.get_experiment_name(sagemaker_job_name)
# Override experiment_path used for outputs
args.experiment_path = '/opt/ml/output/intermediate'
rl_coach.logger.experiment_path = '/opt/ml/output/intermediate' # for gifs
args.checkpoint_save_dir = '/opt/ml/output/data/checkpoint'
args.checkpoint_save_secs = 10 # should avoid hardcoding
# onnx for deployment for mxnet (not tensorflow)
save_model = (sage_args.save_model == 1)
backend = os.getenv('COACH_BACKEND', 'tensorflow')
if save_model and backend == "mxnet":
args.export_onnx_graph = True
args.no_summary = True
args.num_workers = sage_args.num_workers
args.framework = Frameworks[backend]
args.preset = sage_args.RLCOACH_PRESET
# args.apply_stop_condition = True # uncomment for old coach behaviour
self.hyperparameters = CoachConfigurationList()
if len(unknown) % 2 == 1:
raise ValueError("Odd number of command-line arguments specified. Key without value.")
for i in range(0, len(unknown), 2):
name = unknown[i]
if name.startswith("--"):
name = name[2:]
else:
raise ValueError("Unknown command-line argument %s" % name)
val = unknown[i+1]
self.map_hyperparameter(name, val)
return args
def map_hyperparameter(self, name, value):
"""This is a good method to override where customers can specify custom shortcuts
for hyperparameters. Default takes everything starting with "rl." and sends it
straight to the graph manager.
"""
if name.startswith("rl."):
self.apply_hyperparameter(name, value)
else:
raise ValueError("Unknown hyperparameter %s" % name)
def apply_hyperparameter(self, name, value):
"""Save this hyperparameter to be applied to the graph_manager object when
it's ready.
"""
print("Applying RL hyperparameter %s=%s" % (name,value))
self.hyperparameters.store(name, value)
def default_preset_name(self):
"""
Sub-classes will typically return a single hard-coded string.
"""
try:
#TODO: remove this after converting all samples.
default_preset = self.DEFAULT_PRESET
screen.warning("Deprecated configuration of default preset. Please implement default_preset_name()")
return default_preset
except:
pass
raise NotImplementedError("Sub-classes must specify the name of the default preset "+
"for this RL problem. This will be the name of a python "+
"file (without .py) that defines a graph_manager variable")
def sagemaker_argparser(self) -> argparse.ArgumentParser:
"""
Expose only the CLI arguments that make sense in the SageMaker context.
"""
parser = argparse.ArgumentParser()
# Arguably this would be cleaner if we copied the config from the base class argparser.
parser.add_argument('-n', '--num_workers',
help="(int) Number of workers for multi-process based agents, e.g. A3C",
default=1,
type=int)
parser.add_argument('-p', '--RLCOACH_PRESET',
help="(string) Name of the file with the RLCoach preset",
default=self.default_preset_name(),
type=str)
parser.add_argument('--save_model',
help="(int) Flag to save model artifact after training finish",
default=0,
type=int)
return parser
def path_of_main_launcher(self):
"""
A bit of python magic to find the path of the file that launched the current process.
"""
main_mod = sys.modules['__main__']
try:
launcher_file = os.path.abspath(sys.modules['__main__'].__file__)
return os.path.dirname(launcher_file)
except AttributeError:
# If __main__.__file__ is missing, then we're probably in an interactive python shell
return os.getcwd()
def preset_from_name(self, preset_name):
preset_path = self.path_of_main_launcher()
print("Loading preset %s from %s" % (preset_name, preset_path))
preset_path = os.path.join(self.path_of_main_launcher(),preset_name) + '.py:graph_manager'
graph_manager = short_dynamic_import(preset_path, ignore_module_case=True)
return graph_manager
def get_graph_manager_from_args(self, args):
# First get the graph manager for the customer-specified (or default) preset
graph_manager = self.preset_from_name(args.preset)
# Now override whatever config is specified in hyperparameters.
self.hyperparameters.apply_subset(graph_manager, "rl.")
# Set framework
# Note: Some graph managers (e.g. HAC preset) create multiple agents and the attribute is called agents_params
if hasattr(graph_manager, 'agent_params'):
for network_parameters in graph_manager.agent_params.network_wrappers.values():
network_parameters.framework = args.framework
elif hasattr(graph_manager, 'agents_params'):
for ap in graph_manager.agents_params:
for network_parameters in ap.network_wrappers.values():
network_parameters.framework = args.framework
return graph_manager
def _save_tf_model(self):
ckpt_dir = '/opt/ml/output/data/checkpoint'
model_dir = '/opt/ml/model'
import tensorflow as tf # importing tensorflow here so that MXNet docker image is compatible with this file.
# Re-Initialize from the checkpoint so that you will have the latest models up.
tf.train.init_from_checkpoint(ckpt_dir,
{'main_level/agent/online/network_0/': 'main_level/agent/online/network_0'})
tf.train.init_from_checkpoint(ckpt_dir,
{'main_level/agent/online/network_1/': 'main_level/agent/online/network_1'})
# Create a new session with a new tf graph.
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
sess.run(tf.global_variables_initializer()) # initialize the checkpoint.
# This is the node that will accept the input.
input_nodes = tf.get_default_graph().get_tensor_by_name('main_level/agent/main/online/' + \
'network_0/observation/observation:0')
# This is the node that will produce the output.
output_nodes = tf.get_default_graph().get_operation_by_name('main_level/agent/main/online/' + \
'network_1/ppo_head_0/policy')
# Save the model as a servable model.
tf.saved_model.simple_save(session=sess,
export_dir='model',
inputs={"observation": input_nodes},
outputs={"policy": output_nodes.outputs[0]})
# Move to the appropriate folder. Don't mind the directory, this just works.
# rl-cart-pole is the name of the model. Remember it.
shutil.move('model/', model_dir + '/model/tf-model/00000001/')
# EASE will pick it up and upload to the right path.
print("Success")
def _save_onnx_model(self):
from .onnx_utils import fix_onnx_model
ckpt_dir = '/opt/ml/output/data/checkpoint'
model_dir = '/opt/ml/model'
# find latest onnx file
# currently done by name, expected to be changed in future release of coach.
glob_pattern = os.path.join(ckpt_dir, '*.onnx')
onnx_files = [file for file in glob.iglob(glob_pattern, recursive=True)]
if len(onnx_files) > 0:
            extract_step = lambda string: int(re.search(r'/(\d*)_Step.*', string, re.IGNORECASE).group(1))
onnx_files.sort(key=extract_step)
latest_onnx_file = onnx_files[-1]
# move to model directory
filepath_from = os.path.abspath(latest_onnx_file)
filepath_to = os.path.join(model_dir, "model.onnx")
shutil.move(filepath_from, filepath_to)
fix_onnx_model(filepath_to)
else:
screen.warning("No ONNX files found in {}".format(ckpt_dir))
@classmethod
def train_main(cls):
"""Entrypoint for training.
Parses command-line arguments and starts training.
"""
trainer = cls()
trainer.launch()
# Create model artifact for model.tar.gz
parser = trainer.sagemaker_argparser()
sage_args, unknown = parser.parse_known_args()
if sage_args.save_model == 1:
backend = os.getenv('COACH_BACKEND', 'tensorflow')
if backend == 'tensorflow':
trainer._save_tf_model()
if backend == 'mxnet':
trainer._save_onnx_model()
class SageMakerCoachLauncher(SageMakerCoachPresetLauncher):
"""
Older version of the launcher that doesn't use preset, but instead effectively has a single preset built in.
"""
def __init__(self):
super().__init__()
screen.warning("DEPRECATION WARNING: Please switch to SageMakerCoachPresetLauncher")
#TODO: Remove this whole class when nobody's using it any more.
def define_environment(self):
        raise NotImplementedError("Sub-class must define environment e.g. GymVectorEnvironment(level='your_module:YourClass')")
def get_graph_manager_from_args(self, args):
"""Returns the GraphManager object for coach to use to train by calling improve()
"""
# NOTE: TaskParameters are not configurable at this time.
# Visualization
vis_params = VisualizationParameters()
self.config_visualization(vis_params)
self.hyperparameters.apply_subset(vis_params, "vis_params.")
# Schedule
schedule_params = ScheduleParameters()
self.config_schedule(schedule_params)
self.hyperparameters.apply_subset(schedule_params, "schedule_params.")
# Agent
agent_params = self.define_agent()
self.hyperparameters.apply_subset(agent_params, "agent_params.")
# Environment
env_params = self.define_environment()
self.hyperparameters.apply_subset(env_params, "env_params.")
graph_manager = BasicRLGraphManager(
agent_params=agent_params,
env_params=env_params,
schedule_params=schedule_params,
vis_params=vis_params,
)
return graph_manager
def config_schedule(self, schedule_params):
pass
def define_agent(self):
raise NotImplementedError("Subclass must create define_agent() method which returns an AgentParameters object. e.g.\n" \
" return rl_coach.agents.dqn_agent.DQNAgentParameters()");
def config_visualization(self, vis_params):
vis_params.dump_gifs = True
vis_params.video_dump_methods = [SelectedPhaseOnlyDumpFilter(RunPhase.TEST), MaxDumpFilter()]
vis_params.print_networks_summary = True
return vis_params
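# A minimal usage sketch (hypothetical class and level names; commented out so
# it has no runtime effect): subclasses supply an agent and an environment and
# run through the train_main() entrypoint.
# from rl_coach.agents.dqn_agent import DQNAgentParameters
# from rl_coach.environments.gym_environment import GymVectorEnvironment
# class CartpoleLauncher(SageMakerCoachLauncher):
#     def define_agent(self):
#         return DQNAgentParameters()
#     def define_environment(self):
#         return GymVectorEnvironment(level='CartPole-v0')
# if __name__ == '__main__':
#     CartpoleLauncher.train_main()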
| []
| []
| [
"COACH_BACKEND",
"sagemaker_job_name"
]
| [] | ["COACH_BACKEND", "sagemaker_job_name"] | python | 2 | 0 | |
src/base/base.go | // MIT License
//
// Copyright (c) 2020 CADCloud
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package base
import (
"bytes"
"crypto/hmac"
"crypto/sha1"
"crypto/tls"
"encoding/base64"
"fmt"
"golang.org/x/crypto/bcrypt"
"io/ioutil"
"log"
"math/rand"
"net"
"net/http"
"net/mail"
"net/smtp"
"os"
"strings"
"time"
)
// MinIOServer defines the basic structure used to reach a user's minio daemon
type MinIOServer struct {
URI string
Port string
}
// MaxMinIOServer defines the maximum number of minio daemons per storage server
const MaxMinIOServer = 100
// MinIOServerBasePort defines the TCP port of the first minio daemon (then increments are performed)
const MinIOServerBasePort = 9400
// User defines a user entry
type User struct {
Nickname string
Password string
TokenType string
TokenAuth string
TokenSecret string
CreationDate string
Lastlogin string
Email string
Active int
ValidationString string
Ports string
Server string
}
var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789+/")
var simpleLetters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")
var randInit = 0
func randAlphaSlashPlus(n int) string {
	if randInit == 0 {
		rand.Seed(time.Now().UnixNano())
		randInit = 1
	}
b := make([]rune, n)
for i := range b {
b[i] = letters[rand.Intn(len(letters))]
}
return string(b)
}
func randAlpha(n int) string {
	if randInit == 0 {
		rand.Seed(time.Now().UnixNano())
		randInit = 1
	}
b := make([]rune, n)
for i := range b {
b[i] = simpleLetters[rand.Intn(len(simpleLetters))]
}
return string(b)
}
// GenerateAccountACKLink generates a unique random string used in the validation email
func GenerateAccountACKLink(length int) string {
return randAlpha(length)
}
// GenerateAuthToken generates an initial random authentication token for minio servers
func GenerateAuthToken(TokenType string, length int) string {
return randAlphaSlashPlus(length)
}
// HashPassword computes the bcrypt hash of a password
func HashPassword(password string) (string, error) {
bytes, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
return string(bytes), err
}
// CheckPasswordHash returns true if the password matches the given bcrypt hash
func CheckPasswordHash(password, hash string) bool {
err := bcrypt.CompareHashAndPassword([]byte(hash), []byte(password))
return err == nil
}
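// Example (illustrative only):
//
//	hash, _ := HashPassword("s3cret")
//	ok := CheckPasswordHash("s3cret", hash) // ok == true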
// SMTP settings used for sending emails
var smtpServer = os.Getenv("SMTP_SERVER") // example: smtp.google.com:587
var smtpAccount = os.Getenv("SMTP_ACCOUNT")
var smtpPassword = os.Getenv("SMTP_PASSWORD")
// SendEmail sends a validation email to the given address
func SendEmail(email string, subject string, validationString string) {
servername := smtpServer
host, _, _ := net.SplitHostPort(servername)
	// If the login is short (i.e. it does not contain the domain name of the SMTP server), append the host.
shortName := strings.Split(smtpAccount, "@")
var from mail.Address
if len(shortName) > 1 {
from = mail.Address{"", smtpAccount}
} else {
from = mail.Address{"", smtpAccount + "@" + host}
}
to := mail.Address{"", email}
subj := subject
body := validationString
// Setup headers
headers := make(map[string]string)
headers["From"] = from.String()
headers["To"] = to.String()
headers["Subject"] = subj
// Setup message
message := ""
for k, v := range headers {
message += fmt.Sprintf("%s: %s\r\n", k, v)
}
message += "\r\n" + body
// Connect to the SMTP Server
auth := smtp.PlainAuth("", smtpAccount, smtpPassword, host)
// TLS config
tlsconfig := &tls.Config{
InsecureSkipVerify: true,
ServerName: host,
}
// uncomment the following line to use a pure SSL connection without STARTTLS
//conn, err := tls.Dial("tcp", servername, tlsconfig)
conn, err := smtp.Dial(servername)
if err != nil {
log.Panic(err)
}
	// comment out this block to use a pure SSL connection instead of STARTTLS
	if err = conn.StartTLS(tlsconfig); err != nil {
		log.Panic(err)
	}
// Auth
if err = conn.Auth(auth); err != nil {
log.Panic(err)
}
// To && From
if err = conn.Mail(from.Address); err != nil {
log.Panic(err)
}
if err = conn.Rcpt(to.Address); err != nil {
log.Panic(err)
}
// Data
w, err := conn.Data()
if err != nil {
log.Panic(err)
}
_, err = w.Write([]byte(message))
if err != nil {
log.Panic(err)
}
err = w.Close()
if err != nil {
log.Panic(err)
}
	if err = conn.Quit(); err != nil {
		log.Printf("smtp error: %s", err)
	}
}
// Request builds and performs an HTTP request signed in the AWS style for the given method, and returns the associated response on success
func Request(method string, URI string, Path string, Data string, content []byte, query string, Key string, SecretKey string) (*http.Response, error) {
client := &http.Client{}
myDate := time.Now().UTC().Format(http.TimeFormat)
myDate = strings.Replace(myDate, "GMT", "+0000", -1)
var req *http.Request
if content != nil {
req, _ = http.NewRequest(method, URI, bytes.NewReader(content))
} else {
req, _ = http.NewRequest(method, URI, nil)
}
stringToSign := method + "\n\n" + Data + "\n" + myDate + "\n" + Path
mac := hmac.New(sha1.New, []byte(SecretKey))
mac.Write([]byte(stringToSign))
expectedMAC := mac.Sum(nil)
signature := base64.StdEncoding.EncodeToString(expectedMAC)
req.Header.Set("Authorization", "AWS "+Key+":"+signature)
req.Header.Set("Date", myDate)
req.Header.Set("Content-Type", Data)
if len(content) > 0 {
req.ContentLength = int64(len(content))
}
req.URL.RawQuery = query
// That is a new request so let's do it
var response *http.Response
var err error
response, err = client.Do(req)
return response, err
}
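// Example (illustrative; the endpoint and keys below are placeholders):
//
//	resp, err := Request("GET", "http://127.0.0.1:9400/bucket", "/bucket",
//		"application/octet-stream", nil, "", "accessKey", "secretKey")
//	if err == nil {
//		defer resp.Body.Close()
//	}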
// HTTPGetRequest performs a basic GET request and returns the response body as a string
func HTTPGetRequest(request string) string {
resp, err := http.Get(request)
if err != nil {
log.Fatalln(err)
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Fatalln(err)
}
return (string(body))
}
// HTTPDeleteRequest performs a basic DELETE request
func HTTPDeleteRequest(request string) {
client := &http.Client{}
content := []byte{0}
httprequest, err := http.NewRequest("DELETE", request, bytes.NewReader(content))
httprequest.ContentLength = 0
response, err := client.Do(httprequest)
if err != nil {
log.Fatal(err)
} else {
defer response.Body.Close()
_, err := ioutil.ReadAll(response.Body)
if err != nil {
log.Fatal(err)
}
}
}
// HTTPPutRequest performs a basic PUT request and returns the response body as a string
func HTTPPutRequest(request string, content []byte, contentType string) string {
print("Running a PUT Request \n")
client := &http.Client{}
httprequest, err := http.NewRequest("PUT", request, bytes.NewReader(content))
httprequest.Header.Set("Content-Type", contentType)
httprequest.ContentLength = int64(len(content))
response, err := client.Do(httprequest)
if err != nil {
log.Fatal(err)
} else {
defer response.Body.Close()
contents, err := ioutil.ReadAll(response.Body)
if err != nil {
log.Fatal(err)
}
return string(contents)
}
return ""
}
// HTTPGetBody retrieves the body of an HTTP request and restores it so it can be read again later
func HTTPGetBody(r *http.Request) []byte {
buf, _ := ioutil.ReadAll(r.Body)
rdr1 := ioutil.NopCloser(bytes.NewBuffer(buf))
rdr2 := ioutil.NopCloser(bytes.NewBuffer(buf))
b := new(bytes.Buffer)
b.ReadFrom(rdr1)
r.Body = rdr2
return (b.Bytes())
}
| [
"\"SMTP_SERVER\"",
"\"SMTP_ACCOUNT\"",
"\"SMTP_PASSWORD\""
]
| []
| [
"SMTP_ACCOUNT",
"SMTP_SERVER",
"SMTP_PASSWORD"
]
| [] | ["SMTP_ACCOUNT", "SMTP_SERVER", "SMTP_PASSWORD"] | go | 3 | 0 | |
src/main/java/mutualauth/TestJettyHttpClient.java | package mutualauth;
import com.sforce.soap.partner.Connector;
import com.sforce.ws.ConnectorConfig;
import javax.net.ssl.KeyManagerFactory;
import javax.net.ssl.SSLContext;
import java.io.FileInputStream;
import java.net.URL;
import java.security.KeyStore;
import org.eclipse.jetty.client.HttpClient;
import org.eclipse.jetty.util.ssl.SslContextFactory;
public class TestJettyHttpClient {
private static final String USERNAME = System.getenv("USERNAME");
private static final String PASSWORD = System.getenv("PASSWORD");
private static final String KEYSTORE_PATH = System.getenv("KEYSTORE_PATH");
private static final String KEYSTORE_PASSWORD = System.getenv("KEYSTORE_PASSWORD");
private static final int MUTUAL_AUTHENTICATION_PORT = 8443;
private static final String API_VERSION = "41.0";
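    // Example invocation (hypothetical values; all four variables must be set):
    //   USERNAME=user@example.com PASSWORD=secret+token \
    //   KEYSTORE_PATH=/path/to/client.p12 KEYSTORE_PASSWORD=changeit \
    //   java mutualauth.TestJettyHttpClient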
public static void main(String[] args) throws Exception {
KeyStore ks = KeyStore.getInstance("PKCS12");
try (FileInputStream fis = new FileInputStream(KEYSTORE_PATH)) {
ks.load(fis, KEYSTORE_PASSWORD.toCharArray());
}
KeyManagerFactory kmf = KeyManagerFactory.getInstance("SunX509");
kmf.init(ks, KEYSTORE_PASSWORD.toCharArray());
SSLContext sslContext = SSLContext.getInstance("TLS");
sslContext.init(kmf.getKeyManagers(), null, null);
// Login as normal to get instance URL and session token
ConnectorConfig config = new ConnectorConfig();
config.setAuthEndpoint("https://login.salesforce.com/services/Soap/u/" + API_VERSION);
config.setUsername(USERNAME);
config.setPassword(PASSWORD);
// Uncomment for more detail on what's going on!
//config.setTraceMessage(true);
// This will set the session info in config
Connector.newConnection(config);
// Display some current settings
System.out.println("Auth EndPoint: "+config.getAuthEndpoint());
System.out.println("Service EndPoint: "+config.getServiceEndpoint());
System.out.println("Username: "+config.getUsername());
System.out.println("SessionId: "+config.getSessionId());
String instance = new URL(config.getServiceEndpoint()).getHost();
String sessionId = config.getSessionId();
// URL to get a list of REST services
String url = "https://" + instance + ":" + MUTUAL_AUTHENTICATION_PORT
+ "/services/data/v" + API_VERSION;
SslContextFactory sslContextFactory = new SslContextFactory();
sslContextFactory.setKeyStore(ks);
// Need to set password in the SSLContextFactory even though it's set in the KeyStore
sslContextFactory.setKeyStorePassword(KEYSTORE_PASSWORD);
HttpClient httpClient = new HttpClient(sslContextFactory);
httpClient.start();
String response = httpClient.newRequest(url)
.header("Authorization", "OAuth " + sessionId)
.header("X-PrettyPrint", "1")
.send()
.getContentAsString();
System.out.println(response);
httpClient.stop();
}
}
| [
"\"USERNAME\"",
"\"PASSWORD\"",
"\"KEYSTORE_PATH\"",
"\"KEYSTORE_PASSWORD\""
]
| []
| [
"KEYSTORE_PASSWORD",
"USERNAME",
"PASSWORD",
"KEYSTORE_PATH"
]
| [] | ["KEYSTORE_PASSWORD", "USERNAME", "PASSWORD", "KEYSTORE_PATH"] | java | 4 | 0 | |
src/nile/utils/debug.py | """Functions used to help debug a rejected transaction."""
import json
import logging
import os
import re
import subprocess
import time
from nile.common import (
BUILD_DIRECTORY,
DEPLOYMENTS_FILENAME,
GATEWAYS,
RETRY_AFTER_SECONDS,
)
def debug(tx_hash, network, contracts_file=None):
"""Use available contracts to help locate the error in a rejected transaction."""
command = ["starknet", "tx_status", "--hash", tx_hash]
if network == "mainnet":
os.environ["STARKNET_NETWORK"] = "alpha-mainnet"
elif network == "goerli":
os.environ["STARKNET_NETWORK"] = "alpha-goerli"
else:
command.append(f"--feeder_gateway_url={GATEWAYS.get(network)}")
logging.info(
"⏳ Querying the network to check transaction status and identify contracts..."
)
while True:
receipt = json.loads(subprocess.check_output(command))
status = receipt["tx_status"]
if status == "REJECTED":
break
output = f"Transaction status: {status}"
if status.startswith("ACCEPTED"):
logging.info(f"✅ {output}. No error in transaction.")
return
logging.info(f"🕒 {output}. Trying again in a moment...")
time.sleep(RETRY_AFTER_SECONDS)
error_message = receipt["tx_failure_reason"]["error_message"]
addresses = set(
int(address, 16)
for address in re.findall("0x[\\da-f]{1,64}", str(error_message))
)
if not addresses:
logging.warning(
"🛑 The transaction was rejected but no contract address was identified "
"in the error message."
)
logging.info(f"Error message:\n{error_message}")
return error_message
file = contracts_file or f"{network}.{DEPLOYMENTS_FILENAME}"
# contracts_file should already link to compiled contracts and not ABIs
to_contract = (lambda x: x) if contracts_file else _abi_to_build_path
contracts = _locate_error_lines_with_abis(file, addresses, to_contract)
if not contracts:
logging.warning(
"🛑 The transaction was rejected but no contract data is locally "
"available to improve the error message."
)
logging.info(error_message)
return error_message
command += ["--contracts", ",".join(contracts), "--error_message"]
logging.info(f"🧾 Found contracts: {contracts}")
logging.info("⏳ Querying the network with identified contracts...")
output = subprocess.check_output(command)
logging.info(f"🧾 Error message:\n{output.decode()}")
return output
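# Example (illustrative): inspect a rejected transaction on the goerli network;
# the hash below is a placeholder.
#   error_message = debug("0x1234abcd", "goerli")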
def _abi_to_build_path(filename):
return os.path.join(BUILD_DIRECTORY, os.path.basename(filename))
def _locate_error_lines_with_abis(file, addresses, to_contract):
contracts = []
with open(file) as file_stream:
for line_idx, line in enumerate(file_stream):
try:
line_address, abi, *_ = line.split(":")
except ValueError:
logging.warning(
f"⚠ Skipping misformatted line #{line_idx+1} in {file}."
)
continue
if int(line.split(":")[0], 16) in addresses:
contracts.append(f"{line_address}:{to_contract(abi.rstrip())}")
return contracts
| []
| []
| [
"STARKNET_NETWORK"
]
| [] | ["STARKNET_NETWORK"] | python | 1 | 0 | |
src/main/groovy/com/blackducksoftware/integration/hub/detect/bomtool/docker/DockerProperties.java | /**
* hub-detect
*
* Copyright (C) 2018 Black Duck Software, Inc.
* http://www.blackducksoftware.com/
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.blackducksoftware.integration.hub.detect.bomtool.docker;
import com.blackducksoftware.integration.hub.detect.DetectConfiguration;
import org.apache.commons.lang3.StringUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.Map;
import java.util.Properties;
@Component
public class DockerProperties {
@Autowired
DetectConfiguration detectConfiguration;
public void populatePropertiesFile(final File dockerPropertiesFile, final File bomToolOutputDirectory) throws IOException, FileNotFoundException {
final Properties dockerProperties = new Properties();
dockerProperties.setProperty("logging.level.com.blackducksoftware", this.detectConfiguration.getLoggingLevel());
dockerProperties.setProperty("upload.bdio", "false");
dockerProperties.setProperty("no.prompt", "true");
dockerProperties.setProperty("output.path", bomToolOutputDirectory.getAbsolutePath());
dockerProperties.setProperty("output.include.containerfilesystem", "true");
dockerProperties.setProperty("logging.level.com.blackducksoftware", this.detectConfiguration.getLoggingLevel());
dockerProperties.setProperty("phone.home", "false");
for (final String additionalProperty : this.detectConfiguration.getAdditionalDockerPropertyNames()) {
final String dockerKey = getKeyWithoutPrefix(additionalProperty, DetectConfiguration.DOCKER_PROPERTY_PREFIX);
addDockerProperty(dockerProperties, additionalProperty, dockerKey);
}
dockerProperties.store(new FileOutputStream(dockerPropertiesFile), "");
}
public void populateEnvironmentVariables(final Map<String, String> environmentVariables, final String dockerExecutablePath) throws IOException {
String path = System.getenv("PATH");
final File dockerExecutableFile = new File(dockerExecutablePath);
path += File.pathSeparator + dockerExecutableFile.getParentFile().getCanonicalPath();
environmentVariables.put("PATH", path);
environmentVariables.put("DOCKER_INSPECTOR_VERSION", this.detectConfiguration.getDockerInspectorVersion());
final String detectCurlOpts = System.getenv("DETECT_CURL_OPTS");
if (StringUtils.isNotBlank(detectCurlOpts)) {
environmentVariables.put("DOCKER_INSPECTOR_CURL_OPTS", detectCurlOpts);
}
environmentVariables.put("BLACKDUCK_HUB_PROXY_HOST", this.detectConfiguration.getHubProxyHost());
environmentVariables.put("BLACKDUCK_HUB_PROXY_PORT", this.detectConfiguration.getHubProxyPort());
environmentVariables.put("BLACKDUCK_HUB_PROXY_USERNAME", this.detectConfiguration.getHubProxyUsername());
environmentVariables.put("BLACKDUCK_HUB_PROXY_PASSWORD", this.detectConfiguration.getHubProxyPassword());
environmentVariables.put("BLACKDUCK_HUB_PROXY_NTLM_DOMAIN", this.detectConfiguration.getHubProxyNtlmDomain());
environmentVariables.put("BLACKDUCK_HUB_PROXY_NTLM_WORKSTATION", this.detectConfiguration.getHubProxyNtlmWorkstation());
for (final Map.Entry<String, String> environmentProperty : System.getenv().entrySet()) {
final String key = environmentProperty.getKey();
if (key != null && key.startsWith(DetectConfiguration.DOCKER_ENVIRONMENT_PREFIX)) {
environmentVariables.put(getKeyWithoutPrefix(key, DetectConfiguration.DOCKER_ENVIRONMENT_PREFIX), environmentProperty.getValue());
}
}
}
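    // Example (illustrative; assumes DOCKER_ENVIRONMENT_PREFIX is
    // "DETECT_DOCKER_PASSTHROUGH_"): an environment variable such as
    // DETECT_DOCKER_PASSTHROUGH_SERVICE_TIMEOUT=240000 would be forwarded to
    // the inspector as SERVICE_TIMEOUT=240000.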
private String getKeyWithoutPrefix(final String key, final String prefix) {
return key.substring(prefix.length());
}
private void addDockerProperty(final Properties dockerProperties, final String key, final String dockerKey) {
final String value = this.detectConfiguration.getDetectProperty(key);
dockerProperties.setProperty(dockerKey, value);
}
}
| [
"\"PATH\"",
"\"DETECT_CURL_OPTS\""
]
| []
| [
"DETECT_CURL_OPTS",
"PATH"
]
| [] | ["DETECT_CURL_OPTS", "PATH"] | java | 2 | 0 | |
clearstack/run_setup.py | #
# Copyright (c) 2015 Intel Corporation
#
# Author: Alberto Murillo <[email protected]>
# Author: Julio Montes <[email protected]>
# Author: Victor Morales <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import os
from importlib import import_module
from clearstack import utils
from clearstack.controller import Controller
from clearstack.common.util import LOG
def load_plugins():
""" return if plugins already are loaded """
if Controller.get().get_all_plugins():
return
path = "plugins"
base_module = "clearstack.{0}".format(path)
directory = "{0}/{1}".format(os.path.dirname(
os.path.realpath(__file__)), path)
rx_val = r'^[a-zA-Z]+_[0-9]{3}\.py$'
files = [fd for fd in os.listdir(directory) if re.match(rx_val, fd)]
for fd in sorted(files, key=_get_weight):
plugin = import_module("{0}.{1}".format(base_module, fd.split(".")[0]))
Controller.get().add_plugin(plugin)
try:
getattr(plugin, "init_config")()
except AttributeError:
LOG.debug("missing attribute: init_config in %s",
plugin.__file__)
def add_arguments(parser):
load_plugins()
for group in Controller.get().get_all_groups():
for argument in group.get_all_arguments():
parser.add_argument("--{0}".format(argument.cmd_option),
action="store",
dest=argument.conf_name,
help=argument.description,
choices=argument.option_list)
def load_sequences():
load_plugins()
for plugin in Controller.get().get_all_plugins():
try:
getattr(plugin, "init_sequences")()
except AttributeError:
LOG.debug("missing attribute: init_sequences in %s",
plugin.__file__)
def run_all_sequences():
load_sequences()
try:
utils.copy_resources()
except Exception as e:
raise e
try:
Controller.get().run_all_sequences()
except Exception as e:
raise e
finally:
utils.get_logs()
def generate_admin_openrc():
conf = Controller.get().CONF
home = os.getenv('HOME')
with open("{0}/admin-openrc.sh".format(home), "w") as f:
f.write('export OS_PROJECT_DOMAIN_ID=default\n')
f.write('export OS_USER_DOMAIN_ID=default\n')
f.write('export OS_PROJECT_NAME=admin\n')
f.write('export OS_USERNAME="admin"\n')
f.write('export OS_TENANT_NAME="admin"\n')
f.write('export OS_AUTH_URL=http://{0}:35357/v3\n'
.format(conf['CONFIG_CONTROLLER_HOST']))
f.write('export OS_REGION_NAME="{0}"\n'
.format(conf['CONFIG_KEYSTONE_REGION']))
f.write('export OS_PASSWORD={0}\n'
.format(conf['CONFIG_KEYSTONE_ADMIN_PW']))
f.write('export OS_IDENTITY_API_VERSION=3\n')
def _get_weight(item):
tmp = item.split('_')[-1]
tmp = tmp.split('.')[0]
return tmp
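# Example (illustrative): plugin files named like "keystone_100.py" and
# "nova_200.py" match rx_val above and are loaded in weight order;
# _get_weight("nova_200.py") returns "200".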
| []
| []
| [
"HOME"
]
| [] | ["HOME"] | python | 1 | 0 | |
ipinfo_test.go | package ipinfo_test
import (
"fmt"
"os"
"testing"
"github.com/johnaoss/ipinfo"
)
var token = os.Getenv("IPINFO_TOKEN")
// TestGetRequest asserts we can get the IP info for Google's DNS servers.
func TestGetRequest(t *testing.T) {
client := ipinfo.NewClient(token)
resp, err := client.Info("8.8.8.8")
if err != nil {
t.Fatalf("Failed to get client info: %v", err)
}
if resp.IP != "8.8.8.8" {
t.Errorf("Expected IP to be 8.8.8.8, instead given: %s", resp.IP)
}
t.Logf("%+v\n", resp)
}
// TestUnauthorized asserts we can get the info from the unauthorized API.
func TestUnauthorized(t *testing.T) {
resp, err := ipinfo.Info("8.8.8.8")
if err != nil {
t.Fatalf("unexpected err: %v", err)
}
t.Logf("%+v\n", resp)
}
func ExampleInfo() {
resp, _ := ipinfo.Info("8.8.8.8")
fmt.Println(resp.City)
// Output: Mountain View
}
| [
"\"IPINFO_TOKEN\""
]
| []
| [
"IPINFO_TOKEN"
]
| [] | ["IPINFO_TOKEN"] | go | 1 | 0 | |
scripts/ci/get_modified_boards.py | #!/usr/bin/env python3
# A script to generate a list of boards that have been changed or added, and to
# create an arguments file for sanitycheck to allow running more tests for those boards.
import sys
import re, os
from email.utils import parseaddr
import sh
import logging
import argparse
if "ZEPHYR_BASE" not in os.environ:
logging.error("$ZEPHYR_BASE environment variable undefined.\n")
exit(1)
logger = None
repository_path = os.environ['ZEPHYR_BASE']
sh_special_args = {
'_tty_out': False,
'_cwd': repository_path
}
def init_logs():
log_lev = os.environ.get('LOG_LEVEL', None)
level = logging.INFO
global logger
if log_lev == "DEBUG":
level = logging.DEBUG
elif log_lev == "ERROR":
level = logging.ERROR
console = logging.StreamHandler()
format = logging.Formatter('%(levelname)-8s: %(message)s')
console.setFormatter(format)
logger = logging.getLogger('')
logger.addHandler(console)
logger.setLevel(level)
logging.debug("Log init completed")
def parse_args():
parser = argparse.ArgumentParser(
description="Generate a sanitycheck argument for for boards "
" that have changed")
parser.add_argument('-c', '--commits', default=None,
help="Commit range in the form: a..b")
return parser.parse_args()
def main():
boards = set()
args = parse_args()
if not args.commits:
exit(1)
commit = sh.git("diff","--name-only", args.commits, **sh_special_args)
files = commit.split("\n")
for f in files:
if f.endswith(".rst") or f.endswith(".png") or f.endswith(".jpg"):
continue
        p = re.match(r"^boards/[^/]+/([^/]+)/", f)
if p and p.groups():
boards.add(p.group(1))
if boards:
print("-p\n%s" %("\n-p\n".join(boards)))
if __name__ == "__main__":
main()
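# Example (illustrative): a changed file such as "boards/arm/frdm_k64f/board.cmake"
# matches the pattern above and yields the board name "frdm_k64f", so the script
# prints sanitycheck arguments like "-p\nfrdm_k64f".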
| []
| []
| [
"ZEPHYR_BASE",
"LOG_LEVEL"
]
| [] | ["ZEPHYR_BASE", "LOG_LEVEL"] | python | 2 | 0 | |
SerialMonitor/__init__.py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Copyright (c) 2017-2018 Artur K. Lidtke and Aleksander Lidtke
---------------
Graphical interface program that allows the user to communicate with an
Arduino or other piece of hardware via a serial port.
GUI built with wxFormbuilder 3.5.1 (https://github.com/wxFormBuilder/wxFormBuilder)
To install on Ubuntu Linux:
add-apt-repository ppa:wxformbuilder/release
apt-get install wxformbuilder
---------------
Distributed under the MIT licence:
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import SerialMonitor.serialMonitorBaseClasses as baseClasses
import SerialMonitor.commsInterface as commsInterface
import wx, string
import os, sys, time
import serial
import glob
import logging, unicodedata
# Set the module version consistent with pip freeze. Handle exception if didn't
# install with pip
import pkg_resources as pkg
try:
__version__ = pkg.get_distribution("SerialMonitor").version.lstrip('-').rstrip('-')
except:
__version__ = "unknown_version"
class pleaseReconnectDialog(wx.Dialog):
def __init__(self,parent):
""" Tells the user to reconnect to the serial port for the new connection
settings to take effect."""
wx.Dialog.__init__(self,parent,-1,'Please reconnect',size=(300,120))
self.CenterOnScreen(wx.BOTH)
okButton = wx.Button(self, wx.ID_OK, 'OK')
okButton.SetDefault()
text = wx.StaticText(self, -1, 'Please reconnect to the serial port for the changes to take effect.')
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(text, 1, wx.ALIGN_CENTER|wx.TOP, 10)
vbox.Add(okButton, 1, wx.ALIGN_CENTER|wx.BOTTOM, 10)
self.SetSizer(vbox)
class serialDetailsDialog( baseClasses.serialDetailsDialog ):
def __init__(self, parent, currentStopBits, currentParity, currentByteSize):
""" Parent is the parent object, currentStopBits, currentPartiy and
currentByte size are the currently used serial.Serial settings, which
will be selected when the dialog is opened.
"""
# initialise the underlying object
baseClasses.serialDetailsDialog.__init__( self, parent )
        # create bespoke fields for holding the available choices
self.stopBitsChoices = []
self.parityChoices = []
self.byteSizeChoices = []
# Add the selections to the dropdown menus (defined by the pySerial module).
for stopBit in serial.Serial.STOPBITS:
self.stopBitsChoice.Append(str(stopBit))
self.stopBitsChoices.append(stopBit)
self.stopBitsChoice.SetSelection(self.stopBitsChoices.index(currentStopBits))
for key, val in serial.PARITY_NAMES.items():
self.parityChoice.Append(val)
self.parityChoices.append(key)
self.parityChoice.SetSelection(self.parityChoices.index(currentParity))
for byteSize in serial.Serial.BYTESIZES:
self.byteSizeChoice.Append(str(byteSize))
self.byteSizeChoices.append(byteSize)
self.byteSizeChoice.SetSelection(self.byteSizeChoices.index(currentByteSize))
class serialMonitorGuiMainFrame( baseClasses.mainFrame ):
#============================
# CONSTRUCTOR
#============================
def __init__(self):
""" Create the main frame, deriving from a baseline object which has all the panels, buttons, etc.
already defined. """
# initialise the underlying object
baseClasses.mainFrame.__init__(self, None)
# File logger name.
self.fileLoggerName = None # Overwrite with a file name when user chooses to log to a file.
self.loggingLevel = "ERROR"
# Create a logger for the application.
self.logger = logging.getLogger("SMLog") # It stands for Serial Monitor, right ;)
self.handler = logging.StreamHandler() # Will output to STDERR.
self.logger.setLevel(logging.DEBUG) # Collect all levels in the main logger.
self.formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
self.handler.setFormatter(self.formatter)
self.handler.setLevel(self.loggingLevel) # Filter logs at the handler level.
# Logging to file will log everything.
self.logger.addHandler(self.handler)
# serial communication
self.portOpen = False # indicates if the serial communication port is open
self.currentPort = 'None' # currently chosen port
self.currentSerialConnection = 0 # holds the serial connection object once it has been initialised
self.serialOutputBuffer = '' # buffer for storing inbound data if it arrives in chunks
# set default values
self.readDelay = int(self.readDelayTxtCtrl.GetValue())
self.BaudRate = int(self.baudRateTxtCtrl.GetValue())
        # No raw output by default, so the hexOutputCheckbox won't change anything.
        # Disable it so as not to confuse the users.
self.hexOutputCheckbox.Enable(False)
# Current serial connection details.
self.currentStopBits = serial.STOPBITS_ONE
self.currentParity = serial.PARITY_NONE
self.currentByteSize = serial.EIGHTBITS
# initialise the timing function for receiving the data from the serial port at a specific interval
self.parseOutputsTimer.Start(int(self.readDelay))
# update the ports available at start-up
self.updatePorts(suppressWarn=True)
self.portChoice.SetSelection(0)
self.Layout() # Make sure everything is nicely located in the sizers on startup.
#============================
# EVENT HANDLING FUNCTIONS
#============================
def onClose(self, event):
""" close the serial port before terminating, need to make sure it isn't left hanging """
if self.portOpen:
self.currentSerialConnection.close()
self.logger.info('Disconnected from port before shutdown.')
self.Destroy()
def onSendInput(self, event):
""" pass the message from the txtControl to the message parsing method that
links with the comms protocol. """
self.sendMessage( self.inputTextControl.GetLineText(0) )
self.inputTextControl.Clear()
def onChoseSerialPort(self, event):
""" picks up the newly selected port and attempts to connect to a peripheral device via it """
self.logger.debug('Choosing serial port.')
# ignore the None option
if self.portChoice.GetStringSelection() != 'None':
try:
# don't re-open a working stream
if self.portChoice.GetStringSelection() != self.currentPort:
# close any open ports if present
if self.portOpen:
self.currentSerialConnection.close()
self.currentSerialConnection = serial.Serial(port=self.portChoice.GetStringSelection(),
baudrate=self.BaudRate,
timeout=2,
stopbits=self.currentStopBits,
parity=self.currentParity,
bytesize=self.currentByteSize)
self.logger.debug('Checking {}'.format(self.currentSerialConnection))
                    if self.checkConnection(): # Try to connect to the user-selected port.
self.portOpen = True
self.currentPort = self.portChoice.GetStringSelection()
self.logger.info('Connected to port {}'.format(self.currentPort))
# To verify the setting of the serial connection details.
self.logger.debug('baud={},stop bits={},parity={},byte size={}'.format(
self.currentSerialConnection.baudrate,
self.currentSerialConnection.stopbits,
self.currentSerialConnection.parity,
self.currentSerialConnection.bytesize,))
else: # Something's wrong, couldn't connect.
wx.MessageBox('Cannot connect to port {}.'.format(
self.portChoice.GetStringSelection()), 'Error',
wx.OK | wx.ICON_ERROR)
self.logger.error('Could not connect to port {}'.format(
self.portChoice.GetStringSelection()))
self.currentSerialConnection = 0
self.portOpen = False
self.updatePorts()
self.portChoice.SetSelection(0) # Go back to 'None' selection.
            except BaseException as unknownError:
wx.MessageBox('Unknown problem occurred while establishing connection using the chosen port!', 'Error',
wx.OK | wx.ICON_ERROR)
self.currentSerialConnection = 0
self.portOpen = False
self.updatePorts()
self.portChoice.SetSelection(0) # Go back to 'None' selection.
                self.logger.error('Failed to connect to a port due to {}.'.format(unknownError))
# if None is chosen then close the current port
else:
self.disconnect()
def onUpdatePorts(self, event):
""" call the update ports method - need a wrapper to be able to call it during initialisation """
        self.logger.debug('Attempting to update available ports.')
self.updatePorts()
self.Layout() # makes sure the choice dropdown is big enough to fit all the choice options
def onDisconnect(self, event):
""" Call the disconnect method """
self.disconnect()
def onParseOutputs(self, event):
""" Get information from the data received via the serial port, if there is anything available """
self.parseOutputs()
def onUpdateBaudRate(self, event):
""" Update the Baud rate but do not restart the connection; the change will take effect
when the next connection gets established """
        # Attempt to retrieve the entire contents of the txtCtrl. If they are
        # an int, use them. Otherwise, revert back to the old value and let the
        # user figure out they're making a mistake.
self.logger.debug('Attempting to update baud rate.')
try:
newValue = int(self.baudRateTxtCtrl.GetValue())
self.BaudRate = newValue
self.notifyToReconnect() # Some people are confused about how this works.
except ValueError as ve:
self.baudRateTxtCtrl.SetValue("{:d}".format(self.BaudRate))
self.logger.error('ValueError while updating read delay: {}'.format(ve))
wx.MessageBox('Please specify integer baud rate','Incorrect baud rate',
wx.OK | wx.ICON_WARNING)
def onUpdateReadDelay(self, event):
""" Update the rate at which outputs are being read from the serial port
and restart the timer for the changes to take effect """
self.logger.debug('Attempting to update read delay.')
try:
newValue = int(self.readDelayTxtCtrl.GetValue())
self.readDelay = newValue
self.parseOutputsTimer.Start(int(self.readDelay))
self.logger.info('Changed read delay to {} ms.'.format(self.readDelay))
except ValueError as ve:
self.readDelayTxtCtrl.SetValue("{:d}".format(self.readDelay))
self.logger.error('ValueError while updating read delay: {}'.format(ve))
def onClearConsole(self, event):
""" Clear the output/input console """
self.logger.debug('Console cleared.')
self.logFileTextControl.Clear()
def onToggleLogFile(self, event):
""" Open a log file if none is active, or close the existing one. """
self.logger.debug('Attempting to open a log file.')
if self.fileLoggerName is None:
fileDialog = wx.FileDialog(self, "Choose log file", os.getcwd(),
time.strftime("%Y%m%d%H%M%S_SM.log"),
"Log files (*.log)|*.log",
wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
fileDialog.ShowModal() # Wait for response.
self.fileLoggerName = fileDialog.GetPath() # User-chosen log file.
fileHandler = logging.FileHandler(self.fileLoggerName)
fileHandler.setFormatter(self.formatter) # Default log formatter.
self.logger.addHandler(fileHandler) # Already logs to STDERR, now also the file.
else:
dlg=wx.MessageDialog(self, "Stop logging?", "Stop", wx.YES_NO|wx.ICON_QUESTION)
if dlg.ShowModal() == wx.ID_YES: # Avoid accidental log termination.
# Remove the file handler from the logger.
for hdlr in self.logger.handlers:
if isinstance(hdlr, logging.FileHandler): # Only one file handler foreseen.
self.logger.removeHandler(hdlr)
self.fileLoggerName = None # Reset.
else: # The checkbox should still be checked if we don't stop logging.
self.fileLogCheckbox.SetValue(True)
def onRawOutputTicked(self, event):
""" Raw output checkbox status defines whether hex output can also be
enabled or not. Grey it out when it won't affect the program not to
confuse the users. """
self.logger.debug('Raw output ticked: {}. Current raw output state: {}.'.format(
event.IsChecked(),self.hexOutputCheckbox.GetValue()))
if event.IsChecked(): # Hex output can now be enabled.
self.hexOutputCheckbox.Enable(True)
else: # Now hex output won't change anything.
self.hexOutputCheckbox.Enable(False) # Grey it out.
# Upon re-enabling raw output start from the default state of the hex output, too.
self.hexOutputCheckbox.SetValue(False)
def onEditSerialPort( self, event ):
""" Edit the more fine details of the serial connection, like the parity
or the stopbits. """
self.logger.debug('Attempting to edit serial connection details.')
# Main frame is the parent of this.
serialDialog = serialDetailsDialog(self, self.currentStopBits,
self.currentParity, self.currentByteSize)
result = serialDialog.ShowModal() # Need a user to click OK or cancel.
if result == wx.ID_OK: # User selected new settings so change the current defaults.
self.currentStopBits = serialDialog.stopBitsChoices[serialDialog.stopBitsChoice.GetSelection()]
self.currentParity = serialDialog.parityChoices[serialDialog.parityChoice.GetSelection()]
self.currentByteSize = serialDialog.byteSizeChoices[serialDialog.byteSizeChoice.GetSelection()]
self.logger.debug('Changed serial settings to: stop bits={}, parity={}, byte size={}'.format(
self.currentStopBits,self.currentParity,self.currentByteSize))
# Tell the user to reconnect for changes to take effect.
self.notifyToReconnect()
else: # Nothing's changed.
pass
def onLoggingLevelChosen(self, event):
""" Check if the new logging level is different to the currently selected
one and, if so, do an update. """
# Retrieve the new selection.
newLevel = self.loggingLevelChoice.GetStringSelection()
if (newLevel != self.loggingLevel):
self.loggingLevel = newLevel
if self.loggingLevel == "ERROR":
self.handler.setLevel(logging.ERROR)
elif self.loggingLevel == "WARNING":
self.handler.setLevel(logging.WARNING)
elif self.loggingLevel == "INFO":
self.handler.setLevel(logging.INFO)
elif self.loggingLevel == "DEBUG":
self.handler.setLevel(logging.DEBUG)
else:
self.logger.warning("Incorrect logging level {} selected, falling back to DEBUG".format(newLevel))
self.loggingLevel = "DEBUG"
                self.handler.setLevel(logging.DEBUG)
self.loggingLevelChoice.SetStringSelection("DEBUG")
#============================
# OTHER FUNCTIONS
#============================
def updatePorts(self, suppressWarn=False):
""" Checks the list of open serial ports and updates the internal list
and the options shown in the dropdown selection menu.
Args
-----
suppressWarn (bool): whether to suppress showing a wx.MessageBox with
a warning if no active ports are found.
"""
# check what ports are currently open
ports = commsInterface.getActivePorts()
if len(ports) <= 0 and not suppressWarn:
wx.MessageBox('Check connection and port permissions.', 'Found no active ports!',
wx.ICON_ERROR, None)
# save current selection
currentSelection = self.portChoice.GetStringSelection()
# Remove the current options
for i in range(len(self.portChoice.GetStrings())-1, -1, -1):
self.portChoice.Delete(i)
# add the newly found ports
self.portChoice.Append('None')
for port in ports:
self.portChoice.Append(port)
# attempt to return to the last selected port, use None if it's not found
if currentSelection in ports:
for i in range(len(ports)):
if ports[i] == currentSelection:
self.portChoice.SetSelection(i+1)
else:
self.portChoice.SetSelection(0)
self.currentPort = 'None'
def disconnect(self):
""" Drop the current connection with the serial port """
if self.portOpen:
self.currentSerialConnection.close()
self.currentSerialConnection = 0
self.portOpen = False
self.portChoice.SetSelection(0)
self.currentPort = 'None'
self.logger.info('User disconnected from port.')
def checkConnection(self):
""" Checks if there is anything still connected to the port.
Returns
-------
True if `self.currentSerialConnection` port is readable, False otherwise.
"""
if not commsInterface.checkConnection(self.currentSerialConnection):
# handle all internal nuts and bolts related to the connection
# by setting them back to defaults.
self.currentSerialConnection = 0
self.portOpen = False
self.currentPort = 'None'
# let the user know something's wrong
self.logger.error('Lost port connection.')
wx.MessageBox('Port isn\'t readable! Check the connection...', 'Error',
wx.OK | wx.ICON_ERROR)
# check what ports are open once the user has had a chance to react.
self.updatePorts()
return False
else: # All is good.
return True
def writeToTextBox(self, msg, prepend="", colour=(0,0,0)):
""" Log a message inside the main text display window.
Refreshes the position inside the text box, writes the message, and sets
        the cursor at the end of the text box to avoid issues with the user
accidentally clicking somewhere and disturbing the output process.
Arguments
---------
msg (string) - string representation of the message to be shown
Optional
---------
prepend (string, default empty) - how to prepend the message, useful
for highlighting e.g. in/out directions, etc.
colour (int tuple, len=3, default=(0,0,0)) - RGB colour of text
"""
# Move the cursor to the end of the box
self.logFileTextControl.MoveEnd()
# Set colour if needed
if colour != (0,0,0):
self.logFileTextControl.BeginTextColour(colour)
# Write the message, with a preamble if desired.
if len(prepend) > 0:
prepend = "{}".format(prepend) # Format the desired preamble nicely.
self.logFileTextControl.WriteText(r'{}{}'.format(prepend, msg))
# Scroll to the end of the box.
self.logFileTextControl.ShowPosition(self.logFileTextControl.GetLastPosition())
# Re-set colour to default but only if it's been changed to avoid WX
# warning 'Debug: Too many EndStyle calls!"'.
if colour != (0,0,0):
self.logFileTextControl.EndTextColour()
def sendMessage(self, msg):
""" Sends a message to the port via the serial conneciton, but also takes
care of any additional operations, such as logging the message.
Arguments
---------
msg (string) - representation of the message to be sent
"""
# make sure the connection has not been broken
if self.portOpen:
if self.checkConnection():
# Send the message; need to pass as a regular string to avoid compatibility
# issues with new wxWidgets which use unicode string formatting
# Convert msg to bytes, then pass to serial.
self.currentSerialConnection.write(msg.encode('utf-8'))
# Log in the main display box in new line and in blue to make sure it stands out.
self.writeToTextBox(msg+'\n',prepend='\nOUT: ',colour=(0,0,255))
# Log the sent command.
self.logger.info(r'OUT: {}'.format(msg))
def parseOutputs(self):
""" Check the serial connection for any inbound information and read it if it's
available. Pass it to the respective handlers accordingly. """
if self.portOpen:
if self.checkConnection():
# # if incoming bytes are waiting to be read from the serial input buffer
# if (self.currentSerialConnection.inWaiting() > 0):
# # Read the bytes.
# dataStr = self.currentSerialConnection.read(
# self.currentSerialConnection.inWaiting() )
#
# # Pass to the buffer and convert from binary array to ASCII
# # and split the output on EOL characters, unless the user
# # desires to see the raw, undecoded output. In such case,
# # don't expect end of line characters and replace unkown bytes
# # with unicode replacement character. Also allow the user
# # to see the hex code of the received bytes, not unicode.
#
# # Processed and (arguably) nicely formatted output.
# if not self.rawOutputCheckbox.GetValue():
# try:
# self.serialOutputBuffer += dataStr.decode('ascii')
#
# # extract any full lines and log them - there can be more than
# # one, depending on the loop frequencies on either side of the
# # serial conneciton
# lines = self.serialOutputBuffer.rpartition("\n")
# if lines[0]:
# for line in lines[0].split("\n"):
# # Write the line to text ctrl and log it.
# self.writeToTextBox(msg+"\n")
# logger.info(line)
#
# # this is where one can pass the outputs to where they need to go
#
# # Keep the remaining output in buffer if there are no EOL characters
# # in it. This is useful if only part of a message was received on last
# # buffer update.
# self.serialOutputBuffer = lines[2]
#
# except UnicodeDecodeError as uderr:
# # Sometimes rubbish gets fed to the serial port.
# # Print the error in the console to let the user know something's not right.
# self.writeToTextBox("!!! ERROR DECODING ASCII STRING !!!\n", colour=(255,0,0))
# # Log the error and the line that caused it.
# logger.warning('UnicodeDecodeError :( with string:\n\t{}'.format(dataStr))
#
# # Raw but not formatted output.
# elif not self.hexOutputCheckbox.GetValue():
# # Just print whatever came out of the serial port.
# # Writing unicode(dataStr) to logFileTextControl will sometimes
# # skip characters (e.g. for 0x00) and the remaining parts of the dataStr.
# # Write one character at the time and repalce invalid bytes manually.
# for c in dataStr:
# try:
# self.writeToTextBox(chr(c))
#
# # c was an unknown byte - replace it.
# except UnicodeDecodeError:
# self.writeToTextBox(u'\uFFFD')
#
# # Log the line that we received.
# logger.info(str(dataStr))
#
# else: # Hex output.
# # Hex encoding of the datStr.
# hexDataStr = ":".join("{}".format(hex(c)) for c in dataStr)
# self.writeToTextBox(hexDataStr)
# logger.info(hexDataStr)
# see in what format to request data
if not self.rawOutputCheckbox.GetValue():
outputFormat = "formatted"
elif not self.hexOutputCheckbox.GetValue():
outputFormat = "raw"
else:
outputFormat = "hex"
# grab the outputs
output, self.serialOutputBuffer, warningSummary = commsInterface.grabPortOutput(
self.currentSerialConnection, self.serialOutputBuffer, outputFormat)
# Log and print received data in the text box. output is a string,
# which is Unicode in Python 3, so no need to cast.
# Only print when there is some message to avoid spamming the logs
# with empty lines.
if len(output) > 0:
# Replace control characters with unicode unknown character.
# Otherwise, the log might stall. Never seen this happen in
# the wx text box but just to be safe.
cleanOutput=''.join(ch if unicodedata.category(ch)[0]!='C' else chr(0xFFFD) for ch in output)
self.writeToTextBox(cleanOutput)
self.logger.info(cleanOutput)
# Log and print (in red) warnings, if there are any.
if len(warningSummary) > 0:
for w in warningSummary:
self.writeToTextBox("{}, check the log!\n".format(w), colour=(255,0,0))
self.logger.warning(warningSummary[w])
def notifyToReconnect(self):
""" Notify the user to reconnect to the serial port for the changes they've
made to take effect by opening a dialog. It'll automatically disappear
after two seconds. """
reconnectInfoDialog = pleaseReconnectDialog(self)
# Automatically close after some time.
wx.CallLater(2000, reconnectInfoDialog.Destroy)
reconnectInfoDialog.ShowModal()
# implements the GUI class to run a wxApp
class serialMonitorGuiApp(wx.App):
def OnInit(self):
#TODO Maybe should call the parent wx.App.OnInit method here?
self.frame = serialMonitorGuiMainFrame()
self.SetTopWindow(self.frame)
self.frame.Show(True)
return True
def main():
""" Used by an entry-point script. """
# need an environment variable on Ubuntu to make the menu bars show correctly
env = os.environ
    if not(('UBUNTU_MENUPROXY' in env) and (env['UBUNTU_MENUPROXY'] == '0')):
os.environ["UBUNTU_MENUPROXY"] = "0"
# start the app
app = serialMonitorGuiApp()
app.MainLoop()
if __name__ == "__main__":
main()
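# Example (illustrative): launch the GUI from a Python shell or script once the
# package is importable:
#   import SerialMonitor
#   SerialMonitor.main()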
| []
| []
| [
"UBUNTU_MENUPROXY"
]
| [] | ["UBUNTU_MENUPROXY"] | python | 1 | 0 | |
cmd/distributed/roles/roles.go | // Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
package roles
import (
"context"
"fmt"
"os"
"os/signal"
"strings"
"sync"
"syscall"
"github.com/milvus-io/milvus/internal/datanode"
"github.com/milvus-io/milvus/internal/dataservice"
"github.com/milvus-io/milvus/internal/indexnode"
"github.com/milvus-io/milvus/internal/indexservice"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/masterservice"
"github.com/milvus-io/milvus/internal/metrics"
"github.com/milvus-io/milvus/internal/proxynode"
"github.com/milvus-io/milvus/internal/querynode"
"github.com/milvus-io/milvus/internal/queryservice"
"github.com/milvus-io/milvus/cmd/distributed/components"
"github.com/milvus-io/milvus/internal/logutil"
"github.com/milvus-io/milvus/internal/msgstream"
"github.com/milvus-io/milvus/internal/util/trace"
)
func newMsgFactory(localMsg bool) msgstream.Factory {
if localMsg {
return msgstream.NewRmsFactory()
}
return msgstream.NewPmsFactory()
}
type MilvusRoles struct {
EnableMaster bool `env:"ENABLE_MASTER"`
EnableProxyNode bool `env:"ENABLE_PROXY_NODE"`
EnableQueryService bool `env:"ENABLE_QUERY_SERVICE"`
EnableQueryNode bool `env:"ENABLE_QUERY_NODE"`
EnableDataService bool `env:"ENABLE_DATA_SERVICE"`
EnableDataNode bool `env:"ENABLE_DATA_NODE"`
EnableIndexService bool `env:"ENABLE_INDEX_SERVICE"`
EnableIndexNode bool `env:"ENABLE_INDEX_NODE"`
EnableMsgStreamService bool `env:"ENABLE_MSGSTREAM_SERVICE"`
}
func (mr *MilvusRoles) EnvValue(env string) bool {
env = strings.ToLower(env)
env = strings.Trim(env, " ")
return env == "1" || env == "true"
}
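// Example (illustrative): deciding which roles to start from the environment.
//
//	roles := MilvusRoles{}
//	roles.EnableMaster = roles.EnvValue(os.Getenv("ENABLE_MASTER"))
//	roles.Run(os.Getenv("DEPLOY_MODE") == "STANDALONE")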
func (mr *MilvusRoles) Run(localMsg bool) {
if os.Getenv("DEPLOY_MODE") == "STANDALONE" {
closer := trace.InitTracing("standalone")
if closer != nil {
defer closer.Close()
}
}
ctx, cancel := context.WithCancel(context.Background())
if mr.EnableMaster {
var ms *components.MasterService
var wg sync.WaitGroup
wg.Add(1)
go func() {
masterservice.Params.Init()
logutil.SetupLogger(&masterservice.Params.Log)
defer log.Sync()
factory := newMsgFactory(localMsg)
var err error
ms, err = components.NewMasterService(ctx, factory)
if err != nil {
panic(err)
}
wg.Done()
_ = ms.Run()
}()
wg.Wait()
if ms != nil {
defer ms.Stop()
}
metrics.RegisterMaster()
}
if mr.EnableProxyNode {
var pn *components.ProxyNode
var wg sync.WaitGroup
wg.Add(1)
go func() {
proxynode.Params.Init()
logutil.SetupLogger(&proxynode.Params.Log)
defer log.Sync()
factory := newMsgFactory(localMsg)
var err error
pn, err = components.NewProxyNode(ctx, factory)
if err != nil {
panic(err)
}
wg.Done()
_ = pn.Run()
}()
wg.Wait()
if pn != nil {
defer pn.Stop()
}
metrics.RegisterProxyNode()
}
if mr.EnableQueryService {
var qs *components.QueryService
var wg sync.WaitGroup
wg.Add(1)
go func() {
queryservice.Params.Init()
logutil.SetupLogger(&queryservice.Params.Log)
defer log.Sync()
factory := newMsgFactory(localMsg)
var err error
qs, err = components.NewQueryService(ctx, factory)
if err != nil {
panic(err)
}
wg.Done()
_ = qs.Run()
}()
wg.Wait()
if qs != nil {
defer qs.Stop()
}
metrics.RegisterQueryService()
}
if mr.EnableQueryNode {
var qn *components.QueryNode
var wg sync.WaitGroup
wg.Add(1)
go func() {
querynode.Params.Init()
logutil.SetupLogger(&querynode.Params.Log)
defer log.Sync()
factory := newMsgFactory(localMsg)
var err error
qn, err = components.NewQueryNode(ctx, factory)
if err != nil {
panic(err)
}
wg.Done()
_ = qn.Run()
}()
wg.Wait()
if qn != nil {
defer qn.Stop()
}
metrics.RegisterQueryNode()
}
if mr.EnableDataService {
var ds *components.DataService
var wg sync.WaitGroup
wg.Add(1)
go func() {
dataservice.Params.Init()
logutil.SetupLogger(&dataservice.Params.Log)
defer log.Sync()
factory := newMsgFactory(localMsg)
var err error
ds, err = components.NewDataService(ctx, factory)
if err != nil {
panic(err)
}
wg.Done()
_ = ds.Run()
}()
wg.Wait()
if ds != nil {
defer ds.Stop()
}
metrics.RegisterDataService()
}
if mr.EnableDataNode {
var dn *components.DataNode
var wg sync.WaitGroup
wg.Add(1)
go func() {
datanode.Params.Init()
logutil.SetupLogger(&datanode.Params.Log)
defer log.Sync()
factory := newMsgFactory(localMsg)
var err error
dn, err = components.NewDataNode(ctx, factory)
if err != nil {
panic(err)
}
wg.Done()
_ = dn.Run()
}()
wg.Wait()
if dn != nil {
defer dn.Stop()
}
metrics.RegisterDataNode()
}
if mr.EnableIndexService {
var is *components.IndexService
var wg sync.WaitGroup
wg.Add(1)
go func() {
indexservice.Params.Init()
logutil.SetupLogger(&indexservice.Params.Log)
defer log.Sync()
var err error
is, err = components.NewIndexService(ctx)
if err != nil {
panic(err)
}
wg.Done()
_ = is.Run()
}()
wg.Wait()
if is != nil {
defer is.Stop()
}
metrics.RegisterIndexService()
}
if mr.EnableIndexNode {
var in *components.IndexNode
var wg sync.WaitGroup
wg.Add(1)
go func() {
indexnode.Params.Init()
logutil.SetupLogger(&indexnode.Params.Log)
defer log.Sync()
var err error
in, err = components.NewIndexNode(ctx)
if err != nil {
panic(err)
}
wg.Done()
_ = in.Run()
}()
wg.Wait()
if in != nil {
defer in.Stop()
}
metrics.RegisterIndexNode()
}
if mr.EnableMsgStreamService {
var mss *components.MsgStream
var wg sync.WaitGroup
wg.Add(1)
go func() {
var err error
mss, err = components.NewMsgStreamService(ctx)
if err != nil {
panic(err)
}
wg.Done()
_ = mss.Run()
}()
wg.Wait()
if mss != nil {
defer mss.Stop()
}
metrics.RegisterMsgStreamService()
}
metrics.ServeHTTP()
sc := make(chan os.Signal, 1)
signal.Notify(sc,
syscall.SIGHUP,
syscall.SIGINT,
syscall.SIGTERM,
syscall.SIGQUIT)
sig := <-sc
fmt.Printf("Get %s signal to exit\n", sig.String())
// some deferred Stop has race with context cancel
cancel()
}
| [
"\"DEPLOY_MODE\""
]
| []
| [
"DEPLOY_MODE"
]
| [] | ["DEPLOY_MODE"] | go | 1 | 0 | |
src/cf/terminal/color.go | package terminal
import (
"fmt"
"os"
"regexp"
"runtime"
"code.google.com/p/go.crypto/ssh/terminal"
)
type Color uint
const (
red Color = 31
green = 32
yellow = 33
// blue = 34
magenta = 35
cyan = 36
grey = 37
white = 38
)
var (
colorize func(message string, color Color, bold int) string
OsSupportsColors = runtime.GOOS != "windows"
TerminalSupportsColors = isTerminal()
)
func init() {
ResetColorSupport()
}
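// ResetColorSupport re-reads CF_COLOR and terminal support and installs
// either a colorizing or a pass-through implementation of colorize.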
func ResetColorSupport() {
if colorsDisabled() {
colorize = func(message string, _ Color, _ int) string {
return message
}
} else {
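		// Wrap the message in an ANSI SGR sequence: ESC[<bold>;<color>m sets
		// the attributes and ESC[0m resets them after the message.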
colorize = func(message string, color Color, bold int) string {
return fmt.Sprintf("\033[%d;%dm%s\033[0m", bold, color, message)
}
}
}
func colorsDisabled() bool {
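	// CF_COLOR=false always disables color and CF_COLOR=true forces it on;
	// otherwise fall back to terminal and OS capability detection.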
userDisabledColors := os.Getenv("CF_COLOR") == "false"
return userDisabledColors ||
(os.Getenv("CF_COLOR") != "true" && (!TerminalSupportsColors || !OsSupportsColors))
}
func Colorize(message string, color Color) string {
return colorize(message, color, 0)
}
func ColorizeBold(message string, color Color) string {
return colorize(message, color, 1)
}
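// decolorizerRegex matches ANSI color/erase escape sequences so previously
// colorized output can be stripped back to plain text.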
var decolorizerRegex = regexp.MustCompile(`\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]`)
func decolorize(message string) string {
return string(decolorizerRegex.ReplaceAll([]byte(message), []byte("")))
}
func HeaderColor(message string) string {
return ColorizeBold(message, white)
}
func CommandColor(message string) string {
return ColorizeBold(message, yellow)
}
func StoppedColor(message string) string {
return ColorizeBold(message, grey)
}
func AdvisoryColor(message string) string {
return ColorizeBold(message, yellow)
}
func CrashedColor(message string) string {
return ColorizeBold(message, red)
}
func FailureColor(message string) string {
return ColorizeBold(message, red)
}
func SuccessColor(message string) string {
return ColorizeBold(message, green)
}
func EntityNameColor(message string) string {
return ColorizeBold(message, cyan)
}
func PromptColor(message string) string {
return ColorizeBold(message, cyan)
}
func TableContentHeaderColor(message string) string {
return ColorizeBold(message, cyan)
}
func WarningColor(message string) string {
return ColorizeBold(message, magenta)
}
func LogStdoutColor(message string) string {
return Colorize(message, white)
}
func LogStderrColor(message string) string {
return Colorize(message, red)
}
func LogAppHeaderColor(message string) string {
return ColorizeBold(message, yellow)
}
func LogSysHeaderColor(message string) string {
return ColorizeBold(message, cyan)
}
func isTerminal() bool {
return terminal.IsTerminal(1)
}
| [
"\"CF_COLOR\"",
"\"CF_COLOR\""
]
| []
| [
"CF_COLOR"
]
| [] | ["CF_COLOR"] | go | 1 | 0 | |
kubos-core/docker.py | #!/usr/bin/env python2
#
# KubOS Core Flight Services
# Copyright (C) 2015 Kubos Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# A convenience script for building and running RIOT executables with docker
#
import argparse
import glob
import os
import shlex
import sys
import subprocess
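# Resolve the repository root so it can be bind-mounted into the riotbuild
# container, and compute the current directory's path relative to it.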
this_dir = os.path.dirname(os.path.abspath(__file__))
kubos_root = os.path.dirname(this_dir)
pwd = os.environ['PWD']
cmd_relpath = os.path.relpath(pwd, kubos_root)
def machine_config(machine):
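    # Return the extra docker connection flags (e.g. --tlsverify, -H) that
    # "docker-machine config" reports for the named machine.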
cfg = subprocess.check_output('docker-machine config ' + machine,
shell=True)
if not cfg:
return []
return shlex.split(cfg.strip())
def find_elf():
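    # Return the first native ELF binary relative to the working directory,
    # or None if nothing has been built yet.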
binaries = glob.glob('./bin/native/*.elf')
if len(binaries) == 0:
return None
return os.path.relpath(binaries[0], pwd)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--machine', help='Docker machine config to use')
parser.add_argument('command', help='make|run')
args, extra = parser.parse_known_args()
docker_cmd = ['docker']
if args.machine:
docker_cmd.extend(machine_config(args.machine))
docker_cmd.extend(['run', '-it', '-v', kubos_root + ':/data/riotbuild',
'-w', '/data/riotbuild/' + cmd_relpath, 'riotbuild'])
if args.command == 'make':
docker_cmd.append('make')
docker_cmd.extend(extra)
elif args.command == 'run':
elf_relpath = find_elf()
if not elf_relpath:
parser.error('No ELF binaries found in ./bin/native')
docker_cmd.append(elf_relpath)
docker_cmd.extend(extra)
else:
parser.error('Unknown command: "%s"' % args.command)
print '>', ' '.join(docker_cmd)
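    # Replace this process with the docker client so signals and the exit
    # status pass straight through.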
os.execvp('docker', docker_cmd)
if __name__ == '__main__':
main()
| []
| []
| [
"PWD"
]
| [] | ["PWD"] | python | 1 | 0 | |
OBB_Train_Station/wsgi.py | """
WSGI config for OBB_Train_Station project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'OBB_Train_Station.settings')
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 |