Dataset columns:
filename: stringlengths (4 to 198)
content: stringlengths (25 to 939k)
environment: list
variablearg: list
constarg: list
variableargjson: stringclasses (1 value)
constargjson: stringlengths (2 to 3.9k)
lang: stringclasses (3 values)
constargcount: float64 (0 to 129)
variableargcount: float64 (0 to 0)
sentence: stringclasses (1 value)
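The schema above pairs each source file (filename, content, lang) with the environment-variable names referenced in it (environment, variablearg, constarg, their JSON forms, and the corresponding counts). As a minimal sketch of how such a dataset could be loaded and inspected, assuming it is published on the Hugging Face Hub, the snippet below uses the datasets library; the dataset identifier and the split name are hypothetical placeholders, not taken from this page.

```python
from datasets import load_dataset

# Hypothetical dataset id and split name; substitute the real ones.
ds = load_dataset("your-org/env-var-usage")
row = ds["train"][0]

# Each row pairs a source file with the environment variables it references.
print(row["filename"], row["lang"])
print(row["constarg"], row["constargcount"])
print(row["content"][:200])  # first 200 characters of the source file
```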
tracer_test.go
// Licensed to Elasticsearch B.V. under one or more contributor // license agreements. See the NOTICE file distributed with // this work for additional information regarding copyright // ownership. Elasticsearch B.V. licenses this file to you under // the Apache License, Version 2.0 (the "License"); you may // not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package apm_test import ( "bufio" "compress/zlib" "context" "encoding/json" "errors" "fmt" "io" "io/ioutil" "net/http" "net/http/httptest" "os" "runtime" "strconv" "sync" "sync/atomic" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.elastic.co/apm/v2" "go.elastic.co/apm/v2/apmtest" "go.elastic.co/apm/v2/internal/apmhostutil" "go.elastic.co/apm/v2/internal/apmversion" "go.elastic.co/apm/v2/model" "go.elastic.co/apm/v2/transport" "go.elastic.co/apm/v2/transport/transporttest" ) func TestTracerStats(t *testing.T) { tracer := apmtest.NewDiscardTracer() defer tracer.Close() for i := 0; i < 500; i++ { tracer.StartTransaction("name", "type").End() } tracer.Flush(nil) assert.Equal(t, apm.TracerStats{ TransactionsSent: 500, }, tracer.Stats()) } func TestTracerUserAgent(t *testing.T) { sendRequest := func(serviceVersion string) string { waitc := make(chan string, 1) srv := httptest.NewServer(http.HandlerFunc(func(_ http.ResponseWriter, r *http.Request) { select { case waitc <- r.UserAgent(): default: } })) defer func() { srv.Close() close(waitc) }() os.Setenv("ELASTIC_APM_SERVER_URL", srv.URL) defer os.Unsetenv("ELASTIC_APM_SERVER_URL") tracer, err := apm.NewTracerOptions(apm.TracerOptions{ ServiceName: "apmtest", ServiceVersion: serviceVersion, }) require.NoError(t, err) defer tracer.Close() tracer.StartTransaction("name", "type").End() tracer.Flush(nil) return <-waitc } assert.Equal(t, fmt.Sprintf("apm-agent-go/%s (apmtest)", apmversion.AgentVersion), sendRequest("")) assert.Equal(t, fmt.Sprintf("apm-agent-go/%s (apmtest 1.0.0)", apmversion.AgentVersion), sendRequest("1.0.0")) } func TestTracerClosedSendNonBlocking(t *testing.T) { tracer, err := apm.NewTracer("tracer_testing", "") assert.NoError(t, err) tracer.Close() for i := 0; i < 1001; i++ { tracer.StartTransaction("name", "type").End() } assert.Equal(t, uint64(1), tracer.Stats().TransactionsDropped) } func TestNewTracerNonBlocking(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { <-req.Context().Done() })) defer server.Close() os.Setenv("ELASTIC_APM_SERVER_URL", server.URL) defer os.Unsetenv("ELASTIC_APM_SERVER_URL") // NewTracer should not block for any significant amount of time, // even if the server is initially unresponsive. 
before := time.Now() tracer, err := apm.NewTracer("tracer_testing", "") assert.NoError(t, err) tracer.Close() newTracerTime := time.Since(before) assert.Less(t, int64(newTracerTime), int64(time.Second)) } func TestTracerCloseImmediately(t *testing.T) { tracer, err := apm.NewTracer("tracer_testing", "") assert.NoError(t, err) tracer.Close() } func TestTracerFlushEmpty(t *testing.T) { tracer, err := apm.NewTracer("tracer_testing", "") assert.NoError(t, err) defer tracer.Close() tracer.Flush(nil) } func TestTracerMaxSpans(t *testing.T) { test := func(n int) { t.Run(fmt.Sprint(n), func(t *testing.T) { tracer, r := transporttest.NewRecorderTracer() defer tracer.Close() tracer.SetMaxSpans(n) tx := tracer.StartTransaction("name", "type") defer tx.End() // SetMaxSpans only affects transactions started // after the call. tracer.SetMaxSpans(99) for i := 0; i < n; i++ { span := tx.StartSpan("name", "type", nil) assert.False(t, span.Dropped()) span.End() } span := tx.StartSpan("name", "type", nil) assert.True(t, span.Dropped()) span.End() tracer.Flush(nil) assert.Len(t, r.Payloads().Spans, n) }) } test(0) test(23) } func TestTracerErrors(t *testing.T) { tracer, r := transporttest.NewRecorderTracer() defer tracer.Close() error_ := tracer.NewError(errors.New("zing")) error_.Send() tracer.Flush(nil) payloads := r.Payloads() exception := payloads.Errors[0].Exception stacktrace := exception.Stacktrace assert.Equal(t, "zing", exception.Message) assert.Equal(t, "errors", exception.Module) assert.Equal(t, "errorString", exception.Type) require.NotEmpty(t, stacktrace) assert.Equal(t, "TestTracerErrors", stacktrace[0].Function) } func TestTracerErrorFlushes(t *testing.T) { tracer, recorder := transporttest.NewRecorderTracer() defer tracer.Close() payloads := make(chan transporttest.Payloads, 1) var wg sync.WaitGroup wg.Add(1) done := make(chan struct{}) go func() { defer wg.Done() var last int for { select { case <-time.After(10 * time.Millisecond): p := recorder.Payloads() if n := len(p.Errors) + len(p.Transactions); n > last { last = n payloads <- p } case <-done: return } } }() defer wg.Wait() defer close(done) // Sending a transaction should not cause a request // to be sent immediately. tracer.StartTransaction("name", "type").End() select { case <-time.After(200 * time.Millisecond): case p := <-payloads: t.Fatalf("unexpected payloads: %+v", p) } // Sending an error flushes the request body. tracer.NewError(errors.New("zing")).Send() deadline := time.After(2 * time.Second) for { var p transporttest.Payloads select { case <-deadline: t.Fatalf("timed out waiting for request") case p = <-payloads: } if len(p.Errors) != 0 { assert.Len(t, p.Errors, 1) break } // The transport may not have decoded // the error yet, continue waiting. 
} } func TestTracerRecovered(t *testing.T) { tracer, r := transporttest.NewRecorderTracer() defer tracer.Close() capturePanic(tracer, "blam") tracer.Flush(nil) payloads := r.Payloads() error0 := payloads.Errors[0] transaction := payloads.Transactions[0] span := payloads.Spans[0] assert.Equal(t, "blam", error0.Exception.Message) assert.Equal(t, transaction.ID, error0.TransactionID) assert.Equal(t, span.ID, error0.ParentID) } func capturePanic(tracer *apm.Tracer, v interface{}) { tx := tracer.StartTransaction("name", "type") defer tx.End() span := tx.StartSpan("name", "type", nil) defer span.End() defer func() { if v := recover(); v != nil { e := tracer.Recovered(v) e.SetSpan(span) e.Send() } }() panic(v) } func TestTracerServiceNameValidation(t *testing.T) { _, err := apm.NewTracer("wot!", "") assert.EqualError(t, err, `invalid service name "wot!": character '!' is not in the allowed set (a-zA-Z0-9 _-)`) } func TestSpanStackTrace(t *testing.T) { tracer, r := transporttest.NewRecorderTracer() defer tracer.Close() tracer.SetSpanFramesMinDuration(10 * time.Millisecond) tx := tracer.StartTransaction("name", "type") s := tx.StartSpan("name", "type", nil) s.Duration = 9 * time.Millisecond s.End() s = tx.StartSpan("name", "type", nil) s.Duration = 10 * time.Millisecond s.End() s = tx.StartSpan("name", "type", nil) s.SetStacktrace(1) s.Duration = 11 * time.Millisecond s.End() tx.End() tracer.Flush(nil) spans := r.Payloads().Spans require.Len(t, spans, 3) // Span 0 took only 9ms, so we don't set its stacktrace. assert.Nil(t, spans[0].Stacktrace) // Span 1 took the required 10ms, so we set its stacktrace. assert.NotNil(t, spans[1].Stacktrace) assert.NotEqual(t, spans[1].Stacktrace[0].Function, "TestSpanStackTrace") // Span 2 took more than the required 10ms, but its stacktrace // was already set; we don't replace it. assert.NotNil(t, spans[2].Stacktrace) assert.Equal(t, spans[2].Stacktrace[0].Function, "TestSpanStackTrace") } func TestTracerRequestSize(t *testing.T) { os.Setenv("ELASTIC_APM_API_REQUEST_SIZE", "1KB") defer os.Unsetenv("ELASTIC_APM_API_REQUEST_SIZE") // Set the request time to some very long duration, // to highlight the fact that the request size is // the cause of request completion. os.Setenv("ELASTIC_APM_API_REQUEST_TIME", "60s") defer os.Unsetenv("ELASTIC_APM_API_REQUEST_TIME") requestHandled := make(chan struct{}, 1) server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { if req.URL.Path == "/" { return } io.Copy(ioutil.Discard, req.Body) requestHandled <- struct{}{} })) defer server.Close() os.Setenv("ELASTIC_APM_SERVER_URL", server.URL) defer os.Unsetenv("ELASTIC_APM_SERVER_URL") httpTransport, err := transport.NewHTTPTransport(transport.HTTPTransportOptions{}) require.NoError(t, err) tracer, err := apm.NewTracerOptions(apm.TracerOptions{ ServiceName: "tracer_testing", Transport: httpTransport, }) require.NoError(t, err) defer tracer.Close() // Send through a bunch of transactions, filling up the API request // size, causing the request to be immediately completed. clientStart := time.Now() for i := 0; i < 500; i++ { tracer.StartTransaction("name", "type").End() // Yield to the tracer for more predictable timing. runtime.Gosched() } <-requestHandled clientEnd := time.Now() assert.Condition(t, func() bool { // Should be considerably less than 10s, which is // considerably less than the configured 60s limit. 
return clientEnd.Sub(clientStart) < 10*time.Second }) } func TestTracerBufferSize(t *testing.T) { os.Setenv("ELASTIC_APM_API_REQUEST_SIZE", "1KB") os.Setenv("ELASTIC_APM_API_BUFFER_SIZE", "10KB") defer os.Unsetenv("ELASTIC_APM_API_REQUEST_SIZE") defer os.Unsetenv("ELASTIC_APM_API_BUFFER_SIZE") var recorder transporttest.RecorderTransport unblock := make(chan struct{}) tracer, err := apm.NewTracerOptions(apm.TracerOptions{ ServiceName: "transporttest", Transport: blockedTransport{ Transport: &recorder, unblocked: unblock, }, }) require.NoError(t, err) defer tracer.Close() // Send a bunch of transactions, which will be buffered. Because the // buffer cannot hold all of them we should expect to see some of the // older ones discarded. const N = 1000 for i := 0; i < N; i++ { tracer.StartTransaction(fmt.Sprint(i), "type").End() } close(unblock) // allow requests through now for { stats := tracer.Stats() if stats.TransactionsSent+stats.TransactionsDropped == N { require.NotZero(t, stats.TransactionsSent) require.NotZero(t, stats.TransactionsDropped) break } tracer.Flush(nil) } stats := tracer.Stats() p := recorder.Payloads() assert.Equal(t, int(stats.TransactionsSent), len(p.Transactions)) // It's possible that the tracer loop receives the flush request after // all transactions are in the channel buffer, before any individual // transactions make their way through. In most cases we would expect // to see the "0" transaction in the request, but that won't be the // case if the flush comes first. offset := 0 for i, tx := range p.Transactions { if tx.Name != fmt.Sprint(i+offset) { require.Equal(t, 0, offset) n, err := strconv.Atoi(tx.Name) require.NoError(t, err) offset = n - i t.Logf("found gap of %d after first %d transactions", offset, i) } } assert.NotEqual(t, 0, offset) } func TestTracerBodyUnread(t *testing.T) { os.Setenv("ELASTIC_APM_API_REQUEST_SIZE", "1KB") defer os.Unsetenv("ELASTIC_APM_API_REQUEST_SIZE") // Don't consume the request body in the handler; close the connection. var requests int64 server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { atomic.AddInt64(&requests, 1) w.Header().Set("Connection", "close") })) defer server.Close() os.Setenv("ELASTIC_APM_SERVER_URL", server.URL) defer os.Unsetenv("ELASTIC_APM_SERVER_URL") httpTransport, err := transport.NewHTTPTransport(transport.HTTPTransportOptions{}) require.NoError(t, err) tracer, err := apm.NewTracerOptions(apm.TracerOptions{ ServiceName: "tracer_testing", Transport: httpTransport, }) require.NoError(t, err) defer tracer.Close() for atomic.LoadInt64(&requests) <= 2 { tracer.StartTransaction("name", "type").End() tracer.Flush(nil) } } func TestTracerMetadata(t *testing.T) { tracer, recorder := transporttest.NewRecorderTracer() defer tracer.Close() tracer.StartTransaction("name", "type").End() tracer.Flush(nil) // TODO(axw) check other metadata system, _, _, _ := recorder.Metadata() container, err := apmhostutil.Container() if err != nil { assert.Nil(t, system.Container) } else { require.NotNil(t, system.Container) assert.Equal(t, container, system.Container) } // Cloud metadata is disabled by apmtest by default. 
assert.Equal(t, "none", os.Getenv("ELASTIC_APM_CLOUD_PROVIDER")) assert.Zero(t, recorder.CloudMetadata()) } func TestTracerKubernetesMetadata(t *testing.T) { t.Run("no-env", func(t *testing.T) { system, _, _, _ := getSubprocessMetadata(t) assert.Nil(t, system.Kubernetes) }) t.Run("namespace-only", func(t *testing.T) { system, _, _, _ := getSubprocessMetadata(t, "KUBERNETES_NAMESPACE=myapp") assert.Equal(t, &model.Kubernetes{ Namespace: "myapp", }, system.Kubernetes) }) t.Run("pod-only", func(t *testing.T) { system, _, _, _ := getSubprocessMetadata(t, "KUBERNETES_POD_NAME=luna", "KUBERNETES_POD_UID=oneone!11") assert.Equal(t, &model.Kubernetes{ Pod: &model.KubernetesPod{ Name: "luna", UID: "oneone!11", }, }, system.Kubernetes) }) t.Run("node-only", func(t *testing.T) { system, _, _, _ := getSubprocessMetadata(t, "KUBERNETES_NODE_NAME=noddy") assert.Equal(t, &model.Kubernetes{ Node: &model.KubernetesNode{ Name: "noddy", }, }, system.Kubernetes) }) } func TestTracerActive(t *testing.T) { tracer, _ := transporttest.NewRecorderTracer() defer tracer.Close() assert.True(t, tracer.Active()) // Kick off calls to tracer.Active concurrently // with the tracer.Close, to test that we ensure // there are no data races. go func() { for i := 0; i < 100; i++ { tracer.Active() } }() } func TestTracerCaptureHeaders(t *testing.T) { tracer, recorder := transporttest.NewRecorderTracer() defer tracer.Close() req, err := http.NewRequest("GET", "http://testing.invalid", nil) require.NoError(t, err) req.Header.Set("foo", "bar") respHeaders := make(http.Header) respHeaders.Set("baz", "qux") for _, enabled := range []bool{false, true} { tracer.SetCaptureHeaders(enabled) tx := tracer.StartTransaction("name", "type") tx.Context.SetHTTPRequest(req) tx.Context.SetHTTPResponseHeaders(respHeaders) tx.Context.SetHTTPStatusCode(202) tx.End() } tracer.Flush(nil) payloads := recorder.Payloads() require.Len(t, payloads.Transactions, 2) for i, enabled := range []bool{false, true} { tx := payloads.Transactions[i] require.NotNil(t, tx.Context.Request) require.NotNil(t, tx.Context.Response) if enabled { assert.NotNil(t, tx.Context.Request.Headers) assert.NotNil(t, tx.Context.Response.Headers) } else { assert.Nil(t, tx.Context.Request.Headers) assert.Nil(t, tx.Context.Response.Headers) } } } func TestTracerDefaultTransport(t *testing.T) { mux := http.NewServeMux() mux.HandleFunc("/intake/v2/events", func(w http.ResponseWriter, r *http.Request) {}) srv := httptest.NewServer(mux) t.Run("valid", func(t *testing.T) { os.Setenv("ELASTIC_APM_SERVER_URL", srv.URL) defer os.Unsetenv("ELASTIC_APM_SERVER_URL") tracer, err := apm.NewTracer("", "") require.NoError(t, err) defer tracer.Close() tracer.StartTransaction("name", "type").End() tracer.Flush(nil) assert.Equal(t, apm.TracerStats{TransactionsSent: 1}, tracer.Stats()) }) t.Run("invalid", func(t *testing.T) { os.Setenv("ELASTIC_APM_SERVER_TIMEOUT", "never") defer os.Unsetenv("ELASTIC_APM_SERVER_TIMEOUT") // NewTracer returns errors. tracer, err := apm.NewTracer("", "") require.Error(t, err) assert.EqualError(t, err, "failed to parse ELASTIC_APM_SERVER_TIMEOUT: invalid duration never") // Implicitly created Tracers will have a discard tracer. 
apm.SetDefaultTracer(nil) tracer = apm.DefaultTracer() tracer.StartTransaction("name", "type").End() tracer.Flush(nil) assert.Equal(t, apm.TracerStats{ Errors: apm.TracerStatsErrors{ SendStream: 1, }, }, tracer.Stats()) }) } func TestTracerUnsampledTransactions(t *testing.T) { newTracer := func(v, remoteV uint32) (*apm.Tracer, *serverVersionRecorderTransport) { transport := serverVersionRecorderTransport{ RecorderTransport: &transporttest.RecorderTransport{}, ServerVersion: v, RemoteServerVersion: remoteV, } tracer, err := apm.NewTracerOptions(apm.TracerOptions{ ServiceName: "transporttest", Transport: &transport, }) require.NoError(t, err) return tracer, &transport } t.Run("drop", func(t *testing.T) { tracer, recorder := newTracer(0, 8) defer tracer.Close() tracer.SetSampler(apm.NewRatioSampler(0.0)) tx := tracer.StartTransaction("tx", "unsampled") tx.End() tracer.Flush(nil) txs := recorder.Payloads().Transactions require.Empty(t, txs) }) t.Run("send", func(t *testing.T) { tracer, recorder := newTracer(0, 7) defer tracer.Close() tracer.SetSampler(apm.NewRatioSampler(0.0)) tx := tracer.StartTransaction("tx", "unsampled") tx.End() tracer.Flush(nil) txs := recorder.Payloads().Transactions require.NotEmpty(t, txs) assert.Equal(t, txs[0].Type, "unsampled") }) t.Run("send-sampled-7", func(t *testing.T) { tracer, recorder := newTracer(0, 8) defer tracer.Close() tx := tracer.StartTransaction("tx", "sampled") tx.End() tracer.Flush(nil) txs := recorder.Payloads().Transactions require.NotEmpty(t, txs) assert.Equal(t, txs[0].Type, "sampled") }) t.Run("send-sampled-8", func(t *testing.T) { tracer, recorder := newTracer(0, 8) defer tracer.Close() tx := tracer.StartTransaction("tx", "sampled") tx.End() tracer.Flush(nil) txs := recorder.Payloads().Transactions require.NotEmpty(t, txs) assert.Equal(t, txs[0].Type, "sampled") }) t.Run("send-unimplemented-interface", func(t *testing.T) { tracer, recorder := transporttest.NewRecorderTracer() defer tracer.Close() tracer.SetSampler(apm.NewRatioSampler(0.0)) tx := tracer.StartTransaction("tx", "unsampled") tx.End() tracer.Flush(nil) txs := recorder.Payloads().Transactions require.NotEmpty(t, txs) assert.Equal(t, txs[0].Type, "unsampled") }) t.Run("send-onerror", func(t *testing.T) { tracer, recorder := newTracer(0, 0) defer tracer.Close() tracer.SetSampler(apm.NewRatioSampler(0.0)) tx := tracer.StartTransaction("tx", "unsampled") tx.End() tracer.Flush(nil) txs := recorder.Payloads().Transactions require.NotEmpty(t, txs) assert.Equal(t, txs[0].Type, "unsampled") }) } func TestTracerUnsampledTransactionsHTTPTransport(t *testing.T) { newTracer := func(srvURL string) (*apm.Tracer, *transport.HTTPTransport) { os.Setenv("ELASTIC_APM_SERVER_URL", srvURL) defer os.Unsetenv("ELASTIC_APM_SERVER_URL") transport, err := transport.NewHTTPTransport(transport.HTTPTransportOptions{}) require.NoError(t, err) tracer, err := apm.NewTracerOptions(apm.TracerOptions{ ServiceName: "transporttest", Transport: transport, }) require.NoError(t, err) return tracer, transport } type event struct { Tx *model.Transaction `json:"transaction,omitempty"` } countTransactions := func(body io.ReadCloser) uint32 { reader, err := zlib.NewReader(body) require.NoError(t, err) scanner := bufio.NewScanner(reader) var tCount uint32 for scanner.Scan() { var e event json.Unmarshal([]byte(scanner.Text()), &e) assert.NoError(t, err) if e.Tx != nil { tCount++ } } return tCount } intakeHandlerFunc := func(tCounter *uint32) http.Handler { return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { 
defer r.Body.Close() atomic.AddUint32(tCounter, countTransactions(r.Body)) rw.WriteHeader(202) }) } // This handler is used to test for cache invalidation, it will return an // error only once when the number of transactions is 100, so we can test // the cache invalidation. intakeHandlerErr100Func := func(tCounter *uint32) http.Handler { var hasErrored bool return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { defer r.Body.Close() if atomic.LoadUint32(tCounter) == 100 && !hasErrored { hasErrored = true io.Copy(ioutil.Discard, r.Body) http.Error(rw, "error-message", http.StatusInternalServerError) return } atomic.AddUint32(tCounter, countTransactions(r.Body)) rw.WriteHeader(202) }) } rootHandlerFunc := func(v string, rootCounter *uint32) http.Handler { return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { // Only handle requests that match the path. if r.URL.Path != "/" { return } rw.WriteHeader(200) rw.Write([]byte(fmt.Sprintf(`{"version":"%s"}`, v))) atomic.AddUint32(rootCounter, 1) }) } generateTx := func(tracer *apm.Tracer) { // Sends 100 unsampled transactions to the tracer. tracer.SetSampler(apm.NewRatioSampler(0.0)) for i := 0; i < 100; i++ { tx := tracer.StartTransaction("tx", "unsampled") tx.End() } // Sends 100 sampled transactions to the tracer. tracer.SetSampler(apm.NewRatioSampler(1.0)) for i := 0; i < 100; i++ { tx := tracer.StartTransaction("tx", "sampled") tx.End() } <-time.After(time.Millisecond) tracer.Flush(nil) } waitMajorServerVersion := func(t *testing.T, transport *transport.HTTPTransport, expected int) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() for ctx.Err() == nil { actual := int(transport.MajorServerVersion(ctx, false)) if actual == expected { return } } t.Fatalf("timed out waiting for major server version to become %d", expected) } t.Run("pre-8-sends-all", func(t *testing.T) { var tCounter, rootCounter uint32 mux := http.NewServeMux() mux.Handle("/intake/v2/events", intakeHandlerFunc(&tCounter)) mux.Handle("/", rootHandlerFunc("7.17.0", &rootCounter)) srv := httptest.NewServer(mux) defer srv.Close() tracer, transport := newTracer(srv.URL) waitMajorServerVersion(t, transport, 7) generateTx(tracer) assert.Equal(t, uint32(200), atomic.LoadUint32(&tCounter)) assert.Equal(t, uint32(1), atomic.LoadUint32(&rootCounter)) }) t.Run("post-8-sends-sampled-only", func(t *testing.T) { var tCounter, rootCounter uint32 mux := http.NewServeMux() mux.Handle("/intake/v2/events", intakeHandlerFunc(&tCounter)) mux.Handle("/", rootHandlerFunc("8.0.0", &rootCounter)) srv := httptest.NewServer(mux) defer srv.Close() tracer, transport := newTracer(srv.URL) waitMajorServerVersion(t, transport, 8) generateTx(tracer) assert.Equal(t, uint32(100), atomic.LoadUint32(&tCounter)) assert.Equal(t, uint32(1), atomic.LoadUint32(&rootCounter)) }) t.Run("post-8-sends-sampled-only-after-cache-invalidation-send-all", func(t *testing.T) { // This test case asserts that when the server's major version is >= 8 // only the sampled transactions are sent. After 100 transactions have // been sent to the server, the server will return a 500 error and will // invalidate the cache, causing all transactions (sampled and unsampled) // to be sent, until the version is refreshed. Since it will take 10s // for the version to be refreshed, this test doesn't assert that. 
var tCounter, rootCounter uint32 mux := http.NewServeMux() mux.Handle("/intake/v2/events", intakeHandlerErr100Func(&tCounter)) mux.Handle("/", rootHandlerFunc("8.0.0", &rootCounter)) srv := httptest.NewServer(mux) defer srv.Close() tracer, transport := newTracer(srv.URL) waitMajorServerVersion(t, transport, 8) for i := 0; i < 3; i++ { generateTx(tracer) } assert.Equal(t, uint32(300), atomic.LoadUint32(&tCounter)) assert.Equal(t, uint32(1), atomic.LoadUint32(&rootCounter)) // Manually refresh the remote version. ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() transport.MajorServerVersion(ctx, true) assert.Equal(t, uint32(2), atomic.LoadUint32(&rootCounter)) // Send 100 sampled and 100 unsampled txs. generateTx(tracer) assert.Equal(t, uint32(400), atomic.LoadUint32(&tCounter)) }) t.Run("invalid-version-sends-all", func(t *testing.T) { var tCounter, rootCounter uint32 mux := http.NewServeMux() mux.Handle("/intake/v2/events", intakeHandlerFunc(&tCounter)) mux.Handle("/", rootHandlerFunc("invalid-version", &rootCounter)) srv := httptest.NewServer(mux) defer srv.Close() tracer, _ := newTracer(srv.URL) generateTx(tracer) assert.Equal(t, uint32(200), atomic.LoadUint32(&tCounter)) assert.Equal(t, uint32(1), atomic.LoadUint32(&rootCounter)) }) } type blockedTransport struct { transport.Transport unblocked chan struct{} } func (bt blockedTransport) SendStream(ctx context.Context, r io.Reader) error { select { case <-ctx.Done(): return ctx.Err() case <-bt.unblocked: return bt.Transport.SendStream(ctx, r) } } // serverVersionRecorderTransport wraps a RecorderTransport providing the type serverVersionRecorderTransport struct { *transporttest.RecorderTransport ServerVersion uint32 RemoteServerVersion uint32 } // MajorServerVersion returns the stored version. func (r *serverVersionRecorderTransport) MajorServerVersion(_ context.Context, refreshStale bool) uint32 { if refreshStale { atomic.StoreUint32(&r.ServerVersion, r.RemoteServerVersion) } return atomic.LoadUint32(&r.ServerVersion) }
[ "\"ELASTIC_APM_CLOUD_PROVIDER\"" ]
[]
[ "ELASTIC_APM_CLOUD_PROVIDER" ]
[]
["ELASTIC_APM_CLOUD_PROVIDER"]
go
1
0
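For this row, constarg holds ELASTIC_APM_CLOUD_PROVIDER, the string literal passed to the single os.Getenv call in tracer_test.go. As an illustration of how the constarg column relates to the content column for Go rows, and not necessarily the pipeline that produced this dataset, a constant-argument extraction could be sketched as:

```python
import re

# Match string literals passed directly to os.Getenv in Go source.
# Simplification: ignores os.LookupEnv, variables passed as arguments,
# and backtick raw-string literals.
GO_GETENV = re.compile(r'os\.Getenv\(\s*"([^"]+)"\s*\)')

def const_env_args_go(source: str) -> list[str]:
    return sorted(set(GO_GETENV.findall(source)))

# Applied to the tracer_test.go content above, this returns
# ["ELASTIC_APM_CLOUD_PROVIDER"], matching the row's constarg value.
```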
create/create.go
package create import ( "fmt" "os" "path/filepath" "strings" "text/template" "time" rice "github.com/GeertJohan/go.rice" "github.com/bitrise-io/go-utils/colorstring" "github.com/bitrise-io/go-utils/command" "github.com/bitrise-io/go-utils/fileutil" "github.com/bitrise-io/go-utils/pathutil" "github.com/bitrise-io/go-utils/templateutil" "github.com/bitrise-io/goinp/goinp" "github.com/bitrise-io/gows/goutil" "github.com/pkg/errors" ) const ( toolkitTypeBash = "bash" toolkitTypeGo = "go" ) // GoToolkitInventoryModel ... type GoToolkitInventoryModel struct { // PackageID: e.g.: github.com/bitrise-io/bitrise PackageID string } // InventoryModel ... type InventoryModel struct { Author string Title string ID string Summary string Description string PrimaryTypeTag string // WebsiteURL string SourceCodeURL string SupportURL string // ToolkitType string GoToolkitInventory GoToolkitInventoryModel // Year int } // Step ... func Step() error { inventoryForCreateStep := InventoryModel{ Author: "", Title: "", ID: "", Summary: "", Description: "", PrimaryTypeTag: "", // WebsiteURL: "", SourceCodeURL: "", SupportURL: "", // ToolkitType: toolkitTypeBash, GoToolkitInventory: GoToolkitInventoryModel{ PackageID: "", }, // Year: time.Now().Year(), } { defaultAuthor := readAuthorFromGitConfig() author, err := goinp.AskForStringWithDefault(colorstring.Green("Who are you / who's the author?"), defaultAuthor) if err != nil { return errors.Wrap(err, "Failed to determine author") } inventoryForCreateStep.Author = author } { title, err := goinp.AskForString(colorstring.Green("What's the title / name of the Step?")) if err != nil { return errors.Wrap(err, "Failed to determine title") } inventoryForCreateStep.Title = title } { id := generateIDFromString(inventoryForCreateStep.Title) printInfoLine("Generated Step ID (from provided Title):", id) inventoryForCreateStep.ID = id } { summary, err := goinp.AskForString(colorstring.Green("Please provide a summary")) if err != nil { return errors.Wrap(err, "Failed to determine summary") } inventoryForCreateStep.Summary = summary } { description, err := goinp.AskForString(colorstring.Green("Please provide a description")) if err != nil { return errors.Wrap(err, "Failed to determine description") } inventoryForCreateStep.Description = description } { // available primary categories / type_tags: // https://github.com/bitrise-io/bitrise/blob/master/_docs/step-development-guideline.md#step-grouping-convention fmt.Println() primaryTypeTag, err := goinp.SelectFromStrings(colorstring.Green("What's the primary category of this Step?"), []string{ "access-control", "artifact-info", "installer", "deploy", "utility", "dependency", "code-sign", "build", "test", "notification", }) if err != nil { return errors.Wrap(err, "Failed to determine primary category") } inventoryForCreateStep.PrimaryTypeTag = primaryTypeTag } { fmt.Println() fmt.Println("Toolkit: the entry/base language of the Step.") fmt.Println("Our recommendation is to use Bash for very simple Steps") fmt.Println(" and for more complex ones use another language, one which we have toolkit support for.") fmt.Println("If you're just getting started with Step development our suggestion is to select Bash,") fmt.Println(" as that's the easiest option. It's possible to convert the step later, if needed.") fmt.Println("Note: Of course even if you select e.g. Bash as the entry language, you can run other scripts from there,") fmt.Println(" so it's possible to write the majority of the step's code in e.g. 
Ruby,") fmt.Println(" and have an entry Bash script which does nothing else except running the Ruby script.") toolkitType, err := goinp.SelectFromStrings(colorstring.Green("Which toolkit (language) would you like to use?"), []string{ toolkitTypeBash, toolkitTypeGo, }) if err != nil { return errors.Wrap(err, "Failed to determine the toolkit") } inventoryForCreateStep.ToolkitType = toolkitType } { fmt.Println() fmt.Println("Website & source code URL:") isGitHub, err := goinp.AskForBoolWithDefault(colorstring.Green("Will you host the source code on GitHub?"), true) if err != nil { return errors.Wrap(err, "Failed to determine whether source will be hosted on GitHub") } websiteURL := "" supportURL := "" if isGitHub { ghUsername, err := goinp.AskForString(colorstring.Green("What's your GitHub username (user/org where you'll register the step's repository)?")) if err != nil { return errors.Wrap(err, "Failed to determine GitHub username") } websiteURL = fmt.Sprintf("https://github.com/%s/%s", ghUsername, stepDirAndRepoNameFromID(inventoryForCreateStep.ID)) fmt.Println("We'll use", colorstring.Yellow(websiteURL), "as the website/repo URL for this step.") fmt.Println("Please when you create the repository on GitHub for the step") fmt.Println(" create it under the user/org:", colorstring.Yellow(ghUsername)) fmt.Println(" and the name of the repository should be:", colorstring.Yellow(stepDirAndRepoNameFromID(inventoryForCreateStep.ID))) supportURL = websiteURL + "/issues" } else { fmt.Println("To use your step quickly in your bitrise configs, and in case you'll want to share it with others,") fmt.Println(" you'll have to make the source code available on a git hosting service.") fmt.Println("Please create a repository on your favorite source code hosting service,") fmt.Println(" with the repository name:", colorstring.Yellow(stepDirAndRepoNameFromID(inventoryForCreateStep.ID))) fmt.Println("Once created, please copy paste the repo's HTTPS URL.") fmt.Println("If you create it on GitHub the HTTPS URL should look like this:") fmt.Println(" " + colorstring.Yellow("https://github.com/YOUR-GITHUB-USERNAME/"+stepDirAndRepoNameFromID(inventoryForCreateStep.ID))) websiteURL, err = goinp.AskForString(colorstring.Green("What's the step's repo (website) URL?")) if err != nil { return errors.Wrap(err, "Failed to determine the package ID") } supportURL = websiteURL } inventoryForCreateStep.WebsiteURL = websiteURL inventoryForCreateStep.SourceCodeURL = websiteURL inventoryForCreateStep.SupportURL = supportURL } if inventoryForCreateStep.ToolkitType == toolkitTypeGo { if goPkgID, err := goutil.ParsePackageNameFromURL(inventoryForCreateStep.SourceCodeURL); err != nil { fmt.Println() fmt.Println(" [!] 
Failed to parse Go package ID from URL, error:", err) fmt.Println() fmt.Println("Go programs require a Go package ID, in order to work well with the standard Go tools.") fmt.Println("The package ID looks like this usually: SOURCE-CODE-HOSTING-SERVICE/user/package-name") fmt.Println(" Example: github.com/bitrise-io/bitrise") fmt.Println("If you (plan to) use GitHub for hosting this step's source code,") fmt.Println("the suggested package name for this step is:", colorstring.Yellow("github.com/YOUR-GITHUB-USERNAME/"+stepDirAndRepoNameFromID(inventoryForCreateStep.ID))) userInputGoPkgID, err := goinp.AskForString(colorstring.Green("What should be the Go package ID?")) if err != nil { return errors.Wrap(err, "Failed to determine the package ID") } inventoryForCreateStep.GoToolkitInventory.PackageID = userInputGoPkgID } else { inventoryForCreateStep.GoToolkitInventory.PackageID = goPkgID } } return createStep(inventoryForCreateStep) } func readAuthorFromGitConfig() string { userName, err := command.New("git", "config", "user.name").RunAndReturnTrimmedOutput() if err != nil { return "" } return userName } func generateIDFromString(s string) string { s = strings.TrimSpace(strings.ToLower(s)) s = strings.Map(func(r rune) rune { if (r < 'a' || r > 'z') && (r < '0' || r > '9') { return '-' } return r }, s) return strings.Trim(s, "-") } func printInfoLine(s string, args ...string) { parts := append([]string{colorstring.Yellow(s)}, args...) fmt.Println(strings.Join(parts, " ")) } func printSuccessLine(s string, args ...string) { parts := append([]string{colorstring.Green(s)}, args...) fmt.Println(strings.Join(parts, " ")) } func stepDirAndRepoNameFromID(stepID string) string { return "bitrise-step-" + stepID } func createStep(inventory InventoryModel) error { fmt.Println() // create directory stepDirAbsPth := "" if inventory.ToolkitType == toolkitTypeBash { baseDirPath := stepDirAndRepoNameFromID(inventory.ID) absPth, err := pathutil.AbsPath(baseDirPath) if err != nil { return errors.Wrapf(err, "Failed to get absolute path for step directory (%s)", baseDirPath) } stepDirAbsPth = absPth } else if inventory.ToolkitType == toolkitTypeGo { gopath := os.Getenv("GOPATH") if len(gopath) < 1 { // no GOPATH env set - use "${HOME}/go", which is the default GOPATH since Go 1.8 gopath = filepath.Join(pathutil.UserHomeDir(), "go") } baseDirPath := filepath.Join(gopath, "src", inventory.GoToolkitInventory.PackageID) absPth, err := pathutil.AbsPath(baseDirPath) if err != nil { return errors.Wrapf(err, "Failed to get absolute path for step directory (%s)", baseDirPath) } stepDirAbsPth = absPth } else { return errors.Errorf("Invalid Toolkit Type: %s", inventory.ToolkitType) } printInfoLine("Creating Step directory at:", stepDirAbsPth) if exists, err := pathutil.IsPathExists(stepDirAbsPth); err != nil { return errors.Wrap(err, "Failed to check whether step dir already exists") } else if exists { return errors.Errorf("Directory (%s) already exists!", stepDirAbsPth) } if err := os.MkdirAll(stepDirAbsPth, 0755); err != nil { return errors.Wrap(err, "Failed to create step directory") } // save files from templates for _, aTemplate := range []struct { TemplatePath string FilePath string ToolkitFilter string }{ { TemplatePath: "README.md.gotemplate", FilePath: filepath.Join(stepDirAbsPth, "README.md"), }, { TemplatePath: "LICENSE.gotemplate", FilePath: filepath.Join(stepDirAbsPth, "LICENSE"), }, { TemplatePath: "gitignore.gotemplate", FilePath: filepath.Join(stepDirAbsPth, ".gitignore"), }, { TemplatePath: 
"step.yml.gotemplate", FilePath: filepath.Join(stepDirAbsPth, "step.yml"), }, { TemplatePath: "bitrise.yml.gotemplate", FilePath: filepath.Join(stepDirAbsPth, "bitrise.yml"), }, { TemplatePath: "bitrise.secrets.yml.gotemplate", FilePath: filepath.Join(stepDirAbsPth, ".bitrise.secrets.yml"), }, // Toolkit: Bash { TemplatePath: "bash/step.sh.gotemplate", FilePath: filepath.Join(stepDirAbsPth, "step.sh"), ToolkitFilter: toolkitTypeBash, }, // Toolkit: Go { TemplatePath: "go/main.go.gotemplate", FilePath: filepath.Join(stepDirAbsPth, "main.go"), ToolkitFilter: toolkitTypeGo, }, } { if aTemplate.ToolkitFilter != "" && aTemplate.ToolkitFilter != inventory.ToolkitType { // skip continue } if err := evaluateTemplateAndWriteToFile(aTemplate.FilePath, aTemplate.TemplatePath, inventory); err != nil { return errors.Wrap(err, "Failed to write template into file") } fmt.Println(" *", colorstring.Green("[OK]"), "created:", aTemplate.FilePath) } fmt.Println() fmt.Println(colorstring.Yellow("Initializing git repository in step directory ...")) if err := initGitRepoAtPath(stepDirAbsPth, inventory.SourceCodeURL); err != nil { return errors.Wrap(err, "Failed to initialize git repository in step directory") } fmt.Println() printSuccessLine("Step is ready!") fmt.Println() fmt.Println("You can find it at:", stepDirAbsPth) fmt.Println() fmt.Println("TIP:", colorstring.Yellow("cd"), "into", colorstring.Yellow(stepDirAbsPth), "and run", colorstring.Yellow("bitrise run test"), "for a quick test drive!") return nil } func initGitRepoAtPath(dirPth string, remoteURL string) error { { cmdGitInit := command.New("git", "init") fmt.Println(" $", cmdGitInit.PrintableCommandArgs()) if cmdLog, err := cmdGitInit.SetDir(dirPth).RunAndReturnTrimmedCombinedOutput(); err != nil { return errors.Wrapf(err, "Failed to 'git init' in directory (%s). Output: %s", dirPth, cmdLog) } } { cmdGitRemoteAdd := command.New("git", "remote", "add", "origin", remoteURL) fmt.Println(" $", cmdGitRemoteAdd.PrintableCommandArgs()) if cmdLog, err := cmdGitRemoteAdd.SetDir(dirPth).RunAndReturnTrimmedCombinedOutput(); err != nil { return errors.Wrapf(err, "Failed to 'git remote add origin %s'. Output: %s", remoteURL, cmdLog) } } return nil } func evaluateTemplate(templatePth string, inventory InventoryModel) (string, error) { templatesBox, err := rice.FindBox("templates") if err != nil { return "", errors.Wrap(err, "Failed to find templates dir/box") } templateContent, err := templatesBox.String(templatePth) if err != nil { return "", errors.Wrapf(err, "Failed to read %s template", templatePth) } evaluatedContent, err := templateutil.EvaluateTemplateStringToString(templateContent, inventory, template.FuncMap{}) if err != nil { return "", errors.Wrapf(err, "Failed to evaluate template %s", templatePth) } return evaluatedContent, nil } func evaluateTemplateAndWriteToFile(filePth, templatePth string, inventory InventoryModel) error { evaluatedContent, err := evaluateTemplate(templatePth, inventory) if err != nil { return errors.Wrap(err, "Failed to evaluate template") } if err := fileutil.WriteStringToFile(filePth, evaluatedContent); err != nil { return errors.Wrapf(err, "Failed to write evaluated template into file (%s)", filePth) } return nil }
[ "\"GOPATH\"" ]
[]
[ "GOPATH" ]
[]
["GOPATH"]
go
1
0
deafwave/cmds/start_funcs.py
import asyncio import os import subprocess import sys from pathlib import Path from typing import Optional from deafwave.daemon.client import DaemonProxy, connect_to_daemon_and_validate from deafwave.util.service_groups import services_for_groups def launch_start_daemon(root_path: Path) -> subprocess.Popen: os.environ["DEAFWAVE_ROOT"] = str(root_path) # TODO: use startupinfo=subprocess.DETACHED_PROCESS on windows deafwave = sys.argv[0] process = subprocess.Popen( f"{deafwave} run_daemon".split(), stdout=subprocess.PIPE) return process async def create_start_daemon_connection(root_path: Path) -> Optional[DaemonProxy]: connection = await connect_to_daemon_and_validate(root_path) if connection is None: print("Starting daemon") # launch a daemon process = launch_start_daemon(root_path) # give the daemon a chance to start up if process.stdout: process.stdout.readline() await asyncio.sleep(1) # it prints "daemon: listening" connection = await connect_to_daemon_and_validate(root_path) if connection: return connection return None async def async_start(root_path: Path, group: str, restart: bool) -> None: daemon = await create_start_daemon_connection(root_path) if daemon is None: print("Failed to create the deafwave daemon") return None for service in services_for_groups(group): if await daemon.is_running(service_name=service): print(f"{service}: ", end="", flush=True) if restart: if not await daemon.is_running(service_name=service): print("not running") elif await daemon.stop_service(service_name=service): print("stopped") else: print("stop failed") else: print("Already running, use `-r` to restart") continue print(f"{service}: ", end="", flush=True) msg = await daemon.start_service(service_name=service) success = msg and msg["data"]["success"] if success is True: print("started") else: error = "no response" if msg: error = msg["data"]["error"] print(f"{service} failed to start. Error: {error}") await daemon.close()
environment: []
variablearg: []
constarg: [ "DEAFWAVE_ROOT" ]
variableargjson: []
constargjson: ["DEAFWAVE_ROOT"]
lang: python
constargcount: 1
variableargcount: 0
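For the Python rows, constarg likewise reflects constant keys used with os.environ: in start_funcs.py the only such use is the assignment os.environ["DEAFWAVE_ROOT"] = str(root_path). A comparable sketch for Python sources, with the same caveats as the Go version above, could look like:

```python
import re

# Match constant keys used with os.environ[...], os.environ.get(...)
# or os.getenv(...); only double-quoted constant keys are matched,
# and variable keys or other access patterns are ignored.
PY_ENV_KEY = re.compile(r'os\.(?:environ\[|environ\.get\(|getenv\()\s*"([^"]+)"')

def const_env_args_py(source: str) -> list[str]:
    return sorted(set(PY_ENV_KEY.findall(source)))

# Applied to the start_funcs.py content above, this returns ["DEAFWAVE_ROOT"].
```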
redash/settings/__init__.py
import os import importlib import ssl from funcy import distinct, remove from flask_talisman import talisman from .helpers import ( fix_assets_path, array_from_string, parse_boolean, int_or_none, set_from_string, add_decode_responses_to_redis_url, cast_int_or_default ) from .organization import DATE_FORMAT, TIME_FORMAT # noqa # _REDIS_URL is the unchanged REDIS_URL we get from env vars, to be used later with RQ _REDIS_URL = os.environ.get( "REDASH_REDIS_URL", os.environ.get("REDIS_URL", "redis://localhost:6379/0") ) # This is the one to use for Redash' own connection: REDIS_URL = add_decode_responses_to_redis_url(_REDIS_URL) PROXIES_COUNT = int(os.environ.get("REDASH_PROXIES_COUNT", "1")) STATSD_HOST = os.environ.get("REDASH_STATSD_HOST", "127.0.0.1") STATSD_PORT = int(os.environ.get("REDASH_STATSD_PORT", "8125")) STATSD_PREFIX = os.environ.get("REDASH_STATSD_PREFIX", "redash") STATSD_USE_TAGS = parse_boolean(os.environ.get("REDASH_STATSD_USE_TAGS", "false")) # Connection settings for Redash's own database (where we store the queries, results, etc) SQLALCHEMY_DATABASE_URI = os.environ.get( "REDASH_DATABASE_URL", os.environ.get("DATABASE_URL", "postgresql:///postgres") ) SQLALCHEMY_MAX_OVERFLOW = int_or_none(os.environ.get("SQLALCHEMY_MAX_OVERFLOW")) SQLALCHEMY_POOL_SIZE = int_or_none(os.environ.get("SQLALCHEMY_POOL_SIZE")) SQLALCHEMY_DISABLE_POOL = parse_boolean( os.environ.get("SQLALCHEMY_DISABLE_POOL", "false") ) SQLALCHEMY_ENABLE_POOL_PRE_PING = parse_boolean( os.environ.get("SQLALCHEMY_ENABLE_POOL_PRE_PING", "false") ) SQLALCHEMY_TRACK_MODIFICATIONS = False SQLALCHEMY_ECHO = False RQ_REDIS_URL = os.environ.get("RQ_REDIS_URL", _REDIS_URL) # The following enables periodic job (every 5 minutes) of removing unused query results. QUERY_RESULTS_CLEANUP_ENABLED = parse_boolean( os.environ.get("REDASH_QUERY_RESULTS_CLEANUP_ENABLED", "true") ) QUERY_RESULTS_CLEANUP_COUNT = int( os.environ.get("REDASH_QUERY_RESULTS_CLEANUP_COUNT", "100") ) QUERY_RESULTS_CLEANUP_MAX_AGE = int( os.environ.get("REDASH_QUERY_RESULTS_CLEANUP_MAX_AGE", "7") ) SCHEMAS_REFRESH_SCHEDULE = int(os.environ.get("REDASH_SCHEMAS_REFRESH_SCHEDULE", 30)) AUTH_TYPE = os.environ.get("REDASH_AUTH_TYPE", "api_key") INVITATION_TOKEN_MAX_AGE = int( os.environ.get("REDASH_INVITATION_TOKEN_MAX_AGE", 60 * 60 * 24 * 7) ) # The secret key to use in the Flask app for various cryptographic features SECRET_KEY = os.environ.get("REDASH_COOKIE_SECRET", "c292a0a3aa32397cdb050e233733900f") # The secret key to use when encrypting data source options DATASOURCE_SECRET_KEY = os.environ.get("REDASH_SECRET_KEY", SECRET_KEY) # Whether and how to redirect non-HTTP requests to HTTPS. Disabled by default. ENFORCE_HTTPS = parse_boolean(os.environ.get("REDASH_ENFORCE_HTTPS", "false")) ENFORCE_HTTPS_PERMANENT = parse_boolean( os.environ.get("REDASH_ENFORCE_HTTPS_PERMANENT", "false") ) # Whether file downloads are enforced or not. ENFORCE_FILE_SAVE = parse_boolean(os.environ.get("REDASH_ENFORCE_FILE_SAVE", "true")) # Whether api calls using the json query runner will block private addresses ENFORCE_PRIVATE_ADDRESS_BLOCK = parse_boolean( os.environ.get("REDASH_ENFORCE_PRIVATE_IP_BLOCK", "true") ) # Whether to use secure cookies by default. COOKIES_SECURE = parse_boolean( os.environ.get("REDASH_COOKIES_SECURE", str(ENFORCE_HTTPS)) ) # Whether the session cookie is set to secure. SESSION_COOKIE_SECURE = parse_boolean( os.environ.get("REDASH_SESSION_COOKIE_SECURE") or str(COOKIES_SECURE) ) # Whether the session cookie is set HttpOnly. 
SESSION_COOKIE_HTTPONLY = parse_boolean( os.environ.get("REDASH_SESSION_COOKIE_HTTPONLY", "true") ) SESSION_EXPIRY_TIME = int(os.environ.get("REDASH_SESSION_EXPIRY_TIME", 60 * 60 * 6)) # Whether the session cookie is set to secure. REMEMBER_COOKIE_SECURE = parse_boolean( os.environ.get("REDASH_REMEMBER_COOKIE_SECURE") or str(COOKIES_SECURE) ) # Whether the remember cookie is set HttpOnly. REMEMBER_COOKIE_HTTPONLY = parse_boolean( os.environ.get("REDASH_REMEMBER_COOKIE_HTTPONLY", "true") ) # The amount of time before the remember cookie expires. REMEMBER_COOKIE_DURATION = int( os.environ.get("REDASH_REMEMBER_COOKIE_DURATION", 60 * 60 * 24 * 31) ) # Doesn't set X-Frame-Options by default since it's highly dependent # on the specific deployment. # See https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Frame-Options # for more information. FRAME_OPTIONS = os.environ.get("REDASH_FRAME_OPTIONS", "deny") FRAME_OPTIONS_ALLOW_FROM = os.environ.get("REDASH_FRAME_OPTIONS_ALLOW_FROM", "") # Whether and how to send Strict-Transport-Security response headers. # See https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Strict-Transport-Security # for more information. HSTS_ENABLED = parse_boolean( os.environ.get("REDASH_HSTS_ENABLED") or str(ENFORCE_HTTPS) ) HSTS_PRELOAD = parse_boolean(os.environ.get("REDASH_HSTS_PRELOAD", "false")) HSTS_MAX_AGE = int(os.environ.get("REDASH_HSTS_MAX_AGE", talisman.ONE_YEAR_IN_SECS)) HSTS_INCLUDE_SUBDOMAINS = parse_boolean( os.environ.get("REDASH_HSTS_INCLUDE_SUBDOMAINS", "false") ) # Whether and how to send Content-Security-Policy response headers. # See https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy # for more information. # Overriding this value via an environment variables requires setting it # as a string in the general CSP format of a semicolon separated list of # individual CSP directives, see https://github.com/GoogleCloudPlatform/flask-talisman#example-7 # for more information. E.g.: CONTENT_SECURITY_POLICY = os.environ.get( "REDASH_CONTENT_SECURITY_POLICY", "default-src 'self'; style-src 'self' 'unsafe-inline'; script-src 'self' 'unsafe-eval'; font-src 'self' data:; img-src 'self' http: https: data: blob:; object-src 'none'; frame-ancestors 'none'; frame-src redash.io;", ) CONTENT_SECURITY_POLICY_REPORT_URI = os.environ.get( "REDASH_CONTENT_SECURITY_POLICY_REPORT_URI", "" ) CONTENT_SECURITY_POLICY_REPORT_ONLY = parse_boolean( os.environ.get("REDASH_CONTENT_SECURITY_POLICY_REPORT_ONLY", "false") ) CONTENT_SECURITY_POLICY_NONCE_IN = array_from_string( os.environ.get("REDASH_CONTENT_SECURITY_POLICY_NONCE_IN", "") ) # Whether and how to send Referrer-Policy response headers. Defaults to # 'strict-origin-when-cross-origin'. # See https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Referrer-Policy # for more information. REFERRER_POLICY = os.environ.get( "REDASH_REFERRER_POLICY", "strict-origin-when-cross-origin" ) # Whether and how to send Feature-Policy response headers. Defaults to # an empty value. # See https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Feature-Policy # for more information. 
FEATURE_POLICY = os.environ.get("REDASH_REFERRER_POLICY", "") MULTI_ORG = parse_boolean(os.environ.get("REDASH_MULTI_ORG", "false")) GOOGLE_CLIENT_ID = os.environ.get("REDASH_GOOGLE_CLIENT_ID", "") GOOGLE_CLIENT_SECRET = os.environ.get("REDASH_GOOGLE_CLIENT_SECRET", "") GOOGLE_OAUTH_ENABLED = bool(GOOGLE_CLIENT_ID and GOOGLE_CLIENT_SECRET) # If Redash is behind a proxy it might sometimes receive a X-Forwarded-Proto of HTTP # even if your actual Redash URL scheme is HTTPS. This will cause Flask to build # the SAML redirect URL incorrect thus failing auth. This is especially common if # you're behind a SSL/TCP configured AWS ELB or similar. # This setting will force the URL scheme. SAML_SCHEME_OVERRIDE = os.environ.get("REDASH_SAML_SCHEME_OVERRIDE", "") SAML_ENCRYPTION_PEM_PATH = os.environ.get("REDASH_SAML_ENCRYPTION_PEM_PATH", "") SAML_ENCRYPTION_CERT_PATH = os.environ.get("REDASH_SAML_ENCRYPTION_CERT_PATH", "") SAML_ENCRYPTION_ENABLED = SAML_ENCRYPTION_PEM_PATH != "" and SAML_ENCRYPTION_CERT_PATH != "" # Enables the use of an externally-provided and trusted remote user via an HTTP # header. The "user" must be an email address. # # By default the trusted header is X-Forwarded-Remote-User. You can change # this by setting REDASH_REMOTE_USER_HEADER. # # Enabling this authentication method is *potentially dangerous*, and it is # your responsibility to ensure that only a trusted frontend (usually on the # same server) can talk to the redash backend server, otherwise people will be # able to login as anyone they want by directly talking to the redash backend. # You must *also* ensure that any special header in the original request is # removed or always overwritten by your frontend, otherwise your frontend may # pass it through to the backend unchanged. # # Note that redash will only check the remote user once, upon the first need # for a login, and then set a cookie which keeps the user logged in. Dropping # the remote user header after subsequent requests won't automatically log the # user out. Doing so could be done with further work, but usually it's # unnecessary. # # If you also set the organization setting auth_password_login_enabled to false, # then your authentication will be seamless. Otherwise a link will be presented # on the login page to trigger remote user auth. REMOTE_USER_LOGIN_ENABLED = parse_boolean( os.environ.get("REDASH_REMOTE_USER_LOGIN_ENABLED", "false") ) REMOTE_USER_HEADER = os.environ.get( "REDASH_REMOTE_USER_HEADER", "X-Forwarded-Remote-User" ) # If the organization setting auth_password_login_enabled is not false, then users will still be # able to login through Redash instead of the LDAP server LDAP_LOGIN_ENABLED = parse_boolean(os.environ.get("REDASH_LDAP_LOGIN_ENABLED", "false")) # Bind LDAP using SSL. Default is False LDAP_SSL = parse_boolean(os.environ.get("REDASH_LDAP_USE_SSL", "false")) # Choose authentication method(SIMPLE, ANONYMOUS or NTLM). Default is SIMPLE LDAP_AUTH_METHOD = os.environ.get("REDASH_LDAP_AUTH_METHOD", "SIMPLE") # The LDAP directory address (ex. ldap://10.0.10.1:389) LDAP_HOST_URL = os.environ.get("REDASH_LDAP_URL", None) # The DN & password used to connect to LDAP to determine the identity of the user being authenticated. # For AD this should be "org\\user". 
LDAP_BIND_DN = os.environ.get("REDASH_LDAP_BIND_DN", None) LDAP_BIND_DN_PASSWORD = os.environ.get("REDASH_LDAP_BIND_DN_PASSWORD", "") # AD/LDAP email and display name keys LDAP_DISPLAY_NAME_KEY = os.environ.get("REDASH_LDAP_DISPLAY_NAME_KEY", "displayName") LDAP_EMAIL_KEY = os.environ.get("REDASH_LDAP_EMAIL_KEY", "mail") # Prompt that should be shown above username/email field. LDAP_CUSTOM_USERNAME_PROMPT = os.environ.get( "REDASH_LDAP_CUSTOM_USERNAME_PROMPT", "LDAP/AD/SSO username:" ) # LDAP Search DN TEMPLATE (for AD this should be "(sAMAccountName=%(username)s)"") LDAP_SEARCH_TEMPLATE = os.environ.get( "REDASH_LDAP_SEARCH_TEMPLATE", "(cn=%(username)s)" ) # The schema to bind to (ex. cn=users,dc=ORG,dc=local) LDAP_SEARCH_DN = os.environ.get( "REDASH_LDAP_SEARCH_DN", os.environ.get("REDASH_SEARCH_DN") ) STATIC_ASSETS_PATH = fix_assets_path( os.environ.get("REDASH_STATIC_ASSETS_PATH", "../client/dist/") ) FLASK_TEMPLATE_PATH = fix_assets_path( os.environ.get("REDASH_FLASK_TEMPLATE_PATH", STATIC_ASSETS_PATH) ) # Time limit (in seconds) for scheduled queries. Set this to -1 to execute without a time limit. SCHEDULED_QUERY_TIME_LIMIT = int( os.environ.get("REDASH_SCHEDULED_QUERY_TIME_LIMIT", -1) ) # Time limit (in seconds) for adhoc queries. Set this to -1 to execute without a time limit. ADHOC_QUERY_TIME_LIMIT = int(os.environ.get("REDASH_ADHOC_QUERY_TIME_LIMIT", -1)) JOB_EXPIRY_TIME = int(os.environ.get("REDASH_JOB_EXPIRY_TIME", 3600 * 12)) JOB_DEFAULT_FAILURE_TTL = int( os.environ.get("REDASH_JOB_DEFAULT_FAILURE_TTL", 7 * 24 * 60 * 60) ) LOG_LEVEL = os.environ.get("REDASH_LOG_LEVEL", "INFO") LOG_STDOUT = parse_boolean(os.environ.get("REDASH_LOG_STDOUT", "false")) LOG_PREFIX = os.environ.get("REDASH_LOG_PREFIX", "") LOG_FORMAT = os.environ.get( "REDASH_LOG_FORMAT", LOG_PREFIX + "[%(asctime)s][PID:%(process)d][%(levelname)s][%(name)s] %(message)s", ) RQ_WORKER_JOB_LOG_FORMAT = os.environ.get( "REDASH_RQ_WORKER_JOB_LOG_FORMAT", ( LOG_PREFIX + "[%(asctime)s][PID:%(process)d][%(levelname)s][%(name)s] " "job.func_name=%(job_func_name)s " "job.id=%(job_id)s %(message)s" ), ) # Mail settings: MAIL_SERVER = os.environ.get("REDASH_MAIL_SERVER", "localhost") MAIL_PORT = int(os.environ.get("REDASH_MAIL_PORT", 25)) MAIL_USE_TLS = parse_boolean(os.environ.get("REDASH_MAIL_USE_TLS", "false")) MAIL_USE_SSL = parse_boolean(os.environ.get("REDASH_MAIL_USE_SSL", "false")) MAIL_USERNAME = os.environ.get("REDASH_MAIL_USERNAME", None) MAIL_PASSWORD = os.environ.get("REDASH_MAIL_PASSWORD", None) MAIL_DEFAULT_SENDER = os.environ.get("REDASH_MAIL_DEFAULT_SENDER", None) MAIL_MAX_EMAILS = os.environ.get("REDASH_MAIL_MAX_EMAILS", None) MAIL_ASCII_ATTACHMENTS = parse_boolean( os.environ.get("REDASH_MAIL_ASCII_ATTACHMENTS", "false") ) def email_server_is_configured(): return MAIL_DEFAULT_SENDER is not None HOST = os.environ.get("REDASH_HOST", "") SEND_FAILURE_EMAIL_INTERVAL = int( os.environ.get("REDASH_SEND_FAILURE_EMAIL_INTERVAL", 60) ) MAX_FAILURE_REPORTS_PER_QUERY = int( os.environ.get("REDASH_MAX_FAILURE_REPORTS_PER_QUERY", 100) ) ALERTS_DEFAULT_MAIL_SUBJECT_TEMPLATE = os.environ.get( "REDASH_ALERTS_DEFAULT_MAIL_SUBJECT_TEMPLATE", "({state}) {alert_name}" ) # How many requests are allowed per IP to the login page before # being throttled? 
# See https://flask-limiter.readthedocs.io/en/stable/#rate-limit-string-notation RATELIMIT_ENABLED = parse_boolean(os.environ.get("REDASH_RATELIMIT_ENABLED", "true")) THROTTLE_LOGIN_PATTERN = os.environ.get("REDASH_THROTTLE_LOGIN_PATTERN", "50/hour") LIMITER_STORAGE = os.environ.get("REDASH_LIMITER_STORAGE", REDIS_URL) THROTTLE_PASS_RESET_PATTERN = os.environ.get("REDASH_THROTTLE_PASS_RESET_PATTERN", "10/hour") # CORS settings for the Query Result API (and possibly future external APIs). # In most cases all you need to do is set REDASH_CORS_ACCESS_CONTROL_ALLOW_ORIGIN # to the calling domain (or domains in a comma separated list). ACCESS_CONTROL_ALLOW_ORIGIN = set_from_string( os.environ.get("REDASH_CORS_ACCESS_CONTROL_ALLOW_ORIGIN", "") ) ACCESS_CONTROL_ALLOW_CREDENTIALS = parse_boolean( os.environ.get("REDASH_CORS_ACCESS_CONTROL_ALLOW_CREDENTIALS", "false") ) ACCESS_CONTROL_REQUEST_METHOD = os.environ.get( "REDASH_CORS_ACCESS_CONTROL_REQUEST_METHOD", "GET, POST, PUT" ) ACCESS_CONTROL_ALLOW_HEADERS = os.environ.get( "REDASH_CORS_ACCESS_CONTROL_ALLOW_HEADERS", "Content-Type" ) # Query Runners default_query_runners = [ "redash.query_runner.athena", "redash.query_runner.big_query", "redash.query_runner.google_spreadsheets", "redash.query_runner.graphite", "redash.query_runner.mongodb", "redash.query_runner.couchbase", "redash.query_runner.mysql", "redash.query_runner.pg", "redash.query_runner.url", "redash.query_runner.influx_db", "redash.query_runner.elasticsearch", "redash.query_runner.amazon_elasticsearch", "redash.query_runner.trino", "redash.query_runner.presto", "redash.query_runner.databricks", "redash.query_runner.hive_ds", "redash.query_runner.impala_ds", "redash.query_runner.vertica", "redash.query_runner.clickhouse", "redash.query_runner.yandex_metrica", "redash.query_runner.rockset", "redash.query_runner.treasuredata", "redash.query_runner.sqlite", "redash.query_runner.dynamodb_sql", "redash.query_runner.mssql", "redash.query_runner.mssql_odbc", "redash.query_runner.memsql_ds", "redash.query_runner.mapd", "redash.query_runner.jql", "redash.query_runner.google_analytics", "redash.query_runner.axibase_tsd", "redash.query_runner.salesforce", "redash.query_runner.query_results", "redash.query_runner.prometheus", "redash.query_runner.qubole", "redash.query_runner.db2", "redash.query_runner.druid", "redash.query_runner.kylin", "redash.query_runner.drill", "redash.query_runner.uptycs", "redash.query_runner.snowflake", "redash.query_runner.phoenix", "redash.query_runner.json_ds", "redash.query_runner.cass", "redash.query_runner.dgraph", "redash.query_runner.azure_kusto", "redash.query_runner.exasol", "redash.query_runner.cloudwatch", "redash.query_runner.cloudwatch_insights", "redash.query_runner.corporate_memory", "redash.query_runner.sparql_endpoint", "redash.query_runner.excel", "redash.query_runner.csv", "redash.query_runner.firebolt" ] enabled_query_runners = array_from_string( os.environ.get("REDASH_ENABLED_QUERY_RUNNERS", ",".join(default_query_runners)) ) additional_query_runners = array_from_string( os.environ.get("REDASH_ADDITIONAL_QUERY_RUNNERS", "") ) disabled_query_runners = array_from_string( os.environ.get("REDASH_DISABLED_QUERY_RUNNERS", "") ) QUERY_RUNNERS = remove( set(disabled_query_runners), distinct(enabled_query_runners + additional_query_runners), ) dynamic_settings = importlib.import_module( os.environ.get("REDASH_DYNAMIC_SETTINGS_MODULE", "redash.settings.dynamic_settings") ) # Destinations default_destinations = [ "redash.destinations.email", 
"redash.destinations.slack", "redash.destinations.webhook", "redash.destinations.hipchat", "redash.destinations.mattermost", "redash.destinations.chatwork", "redash.destinations.pagerduty", "redash.destinations.hangoutschat", "redash.destinations.lark", ] enabled_destinations = array_from_string( os.environ.get("REDASH_ENABLED_DESTINATIONS", ",".join(default_destinations)) ) additional_destinations = array_from_string( os.environ.get("REDASH_ADDITIONAL_DESTINATIONS", "") ) DESTINATIONS = distinct(enabled_destinations + additional_destinations) EVENT_REPORTING_WEBHOOKS = array_from_string( os.environ.get("REDASH_EVENT_REPORTING_WEBHOOKS", "") ) # Support for Sentry (https://getsentry.com/). Just set your Sentry DSN to enable it: SENTRY_DSN = os.environ.get("REDASH_SENTRY_DSN", "") SENTRY_ENVIRONMENT = os.environ.get("REDASH_SENTRY_ENVIRONMENT") # Client side toggles: ALLOW_SCRIPTS_IN_USER_INPUT = parse_boolean( os.environ.get("REDASH_ALLOW_SCRIPTS_IN_USER_INPUT", "false") ) DASHBOARD_REFRESH_INTERVALS = list( map( int, array_from_string( os.environ.get( "REDASH_DASHBOARD_REFRESH_INTERVALS", "60,300,600,1800,3600,43200,86400" ) ), ) ) QUERY_REFRESH_INTERVALS = list( map( int, array_from_string( os.environ.get( "REDASH_QUERY_REFRESH_INTERVALS", "60, 300, 600, 900, 1800, 3600, 7200, 10800, 14400, 18000, 21600, 25200, 28800, 32400, 36000, 39600, 43200, 86400, 604800, 1209600, 2592000", ) ), ) ) PAGE_SIZE = int(os.environ.get("REDASH_PAGE_SIZE", 20)) PAGE_SIZE_OPTIONS = list( map( int, array_from_string(os.environ.get("REDASH_PAGE_SIZE_OPTIONS", "5,10,20,50,100")), ) ) TABLE_CELL_MAX_JSON_SIZE = int(os.environ.get("REDASH_TABLE_CELL_MAX_JSON_SIZE", 50000)) # Features: VERSION_CHECK = parse_boolean(os.environ.get("REDASH_VERSION_CHECK", "true")) FEATURE_DISABLE_REFRESH_QUERIES = parse_boolean( os.environ.get("REDASH_FEATURE_DISABLE_REFRESH_QUERIES", "false") ) FEATURE_SHOW_QUERY_RESULTS_COUNT = parse_boolean( os.environ.get("REDASH_FEATURE_SHOW_QUERY_RESULTS_COUNT", "true") ) FEATURE_ALLOW_CUSTOM_JS_VISUALIZATIONS = parse_boolean( os.environ.get("REDASH_FEATURE_ALLOW_CUSTOM_JS_VISUALIZATIONS", "false") ) FEATURE_AUTO_PUBLISH_NAMED_QUERIES = parse_boolean( os.environ.get("REDASH_FEATURE_AUTO_PUBLISH_NAMED_QUERIES", "true") ) FEATURE_EXTENDED_ALERT_OPTIONS = parse_boolean( os.environ.get("REDASH_FEATURE_EXTENDED_ALERT_OPTIONS", "false") ) # BigQuery BIGQUERY_HTTP_TIMEOUT = int(os.environ.get("REDASH_BIGQUERY_HTTP_TIMEOUT", "600")) # Allow Parameters in Embeds # WARNING: Deprecated! # See https://discuss.redash.io/t/support-for-parameters-in-embedded-visualizations/3337 for more details. ALLOW_PARAMETERS_IN_EMBEDS = parse_boolean( os.environ.get("REDASH_ALLOW_PARAMETERS_IN_EMBEDS", "false") ) # Enhance schema fetching SCHEMA_RUN_TABLE_SIZE_CALCULATIONS = parse_boolean( os.environ.get("REDASH_SCHEMA_RUN_TABLE_SIZE_CALCULATIONS", "false") ) # kylin KYLIN_OFFSET = int(os.environ.get("REDASH_KYLIN_OFFSET", 0)) KYLIN_LIMIT = int(os.environ.get("REDASH_KYLIN_LIMIT", 50000)) KYLIN_ACCEPT_PARTIAL = parse_boolean( os.environ.get("REDASH_KYLIN_ACCEPT_PARTIAL", "false") ) # sqlparse SQLPARSE_FORMAT_OPTIONS = { "reindent": parse_boolean(os.environ.get("SQLPARSE_FORMAT_REINDENT", "true")), "keyword_case": os.environ.get("SQLPARSE_FORMAT_KEYWORD_CASE", "upper"), } # requests REQUESTS_ALLOW_REDIRECTS = parse_boolean( os.environ.get("REDASH_REQUESTS_ALLOW_REDIRECTS", "false") ) # Enforces CSRF token validation on API requests. 
# This is turned off by default to avoid breaking any existing deployments, but it is highly recommended to turn this toggle on to prevent CSRF attacks. ENFORCE_CSRF = parse_boolean( os.environ.get("REDASH_ENFORCE_CSRF", "false") ) CSRF_TIME_LIMIT = int(os.environ.get("REDASH_CSRF_TIME_LIMIT", 3600 * 6)) # Email blocked domains; use a comma to separate multiple domains BLOCKED_DOMAINS = set_from_string(os.environ.get("REDASH_BLOCKED_DOMAINS", "qq.com"))
[]
[]
[ "REDASH_MAIL_MAX_EMAILS", "REDASH_LDAP_DISPLAY_NAME_KEY", "REDASH_COOKIES_SECURE", "REDASH_CORS_ACCESS_CONTROL_ALLOW_HEADERS", "REDASH_SEND_FAILURE_EMAIL_INTERVAL", "REDASH_LOG_STDOUT", "REDASH_PAGE_SIZE_OPTIONS", "REDASH_ENFORCE_HTTPS_PERMANENT", "SQLALCHEMY_ENABLE_POOL_PRE_PING", "REDASH_INVITATION_TOKEN_MAX_AGE", "REDASH_LDAP_BIND_DN", "REDASH_FEATURE_DISABLE_REFRESH_QUERIES", "REDASH_ADHOC_QUERY_TIME_LIMIT", "REDASH_THROTTLE_PASS_RESET_PATTERN", "REDASH_GOOGLE_CLIENT_SECRET", "REDASH_RATELIMIT_ENABLED", "REDASH_LIMITER_STORAGE", "REDASH_REMEMBER_COOKIE_HTTPONLY", "REDASH_HSTS_MAX_AGE", "SQLPARSE_FORMAT_REINDENT", "REDASH_LOG_LEVEL", "REDASH_HOST", "REDASH_KYLIN_OFFSET", "REDASH_RQ_WORKER_JOB_LOG_FORMAT", "REDASH_REFERRER_POLICY", "REDASH_DATABASE_URL", "REDASH_REQUESTS_ALLOW_REDIRECTS", "REDASH_SAML_SCHEME_OVERRIDE", "REDASH_COOKIE_SECRET", "REDASH_LDAP_URL", "REDASH_MAIL_USE_TLS", "REDASH_LDAP_LOGIN_ENABLED", "REDASH_LDAP_AUTH_METHOD", "REDASH_ADDITIONAL_DESTINATIONS", "REDASH_REDIS_URL", "REDASH_LDAP_CUSTOM_USERNAME_PROMPT", "REDASH_HSTS_ENABLED", "REDASH_SENTRY_DSN", "REDASH_FEATURE_AUTO_PUBLISH_NAMED_QUERIES", "REDASH_ENFORCE_PRIVATE_IP_BLOCK", "REDASH_KYLIN_LIMIT", "REDASH_SENTRY_ENVIRONMENT", "REDASH_LOG_PREFIX", "DATABASE_URL", "REDASH_ENABLED_QUERY_RUNNERS", "REDASH_STATSD_PORT", "REDASH_SESSION_COOKIE_HTTPONLY", "REDASH_JOB_DEFAULT_FAILURE_TTL", "REDASH_PROXIES_COUNT", "REDASH_SESSION_EXPIRY_TIME", "REDASH_ENFORCE_FILE_SAVE", "REDASH_LDAP_SEARCH_DN", "REDASH_SCHEDULED_QUERY_TIME_LIMIT", "REDASH_MAIL_PASSWORD", "REDASH_CORS_ACCESS_CONTROL_REQUEST_METHOD", "REDASH_MAIL_ASCII_ATTACHMENTS", "REDASH_JOB_EXPIRY_TIME", "REDASH_ENFORCE_CSRF", "REDASH_HSTS_INCLUDE_SUBDOMAINS", "REDASH_ENFORCE_HTTPS", "REDASH_ALERTS_DEFAULT_MAIL_SUBJECT_TEMPLATE", "REDASH_TABLE_CELL_MAX_JSON_SIZE", "SQLALCHEMY_POOL_SIZE", "REDASH_FEATURE_SHOW_QUERY_RESULTS_COUNT", "REDASH_ALLOW_PARAMETERS_IN_EMBEDS", "REDASH_SAML_ENCRYPTION_CERT_PATH", "REDASH_GOOGLE_CLIENT_ID", "REDASH_ALLOW_SCRIPTS_IN_USER_INPUT", "REDASH_STATSD_HOST", "REDASH_FRAME_OPTIONS_ALLOW_FROM", "REDASH_STATSD_USE_TAGS", "REDASH_FEATURE_ALLOW_CUSTOM_JS_VISUALIZATIONS", "REDASH_AUTH_TYPE", "REDASH_CORS_ACCESS_CONTROL_ALLOW_ORIGIN", "REDASH_STATIC_ASSETS_PATH", "REDASH_CONTENT_SECURITY_POLICY", "REDASH_SECRET_KEY", "REDASH_KYLIN_ACCEPT_PARTIAL", "REDASH_BIGQUERY_HTTP_TIMEOUT", "SQLALCHEMY_MAX_OVERFLOW", "REDASH_CONTENT_SECURITY_POLICY_NONCE_IN", "REDASH_BLOCKED_DOMAINS", "REDASH_MULTI_ORG", "REDASH_MAIL_USE_SSL", "REDASH_SEARCH_DN", "RQ_REDIS_URL", "SQLALCHEMY_DISABLE_POOL", "REDASH_ENABLED_DESTINATIONS", "REDASH_PAGE_SIZE", "REDASH_REMEMBER_COOKIE_DURATION", "REDASH_LDAP_SEARCH_TEMPLATE", "REDASH_REMOTE_USER_HEADER", "REDIS_URL", "REDASH_MAIL_SERVER", "REDASH_CORS_ACCESS_CONTROL_ALLOW_CREDENTIALS", "REDASH_CSRF_TIME_LIMIT", "REDASH_MAIL_USERNAME", "REDASH_REMEMBER_COOKIE_SECURE", "REDASH_FRAME_OPTIONS", "REDASH_LOG_FORMAT", "REDASH_SESSION_COOKIE_SECURE", "REDASH_LDAP_BIND_DN_PASSWORD", "REDASH_DASHBOARD_REFRESH_INTERVALS", "REDASH_QUERY_REFRESH_INTERVALS", "REDASH_STATSD_PREFIX", "REDASH_MAIL_PORT", "REDASH_EVENT_REPORTING_WEBHOOKS", "REDASH_SCHEMAS_REFRESH_SCHEDULE", "REDASH_SCHEMA_RUN_TABLE_SIZE_CALCULATIONS", "REDASH_CONTENT_SECURITY_POLICY_REPORT_URI", "REDASH_QUERY_RESULTS_CLEANUP_MAX_AGE", "REDASH_REMOTE_USER_LOGIN_ENABLED", "REDASH_MAIL_DEFAULT_SENDER", "REDASH_ADDITIONAL_QUERY_RUNNERS", "REDASH_FLASK_TEMPLATE_PATH", "REDASH_LDAP_EMAIL_KEY", "REDASH_QUERY_RESULTS_CLEANUP_COUNT", "REDASH_DISABLED_QUERY_RUNNERS", "REDASH_LDAP_USE_SSL", 
"REDASH_VERSION_CHECK", "REDASH_CONTENT_SECURITY_POLICY_REPORT_ONLY", "REDASH_THROTTLE_LOGIN_PATTERN", "REDASH_QUERY_RESULTS_CLEANUP_ENABLED", "REDASH_MAX_FAILURE_REPORTS_PER_QUERY", "REDASH_FEATURE_EXTENDED_ALERT_OPTIONS", "REDASH_HSTS_PRELOAD", "REDASH_SAML_ENCRYPTION_PEM_PATH", "SQLPARSE_FORMAT_KEYWORD_CASE", "REDASH_DYNAMIC_SETTINGS_MODULE" ]
[]
["REDASH_MAIL_MAX_EMAILS", "REDASH_LDAP_DISPLAY_NAME_KEY", "REDASH_COOKIES_SECURE", "REDASH_CORS_ACCESS_CONTROL_ALLOW_HEADERS", "REDASH_SEND_FAILURE_EMAIL_INTERVAL", "REDASH_LOG_STDOUT", "REDASH_PAGE_SIZE_OPTIONS", "REDASH_ENFORCE_HTTPS_PERMANENT", "SQLALCHEMY_ENABLE_POOL_PRE_PING", "REDASH_INVITATION_TOKEN_MAX_AGE", "REDASH_LDAP_BIND_DN", "REDASH_FEATURE_DISABLE_REFRESH_QUERIES", "REDASH_ADHOC_QUERY_TIME_LIMIT", "REDASH_THROTTLE_PASS_RESET_PATTERN", "REDASH_GOOGLE_CLIENT_SECRET", "REDASH_RATELIMIT_ENABLED", "REDASH_LIMITER_STORAGE", "REDASH_REMEMBER_COOKIE_HTTPONLY", "REDASH_HSTS_MAX_AGE", "SQLPARSE_FORMAT_REINDENT", "REDASH_LOG_LEVEL", "REDASH_HOST", "REDASH_KYLIN_OFFSET", "REDASH_RQ_WORKER_JOB_LOG_FORMAT", "REDASH_REFERRER_POLICY", "REDASH_DATABASE_URL", "REDASH_REQUESTS_ALLOW_REDIRECTS", "REDASH_SAML_SCHEME_OVERRIDE", "REDASH_COOKIE_SECRET", "REDASH_LDAP_URL", "REDASH_MAIL_USE_TLS", "REDASH_LDAP_LOGIN_ENABLED", "REDASH_LDAP_AUTH_METHOD", "REDASH_ADDITIONAL_DESTINATIONS", "REDASH_REDIS_URL", "REDASH_LDAP_CUSTOM_USERNAME_PROMPT", "REDASH_HSTS_ENABLED", "REDASH_SENTRY_DSN", "REDASH_FEATURE_AUTO_PUBLISH_NAMED_QUERIES", "REDASH_ENFORCE_PRIVATE_IP_BLOCK", "REDASH_KYLIN_LIMIT", "REDASH_SENTRY_ENVIRONMENT", "REDASH_LOG_PREFIX", "DATABASE_URL", "REDASH_ENABLED_QUERY_RUNNERS", "REDASH_STATSD_PORT", "REDASH_SESSION_COOKIE_HTTPONLY", "REDASH_JOB_DEFAULT_FAILURE_TTL", "REDASH_PROXIES_COUNT", "REDASH_SESSION_EXPIRY_TIME", "REDASH_ENFORCE_FILE_SAVE", "REDASH_LDAP_SEARCH_DN", "REDASH_SCHEDULED_QUERY_TIME_LIMIT", "REDASH_MAIL_PASSWORD", "REDASH_CORS_ACCESS_CONTROL_REQUEST_METHOD", "REDASH_MAIL_ASCII_ATTACHMENTS", "REDASH_JOB_EXPIRY_TIME", "REDASH_ENFORCE_CSRF", "REDASH_HSTS_INCLUDE_SUBDOMAINS", "REDASH_ENFORCE_HTTPS", "REDASH_ALERTS_DEFAULT_MAIL_SUBJECT_TEMPLATE", "REDASH_TABLE_CELL_MAX_JSON_SIZE", "SQLALCHEMY_POOL_SIZE", "REDASH_FEATURE_SHOW_QUERY_RESULTS_COUNT", "REDASH_ALLOW_PARAMETERS_IN_EMBEDS", "REDASH_SAML_ENCRYPTION_CERT_PATH", "REDASH_GOOGLE_CLIENT_ID", "REDASH_ALLOW_SCRIPTS_IN_USER_INPUT", "REDASH_STATSD_HOST", "REDASH_FRAME_OPTIONS_ALLOW_FROM", "REDASH_STATSD_USE_TAGS", "REDASH_FEATURE_ALLOW_CUSTOM_JS_VISUALIZATIONS", "REDASH_AUTH_TYPE", "REDASH_CORS_ACCESS_CONTROL_ALLOW_ORIGIN", "REDASH_STATIC_ASSETS_PATH", "REDASH_CONTENT_SECURITY_POLICY", "REDASH_SECRET_KEY", "REDASH_KYLIN_ACCEPT_PARTIAL", "REDASH_BIGQUERY_HTTP_TIMEOUT", "SQLALCHEMY_MAX_OVERFLOW", "REDASH_CONTENT_SECURITY_POLICY_NONCE_IN", "REDASH_BLOCKED_DOMAINS", "REDASH_MULTI_ORG", "REDASH_MAIL_USE_SSL", "REDASH_SEARCH_DN", "RQ_REDIS_URL", "SQLALCHEMY_DISABLE_POOL", "REDASH_ENABLED_DESTINATIONS", "REDASH_PAGE_SIZE", "REDASH_REMEMBER_COOKIE_DURATION", "REDASH_LDAP_SEARCH_TEMPLATE", "REDASH_REMOTE_USER_HEADER", "REDIS_URL", "REDASH_MAIL_SERVER", "REDASH_CORS_ACCESS_CONTROL_ALLOW_CREDENTIALS", "REDASH_CSRF_TIME_LIMIT", "REDASH_MAIL_USERNAME", "REDASH_REMEMBER_COOKIE_SECURE", "REDASH_FRAME_OPTIONS", "REDASH_LOG_FORMAT", "REDASH_SESSION_COOKIE_SECURE", "REDASH_LDAP_BIND_DN_PASSWORD", "REDASH_DASHBOARD_REFRESH_INTERVALS", "REDASH_QUERY_REFRESH_INTERVALS", "REDASH_STATSD_PREFIX", "REDASH_MAIL_PORT", "REDASH_EVENT_REPORTING_WEBHOOKS", "REDASH_SCHEMAS_REFRESH_SCHEDULE", "REDASH_SCHEMA_RUN_TABLE_SIZE_CALCULATIONS", "REDASH_CONTENT_SECURITY_POLICY_REPORT_URI", "REDASH_QUERY_RESULTS_CLEANUP_MAX_AGE", "REDASH_REMOTE_USER_LOGIN_ENABLED", "REDASH_MAIL_DEFAULT_SENDER", "REDASH_ADDITIONAL_QUERY_RUNNERS", "REDASH_FLASK_TEMPLATE_PATH", "REDASH_LDAP_EMAIL_KEY", "REDASH_QUERY_RESULTS_CLEANUP_COUNT", "REDASH_DISABLED_QUERY_RUNNERS", "REDASH_LDAP_USE_SSL", 
"REDASH_VERSION_CHECK", "REDASH_CONTENT_SECURITY_POLICY_REPORT_ONLY", "REDASH_THROTTLE_LOGIN_PATTERN", "REDASH_QUERY_RESULTS_CLEANUP_ENABLED", "REDASH_MAX_FAILURE_REPORTS_PER_QUERY", "REDASH_FEATURE_EXTENDED_ALERT_OPTIONS", "REDASH_HSTS_PRELOAD", "REDASH_SAML_ENCRYPTION_PEM_PATH", "SQLPARSE_FORMAT_KEYWORD_CASE", "REDASH_DYNAMIC_SETTINGS_MODULE"]
python
129
0
envsettings/base.py
import copy try: from importlib.util import find_spec except ImportError: from imp import find_module find_spec = False import os import re try: import urllib.parse as urlparse except ImportError: import urlparse try: from django.utils.encoding import smart_text except ImportError: from django.utils.encoding import smart_unicode as smart_text if find_spec: def is_importable(module_name): """ Test if a package (just the top-level) is importable, without actually importing it """ package = module_name.split('.')[0] return bool(find_spec(package)) else: # For Python < 3.4 def is_importable(module_name): package = module_name.split('.')[0] try: f = find_module(package)[0] if f: f.close() return True except ImportError: return False class EnvSettings(object): def __init__(self, env=os.environ): self.env = env def get(self, key, default=None): return smart_text(self.env.get(key, default)) def get_bool(self, key, default=None): return self.parse_bool(self.env.get(key, default)) def get_int(self, key, default=None): return int(self.env.get(key, default)) @staticmethod def parse_bool(value): # Accept bools as well as strings so we can pass them # as default values if value == 'True' or value == True: return True elif value == 'False' or value == False: return False else: raise ValueError( "invalid boolean {!r} (must be 'True' or " "'False')".format(value)) class URLSettingsBase(EnvSettings): """ Base class which from which all other URL-based configuration classes inherit """ CONFIG = {} def __init__(self, *args, **kwargs): super(URLSettingsBase, self).__init__(*args, **kwargs) # Each instance gets its own copy of the config so it # can be safely mutated self.CONFIG = copy.deepcopy(self.CONFIG) def get(self, key=None, default=None, auto_config=False): value = self.env.get(key) if key else None if value is None and auto_config: value = self.get_auto_config() if value is None: value = default return self.parse(value) def parse(self, url): """ Return a configuration dict from a URL """ parsed_url = urlparse.urlparse(url) try: default_config = self.CONFIG[parsed_url.scheme] except KeyError: raise ValueError( 'unrecognised URL scheme for {}: {}'.format( self.__class__.__name__, url)) handler = self.get_handler_for_scheme(parsed_url.scheme) config = copy.deepcopy(default_config) return handler(parsed_url, config) def get_handler_for_scheme(self, scheme): method_name = 'handle_{}_url'.format(re.sub('[\+\.\-]', '_', scheme)) return getattr(self, method_name, self.handle_url) def handle_url(self, parsed_url, config): # Default implementation does nothing return config def get_auto_config(self): """ Walk over all available auto_config methods, passing them the current environment and seeing if they return a configuration URL """ methods = [m for m in dir(self) if m.startswith('auto_config_')] for method_name in sorted(methods): auto_config_method = getattr(self, method_name) url = auto_config_method(self.env) if url: return url
[]
[]
[]
[]
[]
python
0
0
vendor/github.com/docker/docker/cli/command/cli.go
package command import ( "errors" "fmt" "io" "net/http" "os" "path/filepath" "runtime" "github.com/docker/docker/api" cliflags "github.com/docker/docker/cli/flags" "github.com/docker/docker/cliconfig" "github.com/docker/docker/cliconfig/configfile" "github.com/docker/docker/cliconfig/credentials" "github.com/docker/docker/client" "github.com/docker/docker/dockerversion" dopts "github.com/docker/docker/opts" "github.com/docker/go-connections/sockets" "github.com/docker/go-connections/tlsconfig" "golang.org/x/net/context" ) // Streams is an interface which exposes the standard input and output streams type Streams interface { In() *InStream Out() *OutStream Err() io.Writer } // DockerCli represents the docker command line client. // Instances of the client can be returned from NewDockerCli. type DockerCli struct { configFile *configfile.ConfigFile in *InStream out *OutStream err io.Writer keyFile string client client.APIClient } // HasExperimental returns true if experimental features are accessible func (cli *DockerCli) HasExperimental() bool { if cli.client == nil { return false } enabled, _ := cli.client.Ping(context.Background()) return enabled } // Client returns the APIClient func (cli *DockerCli) Client() client.APIClient { return cli.client } // Out returns the writer used for stdout func (cli *DockerCli) Out() *OutStream { return cli.out } // Err returns the writer used for stderr func (cli *DockerCli) Err() io.Writer { return cli.err } // In returns the reader used for stdin func (cli *DockerCli) In() *InStream { return cli.in } // ConfigFile returns the ConfigFile func (cli *DockerCli) ConfigFile() *configfile.ConfigFile { return cli.configFile } // CredentialsStore returns a new credentials store based // on the settings provided in the configuration file. func (cli *DockerCli) CredentialsStore() credentials.Store { if cli.configFile.CredentialsStore != "" { return credentials.NewNativeStore(cli.configFile) } return credentials.NewFileStore(cli.configFile) } // Initialize the dockerCli runs initialization that must happen after command // line flags are parsed. func (cli *DockerCli) Initialize(opts *cliflags.ClientOptions) error { cli.configFile = LoadDefaultConfigFile(cli.err) var err error cli.client, err = NewAPIClientFromFlags(opts.Common, cli.configFile) if err != nil { return err } if opts.Common.TrustKey == "" { cli.keyFile = filepath.Join(cliconfig.ConfigDir(), cliflags.DefaultTrustKeyFile) } else { cli.keyFile = opts.Common.TrustKey } return nil } // NewDockerCli returns a DockerCli instance with IO output and error streams set by in, out and err. func NewDockerCli(in io.ReadCloser, out, err io.Writer) *DockerCli { return &DockerCli{in: NewInStream(in), out: NewOutStream(out), err: err} } // LoadDefaultConfigFile attempts to load the default config file and returns // an initialized ConfigFile struct if none is found. 
func LoadDefaultConfigFile(err io.Writer) *configfile.ConfigFile { configFile, e := cliconfig.Load(cliconfig.ConfigDir()) if e != nil { fmt.Fprintf(err, "WARNING: Error loading config file:%v\n", e) } if !configFile.ContainsAuth() { credentials.DetectDefaultStore(configFile) } return configFile } // NewAPIClientFromFlags creates a new APIClient from command line flags func NewAPIClientFromFlags(opts *cliflags.CommonOptions, configFile *configfile.ConfigFile) (client.APIClient, error) { host, err := getServerHost(opts.Hosts, opts.TLSOptions) if err != nil { return &client.Client{}, err } customHeaders := configFile.HTTPHeaders if customHeaders == nil { customHeaders = map[string]string{} } customHeaders["User-Agent"] = UserAgent() verStr := api.DefaultVersion if tmpStr := os.Getenv("DOCKER_API_VERSION"); tmpStr != "" { verStr = tmpStr } httpClient, err := newHTTPClient(host, opts.TLSOptions) if err != nil { return &client.Client{}, err } return client.NewClient(host, verStr, httpClient, customHeaders) } func getServerHost(hosts []string, tlsOptions *tlsconfig.Options) (host string, err error) { switch len(hosts) { case 0: host = os.Getenv("DOCKER_HOST") case 1: host = hosts[0] default: return "", errors.New("Please specify only one -H") } host, err = dopts.ParseHost(tlsOptions != nil, host) return } func newHTTPClient(host string, tlsOptions *tlsconfig.Options) (*http.Client, error) { if tlsOptions == nil { // let the api client configure the default transport. return nil, nil } config, err := tlsconfig.Client(*tlsOptions) if err != nil { return nil, err } tr := &http.Transport{ TLSClientConfig: config, } proto, addr, _, err := client.ParseHost(host) if err != nil { return nil, err } sockets.ConfigureTransport(tr, proto, addr) return &http.Client{ Transport: tr, }, nil } // UserAgent returns the user agent string used for making API requests func UserAgent() string { return "Docker-Client/" + dockerversion.Version + " (" + runtime.GOOS + ")" }
[ "\"DOCKER_API_VERSION\"", "\"DOCKER_HOST\"" ]
[]
[ "DOCKER_HOST", "DOCKER_API_VERSION" ]
[]
["DOCKER_HOST", "DOCKER_API_VERSION"]
go
2
0
baoming/webapp/tests.py
from django.contrib.auth.hashers import make_password, check_password import os os.environ.update({"DJANGO_SETTINGS_MODULE": "baoming.settings"}) from django.test import TestCase class File2DB(object): """ Password encryption related helpers. """ @staticmethod def read(): """ Read the file. :param :return: """ with open('nation.txt', 'r', encoding='UTF-8') as file: for line in file.readlines(): nation_name = line.split(' ')[1] print(nation_name+"\t"+line) # nation = NationInfo() # nation.nation_name = nation_name # nation.explain = line # nation.save() class PwdTestCase(TestCase): def setUp(self): pass def test_animals_can_speak(self): pass def test_add_business(self): pass def test_encryption(self): """ Test password encryption. """ File2DB.read()
[]
[]
[]
[]
[]
python
0
0
main.go
// Copyright 2022 Layer5 Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "fmt" "os" "path" "strings" "path/filepath" "time" "github.com/layer5io/meshery-adapter-library/adapter" "github.com/layer5io/meshery-adapter-library/api/grpc" "github.com/layer5io/meshery-cilium/cilium" "github.com/layer5io/meshery-cilium/cilium/oam" "github.com/layer5io/meshery-cilium/internal/config" configprovider "github.com/layer5io/meshkit/config/provider" "github.com/layer5io/meshkit/logger" "github.com/layer5io/meshery-cilium/build" ) var ( serviceName = "cilium-adapter" version = "edge" gitsha = "none" ) func init() { // Create the config path if it doesn't exists as the entire adapter // expects that directory to exists, which may or may not be true if err := os.MkdirAll(path.Join(config.RootPath(), "bin"), 0750); err != nil { fmt.Println(err) os.Exit(1) } } // main is the entrypoint of the adaptor func main() { // Initialize Logger instance log, err := logger.New(serviceName, logger.Options{ Format: logger.SyslogLogFormat, DebugLevel: isDebug(), }) if err != nil { fmt.Println(err) os.Exit(1) } err = os.Setenv("KUBECONFIG", path.Join( config.KubeConfigDefaults[configprovider.FilePath], fmt.Sprintf("%s.%s", config.KubeConfigDefaults[configprovider.FileName], config.KubeConfigDefaults[configprovider.FileType])), ) if err != nil { // Fail silently log.Warn(err) } // Initialize application specific configs and dependencies // App and request config cfg, err := config.New(configprovider.ViperKey) if err != nil { log.Error(err) os.Exit(1) } service := &grpc.Service{} err = cfg.GetObject(adapter.ServerKey, service) if err != nil { log.Error(err) os.Exit(1) } kubeconfigHandler, err := config.NewKubeconfigBuilder(configprovider.ViperKey) if err != nil { log.Error(err) os.Exit(1) } // // Initialize Tracing instance // tracer, err := tracing.New(service.Name, service.TraceURL) // if err != nil { // log.Err("Tracing Init Failed", err.Error()) // os.Exit(1) // } // Initialize Handler intance handler := cilium.New(cfg, log, kubeconfigHandler) handler = adapter.AddLogger(log, handler) service.Handler = handler service.Channel = make(chan interface{}, 10) service.StartedAt = time.Now() service.Version = version service.GitSHA = gitsha go registerCapabilities(service.Port, log) //Registering static capabilities go registerDynamicCapabilities(service.Port, log) //Registering latest capabilities periodically // Server Initialization log.Info("Adaptor Listening at port: ", service.Port) err = grpc.Start(service, nil) if err != nil { log.Error(err) os.Exit(1) } } func isDebug() bool { return os.Getenv("DEBUG") == "true" } func mesheryServerAddress() string { meshReg := os.Getenv("MESHERY_SERVER") if meshReg != "" { if strings.HasPrefix(meshReg, "http") { return meshReg } return "http://" + meshReg } return "http://localhost:9081" } func serviceAddress() string { svcAddr := os.Getenv("SERVICE_ADDR") if svcAddr != "" { return svcAddr } return "localhost" } func registerCapabilities(port string, log 
logger.Handler) { // Register workloads log.Info("Registering static workloads...") if err := oam.RegisterWorkloads(mesheryServerAddress(), serviceAddress()+":"+port); err != nil { log.Info(err.Error()) } log.Info("Registering static workloads completed") // Register traits if err := oam.RegisterTraits(mesheryServerAddress(), serviceAddress()+":"+port); err != nil { log.Info(err.Error()) } } func registerDynamicCapabilities(port string, log logger.Handler) { registerWorkloads(port, log) //Start the ticker const reRegisterAfter = 24 ticker := time.NewTicker(reRegisterAfter * time.Hour) for { <-ticker.C registerWorkloads(port, log) } } func registerWorkloads(port string, log logger.Handler) { version := build.DefaultVersion url := build.DefaultURL gm := build.DefaultGenerationMethod // Prechecking to skip comp gen if os.Getenv("FORCE_DYNAMIC_REG") != "true" && oam.AvailableVersions[version] { log.Info("Components available statically for version ", version, ". Skipping dynamic component registeration") return } //If a URL is passed from env variable, it will be used for component generation with default method being "using manifests" // In case a helm chart URL is passed, COMP_GEN_METHOD env variable should be set to Helm otherwise the component generation fails if os.Getenv("COMP_GEN_URL") != "" && (os.Getenv("COMP_GEN_METHOD") == "Helm" || os.Getenv("COMP_GEN_METHOD") == "Manifest") { url = os.Getenv("COMP_GEN_URL") gm = os.Getenv("COMP_GEN_METHOD") log.Info("Registering workload components from url ", url, " using ", gm, " method...") } log.Info("Registering latest workload components for version ", version) for _, crd := range build.CRDNames { crdurl := url + crd log.Info("Registering ", crdurl) if err := adapter.CreateComponents(adapter.StaticCompConfig{ URL: crdurl, Method: gm, Path: build.WorkloadPath, DirName: version, Config: build.NewConfig(version), }); err != nil { log.Info(err.Error()) return } } //The below log is checked in the workflows. If you change this log, reflect that change in the workflow where components are generated log.Info("Component creation completed for version ", version) //Now we will register in case log.Info("Registering workloads with Meshery Server for version ", version) originalPath := oam.WorkloadPath oam.WorkloadPath = filepath.Join(originalPath, version) defer resetWorkloadPath(originalPath) if err := oam.RegisterWorkloads(mesheryServerAddress(), serviceAddress()+":"+port); err != nil { log.Error(err) return } log.Info("Latest workload components successfully registered for version ", version) } func resetWorkloadPath(orig string) { oam.WorkloadPath = orig }
[ "\"DEBUG\"", "\"MESHERY_SERVER\"", "\"SERVICE_ADDR\"", "\"FORCE_DYNAMIC_REG\"", "\"COMP_GEN_URL\"", "\"COMP_GEN_METHOD\"", "\"COMP_GEN_METHOD\"", "\"COMP_GEN_URL\"", "\"COMP_GEN_METHOD\"" ]
[]
[ "COMP_GEN_METHOD", "COMP_GEN_URL", "MESHERY_SERVER", "SERVICE_ADDR", "DEBUG", "FORCE_DYNAMIC_REG" ]
[]
["COMP_GEN_METHOD", "COMP_GEN_URL", "MESHERY_SERVER", "SERVICE_ADDR", "DEBUG", "FORCE_DYNAMIC_REG"]
go
6
0
src/test/java/com/github/harbby/ashtarte/example/PageRankTest.java
package com.github.harbby.ashtarte.example; import com.github.harbby.ashtarte.BatchContext; import com.github.harbby.ashtarte.api.DataSet; import com.github.harbby.ashtarte.api.KvDataSet; import com.github.harbby.gadtry.collection.tuple.Tuple2; import org.junit.Assert; import org.junit.Test; import java.util.Collection; import java.util.List; import java.util.Map; import java.util.stream.Collectors; /** * PageRank was invented by Google co-founder Larry Page. * <p> * The algorithm is iterative and its results converge; * the number of iterations affects the degree of convergence. */ public class PageRankTest { private final BatchContext mppContext = BatchContext.builder() .setParallelism(2) .getOrCreate(); @Test public void pageRank4itersTest() { int iters = 1000; // number of iterations String sparkHome = System.getenv("SPARK_HOME"); DataSet<String> lines = mppContext.textFile(sparkHome + "/data/mllib/pagerank_data.txt"); KvDataSet<String, Iterable<String>> links = lines.kvDataSet(s -> { String[] parts = s.split("\\s+"); return new Tuple2<>(parts[0], parts[1]); }).distinct().groupByKey().cache(); KvDataSet<String, Double> ranks = links.mapValues(v -> 1.0); for (int i = 1; i <= iters; i++) { DataSet<Tuple2<String, Double>> contribs = links.join(ranks).values().flatMapIterator(it -> { Collection<String> urls = (Collection<String>) it.f1(); Double rank = it.f2(); long size = urls.size(); return urls.stream().map(url -> new Tuple2<>(url, rank / size)).iterator(); }); ranks = KvDataSet.toKvDataSet(contribs).reduceByKey((x, y) -> x + y).mapValues(x -> 0.15 + 0.85 * x); } List<Tuple2<String, Double>> output = ranks.collect(); output.forEach(tup -> System.out.println(String.format("%s has rank: %s .", tup.f1(), tup.f2()))); Map<String, Double> data = output.stream().collect(Collectors.toMap(k -> k.f1(), v -> v.f2())); Assert.assertEquals(data.get("1"), 1.918918918918918D, 1e-7); Assert.assertEquals(data.get("2"), 0.6936936936936938, 1e-7); Assert.assertEquals(data.get("3"), 0.6936936936936938, 1e-7); Assert.assertEquals(data.get("4"), 0.6936936936936938, 1e-7); } }
[ "\"SPARK_HOME\"" ]
[]
[ "SPARK_HOME" ]
[]
["SPARK_HOME"]
java
1
0
validate_export.py
""" Validates exported dataset. """ import csv import logging import os import sys from collections import namedtuple import toolz def validate_export(export_path: str): logging.info(f'Validating export at "{export_path}"...') logging.info("Reading exported dataset...") with open(export_path) as infile: reader = csv.reader(infile) Tweet = namedtuple('Tweet', next(reader)) tweets = {int(line[0]): Tweet(*line) for line in reader if len(line) > 0} logging.info(f"Read {len(tweets)} tweets.") all_author_ids = set(t.author_id for t in tweets.values()) logging.info(f"Found {len(all_author_ids)} different authors.") orphans = [t for t in tweets.values() if len(t.response_tweet_id) == 0 and len(t.in_response_to_tweet_id) == 0] logging.info(f"Found {len(orphans)} orphan tweets.") first_requests = [ t for t in tweets.values() if t.in_response_to_tweet_id == '' ] logging.info(f"Found {len(first_requests)} conversation starts.") replies = list(toolz.concat( [tweets[int(tid)] for tid in t.response_tweet_id.split(',') if int(tid) in tweets] for t in first_requests if len(t.response_tweet_id) != '' )) non_cs_replies = [t for t in replies if not t.inbound] logging.info(f"Found {len(non_cs_replies)} non-inbound response tweets out of {len(replies)}.") if __name__ == '__main__': export_path = 'twcs.csv' if len(sys.argv) < 2 else sys.argv[1] logging.basicConfig( format='%(levelname)s:%(asctime)s.%(msecs)03d [%(threadName)s] - %(message)s', datefmt='%Y-%m-%d,%H:%M:%S', level=getattr(logging, os.environ.get('LOG_LEVEL', 'INFO'))) validate_export(export_path)
[]
[]
[ "LOG_LEVEL" ]
[]
["LOG_LEVEL"]
python
1
0
testutil/fakestorage/fixtures.go
package fakestorage // import "github.com/docker/docker/testutil/fakestorage" import ( "context" "io" "io/ioutil" "os" "os/exec" "path/filepath" "sync" "testing" "github.com/docker/docker/api/types" "github.com/docker/docker/pkg/archive" "gotest.tools/v3/assert" ) var ensureHTTPServerOnce sync.Once func ensureHTTPServerImage(t testing.TB) { t.Helper() var doIt bool ensureHTTPServerOnce.Do(func() { doIt = true }) if !doIt { return } defer testEnv.ProtectImage(t, "httpserver:latest") tmp, err := ioutil.TempDir("", "docker-http-server-test") if err != nil { t.Fatalf("could not build http server: %v", err) } defer os.RemoveAll(tmp) goos := testEnv.OSType if goos == "" { goos = "linux" } goarch := os.Getenv("DOCKER_ENGINE_GOARCH") if goarch == "" { goarch = "amd64" } cpCmd, lookErr := exec.LookPath("cp") if lookErr != nil { t.Fatalf("could not build http server: %v", lookErr) } if _, err = os.Stat("../contrib/httpserver/httpserver"); os.IsNotExist(err) { goCmd, lookErr := exec.LookPath("go") if lookErr != nil { t.Fatalf("could not build http server: %v", lookErr) } cmd := exec.Command(goCmd, "build", "-o", filepath.Join(tmp, "httpserver"), "github.com/docker/docker/contrib/httpserver") cmd.Env = append(os.Environ(), []string{ "CGO_ENABLED=0", "GOOS=" + goos, "GOARCH=" + goarch, }...) var out []byte if out, err = cmd.CombinedOutput(); err != nil { t.Fatalf("could not build http server: %s", string(out)) } } else { if out, err := exec.Command(cpCmd, "../contrib/httpserver/httpserver", filepath.Join(tmp, "httpserver")).CombinedOutput(); err != nil { t.Fatalf("could not copy http server: %v", string(out)) } } if out, err := exec.Command(cpCmd, "../contrib/httpserver/Dockerfile", filepath.Join(tmp, "Dockerfile")).CombinedOutput(); err != nil { t.Fatalf("could not build http server: %v", string(out)) } c := testEnv.APIClient() reader, err := archive.TarWithOptions(tmp, &archive.TarOptions{}) assert.NilError(t, err) resp, err := c.ImageBuild(context.Background(), reader, types.ImageBuildOptions{ Remove: true, ForceRemove: true, Tags: []string{"httpserver"}, }) assert.NilError(t, err) _, err = io.Copy(ioutil.Discard, resp.Body) assert.NilError(t, err) }
[ "\"DOCKER_ENGINE_GOARCH\"" ]
[]
[ "DOCKER_ENGINE_GOARCH" ]
[]
["DOCKER_ENGINE_GOARCH"]
go
1
0
python/src/cloudstorage/rest_api.py
# Copyright 2012 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, # either express or implied. See the License for the specific # language governing permissions and limitations under the License. """Base and helper classes for Google RESTful APIs.""" __all__ = ['add_sync_methods'] import logging import os import random import time from . import api_utils try: from google.appengine.api import app_identity from google.appengine.api import lib_config from google.appengine.ext import ndb except ImportError: from google.appengine.api import app_identity from google.appengine.api import lib_config from google.appengine.ext import ndb @ndb.tasklet def _make_token_async(scopes, service_account_id): """Get a fresh authentication token. Args: scopes: A list of scopes. service_account_id: Internal-use only. Raises: An ndb.Return with a tuple (token, expiration_time) where expiration_time is seconds since the epoch. """ rpc = app_identity.create_rpc() app_identity.make_get_access_token_call(rpc, scopes, service_account_id) token, expires_at = yield rpc raise ndb.Return((token, expires_at)) class _ConfigDefaults(object): TOKEN_MAKER = _make_token_async _config = lib_config.register('cloudstorage', _ConfigDefaults.__dict__) def _make_sync_method(name): """Helper to synthesize a synchronous method from an async method name. Used by the @add_sync_methods class decorator below. Args: name: The name of the synchronous method. Returns: A method (with first argument 'self') that retrieves and calls self.<name>, passing its own arguments, expects it to return a Future, and then waits for and returns that Future's result. """ def sync_wrapper(self, *args, **kwds): method = getattr(self, name) future = method(*args, **kwds) return future.get_result() return sync_wrapper def add_sync_methods(cls): """Class decorator to add synchronous methods corresponding to async methods. This modifies the class in place, adding additional methods to it. If a synchronous method of a given name already exists it is not replaced. Args: cls: A class. Returns: The same class, modified in place. """ for name in cls.__dict__.keys(): if name.endswith('_async'): sync_name = name[:-6] if not hasattr(cls, sync_name): setattr(cls, sync_name, _make_sync_method(name)) return cls class _AE_TokenStorage_(ndb.Model): """Entity to store app_identity tokens in memcache.""" token = ndb.StringProperty() expires = ndb.FloatProperty() class _RestApi(object): """Base class for REST-based API wrapper classes. This class manages authentication tokens and request retries. All APIs are available as synchronous and async methods; synchronous methods are synthesized from async ones by the add_sync_methods() function in this module. WARNING: Do NOT directly use this api. It's an implementation detail and is subject to change at any release. """ def __init__(self, scopes, service_account_id=None, token_maker=None, retry_params=None): """Constructor. Args: scopes: A scope or a list of scopes. service_account_id: Internal use only. token_maker: An asynchronous function of the form (scopes, service_account_id) -> (token, expires). 
retry_params: An instance of api_utils.RetryParams. If None, the default for current thread will be used. """ if isinstance(scopes, basestring): scopes = [scopes] self.scopes = scopes self.service_account_id = service_account_id self.make_token_async = token_maker or _config.TOKEN_MAKER if not retry_params: retry_params = api_utils._get_default_retry_params() self.retry_params = retry_params self.user_agent = {'User-Agent': retry_params._user_agent} self.expiration_headroom = random.randint(60, 240) def __getstate__(self): """Store state as part of serialization/pickling.""" return {'scopes': self.scopes, 'id': self.service_account_id, 'a_maker': (None if self.make_token_async == _make_token_async else self.make_token_async), 'retry_params': self.retry_params, 'expiration_headroom': self.expiration_headroom} def __setstate__(self, state): """Restore state as part of deserialization/unpickling.""" self.__init__(state['scopes'], service_account_id=state['id'], token_maker=state['a_maker'], retry_params=state['retry_params']) self.expiration_headroom = state['expiration_headroom'] @ndb.tasklet def do_request_async(self, url, method='GET', headers=None, payload=None, deadline=None, callback=None): """Issue one HTTP request. It performs async retries using tasklets. Args: url: the url to fetch. method: the method in which to fetch. headers: the http headers. payload: the data to submit in the fetch. deadline: the deadline in which to make the call. callback: the call to make once completed. Yields: The async fetch of the url. """ retry_wrapper = api_utils._RetryWrapper( self.retry_params, retriable_exceptions=api_utils._RETRIABLE_EXCEPTIONS, should_retry=api_utils._should_retry) resp = yield retry_wrapper.run( self.urlfetch_async, url=url, method=method, headers=headers, payload=payload, deadline=deadline, callback=callback, follow_redirects=False) raise ndb.Return((resp.status_code, resp.headers, resp.content)) @ndb.tasklet def get_token_async(self, refresh=False): """Get an authentication token. The token is cached in memcache, keyed by the scopes argument. Uses a random token expiration headroom value generated in the constructor to eliminate a burst of GET_ACCESS_TOKEN API requests. Args: refresh: If True, ignore a cached token; default False. Yields: An authentication token. This token is guaranteed to be non-expired. """ key = '%s,%s' % (self.service_account_id, ','.join(self.scopes)) ts = yield _AE_TokenStorage_.get_by_id_async( key, use_cache=True, use_memcache=self.retry_params.memcache_access_token, use_datastore=self.retry_params.save_access_token) if refresh or ts is None or ts.expires < ( time.time() + self.expiration_headroom): token, expires_at = yield self.make_token_async( self.scopes, self.service_account_id) timeout = int(expires_at - time.time()) ts = _AE_TokenStorage_(id=key, token=token, expires=expires_at) if timeout > 0: yield ts.put_async(memcache_timeout=timeout, use_datastore=self.retry_params.save_access_token, force_writes=True, use_cache=True, use_memcache=self.retry_params.memcache_access_token) raise ndb.Return(ts.token) @ndb.tasklet def urlfetch_async(self, url, method='GET', headers=None, payload=None, deadline=None, callback=None, follow_redirects=False): """Make an async urlfetch() call. This is an async wrapper around urlfetch(). It adds an authentication header. Args: url: the url to fetch. method: the method in which to fetch. headers: the http headers. payload: the data to submit in the fetch. deadline: the deadline in which to make the call. 
callback: the call to make once completed. follow_redirects: whether or not to follow redirects. Yields: This returns a Future despite not being decorated with @ndb.tasklet! """ headers = {} if headers is None else dict(headers) headers.update(self.user_agent) try: self.token = yield self.get_token_async() except app_identity.InternalError, e: if os.environ.get('DATACENTER', '').endswith('sandman'): self.token = None logging.warning('Could not fetch an authentication token in sandman ' 'based Appengine devel setup; proceeding without one.') else: raise e if self.token: headers['authorization'] = 'OAuth ' + self.token deadline = deadline or self.retry_params.urlfetch_timeout ctx = ndb.get_context() resp = yield ctx.urlfetch( url, payload=payload, method=method, headers=headers, follow_redirects=follow_redirects, deadline=deadline, callback=callback) raise ndb.Return(resp) _RestApi = add_sync_methods(_RestApi)
[]
[]
[ "DATACENTER" ]
[]
["DATACENTER"]
python
1
0
test/com/facebook/buck/parser/ParserTest.java
/* * Copyright 2015-present Facebook, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. You may obtain * a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. */ package com.facebook.buck.parser; import static com.facebook.buck.parser.ParserConfig.DEFAULT_BUILD_FILE_NAME; import static com.facebook.buck.testutil.WatchEventsForTests.createPathEvent; import static com.google.common.base.Charsets.UTF_8; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItems; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assume.assumeTrue; import com.facebook.buck.cli.BuckConfig; import com.facebook.buck.cli.FakeBuckConfig; import com.facebook.buck.event.BuckEventBus; import com.facebook.buck.event.BuckEventBusFactory; import com.facebook.buck.event.FakeBuckEventListener; import com.facebook.buck.event.listener.BroadcastEventListener; import com.facebook.buck.io.MorePaths; import com.facebook.buck.io.ProjectFilesystem; import com.facebook.buck.json.BuildFileParseException; import com.facebook.buck.json.ParseBuckFileEvent; import com.facebook.buck.jvm.java.JavaLibrary; import com.facebook.buck.model.BuildTarget; import com.facebook.buck.model.BuildTargetException; import com.facebook.buck.model.BuildTargetFactory; import com.facebook.buck.model.HasBuildTarget; import com.facebook.buck.model.ImmutableFlavor; import com.facebook.buck.model.UnflavoredBuildTarget; import com.facebook.buck.rules.ActionGraphCache; import com.facebook.buck.rules.BuildRule; import com.facebook.buck.rules.BuildRuleResolver; import com.facebook.buck.rules.Cell; import com.facebook.buck.rules.ConstructorArgMarshaller; import com.facebook.buck.rules.TargetGraph; import com.facebook.buck.rules.TargetNode; import com.facebook.buck.rules.TestCellBuilder; import com.facebook.buck.rules.coercer.DefaultTypeCoercerFactory; import com.facebook.buck.shell.GenruleDescription; import com.facebook.buck.testutil.WatchEventsForTests; import com.facebook.buck.testutil.integration.TemporaryPaths; import com.facebook.buck.testutil.integration.TestDataHelper; import com.facebook.buck.util.HumanReadableException; import com.facebook.buck.util.ObjectMappers; import com.facebook.buck.util.environment.Platform; import com.google.common.base.Function; import com.google.common.base.Joiner; import com.google.common.base.Optional; import com.google.common.base.Preconditions; import com.google.common.base.Predicate; import com.google.common.base.Predicates; import com.google.common.collect.FluentIterable; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import com.google.common.collect.ImmutableSortedSet; import com.google.common.collect.Iterables; import 
com.google.common.eventbus.Subscribe; import com.google.common.hash.HashCode; import com.google.common.util.concurrent.ListeningExecutorService; import com.google.common.util.concurrent.MoreExecutors; import org.hamcrest.Matchers; import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.nio.file.StandardWatchEventKinds; import java.nio.file.WatchEvent; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.Map; import java.util.SortedMap; import java.util.concurrent.Executors; @RunWith(Parameterized.class) public class ParserTest { @Rule public TemporaryPaths tempDir = new TemporaryPaths(); @Rule public ExpectedException thrown = ExpectedException.none(); private final int threads; private final boolean parallelParsing; private Path defaultIncludeFile; private Path includedByIncludeFile; private Path includedByBuildFile; private Path testBuildFile; private Parser parser; private ProjectFilesystem filesystem; private Path cellRoot; private BuckEventBus eventBus; private Cell cell; private ParseEventStartedCounter counter; private ListeningExecutorService executorService; public ParserTest(int threads, boolean parallelParsing) { this.threads = threads; this.parallelParsing = parallelParsing; } @Parameterized.Parameters public static Collection<Object[]> generateData() { return Arrays.asList(new Object[][] { { 1, false, }, { 1, true, }, { 2, true, }, }); } @Before public void setUp() throws IOException, InterruptedException { tempDir.newFolder("java", "com", "facebook"); defaultIncludeFile = tempDir.newFile( "java/com/facebook/defaultIncludeFile").toRealPath(); Files.write(defaultIncludeFile, "\n".getBytes(UTF_8)); includedByIncludeFile = tempDir.newFile( "java/com/facebook/includedByIncludeFile").toRealPath(); Files.write(includedByIncludeFile, "\n".getBytes(UTF_8)); includedByBuildFile = tempDir.newFile( "java/com/facebook/includedByBuildFile").toRealPath(); Files.write( includedByBuildFile, "include_defs('//java/com/facebook/includedByIncludeFile')\n".getBytes(UTF_8)); testBuildFile = tempDir.newFile("java/com/facebook/BUCK").toRealPath(); Files.write( testBuildFile, ("include_defs('//java/com/facebook/includedByBuildFile')\n" + "java_library(name = 'foo')\n" + "java_library(name = 'bar')\n" + "genrule(name = 'baz', out = '')\n").getBytes(UTF_8)); tempDir.newFile("bar.py"); // Create a temp directory with some build files. 
Path root = tempDir.getRoot().toRealPath(); filesystem = new ProjectFilesystem(root); cellRoot = filesystem.getRootPath(); eventBus = BuckEventBusFactory.newInstance(); ImmutableMap.Builder<String, ImmutableMap<String, String>> configSectionsBuilder = ImmutableMap.builder(); configSectionsBuilder .put("buildfile", ImmutableMap.of("includes", "//java/com/facebook/defaultIncludeFile")); if (parallelParsing) { configSectionsBuilder.put( "project", ImmutableMap.of( "temp_files", ".*\\.swp$", "parallel_parsing", "true", "parsing_threads", Integer.toString(threads))); } else { configSectionsBuilder.put("project", ImmutableMap.of("temp_files", ".*\\.swp$")); } configSectionsBuilder.put("unknown_flavors_messages", ImmutableMap.of("macosx*", "This is an error message read by the .buckconfig")); BuckConfig config = FakeBuckConfig.builder() .setFilesystem(filesystem) .setSections(configSectionsBuilder.build()) .build(); cell = new TestCellBuilder() .setFilesystem(filesystem) .setBuckConfig(config) .build(); DefaultTypeCoercerFactory typeCoercerFactory = new DefaultTypeCoercerFactory( ObjectMappers.newDefaultInstance()); BroadcastEventListener broadcastEventListener = new BroadcastEventListener(); broadcastEventListener.addEventBus(eventBus); parser = new Parser( broadcastEventListener, new ParserConfig(cell.getBuckConfig()), typeCoercerFactory, new ConstructorArgMarshaller(typeCoercerFactory)); counter = new ParseEventStartedCounter(); eventBus.register(counter); executorService = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(threads)); } @After public void tearDown() { executorService.shutdown(); } @Test @SuppressWarnings("unchecked") public void testParseBuildFilesForTargetsWithOverlappingTargets() throws Exception { // Execute buildTargetGraphForBuildTargets() with multiple targets that require parsing the same // build file. BuildTarget fooTarget = BuildTarget.builder(cellRoot, "//java/com/facebook", "foo").build(); BuildTarget barTarget = BuildTarget.builder(cellRoot, "//java/com/facebook", "bar").build(); Iterable<BuildTarget> buildTargets = ImmutableList.of(fooTarget, barTarget); // The EventBus should be updated with events indicating how parsing ran. FakeBuckEventListener listener = new FakeBuckEventListener(); eventBus.register(listener); TargetGraph targetGraph = parser.buildTargetGraph( eventBus, cell, false, executorService, buildTargets); BuildRuleResolver resolver = buildActionGraph(eventBus, targetGraph); BuildRule fooRule = resolver.requireRule(fooTarget); assertNotNull(fooRule); BuildRule barRule = resolver.requireRule(barTarget); assertNotNull(barRule); Iterable<ParseEvent> events = Iterables.filter(listener.getEvents(), ParseEvent.class); assertThat(events, Matchers.contains( Matchers.hasProperty("buildTargets", equalTo(buildTargets)), Matchers.allOf( Matchers.hasProperty("buildTargets", equalTo(buildTargets)), Matchers.hasProperty("graph", equalTo(Optional.of(targetGraph))) ))); } @Test public void testMissingBuildRuleInValidFile() throws BuildFileParseException, BuildTargetException, IOException, InterruptedException { // Execute buildTargetGraphForBuildTargets() with a target in a valid file but a bad rule name. 
BuildTarget fooTarget = BuildTarget.builder(cellRoot, "//java/com/facebook", "foo").build(); BuildTarget razTarget = BuildTarget.builder(cellRoot, "//java/com/facebook", "raz").build(); Iterable<BuildTarget> buildTargets = ImmutableList.of(fooTarget, razTarget); thrown.expectMessage( "No rule found when resolving target //java/com/facebook:raz in build file " + "//java/com/facebook/BUCK"); thrown.expectMessage( "Defined in file: " + filesystem.resolve(razTarget.getBasePath()).resolve(DEFAULT_BUILD_FILE_NAME)); parser.buildTargetGraph( eventBus, cell, false, executorService, buildTargets); } @Test public void testMissingBuildFile() throws InterruptedException, BuildFileParseException, IOException, BuildTargetException { BuildTarget target = BuildTarget.builder(cellRoot, "//path/to/nowhere", "nowhere").build(); Iterable<BuildTarget> buildTargets = ImmutableList.of(target); thrown.expect(Cell.MissingBuildFileException.class); thrown.expectMessage( "No build file at path/to/nowhere/BUCK when resolving target " + "//path/to/nowhere:nowhere"); parser.buildTargetGraph( eventBus, cell, false, executorService, buildTargets); } @Test public void shouldThrowAnExceptionIfConstructorArgMashallingFails() throws IOException, BuildFileParseException, InterruptedException { thrown.expect(HumanReadableException.class); thrown.expectMessage("found ////cake:walk"); Path buckFile = cellRoot.resolve("BUCK"); Files.write( buckFile, "genrule(name = 'cake', out = 'file.txt', cmd = '$(exe ////cake:walk) > $OUT')" .getBytes(UTF_8)); parser.getAllTargetNodes(eventBus, cell, false, executorService, buckFile); } @Test public void shouldThrowAnExceptionIfADepIsInAFileThatCannotBeParsed() throws IOException, InterruptedException, BuildTargetException, BuildFileParseException { thrown.expectMessage("Parse error for build file"); thrown.expectMessage(Paths.get("foo/BUCK").toString()); Path buckFile = cellRoot.resolve("BUCK"); Files.write( buckFile, "genrule(name = 'cake', out = 'foo.txt', cmd = '$(exe //foo:bar) > $OUT')".getBytes(UTF_8)); buckFile = cellRoot.resolve("foo/BUCK"); Files.createDirectories(buckFile.getParent()); Files.write( buckFile, "I do not parse as python".getBytes(UTF_8)); parser.buildTargetGraph( eventBus, cell, false, executorService, Collections.singleton(BuildTargetFactory.newInstance(cell.getFilesystem(), "//:cake"))); } @Test public void shouldThrowAnExceptionIfMultipleTargetsAreDefinedWithTheSameName() throws IOException, BuildFileParseException, InterruptedException { thrown.expect(BuildFileParseException.class); thrown.expectMessage("Duplicate rule definition found."); Path buckFile = cellRoot.resolve("BUCK"); Files.write( buckFile, ("export_file(name = 'cake', src = 'hello.txt')\n" + "genrule(name = 'cake', out = 'file.txt', cmd = 'touch $OUT')\n").getBytes(UTF_8)); parser.getAllTargetNodes(eventBus, cell, false, executorService, buckFile); } @Test public void shouldThrowAnExceptionIfNameIsNone() throws IOException, BuildFileParseException, InterruptedException { thrown.expect(BuildFileParseException.class); thrown.expectMessage("rules 'name' field must be a string. 
Found None."); Path buckFile = cellRoot.resolve("BUCK"); Files.write( buckFile, ("genrule(name = None, out = 'file.txt', cmd = 'touch $OUT')\n").getBytes(UTF_8)); parser.getAllTargetNodes(eventBus, cell, false, executorService, buckFile); } @Test public void shouldThrowAnExceptionWhenAnUnknownFlavorIsSeen() throws BuildFileParseException, BuildTargetException, InterruptedException, IOException { BuildTarget flavored = BuildTarget.builder(cellRoot, "//java/com/facebook", "foo") .addFlavors(ImmutableFlavor.of("doesNotExist")) .build(); thrown.expect(HumanReadableException.class); thrown.expectMessage( "Unrecognized flavor in target //java/com/facebook:foo#doesNotExist while parsing " + "//java/com/facebook/BUCK"); parser.buildTargetGraph( eventBus, cell, false, executorService, ImmutableSortedSet.of(flavored)); } @Test public void shouldThrowAnExceptionWhenAnUnknownFlavorIsSeenAndShowSuggestionsDefault() throws BuildFileParseException, BuildTargetException, InterruptedException, IOException { BuildTarget flavored = BuildTarget.builder(cellRoot, "//java/com/facebook", "foo") .addFlavors(ImmutableFlavor.of("android-unknown")) .build(); thrown.expect(HumanReadableException.class); thrown.expectMessage( "Unrecognized flavor in target //java/com/facebook:foo#android-unknown while parsing " + "//java/com/facebook/BUCK\nHere are some things you can try to get the following " + "flavors to work::\nandroid-unknown : Make sure you have the Android SDK/NDK " + "installed and set up. " + "See https://buckbuild.com/setup/install.html#locate-android-sdk\n"); parser.buildTargetGraph( eventBus, cell, false, executorService, ImmutableSortedSet.of(flavored)); } @Test public void shouldThrowAnExceptionWhenAnUnknownFlavorIsSeenAndShowSuggestionsFromConfig() throws BuildFileParseException, BuildTargetException, InterruptedException, IOException { BuildTarget flavored = BuildTarget.builder(cellRoot, "//java/com/facebook", "foo") .addFlavors(ImmutableFlavor.of("macosx109sdk")) .build(); thrown.expect(HumanReadableException.class); thrown.expectMessage( "Unrecognized flavor in target //java/com/facebook:foo#macosx109sdk while parsing " + "//java/com/facebook/BUCK\nHere are some things you can try to get the following " + "flavors to work::\nmacosx109sdk : This is an error message read by the .buckconfig"); parser.buildTargetGraph( eventBus, cell, false, executorService, ImmutableSortedSet.of(flavored)); } @Test public void shouldThrowAnExceptionWhenAFlavorIsAskedOfATargetThatDoesntSupportFlavors() throws BuildFileParseException, BuildTargetException, InterruptedException, IOException { BuildTarget flavored = BuildTarget.builder(cellRoot, "//java/com/facebook", "baz") .addFlavors(JavaLibrary.SRC_JAR) .build(); thrown.expect(HumanReadableException.class); thrown.expectMessage( "Target //java/com/facebook:baz (type genrule) does not currently support flavors " + "(tried [src])"); parser.buildTargetGraph( eventBus, cell, false, executorService, ImmutableSortedSet.of(flavored)); } @Test public void testInvalidDepFromValidFile() throws IOException, BuildFileParseException, BuildTargetException, InterruptedException { // Ensure an exception with a specific message is thrown. thrown.expect(HumanReadableException.class); thrown.expectMessage( "Couldn't get dependency '//java/com/facebook/invalid/lib:missing_rule' of target " + "'//java/com/facebook/invalid:foo'"); // Execute buildTargetGraphForBuildTargets() with a target in a valid file but a bad rule name. 
tempDir.newFolder("java", "com", "facebook", "invalid"); Path testInvalidBuildFile = tempDir.newFile("java/com/facebook/invalid/BUCK"); Files.write( testInvalidBuildFile, ("java_library(name = 'foo', deps = ['//java/com/facebook/invalid/lib:missing_rule'])\n" + "java_library(name = 'bar')\n").getBytes(UTF_8)); tempDir.newFolder("java", "com", "facebook", "invalid", "lib"); tempDir.newFile("java/com/facebook/invalid/lib/BUCK"); BuildTarget fooTarget = BuildTarget.builder(cellRoot, "//java/com/facebook/invalid", "foo").build(); Iterable<BuildTarget> buildTargets = ImmutableList.of(fooTarget); parser.buildTargetGraph( eventBus, cell, false, executorService, buildTargets); } @Test public void whenAllRulesRequestedWithTrueFilterThenMultipleRulesReturned() throws BuildFileParseException, BuildTargetException, IOException, InterruptedException { ImmutableSet<BuildTarget> targets = filterAllTargetsInProject( parser, cell, Predicates.<TargetNode<?>>alwaysTrue(), BuckEventBusFactory.newInstance(), executorService); ImmutableSet<BuildTarget> expectedTargets = ImmutableSet.of( BuildTarget.builder(cellRoot, "//java/com/facebook", "foo").build(), BuildTarget.builder(cellRoot, "//java/com/facebook", "bar").build(), BuildTarget.builder(cellRoot, "//java/com/facebook", "baz").build()); assertEquals("Should have returned all rules.", expectedTargets, targets); } @Test public void whenAllRulesAreRequestedMultipleTimesThenRulesAreOnlyParsedOnce() throws BuildFileParseException, BuildTargetException, IOException, InterruptedException { filterAllTargetsInProject( parser, cell, Predicates.<TargetNode<?>>alwaysTrue(), eventBus, executorService); filterAllTargetsInProject( parser, cell, Predicates.<TargetNode<?>>alwaysTrue(), eventBus, executorService); assertEquals("Should have cached build rules.", 1, counter.calls); } @Test public void whenNotifiedOfNonPathEventThenCacheRulesAreInvalidated() throws BuildFileParseException, BuildTargetException, IOException, InterruptedException { // Call filterAllTargetsInProject to populate the cache. filterAllTargetsInProject( parser, cell, Predicates.<TargetNode<?>>alwaysTrue(), eventBus, executorService); // Process event. WatchEvent<Object> event = WatchEventsForTests.createOverflowEvent(); parser.onFileSystemChange(event); // Call filterAllTargetsInProject to request cached rules. filterAllTargetsInProject( parser, cell, Predicates.<TargetNode<?>>alwaysTrue(), eventBus, executorService); // Test that the second parseBuildFile call repopulated the cache. assertEquals("Should have invalidated cache.", 2, counter.calls); } @Test public void pathInvalidationWorksAfterOverflow() throws Exception { // Call filterAllTargetsInProject to populate the cache. filterAllTargetsInProject( parser, cell, Predicates.<TargetNode<?>>alwaysTrue(), eventBus, executorService); // Send overflow event. parser.onFileSystemChange(WatchEventsForTests.createOverflowEvent()); // Call filterAllTargetsInProject to request cached rules. filterAllTargetsInProject( parser, cell, Predicates.<TargetNode<?>>alwaysTrue(), eventBus, executorService); // Test that the second parseBuildFile call repopulated the cache. assertEquals("Should have invalidated cache.", 2, counter.calls); // Send a "file added" event. parser.onFileSystemChange( createPathEvent( Paths.get("java/com/facebook/Something.java"), StandardWatchEventKinds.ENTRY_CREATE)); // Call filterAllTargetsInProject to request cached rules. 
filterAllTargetsInProject( parser, cell, Predicates.<TargetNode<?>>alwaysTrue(), eventBus, executorService); // Test that the third parseBuildFile call repopulated the cache. assertEquals("Should have invalidated cache.", 3, counter.calls); } @Test public void whenEnvironmentChangesThenCacheRulesAreInvalidated() throws BuildFileParseException, BuildTargetException, IOException, InterruptedException { BuckConfig config = FakeBuckConfig.builder() .setFilesystem(filesystem) .setEnvironment(ImmutableMap.of("Some Key", "Some Value", "PATH", System.getenv("PATH"))) .build(); Cell cell = new TestCellBuilder().setFilesystem(filesystem).setBuckConfig(config).build(); // Call filterAllTargetsInProject to populate the cache. filterAllTargetsInProject( parser, cell, Predicates.<TargetNode<?>>alwaysTrue(), eventBus, executorService); // Call filterAllTargetsInProject to request cached rules. config = FakeBuckConfig.builder() .setFilesystem(filesystem) .setEnvironment( ImmutableMap.of("Some Key", "Some Other Value", "PATH", System.getenv("PATH"))) .build(); cell = new TestCellBuilder().setFilesystem(filesystem).setBuckConfig(config).build(); filterAllTargetsInProject( parser, cell, Predicates.<TargetNode<?>>alwaysTrue(), eventBus, executorService); // Test that the second parseBuildFile call repopulated the cache. assertEquals("Should have invalidated cache.", 2, counter.calls); } @Test public void whenEnvironmentNotChangedThenCacheRulesAreNotInvalidated() throws BuildFileParseException, BuildTargetException, IOException, InterruptedException { BuckConfig config = FakeBuckConfig.builder() .setFilesystem(filesystem) .setEnvironment(ImmutableMap.of("Some Key", "Some Value", "PATH", System.getenv("PATH"))) .build(); Cell cell = new TestCellBuilder().setFilesystem(filesystem).setBuckConfig(config).build(); // Call filterAllTargetsInProject to populate the cache. filterAllTargetsInProject( parser, cell, Predicates.<TargetNode<?>>alwaysTrue(), eventBus, executorService); // Call filterAllTargetsInProject to request cached rules with identical environment. filterAllTargetsInProject( parser, cell, Predicates.<TargetNode<?>>alwaysTrue(), eventBus, executorService); // Test that the second parseBuildFile call repopulated the cache. assertEquals("Should not have invalidated cache.", 1, counter.calls); } @Test public void whenNotifiedOfBuildFileAddThenCacheRulesAreInvalidated() throws BuildFileParseException, BuildTargetException, IOException, InterruptedException { // Call parseBuildFile to populate the cache. parser.getRawTargetNodes( eventBus, cell, false, executorService, testBuildFile); // Process event. WatchEvent<Path> event = createPathEvent( MorePaths.relativize(tempDir.getRoot().toRealPath(), testBuildFile), StandardWatchEventKinds.ENTRY_CREATE); parser.onFileSystemChange(event); // Call parseBuildFile to request cached rules. parser.getRawTargetNodes( eventBus, cell, false, executorService, testBuildFile); // Test that the second parseBuildFile call repopulated the cache. assertEquals("Should have invalidated cache.", 2, counter.calls); } @Test public void whenNotifiedOfBuildFileChangeThenCacheRulesAreInvalidated() throws BuildFileParseException, BuildTargetException, IOException, InterruptedException { // Call parseBuildFile to populate the cache. parser.getRawTargetNodes( eventBus, cell, false, executorService, testBuildFile); // Process event. 
WatchEvent<Path> event = createPathEvent( MorePaths.relativize(tempDir.getRoot().toRealPath(), testBuildFile), StandardWatchEventKinds.ENTRY_MODIFY); parser.onFileSystemChange(event); // Call parseBuildFile to request cached rules. parser.getRawTargetNodes( eventBus, cell, false, executorService, testBuildFile); // Test that the second parseBuildFile call repopulated the cache. assertEquals("Should have invalidated cache.", 2, counter.calls); } @Test public void whenNotifiedOfBuildFileDeleteThenCacheRulesAreInvalidated() throws BuildFileParseException, BuildTargetException, IOException, InterruptedException { // Call parseBuildFile to populate the cache. parser.getRawTargetNodes( eventBus, cell, false, executorService, testBuildFile); // Process event. WatchEvent<Path> event = createPathEvent( MorePaths.relativize(tempDir.getRoot().toRealPath(), testBuildFile), StandardWatchEventKinds.ENTRY_DELETE); parser.onFileSystemChange(event); // Call parseBuildFile to request cached rules. parser.getRawTargetNodes( eventBus, cell, false, executorService, testBuildFile); // Test that the second parseBuildFile call repopulated the cache. assertEquals("Should have invalidated cache.", 2, counter.calls); } @Test public void whenNotifiedOfIncludeFileAddThenCacheRulesAreInvalidated() throws BuildFileParseException, BuildTargetException, IOException, InterruptedException { // Call parseBuildFile to populate the cache. parser.getRawTargetNodes( eventBus, cell, false, executorService, testBuildFile); // Process event. WatchEvent<Path> event = createPathEvent( MorePaths.relativize(tempDir.getRoot().toRealPath(), includedByBuildFile), StandardWatchEventKinds.ENTRY_CREATE); parser.onFileSystemChange(event); // Call parseBuildFile to request cached rules. parser.getRawTargetNodes( eventBus, cell, false, executorService, testBuildFile); // Test that the second parseBuildFile call repopulated the cache. assertEquals("Should have invalidated cache.", 2, counter.calls); } @Test public void whenNotifiedOfIncludeFileChangeThenCacheRulesAreInvalidated() throws BuildFileParseException, BuildTargetException, IOException, InterruptedException { // Call parseBuildFile to populate the cache. parser.getRawTargetNodes( eventBus, cell, false, executorService, testBuildFile); assertEquals("Should have parsed at all.", 1, counter.calls); // Process event. WatchEvent<Path> event = createPathEvent( MorePaths.relativize(tempDir.getRoot().toRealPath(), includedByBuildFile), StandardWatchEventKinds.ENTRY_MODIFY); parser.onFileSystemChange(event); // Call parseBuildFile to request cached rules. parser.getRawTargetNodes( eventBus, cell, false, executorService, testBuildFile); // Test that the second parseBuildFile call repopulated the cache. assertEquals("Should have invalidated cache.", 2, counter.calls); } @Test public void whenNotifiedOfIncludeFileDeleteThenCacheRulesAreInvalidated() throws BuildFileParseException, BuildTargetException, IOException, InterruptedException { // Call parseBuildFile to populate the cache. parser.getRawTargetNodes( eventBus, cell, false, executorService, testBuildFile); // Process event. WatchEvent<Path> event = createPathEvent( MorePaths.relativize(tempDir.getRoot().toRealPath(), includedByBuildFile), StandardWatchEventKinds.ENTRY_DELETE); parser.onFileSystemChange(event); // Call parseBuildFile to request cached rules. parser.getRawTargetNodes( eventBus, cell, false, executorService, testBuildFile); // Test that the second parseBuildFile call repopulated the cache. 
assertEquals("Should have invalidated cache.", 2, counter.calls); } @Test public void whenNotifiedOf2ndOrderIncludeFileAddThenCacheRulesAreInvalidated() throws BuildFileParseException, BuildTargetException, IOException, InterruptedException { // Call parseBuildFile to populate the cache. parser.getRawTargetNodes( eventBus, cell, false, executorService, testBuildFile); // Process event. WatchEvent<Path> event = createPathEvent( MorePaths.relativize(tempDir.getRoot().toRealPath(), includedByIncludeFile), StandardWatchEventKinds.ENTRY_CREATE); parser.onFileSystemChange(event); // Call parseBuildFile to request cached rules. parser.getRawTargetNodes( eventBus, cell, false, executorService, testBuildFile); // Test that the second parseBuildFile call repopulated the cache. assertEquals("Should have invalidated cache.", 2, counter.calls); } @Test public void whenNotifiedOf2ndOrderIncludeFileChangeThenCacheRulesAreInvalidated() throws BuildFileParseException, BuildTargetException, IOException, InterruptedException { // Call parseBuildFile to populate the cache. parser.getRawTargetNodes( eventBus, cell, false, executorService, testBuildFile); // Process event. WatchEvent<Path> event = createPathEvent( MorePaths.relativize(tempDir.getRoot().toRealPath(), includedByIncludeFile), StandardWatchEventKinds.ENTRY_MODIFY); parser.onFileSystemChange(event); // Call parseBuildFile to request cached rules. parser.getRawTargetNodes( eventBus, cell, false, executorService, testBuildFile); // Test that the second parseBuildFile call repopulated the cache. assertEquals("Should have invalidated cache.", 2, counter.calls); } @Test public void whenNotifiedOf2ndOrderIncludeFileDeleteThenCacheRulesAreInvalidated() throws BuildFileParseException, BuildTargetException, IOException, InterruptedException { // Call parseBuildFile to populate the cache. parser.getRawTargetNodes( eventBus, cell, false, executorService, testBuildFile); // Process event. WatchEvent<Path> event = createPathEvent( MorePaths.relativize(tempDir.getRoot().toRealPath(), includedByIncludeFile), StandardWatchEventKinds.ENTRY_DELETE); parser.onFileSystemChange(event); // Call parseBuildFile to request cached rules. parser.getRawTargetNodes( eventBus, cell, false, executorService, testBuildFile); // Test that the second parseBuildFile call repopulated the cache. assertEquals("Should have invalidated cache.", 2, counter.calls); } @Test public void whenNotifiedOfDefaultIncludeFileAddThenCacheRulesAreInvalidated() throws BuildFileParseException, BuildTargetException, IOException, InterruptedException { // Call parseBuildFile to populate the cache. parser.getRawTargetNodes( eventBus, cell, false, executorService, testBuildFile); // Process event. WatchEvent<Path> event = createPathEvent( MorePaths.relativize(tempDir.getRoot().toRealPath(), defaultIncludeFile), StandardWatchEventKinds.ENTRY_CREATE); parser.onFileSystemChange(event); // Call parseBuildFile to request cached rules. parser.getRawTargetNodes( eventBus, cell, false, executorService, testBuildFile); // Test that the second parseBuildFile call repopulated the cache. assertEquals("Should have invalidated cache.", 2, counter.calls); } @Test public void whenNotifiedOfDefaultIncludeFileChangeThenCacheRulesAreInvalidated() throws BuildFileParseException, BuildTargetException, IOException, InterruptedException { // Call parseBuildFile to populate the cache. parser.getRawTargetNodes( eventBus, cell, false, executorService, testBuildFile); // Process event. 
WatchEvent<Path> event = createPathEvent( MorePaths.relativize(tempDir.getRoot().toRealPath(), defaultIncludeFile), StandardWatchEventKinds.ENTRY_MODIFY); parser.onFileSystemChange(event); // Call parseBuildFile to request cached rules. parser.getRawTargetNodes( eventBus, cell, false, executorService, testBuildFile); // Test that the second parseBuildFile call repopulated the cache. assertEquals("Should have invalidated cache.", 2, counter.calls); } @Test public void whenNotifiedOfDefaultIncludeFileDeleteThenCacheRulesAreInvalidated() throws BuildFileParseException, BuildTargetException, IOException, InterruptedException { // Call parseBuildFile to populate the cache. parser.getRawTargetNodes( eventBus, cell, false, executorService, testBuildFile); // Process event. WatchEvent<Path> event = createPathEvent( MorePaths.relativize(tempDir.getRoot().toRealPath(), defaultIncludeFile), StandardWatchEventKinds.ENTRY_DELETE); parser.onFileSystemChange(event); // Call parseBuildFile to request cached rules. parser.getRawTargetNodes( eventBus, cell, false, executorService, testBuildFile); // Test that the second parseBuildFile call repopulated the cache. assertEquals("Should have invalidated cache.", 2, counter.calls); } @Test // TODO(shs96c): avoid invalidation when arbitrary contained (possibly backup) files are added. public void whenNotifiedOfContainedFileAddThenCacheRulesAreInvalidated() throws BuildFileParseException, BuildTargetException, IOException, InterruptedException { // Call parseBuildFile to populate the cache. parser.getRawTargetNodes( eventBus, cell, false, executorService, testBuildFile); // Process event. WatchEvent<Path> event = createPathEvent( Paths.get("java/com/facebook/SomeClass.java"), StandardWatchEventKinds.ENTRY_CREATE); parser.onFileSystemChange(event); // Call parseBuildFile to request cached rules. parser.getRawTargetNodes( eventBus, cell, false, executorService, testBuildFile); // Test that the second parseBuildFile call repopulated the cache. assertEquals("Should have invalidated cache.", 2, counter.calls); } @Test public void whenNotifiedOfContainedFileAddCachedAncestorsAreInvalidatedWithoutBoundaryChecks() throws Exception { BuckConfig config = FakeBuckConfig.builder() .setFilesystem(filesystem) .setSections( "[buildfile]", "includes = //java/com/facebook/defaultIncludeFile", "[project]", "check_package_boundary = false", "temp_files = ''") .build(); Cell cell = new TestCellBuilder() .setFilesystem(filesystem) .setBuckConfig(config) .build(); Path testAncestorBuildFile = tempDir.newFile("java/BUCK").toRealPath(); Files.write(testAncestorBuildFile, "java_library(name = 'root')\n".getBytes(UTF_8)); // Call parseBuildFile to populate the cache. parser.getRawTargetNodes( eventBus, cell, false, executorService, testAncestorBuildFile); // Process event. WatchEvent<Path> event = createPathEvent(Paths.get("java/com/facebook/SomeClass.java"), StandardWatchEventKinds.ENTRY_CREATE); parser.onFileSystemChange(event); // Call parseBuildFile to request cached rules. parser.getRawTargetNodes( eventBus, cell, false, executorService, testAncestorBuildFile); // Test that the second parseBuildFile call repopulated the cache. assertEquals("Should have invalidated cache.", 2, counter.calls); } @Test public void whenNotifiedOfContainedFileChangeThenCacheRulesAreNotInvalidated() throws BuildFileParseException, BuildTargetException, IOException, InterruptedException { // Call parseBuildFile to populate the cache. 
parser.getRawTargetNodes( eventBus, cell, false, executorService, testBuildFile); // Process event. WatchEvent<Path> event = createPathEvent(Paths.get("java/com/facebook/SomeClass.java"), StandardWatchEventKinds.ENTRY_MODIFY); parser.onFileSystemChange(event); // Call parseBuildFile to request cached rules. parser.getRawTargetNodes( eventBus, cell, false, executorService, testBuildFile); // Test that the second parseBuildFile call did not repopulate the cache. assertEquals("Should have not invalidated cache.", 1, counter.calls); } @Test // TODO(shs96c): avoid invalidation when arbitrary contained (possibly backup) files are deleted. public void whenNotifiedOfContainedFileDeleteThenCacheRulesAreInvalidated() throws BuildFileParseException, BuildTargetException, IOException, InterruptedException { // Call parseBuildFile to populate the cache. parser.getRawTargetNodes( eventBus, cell, false, executorService, testBuildFile); // Process event. WatchEvent<Path> event = createPathEvent(Paths.get("java/com/facebook/SomeClass.java"), StandardWatchEventKinds.ENTRY_DELETE); parser.onFileSystemChange(event); // Call parseBuildFile to request cached rules. parser.getRawTargetNodes( eventBus, cell, false, executorService, testBuildFile); // Test that the second parseBuildFile call repopulated the cache. assertEquals("Should have invalidated cache.", 2, counter.calls); } @Test public void whenNotifiedOfContainedTempFileAddThenCachedRulesAreNotInvalidated() throws BuildFileParseException, BuildTargetException, IOException, InterruptedException { // Call parseBuildFile to populate the cache. parser.getRawTargetNodes( eventBus, cell, false, executorService, testBuildFile); // Process event. WatchEvent<Path> event = createPathEvent(Paths.get("java/com/facebook/MumbleSwp.Java.swp"), StandardWatchEventKinds.ENTRY_CREATE); parser.onFileSystemChange(event); // Call parseBuildFile to request cached rules. parser.getRawTargetNodes( eventBus, cell, false, executorService, testBuildFile); // Test that the second parseBuildFile call repopulated the cache. assertEquals("Should not have invalidated cache.", 1, counter.calls); } @Test public void whenNotifiedOfContainedTempFileChangeThenCachedRulesAreNotInvalidated() throws BuildFileParseException, BuildTargetException, IOException, InterruptedException { // Call parseBuildFile to populate the cache. parser.getRawTargetNodes( eventBus, cell, false, executorService, testBuildFile); // Process event. WatchEvent<Path> event = createPathEvent(Paths.get("java/com/facebook/MumbleSwp.Java.swp"), StandardWatchEventKinds.ENTRY_MODIFY); parser.onFileSystemChange(event); // Call parseBuildFile to request cached rules. parser.getRawTargetNodes( eventBus, cell, false, executorService, testBuildFile); // Test that the second parseBuildFile call repopulated the cache. assertEquals("Should not have invalidated cache.", 1, counter.calls); } @Test public void whenNotifiedOfContainedTempFileDeleteThenCachedRulesAreNotInvalidated() throws BuildFileParseException, BuildTargetException, IOException, InterruptedException { // Call parseBuildFile to populate the cache. parser.getRawTargetNodes( eventBus, cell, false, executorService, testBuildFile); // Process event. WatchEvent<Path> event = createPathEvent(Paths.get("java/com/facebook/MumbleSwp.Java.swp"), StandardWatchEventKinds.ENTRY_DELETE); parser.onFileSystemChange(event); // Call parseBuildFile to request cached rules. 
parser.getRawTargetNodes( eventBus, cell, false, executorService, testBuildFile); // Test that the second parseBuildFile call repopulated the cache. assertEquals("Should not have invalidated cache.", 1, counter.calls); } @Test public void whenNotifiedOfUnrelatedFileAddThenCacheRulesAreNotInvalidated() throws BuildFileParseException, BuildTargetException, IOException, InterruptedException { // Call parseBuildFile to populate the cache. parser.getRawTargetNodes( eventBus, cell, false, executorService, testBuildFile); // Process event. WatchEvent<Path> event = createPathEvent(Paths.get("SomeClass.java__backup"), StandardWatchEventKinds.ENTRY_CREATE); parser.onFileSystemChange(event); // Call parseBuildFile to request cached rules. parser.getRawTargetNodes( eventBus, cell, false, executorService, testBuildFile); // Test that the second parseBuildFile call did not repopulate the cache. assertEquals("Should have not invalidated cache.", 1, counter.calls); } @Test public void whenNotifiedOfUnrelatedFileChangeThenCacheRulesAreNotInvalidated() throws BuildFileParseException, BuildTargetException, IOException, InterruptedException { // Call parseBuildFile to populate the cache. parser.getRawTargetNodes( eventBus, cell, false, executorService, testBuildFile); // Process event. WatchEvent<Path> event = createPathEvent(Paths.get("SomeClass.java__backup"), StandardWatchEventKinds.ENTRY_MODIFY); parser.onFileSystemChange(event); // Call parseBuildFile to request cached rules. parser.getRawTargetNodes( eventBus, cell, false, executorService, testBuildFile); // Test that the second parseBuildFile call did not repopulate the cache. assertEquals("Should have not invalidated cache.", 1, counter.calls); } @Test public void whenNotifiedOfUnrelatedFileDeleteThenCacheRulesAreNotInvalidated() throws BuildFileParseException, BuildTargetException, IOException, InterruptedException { // Call parseBuildFile to populate the cache. parser.getRawTargetNodes( eventBus, cell, false, executorService, testBuildFile); // Process event. WatchEvent<Path> event = createPathEvent( Paths.get("SomeClass.java__backup"), StandardWatchEventKinds.ENTRY_DELETE); parser.onFileSystemChange(event); // Call parseBuildFile to request cached rules. parser.getRawTargetNodes( eventBus, cell, false, executorService, testBuildFile); // Test that the second parseBuildFile call did not repopulate the cache. 
assertEquals("Should have not invalidated cache.", 1, counter.calls); } @Test public void whenAllRulesAreRequestedWithDifferingIncludesThenRulesAreParsedTwice() throws BuildFileParseException, BuildTargetException, IOException, InterruptedException { filterAllTargetsInProject( parser, cell, Predicates.<TargetNode<?>>alwaysTrue(), eventBus, executorService); BuckConfig config = FakeBuckConfig.builder() .setFilesystem(filesystem) .setSections( ImmutableMap.of( ParserConfig.BUILDFILE_SECTION_NAME, ImmutableMap.of(ParserConfig.INCLUDES_PROPERTY_NAME, "//bar.py"))) .build(); Cell cell = new TestCellBuilder() .setFilesystem(filesystem) .setBuckConfig(config) .build(); filterAllTargetsInProject( parser, cell, Predicates.<TargetNode<?>>alwaysTrue(), eventBus, executorService); assertEquals("Should have invalidated cache.", 2, counter.calls); } @Test public void whenAllRulesAreRequestedWithDifferingCellsThenRulesAreParsedOnce() throws BuildFileParseException, BuildTargetException, IOException, InterruptedException { filterAllTargetsInProject( parser, cell, Predicates.<TargetNode<?>>alwaysTrue(), eventBus, executorService); assertEquals("Should have parsed once.", 1, counter.calls); Path newTempDir = Files.createTempDirectory("junit-temp-path").toRealPath(); Files.createFile(newTempDir.resolve("bar.py")); ProjectFilesystem newFilesystem = new ProjectFilesystem(newTempDir); BuckConfig config = FakeBuckConfig.builder() .setFilesystem(newFilesystem) .setSections( ImmutableMap.of( ParserConfig.BUILDFILE_SECTION_NAME, ImmutableMap.of(ParserConfig.INCLUDES_PROPERTY_NAME, "//bar.py"))) .build(); Cell cell = new TestCellBuilder() .setFilesystem(newFilesystem) .setBuckConfig(config) .build(); filterAllTargetsInProject( parser, cell, Predicates.<TargetNode<?>>alwaysTrue(), eventBus, executorService); assertEquals("Should not have invalidated cache.", 1, counter.calls); } @Test public void whenAllRulesThenSingleTargetRequestedThenRulesAreParsedOnce() throws BuildFileParseException, BuildTargetException, IOException, InterruptedException { filterAllTargetsInProject( parser, cell, Predicates.<TargetNode<?>>alwaysTrue(), eventBus, executorService); BuildTarget foo = BuildTarget.builder(cellRoot, "//java/com/facebook", "foo").build(); parser.buildTargetGraph( eventBus, cell, false, executorService, ImmutableList.of(foo)); assertEquals("Should have cached build rules.", 1, counter.calls); } @Test public void whenSingleTargetThenAllRulesRequestedThenRulesAreParsedOnce() throws BuildFileParseException, BuildTargetException, IOException, InterruptedException { BuildTarget foo = BuildTarget.builder(cellRoot, "//java/com/facebook", "foo").build(); parser.buildTargetGraph( eventBus, cell, false, executorService, ImmutableList.of(foo)); filterAllTargetsInProject( parser, cell, Predicates.<TargetNode<?>>alwaysTrue(), eventBus, executorService); assertEquals("Should have replaced build rules", 1, counter.calls); } @Test public void whenBuildFilePathChangedThenFlavorsOfTargetsInPathAreInvalidated() throws Exception { tempDir.newFolder("foo"); tempDir.newFolder("bar"); Path testFooBuckFile = tempDir.newFile("foo/BUCK"); Files.write( testFooBuckFile, "java_library(name = 'foo', visibility=['PUBLIC'])\n".getBytes(UTF_8)); Path testBarBuckFile = tempDir.newFile("bar/BUCK"); Files.write( testBarBuckFile, ("java_library(name = 'bar',\n" + " deps = ['//foo:foo'])\n").getBytes(UTF_8)); // Fetch //bar:bar#src to put it in cache. 
BuildTarget barTarget = BuildTarget .builder(cellRoot, "//bar", "bar") .addFlavors(ImmutableFlavor.of("src")) .build(); Iterable<BuildTarget> buildTargets = ImmutableList.of(barTarget); parser.buildTargetGraph( eventBus, cell, false, executorService, buildTargets); // Rewrite //bar:bar so it doesn't depend on //foo:foo any more. // Delete foo/BUCK and invalidate the cache, which should invalidate // the cache entry for //bar:bar#src. Files.delete(testFooBuckFile); Files.write(testBarBuckFile, "java_library(name = 'bar')\n".getBytes(UTF_8)); WatchEvent<Path> deleteEvent = createPathEvent( Paths.get("foo").resolve("BUCK"), StandardWatchEventKinds.ENTRY_DELETE); parser.onFileSystemChange(deleteEvent); WatchEvent<Path> modifyEvent = createPathEvent( Paths.get("bar").resolve("BUCK"), StandardWatchEventKinds.ENTRY_MODIFY); parser.onFileSystemChange(modifyEvent); parser.buildTargetGraph( eventBus, cell, false, executorService, buildTargets); } @Test public void targetWithSourceFileChangesHash() throws Exception { tempDir.newFolder("foo"); Path testFooBuckFile = tempDir.newFile("foo/BUCK"); Files.write( testFooBuckFile, "java_library(name = 'lib', srcs=glob(['*.java']), visibility=['PUBLIC'])\n" .getBytes(UTF_8)); BuildTarget fooLibTarget = BuildTarget.builder(cellRoot, "//foo", "lib").build(); HashCode original = buildTargetGraphAndGetHashCodes(parser, fooLibTarget).get(fooLibTarget); DefaultTypeCoercerFactory typeCoercerFactory = new DefaultTypeCoercerFactory( ObjectMappers.newDefaultInstance()); parser = new Parser( new BroadcastEventListener(), new ParserConfig(cell.getBuckConfig()), typeCoercerFactory, new ConstructorArgMarshaller(typeCoercerFactory)); Path testFooJavaFile = tempDir.newFile("foo/Foo.java"); Files.write(testFooJavaFile, "// Ceci n'est pas une Javafile\n".getBytes(UTF_8)); HashCode updated = buildTargetGraphAndGetHashCodes(parser, fooLibTarget).get(fooLibTarget); assertNotEquals(original, updated); } @Test public void deletingSourceFileChangesHash() throws Exception { tempDir.newFolder("foo"); Path testFooBuckFile = tempDir.newFile("foo/BUCK"); Files.write( testFooBuckFile, "java_library(name = 'lib', srcs=glob(['*.java']), visibility=['PUBLIC'])\n" .getBytes(UTF_8)); Path testFooJavaFile = tempDir.newFile("foo/Foo.java"); Files.write(testFooJavaFile, "// Ceci n'est pas une Javafile\n".getBytes(UTF_8)); Path testBarJavaFile = tempDir.newFile("foo/Bar.java"); Files.write(testBarJavaFile, "// Seriously, no Java here\n".getBytes(UTF_8)); BuildTarget fooLibTarget = BuildTarget.builder(cellRoot, "//foo", "lib").build(); HashCode originalHash = buildTargetGraphAndGetHashCodes(parser, fooLibTarget).get(fooLibTarget); Files.delete(testBarJavaFile); WatchEvent<Path> deleteEvent = createPathEvent( Paths.get("foo/Bar.java"), StandardWatchEventKinds.ENTRY_DELETE); parser.onFileSystemChange(deleteEvent); HashCode updatedHash = buildTargetGraphAndGetHashCodes(parser, fooLibTarget).get(fooLibTarget); assertNotEquals(originalHash, updatedHash); } @Test public void renamingSourceFileChangesHash() throws Exception { tempDir.newFolder("foo"); Path testFooBuckFile = tempDir.newFile("foo/BUCK"); Files.write( testFooBuckFile, "java_library(name = 'lib', srcs=glob(['*.java']), visibility=['PUBLIC'])\n" .getBytes(UTF_8)); Path testFooJavaFile = tempDir.newFile("foo/Foo.java"); Files.write(testFooJavaFile, "// Ceci n'est pas une Javafile\n".getBytes(UTF_8)); BuildTarget fooLibTarget = BuildTarget.builder(cellRoot, "//foo", "lib").build(); HashCode originalHash = buildTargetGraphAndGetHashCodes(parser, 
fooLibTarget).get(fooLibTarget); Files.move(testFooJavaFile, testFooJavaFile.resolveSibling("Bar.java")); WatchEvent<Path> deleteEvent = createPathEvent( Paths.get("foo/Foo.java"), StandardWatchEventKinds.ENTRY_DELETE); WatchEvent<Path> createEvent = createPathEvent( Paths.get("foo/Bar.java"), StandardWatchEventKinds.ENTRY_CREATE); parser.onFileSystemChange(deleteEvent); parser.onFileSystemChange(createEvent); HashCode updatedHash = buildTargetGraphAndGetHashCodes(parser, fooLibTarget).get(fooLibTarget); assertNotEquals(originalHash, updatedHash); } @Test public void twoBuildTargetHashCodesPopulatesCorrectly() throws Exception { tempDir.newFolder("foo"); Path testFooBuckFile = tempDir.newFile("foo/BUCK"); Files.write( testFooBuckFile, ("java_library(name = 'lib', visibility=['PUBLIC'])\n" + "java_library(name = 'lib2', visibility=['PUBLIC'])\n").getBytes(UTF_8)); BuildTarget fooLibTarget = BuildTarget.builder(cellRoot, "//foo", "lib").build(); BuildTarget fooLib2Target = BuildTarget.builder(cellRoot, "//foo", "lib2").build(); ImmutableMap<BuildTarget, HashCode> hashes = buildTargetGraphAndGetHashCodes( parser, fooLibTarget, fooLib2Target); assertNotNull(hashes.get(fooLibTarget)); assertNotNull(hashes.get(fooLib2Target)); assertNotEquals(hashes.get(fooLibTarget), hashes.get(fooLib2Target)); } @Test public void addingDepToTargetChangesHashOfDependingTargetOnly() throws Exception { tempDir.newFolder("foo"); Path testFooBuckFile = tempDir.newFile("foo/BUCK"); Files.write( testFooBuckFile, ("java_library(name = 'lib', deps = [], visibility=['PUBLIC'])\n" + "java_library(name = 'lib2', deps = [], visibility=['PUBLIC'])\n") .getBytes(UTF_8)); BuildTarget fooLibTarget = BuildTarget.builder(cellRoot, "//foo", "lib").build(); BuildTarget fooLib2Target = BuildTarget.builder(cellRoot, "//foo", "lib2").build(); ImmutableMap<BuildTarget, HashCode> hashes = buildTargetGraphAndGetHashCodes( parser, fooLibTarget, fooLib2Target); HashCode libKey = hashes.get(fooLibTarget); HashCode lib2Key = hashes.get(fooLib2Target); DefaultTypeCoercerFactory typeCoercerFactory = new DefaultTypeCoercerFactory( ObjectMappers.newDefaultInstance()); parser = new Parser( new BroadcastEventListener(), new ParserConfig(cell.getBuckConfig()), typeCoercerFactory, new ConstructorArgMarshaller(typeCoercerFactory)); Files.write( testFooBuckFile, ("java_library(name = 'lib', deps = [], visibility=['PUBLIC'])\n" + "java_library(name = 'lib2', deps = [':lib'], visibility=['PUBLIC'])\n").getBytes(UTF_8)); hashes = buildTargetGraphAndGetHashCodes( parser, fooLibTarget, fooLib2Target); assertEquals(libKey, hashes.get(fooLibTarget)); assertNotEquals(lib2Key, hashes.get(fooLib2Target)); } @Test public void loadedBuildFileWithoutLoadedTargetNodesLoadsAdditionalTargetNodes() throws IOException, InterruptedException, BuildFileParseException, BuildTargetException { tempDir.newFolder("foo"); Path testFooBuckFile = tempDir.newFile("foo/BUCK").toRealPath(); Files.write( testFooBuckFile, "java_library(name = 'lib1')\njava_library(name = 'lib2')\n".getBytes(UTF_8)); BuildTarget fooLib1Target = BuildTarget.builder(cellRoot, "//foo", "lib1").build(); BuildTarget fooLib2Target = BuildTarget.builder(cellRoot, "//foo", "lib2").build(); // First, only load one target from the build file so the file is parsed, but only one of the // TargetNodes will be cached. 
TargetNode<?> targetNode = parser.getTargetNode( eventBus, cell, false, executorService, fooLib1Target); assertThat(targetNode.getBuildTarget(), equalTo(fooLib1Target)); // Now, try to load the entire build file and get all TargetNodes. ImmutableSet<TargetNode<?>> targetNodes = parser.getAllTargetNodes( eventBus, cell, false, executorService, testFooBuckFile); assertThat(targetNodes.size(), equalTo(2)); assertThat( FluentIterable.from(targetNodes) .transform( new Function<TargetNode<?>, BuildTarget>() { @Override public BuildTarget apply(TargetNode<?> targetNode) { return targetNode.getBuildTarget(); } }) .toList(), hasItems(fooLib1Target, fooLib2Target)); } @Test public void getOrLoadTargetNodeRules() throws IOException, InterruptedException, BuildFileParseException, BuildTargetException { tempDir.newFolder("foo"); Path testFooBuckFile = tempDir.newFile("foo/BUCK"); Files.write( testFooBuckFile, "java_library(name = 'lib')\n".getBytes(UTF_8)); BuildTarget fooLibTarget = BuildTarget.builder(cellRoot, "//foo", "lib").build(); TargetNode<?> targetNode = parser.getTargetNode( eventBus, cell, false, executorService, fooLibTarget); assertThat(targetNode.getBuildTarget(), equalTo(fooLibTarget)); SortedMap<String, Object> rules = parser.getRawTargetNode( eventBus, cell, false, executorService, targetNode); assertThat(rules, Matchers.hasKey("name")); assertThat( (String) rules.get("name"), equalTo(targetNode.getBuildTarget().getShortName())); } @Test public void whenBuildFileContainsSourcesUnderSymLinkNewSourcesNotAddedUntilCacheCleaned() throws Exception { // This test depends on creating symbolic links which we cannot do on Windows. assumeTrue(Platform.detect() != Platform.WINDOWS); tempDir.newFolder("bar"); tempDir.newFile("bar/Bar.java"); tempDir.newFolder("foo"); Path rootPath = tempDir.getRoot().toRealPath(); Files.createSymbolicLink(rootPath.resolve("foo/bar"), rootPath.resolve("bar")); Path testBuckFile = rootPath.resolve("foo").resolve("BUCK"); Files.write( testBuckFile, "java_library(name = 'lib', srcs=glob(['bar/*.java']))\n".getBytes(UTF_8)); // Fetch //:lib to put it in cache. BuildTarget libTarget = BuildTarget.builder(cellRoot, "//foo", "lib").build(); Iterable<BuildTarget> buildTargets = ImmutableList.of(libTarget); { TargetGraph targetGraph = parser.buildTargetGraph( eventBus, cell, false, executorService, buildTargets); BuildRuleResolver resolver = buildActionGraph(eventBus, targetGraph); JavaLibrary libRule = (JavaLibrary) resolver.requireRule(libTarget); assertEquals(ImmutableSet.of(Paths.get("foo/bar/Bar.java")), libRule.getJavaSrcs()); } tempDir.newFile("bar/Baz.java"); WatchEvent<Path> createEvent = createPathEvent( Paths.get("bar/Baz.java"), StandardWatchEventKinds.ENTRY_CREATE); parser.onFileSystemChange(createEvent); { TargetGraph targetGraph = parser.buildTargetGraph( eventBus, cell, false, executorService, buildTargets); BuildRuleResolver resolver = buildActionGraph(eventBus, targetGraph); JavaLibrary libRule = (JavaLibrary) resolver.requireRule(libTarget); assertEquals( ImmutableSet.of(Paths.get("foo/bar/Bar.java"), Paths.get("foo/bar/Baz.java")), libRule.getJavaSrcs()); } } @Test public void whenBuildFileContainsSourcesUnderSymLinkDeletedSourcesNotRemovedUntilCacheCleaned() throws Exception { // This test depends on creating symbolic links which we cannot do on Windows. 
assumeTrue(Platform.detect() != Platform.WINDOWS); tempDir.newFolder("bar"); tempDir.newFile("bar/Bar.java"); tempDir.newFolder("foo"); Path bazSourceFile = tempDir.newFile("bar/Baz.java"); Path rootPath = tempDir.getRoot().toRealPath(); Files.createSymbolicLink(rootPath.resolve("foo/bar"), rootPath.resolve("bar")); Path testBuckFile = rootPath.resolve("foo").resolve("BUCK"); Files.write( testBuckFile, "java_library(name = 'lib', srcs=glob(['bar/*.java']))\n".getBytes(UTF_8)); // Fetch //:lib to put it in cache. BuildTarget libTarget = BuildTarget.builder(cellRoot, "//foo", "lib").build(); Iterable<BuildTarget> buildTargets = ImmutableList.of(libTarget); { TargetGraph targetGraph = parser.buildTargetGraph( eventBus, cell, false, executorService, buildTargets); BuildRuleResolver resolver = buildActionGraph(eventBus, targetGraph); JavaLibrary libRule = (JavaLibrary) resolver.requireRule(libTarget); assertEquals( ImmutableSortedSet.of(Paths.get("foo/bar/Bar.java"), Paths.get("foo/bar/Baz.java")), libRule.getJavaSrcs()); } Files.delete(bazSourceFile); WatchEvent<Path> deleteEvent = createPathEvent( Paths.get("bar/Baz.java"), StandardWatchEventKinds.ENTRY_DELETE); parser.onFileSystemChange(deleteEvent); { TargetGraph targetGraph = parser.buildTargetGraph( eventBus, cell, false, executorService, buildTargets); BuildRuleResolver resolver = buildActionGraph(eventBus, targetGraph); JavaLibrary libRule = (JavaLibrary) resolver.requireRule(libTarget); assertEquals( ImmutableSet.of(Paths.get("foo/bar/Bar.java")), libRule.getJavaSrcs()); } } @Test public void whenSymlinksForbiddenThenParseFailsOnSymlinkInSources() throws Exception { // This test depends on creating symbolic links which we cannot do on Windows. assumeTrue(Platform.detect() != Platform.WINDOWS); thrown.expect(HumanReadableException.class); thrown.expectMessage( "Target //foo:lib contains input files under a path which contains a symbolic link (" + "{foo/bar=bar}). To resolve this, use separate rules and declare dependencies instead of " + "using symbolic links."); BuckConfig config = FakeBuckConfig.builder() .setFilesystem(filesystem) .setSections( "[project]", "allow_symlinks = forbid") .build(); cell = new TestCellBuilder().setBuckConfig(config).setFilesystem(filesystem).build(); tempDir.newFolder("bar"); tempDir.newFile("bar/Bar.java"); tempDir.newFolder("foo"); Path rootPath = tempDir.getRoot().toRealPath(); Files.createSymbolicLink(rootPath.resolve("foo/bar"), rootPath.resolve("bar")); Path testBuckFile = rootPath.resolve("foo").resolve("BUCK"); Files.write( testBuckFile, "java_library(name = 'lib', srcs=glob(['bar/*.java']))\n".getBytes(UTF_8)); BuildTarget libTarget = BuildTarget.builder(cellRoot, "//foo", "lib").build(); Iterable<BuildTarget> buildTargets = ImmutableList.of(libTarget); parser.buildTargetGraph( eventBus, cell, false, executorService, buildTargets); } @Test public void whenSymlinksAreInReadOnlyPathsCachingIsNotDisabled() throws Exception { // This test depends on creating symbolic links which we cannot do on Windows. 
assumeTrue(Platform.detect() != Platform.WINDOWS); Path rootPath = tempDir.getRoot().toRealPath(); BuckConfig config = FakeBuckConfig.builder() .setFilesystem(filesystem) .setSections( "[project]", "read_only_paths = " + rootPath.resolve("foo")) .build(); cell = new TestCellBuilder().setBuckConfig(config).setFilesystem(filesystem).build(); tempDir.newFolder("bar"); tempDir.newFile("bar/Bar.java"); tempDir.newFolder("foo"); Files.createSymbolicLink(rootPath.resolve("foo/bar"), rootPath.resolve("bar")); Path testBuckFile = rootPath.resolve("foo").resolve("BUCK"); Files.write( testBuckFile, "java_library(name = 'lib', srcs=glob(['bar/*.java']))\n".getBytes(UTF_8)); BuildTarget libTarget = BuildTarget.builder(cellRoot, "//foo", "lib").build(); Iterable<BuildTarget> buildTargets = ImmutableList.of(libTarget); parser.buildTargetGraph( eventBus, cell, false, executorService, buildTargets); DaemonicParserState permState = parser.getPermState(); for (BuildTarget target : buildTargets) { assertTrue(permState .getOrCreateNodeCache(TargetNode.class) .lookupComputedNode(cell, target) .isPresent()); } } @Test public void buildTargetHashCodePopulatesCorrectly() throws Exception { tempDir.newFolder("foo"); Path testFooBuckFile = tempDir.newFile("foo/BUCK"); Files.write( testFooBuckFile, "java_library(name = 'lib', visibility=['PUBLIC'])\n".getBytes(UTF_8)); BuildTarget fooLibTarget = BuildTarget.builder(cellRoot, "//foo", "lib").build(); // We can't precalculate the hash, since it depends on the buck version. Check for the presence // of a hash for the right key. HashCode hashCode = buildTargetGraphAndGetHashCodes(parser, fooLibTarget).get(fooLibTarget); assertNotNull(hashCode); } @Test public void readConfigReadsConfig() throws Exception { Path buckFile = cellRoot.resolve("BUCK"); BuildTarget buildTarget = BuildTarget.of( UnflavoredBuildTarget.of( filesystem.getRootPath(), Optional.<String>absent(), "//", "cake")); Files.write( buckFile, Joiner.on("").join( ImmutableList.of( "genrule(\n" + "name = 'cake',\n" + "out = read_config('foo', 'bar', 'default') + '.txt',\n" + "cmd = 'touch $OUT'\n" + ")\n")) .getBytes(UTF_8)); BuckConfig config = FakeBuckConfig.builder() .setFilesystem(filesystem) .build(); Cell cell = new TestCellBuilder().setFilesystem(filesystem).setBuckConfig(config).build(); TargetNode<GenruleDescription.Arg> node = parser .getTargetNode(eventBus, cell, false, executorService, buildTarget) .castArg(GenruleDescription.Arg.class) .get(); assertThat(node.getConstructorArg().out, is(equalTo("default.txt"))); config = FakeBuckConfig.builder() .setSections(ImmutableMap.of("foo", ImmutableMap.of("bar", "value"))) .setFilesystem(filesystem) .build(); cell = new TestCellBuilder().setFilesystem(filesystem).setBuckConfig(config).build(); node = parser .getTargetNode(eventBus, cell, false, executorService, buildTarget) .castArg(GenruleDescription.Arg.class) .get(); assertThat(node.getConstructorArg().out, is(equalTo("value.txt"))); config = FakeBuckConfig.builder() .setFilesystem(filesystem) .setSections(ImmutableMap.of("foo", ImmutableMap.of("bar", "other value"))) .build(); cell = new TestCellBuilder().setFilesystem(filesystem).setBuckConfig(config).build(); node = parser .getTargetNode(eventBus, cell, false, executorService, buildTarget) .castArg(GenruleDescription.Arg.class) .get(); assertThat(node.getConstructorArg().out, is(equalTo("other value.txt"))); } @Test public void whenBuckConfigEntryChangesThenCachedRulesAreInvalidated() throws Exception { Path buckFile = cellRoot.resolve("BUCK"); 
Files.write( buckFile, Joiner.on("").join( ImmutableList.of( "read_config('foo', 'bar')\n", "genrule(name = 'cake', out = 'file.txt', cmd = 'touch $OUT')\n")) .getBytes(UTF_8)); BuckConfig config = FakeBuckConfig.builder() .setSections(ImmutableMap.of("foo", ImmutableMap.of("bar", "value"))) .setFilesystem(filesystem) .build(); Cell cell = new TestCellBuilder().setFilesystem(filesystem).setBuckConfig(config).build(); parser.getAllTargetNodes(eventBus, cell, false, executorService, buckFile); // Call filterAllTargetsInProject to request cached rules. config = FakeBuckConfig.builder() .setFilesystem(filesystem) .setSections(ImmutableMap.of("foo", ImmutableMap.of("bar", "other value"))) .build(); cell = new TestCellBuilder().setFilesystem(filesystem).setBuckConfig(config).build(); parser.getAllTargetNodes(eventBus, cell, false, executorService, buckFile); // Test that the second parseBuildFile call repopulated the cache. assertEquals("Should have invalidated.", 2, counter.calls); } @Test public void whenBuckConfigAddedThenCachedRulesAreInvalidated() throws Exception { Path buckFile = cellRoot.resolve("BUCK"); Files.write( buckFile, Joiner.on("").join( ImmutableList.of( "read_config('foo', 'bar')\n", "genrule(name = 'cake', out = 'file.txt', cmd = 'touch $OUT')\n")) .getBytes(UTF_8)); BuckConfig config = FakeBuckConfig.builder() .setFilesystem(filesystem) .build(); Cell cell = new TestCellBuilder().setFilesystem(filesystem).setBuckConfig(config).build(); parser.getAllTargetNodes(eventBus, cell, false, executorService, buckFile); // Call filterAllTargetsInProject to request cached rules. config = FakeBuckConfig.builder() .setFilesystem(filesystem) .setSections(ImmutableMap.of("foo", ImmutableMap.of("bar", "other value"))) .build(); cell = new TestCellBuilder().setFilesystem(filesystem).setBuckConfig(config).build(); parser.getAllTargetNodes(eventBus, cell, false, executorService, buckFile); // Test that the second parseBuildFile call repopulated the cache. assertEquals("Should have invalidated.", 2, counter.calls); } @Test public void whenBuckConfigEntryRemovedThenCachedRulesAreInvalidated() throws Exception { Path buckFile = cellRoot.resolve("BUCK"); Files.write( buckFile, Joiner.on("").join( ImmutableList.of( "read_config('foo', 'bar')\n", "genrule(name = 'cake', out = 'file.txt', cmd = 'touch $OUT')\n")) .getBytes(UTF_8)); BuckConfig config = FakeBuckConfig.builder() .setSections(ImmutableMap.of("foo", ImmutableMap.of("bar", "value"))) .setFilesystem(filesystem) .build(); Cell cell = new TestCellBuilder().setFilesystem(filesystem).setBuckConfig(config).build(); parser.getAllTargetNodes(eventBus, cell, false, executorService, buckFile); // Call filterAllTargetsInProject to request cached rules. config = FakeBuckConfig.builder() .setFilesystem(filesystem) .build(); cell = new TestCellBuilder().setFilesystem(filesystem).setBuckConfig(config).build(); parser.getAllTargetNodes(eventBus, cell, false, executorService, buckFile); // Test that the second parseBuildFile call repopulated the cache. 
assertEquals("Should have invalidated.", 2, counter.calls); } @Test public void whenUnrelatedBuckConfigEntryChangesThenCachedRulesAreNotInvalidated() throws Exception { Path buckFile = cellRoot.resolve("BUCK"); Files.write( buckFile, Joiner.on("").join( ImmutableList.of( "read_config('foo', 'bar')\n", "genrule(name = 'cake', out = 'file.txt', cmd = 'touch $OUT')\n")) .getBytes(UTF_8)); BuckConfig config = FakeBuckConfig.builder() .setSections( ImmutableMap.of( "foo", ImmutableMap.of( "bar", "value", "dead", "beef"))) .setFilesystem(filesystem) .build(); Cell cell = new TestCellBuilder().setFilesystem(filesystem).setBuckConfig(config).build(); parser.getAllTargetNodes(eventBus, cell, false, executorService, buckFile); // Call filterAllTargetsInProject to request cached rules. config = FakeBuckConfig.builder() .setSections( ImmutableMap.of( "foo", ImmutableMap.of( "bar", "value", "dead", "beef different"))) .setFilesystem(filesystem) .build(); cell = new TestCellBuilder().setFilesystem(filesystem).setBuckConfig(config).build(); parser.getAllTargetNodes(eventBus, cell, false, executorService, buckFile); // Test that the second parseBuildFile call repopulated the cache. assertEquals("Should not have invalidated.", 1, counter.calls); } @Test(timeout = 20000) public void resolveTargetSpecsDoesNotHangOnException() throws Exception { Path buckFile = cellRoot.resolve("foo/BUCK"); Files.createDirectories(buckFile.getParent()); Files.write(buckFile, "# empty".getBytes(UTF_8)); buckFile = cellRoot.resolve("bar/BUCK"); Files.createDirectories(buckFile.getParent()); Files.write( buckFile, "I do not parse as python".getBytes(UTF_8)); thrown.expect(BuildFileParseException.class); thrown.expectMessage("Parse error for build file"); thrown.expectMessage(Paths.get("bar/BUCK").toString()); parser.resolveTargetSpecs( eventBus, cell, false, executorService, ImmutableList.of( TargetNodePredicateSpec.of( Predicates.alwaysTrue(), BuildFileSpec.fromRecursivePath( Paths.get("bar"), cell.getRoot())), TargetNodePredicateSpec.of( Predicates.alwaysTrue(), BuildFileSpec.fromRecursivePath( Paths.get("foo"), cell.getRoot()))), SpeculativeParsing.of(true), ParserConfig.ApplyDefaultFlavorsMode.ENABLED); } @Test public void resolveTargetSpecsPreservesOrder() throws Exception { BuildTarget foo = BuildTargetFactory.newInstance(filesystem, "//foo:foo"); Path buckFile = cellRoot.resolve("foo/BUCK"); Files.createDirectories(buckFile.getParent()); Files.write( buckFile, "genrule(name='foo', out='foo', cmd='foo')".getBytes(UTF_8)); BuildTarget bar = BuildTargetFactory.newInstance(filesystem, "//bar:bar"); buckFile = cellRoot.resolve("bar/BUCK"); Files.createDirectories(buckFile.getParent()); Files.write( buckFile, "genrule(name='bar', out='bar', cmd='bar')".getBytes(UTF_8)); ImmutableList<ImmutableSet<BuildTarget>> targets = parser.resolveTargetSpecs( eventBus, cell, false, executorService, ImmutableList.of( TargetNodePredicateSpec.of( Predicates.alwaysTrue(), BuildFileSpec.fromRecursivePath( Paths.get("bar"), cell.getRoot())), TargetNodePredicateSpec.of( Predicates.alwaysTrue(), BuildFileSpec.fromRecursivePath( Paths.get("foo"), cell.getRoot()))), SpeculativeParsing.of(true), ParserConfig.ApplyDefaultFlavorsMode.ENABLED); assertThat( targets, equalTo(ImmutableList.of(ImmutableSet.of(bar), ImmutableSet.of(foo)))); targets = parser.resolveTargetSpecs( eventBus, cell, false, executorService, ImmutableList.of( TargetNodePredicateSpec.of( Predicates.alwaysTrue(), BuildFileSpec.fromRecursivePath( Paths.get("foo"), cell.getRoot())), 
TargetNodePredicateSpec.of( Predicates.alwaysTrue(), BuildFileSpec.fromRecursivePath( Paths.get("bar"), cell.getRoot()))), SpeculativeParsing.of(true), ParserConfig.ApplyDefaultFlavorsMode.ENABLED); assertThat( targets, equalTo(ImmutableList.of(ImmutableSet.of(foo), ImmutableSet.of(bar)))); } @Test public void defaultFlavorsInRuleArgsAppliedToTarget() throws Exception { // We depend on Xcode platforms for this test. assumeTrue(Platform.detect() == Platform.MACOS); Path buckFile = cellRoot.resolve("lib/BUCK"); Files.createDirectories(buckFile.getParent()); Files.write( buckFile, ("cxx_library(" + " name = 'lib', " + " srcs=glob(['*.c']), " + " defaults={'platform':'iphonesimulator-x86_64'}" + ")").getBytes(UTF_8)); ImmutableSet<BuildTarget> result = parser.buildTargetGraphForTargetNodeSpecs( eventBus, cell, false, executorService, ImmutableList.of( AbstractBuildTargetSpec.from( BuildTarget.builder(cellRoot, "//lib", "lib").build())), /* ignoreBuckAutodepsFiles */ false, ParserConfig.ApplyDefaultFlavorsMode.ENABLED).getBuildTargets(); assertThat( result, hasItems( BuildTarget.builder(cellRoot, "//lib", "lib") .addFlavors( ImmutableFlavor.of("iphonesimulator-x86_64"), ImmutableFlavor.of("static")) .build())); } @Test public void defaultFlavorsInConfigAppliedToTarget() throws Exception { // We depend on Xcode platforms for this test. assumeTrue(Platform.detect() == Platform.MACOS); Path buckFile = cellRoot.resolve("lib/BUCK"); Files.createDirectories(buckFile.getParent()); Files.write( buckFile, ("cxx_library(" + " name = 'lib', " + " srcs=glob(['*.c']) " + ")").getBytes(UTF_8)); BuckConfig config = FakeBuckConfig.builder() .setFilesystem(filesystem) .setSections( ImmutableMap.of( "defaults.cxx_library", ImmutableMap.of( "platform", "iphoneos-arm64", "type", "shared"))) .build(); cell = new TestCellBuilder().setFilesystem(filesystem).setBuckConfig(config).build(); ImmutableSet<BuildTarget> result = parser.buildTargetGraphForTargetNodeSpecs( eventBus, cell, false, executorService, ImmutableList.of( AbstractBuildTargetSpec.from( BuildTarget.builder(cellRoot, "//lib", "lib").build())), /* ignoreBuckAutodepsFiles */ false, ParserConfig.ApplyDefaultFlavorsMode.ENABLED).getBuildTargets(); assertThat( result, hasItems( BuildTarget.builder(cellRoot, "//lib", "lib") .addFlavors( ImmutableFlavor.of("iphoneos-arm64"), ImmutableFlavor.of("shared")) .build())); } @Test public void defaultFlavorsInArgsOverrideDefaultsFromConfig() throws Exception { // We depend on Xcode platforms for this test. 
assumeTrue(Platform.detect() == Platform.MACOS); Path buckFile = cellRoot.resolve("lib/BUCK"); Files.createDirectories(buckFile.getParent()); Files.write( buckFile, ("cxx_library(" + " name = 'lib', " + " srcs=glob(['*.c']), " + " defaults={'platform':'macosx-x86_64'}" + ")").getBytes(UTF_8)); BuckConfig config = FakeBuckConfig.builder() .setFilesystem(filesystem) .setSections( ImmutableMap.of( "defaults.cxx_library", ImmutableMap.of( "platform", "iphoneos-arm64", "type", "shared"))) .build(); cell = new TestCellBuilder().setFilesystem(filesystem).setBuckConfig(config).build(); ImmutableSet<BuildTarget> result = parser.buildTargetGraphForTargetNodeSpecs( eventBus, cell, false, executorService, ImmutableList.of( AbstractBuildTargetSpec.from( BuildTarget.builder(cellRoot, "//lib", "lib").build())), /* ignoreBuckAutodepsFiles */ false, ParserConfig.ApplyDefaultFlavorsMode.ENABLED).getBuildTargets(); assertThat( result, hasItems( BuildTarget.builder(cellRoot, "//lib", "lib") .addFlavors( ImmutableFlavor.of("macosx-x86_64"), ImmutableFlavor.of("shared")) .build())); } @Test public void testGetCacheReturnsSame() throws Exception { assertEquals( parser.getPermState().getOrCreateNodeCache(TargetNode.class), parser.getPermState().getOrCreateNodeCache(TargetNode.class)); assertNotEquals( parser.getPermState().getOrCreateNodeCache(TargetNode.class), parser.getPermState().getOrCreateNodeCache(Map.class)); } @Test public void groupsAreExpanded() throws Exception { Path buckFile = cellRoot.resolve("BUCK"); Files.createDirectories(buckFile.getParent()); Path groupsData = TestDataHelper.getTestDataScenario(this, "groups"); Files.copy(groupsData.resolve("BUCK.fixture"), buckFile); BuildTarget fooTarget = BuildTargetFactory.newInstance(cellRoot, "//:foo"); BuildTarget barTarget = BuildTargetFactory.newInstance(cellRoot, "//:bar"); TargetGraph targetGraph = parser.buildTargetGraph( eventBus, cell, false, executorService, ImmutableSet.of(barTarget)); assertThat(targetGraph.getGroupsContainingTarget(fooTarget).size(), is(2)); assertThat( targetGraph.get(fooTarget).isVisibleTo(targetGraph, targetGraph.get(barTarget)), is(true)); assertThat( targetGraph.get(barTarget).isVisibleTo(targetGraph, targetGraph.get(fooTarget)), is(false)); } @Test public void testVisibilityGetsChecked() throws Exception { Path visibilityData = TestDataHelper.getTestDataScenario(this, "visibility"); Path visibilityBuckFile = cellRoot.resolve("BUCK"); Path visibilitySubBuckFile = cellRoot.resolve("sub/BUCK"); Files.createDirectories(visibilityBuckFile.getParent()); Files.createDirectories(visibilitySubBuckFile.getParent()); Files.copy(visibilityData.resolve("BUCK.fixture"), visibilityBuckFile); Files.copy(visibilityData.resolve("sub/BUCK.fixture"), visibilitySubBuckFile); parser.buildTargetGraph( eventBus, cell, false, executorService, ImmutableSet.of(BuildTargetFactory.newInstance(cellRoot, "//:should_pass"))); parser.buildTargetGraph( eventBus, cell, false, executorService, ImmutableSet.of(BuildTargetFactory.newInstance(cellRoot, "//:should_pass2"))); try { parser.buildTargetGraph( eventBus, cell, false, executorService, ImmutableSet.of(BuildTargetFactory.newInstance(cellRoot, "//:should_fail"))); Assert.fail("did not expect to succeed parsing"); } catch (Exception e) { assertThat(e, instanceOf(HumanReadableException.class)); assertThat( e.getMessage(), containsString("//:should_fail depends on //sub:sub, which is not visible")); } } private BuildRuleResolver buildActionGraph(BuckEventBus eventBus, TargetGraph targetGraph) { return 
Preconditions.checkNotNull( ActionGraphCache.getFreshActionGraph(eventBus, targetGraph)).getResolver(); } /** * Populates the collection of known build targets that this Parser will use to construct an * action graph using all build files inside the given project root and returns an optionally * filtered set of build targets. * * @param filter if specified, applied to each rule in rules. All matching rules will be included * in the List returned by this method. If filter is null, then this method returns null. * @return The build targets in the project filtered by the given filter. */ public static synchronized ImmutableSet<BuildTarget> filterAllTargetsInProject( Parser parser, Cell cell, Predicate<TargetNode<?>> filter, BuckEventBus buckEventBus, ListeningExecutorService executor) throws BuildFileParseException, BuildTargetException, IOException, InterruptedException { return FluentIterable .from( parser.buildTargetGraphForTargetNodeSpecs( buckEventBus, cell, false, executor, ImmutableList.of( TargetNodePredicateSpec.of( filter, BuildFileSpec.fromRecursivePath( Paths.get(""), cell.getRoot()))), /* ignoreBuckAutodepsFiles */ false) .getTargetGraph().getNodes()) .filter(filter) .transform(HasBuildTarget.TO_TARGET) .toSet(); } private ImmutableMap<BuildTarget, HashCode> buildTargetGraphAndGetHashCodes( Parser parser, BuildTarget... buildTargets) throws Exception { // Build the target graph so we can access the hash code cache. ImmutableList<BuildTarget> buildTargetsList = ImmutableList.copyOf(buildTargets); TargetGraph targetGraph = parser.buildTargetGraph( eventBus, cell, false, executorService, buildTargetsList); ImmutableMap.Builder<BuildTarget, HashCode> toReturn = ImmutableMap.builder(); for (TargetNode<?> node : targetGraph.getNodes()) { toReturn.put(node.getBuildTarget(), node.getRawInputsHashCode()); } return toReturn.build(); } private static class ParseEventStartedCounter { int calls = 0; // We know that the ProjectBuildFileParser emits a Started event when it parses a build file. @Subscribe @SuppressWarnings("unused") public void call(ParseBuckFileEvent.Started parseEvent) { calls++; } } }
[ "\"PATH\"", "\"PATH\"", "\"PATH\"" ]
[]
[ "PATH" ]
[]
["PATH"]
java
1
0
octarine/install-octarine.go
// Copyright 2019 The Meshery Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package octarine import ( "io/ioutil" "math/rand" "os" "os/exec" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) var letters = []rune("abcdefghijklmnopqrstuvwxyz0123456789") const ( accMgrUsername = "meshery" ) func randSeq(n int) string { b := make([]rune, n) for i := range b { b[i] = letters[rand.Intn(len(letters))] } return string(b) } func (oClient *Client) createCpObjects() error { oClient.octarineControlPlane = os.Getenv("OCTARINE_CP") oClient.octarineAccMgrPword = os.Getenv("OCTARINE_ACC_MGR_PASSWD") oClient.octarineCreatorPword = os.Getenv("OCTARINE_CREATOR_PASSWD") oClient.octarineDeleterPword = os.Getenv("OCTARINE_DELETER_PASSWD") oClient.octarineDomain = os.Getenv("OCTARINE_DOMAIN") dockerUser, userVar := os.LookupEnv("OCTARINE_DOCKER_USERNAME") dockerEmail, emailVar := os.LookupEnv("OCTARINE_DOCKER_EMAIL") dockerPassword, passwordVar := os.LookupEnv("OCTARINE_DOCKER_PASSWORD") if userVar { os.Setenv("OCTARINE_DOCKER.USERNAME", dockerUser) logrus.Debugf("Docker user %s", dockerUser) } if emailVar { os.Setenv("OCTARINE_DOCKER.EMAIL", dockerEmail) logrus.Debugf("Docker email %s", dockerEmail) } if passwordVar { os.Setenv("OCTARINE_DOCKER.PASSWORD", dockerPassword) logrus.Debugf("Docker password %s", dockerPassword) } cmd := exec.Command("octactl", "login", "creator@octarine", oClient.octarineControlPlane, "--password", oClient.octarineCreatorPword) logrus.Debugf("Login to namespace octarine") err := cmd.Run() if err != nil { logrus.Errorf("Command finished with error: %v", err) return err } oClient.octarineAccount = "meshery-" + randSeq(6) cmd = exec.Command("octactl", "account", "create", oClient.octarineAccount, accMgrUsername, oClient.octarineAccMgrPword) logrus.Debugf("Creating account %s", oClient.octarineAccount) err = cmd.Run() if err != nil { logrus.Errorf("Command finished with error: %v", err) return err } cmd = exec.Command("octactl", "login", accMgrUsername+"@"+oClient.octarineAccount, oClient.octarineControlPlane, "--password", oClient.octarineAccMgrPword) logrus.Debugf("Login to namespace %s", oClient.octarineAccount) err = cmd.Run() if err != nil { logrus.Errorf("Command finished with error: %v", err) return err } cmd = exec.Command("octactl", "domain", "create", oClient.octarineDomain) logrus.Debugf("Creating domain %s in namespace %s", oClient.octarineDomain, oClient.octarineAccount) err = cmd.Run() if err != nil { logrus.Errorf("Command finished with error: %v", err) return err } return nil } func (oClient *Client) deleteCpObjects() error { cmd := exec.Command("octactl", "login", "deleter@octarine", oClient.octarineControlPlane, "--password", oClient.octarineDeleterPword) logrus.Debugf("Login as deleter to account octarine") err := cmd.Run() if err != nil { logrus.Errorf("Command finished with error: %v", err) return err } cmd = exec.Command("octactl", "account", "delete", oClient.octarineAccount, "--force") logrus.Debugf("Deleting account %s", oClient.octarineAccount) err = 
cmd.Run() if err != nil { logrus.Errorf("Command finished with error: %v", err) return err } return nil } // For this function to work, OCTARINE_DOCKER_USERNAME, OCTARINE_DOCKER_EMAIL, OCTARINE_DOCKER_PASSWORD (base64) must be set. func (oClient *Client) getOctarineDataplaneYAML(namespace string) (string, error) { cmd := exec.Command("octactl", "dataplane", "install", "--k8s-namespace", namespace, oClient.octarineDomain) logrus.Debugf("Creating dataplane yaml for deployment %s in namespace %s", oClient.octarineDomain, namespace) dp, err := cmd.Output() if err != nil { logrus.Errorf("Command finished with error: %v", err) return "", err } return string(dp), nil } const ( bookInfoInstallFile = "/bookinfo.yaml" ) func (oClient *Client) getOctarineYAMLs(namespace string) (string, error) { dp, err := oClient.getOctarineDataplaneYAML(namespace) if err != nil { err = errors.Wrap(err, "unable to create dataplane yaml") logrus.Error(err) return "", err } return dp, nil } func (oClient *Client) getBookInfoAppYAML() (string, error) { b, err := ioutil.ReadFile(bookInfoInstallFile) if err != nil { err = errors.Wrap(err, "Failed to read bookinfo.yaml") logrus.Error(err) return "", err } str := string(b) return str, nil }
[ "\"OCTARINE_CP\"", "\"OCTARINE_ACC_MGR_PASSWD\"", "\"OCTARINE_CREATOR_PASSWD\"", "\"OCTARINE_DELETER_PASSWD\"", "\"OCTARINE_DOMAIN\"" ]
[]
[ "OCTARINE_CREATOR_PASSWD", "OCTARINE_CP", "OCTARINE_DELETER_PASSWD", "OCTARINE_DOMAIN", "OCTARINE_ACC_MGR_PASSWD" ]
[]
["OCTARINE_CREATOR_PASSWD", "OCTARINE_CP", "OCTARINE_DELETER_PASSWD", "OCTARINE_DOMAIN", "OCTARINE_ACC_MGR_PASSWD"]
go
5
0
comfort_gallery/settings.py
""" Django settings for comfort_gallery project. Generated by 'django-admin startproject' using Django 3.1.2. For more information on this file, see https://docs.djangoproject.com/en/3.1/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/3.1/ref/settings/ """ from pathlib import Path import os import cloudinary import django_heroku import dj_database_url #from decouple import Csv MODE=os.environ.get('MODE') #SECRET_KEY = config('SECRET_KEY') #DEBUG = config('DEBUG', default=False, cast=bool) # Build paths inside the project like this: BASE_DIR / 'subdir'. BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = os.environ.get('SECRET_KEY') # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = os.environ.get('ALLOWED_HOSTS') # Application definition INSTALLED_APPS = [ 'gallery', 'bootstrap3', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'whitenoise.middleware.WhiteNoiseMiddleware', ] ROOT_URLCONF = 'comfort_gallery.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'comfort_gallery.wsgi.application' # Database # https://docs.djangoproject.com/en/3.1/ref/settings/#databases if os.environ.get('MODE')=="dev": DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': os.environ.get('DB_NAME'), 'USER': os.environ.get('DB_USER'), 'PASSWORD': os.environ.get('DB_PASSWORD'), 'HOST': os.environ.get('DB_HOST','127.0.0.1'), 'PORT': os.environ.get('DB_PORT'), } } else: DATABASES = { 'default': dj_database_url.config(default=os.environ.get('DATABASE_URL')) } db_from_env = dj_database_url.config(conn_max_age=500) DATABASES['default'].update(db_from_env) # Password validation # https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/3.1/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'Africa/Nairobi' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/3.1/howto/static-files/ STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles') STATIC_URL = '/static/' # 
Extra places for collectstatic to find static files. STATICFILES_DIRS = ( os.path.join(BASE_DIR, 'static'), ) cloudinary.config( cloud_name = os.environ.get('CLOUDINARY_CLOUD_NAME'), api_key = os.environ.get('CLOUDINARY_API_KEY'), api_secret = os.environ.get('CLOUDINARY_API_SECRET'), ) STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage' # configuring the location for media MEDIA_URL = '/media/' MEDIA_ROOT = os.path.join(BASE_DIR, 'media') # Configure Django App for Heroku. django_heroku.settings(locals())
[]
[]
[ "CLOUDINARY_API_KEY", "ALLOWED_HOSTS", "DB_PASSWORD", "DB_HOST", "DB_PORT", "DATABASE_URL", "CLOUDINARY_CLOUD_NAME", "DB_NAME", "MODE", "SECRET_KEY", "CLOUDINARY_API_SECRET", "DB_USER" ]
[]
["CLOUDINARY_API_KEY", "ALLOWED_HOSTS", "DB_PASSWORD", "DB_HOST", "DB_PORT", "DATABASE_URL", "CLOUDINARY_CLOUD_NAME", "DB_NAME", "MODE", "SECRET_KEY", "CLOUDINARY_API_SECRET", "DB_USER"]
python
12
0
core/chaincode/platforms/golang/platform.go
package golang import ( "archive/tar" "fmt" pb "github.com/hyperledger/fabric/protos" "net/url" "os" "path/filepath" ) type Platform struct { } // Returns whether the given file or directory exists or not func pathExists(path string) (bool, error) { _, err := os.Stat(path) if err == nil { return true, nil } if os.IsNotExist(err) { return false, nil } return true, err } func (self *Platform) ValidateSpec(spec *pb.ChaincodeSpec) error { url, err := url.Parse(spec.ChaincodeID.Path) if err != nil || url == nil { return fmt.Errorf("invalid path: %s", err) } //we have no real good way of checking existence of remote urls except by downloading and testing //which we do later anyway. But we *can* - and *should* - test for existence of local paths. //Treat empty scheme as a local filesystem path if url.Scheme == "" { pathToCheck := filepath.Join(os.Getenv("GOPATH"), "src", spec.ChaincodeID.Path) exists, err := pathExists(pathToCheck) if err != nil { return fmt.Errorf("Error validating chaincode path: %s", err) } if !exists { return fmt.Errorf("Path to chaincode does not exist: %s", spec.ChaincodeID.Path) } } return nil } func (self *Platform) WritePackage(spec *pb.ChaincodeSpec, tw *tar.Writer) error { var err error spec.ChaincodeID.Name, err = generateHashcode(spec, tw) if err != nil { return err } err = writeChaincodePackage(spec, tw) if err != nil { return err } return nil }
[ "\"GOPATH\"" ]
[]
[ "GOPATH" ]
[]
["GOPATH"]
go
1
0
dynamo.py
#! /usr/bin/env python from __future__ import print_function import json import os import pprint import sys import traceback import boto3 import botocore import tables def get_client(aws_access_key_id=None, aws_secret_access_key=None, aws_session_token=None, region_name=None, profile_name=None): """Return a DynamoDB ServiceResource. Use a profile name from ~/.aws/credentials if using 'profile_name'. """ _session = boto3.session.Session( aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, aws_session_token=aws_session_token, region_name=region_name, profile_name=profile_name ) return _session.resource('dynamodb') def create_tables(client): return tables.create_all(client) def batch_load_sampledata(client): sampledatadir = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'sampledata' ) responses = [] for _fn in os.listdir(sampledatadir): with open(os.path.join(sampledatadir, _fn)) as j: items = json.load(j) try: response = client.batch_write_item( RequestItems=items) except botocore.exceptions.ClientError as err: response = err responses.append(response) return responses def get_item(table, **kw): return table.get_item( Key=kw ) def __create(args): print('--- Creating tables ---') pprint.pprint(tables.create_all(args.client), indent=2) def __load(args): print('--- Writing data ---') pprint.pprint(batch_load_sampledata(args.client), indent=2) def __delete(args): print('--- Deleting tables ---') pprint.pprint(tables.delete_all(args.client), indent=2) def __get_forum(args): print('--- Getting Forum data ---') item = get_item(args.client.Table('Forum'), Name=args.Name) if 'Item' in item: pprint.pprint(item['Item'], indent=2) else: pprint.pprint(item, indent=2) def __get_reply(args): print('--- Getting Reply data ---') item = get_item( args.client.Table('Reply'), Id=args.Id, ReplyDateTime=args.ReplyDateTime ) if 'Item' in item: pprint.pprint(item['Item'], indent=2) else: pprint.pprint(item, indent=2) def __get_thread(args): print('--- Getting Thread data ---') item = get_item( args.client.Table('Thread'), ForumName=args.ForumName, Subject=args.Subject ) if 'Item' in item: pprint.pprint(item['Item'], indent=2) else: pprint.pprint(item, indent=2) def cli(): import argparse parser = argparse.ArgumentParser() parser.add_argument( '--profile', '-p', default=os.getenv('TRY_DYNAMO_PROFILE'), ) subparsers = parser.add_subparsers() # create create_parser = subparsers.add_parser( 'create', description='Create the dynamodb tables', ) create_parser.set_defaults(func=__create) # delete create_parser = subparsers.add_parser( 'delete', description='Delete the dynamodb tables', ) create_parser.set_defaults(func=__delete) # load load_parser = subparsers.add_parser( 'load', description='Perform batch load operation with sampledata/', ) load_parser.set_defaults(func=__load) # get-forum get_forum_parser = subparsers.add_parser( 'get-forum', description='Fetch data from the Forum table in dynamodb', ) get_forum_parser.add_argument( 'Name', help='Name of the Forum to get' ) get_forum_parser.set_defaults(func=__get_forum) # get-reply get_reply_parser = subparsers.add_parser( 'get-reply', description='Fetch data from the Reply table in dynamodb' ) get_reply_parser.add_argument( 'Id', help='Id of the Reply to get' ) get_reply_parser.add_argument( 'ReplyDateTime', help='Id of the Reply to get' ) get_reply_parser.set_defaults(func=__get_reply) # get-thread get_thread_parser = subparsers.add_parser( 'get-thread', description='Fetch data from the Thread table in dynamodb', ) 
get_thread_parser.set_defaults(func=__get_thread) get_thread_parser.add_argument( 'ForumName', help='ForumName of the Thread to get.' ) get_thread_parser.add_argument( 'Subject', help='Subject of the Thread to get.' ) args = parser.parse_args() args.client = get_client(profile_name=args.profile) args.func(args) if __name__ == '__main__': cli()
[]
[]
[ "TRY_DYNAMO_PROFILE" ]
[]
["TRY_DYNAMO_PROFILE"]
python
1
0
Packs/CovalenceForSecurityProviders/Integrations/CovalenceForSecurityProviders/CovalenceForSecurityProviders.py
import os import requests import json import traceback import dateparser from datetime import datetime, timedelta from bs4 import BeautifulSoup from CommonServerPython import * from typing import List import demistomock as demisto DATE_FORMAT = '%Y-%m-%dT%H:%M:%S' HOST = demisto.params().get('host') BROKER = argToBoolean(demisto.params().get('broker', False)) USERNAME = demisto.params().get('credentials')['identifier'] PASSWORD = demisto.params().get('credentials')['password'] VERIFY_SSL = demisto.params().get('verify_ssl') TIMEOUT = int(demisto.params().get('timeout')) FIRST_RUN_TIME_RANGE = int(demisto.params().get('first_run_time_range').strip()) FETCH_LIMIT = int(demisto.params().get('fetch_limit')) PROXY = demisto.params().get('proxy') if not demisto.params().get('proxy', False): del os.environ['HTTP_PROXY'] del os.environ['HTTPS_PROXY'] del os.environ['http_proxy'] del os.environ['https_proxy'] def find_covs(client_name): url = f'https://{HOST}/index' r = requests.get(url, verify=VERIFY_SSL) covs = [] soup = BeautifulSoup(r.text, 'html.parser') for link in soup.find_all('a'): if client_name == link.contents[0]: href = link.get('href', '') if href: covs.append(href.split('/index/', 1)[-1]) return covs def build_host(host): host = host.rstrip('/') if not host.startswith('https:') and not host.startswith('http:'): host = 'https://' + host if host.startswith('https:') and not host.endswith('/CovalenceWebUI/services'): host += '/CovalenceWebUI/services' elif not host.endswith('/services'): host += '/services' return host def login(host=HOST, cov_id=None, username=USERNAME, password=PASSWORD, verify_ssl=VERIFY_SSL): if not username: raise Exception('Username must be supplied') if not password: raise Exception('Password must be supplied') if not host: raise Exception('Host must be supplied') host = build_host(host) if not verify_ssl: # Disable the warnings if we're not verifying ssl import urllib3 urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) s = requests.Session() if BROKER and cov_id: url = f'https://{HOST}/index/{cov_id}' s.get(url, verify=verify_ssl) p = {'username': username, 'password': password} r = s.post(host + '/rest/login', data=p, verify=verify_ssl) if 200 != r.status_code: raise Exception("Failed to login to %s - %d" % (host, r.status_code)) if not s.cookies: raise Exception("Failed to retrieve cookie") return s def send_request(method, api_endpoint, target_org=None, host=HOST, headers=None, params=None, data=None, json=None): cov_ids = [] BROKER = argToBoolean(demisto.params().get('broker', False)) if BROKER: if target_org: cov_ids = find_covs(target_org) if not cov_ids: raise ValueError(f'Unknown organization {target_org}') else: raise ValueError('Target organization is required in broker mode') else: cov_ids.append(None) result = [] for cov_id in cov_ids: s = login(cov_id=cov_id) host = build_host(host) url = f'{host}{api_endpoint}' req = requests.Request(method, url, headers=headers, params=params, data=data, json=json) prepped = s.prepare_request(req) try: resp = s.send(prepped, stream=None, verify=VERIFY_SSL, proxies=PROXY, cert=None, timeout=TIMEOUT ) resp.raise_for_status() except Exception: return_error('Error in API call [%d] - %s' % (resp.status_code, resp.reason)) else: # when having several covs # merging each response from each covs into one if isinstance(resp.json(), dict): result.append(resp.json()) elif isinstance(resp.json(), list): result = result + resp.json() else: result.append(resp.json()) return result def fetch_incidents(last_run, 
first_run_time_range): target_orgs = [] if BROKER: orgs = list_org() for org in orgs: target_orgs.append(org['org_name']) else: target_orgs.append(None) next_run = {} incidents = [] for target_org in target_orgs: if target_org: last_fetch = last_run.get(f'{target_org}_last_fetch', None) last_alert_id = last_run.get(f'{target_org}_last_alert_id', None) else: last_fetch = last_run.get('last_fetch', None) last_alert_id = last_run.get('last_alert_id', None) alert_time_max = datetime.utcnow() if last_fetch is None: alert_time_min = alert_time_max - timedelta(days=first_run_time_range) else: alert_time_min = dateparser.parse(last_fetch) # type: ignore assert alert_time_min is not None cov_alerts = list_alerts(target_org=target_org, max_count=FETCH_LIMIT, alert_time_min=alert_time_min.strftime(DATE_FORMAT), alert_time_max=alert_time_max.strftime(DATE_FORMAT), details='true') latest_created_time = alert_time_min for a in cov_alerts: if a['id'] != last_alert_id: created_time = datetime.utcfromtimestamp(a.get('createdTime', 0)) created_time_str = created_time.strftime(DATE_FORMAT) if BROKER: incident_name = f'''[{target_org}] [{a.get('type', 'No alert type')}] {a.get('analystTitle', 'No title')}''' else: incident_name = f'''[{a.get('type', 'No alert type')}] {a.get('analystTitle', 'No title')}''' incident: Dict[str, Any] = { 'name': incident_name, 'occured': created_time_str, 'rawJSON': json.dumps(a) } if a.get('severity', None): # XSOAR mapping # Unknown: 0 # Informational: 0.5 # Low: 1 # Medium: 2 # High: 3 # Critical: 4 severity_from_portal = a['severity'] if severity_from_portal == 'Informational': incident['severity'] = 0.5 elif severity_from_portal == 'Warning': incident['severity'] = 1 elif severity_from_portal == 'Low': incident['severity'] = 1 elif severity_from_portal == 'Medium': incident['severity'] = 2 elif severity_from_portal == 'High': incident['severity'] = 3 elif severity_from_portal == 'Critical': incident['severity'] = 4 else: incident['severity'] = 0 if a.get('analystDescription', None): incident['details'] = a['analystDescription'] incidents.append(incident) if created_time > latest_created_time: latest_created_time = created_time last_alert_id = a['id'] if BROKER: next_run[f'{target_org}_last_fetch'] = latest_created_time.strftime(DATE_FORMAT) next_run[f'{target_org}_last_alert_id'] = last_alert_id else: next_run['last_fetch'] = latest_created_time.strftime(DATE_FORMAT) next_run['last_alert_id'] = last_alert_id return next_run, incidents def list_alerts(target_org=None, max_count=None, initial_index=None, alert_type=None, alert_time_min=None, alert_time_max=None, advanced_filter=None, details=None): if target_org is None: target_org = demisto.args().get('target_org', None) if max_count is None: max_count = demisto.args().get('max_count', 1000) if initial_index is None: initial_index = demisto.args().get('initial_index', None) if alert_type is None: alert_type = demisto.args().get('alert_type', None) if alert_time_min is None: alert_time_min = demisto.args().get('alert_time_min', None) if alert_time_max is None: alert_time_max = demisto.args().get('alert_time_max', None) if advanced_filter is None: advanced_filter = demisto.args().get('advanced_filter', None) params = {} if max_count: params['maxCount'] = max_count if initial_index: params['initialIndex'] = initial_index if alert_type: params['alertType'] = alert_type if alert_time_min: params['alertTimeMin'] = alert_time_min if alert_time_max: params['alertTimeMax'] = alert_time_max if advanced_filter: params['advancedFilter'] 
= advanced_filter r = send_request('GET', '/rest/v1/alerts', target_org=target_org, params=params) if details is None: details = argToBoolean(demisto.args().get('details', 'false')) keys = ['acknowledgedStatus', 'analystDescription', 'analystTitle', 'destIp', 'sourceIp', 'subType', 'title', 'type'] if not details: filtered_r = [] # returning only data in keys for doc in r: s = {k: doc[k] for k in keys} filtered_r.append(s) return filtered_r else: return r def get_health(): if BROKER: # must do health check on all cov health_check_resp = [] orgs = list_org() for org in orgs: health_check_resp.append( send_request('GET', '/rest/v1/health', target_org=org['org_name']) ) # "logical and" accross all health checks return all(health_check_resp) else: return send_request('GET', '/rest/v1/health') def list_sensors(): target_org = demisto.args().get('target_org', None) r = send_request('GET', '/rest/v1/sensors', target_org=target_org) details = argToBoolean(demisto.args().get('details', 'false')) keys = ['isAuthorized', 'isNetflowGenerator', 'name'] if not details: filtered_r = [] # returning only data in keys for doc in r: s = {k: doc[k] for k in keys} filtered_r.append(s) return filtered_r else: for s in r: del s['lastActive'] return r def get_sensor(): target_org = demisto.args().get('target_org', None) sensor_id = demisto.args().get('sensor_id') r = send_request('GET', f'/rest/v1/sensors/{sensor_id}', target_org=target_org) for sensor in r: del sensor['lastActive'] return r def connections_summary_by_ip(): target_org = demisto.args().get('target_org', None) max_count = demisto.args().get('max_count', 100) initial_index = demisto.args().get('initial_index', None) source_ip = demisto.args().get('source_ip', None) start_time = demisto.args().get('start_time', None) end_time = demisto.args().get('end_time', None) clients_only = bool(demisto.args().get('clients_only', False)) internal_only = bool(demisto.args().get('internal_only', False)) advanced_filter = demisto.args().get('advanced_filter', None) params = {} if max_count: params['maxCount'] = max_count if initial_index: params['initialIndex'] = initial_index if source_ip: params['sourceIp'] = source_ip if start_time: params['startTime'] = start_time if end_time: params['endTime'] = end_time if clients_only: params['clientsOnly'] = clients_only if internal_only: params['internalOnly'] = internal_only if advanced_filter: params['advancedFilter'] = advanced_filter r = send_request('GET', '/rest/v1/connections/ipsummary', target_org=target_org, params=params) details = argToBoolean(demisto.args().get('details', 'false')) keys = ['averageDuration', 'bytesIn', 'bytesOut', 'clientServerRelationship', 'destinationIpAddress', 'dstDomainName', 'serverPorts', 'sourceDomainName', 'sourceIpAddress'] if not details: filtered_r = [] # returning only data in keys for doc in r: s = {k: doc[k] for k in keys} filtered_r.append(s) return filtered_r else: return r def connections_summary_by_port(): target_org = demisto.args().get('target_org', None) max_count = demisto.args().get('max_count', 100) initial_index = demisto.args().get('initial_index', None) source_ip = demisto.args().get('source_ip', None) start_time = demisto.args().get('start_time', None) end_time = demisto.args().get('end_time', None) clients_only = bool(demisto.args().get('clients_only', False)) internal_only = bool(demisto.args().get('internal_only', False)) advanced_filter = demisto.args().get('advanced_filter', None) params = {} if max_count: params['maxCount'] = max_count if initial_index: 
params['initialIndex'] = initial_index if source_ip: params['sourceIp'] = source_ip if start_time: params['startTime'] = start_time if end_time: params['endTime'] = end_time if clients_only: params['clientsOnly'] = clients_only if internal_only: params['internalOnly'] = internal_only if advanced_filter: params['advancedFilter'] = advanced_filter r = send_request('GET', '/rest/v1/connections/portsummary', target_org=target_org, params=params) details = argToBoolean(demisto.args().get('details', 'false')) keys = ['averageDuration', 'bytesIn', 'bytesOut', 'destinationIpAddress', 'dstDomainName', 'serverPort', 'sourceDomainName', 'sourceIpAddress'] if not details: filtered_r = [] # returning only data in keys for doc in r: s = {k: doc[k] for k in keys} filtered_r.append(s) return filtered_r else: return r def list_dns_resolutions(): target_org = demisto.args().get('target_org', None) max_count = demisto.args().get('max_count', 100) initial_index = demisto.args().get('initial_index', None) request_time_after = demisto.args().get('request_time_after', None) request_time_before = demisto.args().get('request_time_before', None) domain_name = demisto.args().get('domain_name', None) resolved_ip = demisto.args().get('resolved_ip', None) request_origin_ip = demisto.args().get('request_origin_ip', None) nameserver_ip = demisto.args().get('nameserver_ip', None) advanced_filter = demisto.args().get('advanced_filter', None) params = {} if max_count: params['maxCount'] = max_count if initial_index: params['initialIndex'] = initial_index if request_time_after: params['requestTimeAfter'] = request_time_after if request_time_before: params['requestTimeBefore'] = request_time_before if domain_name: params['domainName'] = domain_name if resolved_ip: params['resolvedIp'] = resolved_ip if request_origin_ip: params['requestOriginIp'] = request_origin_ip if nameserver_ip: params['nameserverIp'] = nameserver_ip if advanced_filter: params['advancedFilter'] = advanced_filter r = send_request('GET', '/rest/v1/dns/resolutions', target_org=target_org, params=params) details = argToBoolean(demisto.args().get('details', 'false')) keys = ['domainName', 'requestOriginIp', 'requestTime', 'resolvedIp'] if not details: filtered_r = [] # returning only data in keys for doc in r: s = {k: doc[k] for k in keys} filtered_r.append(s) return filtered_r else: return r def list_internal_networks(): target_org = demisto.args().get('target_org', None) return send_request('GET', '/rest/v1/internal_networks', target_org=target_org) def set_internal_networks(): if BROKER: ValueError(f'{demisto.command()} is not available in broker mode') target_org = demisto.args().get('target_org', None) cidr = demisto.args().get('cidr', None) notes = demisto.args().get('notes', None) networks = [] networks.append( { 'cidr': cidr, 'notes': notes } ) send_request('PUT', '/rest/v1/internal_networks', target_org=target_org, json=networks) return cidr, notes def list_endpoint_agents(): target_org = demisto.args().get('target_org', None) advanced_filter = demisto.args().get('advanced_filter', None) params = {} if advanced_filter: params['advancedFilter'] = advanced_filter r = send_request('GET', '/rest/v2/endpoint/agent/agents', target_org=target_org, params=params) details = argToBoolean(demisto.args().get('details', 'false')) keys = ['hardwareVendor', 'hostName', 'ipAddress', 'isConnected', 'lastSessionUser', 'operatingSystem', 'serialNumber'] if not details: filtered_r = [] # returning only data in keys for doc in r: s = {k: doc[k] for k in keys} 
filtered_r.append(s) return filtered_r else: return r def find_endpoint_by_user(): target_org = demisto.args().get('target_org', None) user = demisto.args().get('user', None) params = {} params['advancedFilter'] = f'lastSessionUser={user}' return send_request('GET', '/rest/v2/endpoint/agent/agents', target_org=target_org, params=params) def find_endpoint_by_uuid(): target_org = demisto.args().get('target_org', None) uuid = demisto.args().get('uuid', None) params = {} params['advancedFilter'] = f'agentUuid={uuid}' return send_request('GET', '/rest/v2/endpoint/agent/agents', target_org=target_org, params=params) def search_endpoint_process(): target_org = demisto.args().get('target_org', None) name = demisto.args().get('name', None) advanced_filter = demisto.args().get('advanced_filter', None) params = {} if name: params['name'] = name if advanced_filter: params['advancedFilter'] = advanced_filter r = send_request('GET', '/rest/v2/endpoint/process/search', target_org=target_org, params=params) details = argToBoolean(demisto.args().get('details', 'false')) keys = ['commandLine', 'firstSeenTime', 'lastSeenTime', 'processPath', 'username'] if not details: filtered_r = [] # returning only data in keys for doc in r: s = {k: doc[k] for k in keys} filtered_r.append(s) return filtered_r else: return r def search_endpoint_installed_software(): target_org = demisto.args().get('target_org', None) name = demisto.args().get('name', None) version = demisto.args().get('version', None) advanced_filter = demisto.args().get('advanced_filter', None) params = {} if name: params['name'] = name if version: params['version'] = version if advanced_filter: params['advancedFilter'] = advanced_filter r = send_request('GET', '/rest/v2/endpoint/software/search', target_org=target_org, params=params) details = argToBoolean(demisto.args().get('details', 'false')) keys = ['installTimestamp', 'name', 'uninstallTimestamp', 'vendor', 'version'] if not details: filtered_r = [] # returning only data in keys for doc in r: s = {k: doc[k] for k in keys} filtered_r.append(s) return filtered_r else: return r def list_org(): if not BROKER: ValueError(f'{demisto.command()} is only available in broker mode') url = f'https://{HOST}/index' r = requests.get(url, verify=VERIFY_SSL) org_names: List[dict] = [] soup = BeautifulSoup(r.text, 'html.parser') for link in soup.find_all('a'): org_name = link.contents[0] if org_name: if org_name not in [i['org_name'] for i in org_names]: org_names.append({'org_name': org_name}) return org_names def main(): demisto.info(f'{demisto.command()} is called') try: if demisto.command() == 'test-module': if get_health(): return_results('ok') else: return_results('nok') elif demisto.command() == 'fetch-incidents': next_run, incidents = fetch_incidents( last_run=demisto.getLastRun(), first_run_time_range=FIRST_RUN_TIME_RANGE) demisto.setLastRun(next_run) demisto.incidents(incidents) elif demisto.command() == 'cov-secpr-list-alerts': r = list_alerts() if r: readable_output = tableToMarkdown('Alerts', r, removeNull=True, headerTransform=string_to_table_header) else: readable_output = 'No alerts found' results = CommandResults( outputs_prefix='Covalence.Alert', outputs_key_field='id', outputs=r, readable_output=readable_output ) return_results(results) elif demisto.command() == 'cov-secpr-list-sensors': r = list_sensors() if r: readable_output = tableToMarkdown('Sensors', r, removeNull=True, headerTransform=string_to_table_header) else: readable_output = 'No sensors found' results = CommandResults( 
outputs_prefix='Covalence.Sensors', outputs_key_field='id', outputs=r, readable_output=readable_output ) return_results(results) elif demisto.command() == 'cov-secpr-get-sensor': r = get_sensor() if r: readable_output = tableToMarkdown('Sensor', r, removeNull=True, headerTransform=string_to_table_header) else: readable_output = 'None sensor found' results = CommandResults( outputs_prefix='Covalence.Sensor', outputs_key_field='id', outputs=r, readable_output=readable_output ) return_results(results) elif demisto.command() == 'cov-secpr-connections-summary-ip': r = connections_summary_by_ip() if r: readable_output = tableToMarkdown('Connections', r, removeNull=True, headerTransform=string_to_table_header) else: readable_output = 'No connections found' results = CommandResults( outputs_prefix='Covalence.Connections', outputs_key_field='id', outputs=r, readable_output=readable_output ) return_results(results) elif demisto.command() == 'cov-secpr-connections-summary-port': r = connections_summary_by_port() if r: readable_output = tableToMarkdown('Connections', r, removeNull=True, headerTransform=string_to_table_header) else: readable_output = 'No connections found' results = CommandResults( outputs_prefix='Covalence.Connections', outputs_key_field='id', outputs=r, readable_output=readable_output ) return_results(results) elif demisto.command() == 'cov-secpr-list-dns-resolutions': r = list_dns_resolutions() if r: readable_output = tableToMarkdown('DNS Resolutions', r, removeNull=True, headerTransform=string_to_table_header) else: readable_output = 'No DNS resolutions found' results = CommandResults( outputs_prefix='Covalence.DNSResolutions', outputs_key_field='id', outputs=r, readable_output=readable_output ) return_results(results) elif demisto.command() == 'cov-secpr-list-internal-networks': r = list_internal_networks() if r: readable_output = tableToMarkdown('Internal Networks', r, removeNull=True, headerTransform=string_to_table_header) else: readable_output = 'No internal networks found' results = CommandResults( outputs_prefix='Covalence.InternalNetworks', outputs_key_field='cidr', outputs=r, readable_output=readable_output ) return_results(results) elif demisto.command() == 'cov-secpr-set-internal-networks': r = set_internal_networks() cidr = r[0] notes = r[1] readable_output = f'Internal network set as {cidr} with notes "{notes}"' results = CommandResults( outputs_prefix='Covalence.InternalNetworks', outputs_key_field='cidr', outputs=r, readable_output=readable_output ) return_results(results) elif demisto.command() == 'cov-secpr-list-endpoint-agents': r = list_endpoint_agents() if r: readable_output = tableToMarkdown('Endpoint Agents', r, removeNull=True, headerTransform=string_to_table_header) else: readable_output = 'No endpoint agents found' results = CommandResults( outputs_prefix='Covalence.EndpointAgents', outputs_key_field='agentUuid', outputs=r, readable_output=readable_output ) return_results(results) elif demisto.command() == 'cov-secpr-find-endpoint-agents-by-user': r = find_endpoint_by_user() if r: readable_output = tableToMarkdown('Endpoint Agents', r, removeNull=True, headerTransform=string_to_table_header) else: readable_output = 'No endpoint agents found' results = CommandResults( outputs_prefix='Covalence.EndpointAgents', outputs_key_field='agentUuid', outputs=r, readable_output=readable_output ) return_results(results) elif demisto.command() == 'cov-secpr-find-endpoint-agents-by-uuid': r = find_endpoint_by_uuid() if r: readable_output = tableToMarkdown('Endpoint 
Agents', r, removeNull=True, headerTransform=string_to_table_header) else: readable_output = 'No endpoint agents found' results = CommandResults( outputs_prefix='Covalence.EndpointAgents', outputs_key_field='agentUuid', outputs=r, readable_output=readable_output ) return_results(results) elif demisto.command() == 'cov-secpr-search-endpoint-process': r = search_endpoint_process() if r: readable_output = tableToMarkdown('Endpoint Process', r, removeNull=True, headerTransform=string_to_table_header) else: readable_output = 'No endpoint process found' results = CommandResults( outputs_prefix='Covalence.EndpointProcess', outputs_key_field='id', outputs=r, readable_output=readable_output ) return_results(results) elif demisto.command() == 'cov-secpr-search-endpoint-installed-software': r = search_endpoint_installed_software() if r: readable_output = tableToMarkdown('Endpoint Software', r, removeNull=True, headerTransform=string_to_table_header) else: readable_output = 'No endpoint software found' results = CommandResults( outputs_prefix='Covalence.EndpointSoftware', outputs_key_field='id', outputs=r, readable_output=readable_output ) return_results(results) elif demisto.command() == 'cov-secpr-list-organizations': r = list_org() if r: readable_output = tableToMarkdown('Organizations', r, removeNull=True, headerTransform=string_to_table_header) else: readable_output = 'No organizations found' results = CommandResults( outputs_prefix='Covalence.EndpointSoftware', outputs_key_field='id', outputs=r, readable_output=readable_output ) return_results(results) else: msg = f'Unknown command {demisto.command()}' demisto.error(msg) except Exception as e: demisto.error(traceback.format_exc()) return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}\n{traceback.format_exc()}') if __name__ in ('__main__', '__builtin__', 'builtins'): main()
[]
[]
[ "HTTP_PROXY", "HTTPS_PROXY", "http_proxy", "https_proxy" ]
[]
["HTTP_PROXY", "HTTPS_PROXY", "http_proxy", "https_proxy"]
python
4
0
src/main.go
package main import ( "fmt" "os" "github.com/gin-gonic/gin" ) func main() { if os.Getenv("APP_ENVIRONMENT") == "dev" { fmt.Println("\n****************** APP READY\n") } r := gin.Default() r.GET("/", onRoot) r.Run(":" + os.Getenv("HTTP_PORT")) } func onRoot(c *gin.Context) { c.JSON(200, gin.H{"message": "pong"}) }
[ "\"APP_ENVIRONMENT\"", "\"HTTP_PORT\"" ]
[]
[ "APP_ENVIRONMENT", "HTTP_PORT" ]
[]
["APP_ENVIRONMENT", "HTTP_PORT"]
go
2
0
tests/conftest.py
#!/usr/bin/env python # -*- coding: utf-8 -*- """Shared fixtures for :mod:`pytest`.""" from __future__ import print_function, absolute_import import os try: from pathlib import Path except ImportError: from pathlib2 import Path import pytest # noqa import gryaml import py2neo import yaml from py2neo_compat import py2neo_ver pytest.mark.skip_py2neo1 = pytest.mark.skipif(py2neo_ver == 1, reason='py2neo v1 not supported') pytest.mark.todo = pytest.mark.xfail(reason='TODO', run=True, strict=True) def pytest_report_header(config, startdir): """Add versions & config info to test output on terminal.""" lines = [] if 'NEO4J_URI' in os.environ: lines.append('Neo4J URI: %s' % os.environ['NEO4J_URI']) lines.append('py2neo: {0.__version__}' ' pyyaml: {1.__version__}' ' libyaml: {1.__with_libyaml__}'.format(py2neo, yaml)) lines.append('forked: %s' % config.getoption('forked', default=False)) return lines @pytest.fixture def graphdb(): # type: () -> py2neo.Graph """Fixture connecting to graphdb.""" if 'NEO4J_URI' not in os.environ: os.environ['NEO4J_URI'] = 'http://localhost:7474/db/data' graphdb = gryaml.connect(uri=os.environ['NEO4J_URI']) graphdb.delete_all() return graphdb @pytest.fixture def graphdb_offline(): # type: () -> None """Ensure the database is not connected.""" if py2neo_ver < 2: pytest.skip('Offline not supported in py2neo < 2') neo4j_uri_env = os.environ.get('NEO4J_URI', None) if neo4j_uri_env: del os.environ['NEO4J_URI'] old_graphdb = gryaml._py2neo.graphdb gryaml._py2neo.graphdb = None yield gryaml._py2neo.graphdb = old_graphdb if neo4j_uri_env: os.environ['NEO4J_URI'] = neo4j_uri_env @pytest.fixture(scope='session') def samples_path(): # type: () -> Path return Path(__file__).parent / 'samples' @pytest.fixture(scope='session') def sample_file(samples_path): # type: (Path) -> Callable[[str], str] def sample_file(fname): # type: (str) -> str fname = samples_path / fname with fname.open() as f: return f.read() return sample_file @pytest.fixture(scope='session') def sample_yaml(sample_file): # type: (Callable[[str], str]) -> Callable[[str], str] def sample_yaml(fname): # type: (str) -> str if not (fname.endswith('.yaml') or fname.endswith('.yml')): fname += '.yaml' return sample_file(fname) return sample_yaml
[]
[]
[ "NEO4J_URI" ]
[]
["NEO4J_URI"]
python
1
0
frontend/amundsen_application/deprecations.py
# Copyright Contributors to the Amundsen project. # SPDX-License-Identifier: Apache-2.0 import os import warnings from flask import Flask warnings.simplefilter('always', DeprecationWarning) # Deprecation Warnings def process_deprecations(app: Flask) -> None: if os.getenv('APP_WRAPPER') or os.getenv('APP_WRAPPER_CLASS'): warnings.warn("'APP_WRAPPER' and 'APP_WRAPPER_CLASS' variables are deprecated since version (3.9.0), " "and will be removed in version 4. " "Please use 'FLASK_APP_MODULE_NAME' and 'FLASK_APP_CLASS_NAME' instead", DeprecationWarning) if os.getenv('APP_WRAPPER_ARGS'): warnings.warn("'APP_WRAPPER_ARGS' variable is deprecated since version (3.9.0), " "and will be removed in version 4. " "Please use 'FLASK_APP_KWARGS_DICT' instead", DeprecationWarning) if app.config.get("POPULAR_TABLE_COUNT", None) is not None: app.config["POPULAR_RESOURCES_COUNT"] = app.config["POPULAR_TABLE_COUNT"] warnings.warn("'POPULAR_TABLE_COUNT' variable is deprecated since version (3.9.0), " "and will be removed in version 4. " "Please use 'POPULAR_RESOURCES_COUNT' instead", DeprecationWarning) if app.config.get("POPULAR_TABLE_PERSONALIZATION", None) is not None: app.config["POPULAR_RESOURCES_PERSONALIZATION"] = app.config["POPULAR_TABLE_PERSONALIZATION"] warnings.warn("'POPULAR_TABLE_PERSONALIZATION' variable is deprecated since version (3.9.0), " "and will be removed in version 4. " "Please use 'POPULAR_RESOURCES_PERSONALIZATION' instead", DeprecationWarning)
[]
[]
[ "APP_WRAPPER", "APP_WRAPPER_ARGS", "APP_WRAPPER_CLASS" ]
[]
["APP_WRAPPER", "APP_WRAPPER_ARGS", "APP_WRAPPER_CLASS"]
python
3
0
services/crawler/models/houses.py
""" Module for houses detailed data parsing """ import os import re import uuid from typing import Optional, Tuple from loguru import logger from requests_html import HTMLSession from bs4 import BeautifulSoup logger.add("house_parse.log", level="DEBUG") # pylint: disable= R0902 class House: def __init__(self, url: str, title: str, data): self.url = url self.title = title self.phone = data["linkInfo"]["mobile"] self.city = data["breadcrumb"][0]["name"].replace("租屋", "市") self.district = data["breadcrumb"][1]["name"].replace("租屋", "市") self.house_status = data["breadcrumb"][2]["name"].replace("租屋", "市") self.lessor, self.lessor_gender, self.lessor_identity = self._get_lessor_info( data ) self.sold = None self.house_type = self._get_house_type(data) self.gender_requirement = self._get_gender_requirement(data) self.house_condition = self._get_house_condition(data) @staticmethod def _get_lessor_info(data) -> Tuple: """[summary] Args: html ([type]): [description] Returns: Tuple: [description] """ lessor_gender: Optional[str] = None lessor_identity: Optional[str] = None lessor = data["linkInfo"]["name"] pattern_after_colon = r":\s*(.*)" lessor = re.findall(pattern_after_colon, lessor)[0].strip() lessor_identity = data["linkInfo"]["name"].replace(f": {lessor}", "") if lessor: if "先生" in lessor: lessor_gender = "男" elif "小姐" in lessor: lessor_gender = "女" return lessor, lessor_gender, lessor_identity @staticmethod def _get_house_type(data) -> Optional[str]: """parse the "型態" value from house page Args: html (object): the html object generate by request_html Returns: Optional[str]: the "型態" field. e.g. "電梯大樓" """ for item in data["infoData"]["data"]: if item["name"] == "型態": return item["value"] return None @staticmethod def _get_gender_requirement(data) -> Optional[str]: """parse the "性別要求" value from house page Args: html ([type]): the html object generate by request_html Returns: Optional[str]: the "性別要求" value. e.g. "男女生皆可" """ rule = data["service"]["rule"] if "限男生" in rule: return "男生" if "限女生" in rule: return "女生" return "男女生皆可" @staticmethod def _get_house_condition(data) -> Optional[str]: """parse the "屋況說明" value from house page Args: html ([type]): the html object generate by request_html Returns: Optional[str]: the "屋況說明" value """ house_condition = data["remark"]["content"] soup = BeautifulSoup(house_condition, features="html.parser") return soup.get_text() if house_condition else None def to_dict(self) -> dict: return { "url": self.url, "title": self.title, "city": self.city, "district": self.district, "lessor": self.lessor, "lessor_gender": self.lessor_gender, "lessor_identity": self.lessor_identity, "house_type": self.house_type, "house_status": self.house_status, "sold": self.sold, "phone": self.phone, "gender_requirement": self.gender_requirement, "house_condition": self.house_condition, } # pylint: enable= R0902 def parse_single_house(url, title, proxy=None) -> Optional[dict]: """[summary] Args: url ([type]): the url of this house title ([type]): the title of this house proxy ([type], optional): the proxy IP. Defaults to None. 
Returns: Optional[dict]: the house detailed data """ session_arg = {"browser_args": [f"--proxy-server={proxy}"]} if proxy else {} headers = { "device": "pc", "deviceid": str(uuid.uuid4()), } house_id = url.replace(os.environ.get("WEB_URL_PREFIX"), "").replace(".html", "") url = f"{os.environ.get('API_WEB_URL')}/tw/v1/house/rent/detail?id={house_id}&isOnline=1" res = HTMLSession(**session_arg).get(url, headers=headers) status = res.status_code logger.info(f"Parse: {url} {status}") if status != 200: logger.error(status, res.text) return None try: return House(url, title, res.json()["data"]).to_dict() except AttributeError as error: logger.warning(f"{url}\n{error}") return None
[]
[]
[ "WEB_URL_PREFIX", "API_WEB_URL" ]
[]
["WEB_URL_PREFIX", "API_WEB_URL"]
python
2
0
save_connection.go
package main import ( "bytes" "encoding/gob" "errors" "github.com/1egoman/slick/gateway" "io/ioutil" "log" "os" "path" "strings" ) const CONFIG_FILE_NAME = ".slickrc" func GetConfigFileContents() map[string]string { configFiles := make(map[string]string) homeFilename := path.Join(os.Getenv("HOME"), CONFIG_FILE_NAME) crawledHome := false pathElements := strings.Split(os.Getenv("PWD"), "/") for index := 0; index <= len(pathElements); index++ { filename := "/" + path.Join(path.Join(pathElements[:index]...), CONFIG_FILE_NAME) log.Println("Searching config path", filename) data, err := ioutil.ReadFile(filename) if err != nil { continue } log.Println("Config exists!", filename) // When traversing through the tree, did we come across the `~/.slickrc`? if homeFilename == filename { crawledHome = true } configFiles[filename] = string(data) } // Finally, look for ~/.slickrc last, if applicable. if !crawledHome { data, err := ioutil.ReadFile(homeFilename) if err == nil { configFiles[homeFilename] = string(data) } } return configFiles } // // STORAGE OF SAVED CONNECTIONS // type SerializedConnection struct { MessageHistory []gateway.Message Channels []gateway.Channel SelectedChannel gateway.Channel Self gateway.User Team gateway.Team } type SerializedGlobalState struct { ActiveConnectionIndex int SelectedMessageIndex int BottomDisplayedItem int } func PathToSavedConnections() string { return PathToCache() + "connections/" } func PathToCache() string { return os.Getenv("HOME") + "/.slickcache/" } func SaveConnection(conn gateway.Connection) error { if conn.SelectedChannel() == nil { return errors.New("Can't save connection with no selected channel!") } var buf bytes.Buffer enc := gob.NewEncoder(&buf) err := enc.Encode(SerializedConnection{ MessageHistory: conn.MessageHistory(), Channels: conn.Channels(), SelectedChannel: *conn.SelectedChannel(), Self: *conn.Self(), Team: *conn.Team(), }) if err != nil { return err } err = ioutil.WriteFile(PathToSavedConnections()+conn.Name(), buf.Bytes(), 0777) if err != nil { return err } return nil } func ApplySaveToConnection(name string, conn *gateway.Connection) error { byt, err := ioutil.ReadFile(PathToSavedConnections() + name) if err != nil { return err } var serialized SerializedConnection buf := bytes.NewBuffer(byt) dec := gob.NewDecoder(buf) err = dec.Decode(&serialized) if err != nil { return err } if conn == nil { return errors.New("Passed connection was nil!") } (*conn).SetSelectedChannel(&serialized.SelectedChannel) (*conn).SetChannels(serialized.Channels) (*conn).SetMessageHistory(serialized.MessageHistory) (*conn).SetSelf(serialized.Self) (*conn).SetTeam(serialized.Team) return nil } func SaveGlobalState(state *State) error { // Get the index of the active connection var activeConnectionIndex int = 0 activeConnection := state.ActiveConnection() for ct, i := range state.Connections { if i == activeConnection { activeConnectionIndex = ct break } } var buf bytes.Buffer enc := gob.NewEncoder(&buf) err := enc.Encode(SerializedGlobalState{ ActiveConnectionIndex: activeConnectionIndex, SelectedMessageIndex: state.SelectedMessageIndex, BottomDisplayedItem: state.BottomDisplayedItem, }) if err != nil { return err } err = ioutil.WriteFile(PathToCache()+"globalstate", buf.Bytes(), 0777) if err != nil { return err } return nil } func ApplyGlobalStateToState(state *State) error { byt, err := ioutil.ReadFile(PathToCache()+"globalstate") if err != nil { return err } var serialized SerializedGlobalState buf := bytes.NewBuffer(byt) dec := gob.NewDecoder(buf) err = 
dec.Decode(&serialized) if err != nil { return err } if state == nil { return errors.New("Passed state object was nil!") } // If the user added or removed connections and this connection index wouldn't work, then don't // use it. if serialized.ActiveConnectionIndex < len(state.Connections) { state.SetActiveConnection(serialized.ActiveConnectionIndex) } state.SelectedMessageIndex = serialized.SelectedMessageIndex state.BottomDisplayedItem = serialized.BottomDisplayedItem return nil }
[ "\"HOME\"", "\"PWD\"", "\"HOME\"" ]
[]
[ "PWD", "HOME" ]
[]
["PWD", "HOME"]
go
2
0
cmd/server/shared/redis.go
package shared import ( "os" "path/filepath" "text/template" "github.com/sourcegraph/sourcegraph/cmd/server/shared/assets" ) var redisConfTmpl = template.Must(template.New("redis.conf").Parse(assets.MustAssetString("redis.conf.tmpl"))) func maybeRedisProcFile() (string, error) { // Redis is already configured. See envvars used in pkg/redispool. if os.Getenv("REDIS_ENDPOINT") != "" { return "", nil } store := os.Getenv("REDIS_STORE_ENDPOINT") != "" cache := os.Getenv("REDIS_CACHE_ENDPOINT") != "" if store && cache { return "", nil } // Create a redis.conf if it doesn't exist path := filepath.Join(os.Getenv("CONFIG_DIR"), "redis.conf") if _, err := os.Stat(path); err != nil { if !os.IsNotExist(err) { return "", err } dataDir := filepath.Join(os.Getenv("DATA_DIR"), "redis") err := os.MkdirAll(dataDir, os.FileMode(0755)) if err != nil { return "", err } f, err := os.Create(path) if err != nil { return "", err } err = redisConfTmpl.Execute(f, struct{ Dir string }{ Dir: dataDir, }) f.Close() if err != nil { os.Remove(path) return "", err } } // Run and use a local redis SetDefaultEnv("REDIS_ENDPOINT", "127.0.0.1:6379") // Redis is noisier than we prefer even at the most quiet setting "warning" // so we only output the last log line when redis stops in case it stopped unexpectedly // and the log contains the reason why it stopped. return "redis: redis-server " + path + " | tail -n 1", nil }
[ "\"REDIS_ENDPOINT\"", "\"REDIS_STORE_ENDPOINT\"", "\"REDIS_CACHE_ENDPOINT\"", "\"CONFIG_DIR\"", "\"DATA_DIR\"" ]
[]
[ "CONFIG_DIR", "REDIS_STORE_ENDPOINT", "DATA_DIR", "REDIS_CACHE_ENDPOINT", "REDIS_ENDPOINT" ]
[]
["CONFIG_DIR", "REDIS_STORE_ENDPOINT", "DATA_DIR", "REDIS_CACHE_ENDPOINT", "REDIS_ENDPOINT"]
go
5
0
git2go/diff/diff.go
package main import ( "log" "os" "path/filepath" git "github.com/libgit2/git2go" ) /* github.com/libgit2/git2go commit 80cf533fe4e48ddfab3015d9570f2833951c1dea Author: David Pierce <[email protected]> Date: Sat Sep 26 15:37:48 2015 -0700 Config#LookupString uses git_buf to load value config.go | 8 +++++--- config_test.go | 58 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 63 insertions(+), 3 deletions(-) */ func main() { // After go get -v github.com/libgit2/git2go // path to git2go repository in your $GOPATH repoPath := filepath.Join(os.Getenv("GOPATH"), "src/github.com/libgit2/git2go") gitRepo, err := git.OpenRepository(repoPath) if err != nil { log.Fatal(err) } // commit SHA-1 checksum commitID := `80cf533fe4e48ddfab3015d9570f2833951c1dea` commitOid, err := git.NewOid(commitID) if err != nil { log.Fatal(err) } commit, err := gitRepo.LookupCommit(commitOid) if err != nil { log.Fatal(err) } commitTree, err := commit.Tree() if err != nil { log.Fatal(err) } options, err := git.DefaultDiffOptions() if err != nil { log.Fatal(err) } options.IdAbbrev = 40 var parentTree *git.Tree if commit.ParentCount() > 0 { parentTree, err = commit.Parent(0).Tree() if err != nil { log.Fatal(err) } } gitDiff, err := gitRepo.DiffTreeToTree(parentTree, commitTree, &options) if err != nil { log.Fatal(err) } // Show all file patch diffs in a commit. numDeltas, err := gitDiff.NumDeltas() if err != nil { log.Fatal(err) } for d := 0; d < numDeltas; d++ { patch, err := gitDiff.Patch(d) if err != nil { log.Fatal(err) } patchString, err := patch.String() if err != nil { log.Fatal(err) } log.Printf("\n%s", patchString) patch.Free() } }
[ "\"GOPATH\"" ]
[]
[ "GOPATH" ]
[]
["GOPATH"]
go
1
0
api/v1alpha3/types.go
/* Copyright 2021 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1alpha3 // NetworkInterface holds the network interface information like subnet id. type NetworkInterface struct { // Subnet ID of the network interface Subnet string `json:"subnet,omitempty"` } // Subnet describes a subnet type Subnet struct { Ipv4CidrBlock *string `json:"cidr"` Name *string `json:"name"` ID *string `json:"id"` Zone *string `json:"zone"` } // VPCEndpoint describes a VPCEndpoint type VPCEndpoint struct { Address *string `json:"address"` FIPID *string `json:"floatingIPID"` }
[]
[]
[]
[]
[]
go
null
null
null
utils/utils.py
############################################################ # File: utils.py # # Created: 2019-11-18 20:50:50 # # Author : wvinzh # # Email : [email protected] # # ------------------------------------------ # # Description:utils.py # # Copyright@2019 wvinzh, HUST # ############################################################ import os import random import numpy as np import torch import shutil import logging def getLogger(name='logger',filename=''): # Use a logger named fib logger = logging.getLogger(name) # Set the logger level to DEBUG logger.setLevel(logging.DEBUG) # Create a StreamHandler that logs to the console if filename: if os.path.exists(filename): os.remove(filename) hdl = logging.FileHandler(filename, mode='a', encoding='utf-8', delay=False) else: hdl = logging.StreamHandler() format = '%(asctime)s [%(levelname)s] at %(filename)s,%(lineno)d: %(message)s' datefmt = '%Y-%m-%d(%a)%H:%M:%S' formatter = logging.Formatter(format,datefmt) hdl.setFormatter(formatter) # Attach the handler to the logger logger.addHandler(hdl) return logger def set_seed(seed=0): os.environ['PYTHONHASHSEED'] = str(seed) random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) # if you are using multi-GPU. torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True def _init_fn(worker_id): set_seed(worker_id) # np.random.seed() def get_lr(optimizer): for param_group in optimizer.param_groups: old_lr = float(param_group['lr']) return old_lr def accuracy(output, target, topk=(1,)): """Computes the precision@k for the specified values of k""" with torch.no_grad(): maxk = max(topk) batch_size = target.size(0) _, pred = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.view(1, -1).expand_as(pred)) res = [] for k in topk: correct_k = correct[:k].view(-1).float().sum(0, keepdim=True) res.append(correct_k.mul_(100.0 / batch_size)) return res def save_checkpoint(state, is_best, path='checkpoint', filename='checkpoint.pth.tar'): if not os.path.exists(path): os.makedirs(path) full_path = os.path.join(path, filename) torch.save(state, full_path) if is_best: shutil.copyfile(full_path, os.path.join(path, 'model_best.pth.tar')) print("Save best model at %s==" % os.path.join(path, 'model_best.pth.tar'))
[]
[]
[ "PYTHONHASHSEED" ]
[]
["PYTHONHASHSEED"]
python
1
0
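Note that set_seed in the utils.py row writes an environment variable (os.environ['PYTHONHASHSEED'] = str(seed)) rather than reading one, and the variable still shows up in the constarg/constargjson columns. A small sketch (illustrative names, not part of the dataset) of a pattern that picks up subscript-style uses of os.environ whether they are reads or assignments:

import re

# Illustrative pattern: os.environ['NAME'] or os.environ["NAME"], read or assigned.
PY_SUBSCRIPT = re.compile(r"""os\.environ\[\s*['"]([^'"]+)['"]\s*\]""")

def subscript_env_names(source: str) -> list:
    """Return environment variable names used as string-literal subscripts of os.environ."""
    return PY_SUBSCRIPT.findall(source)

# On the utils.py content above this returns ["PYTHONHASHSEED"].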
moto/core/models.py
# -*- coding: utf-8 -*- from __future__ import unicode_literals from __future__ import absolute_import import functools import inspect import os import re import six from io import BytesIO from collections import defaultdict from botocore.handlers import BUILTIN_HANDLERS from botocore.awsrequest import AWSResponse import mock from moto import settings import responses from moto.packages.httpretty import HTTPretty from .utils import ( convert_httpretty_response, convert_regex_to_flask_path, convert_flask_to_responses_response, ) ACCOUNT_ID = os.environ.get("MOTO_ACCOUNT_ID", "123456789012") class BaseMockAWS(object): nested_count = 0 def __init__(self, backends): self.backends = backends self.backends_for_urls = {} from moto.backends import BACKENDS default_backends = { "instance_metadata": BACKENDS["instance_metadata"]["global"], "moto_api": BACKENDS["moto_api"]["global"], } self.backends_for_urls.update(self.backends) self.backends_for_urls.update(default_backends) # "Mock" the AWS credentials as they can't be mocked in Botocore currently FAKE_KEYS = { "AWS_ACCESS_KEY_ID": "foobar_key", "AWS_SECRET_ACCESS_KEY": "foobar_secret", } self.default_session_mock = mock.patch("boto3.DEFAULT_SESSION", None) self.env_variables_mocks = mock.patch.dict(os.environ, FAKE_KEYS) if self.__class__.nested_count == 0: self.reset() def __call__(self, func, reset=True): if inspect.isclass(func): return self.decorate_class(func) return self.decorate_callable(func, reset) def __enter__(self): self.start() return self def __exit__(self, *args): self.stop() def start(self, reset=True): self.default_session_mock.start() self.env_variables_mocks.start() self.__class__.nested_count += 1 if reset: for backend in self.backends.values(): backend.reset() self.enable_patching() def stop(self): self.default_session_mock.stop() self.env_variables_mocks.stop() self.__class__.nested_count -= 1 if self.__class__.nested_count < 0: raise RuntimeError("Called stop() before start().") if self.__class__.nested_count == 0: self.disable_patching() def decorate_callable(self, func, reset): def wrapper(*args, **kwargs): self.start(reset=reset) try: result = func(*args, **kwargs) finally: self.stop() return result functools.update_wrapper(wrapper, func) wrapper.__wrapped__ = func return wrapper def decorate_class(self, klass): for attr in dir(klass): if attr.startswith("_"): continue attr_value = getattr(klass, attr) if not hasattr(attr_value, "__call__"): continue # Check if this is a classmethod. If so, skip patching if inspect.ismethod(attr_value) and attr_value.__self__ is klass: continue # Check if this is a staticmethod. 
If so, skip patching for cls in inspect.getmro(klass): if attr_value.__name__ not in cls.__dict__: continue bound_attr_value = cls.__dict__[attr_value.__name__] if not isinstance(bound_attr_value, staticmethod): break else: # It is a staticmethod, skip patching continue try: setattr(klass, attr, self(attr_value, reset=False)) except TypeError: # Sometimes we can't set this for built-in types continue return klass class HttprettyMockAWS(BaseMockAWS): def reset(self): HTTPretty.reset() def enable_patching(self): if not HTTPretty.is_enabled(): HTTPretty.enable() for method in HTTPretty.METHODS: for backend in self.backends_for_urls.values(): for key, value in backend.urls.items(): HTTPretty.register_uri( method=method, uri=re.compile(key), body=convert_httpretty_response(value), ) def disable_patching(self): HTTPretty.disable() HTTPretty.reset() RESPONSES_METHODS = [ responses.GET, responses.DELETE, responses.HEAD, responses.OPTIONS, responses.PATCH, responses.POST, responses.PUT, ] class CallbackResponse(responses.CallbackResponse): """ Need to subclass so we can change a couple things """ def get_response(self, request): """ Need to override this so we can pass decode_content=False """ headers = self.get_headers() result = self.callback(request) if isinstance(result, Exception): raise result status, r_headers, body = result body = responses._handle_body(body) headers.update(r_headers) return responses.HTTPResponse( status=status, reason=six.moves.http_client.responses.get(status), body=body, headers=headers, preload_content=False, # Need to not decode_content to mimic requests decode_content=False, ) def _url_matches(self, url, other, match_querystring=False): """ Need to override this so we can fix querystrings breaking regex matching """ if not match_querystring: other = other.split("?", 1)[0] if responses._is_string(url): if responses._has_unicode(url): url = responses._clean_unicode(url) if not isinstance(other, six.text_type): other = other.encode("ascii").decode("utf8") return self._url_matches_strict(url, other) elif isinstance(url, responses.Pattern) and url.match(other): return True else: return False botocore_mock = responses.RequestsMock( assert_all_requests_are_fired=False, target="botocore.vendored.requests.adapters.HTTPAdapter.send", ) responses_mock = responses._default_mock # Add passthrough to allow any other requests to work # Since this uses .startswith, it applies to http and https requests. 
responses_mock.add_passthru("http") BOTOCORE_HTTP_METHODS = ["GET", "DELETE", "HEAD", "OPTIONS", "PATCH", "POST", "PUT"] class MockRawResponse(BytesIO): def __init__(self, input): if isinstance(input, six.text_type): input = input.encode("utf-8") super(MockRawResponse, self).__init__(input) def stream(self, **kwargs): contents = self.read() while contents: yield contents contents = self.read() class BotocoreStubber(object): def __init__(self): self.enabled = False self.methods = defaultdict(list) def reset(self): self.methods.clear() def register_response(self, method, pattern, response): matchers = self.methods[method] matchers.append((pattern, response)) def __call__(self, event_name, request, **kwargs): if not self.enabled: return None response = None response_callback = None found_index = None matchers = self.methods.get(request.method) base_url = request.url.split("?", 1)[0] for i, (pattern, callback) in enumerate(matchers): if pattern.match(base_url): if found_index is None: found_index = i response_callback = callback else: matchers.pop(found_index) break if response_callback is not None: for header, value in request.headers.items(): if isinstance(value, six.binary_type): request.headers[header] = value.decode("utf-8") status, headers, body = response_callback( request, request.url, request.headers ) body = MockRawResponse(body) response = AWSResponse(request.url, status, headers, body) return response botocore_stubber = BotocoreStubber() BUILTIN_HANDLERS.append(("before-send", botocore_stubber)) def not_implemented_callback(request): status = 400 headers = {} response = "The method is not implemented" return status, headers, response class BotocoreEventMockAWS(BaseMockAWS): def reset(self): botocore_stubber.reset() responses_mock.reset() def enable_patching(self): botocore_stubber.enabled = True for method in BOTOCORE_HTTP_METHODS: for backend in self.backends_for_urls.values(): for key, value in backend.urls.items(): pattern = re.compile(key) botocore_stubber.register_response(method, pattern, value) if not hasattr(responses_mock, "_patcher") or not hasattr( responses_mock._patcher, "target" ): responses_mock.start() for method in RESPONSES_METHODS: # for backend in default_backends.values(): for backend in self.backends_for_urls.values(): for key, value in backend.urls.items(): responses_mock.add( CallbackResponse( method=method, url=re.compile(key), callback=convert_flask_to_responses_response(value), stream=True, match_querystring=False, ) ) responses_mock.add( CallbackResponse( method=method, url=re.compile("https?://.+.amazonaws.com/.*"), callback=not_implemented_callback, stream=True, match_querystring=False, ) ) botocore_mock.add( CallbackResponse( method=method, url=re.compile("https?://.+.amazonaws.com/.*"), callback=not_implemented_callback, stream=True, match_querystring=False, ) ) def disable_patching(self): botocore_stubber.enabled = False self.reset() try: responses_mock.stop() except RuntimeError: pass MockAWS = BotocoreEventMockAWS class ServerModeMockAWS(BaseMockAWS): def reset(self): import requests requests.post("http://localhost:5000/moto-api/reset") def enable_patching(self): if self.__class__.nested_count == 1: # Just started self.reset() from boto3 import client as real_boto3_client, resource as real_boto3_resource import mock def fake_boto3_client(*args, **kwargs): if "endpoint_url" not in kwargs: kwargs["endpoint_url"] = "http://localhost:5000" return real_boto3_client(*args, **kwargs) def fake_boto3_resource(*args, **kwargs): if "endpoint_url" not in 
kwargs: kwargs["endpoint_url"] = "http://localhost:5000" return real_boto3_resource(*args, **kwargs) def fake_httplib_send_output(self, message_body=None, *args, **kwargs): def _convert_to_bytes(mixed_buffer): bytes_buffer = [] for chunk in mixed_buffer: if isinstance(chunk, six.text_type): bytes_buffer.append(chunk.encode("utf-8")) else: bytes_buffer.append(chunk) msg = b"\r\n".join(bytes_buffer) return msg self._buffer.extend((b"", b"")) msg = _convert_to_bytes(self._buffer) del self._buffer[:] if isinstance(message_body, bytes): msg += message_body message_body = None self.send(msg) # if self._expect_header_set: # read, write, exc = select.select([self.sock], [], [self.sock], 1) # if read: # self._handle_expect_response(message_body) # return if message_body is not None: self.send(message_body) self._client_patcher = mock.patch("boto3.client", fake_boto3_client) self._resource_patcher = mock.patch("boto3.resource", fake_boto3_resource) if six.PY2: self._httplib_patcher = mock.patch( "httplib.HTTPConnection._send_output", fake_httplib_send_output ) self._client_patcher.start() self._resource_patcher.start() if six.PY2: self._httplib_patcher.start() def disable_patching(self): if self._client_patcher: self._client_patcher.stop() self._resource_patcher.stop() if six.PY2: self._httplib_patcher.stop() class Model(type): def __new__(self, clsname, bases, namespace): cls = super(Model, self).__new__(self, clsname, bases, namespace) cls.__models__ = {} for name, value in namespace.items(): model = getattr(value, "__returns_model__", False) if model is not False: cls.__models__[model] = name for base in bases: cls.__models__.update(getattr(base, "__models__", {})) return cls @staticmethod def prop(model_name): """ decorator to mark a class method as returning model values """ def dec(f): f.__returns_model__ = model_name return f return dec model_data = defaultdict(dict) class InstanceTrackerMeta(type): def __new__(meta, name, bases, dct): cls = super(InstanceTrackerMeta, meta).__new__(meta, name, bases, dct) if name == "BaseModel": return cls service = cls.__module__.split(".")[1] if name not in model_data[service]: model_data[service][name] = cls cls.instances = [] return cls @six.add_metaclass(InstanceTrackerMeta) class BaseModel(object): def __new__(cls, *args, **kwargs): instance = super(BaseModel, cls).__new__(cls) cls.instances.append(instance) return instance class BaseBackend(object): def _reset_model_refs(self): # Remove all references to the models stored for service, models in model_data.items(): for model_name, model in models.items(): model.instances = [] def reset(self): self._reset_model_refs() self.__dict__ = {} self.__init__() @property def _url_module(self): backend_module = self.__class__.__module__ backend_urls_module_name = backend_module.replace("models", "urls") backend_urls_module = __import__( backend_urls_module_name, fromlist=["url_bases", "url_paths"] ) return backend_urls_module @property def urls(self): """ A dictionary of the urls to be mocked with this service and the handlers that should be called in their place """ url_bases = self._url_module.url_bases unformatted_paths = self._url_module.url_paths urls = {} for url_base in url_bases: for url_path, handler in unformatted_paths.items(): url = url_path.format(url_base) urls[url] = handler return urls @property def url_paths(self): """ A dictionary of the paths of the urls to be mocked with this service and the handlers that should be called in their place """ unformatted_paths = self._url_module.url_paths paths = 
{} for unformatted_path, handler in unformatted_paths.items(): path = unformatted_path.format("") paths[path] = handler return paths @property def url_bases(self): """ A list containing the url_bases extracted from urls.py """ return self._url_module.url_bases @property def flask_paths(self): """ The url paths that will be used for the flask server """ paths = {} for url_path, handler in self.url_paths.items(): url_path = convert_regex_to_flask_path(url_path) paths[url_path] = handler return paths def decorator(self, func=None): if settings.TEST_SERVER_MODE: mocked_backend = ServerModeMockAWS({"global": self}) else: mocked_backend = MockAWS({"global": self}) if func: return mocked_backend(func) else: return mocked_backend def deprecated_decorator(self, func=None): if func: return HttprettyMockAWS({"global": self})(func) else: return HttprettyMockAWS({"global": self}) # def list_config_service_resources(self, resource_ids, resource_name, limit, next_token): # """For AWS Config. This will list all of the resources of the given type and optional resource name and region""" # raise NotImplementedError() class ConfigQueryModel(object): def __init__(self, backends): """Inits based on the resource type's backends (1 for each region if applicable)""" self.backends = backends def list_config_service_resources( self, resource_ids, resource_name, limit, next_token, backend_region=None, resource_region=None, ): """For AWS Config. This will list all of the resources of the given type and optional resource name and region. This supports both aggregated and non-aggregated listing. The following notes the difference: - Non-Aggregated Listing - This only lists resources within a region. The way that this is implemented in moto is based on the region for the resource backend. You must set the `backend_region` to the region that the API request arrived from. resource_region can be set to `None`. - Aggregated Listing - This lists resources from all potential regional backends. For non-global resource types, this should collect a full list of resources from all the backends, and then be able to filter from the resource region. This is because an aggregator can aggregate resources from multiple regions. In moto, aggregated regions will *assume full aggregation from all resources in all regions for a given resource type*. The `backend_region` should be set to `None` for these queries, and the `resource_region` should optionally be set to the `Filters` region parameter to filter out resources that reside in a specific region. For aggregated listings, pagination logic should be set such that the next page can properly span all the region backends. As such, the proper way to implement is to first obtain a full list of results from all the region backends, and then filter from there. It may be valuable to make this a concatenation of the region and resource name. :param resource_ids: A list of resource IDs :param resource_name: The individual name of a resource :param limit: How many per page :param next_token: The item that will page on :param backend_region: The region for the backend to pull results from. Set to `None` if this is an aggregated query. :param resource_region: The region for where the resources reside to pull results from. Set to `None` if this is a non-aggregated query. 
:return: This should return a list of Dicts that have the following fields: [ { 'type': 'AWS::The AWS Config data type', 'name': 'The name of the resource', 'id': 'The ID of the resource', 'region': 'The region of the resource -- if global, then you may want to have the calling logic pass in the aggregator region in for the resource region -- or just us-east-1 :P' } , ... ] """ raise NotImplementedError() def get_config_resource( self, resource_id, resource_name=None, backend_region=None, resource_region=None ): """For AWS Config. This will query the backend for the specific resource type configuration. This supports both aggregated, and non-aggregated fetching -- for batched fetching -- the Config batching requests will call this function N times to fetch the N objects needing to be fetched. - Non-Aggregated Fetching - This only fetches a resource config within a region. The way that this is implemented in moto is based on the region for the resource backend. You must set the `backend_region` to the region that the API request arrived from. `resource_region` should be set to `None`. - Aggregated Fetching - This fetches resources from all potential regional backends. For non-global resource types, this should collect a full list of resources from all the backends, and then be able to filter from the resource region. This is because an aggregator can aggregate resources from multiple regions. In moto, aggregated regions will *assume full aggregation from all resources in all regions for a given resource type*. ... :param resource_id: :param resource_name: :param backend_region: :param resource_region: :return: """ raise NotImplementedError() class base_decorator(object): mock_backend = MockAWS def __init__(self, backends): self.backends = backends def __call__(self, func=None): if self.mock_backend != HttprettyMockAWS and settings.TEST_SERVER_MODE: mocked_backend = ServerModeMockAWS(self.backends) else: mocked_backend = self.mock_backend(self.backends) if func: return mocked_backend(func) else: return mocked_backend class deprecated_base_decorator(base_decorator): mock_backend = HttprettyMockAWS class MotoAPIBackend(BaseBackend): def reset(self): from moto.backends import BACKENDS for name, backends in BACKENDS.items(): if name == "moto_api": continue for region_name, backend in backends.items(): backend.reset() self.__init__() moto_api_backend = MotoAPIBackend()
[]
[]
[ "MOTO_ACCOUNT_ID" ]
[]
["MOTO_ACCOUNT_ID"]
python
1
0
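The moto row reads os.environ.get("MOTO_ACCOUNT_ID", "123456789012"), i.e. an environment read with a hard-coded default. A minimal sketch (illustrative only) of capturing both the variable name and any string-literal default from os.environ.get(...) calls:

import re

# Illustrative pattern: os.environ.get("NAME") or os.environ.get("NAME", "default").
ENV_GET = re.compile(
    r"""os\.environ\.get\(\s*['"]([^'"]+)['"]\s*(?:,\s*['"]([^'"]*)['"])?\s*\)"""
)

def env_reads_with_defaults(source: str) -> list:
    """Return (name, default) pairs; default is None when no literal default is given."""
    return [(name, default if default else None) for name, default in ENV_GET.findall(source)]

# On the models.py content above this includes ("MOTO_ACCOUNT_ID", "123456789012").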
src/main/java/DB.java
import org.sql2o.*;

import java.net.URI;
import java.net.URISyntaxException;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class DB {
  // public static Sql2o sql2o = new Sql2o("jdbc:postgresql://localhost:5432/hair_salon", "abdilatif", "6823");
  private static URI dbUri;
  public static Sql2o sql2o;

  static {
    Logger logger = LoggerFactory.getLogger(DB.class);
    try {
      if (System.getenv("DATABASE_URL") == null) {
        dbUri = new URI("postgres://localhost:5432/hair_salon");
      } else {
        dbUri = new URI(System.getenv("DATABASE_URL"));
      }

      int port = dbUri.getPort();
      String host = dbUri.getHost();
      String path = dbUri.getPath();
      String username = (dbUri.getUserInfo() == null) ? "abdilatif" : dbUri.getUserInfo().split(":")[0];
      String password = (dbUri.getUserInfo() == null) ? "6823" : dbUri.getUserInfo().split(":")[1];

      sql2o = new Sql2o("jdbc:postgresql://" + host + ":" + port + path, username, password);
    } catch (URISyntaxException e) {
      logger.error("Unable to connect to database.");
    }
  }
}
[ "\"DATABASE_URL\"", "\"DATABASE_URL\"" ]
[]
[ "DATABASE_URL" ]
[]
["DATABASE_URL"]
java
1
0
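The DB class above falls back to a local Postgres URI when DATABASE_URL is unset and then splits the URI into host, port, path, and credentials. A hedged sketch of the same fallback-and-parse pattern, written in Python (the dominant language of these rows) with the defaults taken from the file itself:

import os
from urllib.parse import urlparse

def database_settings() -> dict:
    """Parse a Heroku-style DATABASE_URL, falling back to the local default used above."""
    url = urlparse(os.environ.get("DATABASE_URL", "postgres://localhost:5432/hair_salon"))
    return {
        "host": url.hostname,
        "port": url.port,
        "name": url.path.lstrip("/"),
        "user": url.username or "abdilatif",
        "password": url.password or "6823",
    }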
app.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
from logging import StreamHandler

from flask import Flask

from api import create_api

API_VERSION = "v1"


def create_app(username, password, matrikkel_url, matrikkel_user, matrikkel_pass):
    app = Flask(__name__)
    app.config['BASIC_AUTH_FORCE'] = True
    app.config['BASIC_AUTH_USERNAME'] = username
    app.config['BASIC_AUTH_PASSWORD'] = password

    create_api(app, API_VERSION, matrikkel_url, matrikkel_user, matrikkel_pass)

    if not app.debug:
        stream_handler = StreamHandler()
        app.logger.addHandler(stream_handler)

    return app


if __name__ == "__main__":
    port = int(os.environ.get('PORT', 5500))
    username = os.environ["FLOD_MATRIKKEL_USER"]
    password = os.environ["FLOD_MATRIKKEL_PASS"]
    matrikkel_base_url = os.environ["MATRIKKEL_BASE_URL"]
    matrikkel_user = os.environ["MATRIKKEL_USERNAME"]
    matrikkel_password = os.environ["MATRIKKEL_PASSWORD"]

    app = create_app(
        username,
        password,
        matrikkel_base_url,
        matrikkel_user,
        matrikkel_password
    )
    app.run(host='0.0.0.0', port=port, debug=False)
[]
[]
[ "PORT", "MATRIKKEL_PASSWORD", "MATRIKKEL_USERNAME", "FLOD_MATRIKKEL_USER", "FLOD_MATRIKKEL_PASS", "MATRIKKEL_BASE_URL" ]
[]
["PORT", "MATRIKKEL_PASSWORD", "MATRIKKEL_USERNAME", "FLOD_MATRIKKEL_USER", "FLOD_MATRIKKEL_PASS", "MATRIKKEL_BASE_URL"]
python
6
0
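app.py above treats PORT as optional (os.environ.get('PORT', 5500)) but the five FLOD/MATRIKKEL variables as required, since a bare os.environ[...] lookup raises KeyError when the variable is missing. A small sketch of making that requirement explicit with a clearer startup error (hypothetical helper, not part of the original app):

import os

def require_env(name: str) -> str:
    """Return the value of a required environment variable or fail with a readable message."""
    try:
        return os.environ[name]
    except KeyError:
        raise SystemExit(f"Missing required environment variable: {name}")

# Usage mirroring the row above:
# username = require_env("FLOD_MATRIKKEL_USER")
# port = int(os.environ.get("PORT", 5500))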
core/prediction_model/train_model.py
import numpy as np from keras.models import Sequential,Model from keras.layers import Dense, Flatten, Reshape, Input from keras.layers import LSTM from keras.initializers import RandomNormal from keras.callbacks import ModelCheckpoint, EarlyStopping import json import csv import os os.environ["CUDA_VISIBLE_DEVICES"]="1" file_path = "D:\\邢老师数据集\\singlerack_all.csv" server_list = [] conditioner_outlet_temp = [] conditioner_inlet_temp = [] class Server(object): def __init__(self, i): self.id = i self.inlet_temp = [] self.outlet_temp = [] self.cpu = [] self.memory = [] def strided_app(a, L, S): # Window len = L, Stride len/stepsize = S nrows = ((a.size - L) // S) + 1 n = a.strides[0] return np.lib.stride_tricks.as_strided(a, shape=(nrows, L), strides=(S * n, n), writeable=False) def process_data(): for i in range(15): server_list.append(Server(i)) with open(file_path, "r", encoding='utf-8') as datacsv: csvr = csv.reader(datacsv) for row in csvr: i = 1 for server in server_list: server.outlet_temp.append(float(row[i])) i = i + 1 for server in server_list: server.inlet_temp.append(float(row[46 - i])) i = i + 1 conditioner_outlet_temp.append(float(row[i])) i = i + 1 conditioner_inlet_temp.append(float(row[i])) i = i + 6 for server in server_list: # if(server.id<10): # server.cpu.append(float(row[i])/10) # else: # server.cpu.append(float(row[i])) server.cpu.append(float(row[i]) /100) i = i + 6 def format_dataset(): predict_server_inlet_model_x = [] predict_server_inlet_model_y = [] predict_conditioner_inlet_model_x = [] predict_conditioner_inlet_model_y = conditioner_inlet_temp for i in range(len(conditioner_outlet_temp)): x_row = [] y_row = [] x_row.append(conditioner_outlet_temp[i]) for server in server_list: x_row.append(server.cpu[i]) y_row.append(server.inlet_temp[i]) predict_server_inlet_model_x.append(x_row) predict_server_inlet_model_y.append(y_row) predict_conditioner_inlet_model_x.append(y_row) return np.array(predict_server_inlet_model_x), np.array(predict_server_inlet_model_y), np.array( predict_conditioner_inlet_model_x), np.array(predict_conditioner_inlet_model_y) def train_server_model(predict_server_inlet_model_x, predict_server_inlet_model_y): server_model = Sequential() # server_model.add(LSTM(10, activation="relu", input_shape=(train_x.shape[1], train_x.shape[2]), return_sequences=True, # kernel_initializer=RandomNormal())) # server_model.add(Flatten()) server_model.add(Dense(16, activation="relu")) server_model.add(Dense(100, activation="relu")) # server_model.add(Dense(500, activation="relu")) # server_model.add(Dense(100, activation="relu")) server_model.add(Dense(15)) server_model.compile(loss='mean_absolute_error', optimizer='Adadelta') checkpoint1 = ModelCheckpoint( "./model/predict_server_inlet_1ConditionerOutletTemp+15ServerCpuUsage_15out_{val_loss:.2f}.hdf5", monitor='val_loss', verbose=1, save_best_only=True, mode='min') callbacks_list1 = [checkpoint1] history1 = server_model.fit(predict_server_inlet_model_x, predict_server_inlet_model_y, epochs=10000, batch_size=256, validation_split=0.2, verbose=2, callbacks=callbacks_list1) def train_cpu_usage_model(): timestep = 60 predict_horizon=60 train_x = [] train_y = [] for server in server_list: cpu = np.array(server.cpu) data = strided_app(cpu, timestep + +predict_horizon+1, 1) x = data[:, :-1-predict_horizon] y = data[:, -1] if isinstance(train_x,list): train_x = x train_y = y else: train_x = np.concatenate((train_x, x), axis=0) train_y = np.concatenate((train_y, y), axis=0) input=Input(shape=(timestep,)) 
re=Reshape(target_shape=(timestep, 1))(input) lstm=LSTM(120, return_sequences=True)(re) lstm1 = LSTM(120)(lstm) flatten=lstm1 dense=Dense(10,activation='relu')(flatten) output=Dense(1)(dense) model=Model(inputs=[input],outputs=[output]) model.summary() model.compile(loss='mean_absolute_error', optimizer='Adam') checkpoint = ModelCheckpoint("./model/predict_cpu_usage_ts60_ph60_{val_loss:.2f}.hdf5", monitor='val_loss', verbose=1, save_best_only=True, mode='min') early_stopping = EarlyStopping(monitor='val_loss', mode='min', min_delta=0.002, patience=3, verbose=1) callbacks_list = [checkpoint, early_stopping] history = model.fit(train_x, train_y, epochs=10000, batch_size=128, validation_split=0.02, verbose=1, callbacks=callbacks_list) def train_conditioner_model(predict_conditioner_inlet_model_x, predict_conditioner_inlet_model_y): conditioner_model = Sequential() conditioner_model.add(Dense(15, activation="relu")) conditioner_model.add(Dense(100, activation="relu")) conditioner_model.add(Dense(1)) conditioner_model.compile(loss='mse', optimizer='Adadelta') checkpoint2 = ModelCheckpoint("./model/predict_condition_inlet_15ServerInletTempin_1out_{val_loss:.2f}.hdf5", monitor='val_loss', verbose=1, save_best_only=True, mode='min') early_stopping = EarlyStopping(monitor='val_loss', mode='min', min_delta=0.002, patience=10, verbose=1) callbacks_list2 = [checkpoint2] history2 = conditioner_model.fit(predict_conditioner_inlet_model_x, predict_conditioner_inlet_model_y, epochs=10000, batch_size=256, validation_split=0.2, verbose=2, callbacks=callbacks_list2) if __name__ == "__main__": process_data() # predict_server_inlet_model_x,\ # predict_server_inlet_model_y, \ # predict_conditioner_inlet_model_x,\ # predict_conditioner_inlet_model_y = format_dataset() # # train_server_model(predict_server_inlet_model_x, predict_server_inlet_model_y) # train_conditioner_model(predict_conditioner_inlet_model_x,predict_conditioner_inlet_model_y) # res_y=server_model.predict(predict_server_inlet_model_x) # while(True): # pass train_cpu_usage_model()
[]
[]
[ "CUDA_VISIBLE_DEVICES" ]
[]
["CUDA_VISIBLE_DEVICES"]
python
1
0
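In the training script above, CUDA_VISIBLE_DEVICES is assigned after the keras imports. Frameworks generally read this variable when they first initialize the GPU context, so the conventional, safer ordering is to set it before importing them. A sketch of that ordering (illustrative, not a change to the recorded file):

import os

# Set GPU visibility first, so any framework imported afterwards sees it.
os.environ["CUDA_VISIBLE_DEVICES"] = "1"

import numpy as np  # noqa: E402 -- imports intentionally follow the environment setup
# from keras.models import Sequential, Model  # framework imports would go here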
appengine/cloudsql/src/main/java/com/example/managedvms/cloudsql/CloudSqlServlet.java
/** * Copyright 2015 Google Inc. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.example.managedvms.cloudsql; import java.io.IOException; import java.io.PrintWriter; import java.sql.Connection; import java.sql.DriverManager; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Timestamp; import java.util.Date; import javax.servlet.ServletException; import javax.servlet.annotation.WebServlet; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; // [START example] @SuppressWarnings("serial") @WebServlet(name = "cloudsql", value = "/*") public class CloudSqlServlet extends HttpServlet { @Override public void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException, ServletException { final String createTableSql = "CREATE TABLE IF NOT EXISTS visits ( visit_id INT NOT NULL " + "AUTO_INCREMENT, user_ip VARCHAR(46) NOT NULL, timestamp DATETIME NOT NULL, " + "PRIMARY KEY (visit_id) )"; final String createVisitSql = "INSERT INTO visits (user_ip, timestamp) VALUES (?, ?)"; final String selectSql = "SELECT user_ip, timestamp FROM visits ORDER BY timestamp DESC " + "LIMIT 10"; PrintWriter out = resp.getWriter(); resp.setContentType("text/plain"); String url = System.getenv("SQL_DATABASE_URL"); try (Connection conn = DriverManager.getConnection(url); PreparedStatement statementCreateVisit = conn.prepareStatement(createVisitSql)) { conn.createStatement().executeUpdate(createTableSql); statementCreateVisit.setString(1, req.getRemoteAddr()); statementCreateVisit.setTimestamp(2, new Timestamp(new Date().getTime())); statementCreateVisit.executeUpdate(); try (ResultSet rs = conn.prepareStatement(selectSql).executeQuery()) { out.print("Last 10 visits:\n"); while (rs.next()) { String userIp = rs.getString("user_ip"); String timeStamp = rs.getString("timestamp"); out.print("Time: " + timeStamp + " Addr: " + userIp + "\n"); } } } catch (SQLException e) { throw new ServletException("SQL error", e); } } } // [END example]
[ "\"SQL_DATABASE_URL\"" ]
[]
[ "SQL_DATABASE_URL" ]
[]
["SQL_DATABASE_URL"]
java
1
0
features/environment.py
import django
import os

from django.core.management import call_command
from splinter.browser import Browser

from features.helpers import initiate_test_data
from peeldb.models import User

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "jobsp.settings_local")
django.setup()


def before_all(context):
    User.objects.filter(email="[email protected]").delete()
    context.browser = Browser("firefox")
    context.server_url = "http://test.peeljobs.com:8000"


def after_all(context):
    context.browser.quit()
    context.browser = None
[]
[]
[]
[]
[]
python
0
0
src/webui/_project.py
import logging import json import os import shutil import subprocess import sys # PyArmor in the parent path PYARMOR_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) os.environ['PYARMOR_PATH'] = PYARMOR_PATH sys.path.insert(0, PYARMOR_PATH) from config import version, config_filename, capsule_filename from project import Project project_base_path = os.path.join(PYARMOR_PATH, 'projects') project_index_name = 'index.json' project_capsule_name = capsule_filename project_config_name = config_filename def call_armor(args): p = subprocess.Popen([sys.executable, 'pyarmor.py'] + list(args), cwd=PYARMOR_PATH) p.wait() if p.returncode != 0: raise RuntimeError('Call pyarmor failed, see the details in console window') def _check_trial_license(): filename = os.path.join(PYARMOR_PATH, 'license.lic') if not os.path.exists(filename): shutil.copy(os.path.join(PYARMOR_PATH, 'license.tri'), filename) return os.path.getsize(filename) == 256 def _check_project_index(): filename = os.path.join(project_base_path, project_index_name) if not os.path.exists(filename): if not os.path.exists(project_base_path): os.makedirs(project_base_path) with open(filename, 'w') as fp: json.dump(dict(counter=0, projects={}), fp) return filename def _create_default_project(**kwargs): return Project(**kwargs) def newProject(args=None): ''' >>> p = newProject() >>> p['message'] 'Project has been created' ''' filename = _check_project_index() with open(filename, 'r') as fp: pindexes = json.load(fp) counter = pindexes['counter'] + 1 name = 'project-%d' % counter path = os.path.join(project_base_path, name) if os.path.exists(path): logging.warning('Project path %s has been exists', path) else: logging.info('Make project path %s', path) os.mkdir(path) args = ['init', '--src', path, path] call_pyarmor(args) pindexes['projects'][name] = os.path.abspath(path) pindexes['counter'] = counter with open(filename, 'w') as fp: json.dump(pindexes, fp) project = Project() project.open(path) project['name'] = name project['title'] = name project['output'] = 'dist' return dict(project=project, message='Project has been created') def updateProject(args): ''' >>> p = newProject()['project'] >>> updateProject(title='My Project') 'Update project OK' ''' name = args['name'] path = os.path.join(project_base_path, name) project = Project() project.open(path) if not args['output']: args['output'] = 'dist' project._update(args) project.save(path) return 'Update project OK' def buildProject(args): ''' >>> p = newProject()['project'] >>> p['src'] = '' >>> p['output'] = os.path.join('projects', 'build') >>> buildProject(p) 'Build project OK.' ''' name = args['name'] path = os.path.join(project_base_path, name) call_pyarmor(['build', path]) return 'Build project OK.' 
def removeProject(args): ''' >>> p1 = newProject()['project'] >>> m = removeProject(p1) >>> m == 'Remove project %s OK' % p1['name'] True ''' filename = _check_project_index() with open(filename, 'r') as fp: pindexes = json.load(fp) name = args['name'] try: pindexes['projects'].pop(name) except KeyError: pass with open(filename, 'w') as fp: json.dump(pindexes, fp) shutil.rmtree(os.path.join(project_base_path, name)) return 'Remove project %s OK' % name def queryProject(args=None): ''' >>> r = queryProject() >>> len(r) > 1 True ''' if args is not None and args.get('name') is not None: name = args.get('name') path = os.path.join(project_base_path, name) project = Project() project.open(path) return dict(project=project, message='Got project %s' % name) filename = _check_project_index() with open(filename, 'r') as fp: pindexes = json.load(fp) result = [] for name, filename in pindexes['projects'].items(): path = os.path.join(project_base_path, name) project = Project() project.open(path) item = dict(name=name, title=project['title']) result.append(item) return result def queryVersion(args=None): ''' >>> r = queryVersion() >>> r['version'][0] == '3' True >>> r['rcode'] == '' True ''' rcode = '' if _check_trial_license() else 'PyArmor' return dict(version=version, rcode=rcode) def newLicense(args): ''' >>> p = newProject()['project'] >>> p['rcode'] = 'Curstomer-Tom' >>> a1 = newLicense(p) >>> p['expired'] = '2017-11-20' >>> a2 = newLicense(p) ''' name = args['name'] path = os.path.join(project_base_path, name) title = args['rcode'].strip() params = ['licenses', '--project', path] for opt in ('expired', 'bind_disk', 'bind_ipv4', 'bind_mac'): if args[opt]: params.extend(['--%s' % opt.replace('_', '-'), args[opt]]) params.append(title) call_pyarmor(params) output = os.path.join(path, 'licenses', title, 'license.lic') return dict(title=title, filename=output) def obfuscateScripts(args): params = ['obfuscate'] for opt in ['output']: if args[opt]: params.extend(['--%s' % opt, args[opt]]) params.append(args['entry']) call_armor(params) output = args['output'] if args['output'] \ else os.path.join(PYARMOR_PATH, 'dist') return dict(output=output) def generateLicenses(args): params = ['licenses', '--output', PYARMOR_PATH] for opt in ('expired', 'bind_disk', 'bind_ipv4', 'bind_mac'): if args[opt]: params.extend(['--%s' % opt.replace('_', '-'), args[opt]]) rcode = args['rcode'].strip() params.append(rcode) call_armor(params) return dict(output=os.path.join( PYARMOR_PATH, 'licenses', rcode, 'license.lic')) def packObfuscatedScripts(args): params = ['pack', '--type', args['type'], args['entry']] if args['output']: params[3:3] = ['--output', args['output']] if args['setup']: params[3:3] = ['--setup', args['setup']] call_armor(params) return dict(output=args['output']) if __name__ == '__main__': import doctest doctest.testmod()
[]
[]
[ "PYARMOR_PATH" ]
[]
["PYARMOR_PATH"]
python
1
0
manage.py
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys


def main():
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djangoreact.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
[]
[]
[]
[]
[]
python
0
0
app/app/settings.py
""" Django settings for app project. Generated by 'django-admin startproject' using Django 2.1.15. For more information on this file, see https://docs.djangoproject.com/en/2.1/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/2.1/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'lxb!(o00)qtw0p+6q_vs$01&wtsw(m*s!ol0_6^v*flo^!&ek&' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'rest_framework', 'rest_framework.authtoken', 'core', 'user', 'recipe', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'app.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'app.wsgi.application' # Database # https://docs.djangoproject.com/en/2.1/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql', 'HOST': os.environ.get('DB_HOST'), 'NAME': os.environ.get('DB_NAME'), 'USER': os.environ.get('DB_USER'), 'PASSWORD': os.environ.get('DB_PASS'), } } # Password validation # https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/2.1/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.1/howto/static-files/ STATIC_URL = '/static/' MEDIA_URL = '/media/' MEDIA_ROOT = '/vol/web/media' STATIC_ROOT = '/vol/web/static' AUTH_USER_MODEL = 'core.User'
[]
[]
[ "DB_PASS", "DB_USER", "DB_NAME", "DB_HOST" ]
[]
["DB_PASS", "DB_USER", "DB_NAME", "DB_HOST"]
python
4
0
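The Django settings module above reads DB_HOST, DB_NAME, DB_USER, and DB_PASS with os.environ.get(...) and no default, so a missing variable silently becomes None and only surfaces when the first database connection fails. A hedged sketch (not part of the original settings) of validating them up front:

import os

REQUIRED_DB_VARS = ("DB_HOST", "DB_NAME", "DB_USER", "DB_PASS")

missing = [name for name in REQUIRED_DB_VARS if not os.environ.get(name)]
if missing:
    raise RuntimeError(f"Database settings incomplete, missing: {', '.join(missing)}")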
frappe/utils/__init__.py
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # License: MIT. See LICENSE import functools import hashlib import io import json import os import re import sys import traceback import typing from email.header import decode_header, make_header from email.utils import formataddr, parseaddr from gzip import GzipFile from typing import Generator, Iterable from urllib.parse import quote, urlparse from werkzeug.test import Client from redis.exceptions import ConnectionError from collections.abc import MutableMapping, MutableSequence, Sequence import frappe # utility functions like cint, int, flt, etc. from frappe.utils.data import * from frappe.utils.html_utils import sanitize_html default_fields = ['doctype', 'name', 'owner', 'creation', 'modified', 'modified_by', 'parent', 'parentfield', 'parenttype', 'idx', 'docstatus'] def get_fullname(user=None): """get the full name (first name + last name) of the user from User""" if not user: user = frappe.session.user if not hasattr(frappe.local, "fullnames"): frappe.local.fullnames = {} if not frappe.local.fullnames.get(user): p = frappe.db.get_value("User", user, ["first_name", "last_name"], as_dict=True) if p: frappe.local.fullnames[user] = " ".join(filter(None, [p.get('first_name'), p.get('last_name')])) or user else: frappe.local.fullnames[user] = user return frappe.local.fullnames.get(user) def get_email_address(user=None): """get the email address of the user from User""" if not user: user = frappe.session.user return frappe.db.get_value("User", user, "email") def get_formatted_email(user, mail=None): """get Email Address of user formatted as: `John Doe <[email protected]>`""" fullname = get_fullname(user) method = get_hook_method('get_sender_details') if method: sender_name, mail = method() # if method exists but sender_name is "" fullname = sender_name or fullname if not mail: mail = get_email_address(user) or validate_email_address(user) if not mail: return '' else: return cstr(make_header(decode_header(formataddr((fullname, mail))))) def extract_email_id(email): """fetch only the email part of the Email Address""" email_id = parse_addr(email)[1] if email_id and isinstance(email_id, str) and not isinstance(email_id, str): email_id = email_id.decode("utf-8", "ignore") return email_id def validate_phone_number(phone_number, throw=False): """Returns True if valid phone number""" if not phone_number: return False phone_number = phone_number.strip() match = re.match(r"([0-9\ \+\_\-\,\.\*\#\(\)]){1,20}$", phone_number) if not match and throw: frappe.throw(frappe._("{0} is not a valid Phone Number").format(phone_number), frappe.InvalidPhoneNumberError) return bool(match) def validate_name(name, throw=False): """Returns True if the name is valid valid names may have unicode and ascii characters, dash, quotes, numbers anything else is considered invalid """ if not name: return False name = name.strip() match = re.match(r"^[\w][\w\'\-]*( \w[\w\'\-]*)*$", name) if not match and throw: frappe.throw(frappe._("{0} is not a valid Name").format(name), frappe.InvalidNameError) return bool(match) def validate_email_address(email_str, throw=False): """Validates the email string""" email = email_str = (email_str or "").strip() def _check(e): _valid = True if not e: _valid = False if 'undisclosed-recipient' in e: return False elif " " in e and "<" not in e: # example: "[email protected] [email protected]" will return "[email protected]" after parseaddr!!! 
_valid = False else: email_id = extract_email_id(e) match = re.match( r"[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?", email_id.lower() ) if email_id else None if not match: _valid = False else: matched = match.group(0) if match: match = matched==email_id.lower() if not _valid: if throw: invalid_email = frappe.utils.escape_html(e) frappe.throw(frappe._("{0} is not a valid Email Address").format(invalid_email), frappe.InvalidEmailAddressError) return None else: return matched out = [] for e in email_str.split(','): email = _check(e.strip()) if email: out.append(email) return ', '.join(out) def split_emails(txt): email_list = [] # emails can be separated by comma or newline s = re.sub(r'[\t\n\r]', ' ', cstr(txt)) for email in re.split(r'[,\n](?=(?:[^"]|"[^"]*")*$)', s): email = strip(cstr(email)) if email: email_list.append(email) return email_list def validate_url(txt, throw=False, valid_schemes=None): """ Checks whether `txt` has a valid URL string Parameters: throw (`bool`): throws a validationError if URL is not valid valid_schemes (`str` or `list`): if provided checks the given URL's scheme against this Returns: bool: if `txt` represents a valid URL """ url = urlparse(txt) is_valid = bool(url.netloc) # Handle scheme validation if isinstance(valid_schemes, str): is_valid = is_valid and (url.scheme == valid_schemes) elif isinstance(valid_schemes, (list, tuple, set)): is_valid = is_valid and (url.scheme in valid_schemes) if not is_valid and throw: frappe.throw( frappe._("'{0}' is not a valid URL").format(frappe.bold(txt)) ) return is_valid def random_string(length): """generate a random string""" import string from random import choice return ''.join(choice(string.ascii_letters + string.digits) for i in range(length)) def has_gravatar(email): '''Returns gravatar url if user has set an avatar at gravatar.com''' import requests if (frappe.flags.in_import or frappe.flags.in_install or frappe.flags.in_test): # no gravatar if via upload # since querying gravatar for every item will be slow return '' hexdigest = hashlib.md5(frappe.as_unicode(email).encode('utf-8')).hexdigest() gravatar_url = "https://secure.gravatar.com/avatar/{hash}?d=404&s=200".format(hash=hexdigest) try: res = requests.get(gravatar_url) if res.status_code==200: return gravatar_url else: return '' except requests.exceptions.ConnectionError: return '' def get_gravatar_url(email): return "https://secure.gravatar.com/avatar/{hash}?d=mm&s=200".format(hash=hashlib.md5(email.encode('utf-8')).hexdigest()) def get_gravatar(email): from frappe.utils.identicon import Identicon gravatar_url = has_gravatar(email) if not gravatar_url: gravatar_url = Identicon(email).base64() return gravatar_url def get_traceback() -> str: """ Returns the traceback of the Exception """ exc_type, exc_value, exc_tb = sys.exc_info() if not any([exc_type, exc_value, exc_tb]): return "" trace_list = traceback.format_exception(exc_type, exc_value, exc_tb) bench_path = get_bench_path() + "/" return "".join(cstr(t) for t in trace_list).replace(bench_path, "") def log(event, details): frappe.logger().info(details) def dict_to_str(args, sep = '&'): """ Converts a dictionary to URL """ t = [] for k in list(args): t.append(str(k)+'='+quote(str(args[k] or ''))) return sep.join(t) def list_to_str(seq, sep = ', '): """Convert a sequence into a string using seperator. Same as str.join, but does type conversion and strip extra spaces. 
""" return sep.join(map(str.strip, map(str, seq))) # Get Defaults # ============================================================================== def get_defaults(key=None): """ Get dictionary of default values from the defaults, or a value if key is passed """ return frappe.db.get_defaults(key) def set_default(key, val): """ Set / add a default value to defaults` """ return frappe.db.set_default(key, val) def remove_blanks(d): """ Returns d with empty ('' or None) values stripped """ empty_keys = [] for key in d: if d[key]=='' or d[key]==None: # del d[key] raises runtime exception, using a workaround empty_keys.append(key) for key in empty_keys: del d[key] return d def strip_html_tags(text): """Remove html tags from text""" return re.sub(r"\<[^>]*\>", "", text) def get_file_timestamp(fn): """ Returns timestamp of the given file """ from frappe.utils import cint try: return str(cint(os.stat(fn).st_mtime)) except OSError as e: if e.args[0]!=2: raise else: return None # to be deprecated def make_esc(esc_chars): """ Function generator for Escaping special characters """ return lambda s: ''.join('\\' + c if c in esc_chars else c for c in s) # esc / unescape characters -- used for command line def esc(s, esc_chars): """ Escape special characters """ if not s: return "" for c in esc_chars: esc_str = '\\' + c s = s.replace(c, esc_str) return s def unesc(s, esc_chars): """ UnEscape special characters """ for c in esc_chars: esc_str = '\\' + c s = s.replace(esc_str, c) return s def execute_in_shell(cmd, verbose=0, low_priority=False): # using Popen instead of os.system - as recommended by python docs import tempfile from subprocess import Popen with tempfile.TemporaryFile() as stdout: with tempfile.TemporaryFile() as stderr: kwargs = { "shell": True, "stdout": stdout, "stderr": stderr } if low_priority: kwargs["preexec_fn"] = lambda: os.nice(10) p = Popen(cmd, **kwargs) p.wait() stdout.seek(0) out = stdout.read() stderr.seek(0) err = stderr.read() if verbose: if err: print(err) if out: print(out) return err, out def get_path(*path, **kwargs): base = kwargs.get('base') if not base: base = frappe.local.site_path return os.path.join(base, *path) def get_site_base_path(): return frappe.local.site_path def get_site_path(*path): return get_path(base=get_site_base_path(), *path) def get_files_path(*path, **kwargs): return get_site_path("private" if kwargs.get("is_private") else "public", "files", *path) def get_bench_path(): return os.path.realpath(os.path.join(os.path.dirname(frappe.__file__), '..', '..', '..')) def get_bench_id(): return frappe.get_conf().get('bench_id', get_bench_path().strip('/').replace('/', '-')) def get_site_id(site=None): return f"{site or frappe.local.site}@{get_bench_id()}" def get_backups_path(): return get_site_path("private", "backups") def get_request_site_address(full_address=False): return get_url(full_address=full_address) def get_site_url(site): return 'http://{site}:{port}'.format( site=site, port=frappe.get_conf(site).webserver_port ) def encode_dict(d, encoding="utf-8"): for key in d: if isinstance(d[key], str) and isinstance(d[key], str): d[key] = d[key].encode(encoding) return d def decode_dict(d, encoding="utf-8"): for key in d: if isinstance(d[key], str) and not isinstance(d[key], str): d[key] = d[key].decode(encoding, "ignore") return d @functools.lru_cache() def get_site_name(hostname): return hostname.split(':')[0] def get_disk_usage(): """get disk usage of files folder""" files_path = get_files_path() if not os.path.exists(files_path): return 0 err, out = 
execute_in_shell("du -hsm {files_path}".format(files_path=files_path)) return cint(out.split("\n")[-2].split("\t")[0]) def touch_file(path): with open(path, 'a'): os.utime(path, None) return path def get_test_client(): from frappe.app import application return Client(application) def get_hook_method(hook_name, fallback=None): method = frappe.get_hooks().get(hook_name) if method: method = frappe.get_attr(method[0]) return method if fallback: return fallback def call_hook_method(hook, *args, **kwargs): out = None for method_name in frappe.get_hooks(hook): out = out or frappe.get_attr(method_name)(*args, **kwargs) return out def is_cli() -> bool: """Returns True if current instance is being run via a terminal """ invoked_from_terminal = False try: invoked_from_terminal = bool(os.get_terminal_size()) except Exception: invoked_from_terminal = sys.stdin.isatty() return invoked_from_terminal def update_progress_bar(txt, i, l): if os.environ.get("CI"): if i == 0: sys.stdout.write(txt) sys.stdout.write(".") sys.stdout.flush() return if not getattr(frappe.local, 'request', None) or is_cli(): lt = len(txt) try: col = 40 if os.get_terminal_size().columns > 80 else 20 except OSError: # in case function isn't being called from a terminal col = 40 if lt < 36: txt = txt + " "*(36-lt) complete = int(float(i+1) / l * col) completion_bar = ("=" * complete).ljust(col, ' ') percent_complete = str(int(float(i+1) / l * 100)) sys.stdout.write("\r{0}: [{1}] {2}%".format(txt, completion_bar, percent_complete)) sys.stdout.flush() def get_html_format(print_path): html_format = None if os.path.exists(print_path): with open(print_path, "r") as f: html_format = f.read() for include_directive, path in re.findall("""({% include ['"]([^'"]*)['"] %})""", html_format): for app_name in frappe.get_installed_apps(): include_path = frappe.get_app_path(app_name, *path.split(os.path.sep)) if os.path.exists(include_path): with open(include_path, "r") as f: html_format = html_format.replace(include_directive, f.read()) break return html_format def is_markdown(text): if "<!-- markdown -->" in text: return True elif "<!-- html -->" in text: return False else: return not re.search(r"<p[\s]*>|<br[\s]*>", text) def get_sites(sites_path=None): if not sites_path: sites_path = getattr(frappe.local, 'sites_path', None) or '.' 
sites = [] for site in os.listdir(sites_path): path = os.path.join(sites_path, site) if (os.path.isdir(path) and not os.path.islink(path) and os.path.exists(os.path.join(path, 'site_config.json'))): # is a dir and has site_config.json sites.append(site) return sorted(sites) def get_request_session(max_retries=5): import requests from urllib3.util import Retry session = requests.Session() http_adapter = requests.adapters.HTTPAdapter(max_retries=Retry(total=max_retries, status_forcelist=[500])) session.mount("http://", http_adapter) session.mount("https://", http_adapter) return session def markdown(text, sanitize=True, linkify=True): html = text if is_html(text) else frappe.utils.md_to_html(text) if sanitize: html = html.replace("<!-- markdown -->", "") html = sanitize_html(html, linkify=linkify) return html def sanitize_email(emails): sanitized = [] for e in split_emails(emails): if not validate_email_address(e): continue full_name, email_id = parse_addr(e) sanitized.append(formataddr((full_name, email_id))) return ", ".join(sanitized) def parse_addr(email_string): """ Return email_id and user_name based on email string Raise error if email string is not valid """ name, email = parseaddr(email_string) if check_format(email): name = get_name_from_email_string(email_string, email, name) return (name, email) else: email_regex = re.compile(r"([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+)") email_list = re.findall(email_regex, email_string) if len(email_list) > 0 and check_format(email_list[0]): #take only first email address email = email_list[0] name = get_name_from_email_string(email_string, email, name) return (name, email) return (None, email) def check_format(email_id): """ Check if email_id is valid. valid email:[email protected] String check ensures that email_id contains both '.' and '@' and index of '@' is less than '.' 
""" is_valid = False try: pos = email_id.rindex("@") is_valid = pos > 0 and (email_id.rindex(".") > pos) and (len(email_id) - pos > 4) except Exception: #print(e) pass return is_valid def get_name_from_email_string(email_string, email_id, name): name = email_string.replace(email_id, '') name = re.sub(r'[^A-Za-z0-9\u00C0-\u024F\/\_\' ]+', '', name).strip() if not name: name = email_id return name def get_installed_apps_info(): out = [] from frappe.utils.change_log import get_versions for app, version_details in get_versions().items(): out.append({ 'app_name': app, 'version': version_details.get('branch_version') or version_details.get('version'), 'branch': version_details.get('branch') }) return out def get_site_info(): from frappe.core.doctype.user.user import STANDARD_USERS from frappe.email.queue import get_emails_sent_this_month from frappe.utils.user import get_system_managers # only get system users users = frappe.get_all('User', filters={'user_type': 'System User', 'name': ('not in', STANDARD_USERS)}, fields=['name', 'enabled', 'last_login', 'last_active', 'language', 'time_zone']) system_managers = get_system_managers(only_name=True) for u in users: # tag system managers u.is_system_manager = 1 if u.name in system_managers else 0 u.full_name = get_fullname(u.name) u.email = u.name del u['name'] system_settings = frappe.db.get_singles_dict('System Settings') space_usage = frappe._dict((frappe.local.conf.limits or {}).get('space_usage', {})) kwargs = {"fields": ["user", "creation", "full_name"], "filters":{"Operation": "Login", "Status": "Success"}, "limit": "10"} site_info = { 'installed_apps': get_installed_apps_info(), 'users': users, 'country': system_settings.country, 'language': system_settings.language or 'english', 'time_zone': system_settings.time_zone, 'setup_complete': cint(system_settings.setup_complete), 'scheduler_enabled': system_settings.enable_scheduler, # usage 'emails_sent': get_emails_sent_this_month(), 'space_used': flt((space_usage.total or 0) / 1024.0, 2), 'database_size': space_usage.database_size, 'backup_size': space_usage.backup_size, 'files_size': space_usage.files_size, 'last_logins': frappe.get_all("Activity Log", **kwargs) } # from other apps for method_name in frappe.get_hooks('get_site_info'): site_info.update(frappe.get_attr(method_name)(site_info) or {}) # dumps -> loads to prevent datatype conflicts return json.loads(frappe.as_json(site_info)) def parse_json(val): """ Parses json if string else return """ if isinstance(val, str): val = json.loads(val) if isinstance(val, dict): val = frappe._dict(val) return val def get_db_count(*args): """ Pass a doctype or a series of doctypes to get the count of docs in them Parameters: *args: Variable length argument list of doctype names whose doc count you need Returns: dict: A dict with the count values. 
Example: via terminal: bench --site erpnext.local execute frappe.utils.get_db_count --args "['DocType', 'Communication']" """ db_count = {} for doctype in args: db_count[doctype] = frappe.db.count(doctype) return json.loads(frappe.as_json(db_count)) def call(fn, *args, **kwargs): """ Pass a doctype or a series of doctypes to get the count of docs in them Parameters: fn: frappe function to be called Returns: based on the function you call: output of the function you call Example: via terminal: bench --site erpnext.local execute frappe.utils.call --args '''["frappe.get_all", "Activity Log"]''' --kwargs '''{"fields": ["user", "creation", "full_name"], "filters":{"Operation": "Login", "Status": "Success"}, "limit": "10"}''' """ return json.loads(frappe.as_json(frappe.call(fn, *args, **kwargs))) # Following methods are aken as-is from Python 3 codebase # since gzip.compress and gzip.decompress are not available in Python 2.7 def gzip_compress(data, compresslevel=9): """Compress data in one shot and return the compressed string. Optional argument is the compression level, in range of 0-9. """ buf = io.BytesIO() with GzipFile(fileobj=buf, mode='wb', compresslevel=compresslevel) as f: f.write(data) return buf.getvalue() def gzip_decompress(data): """Decompress a gzip compressed string in one shot. Return the decompressed string. """ with GzipFile(fileobj=io.BytesIO(data)) as f: return f.read() def get_safe_filters(filters): try: filters = json.loads(filters) if isinstance(filters, (int, float)): filters = frappe.as_unicode(filters) except (TypeError, ValueError): # filters are not passed, not json pass return filters def create_batch(iterable: Iterable, size: int) -> Generator[Iterable, None, None]: """Convert an iterable to multiple batches of constant size of batch_size Args: iterable (Iterable): Iterable object which is subscriptable size (int): Maximum size of batches to be generated Yields: Generator[List]: Batched iterable of maximum length `size` """ total_count = len(iterable) for i in range(0, total_count, size): yield iterable[i : min(i + size, total_count)] def set_request(**kwargs): from werkzeug.test import EnvironBuilder from werkzeug.wrappers import Request builder = EnvironBuilder(**kwargs) frappe.local.request = Request(builder.get_environ()) def get_html_for_route(route): from frappe.website.serve import get_response set_request(method='GET', path=route) response = get_response() html = frappe.safe_decode(response.get_data()) return html def get_file_size(path, format=False): num = os.path.getsize(path) if not format: return num suffix = 'B' for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']: if abs(num) < 1024: return "{0:3.1f}{1}{2}".format(num, unit, suffix) num /= 1024 return "{0:.1f}{1}{2}".format(num, 'Yi', suffix) def get_build_version(): try: return str(os.path.getmtime(os.path.join(frappe.local.sites_path, '.build'))) except OSError: # .build can sometimes not exist # this is not a major problem so send fallback return frappe.utils.random_string(8) def get_assets_json(): if not hasattr(frappe.local, "assets_json"): cache = frappe.cache() # using .get instead of .get_value to avoid pickle.loads try: assets_json = cache.get("assets_json") except ConnectionError: assets_json = None # if value found, decode it if assets_json is not None: try: assets_json = assets_json.decode('utf-8') except (UnicodeDecodeError, AttributeError): assets_json = None if not assets_json: assets_json = frappe.read_file("assets/assets.json") cache.set_value("assets_json", assets_json, shared=True) 
frappe.local.assets_json = frappe.safe_decode(assets_json) return frappe.parse_json(frappe.local.assets_json) def get_bench_relative_path(file_path): """Fixes paths relative to the bench root directory if exists and returns the absolute path Args: file_path (str, Path): Path of a file that exists on the file system Returns: str: Absolute path of the file_path """ if not os.path.exists(file_path): base_path = '..' elif file_path.startswith(os.sep): base_path = os.sep else: base_path = '.' file_path = os.path.join(base_path, file_path) if not os.path.exists(file_path): print('Invalid path {0}'.format(file_path[3:])) sys.exit(1) return os.path.abspath(file_path) def groupby_metric(iterable: typing.Dict[str, list], key: str): """ Group records by a metric. Usecase: Lets assume we got country wise players list with the ranking given for each player(multiple players in a country can have same ranking aswell). We can group the players by ranking(can be any other metric) using this function. >>> d = { 'india': [{'id':1, 'name': 'iplayer-1', 'ranking': 1}, {'id': 2, 'ranking': 1, 'name': 'iplayer-2'}, {'id': 2, 'ranking': 2, 'name': 'iplayer-3'}], 'Aus': [{'id':1, 'name': 'aplayer-1', 'ranking': 1}, {'id': 2, 'ranking': 1, 'name': 'aplayer-2'}, {'id': 2, 'ranking': 2, 'name': 'aplayer-3'}] } >>> groupby(d, key='ranking') {1: {'Aus': [{'id': 1, 'name': 'aplayer-1', 'ranking': 1}, {'id': 2, 'name': 'aplayer-2', 'ranking': 1}], 'india': [{'id': 1, 'name': 'iplayer-1', 'ranking': 1}, {'id': 2, 'name': 'iplayer-2', 'ranking': 1}]}, 2: {'Aus': [{'id': 2, 'name': 'aplayer-3', 'ranking': 2}], 'india': [{'id': 2, 'name': 'iplayer-3', 'ranking': 2}]}} """ records = {} for category, items in iterable.items(): for item in items: records.setdefault(item[key], {}).setdefault(category, []).append(item) return records def get_table_name(table_name: str) -> str: return f"tab{table_name}" if not table_name.startswith("__") else table_name def squashify(what): if isinstance(what, Sequence) and len(what) == 1: return what[0] return what def safe_json_loads(*args): results = [] for arg in args: try: arg = json.loads(arg) except Exception: pass results.append(arg) return squashify(results) def dictify(arg): if isinstance(arg, MutableSequence): for i, a in enumerate(arg): arg[i] = dictify(a) elif isinstance(arg, MutableMapping): arg = frappe._dict(arg) return arg
[]
[]
[ "CI" ]
[]
["CI"]
python
1
0
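The create_batch helper in the frappe utilities above slices any subscriptable iterable into fixed-size chunks so a large workload can be processed one piece at a time. A minimal standalone sketch of the same idea, with no frappe dependency (the example data and chunk size are illustrative only):

from typing import Generator, Sequence

def create_batch(seq: Sequence, size: int) -> Generator[Sequence, None, None]:
    # Yield successive slices of seq, each at most size items long.
    if size <= 0:
        raise ValueError("size must be a positive integer")
    for start in range(0, len(seq), size):
        yield seq[start:start + size]

# Example: process 2500 records in chunks of 500 to keep each unit of work small.
rows = list(range(2500))
for batch in create_batch(rows, 500):
    print(len(batch))  # prints 500 five times

Slicing past the end of a sequence is safe in Python, which is why the final chunk can simply come out shorter than size.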
cmd/nodejs/yarn/main.go
// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Implements nodejs/yarn buildpack. // The npm buildpack installs dependencies using yarn and installs yarn itself if not present. package main import ( "fmt" "os" "path/filepath" "github.com/GoogleCloudPlatform/buildpacks/pkg/cache" "github.com/GoogleCloudPlatform/buildpacks/pkg/devmode" gcp "github.com/GoogleCloudPlatform/buildpacks/pkg/gcpbuildpack" "github.com/GoogleCloudPlatform/buildpacks/pkg/nodejs" "github.com/buildpacks/libcnb" ) const ( cacheTag = "prod dependencies" yarnVersion = "1.22.5" yarnURL = "https://github.com/yarnpkg/yarn/releases/download/v%[1]s/yarn-v%[1]s.tar.gz" versionKey = "version" ) func main() { gcp.Main(detectFn, buildFn) } func detectFn(ctx *gcp.Context) (gcp.DetectResult, error) { if !ctx.FileExists(nodejs.YarnLock) { return gcp.OptOutFileNotFound("yarn.lock"), nil } if !ctx.FileExists("package.json") { return gcp.OptOutFileNotFound("package.json"), nil } return gcp.OptIn("found yarn.lock and package.json"), nil } func buildFn(ctx *gcp.Context) error { if err := installYarn(ctx); err != nil { return fmt.Errorf("installing Yarn: %w", err) } ml := ctx.Layer("yarn", gcp.BuildLayer, gcp.CacheLayer) nm := filepath.Join(ml.Path, "node_modules") ctx.RemoveAll("node_modules") nodeEnv := nodejs.NodeEnv() cached, err := nodejs.CheckCache(ctx, ml, cache.WithStrings(nodeEnv), cache.WithFiles("package.json", nodejs.YarnLock)) if err != nil { return fmt.Errorf("checking cache: %w", err) } if cached { ctx.CacheHit(cacheTag) // Restore cached node_modules. ctx.Exec([]string{"cp", "--archive", nm, "node_modules"}, gcp.WithUserTimingAttribution) } else { ctx.CacheMiss(cacheTag) // Clear cached node_modules to ensure we don't end up with outdated dependencies. ctx.ClearLayer(ml) } // Always run yarn install to run preinstall/postinstall scripts. cmd := []string{"yarn", "install", "--non-interactive"} if lf := nodejs.LockfileFlag(ctx); lf != "" { cmd = append(cmd, lf) } ctx.Exec(cmd, gcp.WithEnv("NODE_ENV="+nodeEnv), gcp.WithUserAttribution) if !cached { // Ensure node_modules exists even if no dependencies were installed. ctx.MkdirAll("node_modules", 0755) ctx.Exec([]string{"cp", "--archive", "node_modules", nm}, gcp.WithUserTimingAttribution) } el := ctx.Layer("env", gcp.BuildLayer, gcp.LaunchLayer) el.SharedEnvironment.Default("PATH", filepath.Join(ctx.ApplicationRoot(), "node_modules", ".bin")) el.SharedEnvironment.Default("NODE_ENV", nodeEnv) // Configure the entrypoint for production. cmd = []string{"yarn", "run", "start"} if !devmode.Enabled(ctx) { ctx.AddWebProcess(cmd) return nil } // Configure the entrypoint and metadata for dev mode. devmode.AddFileWatcherProcess(ctx, devmode.Config{ RunCmd: cmd, Ext: devmode.NodeWatchedExtensions, }) devmode.AddSyncMetadata(ctx, devmode.NodeSyncRules) return nil } func installYarn(ctx *gcp.Context) error { // Skip installation if yarn is already installed. 
if result := ctx.Exec([]string{"bash", "-c", "command -v yarn || true"}); result.Stdout != "" { ctx.Debugf("Yarn is already installed, skipping installation.") return nil } yarnLayer := "yarn_install" yrl := ctx.Layer(yarnLayer, gcp.BuildLayer, gcp.CacheLayer, gcp.LaunchLayer) // Check the metadata in the cache layer to determine if we need to proceed. metaVersion := ctx.GetMetadata(yrl, versionKey) if yarnVersion == metaVersion { ctx.CacheHit(yarnLayer) ctx.Logf("Yarn cache hit, skipping installation.") } else { ctx.CacheMiss(yarnLayer) ctx.ClearLayer(yrl) // Download and install yarn in layer. ctx.Logf("Installing Yarn v%s", yarnVersion) archiveURL := fmt.Sprintf(yarnURL, yarnVersion) command := fmt.Sprintf("curl --fail --show-error --silent --location --retry 3 %s | tar xz --directory %s --strip-components=1", archiveURL, yrl.Path) ctx.Exec([]string{"bash", "-c", command}, gcp.WithUserAttribution) } // Store layer flags and metadata. ctx.SetMetadata(yrl, versionKey, yarnVersion) ctx.Setenv("PATH", filepath.Join(yrl.Path, "bin")+":"+os.Getenv("PATH")) ctx.AddBOMEntry(libcnb.BOMEntry{ Name: yarnLayer, Metadata: map[string]interface{}{"version": yarnVersion}, }) return nil }
[ "\"PATH\"" ]
[]
[ "PATH" ]
[]
["PATH"]
go
1
0
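The yarn buildpack above treats node_modules as reusable only when nothing that feeds the install has changed: it derives a cache key from package.json, yarn.lock and the NODE_ENV value, restores the cached layer on a hit, and clears the layer on a miss before running yarn install again. A rough Python sketch of that kind of content-based cache key, purely to illustrate the idea (the real buildpack does this in Go through the cache.WithStrings and cache.WithFiles helpers; the file names below are assumptions about the working directory):

import hashlib
import os

def dependency_cache_key(node_env, *files):
    # Hash every input that can change the contents of node_modules.
    digest = hashlib.sha256()
    digest.update(node_env.encode())
    for path in files:
        with open(path, "rb") as fh:
            digest.update(fh.read())
    return digest.hexdigest()

if os.path.exists("package.json") and os.path.exists("yarn.lock"):
    # Equal keys mean the cached node_modules can be restored as-is;
    # a different key means the layer is cleared and rebuilt by yarn install.
    print(dependency_cache_key(os.environ.get("NODE_ENV", "production"),
                               "package.json", "yarn.lock"))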
tests/unit/test_venv.py
import os import sys import py import pytest import tox from tox.interpreters import NoInterpreterInfo from tox.session.commands.run.sequential import installpkg, runtestenv from tox.venv import ( CreationConfig, VirtualEnv, getdigest, prepend_shebang_interpreter, tox_testenv_create, tox_testenv_install_deps, ) def test_getdigest(tmpdir): assert getdigest(tmpdir) == "0" * 32 def test_getsupportedinterpreter(monkeypatch, newconfig, mocksession): config = newconfig( [], """ [testenv:python] basepython={} """.format( sys.executable ), ) mocksession.new_config(config) venv = mocksession.getvenv("python") interp = venv.getsupportedinterpreter() # realpath needed for debian symlinks assert py.path.local(interp).realpath() == py.path.local(sys.executable).realpath() monkeypatch.setattr(tox.INFO, "IS_WIN", True) monkeypatch.setattr(venv.envconfig, "basepython", "jython") with pytest.raises(tox.exception.UnsupportedInterpreter): venv.getsupportedinterpreter() monkeypatch.undo() monkeypatch.setattr(venv.envconfig, "envname", "py1") monkeypatch.setattr(venv.envconfig, "basepython", "notexistingpython") with pytest.raises(tox.exception.InterpreterNotFound): venv.getsupportedinterpreter() monkeypatch.undo() # check that we properly report when no version_info is present info = NoInterpreterInfo(name=venv.name) info.executable = "something" monkeypatch.setattr(config.interpreters, "get_info", lambda *args, **kw: info) with pytest.raises(tox.exception.InvocationError): venv.getsupportedinterpreter() def test_create(mocksession, newconfig): config = newconfig( [], """ [testenv:py123] """, ) envconfig = config.envconfigs["py123"] mocksession.new_config(config) venv = mocksession.getvenv("py123") assert venv.path == envconfig.envdir assert not venv.path.check() with mocksession.newaction(venv.name, "getenv") as action: tox_testenv_create(action=action, venv=venv) pcalls = mocksession._pcalls assert len(pcalls) >= 1 args = pcalls[0].args assert "virtualenv" == str(args[2]) if not tox.INFO.IS_WIN: # realpath is needed for stuff like the debian symlinks our_sys_path = py.path.local(sys.executable).realpath() assert our_sys_path == py.path.local(args[0]).realpath() # assert Envconfig.toxworkdir in args assert venv.getcommandpath("easy_install", cwd=py.path.local()) interp = venv._getliveconfig().base_resolved_python_path assert interp == venv.envconfig.python_info.executable assert venv.path_config.check(exists=False) def test_commandpath_venv_precedence(tmpdir, monkeypatch, mocksession, newconfig): config = newconfig( [], """ [testenv:py123] """, ) mocksession.new_config(config) venv = mocksession.getvenv("py123") envconfig = venv.envconfig tmpdir.ensure("easy_install") monkeypatch.setenv("PATH", str(tmpdir), prepend=os.pathsep) envconfig.envbindir.ensure("easy_install") p = venv.getcommandpath("easy_install") assert py.path.local(p).relto(envconfig.envbindir), p def test_create_sitepackages(mocksession, newconfig): config = newconfig( [], """ [testenv:site] sitepackages=True [testenv:nosite] sitepackages=False """, ) mocksession.new_config(config) venv = mocksession.getvenv("site") with mocksession.newaction(venv.name, "getenv") as action: tox_testenv_create(action=action, venv=venv) pcalls = mocksession._pcalls assert len(pcalls) >= 1 args = pcalls[0].args assert "--system-site-packages" in map(str, args) mocksession._clearmocks() venv = mocksession.getvenv("nosite") with mocksession.newaction(venv.name, "getenv") as action: tox_testenv_create(action=action, venv=venv) pcalls = mocksession._pcalls assert 
len(pcalls) >= 1 args = pcalls[0].args assert "--system-site-packages" not in map(str, args) assert "--no-site-packages" not in map(str, args) def test_install_deps_wildcard(newmocksession): mocksession = newmocksession( [], """ [tox] distshare = {toxworkdir}/distshare [testenv:py123] deps= {distshare}/dep1-* """, ) venv = mocksession.getvenv("py123") with mocksession.newaction(venv.name, "getenv") as action: tox_testenv_create(action=action, venv=venv) pcalls = mocksession._pcalls assert len(pcalls) == 1 distshare = venv.envconfig.config.distshare distshare.ensure("dep1-1.0.zip") distshare.ensure("dep1-1.1.zip") tox_testenv_install_deps(action=action, venv=venv) assert len(pcalls) == 2 args = pcalls[-1].args assert pcalls[-1].cwd == venv.envconfig.config.toxinidir assert py.path.local.sysfind("python") == args[0] assert ["-m", "pip"] == args[1:3] assert args[3] == "install" args = [arg for arg in args if str(arg).endswith("dep1-1.1.zip")] assert len(args) == 1 def test_install_deps_indexserver(newmocksession): mocksession = newmocksession( [], """ [tox] indexserver = abc = ABC abc2 = ABC [testenv:py123] deps= dep1 :abc:dep2 :abc2:dep3 """, ) venv = mocksession.getvenv("py123") with mocksession.newaction(venv.name, "getenv") as action: tox_testenv_create(action=action, venv=venv) pcalls = mocksession._pcalls assert len(pcalls) == 1 pcalls[:] = [] tox_testenv_install_deps(action=action, venv=venv) # two different index servers, two calls assert len(pcalls) == 3 args = " ".join(pcalls[0].args) assert "-i " not in args assert "dep1" in args args = " ".join(pcalls[1].args) assert "-i ABC" in args assert "dep2" in args args = " ".join(pcalls[2].args) assert "-i ABC" in args assert "dep3" in args def test_install_deps_pre(newmocksession): mocksession = newmocksession( [], """ [testenv] pip_pre=true deps= dep1 """, ) venv = mocksession.getvenv("python") with mocksession.newaction(venv.name, "getenv") as action: tox_testenv_create(action=action, venv=venv) pcalls = mocksession._pcalls assert len(pcalls) == 1 pcalls[:] = [] tox_testenv_install_deps(action=action, venv=venv) assert len(pcalls) == 1 args = " ".join(pcalls[0].args) assert "--pre " in args assert "dep1" in args def test_installpkg_indexserver(newmocksession, tmpdir): mocksession = newmocksession( [], """ [tox] indexserver = default = ABC """, ) venv = mocksession.getvenv("python") pcalls = mocksession._pcalls p = tmpdir.ensure("distfile.tar.gz") installpkg(venv, p) # two different index servers, two calls assert len(pcalls) == 1 args = " ".join(pcalls[0].args) assert "-i ABC" in args def test_install_recreate(newmocksession, tmpdir): pkg = tmpdir.ensure("package.tar.gz") mocksession = newmocksession( ["--recreate"], """ [testenv] deps=xyz """, ) venv = mocksession.getvenv("python") with mocksession.newaction(venv.name, "update") as action: venv.update(action) installpkg(venv, pkg) mocksession.report.expect("verbosity0", "*create*") venv.update(action) mocksession.report.expect("verbosity0", "*recreate*") def test_install_sdist_extras(newmocksession): mocksession = newmocksession( [], """ [testenv] extras = testing development """, ) venv = mocksession.getvenv("python") with mocksession.newaction(venv.name, "getenv") as action: tox_testenv_create(action=action, venv=venv) pcalls = mocksession._pcalls assert len(pcalls) == 1 pcalls[:] = [] venv.installpkg("distfile.tar.gz", action=action) assert "distfile.tar.gz[testing,development]" in pcalls[-1].args def test_develop_extras(newmocksession, tmpdir): mocksession = newmocksession( [], """ 
[testenv] extras = testing development """, ) venv = mocksession.getvenv("python") with mocksession.newaction(venv.name, "getenv") as action: tox_testenv_create(action=action, venv=venv) pcalls = mocksession._pcalls assert len(pcalls) == 1 pcalls[:] = [] venv.developpkg(tmpdir, action=action) expected = "{}[testing,development]".format(tmpdir.strpath) assert expected in pcalls[-1].args def test_env_variables_added_to_needs_reinstall(tmpdir, mocksession, newconfig, monkeypatch): tmpdir.ensure("setup.py") monkeypatch.setenv("TEMP_PASS_VAR", "123") monkeypatch.setenv("TEMP_NOPASS_VAR", "456") config = newconfig( [], """ [testenv:python] passenv = temp_pass_var setenv = CUSTOM_VAR = 789 """, ) mocksession.new_config(config) venv = mocksession.getvenv("python") with mocksession.newaction(venv.name, "hello") as action: venv._needs_reinstall(tmpdir, action) pcalls = mocksession._pcalls assert len(pcalls) == 2 env = pcalls[0].env # should have access to setenv vars assert "CUSTOM_VAR" in env assert env["CUSTOM_VAR"] == "789" # should have access to passenv vars assert "TEMP_PASS_VAR" in env assert env["TEMP_PASS_VAR"] == "123" # should also have access to full invocation environment, # for backward compatibility, and to match behavior of venv.run_install_command() assert "TEMP_NOPASS_VAR" in env assert env["TEMP_NOPASS_VAR"] == "456" def test_test_hashseed_is_in_output(newmocksession, monkeypatch): seed = "123456789" monkeypatch.setattr("tox.config.make_hashseed", lambda: seed) mocksession = newmocksession([], "") venv = mocksession.getvenv("python") with mocksession.newaction(venv.name, "update") as action: venv.update(action) tox.venv.tox_runtest_pre(venv) mocksession.report.expect("verbosity0", "run-test-pre: PYTHONHASHSEED='{}'".format(seed)) def test_test_runtests_action_command_is_in_output(newmocksession): mocksession = newmocksession( [], """ [testenv] commands = echo foo bar """, ) venv = mocksession.getvenv("python") with mocksession.newaction(venv.name, "update") as action: venv.update(action) venv.test() mocksession.report.expect("verbosity0", "*runtests*commands?0? 
| echo foo bar") def test_install_error(newmocksession): mocksession = newmocksession( ["--recreate"], """ [testenv] deps=xyz commands= qwelkqw """, ) venv = mocksession.getvenv("python") venv.test() mocksession.report.expect("error", "*not find*qwelkqw*") assert venv.status == "commands failed" def test_install_command_not_installed(newmocksession): mocksession = newmocksession( ["--recreate"], """ [testenv] commands= pytest """, ) venv = mocksession.getvenv("python") venv.status = 0 venv.test() mocksession.report.expect("warning", "*test command found but not*") assert venv.status == 0 def test_install_command_whitelisted(newmocksession): mocksession = newmocksession( ["--recreate"], """ [testenv] whitelist_externals = pytest xy* commands= pytest xyz """, ) venv = mocksession.getvenv("python") venv.test() mocksession.report.expect("warning", "*test command found but not*", invert=True) assert venv.status == "commands failed" def test_install_command_not_installed_bash(newmocksession): mocksession = newmocksession( ["--recreate"], """ [testenv] commands= bash """, ) venv = mocksession.getvenv("python") venv.test() mocksession.report.expect("warning", "*test command found but not*") def test_install_python3(newmocksession): if not py.path.local.sysfind("python3"): pytest.skip("needs python3") mocksession = newmocksession( [], """ [testenv:py123] basepython=python3 deps= dep1 dep2 """, ) venv = mocksession.getvenv("py123") with mocksession.newaction(venv.name, "getenv") as action: tox_testenv_create(action=action, venv=venv) pcalls = mocksession._pcalls assert len(pcalls) == 1 args = pcalls[0].args assert str(args[2]) == "virtualenv" pcalls[:] = [] with mocksession.newaction(venv.name, "hello") as action: venv._install(["hello"], action=action) assert len(pcalls) == 1 args = pcalls[0].args assert py.path.local.sysfind("python") == args[0] assert ["-m", "pip"] == args[1:3] for _ in args: assert "--download-cache" not in args, args class TestCreationConfig: def test_basic(self, newconfig, mocksession, tmpdir): config = newconfig([], "") mocksession.new_config(config) venv = mocksession.getvenv("python") cconfig = venv._getliveconfig() assert cconfig.matches(cconfig) path = tmpdir.join("configdump") cconfig.writeconfig(path) newconfig = CreationConfig.readconfig(path) assert newconfig.matches(cconfig) assert cconfig.matches(newconfig) def test_matchingdependencies(self, newconfig, mocksession): config = newconfig( [], """ [testenv] deps=abc """, ) mocksession.new_config(config) venv = mocksession.getvenv("python") cconfig = venv._getliveconfig() config = newconfig( [], """ [testenv] deps=xyz """, ) mocksession.new_config(config) venv = mocksession.getvenv("python") otherconfig = venv._getliveconfig() assert not cconfig.matches(otherconfig) def test_matchingdependencies_file(self, newconfig, mocksession): config = newconfig( [], """ [tox] distshare={toxworkdir}/distshare [testenv] deps=abc {distshare}/xyz.zip """, ) xyz = config.distshare.join("xyz.zip") xyz.ensure() mocksession.new_config(config) venv = mocksession.getvenv("python") cconfig = venv._getliveconfig() assert cconfig.matches(cconfig) xyz.write("hello") newconfig = venv._getliveconfig() assert not cconfig.matches(newconfig) def test_matchingdependencies_latest(self, newconfig, mocksession): config = newconfig( [], """ [tox] distshare={toxworkdir}/distshare [testenv] deps={distshare}/xyz-* """, ) config.distshare.ensure("xyz-1.2.0.zip") xyz2 = config.distshare.ensure("xyz-1.2.1.zip") mocksession.new_config(config) venv = 
mocksession.getvenv("python") cconfig = venv._getliveconfig() md5, path = cconfig.deps[0] assert path == xyz2 assert md5 == path.computehash() def test_python_recreation(self, tmpdir, newconfig, mocksession): pkg = tmpdir.ensure("package.tar.gz") config = newconfig(["-v"], "") mocksession.new_config(config) venv = mocksession.getvenv("python") create_config = venv._getliveconfig() with mocksession.newaction(venv.name, "update") as action: venv.update(action) assert not venv.path_config.check() installpkg(venv, pkg) assert venv.path_config.check() assert mocksession._pcalls args1 = map(str, mocksession._pcalls[0].args) assert "virtualenv" in " ".join(args1) mocksession.report.expect("*", "*create*") # modify config and check that recreation happens mocksession._clearmocks() with mocksession.newaction(venv.name, "update") as action: venv.update(action) mocksession.report.expect("*", "*reusing*") mocksession._clearmocks() with mocksession.newaction(venv.name, "update") as action: create_config.base_resolved_python_path = py.path.local("balla") create_config.writeconfig(venv.path_config) venv.update(action) mocksession.report.expect("verbosity0", "*recreate*") def test_dep_recreation(self, newconfig, mocksession): config = newconfig([], "") mocksession.new_config(config) venv = mocksession.getvenv("python") with mocksession.newaction(venv.name, "update") as action: venv.update(action) cconfig = venv._getliveconfig() cconfig.deps[:] = [("1" * 32, "xyz.zip")] cconfig.writeconfig(venv.path_config) mocksession._clearmocks() with mocksession.newaction(venv.name, "update") as action: venv.update(action) mocksession.report.expect("*", "*recreate*") def test_develop_recreation(self, newconfig, mocksession): config = newconfig([], "") mocksession.new_config(config) venv = mocksession.getvenv("python") with mocksession.newaction(venv.name, "update") as action: venv.update(action) cconfig = venv._getliveconfig() cconfig.usedevelop = True cconfig.writeconfig(venv.path_config) mocksession._clearmocks() with mocksession.newaction(venv.name, "update") as action: venv.update(action) mocksession.report.expect("verbosity0", "*recreate*") class TestVenvTest: def test_envbindir_path(self, newmocksession, monkeypatch): monkeypatch.setenv("PIP_RESPECT_VIRTUALENV", "1") mocksession = newmocksession( [], """ [testenv:python] commands=abc """, ) venv = mocksession.getvenv("python") with mocksession.newaction(venv.name, "getenv") as action: monkeypatch.setenv("PATH", "xyz") sysfind_calls = [] monkeypatch.setattr( "py.path.local.sysfind", classmethod(lambda *args, **kwargs: sysfind_calls.append(kwargs) or 0 / 0), ) with pytest.raises(ZeroDivisionError): venv._install(list("123"), action=action) assert sysfind_calls.pop()["paths"] == [venv.envconfig.envbindir] with pytest.raises(ZeroDivisionError): venv.test(action) assert sysfind_calls.pop()["paths"] == [venv.envconfig.envbindir] with pytest.raises(ZeroDivisionError): venv.run_install_command(["qwe"], action=action) assert sysfind_calls.pop()["paths"] == [venv.envconfig.envbindir] monkeypatch.setenv("PIP_RESPECT_VIRTUALENV", "1") monkeypatch.setenv("PIP_REQUIRE_VIRTUALENV", "1") monkeypatch.setenv("__PYVENV_LAUNCHER__", "1") with pytest.raises(ZeroDivisionError): venv.run_install_command(["qwe"], action=action) assert "PIP_RESPECT_VIRTUALENV" not in os.environ assert "PIP_REQUIRE_VIRTUALENV" not in os.environ assert "__PYVENV_LAUNCHER__" not in os.environ assert os.environ["PIP_USER"] == "0" assert os.environ["PIP_NO_DEPS"] == "0" def test_pythonpath_remove(self, 
newmocksession, monkeypatch, caplog): monkeypatch.setenv("PYTHONPATH", "/my/awesome/library") mocksession = newmocksession( [], """ [testenv:python] commands=abc """, ) venv = mocksession.getvenv("python") with mocksession.newaction(venv.name, "getenv") as action: venv.run_install_command(["qwe"], action=action) assert "PYTHONPATH" not in os.environ mocksession.report.expect("warning", "*Discarding $PYTHONPATH from environment*") pcalls = mocksession._pcalls assert len(pcalls) == 1 assert "PYTHONPATH" not in pcalls[0].env def test_pythonpath_keep(self, newmocksession, monkeypatch, caplog): # passenv = PYTHONPATH allows PYTHONPATH to stay in environment monkeypatch.setenv("PYTHONPATH", "/my/awesome/library") mocksession = newmocksession( [], """ [testenv:python] commands=abc passenv = PYTHONPATH """, ) venv = mocksession.getvenv("python") with mocksession.newaction(venv.name, "getenv") as action: venv.run_install_command(["qwe"], action=action) mocksession.report.not_expect("warning", "*Discarding $PYTHONPATH from environment*") assert "PYTHONPATH" in os.environ pcalls = mocksession._pcalls assert len(pcalls) == 1 assert pcalls[0].env["PYTHONPATH"] == "/my/awesome/library" def test_env_variables_added_to_pcall(tmpdir, mocksession, newconfig, monkeypatch): monkeypatch.delenv("PYTHONPATH", raising=False) pkg = tmpdir.ensure("package.tar.gz") monkeypatch.setenv("X123", "123") monkeypatch.setenv("YY", "456") config = newconfig( [], """ [testenv:python] commands=python -V passenv = x123 setenv = ENV_VAR = value PYTHONPATH = value """, ) mocksession._clearmocks() mocksession.new_config(config) venv = mocksession.getvenv("python") installpkg(venv, pkg) venv.test() pcalls = mocksession._pcalls assert len(pcalls) == 2 for x in pcalls: env = x.env assert env is not None assert "ENV_VAR" in env assert env["ENV_VAR"] == "value" assert env["VIRTUAL_ENV"] == str(venv.path) assert env["X123"] == "123" assert "PYTHONPATH" in env assert env["PYTHONPATH"] == "value" # all env variables are passed for installation assert pcalls[0].env["YY"] == "456" assert "YY" not in pcalls[1].env assert {"ENV_VAR", "VIRTUAL_ENV", "PYTHONHASHSEED", "X123", "PATH"}.issubset(pcalls[1].env) # setenv does not trigger PYTHONPATH warnings mocksession.report.not_expect("warning", "*Discarding $PYTHONPATH from environment*") # for e in os.environ: # assert e in env def test_installpkg_no_upgrade(tmpdir, newmocksession): pkg = tmpdir.ensure("package.tar.gz") mocksession = newmocksession([], "") venv = mocksession.getvenv("python") venv.just_created = True venv.envconfig.envdir.ensure(dir=1) installpkg(venv, pkg) pcalls = mocksession._pcalls assert len(pcalls) == 1 assert pcalls[0].args[1:-1] == ["-m", "pip", "install", "--exists-action", "w"] @pytest.mark.parametrize("count, level", [(0, 0), (1, 0), (2, 0), (3, 1), (4, 2), (5, 3), (6, 3)]) def test_install_command_verbosity(tmpdir, newmocksession, count, level): pkg = tmpdir.ensure("package.tar.gz") mock_session = newmocksession(["-{}".format("v" * count)], "") env = mock_session.getvenv("python") env.just_created = True env.envconfig.envdir.ensure(dir=1) installpkg(env, pkg) pcalls = mock_session._pcalls assert len(pcalls) == 1 expected = ["-m", "pip", "install", "--exists-action", "w"] + (["-v"] * level) assert pcalls[0].args[1:-1] == expected def test_installpkg_upgrade(newmocksession, tmpdir): pkg = tmpdir.ensure("package.tar.gz") mocksession = newmocksession([], "") venv = mocksession.getvenv("python") assert not hasattr(venv, "just_created") installpkg(venv, pkg) pcalls = 
mocksession._pcalls assert len(pcalls) == 1 index = pcalls[0].args.index(str(pkg)) assert index >= 0 assert "-U" in pcalls[0].args[:index] assert "--no-deps" in pcalls[0].args[:index] def test_run_install_command(newmocksession): mocksession = newmocksession([], "") venv = mocksession.getvenv("python") venv.just_created = True venv.envconfig.envdir.ensure(dir=1) with mocksession.newaction(venv.name, "hello") as action: venv.run_install_command(packages=["whatever"], action=action) pcalls = mocksession._pcalls assert len(pcalls) == 1 args = pcalls[0].args assert py.path.local.sysfind("python") == args[0] assert ["-m", "pip"] == args[1:3] assert "install" in args env = pcalls[0].env assert env is not None def test_run_custom_install_command(newmocksession): mocksession = newmocksession( [], """ [testenv] install_command=easy_install {opts} {packages} """, ) venv = mocksession.getvenv("python") venv.just_created = True venv.envconfig.envdir.ensure(dir=1) with mocksession.newaction(venv.name, "hello") as action: venv.run_install_command(packages=["whatever"], action=action) pcalls = mocksession._pcalls assert len(pcalls) == 1 assert "easy_install" in pcalls[0].args[0] assert pcalls[0].args[1:] == ["whatever"] def test_command_relative_issue36(newmocksession, tmpdir, monkeypatch): mocksession = newmocksession( [], """ [testenv] """, ) x = tmpdir.ensure("x") venv = mocksession.getvenv("python") x2 = venv.getcommandpath("./x", cwd=tmpdir) assert x == x2 mocksession.report.not_expect("warning", "*test command found but not*") x3 = venv.getcommandpath("/bin/bash", cwd=tmpdir) assert x3 == "/bin/bash" mocksession.report.not_expect("warning", "*test command found but not*") monkeypatch.setenv("PATH", str(tmpdir)) x4 = venv.getcommandpath("x", cwd=tmpdir) assert x4.endswith(os.sep + "x") mocksession.report.expect("warning", "*test command found but not*") def test_ignore_outcome_failing_cmd(newmocksession): mocksession = newmocksession( [], """ [testenv] commands=testenv_fail ignore_outcome=True """, ) venv = mocksession.getvenv("python") venv.test() assert venv.status == "ignored failed command" mocksession.report.expect("warning", "*command failed but result from testenv is ignored*") def test_tox_testenv_create(newmocksession): log = [] class Plugin: @tox.hookimpl def tox_testenv_create(self, action, venv): assert isinstance(action, tox.session.Action) assert isinstance(venv, VirtualEnv) log.append(1) @tox.hookimpl def tox_testenv_install_deps(self, action, venv): assert isinstance(action, tox.session.Action) assert isinstance(venv, VirtualEnv) log.append(2) mocksession = newmocksession( [], """ [testenv] commands=testenv_fail ignore_outcome=True """, plugins=[Plugin()], ) venv = mocksession.getvenv("python") with mocksession.newaction(venv.name, "getenv") as action: venv.update(action=action) assert log == [1, 2] def test_tox_testenv_pre_post(newmocksession): log = [] class Plugin: @tox.hookimpl def tox_runtest_pre(self): log.append("started") @tox.hookimpl def tox_runtest_post(self): log.append("finished") mocksession = newmocksession( [], """ [testenv] commands=testenv_fail """, plugins=[Plugin()], ) venv = mocksession.getvenv("python") venv.status = None assert log == [] runtestenv(venv, venv.envconfig.config) assert log == ["started", "finished"] @pytest.mark.skipif("sys.platform == 'win32'") def test_tox_testenv_interpret_shebang_empty_instance(tmpdir): testfile = tmpdir.join("check_shebang_empty_instance.py") base_args = [str(testfile), "arg1", "arg2", "arg3"] # empty instance 
testfile.write("") args = prepend_shebang_interpreter(base_args) assert args == base_args @pytest.mark.skipif("sys.platform == 'win32'") def test_tox_testenv_interpret_shebang_empty_interpreter(tmpdir): testfile = tmpdir.join("check_shebang_empty_interpreter.py") base_args = [str(testfile), "arg1", "arg2", "arg3"] # empty interpreter testfile.write("#!") args = prepend_shebang_interpreter(base_args) assert args == base_args @pytest.mark.skipif("sys.platform == 'win32'") def test_tox_testenv_interpret_shebang_empty_interpreter_ws(tmpdir): testfile = tmpdir.join("check_shebang_empty_interpreter_ws.py") base_args = [str(testfile), "arg1", "arg2", "arg3"] # empty interpreter (whitespaces) testfile.write("#! \n") args = prepend_shebang_interpreter(base_args) assert args == base_args @pytest.mark.skipif("sys.platform == 'win32'") def test_tox_testenv_interpret_shebang_non_utf8(tmpdir): testfile = tmpdir.join("check_non_utf8.py") base_args = [str(testfile), "arg1", "arg2", "arg3"] testfile.write_binary(b"#!\x9a\xef\x12\xaf\n") args = prepend_shebang_interpreter(base_args) assert args == base_args @pytest.mark.skipif("sys.platform == 'win32'") def test_tox_testenv_interpret_shebang_interpreter_simple(tmpdir): testfile = tmpdir.join("check_shebang_interpreter_simple.py") base_args = [str(testfile), "arg1", "arg2", "arg3"] # interpreter (simple) testfile.write("#!interpreter") args = prepend_shebang_interpreter(base_args) assert args == ["interpreter"] + base_args @pytest.mark.skipif("sys.platform == 'win32'") def test_tox_testenv_interpret_shebang_interpreter_ws(tmpdir): testfile = tmpdir.join("check_shebang_interpreter_ws.py") base_args = [str(testfile), "arg1", "arg2", "arg3"] # interpreter (whitespaces) testfile.write("#! interpreter \n\n") args = prepend_shebang_interpreter(base_args) assert args == ["interpreter"] + base_args @pytest.mark.skipif("sys.platform == 'win32'") def test_tox_testenv_interpret_shebang_interpreter_arg(tmpdir): testfile = tmpdir.join("check_shebang_interpreter_arg.py") base_args = [str(testfile), "arg1", "arg2", "arg3"] # interpreter with argument testfile.write("#!interpreter argx\n") args = prepend_shebang_interpreter(base_args) assert args == ["interpreter", "argx"] + base_args @pytest.mark.skipif("sys.platform == 'win32'") def test_tox_testenv_interpret_shebang_interpreter_args(tmpdir): testfile = tmpdir.join("check_shebang_interpreter_args.py") base_args = [str(testfile), "arg1", "arg2", "arg3"] # interpreter with argument (ensure single argument) testfile.write("#!interpreter argx argx-part2\n") args = prepend_shebang_interpreter(base_args) assert args == ["interpreter", "argx argx-part2"] + base_args @pytest.mark.skipif("sys.platform == 'win32'") def test_tox_testenv_interpret_shebang_real(tmpdir): testfile = tmpdir.join("check_shebang_real.py") base_args = [str(testfile), "arg1", "arg2", "arg3"] # interpreter (real example) testfile.write("#!/usr/bin/env python\n") args = prepend_shebang_interpreter(base_args) assert args == ["/usr/bin/env", "python"] + base_args @pytest.mark.skipif("sys.platform == 'win32'") def test_tox_testenv_interpret_shebang_long_example(tmpdir): testfile = tmpdir.join("check_shebang_long_example.py") base_args = [str(testfile), "arg1", "arg2", "arg3"] # interpreter (long example) testfile.write( "#!this-is-an-example-of-a-very-long-interpret-directive-what-should-" "be-directly-invoked-when-tox-needs-to-invoked-the-provided-script-" "name-in-the-argument-list" ) args = prepend_shebang_interpreter(base_args) expected = [ 
"this-is-an-example-of-a-very-long-interpret-directive-what-should-be-" "directly-invoked-when-tox-needs-to-invoked-the-provided-script-name-" "in-the-argument-list" ] assert args == expected + base_args
[]
[]
[ "PIP_NO_DEPS", "PIP_USER" ]
[]
["PIP_NO_DEPS", "PIP_USER"]
python
2
0
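The shebang tests at the end of this file pin down the contract of tox's prepend_shebang_interpreter: the first whitespace-separated token after #! becomes the command to run, anything else left on that line is forwarded as a single extra argument, and empty, whitespace-only or non-UTF-8 shebangs leave the original arguments untouched. A simplified re-implementation that satisfies exactly those cases (a sketch of the behaviour the tests describe, not tox's actual code):

def prepend_shebang_interpreter(args):
    # args[0] is the script path; return args with its shebang interpreter prefixed.
    try:
        with open(args[0], "rb") as fh:
            first_line = fh.readline()
        if not first_line.startswith(b"#!"):
            return args
        tokens = first_line[2:].strip().decode("utf-8").split(None, 1)
    except (OSError, UnicodeDecodeError, IndexError):
        return args
    if not tokens:
        return args  # bare "#!" or only whitespace after it
    interpreter = [tokens[0]]
    if len(tokens) > 1:
        interpreter.append(tokens[1].strip())  # the rest stays one argument
    return interpreter + args

Run against the check_shebang_interpreter_args.py fixture from the tests, this returns ["interpreter", "argx argx-part2", <script>, "arg1", "arg2", "arg3"], which is the expected value asserted above.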
src/stub.py
# SPDX-License-Identifier: MIT import os, os.path, plistlib, shutil, sys, stat, subprocess, urlcache, zipfile, logging, json import osenum, firmware.wifi from util import * class StubInstaller(PackageInstaller): def __init__(self, sysinfo, dutil, osinfo, ipsw_info): super().__init__() self.dutil = dutil self.sysinfo = sysinfo self.osinfo = osinfo self.install_version = ipsw_info.version.split(maxsplit=1)[0] self.ucache = None self.copy_idata = [] self.stub_info = {} base = os.environ.get("IPSW_BASE", None) url = ipsw_info.url if base: url = base + "/" + os.path.split(url)[-1] logging.info(f"IPSW URL: {url}") if url.startswith("http"): p_progress("Downloading macOS OS package info...") self.ucache = urlcache.URLCache(url) self.pkg = zipfile.ZipFile(self.ucache) else: p_progress("Loading macOS OS package info...") self.pkg = zipfile.ZipFile(open(url, "rb")) self.flush_progress() logging.info(f"OS package opened") print() def prepare_volume(self, part): logging.info(f"StubInstaller.prepare_volume({part.name=!r})") self.part = part by_role = {} ctref = self.part.container["ContainerReference"] p_progress("Preparing target volumes...") for volume in self.part.container["Volumes"]: roles = tuple(volume["Roles"]) logging.info(f" {volume['DeviceIdentifier']} roles: {roles}") by_role.setdefault(roles, []).append(volume) for role in ("Preboot", "Recovery", "Data", "System"): vols = by_role.get(role, []) if len(vols) > 1: raise Exception(f"Multiple {role} volumes") self.label = self.part.label or "Linux" if not by_role.get(("Data",), None): if default_vol := by_role.get((), None): self.dutil.changeVolumeRole(default_vol[0]["DeviceIdentifier"], "D") self.dutil.rename(default_vol[0]["DeviceIdentifier"], self.label + " - Data") else: self.dutil.addVolume(ctref, self.label, role="D") self.dutil.refresh_part(self.part) else: self.label = self.label.rstrip(" - Data") for volume in self.part.container["Volumes"]: if volume["Roles"] == ["Data",]: data_volume = volume["DeviceIdentifier"] break else: raise Exception("Could not find Data volume") if not by_role.get(("System",), None): self.dutil.addVolume(ctref, self.label, role="S", groupWith=data_volume) if not by_role.get(("Preboot",), None): self.dutil.addVolume(ctref, "Preboot", role="B") if not by_role.get(("Recovery",), None): self.dutil.addVolume(ctref, "Recovery", role="R") self.dutil.refresh_part(self.part) def check_volume(self, part=None): if part: self.part = part logging.info(f"StubInstaller.check_volume({self.part.name=!r})") p_progress("Checking volumes...") os = self.osinfo.collect_part(self.part) if len(os) != 1: raise Exception("Container is not ready for OS install") self.osi = os[0] def chflags(self, flags, path): logging.info(f"chflags {flags} {path}") subprocess.run(["chflags", flags, path], check=True) def install_files(self, cur_os): logging.info("StubInstaller.install_files()") logging.info(f"VGID: {self.osi.vgid}") logging.info(f"OS info: {self.osi}") p_progress("Beginning stub OS install...") ipsw = self.pkg logging.info("Parsing metadata...") sysver = plistlib.load(ipsw.open("SystemVersion.plist")) manifest = plistlib.load(ipsw.open("BuildManifest.plist")) bootcaches = plistlib.load(ipsw.open("usr/standalone/bootcaches.plist")) self.flush_progress() for identity in manifest["BuildIdentities"]: if (identity["ApBoardID"] != f'0x{self.sysinfo.board_id:02X}' or identity["ApChipID"] != f'0x{self.sysinfo.chip_id:04X}' or identity["Info"]["DeviceClass"] != self.sysinfo.device_class or identity["Info"]["RestoreBehavior"] != "Erase" or 
identity["Info"]["Variant"] != "macOS Customer"): continue break else: raise Exception("Failed to locate a usable build identity for this device") logging.info(f'Using OS build {identity["Info"]["BuildNumber"]} for {self.sysinfo.device_class}') manifest["BuildIdentities"] = [identity] self.stub_info.update({ "vgid": self.osi.vgid, "system_version": sysver, "manifest_info": { "build_number": identity["Info"]["BuildNumber"], "variant": identity["Info"]["Variant"], "device_class": identity["Info"]["DeviceClass"], "board_id": identity["ApBoardID"], "chip_id": identity["ApChipID"], } }) p_progress("Setting up System volume...") logging.info("Setting up System volume") self.extract("usr/standalone/bootcaches.plist", self.osi.system) shutil.copy("logo.icns", os.path.join(self.osi.system, ".VolumeIcon.icns")) cs = os.path.join(self.osi.system, "System/Library/CoreServices") os.makedirs(cs, exist_ok=True) sysver["ProductUserVisibleVersion"] += " (stub)" self.extract("PlatformSupport.plist", cs) self.flush_progress() # Make the icon work try: logging.info(f"xattr -wx com.apple.FinderInfo .... {self.osi.system}") subprocess.run(["xattr", "-wx", "com.apple.FinderInfo", "0000000000000000040000000000000000000000000000000000000000000000", self.osi.system], check=True) except: p_error("Failed to apply extended attributes, logo will not work.") p_progress("Setting up Data volume...") logging.info("Setting up Data volume") os.makedirs(os.path.join(self.osi.data, "private/var/db/dslocal"), exist_ok=True) p_progress("Setting up Preboot volume...") logging.info("Setting up Preboot volume") pb_vgid = os.path.join(self.osi.preboot, self.osi.vgid) os.makedirs(pb_vgid, exist_ok=True) bless2 = bootcaches["bless2"] restore_bundle = os.path.join(pb_vgid, bless2["RestoreBundlePath"]) os.makedirs(restore_bundle, exist_ok=True) restore_manifest = os.path.join(restore_bundle, "BuildManifest.plist") with open(restore_manifest, "wb") as fd: plistlib.dump(manifest, fd) self.copy_idata.append((restore_manifest, "BuildManifest.plist")) self.extract("SystemVersion.plist", restore_bundle) self.extract("RestoreVersion.plist", restore_bundle) self.copy_idata.append((os.path.join(restore_bundle, "RestoreVersion.plist"), "RestoreVersion.plist")) self.extract("usr/standalone/bootcaches.plist", restore_bundle) self.extract_tree("BootabilityBundle/Restore/Bootability", os.path.join(restore_bundle, "Bootability")) self.extract_file("BootabilityBundle/Restore/Firmware/Bootability.dmg.trustcache", os.path.join(restore_bundle, "Bootability/Bootability.trustcache")) self.extract_tree("Firmware/Manifests/restore/macOS Customer/", restore_bundle) copied = set() for key, val in identity["Manifest"].items(): if key in ("BaseSystem", "OS", "Ap,SystemVolumeCanonicalMetadata"): continue path = val["Info"]["Path"] if path in copied: continue self.extract(path, restore_bundle) if path.startswith("kernelcache."): name = os.path.basename(path) self.copy_idata.append((os.path.join(restore_bundle, name), name)) copied.add(path) self.flush_progress() os.makedirs(os.path.join(pb_vgid, "var/db"), exist_ok=True) admin_users = os.path.join(cur_os.preboot, cur_os.vgid, "var/db/AdminUserRecoveryInfo.plist") tg_admin_users = os.path.join(pb_vgid, "var/db/AdminUserRecoveryInfo.plist") if os.path.exists(tg_admin_users): self.chflags("noschg", tg_admin_users) shutil.copy(admin_users, tg_admin_users) self.copy_idata.append((tg_admin_users, "AdminUserRecoveryInfo.plist")) admin_users = plistlib.load(open(tg_admin_users, "rb")) self.stub_info["admin_users"] = {} for 
user, info in admin_users.items(): self.stub_info["admin_users"][user] = { "uid": info["GeneratedUID"], "real_name": info["RealName"], } # Stop macOS <12.0 bootability stufff from clobbering this file self.chflags("schg", tg_admin_users) # This is a workaround for some screwiness in the macOS <12.0 bootability # code, which ends up putting the apticket in the wrong volume... sys_restore_bundle = os.path.join(self.osi.system, bless2["RestoreBundlePath"]) if os.path.lexists(sys_restore_bundle): os.unlink(sys_restore_bundle) os.symlink(restore_bundle, sys_restore_bundle) p_progress("Setting up Recovery volume...") logging.info("Setting up Recovery volume") rec_vgid = os.path.join(self.osi.recovery, self.osi.vgid) os.makedirs(rec_vgid, exist_ok=True) basesystem_path = os.path.join(rec_vgid, "usr/standalone/firmware") os.makedirs(basesystem_path, exist_ok=True) logging.info("Extracting arm64eBaseSystem.dmg") self.extract_file(identity["Manifest"]["BaseSystem"]["Info"]["Path"], os.path.join(basesystem_path, "arm64eBaseSystem.dmg")) self.flush_progress() self.systemversion_path = os.path.join(cs, "SystemVersion.plist") p_progress("Wrapping up...") logging.info("Writing SystemVersion.plist") with open(self.systemversion_path, "wb") as fd: plistlib.dump(sysver, fd) self.copy_idata.append((self.systemversion_path, "SystemVersion.plist")) logging.info("Copying Finish Installation.app") shutil.copytree("step2/Finish Installation.app", os.path.join(self.osi.system, "Finish Installation.app")) logging.info("Writing step2.sh") step2_sh = open("step2/step2.sh").read().replace("##VGID##", self.osi.vgid) resources = os.path.join(self.osi.system, "Finish Installation.app/Contents/Resources") step2_sh_dst = os.path.join(resources, "step2.sh") with open(step2_sh_dst, "w") as fd: fd.write(step2_sh) os.chmod(step2_sh_dst, 0o755) self.step2_sh = step2_sh_dst self.boot_obj_path = os.path.join(resources, "boot.bin") logging.info("Copying .IAPhysicalMedia") shutil.copy("step2/IAPhysicalMedia.plist", os.path.join(self.osi.system, ".IAPhysicalMedia")) print() p_success("Stub OS installation complete.") logging.info("Stub OS installed") print() def collect_firmware(self, pkg): p_progress("Collecting firmware...") logging.info("StubInstaller.collect_firmware()") img = os.path.join(self.osi.recovery, self.osi.vgid, "usr/standalone/firmware/arm64eBaseSystem.dmg") logging.info("Attaching recovery ramdisk") subprocess.run(["hdiutil", "attach", "-quiet", "-readonly", "-mountpoint", "recovery", img], check=True) logging.info("Collecting WiFi firmware") col = firmware.wifi.WiFiFWCollection("recovery/usr/share/firmware/wifi/") pkg.add_files(sorted(col.files())) logging.info("Making fallback firmware archive") subprocess.run(["tar", "czf", "all_firmware.tar.gz", "-C", "recovery/usr/share", "firmware"], check=True) self.copy_idata.append(("all_firmware.tar.gz", "all_firmware.tar.gz")) logging.info("Detaching recovery ramdisk") subprocess.run(["hdiutil", "detach", "-quiet", "recovery"]) def collect_installer_data(self, path): p_progress("Collecting installer data...") logging.info(f"Copying installer data to {path}") for src, name in self.copy_idata: shutil.copy(src, os.path.join(path, name)) with open(os.path.join(path, "stub_info.json"), "w") as fd: json.dump(self.stub_info, fd)
[]
[]
[ "IPSW_BASE" ]
[]
["IPSW_BASE"]
python
1
0
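Most of install_files() above hinges on picking the right entry out of BuildManifest.plist: the stub installer walks BuildIdentities and keeps the first identity whose board ID, chip ID, device class, restore behaviour (Erase) and variant (macOS Customer) match the target machine. A condensed sketch of that selection step on its own (the manifest path and hardware values are supplied by the caller; this is not the installer's exact code):

import plistlib

def pick_build_identity(manifest_path, board_id, chip_id, device_class):
    # Return the erase-install "macOS Customer" identity matching this device.
    with open(manifest_path, "rb") as fh:
        manifest = plistlib.load(fh)
    for identity in manifest["BuildIdentities"]:
        info = identity["Info"]
        if (identity["ApBoardID"] == f"0x{board_id:02X}"
                and identity["ApChipID"] == f"0x{chip_id:04X}"
                and info["DeviceClass"] == device_class
                and info["RestoreBehavior"] == "Erase"
                and info["Variant"] == "macOS Customer"):
            return identity
    raise LookupError("no usable build identity for this device")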
tests/in_progress/v2t_cal/test_v2t_cal.py
# general imports import os import pytest from pathlib import Path # DragonPHY imports from dragonphy import * THIS_DIR = Path(__file__).parent.resolve() BUILD_DIR = THIS_DIR / 'build' if 'FPGA_SERVER' in os.environ: SIMULATOR = 'vivado' else: SIMULATOR = 'ncsim' @pytest.mark.wip def test_sim(): deps = get_deps_cpu_sim_new(impl_file=THIS_DIR / 'test.sv') print(deps) DragonTester( ext_srcs=deps, directory=BUILD_DIR, top_module='test', inc_dirs=[get_mlingua_dir() / 'samples', get_dir('inc/new_cpu')], defines={'DAVE_TIMEUNIT': '1fs', 'NCVLOG': None}, simulator=SIMULATOR ).run() if __name__ == "__main__": test_sim()
[]
[]
[]
[]
[]
python
0
0
client/cmd/swarming/spawn_tasks.go
// Copyright 2018 The LUCI Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "context" "encoding/json" "io" "log" "os" "sync" "time" "github.com/maruel/subcommands" "go.chromium.org/luci/auth" "go.chromium.org/luci/common/api/swarming/swarming/v1" "go.chromium.org/luci/common/data/text/units" "go.chromium.org/luci/common/errors" "go.chromium.org/luci/common/sync/parallel" "go.chromium.org/luci/common/system/signals" ) func cmdSpawnTasks(defaultAuthOpts auth.Options) *subcommands.Command { return &subcommands.Command{ UsageLine: "spawn-tasks <options>", ShortDesc: "Spawns a set of Swarming tasks", LongDesc: "Spawns a set of Swarming tasks given a JSON file.", CommandRun: func() subcommands.CommandRun { r := &spawnTasksRun{} r.Init(defaultAuthOpts) return r }, } } type spawnTasksRun struct { commonFlags jsonInput string jsonOutput string cancelExtraTasks bool } func (c *spawnTasksRun) Init(defaultAuthOpts auth.Options) { c.commonFlags.Init(defaultAuthOpts) c.Flags.StringVar(&c.jsonInput, "json-input", "", "(required) Read Swarming task requests from this file.") c.Flags.StringVar(&c.jsonOutput, "json-output", "", "Write details about the triggered task(s) to this file as json.") // TODO(https://crbug.com/997221): Remove this option. c.Flags.BoolVar(&c.cancelExtraTasks, "cancel-extra-tasks", false, "Cancel extra spawned tasks.") } func (c *spawnTasksRun) Parse(args []string) error { if err := c.commonFlags.Parse(); err != nil { return err } if c.jsonInput == "" { return errors.Reason("input JSON file is required").Err() } return nil } func (c *spawnTasksRun) Run(a subcommands.Application, args []string, env subcommands.Env) int { if err := c.Parse(args); err != nil { printError(a, err) return 1 } cl, err := c.defaultFlags.StartTracing() if err != nil { printError(a, err) return 1 } defer cl.Close() if err := c.main(a, args, env); err != nil { printError(a, err) return 1 } return 0 } func (c *spawnTasksRun) main(a subcommands.Application, args []string, env subcommands.Env) error { start := time.Now() ctx, cancel := context.WithCancel(c.defaultFlags.MakeLoggingContext(os.Stderr)) signals.HandleInterrupt(cancel) tasksFile, err := os.Open(c.jsonInput) if err != nil { return errors.Annotate(err, "failed to open tasks file").Err() } defer tasksFile.Close() requests, err := processTasksStream(tasksFile) if err != nil { return err } invocationTag, err := addInvocationUUIDTags(requests...) if err != nil { return errors.Annotate(err, "failed to add InvocationUUID tags to requests").Err() } _, err = addRPCUUIDTags(requests...) 
if err != nil { return errors.Annotate(err, "failed to add RPCUUID tags to requests").Err() } service, err := c.createSwarmingClient(ctx) if err != nil { return err } createStart := float64(time.Now().Unix()) results, merr := createNewTasks(ctx, service, requests) if merr == nil && c.cancelExtraTasks { if err = cancelExtraTasks(ctx, service, createStart, invocationTag, results); err != nil { return errors.Annotate(err, "failed to cancel extra tasks for invocation %s", invocationTag).Err() } } var output io.Writer if c.jsonOutput != "" { file, err := os.Create(c.jsonOutput) if err != nil { return err } defer file.Close() output = file } else { output = os.Stdout } data := triggerResults{Tasks: results} b, err := json.MarshalIndent(&data, "", " ") if err != nil { return errors.Annotate(err, "marshalling trigger result").Err() } if _, err = output.Write(b); err != nil { return errors.Annotate(err, "writing json output").Err() } log.Printf("Duration: %s\n", units.Round(time.Since(start), time.Millisecond)) return merr } type tasksInput struct { Requests []*swarming.SwarmingRpcsNewTaskRequest `json:"requests"` } func processTasksStream(tasks io.Reader) ([]*swarming.SwarmingRpcsNewTaskRequest, error) { dec := json.NewDecoder(tasks) dec.DisallowUnknownFields() requests := tasksInput{} if err := dec.Decode(&requests); err != nil { return nil, errors.Annotate(err, "decoding tasks file").Err() } // Populate the tasks with information about the current envirornment // if they're not already set. currentUser := os.Getenv("USER") parentTaskID := os.Getenv("SWARMING_TASK_ID") for _, request := range requests.Requests { if request.User == "" { request.User = currentUser } if request.ParentTaskId == "" { request.ParentTaskId = parentTaskID } } return requests.Requests, nil } func createNewTasks(c context.Context, service swarmingService, requests []*swarming.SwarmingRpcsNewTaskRequest) ([]*swarming.SwarmingRpcsTaskRequestMetadata, error) { var mu sync.Mutex results := make([]*swarming.SwarmingRpcsTaskRequestMetadata, 0, len(requests)) err := parallel.WorkPool(8, func(gen chan<- func() error) { for _, request := range requests { request := request gen <- func() error { result, err := service.NewTask(c, request) if err != nil { return err } mu.Lock() defer mu.Unlock() results = append(results, result) return nil } } }) return results, err }
[ "\"USER\"", "\"SWARMING_TASK_ID\"" ]
[]
[ "SWARMING_TASK_ID", "USER" ]
[]
["SWARMING_TASK_ID", "USER"]
go
2
0
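createNewTasks above submits the parsed requests through a fixed pool of eight workers and appends each result under a mutex, after processTasksStream has filled in any missing user and parent task ID from the USER and SWARMING_TASK_ID environment variables. The same two steps in Python, only as an illustration of the pattern (new_task stands in for the swarming client call, and the request dicts are simplified stand-ins for the real task requests):

import os
from concurrent.futures import ThreadPoolExecutor

def fill_defaults(requests):
    # Mirror processTasksStream: default user and parent task id from the environment.
    for request in requests:
        request.setdefault("user", os.environ.get("USER", ""))
        request.setdefault("parent_task_id", os.environ.get("SWARMING_TASK_ID", ""))
    return requests

def create_new_tasks(new_task, requests, workers=8):
    # Submit every request through new_task, with at most `workers` calls in flight.
    with ThreadPoolExecutor(max_workers=workers) as pool:
        return list(pool.map(new_task, fill_defaults(requests)))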
sdk/communication/azure-communication-phonenumbers/samples/list_purchased_phone_numbers_sample_async.py
# coding: utf-8 # ------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # -------------------------------------------------------------------------- """ FILE: list_purchased_phone_numbers_sample_async.py DESCRIPTION: This sample demonstrates how to get all of your acquired phone numbers using your connection string USAGE: python list_purchased_phone_numbers_sample_async.py Set the environment variables with your own values before running the sample: 1) COMMUNICATION_SAMPLES_CONNECTION_STRING - The connection string including your endpoint and access key of your Azure Communication Service """ import asyncio import os from azure.communication.phonenumbers.aio import ( PhoneNumbersClient ) connection_str = os.getenv('COMMUNICATION_SAMPLES_CONNECTION_STRING') phone_numbers_client = PhoneNumbersClient.from_connection_string(connection_str) async def list_purchased_phone_numbers(): async with phone_numbers_client: purchased_phone_numbers = phone_numbers_client.list_purchased_phone_numbers() print("Purchased Phone Numbers:") async for item in purchased_phone_numbers: print(item.phone_number) if __name__ == '__main__': asyncio.run(list_purchased_phone_numbers())
[]
[]
[ "COMMUNICATION_SAMPLES_CONNECTION_STRING" ]
[]
["COMMUNICATION_SAMPLES_CONNECTION_STRING"]
python
1
0
docs/sphinx/conf.py
# -*- coding: utf-8 -*- # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # type: ignore import os from pkg_resources import parse_version try: from cluplus import __version__ except ModuleNotFoundError: from sdsstools import get_package_version __version__ = get_package_version(__file__, 'sdss-cluplus') or 'dev' # Are we building in RTD? on_rtd = os.environ.get('READTHEDOCS') == 'True' # Sphinx template selected in cookiecutter and whether to use releases sphinx_template = 'sphinx-bootstrap' use_releases = 'no' if sphinx_template == 'sphinx-bootstrap': import sphinx_bootstrap_theme # Importing matplotlib here with agg to prevent tkinter error in readthedocs # import matplotlib # matplotlib.use('agg') # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.autosummary', 'sphinx.ext.todo', 'sphinx.ext.viewcode', 'sphinx.ext.mathjax', 'sphinx.ext.intersphinx'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] # source_suffix = '.rst' # source_parsers = { # '.md': 'recommonmark.parser.CommonMarkParser', # } # The master toctree document. master_doc = 'index' # General information about the project. project = 'cluplus' copyright = '{0}, {1}'.format('2021', 'Florian Briegel') author = 'Florian Briegel' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # The short X.Y version. version = parse_version(__version__).base_version # The full version, including alpha/beta/rc tags. release = __version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # The reST default role (used for this markup: `text`) to use for all # documents. default_role = 'py:obj' # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. 
# keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # Intersphinx mappings intersphinx_mapping = {'python': ('https://docs.python.org/', None), 'astropy': ('http://docs.astropy.org/en/latest', None), 'numpy': ('http://docs.scipy.org/doc/numpy/', None)} autodoc_mock_imports = ['_tkinter'] autodoc_member_order = 'groupwise' napoleon_use_rtype = False napoleon_use_ivar = True rst_epilog = f""" .. |numpy_array| replace:: Numpy array .. |HDUList| replace:: :class:`~astropy.io.fits.HDUList` .. |cluplus_version| replace:: {__version__} """ # -- Options for HTML output ---------------------------------------------- html_css_files = [] # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. if sphinx_template == 'sphinx-bootstrap': html_theme = 'bootstrap' html_sidebars = {} # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = { # Navigation bar title. (Default: ``project`` value) 'navbar_title': "SDSS: {0}".format(project), # Tab name for entire site. (Default: "Site") 'navbar_site_name': "Site", # A list of tuples containing pages or urls to link to. # Valid tuples should be in the following forms: # (name, page) # a link to a page # (name, "/aa/bb", 1) # a link to an arbitrary relative url # (name, "http://example.com", True) # arbitrary absolute url # Note the "1" or "True" value above as the third argument to indicate # an arbitrary url. 'navbar_links': [ ], # Render the next and previous page links in navbar. (Default: true) 'navbar_sidebarrel': False, # Render the current pages TOC in the navbar. (Default: true) 'navbar_pagenav': False, # Tab name for the current pages TOC. (Default: "Page") 'navbar_pagenav_name': "Page", # Global TOC depth for "site" navbar tab. (Default: 1) # Switching to -1 shows all levels. 'globaltoc_depth': 2, # Include hidden TOCs in Site navbar? # # Note: If this is "false", you cannot have mixed ``:hidden:`` and # non-hidden ``toctree`` directives in the same page, or else the build # will break. # # Values: "true" (default) or "false" 'globaltoc_includehidden': "true", # HTML navbar class (Default: "navbar") to attach to <div> element. # For black navbar, do "navbar navbar-inverse" 'navbar_class': "navbar", # Fix navigation bar to top of page? # Values: "true" (default) or "false" 'navbar_fixed_top': "true", # Location of link to source. # Options are "nav" (default), "footer" or anything else to exclude. 'source_link_position': "", # Bootswatch (http://bootswatch.com/) theme. # # Options are nothing (default) or the name of a valid theme # such as "amelia" or "cosmo". 'bootswatch_theme': "paper", # Choose Bootstrap version. # Values: "3" (default) or "2" (in quotes) 'bootstrap_version': "3", } # Add any paths that contain custom themes here, relative to this directory. 
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path() html_logo = '_static/sdssv_logo_small.png' html_css_files += ["custom_bootstrap.css"] html_sidebars = {'**': ['localtoc.html']} elif sphinx_template == 'alabaster': html_theme = 'alabaster' html_theme_options = { 'logo': 'sdssv_logo.png', 'github_user': 'sdss', 'github_repo': project, 'github_button': True, 'github_type': 'star', 'sidebar_collapse': True, 'page_width': '80%' } html_sidebars = { '**': [ 'about.html', 'navigation.html', 'relations.html', 'searchbox.html', ] } html_css_files += ["custom.css"] html_favicon = './_static/favicon_sdssv.ico' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". # See https://github.com/rtfd/readthedocs.org/issues/1776 for why we do this if on_rtd: html_static_path = [] else: html_static_path = ['_static'] # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. htmlhelp_basename = '{0}pdoc'.format('cluplus') # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, '{0}.tex'.format(project), u'{0} Documentation'.format(project), author, 'manual'), ] # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'cluplus', u'{0} Documentation'.format(project), [author], 1) ] # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, project, u'{0} Documentation'.format(project), author, project, 'One line description of project.', 'Miscellaneous'), ] if use_releases == 'yes': extensions += ['sdsstools.releases'] releases_github_path = 'wasndas/cluplus' releases_document_name = ['CHANGELOG'] releases_unstable_prehistory = True
[]
[]
[ "READTHEDOCS" ]
[]
["READTHEDOCS"]
python
1
0
lib/kb_SPAdes/kb_SPAdesServer.py
#!/usr/bin/env python # -*- coding: utf-8 -*- import datetime import json import os import random as _random import sys import traceback from getopt import getopt, GetoptError from multiprocessing import Process from os import environ from wsgiref.simple_server import make_server import requests as _requests from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \ JSONRPCError, InvalidRequestError from jsonrpcbase import ServerError as JSONServerError from biokbase import log from kb_SPAdes.authclient import KBaseAuth as _KBaseAuth try: from ConfigParser import ConfigParser except ImportError: from configparser import ConfigParser DEPLOY = 'KB_DEPLOYMENT_CONFIG' SERVICE = 'KB_SERVICE_NAME' AUTH = 'auth-service-url' # Note that the error fields do not match the 2.0 JSONRPC spec def get_config_file(): return environ.get(DEPLOY, None) def get_service_name(): return environ.get(SERVICE, None) def get_config(): if not get_config_file(): return None retconfig = {} config = ConfigParser() config.read(get_config_file()) for nameval in config.items(get_service_name() or 'kb_SPAdes'): retconfig[nameval[0]] = nameval[1] return retconfig config = get_config() from kb_SPAdes.kb_SPAdesImpl import kb_SPAdes # noqa @IgnorePep8 impl_kb_SPAdes = kb_SPAdes(config) class JSONObjectEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, set): return list(obj) if isinstance(obj, frozenset): return list(obj) if hasattr(obj, 'toJSONable'): return obj.toJSONable() return json.JSONEncoder.default(self, obj) class JSONRPCServiceCustom(JSONRPCService): def call(self, ctx, jsondata): """ Calls jsonrpc service's method and returns its return value in a JSON string or None if there is none. Arguments: jsondata -- remote method call in jsonrpc format """ result = self.call_py(ctx, jsondata) if result is not None: return json.dumps(result, cls=JSONObjectEncoder) return None def _call_method(self, ctx, request): """Calls given method with given params and returns it value.""" method = self.method_data[request['method']]['method'] params = request['params'] result = None try: if isinstance(params, list): # Does it have enough arguments? if len(params) < self._man_args(method) - 1: raise InvalidParamsError('not enough arguments') # Does it have too many arguments? if(not self._vargs(method) and len(params) > self._max_args(method) - 1): raise InvalidParamsError('too many arguments') result = method(ctx, *params) elif isinstance(params, dict): # Do not accept keyword arguments if the jsonrpc version is # not >=1.1. if request['jsonrpc'] < 11: raise KeywordError result = method(ctx, **params) else: # No params result = method(ctx) except JSONRPCError: raise except Exception as e: # log.exception('method %s threw an exception' % request['method']) # Exception was raised inside the method. newerr = JSONServerError() newerr.trace = traceback.format_exc() if len(e.args) == 1: newerr.data = repr(e.args[0]) else: newerr.data = repr(e.args) raise newerr return result def call_py(self, ctx, jsondata): """ Calls jsonrpc service's method and returns its return value in python object format or None if there is none. This method is same as call() except the return value is a python object instead of JSON string. This method is mainly only useful for debugging purposes. 
""" rdata = jsondata # we already deserialize the json string earlier in the server code, no # need to do it again # try: # rdata = json.loads(jsondata) # except ValueError: # raise ParseError # set some default values for error handling request = self._get_default_vals() if isinstance(rdata, dict) and rdata: # It's a single request. self._fill_request(request, rdata) respond = self._handle_request(ctx, request) # Don't respond to notifications if respond is None: return None return respond elif isinstance(rdata, list) and rdata: # It's a batch. requests = [] responds = [] for rdata_ in rdata: # set some default values for error handling request_ = self._get_default_vals() self._fill_request(request_, rdata_) requests.append(request_) for request_ in requests: respond = self._handle_request(ctx, request_) # Don't respond to notifications if respond is not None: responds.append(respond) if responds: return responds # Nothing to respond. return None else: # empty dict, list or wrong type raise InvalidRequestError def _handle_request(self, ctx, request): """Handles given request and returns its response.""" if 'types' in self.method_data[request['method']]: self._validate_params_types(request['method'], request['params']) result = self._call_method(ctx, request) # Do not respond to notifications. if request['id'] is None: return None respond = {} self._fill_ver(request['jsonrpc'], respond) respond['result'] = result respond['id'] = request['id'] return respond class MethodContext(dict): def __init__(self, logger): self['client_ip'] = None self['user_id'] = None self['authenticated'] = None self['token'] = None self['module'] = None self['method'] = None self['call_id'] = None self['rpc_context'] = None self['provenance'] = None self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3']) self._logger = logger def log_err(self, message): self._log(log.ERR, message) def log_info(self, message): self._log(log.INFO, message) def log_debug(self, message, level=1): if level in self._debug_levels: pass else: level = int(level) if level < 1 or level > 3: raise ValueError("Illegal log level: " + str(level)) level = level + 6 self._log(level, message) def set_log_level(self, level): self._logger.set_log_level(level) def get_log_level(self): return self._logger.get_log_level() def clear_log_level(self): self._logger.clear_user_log_level() def _log(self, level, message): self._logger.log_message(level, message, self['client_ip'], self['user_id'], self['module'], self['method'], self['call_id']) def provenance(self): callbackURL = os.environ.get('SDK_CALLBACK_URL') if callbackURL: # OK, there's a callback server from which we can get provenance arg_hash = {'method': 'CallbackServer.get_provenance', 'params': [], 'version': '1.1', 'id': str(_random.random())[2:] } body = json.dumps(arg_hash) response = _requests.post(callbackURL, data=body, timeout=60) response.encoding = 'utf-8' if response.status_code == 500: if ('content-type' in response.headers and response.headers['content-type'] == 'application/json'): err = response.json() if 'error' in err: raise ServerError(**err['error']) else: raise ServerError('Unknown', 0, response.text) else: raise ServerError('Unknown', 0, response.text) if not response.ok: response.raise_for_status() resp = response.json() if 'result' not in resp: raise ServerError('Unknown', 0, 'An unknown server error occurred') return resp['result'][0] else: return self.get('provenance') class ServerError(Exception): ''' The call returned an error. 
Fields: name - the name of the error. code - the error code. message - a human readable error message. data - the server side stacktrace. ''' def __init__(self, name, code, message, data=None, error=None): super(Exception, self).__init__(message) self.name = name self.code = code self.message = message if message else '' self.data = data or error or '' # data = JSON RPC 2.0, error = 1.1 def __str__(self): return self.name + ': ' + str(self.code) + '. ' + self.message + \ '\n' + self.data def getIPAddress(environ): xFF = environ.get('HTTP_X_FORWARDED_FOR') realIP = environ.get('HTTP_X_REAL_IP') trustXHeaders = config is None or \ config.get('dont_trust_x_ip_headers') != 'true' if (trustXHeaders): if (xFF): return xFF.split(',')[0].strip() if (realIP): return realIP.strip() return environ.get('REMOTE_ADDR') class Application(object): # Wrap the wsgi handler in a class definition so that we can # do some initialization and avoid regenerating stuff over # and over def logcallback(self): self.serverlog.set_log_file(self.userlog.get_log_file()) def log(self, level, context, message): self.serverlog.log_message(level, message, context['client_ip'], context['user_id'], context['module'], context['method'], context['call_id']) def __init__(self): submod = get_service_name() or 'kb_SPAdes' self.userlog = log.log( submod, ip_address=True, authuser=True, module=True, method=True, call_id=True, changecallback=self.logcallback, config=get_config_file()) self.serverlog = log.log( submod, ip_address=True, authuser=True, module=True, method=True, call_id=True, logfile=self.userlog.get_log_file()) self.serverlog.set_log_level(6) self.rpc_service = JSONRPCServiceCustom() self.method_authentication = dict() self.rpc_service.add(impl_kb_SPAdes.run_SPAdes, name='kb_SPAdes.run_SPAdes', types=[dict]) self.method_authentication['kb_SPAdes.run_SPAdes'] = 'required' # noqa self.rpc_service.add(impl_kb_SPAdes.run_metaSPAdes, name='kb_SPAdes.run_metaSPAdes', types=[dict]) self.method_authentication['kb_SPAdes.run_metaSPAdes'] = 'required' # noqa self.rpc_service.add(impl_kb_SPAdes.status, name='kb_SPAdes.status', types=[dict]) authurl = config.get(AUTH) if config else None self.auth_client = _KBaseAuth(authurl) def __call__(self, environ, start_response): # Context object, equivalent to the perl impl CallContext ctx = MethodContext(self.userlog) ctx['client_ip'] = getIPAddress(environ) status = '500 Internal Server Error' try: body_size = int(environ.get('CONTENT_LENGTH', 0)) except (ValueError): body_size = 0 if environ['REQUEST_METHOD'] == 'OPTIONS': # we basically do nothing and just return headers status = '200 OK' rpc_result = "" else: request_body = environ['wsgi.input'].read(body_size) try: req = json.loads(request_body) except ValueError as ve: err = {'error': {'code': -32700, 'name': "Parse error", 'message': str(ve), } } rpc_result = self.process_error(err, ctx, {'version': '1.1'}) else: ctx['module'], ctx['method'] = req['method'].split('.') ctx['call_id'] = req['id'] ctx['rpc_context'] = { 'call_stack': [{'time': self.now_in_utc(), 'method': req['method']} ] } prov_action = {'service': ctx['module'], 'method': ctx['method'], 'method_params': req['params'] } ctx['provenance'] = [prov_action] try: token = environ.get('HTTP_AUTHORIZATION') # parse out the method being requested and check if it # has an authentication requirement method_name = req['method'] auth_req = self.method_authentication.get( method_name, 'none') if auth_req != 'none': if token is None and auth_req == 'required': err = JSONServerError() 
err.data = ( 'Authentication required for ' + 'kb_SPAdes ' + 'but no authentication header was passed') raise err elif token is None and auth_req == 'optional': pass else: try: user = self.auth_client.get_user(token) ctx['user_id'] = user ctx['authenticated'] = 1 ctx['token'] = token except Exception as e: if auth_req == 'required': err = JSONServerError() err.data = \ "Token validation failed: %s" % e raise err if (environ.get('HTTP_X_FORWARDED_FOR')): self.log(log.INFO, ctx, 'X-Forwarded-For: ' + environ.get('HTTP_X_FORWARDED_FOR')) self.log(log.INFO, ctx, 'start method') rpc_result = self.rpc_service.call(ctx, req) self.log(log.INFO, ctx, 'end method') status = '200 OK' except JSONRPCError as jre: err = {'error': {'code': jre.code, 'name': jre.message, 'message': jre.data } } trace = jre.trace if hasattr(jre, 'trace') else None rpc_result = self.process_error(err, ctx, req, trace) except Exception: err = {'error': {'code': 0, 'name': 'Unexpected Server Error', 'message': 'An unexpected server error ' + 'occurred', } } rpc_result = self.process_error(err, ctx, req, traceback.format_exc()) # print('Request method was %s\n' % environ['REQUEST_METHOD']) # print('Environment dictionary is:\n%s\n' % pprint.pformat(environ)) # print('Request body was: %s' % request_body) # print('Result from the method call is:\n%s\n' % \ # pprint.pformat(rpc_result)) if rpc_result: response_body = rpc_result else: response_body = '' response_headers = [ ('Access-Control-Allow-Origin', '*'), ('Access-Control-Allow-Headers', environ.get( 'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')), ('content-type', 'application/json'), ('content-length', str(len(response_body)))] start_response(status, response_headers) return [response_body.encode('utf8')] def process_error(self, error, context, request, trace=None): if trace: self.log(log.ERR, context, trace.split('\n')[0:-1]) if 'id' in request: error['id'] = request['id'] if 'version' in request: error['version'] = request['version'] e = error['error'].get('error') if not e: error['error']['error'] = trace elif 'jsonrpc' in request: error['jsonrpc'] = request['jsonrpc'] error['error']['data'] = trace else: error['version'] = '1.0' error['error']['error'] = trace return json.dumps(error) def now_in_utc(self): # noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8 dtnow = datetime.datetime.now() dtutcnow = datetime.datetime.utcnow() delta = dtnow - dtutcnow hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60, 60) return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm) application = Application() # This is the uwsgi application dictionary. On startup uwsgi will look # for this dict and pull its configuration from here. 
# This simply lists where to "mount" the application in the URL path # # This uwsgi module "magically" appears when running the app within # uwsgi and is not available otherwise, so wrap an exception handler # around it # # To run this server in uwsgi with 4 workers listening on port 9999 use: # uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_ # To run a using the single threaded python BaseHTTP service # listening on port 9999 by default execute this file # try: import uwsgi # Before we do anything with the application, see if the # configs specify patching all std routines to be asynch # *ONLY* use this if you are going to wrap the service in # a wsgi container that has enabled gevent, such as # uwsgi with the --gevent option if config is not None and config.get('gevent_monkeypatch_all', False): print("Monkeypatching std libraries for async") from gevent import monkey monkey.patch_all() uwsgi.applications = {'': application} except ImportError: # Not available outside of wsgi, ignore pass _proc = None def start_server(host='localhost', port=0, newprocess=False): ''' By default, will start the server on localhost on a system assigned port in the main thread. Excecution of the main thread will stay in the server main loop until interrupted. To run the server in a separate process, and thus allow the stop_server method to be called, set newprocess = True. This will also allow returning of the port number.''' global _proc if _proc: raise RuntimeError('server is already running') httpd = make_server(host, port, application) port = httpd.server_address[1] print("Listening on port %s" % port) if newprocess: _proc = Process(target=httpd.serve_forever) _proc.daemon = True _proc.start() else: httpd.serve_forever() return port def stop_server(): global _proc _proc.terminate() _proc = None def process_async_cli(input_file_path, output_file_path, token): exit_code = 0 with open(input_file_path) as data_file: req = json.load(data_file) if 'version' not in req: req['version'] = '1.1' if 'id' not in req: req['id'] = str(_random.random())[2:] ctx = MethodContext(application.userlog) if token: user = application.auth_client.get_user(token) ctx['user_id'] = user ctx['authenticated'] = 1 ctx['token'] = token if 'context' in req: ctx['rpc_context'] = req['context'] ctx['CLI'] = 1 ctx['module'], ctx['method'] = req['method'].split('.') prov_action = {'service': ctx['module'], 'method': ctx['method'], 'method_params': req['params']} ctx['provenance'] = [prov_action] resp = None try: resp = application.rpc_service.call_py(ctx, req) except JSONRPCError as jre: trace = jre.trace if hasattr(jre, 'trace') else None resp = {'id': req['id'], 'version': req['version'], 'error': {'code': jre.code, 'name': jre.message, 'message': jre.data, 'error': trace} } except Exception: trace = traceback.format_exc() resp = {'id': req['id'], 'version': req['version'], 'error': {'code': 0, 'name': 'Unexpected Server Error', 'message': 'An unexpected server error occurred', 'error': trace} } if 'error' in resp: exit_code = 500 with open(output_file_path, "w") as f: f.write(json.dumps(resp, cls=JSONObjectEncoder)) return exit_code if __name__ == "__main__": if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and os.path.isfile(sys.argv[1])): token = None if len(sys.argv) == 4: if os.path.isfile(sys.argv[3]): with open(sys.argv[3]) as token_file: token = token_file.read() else: token = sys.argv[3] sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token)) try: opts, args = getopt(sys.argv[1:], "", ["port=", "host="]) except GetoptError 
as err: # print help information and exit: print(str(err)) # will print something like "option -a not recognized" sys.exit(2) port = 9999 host = 'localhost' for o, a in opts: if o == '--port': port = int(a) elif o == '--host': host = a print("Host set to %s" % host) else: assert False, "unhandled option" start_server(host=host, port=port) # print("Listening on port %s" % port) # httpd = make_server( host, port, application) # # httpd.serve_forever()
[]
[]
[ "SDK_CALLBACK_URL" ]
[]
["SDK_CALLBACK_URL"]
python
1
0
test/k8s-integration/cluster.go
package main import ( "encoding/json" "errors" "fmt" "os" "os/exec" "path/filepath" "strconv" "strings" apimachineryversion "k8s.io/apimachinery/pkg/version" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/clientcmd" "k8s.io/klog" ) func gkeLocationArgs(gceZone, gceRegion string) (locationArg, locationVal string, err error) { switch { case len(gceZone) > 0: locationArg = "--zone" locationVal = gceZone case len(gceRegion) > 0: locationArg = "--region" locationVal = gceRegion default: return "", "", fmt.Errorf("zone and region unspecified") } return } func isRegionalGKECluster(gceZone, gceRegion string) bool { return len(gceRegion) > 0 } func clusterDownGCE(k8sDir string) error { cmd := exec.Command(filepath.Join(k8sDir, "hack", "e2e-internal", "e2e-down.sh")) cmd.Env = os.Environ() err := runCommand("Bringing Down E2E Cluster on GCE", cmd) if err != nil { return fmt.Errorf("failed to bring down kubernetes e2e cluster on gce: %v", err) } return nil } func clusterDownGKE(gceZone, gceRegion string) error { locationArg, locationVal, err := gkeLocationArgs(gceZone, gceRegion) if err != nil { return err } cmd := exec.Command("gcloud", "container", "clusters", "delete", *gkeTestClusterName, locationArg, locationVal, "--quiet") err = runCommand("Bringing Down E2E Cluster on GKE", cmd) if err != nil { return fmt.Errorf("failed to bring down kubernetes e2e cluster on gke: %v", err) } return nil } func buildKubernetes(k8sDir, command string) error { cmd := exec.Command("make", "-C", k8sDir, command) cmd.Env = os.Environ() err := runCommand(fmt.Sprintf("Running command in kubernetes/kubernetes path=%s", k8sDir), cmd) if err != nil { return fmt.Errorf("failed to build Kubernetes: %v", err) } return nil } func clusterUpGCE(k8sDir, gceZone string, numNodes int, numWindowsNodes int, imageType string) error { kshPath := filepath.Join(k8sDir, "cluster", "kubectl.sh") _, err := os.Stat(kshPath) if err == nil { // Set kubectl to the one bundled in the k8s tar for versioning err = os.Setenv("GCE_PD_KUBECTL", kshPath) if err != nil { return fmt.Errorf("failed to set cluster specific kubectl: %v", err) } } else { klog.Errorf("could not find cluster kubectl at %s, falling back to default kubectl", kshPath) } if len(*kubeFeatureGates) != 0 { err = os.Setenv("KUBE_FEATURE_GATES", *kubeFeatureGates) if err != nil { return fmt.Errorf("failed to set kubernetes feature gates: %v", err) } klog.V(4).Infof("Set Kubernetes feature gates: %v", *kubeFeatureGates) } err = setImageTypeEnvs(imageType) if err != nil { return fmt.Errorf("failed to set image type environment variables: %v", err) } err = os.Setenv("NUM_NODES", strconv.Itoa(numNodes)) if err != nil { return err } // the chain is NUM_WINDOWS_NODES -> --num-windows-nodes -> NUM_WINDOWS_NODES // runCommand runs e2e-up.sh inheriting env vars so the `--num-windows-nodes` // flags might not be needed, added to be similar to the setup of NUM_NODES err = os.Setenv("NUM_WINDOWS_NODES", strconv.Itoa(numWindowsNodes)) if err != nil { return err } // The default master size with few nodes is too small; the tests must hit the API server // more than usual. The main issue seems to be memory, to reduce GC times that stall the // api server. For defaults, get-master-size in k/k/cluster/gce/config-common.sh. 
if numNodes < 20 { err = os.Setenv("MASTER_SIZE", "n1-standard-4") if err != nil { return err } } err = os.Setenv("KUBE_GCE_ZONE", gceZone) if err != nil { return err } cmd := exec.Command(filepath.Join(k8sDir, "hack", "e2e-internal", "e2e-up.sh")) cmd.Env = os.Environ() err = runCommand("Starting E2E Cluster on GCE", cmd) if err != nil { return fmt.Errorf("failed to bring up kubernetes e2e cluster on gce: %v", err) } return nil } func setImageTypeEnvs(imageType string) error { switch strings.ToLower(imageType) { case "cos": case "cos_containerd": case "gci": // GCI/COS is default type and does not need env vars set case "ubuntu", "ubuntu_containerd": return errors.New("setting environment vars for bringing up *ubuntu* cluster on GCE is unimplemented") /* TODO(dyzz) figure out how to bring up a Ubuntu cluster on GCE. The below doesn't work. err := os.Setenv("KUBE_OS_DISTRIBUTION", "ubuntu") if err != nil { return err } err = os.Setenv("KUBE_GCE_NODE_IMAGE", image) if err != nil { return err } err = os.Setenv("KUBE_GCE_NODE_PROJECT", imageProject) if err != nil { return err } */ default: return fmt.Errorf("could not set env for image type %s, only gci, cos, ubuntu supported", imageType) } return nil } func clusterUpGKE(gceZone, gceRegion string, numNodes int, numWindowsNodes int, imageType string, useManagedDriver bool) error { locationArg, locationVal, err := gkeLocationArgs(gceZone, gceRegion) if err != nil { return err } out, err := exec.Command("gcloud", "container", "clusters", "list", locationArg, locationVal, "--verbosity", "none", "--filter", fmt.Sprintf("name=%s", *gkeTestClusterName)).CombinedOutput() if err != nil { return fmt.Errorf("failed to check for previous test cluster: %v %s", err, out) } if len(out) > 0 { klog.Infof("Detected previous cluster %s. Deleting so a new one can be created...", *gkeTestClusterName) err = clusterDownGKE(gceZone, gceRegion) if err != nil { return err } } var cmd *exec.Cmd cmdParams := []string{"container", "clusters", "create", *gkeTestClusterName, locationArg, locationVal, "--num-nodes", strconv.Itoa(numNodes), "--quiet", "--machine-type", "n1-standard-2", "--image-type", imageType} if isVariableSet(gkeClusterVer) { cmdParams = append(cmdParams, "--cluster-version", *gkeClusterVer) } else { cmdParams = append(cmdParams, "--release-channel", *gkeReleaseChannel) // release channel based GKE clusters require autorepair to be enabled. cmdParams = append(cmdParams, "--enable-autorepair") } if isVariableSet(gkeNodeVersion) { cmdParams = append(cmdParams, "--node-version", *gkeNodeVersion) } if useManagedDriver { cmdParams = append(cmdParams, "--addons", "GcePersistentDiskCsiDriver") } cmd = exec.Command("gcloud", cmdParams...) err = runCommand("Starting E2E Cluster on GKE", cmd) if err != nil { return fmt.Errorf("failed to bring up kubernetes e2e cluster on gke: %v", err) } // Because gcloud cannot disable addons on cluster create, the deployment has // to be disabled on update. 
clusterVersion := mustGetKubeClusterVersion() if !useManagedDriver && isGKEDeploymentInstalledByDefault(clusterVersion) { cmd = exec.Command( "gcloud", "beta", "container", "clusters", "update", *gkeTestClusterName, locationArg, locationVal, "--quiet", "--update-addons", "GcePersistentDiskCsiDriver=DISABLED") err = runCommand("Updating E2E Cluster on GKE to disable driver deployment", cmd) if err != nil { return fmt.Errorf("failed to update kubernetes e2e cluster on gke: %v", err) } } return nil } func downloadKubernetesSource(pkgDir, k8sIoDir, kubeVersion string) error { k8sDir := filepath.Join(k8sIoDir, "kubernetes") klog.Infof("Downloading Kubernetes source v=%s to path=%s", kubeVersion, k8sIoDir) if err := os.MkdirAll(k8sIoDir, 0777); err != nil { return err } if err := os.RemoveAll(k8sDir); err != nil { return err } // We clone rather than download from release archives, because the file naming has not been // stable. For example, in late 2021 it appears that archives of minor versions (eg v1.21.tgz) // stopped and was replaced with just patch version. if kubeVersion == "master" { // Clone of master. We cannot use a shallow clone, because the k8s version is not set, and // in order to find the revision git searches through the tags, and tags are not fetched in // a shallow clone. Not using a shallow clone adds about 700M to the ~5G archive directory, // after make quick-release, so this is not disastrous. klog.Info("cloning k8s master") out, err := exec.Command("git", "clone", "https://github.com/kubernetes/kubernetes", k8sDir).CombinedOutput() if err != nil { return fmt.Errorf("failed to clone kubernetes master: %s, err: %v", out, err) } } else { // Shallow clone of a release branch. vKubeVersion := "v" + kubeVersion klog.Infof("shallow clone of k8s %s", vKubeVersion) out, err := exec.Command("git", "clone", "--depth", "1", "https://github.com/kubernetes/kubernetes", k8sDir).CombinedOutput() if err != nil { return fmt.Errorf("failed to clone kubernetes %s: %s, err: %v", vKubeVersion, out, err) } } return nil } func getGKEKubeTestArgs(gceZone, gceRegion, imageType string, useKubetest2 bool) ([]string, error) { var locationArg, locationVal, locationArgK2 string switch { case len(gceZone) > 0: locationArg = "--gcp-zone" locationArgK2 = "--zone" locationVal = gceZone case len(gceRegion) > 0: locationArg = "--gcp-region" locationArgK2 = "--region" locationVal = gceRegion } var gkeEnv string switch gkeURL := os.Getenv("CLOUDSDK_API_ENDPOINT_OVERRIDES_CONTAINER"); gkeURL { case "https://staging-container.sandbox.googleapis.com/": gkeEnv = "staging" case "https://test-container.sandbox.googleapis.com/": gkeEnv = "test" case "": gkeEnv = "prod" default: // if the URL does not match to an option, assume it is a custom GKE backend // URL and pass that to kubetest gkeEnv = gkeURL } cmd := exec.Command("gcloud", "config", "get-value", "project") project, err := cmd.Output() if err != nil { return nil, fmt.Errorf("failed to get current project: %v", err) } // kubetest arguments args := []string{ "--up=false", "--down=false", "--provider=gke", "--gcp-network=default", "--check-version-skew=false", "--deployment=gke", fmt.Sprintf("--gcp-node-image=%s", imageType), "--gcp-network=default", fmt.Sprintf("--cluster=%s", *gkeTestClusterName), fmt.Sprintf("--gke-environment=%s", gkeEnv), fmt.Sprintf("%s=%s", locationArg, locationVal), fmt.Sprintf("--gcp-project=%s", project[:len(project)-1]), } // kubetest2 arguments argsK2 := []string{ "--up=false", "--down=false", fmt.Sprintf("--cluster-name=%s", 
*gkeTestClusterName), fmt.Sprintf("--environment=%s", gkeEnv), fmt.Sprintf("%s=%s", locationArgK2, locationVal), fmt.Sprintf("--project=%s", project[:len(project)-1]), } if useKubetest2 { return argsK2, nil } else { return args, nil } } func getNormalizedVersion(kubeVersion, gkeVersion string) (string, error) { if kubeVersion != "" && gkeVersion != "" { return "", fmt.Errorf("both kube version (%s) and gke version (%s) specified", kubeVersion, gkeVersion) } if kubeVersion == "" && gkeVersion == "" { return "", errors.New("neither kube version nor gke version specified") } var v string if kubeVersion != "" { v = kubeVersion } else if gkeVersion != "" { v = gkeVersion } if v == "master" || v == "latest" { // Ugh return v, nil } toks := strings.Split(v, ".") if len(toks) < 2 || len(toks) > 3 { return "", fmt.Errorf("got unexpected number of tokens in version string %s - wanted 2 or 3", v) } return strings.Join(toks[:2], "."), nil } func getKubeClusterVersion() (string, error) { out, err := exec.Command("kubectl", "version", "-o=json").Output() if err != nil { return "", fmt.Errorf("failed to obtain cluster version, error: %v; output was %s", err, out) } type version struct { ClientVersion *apimachineryversion.Info `json:"clientVersion,omitempty" yaml:"clientVersion,omitempty"` ServerVersion *apimachineryversion.Info `json:"serverVersion,omitempty" yaml:"serverVersion,omitempty"` } var v version err = json.Unmarshal(out, &v) if err != nil { return "", fmt.Errorf("Failed to parse kubectl version output, error: %v", err) } return v.ServerVersion.GitVersion, nil } func mustGetKubeClusterVersion() string { ver, err := getKubeClusterVersion() if err != nil { klog.Fatalf("Error: %v", err) } return ver } // getKubeConfig returns the full path to the // kubeconfig file set in $KUBECONFIG env. // If unset, then it defaults to $HOME/.kube/config func getKubeConfig() (string, error) { config, ok := os.LookupEnv("KUBECONFIG") if ok { return config, nil } homeDir, ok := os.LookupEnv("HOME") if !ok { return "", fmt.Errorf("HOME env not set") } return filepath.Join(homeDir, ".kube/config"), nil } // getKubeClient returns a Kubernetes client interface // for the test cluster func getKubeClient() (kubernetes.Interface, error) { kubeConfig, err := getKubeConfig() if err != nil { return nil, err } config, err := clientcmd.BuildConfigFromFlags("", kubeConfig) if err != nil { return nil, fmt.Errorf("failed to create config: %v", err) } kubeClient, err := kubernetes.NewForConfig(config) if err != nil { return nil, fmt.Errorf("failed to create client: %v", err) } return kubeClient, nil } func isGKEDeploymentInstalledByDefault(clusterVersion string) bool { cv := mustParseVersion(clusterVersion) return cv.atLeast(mustParseVersion("1.18.10-gke.2101")) && cv.lessThan(mustParseVersion("1.19.0")) || cv.atLeast(mustParseVersion("1.19.3-gke.2100")) }
[ "\"CLOUDSDK_API_ENDPOINT_OVERRIDES_CONTAINER\"" ]
[]
[ "CLOUDSDK_API_ENDPOINT_OVERRIDES_CONTAINER" ]
[]
["CLOUDSDK_API_ENDPOINT_OVERRIDES_CONTAINER"]
go
1
0
setup.py
from setuptools import setup __version__ = '0.0.1' __author__ = 'felix.scriptworld' requirements = [ 'selenium', 'typing', 'schedule' ] description = 'Travian Kingdoms automation bot.' setup( name='king-bot', version=__version__, author=__author__, author_email='[email protected]', url='https://github.com/scriptworld-git/king-bot', description=description, install_requires=requirements, include_package_data=True, )
[]
[]
[]
[]
[]
python
null
null
null
pulp/apis/gurobi_api.py
# PuLP : Python LP Modeler # Version 1.4.2 # Copyright (c) 2002-2005, Jean-Sebastien Roy ([email protected]) # Modifications Copyright (c) 2007- Stuart Anthony Mitchell ([email protected]) # $Id:solvers.py 1791 2008-04-23 22:54:34Z smit023 $ # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.""" from .core import LpSolver_CMD, LpSolver, subprocess, PulpSolverError, clock, log from .core import gurobi_path import os from uuid import uuid4 import sys from .. import constants import warnings # to import the gurobipy name into the module scope gurobipy = None class GUROBI(LpSolver): """ The Gurobi LP/MIP solver (via its python interface) The Gurobi variables are available (after a solve) in var.solverVar Constriaints in constraint.solverConstraint and the Model is in prob.solverModel """ try: sys.path.append(gurobi_path) # to import the name into the module scope global gurobipy import gurobipy except: # FIXME: Bug because gurobi returns #a gurobi exception on failed imports def available(self): """True if the solver is available""" return False def actualSolve(self, lp, callback = None): """Solve a well formulated lp problem""" raise PulpSolverError("GUROBI: Not Available") else: def __init__(self, mip = True, msg = True, timeLimit = None, epgap = None, **solverParams): """ Initializes the Gurobi solver. 
@param mip: if False the solver will solve a MIP as an LP @param msg: displays information from the solver to stdout @param timeLimit: sets the maximum time for solution @param epgap: sets the integer bound gap """ LpSolver.__init__(self, mip, msg) self.timeLimit = timeLimit self.epgap = epgap #set the output of gurobi if not self.msg: gurobipy.setParam("OutputFlag", 0) #set the gurobi parameter values for key,value in solverParams.items(): gurobipy.setParam(key, value) def findSolutionValues(self, lp): model = lp.solverModel solutionStatus = model.Status GRB = gurobipy.GRB # TODO: check status for Integer Feasible gurobiLpStatus = {GRB.OPTIMAL: constants.LpStatusOptimal, GRB.INFEASIBLE: constants.LpStatusInfeasible, GRB.INF_OR_UNBD: constants.LpStatusInfeasible, GRB.UNBOUNDED: constants.LpStatusUnbounded, GRB.ITERATION_LIMIT: constants.LpStatusNotSolved, GRB.NODE_LIMIT: constants.LpStatusNotSolved, GRB.TIME_LIMIT: constants.LpStatusNotSolved, GRB.SOLUTION_LIMIT: constants.LpStatusNotSolved, GRB.INTERRUPTED: constants.LpStatusNotSolved, GRB.NUMERIC: constants.LpStatusNotSolved, } #populate pulp solution values try: for var, value in zip(lp.variables(), model.getAttr(GRB.Attr.X, model.getVars())): var.varValue = value except (gurobipy.GurobiError, AttributeError): pass try: for var, value in zip(lp.variables(), model.getAttr(GRB.Attr.RC, model.getVars())): var.dj = value except (gurobipy.GurobiError, AttributeError): pass #put pi and slack variables against the constraints try: for constr, value in zip(lp.constraints.values(), model.getAttr(GRB.Pi, model.getConstrs())): constr.pi = value except (gurobipy.GurobiError, AttributeError): pass try: for constr, value in zip(lp.constraints.values(), model.getAttr(GRB.Slack, model.getConstrs())): constr.slack = value except (gurobipy.GurobiError, AttributeError): pass if self.msg: print("Gurobi status=", solutionStatus) lp.resolveOK = True for var in lp.variables(): var.isModified = False status = gurobiLpStatus.get(solutionStatus, constants.LpStatusUndefined) lp.assignStatus(status) return status def available(self): """True if the solver is available""" return True def callSolver(self, lp, callback = None): """Solves the problem with gurobi """ #solve the problem self.solveTime = -clock() lp.solverModel.optimize(callback = callback) self.solveTime += clock() def buildSolverModel(self, lp): """ Takes the pulp lp model and translates it into a gurobi model """ log.debug("create the gurobi model") lp.solverModel = gurobipy.Model(lp.name) log.debug("set the sense of the problem") if lp.sense == constants.LpMaximize: lp.solverModel.setAttr("ModelSense", -1) if self.timeLimit: lp.solverModel.setParam("TimeLimit", self.timeLimit) if self.epgap: lp.solverModel.setParam("MIPGap", self.epgap) log.debug("add the variables to the problem") for var in lp.variables(): lowBound = var.lowBound if lowBound is None: lowBound = -gurobipy.GRB.INFINITY upBound = var.upBound if upBound is None: upBound = gurobipy.GRB.INFINITY obj = lp.objective.get(var, 0.0) varType = gurobipy.GRB.CONTINUOUS if var.cat == constants.LpInteger and self.mip: varType = gurobipy.GRB.INTEGER var.solverVar = lp.solverModel.addVar(lowBound, upBound, vtype = varType, obj = obj, name = var.name) lp.solverModel.update() log.debug("add the Constraints to the problem") for name,constraint in lp.constraints.items(): #build the expression expr = gurobipy.LinExpr(list(constraint.values()), [v.solverVar for v in constraint.keys()]) if constraint.sense == constants.LpConstraintLE: relation = 
gurobipy.GRB.LESS_EQUAL elif constraint.sense == constants.LpConstraintGE: relation = gurobipy.GRB.GREATER_EQUAL elif constraint.sense == constants.LpConstraintEQ: relation = gurobipy.GRB.EQUAL else: raise PulpSolverError('Detected an invalid constraint type') constraint.solverConstraint = lp.solverModel.addConstr(expr, relation, -constraint.constant, name) lp.solverModel.update() def actualSolve(self, lp, callback = None): """ Solve a well formulated lp problem creates a gurobi model, variables and constraints and attaches them to the lp model which it then solves """ self.buildSolverModel(lp) #set the initial solution log.debug("Solve the Model using gurobi") self.callSolver(lp, callback = callback) #get the solution information solutionStatus = self.findSolutionValues(lp) for var in lp.variables(): var.modified = False for constraint in lp.constraints.values(): constraint.modified = False return solutionStatus def actualResolve(self, lp, callback = None): """ Solve a well formulated lp problem uses the old solver and modifies the rhs of the modified constraints """ log.debug("Resolve the Model using gurobi") for constraint in lp.constraints.values(): if constraint.modified: constraint.solverConstraint.setAttr(gurobipy.GRB.Attr.RHS, -constraint.constant) lp.solverModel.update() self.callSolver(lp, callback = callback) #get the solution information solutionStatus = self.findSolutionValues(lp) for var in lp.variables(): var.modified = False for constraint in lp.constraints.values(): constraint.modified = False return solutionStatus class GUROBI_CMD(LpSolver_CMD): """The GUROBI_CMD solver""" def defaultPath(self): return self.executableExtension("gurobi_cl") def available(self): """True if the solver is available""" return self.executable(self.path) def actualSolve(self, lp): """Solve a well formulated lp problem""" # TODO: workaround for python not reading LD_LIBRARY_PATH # in my version of ubuntu if 'GUROBI_HOME' in os.environ: if 'LD_LIBRARY_PATH' not in os.environ: os.environ['LD_LIBRARY_PATH'] = "" os.environ['LD_LIBRARY_PATH'] += ':' + os.environ['GUROBI_HOME'] + "/lib" if not self.executable(self.path): raise PulpSolverError("PuLP: cannot execute "+self.path) if not self.keepFiles: uuid = uuid4().hex tmpLp = os.path.join(self.tmpDir, "%s-pulp.lp" % uuid) tmpSol = os.path.join(self.tmpDir, "%s-pulp.sol" % uuid) tmpMst = os.path.join(self.tmpDir, "%s-pulp.mst" % uuid) else: tmpLp = lp.name+"-pulp.lp" tmpSol = lp.name+"-pulp.sol" tmpMst = lp.name + "-pulp.mst" vs = lp.writeLP(tmpLp, writeSOS = 1) try: os.remove(tmpSol) except: pass cmd = self.path cmd += ' ' + ' '.join(['%s=%s' % (key, value) for key, value in self.options]) cmd += ' ResultFile=%s' % tmpSol if self.mip_start: self.writesol(filename=tmpMst, vs=vs) cmd += ' InputFile=%s' % tmpMst if lp.isMIP(): if not self.mip: warnings.warn('GUROBI_CMD does not allow a problem to be relaxed') cmd += ' %s' % tmpLp if self.msg: pipe = None else: pipe = open(os.devnull, 'w') return_code = subprocess.call(cmd.split(), stdout = pipe, stderr = pipe) # Close the pipe now if we used it. 
if pipe is not None: pipe.close() if return_code != 0: raise PulpSolverError("PuLP: Error while trying to execute "+self.path) if not os.path.exists(tmpSol): warnings.warn('GUROBI_CMD does not provide good solution status for non-optimal solutions') status = constants.LpStatusNotSolved values = reducedCosts = shadowPrices = slacks = None else: status, values, reducedCosts, shadowPrices, slacks = self.readsol(tmpSol) if not self.keepFiles: for f in [tmpSol, tmpMst, tmpLp, "gurobi.log"]: try: os.remove(f) except: pass if status != constants.LpStatusInfeasible: lp.assignVarsVals(values) lp.assignVarsDj(reducedCosts) lp.assignConsPi(shadowPrices) lp.assignConsSlack(slacks) lp.assignStatus(status) return status def readsol(self, filename): """Read a Gurobi solution file""" with open(filename) as my_file: try: next(my_file) # skip the objective value except StopIteration: # Empty file, not solved warnings.warn('GUROBI_CMD does not provide good solution status for non-optimal solutions') status = constants.LpStatusNotSolved return status, {}, {}, {}, {} # We have no idea what the status is, assume optimal # TODO: check status for Integer Feasible status = constants.LpStatusOptimal shadowPrices = {} slacks = {} values = {} reducedCosts = {} for line in my_file: if line[0] != '#': # skip comments name, value = line.split() values[name] = float(value) return status, values, reducedCosts, shadowPrices, slacks def writesol(self, filename, vs): """Writes a GUROBI solution file""" values = [(v.name, v.value()) for v in vs if v.value() is not None] rows = [] for name, value in values: rows.append('{} {}'.format(name, value)) with open(filename, 'w') as f: f.write('\n'.join(rows)) return True
[]
[]
[ "LD_LIBRARY_PATH", "GUROBI_HOME" ]
[]
["LD_LIBRARY_PATH", "GUROBI_HOME"]
python
2
0
pkg/exchange/max/stream.go
package max import ( "context" "os" "strconv" "time" "github.com/gorilla/websocket" max "github.com/c9s/bbgo/pkg/exchange/max/maxapi" "github.com/c9s/bbgo/pkg/fixedpoint" "github.com/c9s/bbgo/pkg/types" "github.com/c9s/bbgo/pkg/util" ) var logger = log.WithField("exchange", "max") type Stream struct { types.StandardStream websocketService *max.WebSocketService publicOnly bool } func NewStream(key, secret string) *Stream { url := os.Getenv("MAX_API_WS_URL") if url == "" { url = max.WebSocketURL } wss := max.NewWebSocketService(url, key, secret) stream := &Stream{ websocketService: wss, } wss.OnConnect(func(conn *websocket.Conn) { if key == "" || secret == "" { log.Warn("MAX API key or secret is empty, will not send authentication command") } else { if err := wss.Auth(); err != nil { wss.EmitError(err) logger.WithError(err).Error("failed to send auth request") } } }) wss.OnDisconnect(stream.EmitDisconnect) wss.OnMessage(func(message []byte) { logger.Debugf("M: %s", message) }) wss.OnKLineEvent(func(e max.KLineEvent) { kline := e.KLine.KLine() stream.EmitKLine(kline) if kline.Closed { stream.EmitKLineClosed(kline) } }) wss.OnOrderSnapshotEvent(func(e max.OrderSnapshotEvent) { for _, o := range e.Orders { globalOrder, err := toGlobalOrderUpdate(o) if err != nil { log.WithError(err).Error("websocket order snapshot convert error") continue } stream.EmitOrderUpdate(*globalOrder) } }) wss.OnOrderUpdateEvent(func(e max.OrderUpdateEvent) { for _, o := range e.Orders { globalOrder, err := toGlobalOrderUpdate(o) if err != nil { log.WithError(err).Error("websocket order update convert error") continue } stream.EmitOrderUpdate(*globalOrder) } }) wss.OnTradeUpdateEvent(func(e max.TradeUpdateEvent) { for _, tradeUpdate := range e.Trades { trade, err := convertWebSocketTrade(tradeUpdate) if err != nil { log.WithError(err).Error("websocket trade update convert error") return } stream.EmitTradeUpdate(*trade) } }) wss.OnBookEvent(func(e max.BookEvent) { newBook, err := e.OrderBook() if err != nil { logger.WithError(err).Error("book convert error") return } newBook.Symbol = toGlobalSymbol(e.Market) switch e.Event { case "snapshot": stream.EmitBookSnapshot(newBook) case "update": stream.EmitBookUpdate(newBook) } }) wss.OnConnect(func(conn *websocket.Conn) { stream.EmitConnect() }) wss.OnAccountSnapshotEvent(func(e max.AccountSnapshotEvent) { snapshot := map[string]types.Balance{} for _, bm := range e.Balances { balance, err := bm.Balance() if err != nil { continue } snapshot[toGlobalCurrency(balance.Currency)] = *balance } stream.EmitBalanceSnapshot(snapshot) }) wss.OnAccountUpdateEvent(func(e max.AccountUpdateEvent) { snapshot := map[string]types.Balance{} for _, bm := range e.Balances { balance, err := bm.Balance() if err != nil { continue } snapshot[toGlobalCurrency(balance.Currency)] = *balance } stream.EmitBalanceUpdate(snapshot) }) wss.OnError(func(err error) { log.WithError(err).Error("websocket error") }) return stream } func (s *Stream) SetPublicOnly() { s.publicOnly = true } func (s *Stream) Subscribe(channel types.Channel, symbol string, options types.SubscribeOptions) { opt := max.SubscribeOptions{} if len(options.Depth) > 0 { depth, err := strconv.Atoi(options.Depth) if err != nil { panic(err) } opt.Depth = depth } if len(options.Interval) > 0 { opt.Resolution = options.Interval } s.websocketService.Subscribe(string(channel), toLocalSymbol(symbol), opt) } func (s *Stream) Connect(ctx context.Context) error { err := s.websocketService.Connect(ctx) if err != nil { return err } s.EmitStart() return 
nil } func (s *Stream) Close() error { return s.websocketService.Close() } func convertWebSocketTrade(t max.TradeUpdate) (*types.Trade, error) { // skip trade ID that is the same. however this should not happen var side = toGlobalSideType(t.Side) // trade time mts := time.Unix(0, t.Timestamp*int64(time.Millisecond)) price, err := strconv.ParseFloat(t.Price, 64) if err != nil { return nil, err } quantity, err := strconv.ParseFloat(t.Volume, 64) if err != nil { return nil, err } quoteQuantity := price * quantity fee, err := strconv.ParseFloat(t.Fee, 64) if err != nil { return nil, err } return &types.Trade{ ID: int64(t.ID), OrderID: t.OrderID, Symbol: toGlobalSymbol(t.Market), Exchange: types.ExchangeMax, Price: price, Quantity: quantity, Side: side, IsBuyer: side == types.SideTypeBuy, IsMaker: t.Maker, Fee: fee, FeeCurrency: toGlobalCurrency(t.FeeCurrency), QuoteQuantity: quoteQuantity, Time: types.Time(mts), }, nil } func toGlobalOrderUpdate(u max.OrderUpdate) (*types.Order, error) { executedVolume, err := fixedpoint.NewFromString(u.ExecutedVolume) if err != nil { return nil, err } remainingVolume, err := fixedpoint.NewFromString(u.RemainingVolume) if err != nil { return nil, err } return &types.Order{ SubmitOrder: types.SubmitOrder{ ClientOrderID: u.ClientOID, Symbol: toGlobalSymbol(u.Market), Side: toGlobalSideType(u.Side), Type: toGlobalOrderType(u.OrderType), Quantity: util.MustParseFloat(u.Volume), Price: util.MustParseFloat(u.Price), StopPrice: util.MustParseFloat(u.StopPrice), TimeInForce: "GTC", // MAX only supports GTC GroupID: u.GroupID, }, Exchange: types.ExchangeMax, OrderID: u.ID, Status: toGlobalOrderStatus(u.State, executedVolume, remainingVolume), ExecutedQuantity: executedVolume.Float64(), CreationTime: types.Time(time.Unix(0, u.CreatedAtMs*int64(time.Millisecond))), }, nil }
[ "\"MAX_API_WS_URL\"" ]
[]
[ "MAX_API_WS_URL" ]
[]
["MAX_API_WS_URL"]
go
1
0
backend/config/ext_config.py
import os env = os.environ FTP_URL = env.get('FTP_URL') FTP_USER = env.get('FTP_USER') FTP_PW = env.get('FTP_PW') FORECAST_INTERVAL = 3 DATABASE_URL = env.get('DATABASE_URL') TEST_DATABASE_URL = env.get('TEST_DATABASE_URL') EOSM_LOGIN = env.get('EOSM_LOGIN') ELEVATION_SERVICE_URL = env.get('ELEVATION_SERVICE_URL')
[]
[]
[]
[]
[]
python
0
0
proxy/python2.7/proxy.py
# Copyright (C) 2017 MinLab Ltd. from __future__ import print_function import json import os import urllib2 import socket def simplify(obj): keys = getattr(obj,'__slots__',None) or getattr(obj,'__dict__',None) if keys == None: return obj return {key: simplify(getattr(obj, key)) for key in keys} def proxy(event, context, server=None, funcName=None): server = server or os.environ.get("MLESS_SERVER") if server == None: raise Exception("No server defined") funcName = funcName or os.environ.get("MLESS_FUNCNAME") env = dict(os.environ.items()) if funcName != None: env["AWS_LAMBDA_FUNCTION_NAME"] = funcName req = { "event": event, "context": simplify(context), "remaining": context.get_remaining_time_in_millis(), "env": env } print("sending request to " + server) try: f = urllib2.urlopen(urllib2.Request(server+"/invoke", json.dumps(req), {'Content-Type': 'application/json'})) except urllib2.HTTPError as e: content=e.read() raise StandardError("ServerError: %s: %s" % (e.code, content)) response = f.read() f.close() print("response:" +response) return response
[]
[]
[ "MLESS_FUNCNAME", "MLESS_SERVER" ]
[]
["MLESS_FUNCNAME", "MLESS_SERVER"]
python
2
0
Judger/src/main/java/tw/waterball/judgegirl/judger/tests/AbstractJudgerTest.java
package tw.waterball.judgegirl.judger.tests; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.mockito.ArgumentCaptor; import tw.waterball.judgegirl.commons.models.files.FileResource; import tw.waterball.judgegirl.judger.CCJudger; import tw.waterball.judgegirl.judger.DefaultCCJudgerFactory; import tw.waterball.judgegirl.plugins.impl.match.AllMatchPolicyPlugin; import tw.waterball.judgegirl.primitives.problem.*; import tw.waterball.judgegirl.primitives.submission.Submission; import tw.waterball.judgegirl.primitives.submission.events.VerdictIssuedEvent; import tw.waterball.judgegirl.primitives.submission.verdict.Judge; import tw.waterball.judgegirl.primitives.submission.verdict.ProgramProfile; import tw.waterball.judgegirl.primitives.submission.verdict.Verdict; import tw.waterball.judgegirl.problemapi.clients.ProblemServiceDriver; import tw.waterball.judgegirl.problemapi.views.ProblemView; import tw.waterball.judgegirl.submissionapi.clients.EventPublisher; import tw.waterball.judgegirl.submissionapi.clients.SubmissionServiceDriver; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.File; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Paths; import static java.io.File.createTempFile; import static java.lang.String.format; import static java.util.Collections.singletonList; import static java.util.Optional.of; import static org.junit.jupiter.api.Assertions.*; import static org.mockito.Mockito.*; import static tw.waterball.judgegirl.commons.utils.ZipUtils.zipDirectoryContents; import static tw.waterball.judgegirl.submissionapi.views.SubmissionView.toViewModel; /** * @author - [email protected] (Waterball) */ public abstract class AbstractJudgerTest { static String problemHomePath; static String providedCodesHomeFormat; static String testcaseIOsHomeFormat; static String submittedCodesHomeFormat; public static final Language CURRENTLY_ONLY_SUPPORT_C = Language.C; private final int studentId = 1234; private int problemId; private Problem problem; private Submission submission; private ProblemServiceDriver problemServiceDriver; private SubmissionServiceDriver submissionServiceDriver; private EventPublisher eventPublisher; private CCJudger judger; @BeforeAll protected static void beforeAll() { problemHomePath = System.getenv("JUDGER_TEST_PROBLEM_HOME"); if (problemHomePath == null) { throw new IllegalArgumentException("Require an env var: JUDGER_TEST_PROBLEM_HOME"); } providedCodesHomeFormat = problemHomePath + "/%s/providedCodes"; // (1: problem's id) testcaseIOsHomeFormat = problemHomePath + "/%s/testcases"; // (1: problem's id) submittedCodesHomeFormat = problemHomePath + "/%s/%s/submitted"; // (1: problem's id, 2: judge status) System.out.printf("Problem home: %s%n", problemHomePath); } @BeforeEach void setup() { problem = getProblem(); problem.setOutputMatchPolicyPluginTag(AllMatchPolicyPlugin.TAG); problem.getLanguageEnvs().values() .forEach(languageEnv -> languageEnv.setProvidedCodes(new ProvidedCodes("providedCodesFileId", singletonList("providedCodesFileName")))); problemId = problem.getId(); submission = new Submission(studentId, problem.getId(), CURRENTLY_ONLY_SUPPORT_C.toString(), "fileId"); problemServiceDriver = mock(ProblemServiceDriver.class); submissionServiceDriver = mock(SubmissionServiceDriver.class); eventPublisher = mock(EventPublisher.class); judger = DefaultCCJudgerFactory.create("test", "/judger-layout.yaml", problemServiceDriver, 
submissionServiceDriver, eventPublisher); } protected abstract Problem getProblem(); @Test void judge_AC() throws IOException { submission.setId("[" + problemId + "] Submission_AC"); if (submittedCodeExists(JudgeStatus.AC)) { mockServiceDrivers(JudgeStatus.AC); judger.judge(studentId, problemId, submission.getId()); verifyACPublished(); } } @Test void judge_CE() throws IOException { submission.setId("[" + problemId + "] Submission_CE"); if (submittedCodeExists(JudgeStatus.CE)) { mockServiceDrivers(JudgeStatus.CE); judger.judge(studentId, problemId, submission.getId()); verifyCEPublished(); } } @Test void judge_TLE() throws IOException { submission.setId("[" + problemId + "] Submission_TLE"); if (submittedCodeExists(JudgeStatus.TLE)) { mockServiceDrivers(JudgeStatus.TLE); judger.judge(studentId, problemId, submission.getId()); verifyTLEPublished(); } } @Test void judge_WA() throws IOException { submission.setId("[" + problemId + "] Submission_WA"); if (submittedCodeExists(JudgeStatus.WA)) { mockServiceDrivers(JudgeStatus.WA); judger.judge(studentId, problemId, submission.getId()); verifyWAPublished(); } } private void verifyACPublished() { VerdictIssuedEvent event = captureVerdictIssuedEvent(); Verdict verdict = event.getVerdict(); for (int i = 0; i < problem.numOfTestcases(); i++) { Judge judge = verdict.getJudges().get(i); Testcase testCase = problem.getTestcase(i); assertEquals(JudgeStatus.AC, judge.getStatus(), event.toString()); assertEquals(testCase.getGrade(), judge.getGrade()); assertEquals(testCase.getName(), judge.getTestcaseName()); ProgramProfile profile = judge.getProgramProfile(); assertEquals("", profile.getErrorMessage(), "Error message should be empty if AC"); assertTrue(profile.getRuntime() <= testCase.getTimeLimit()); assertTrue(profile.getMemoryUsage() <= testCase.getMemoryLimit()); } assertEquals(problemId, event.getProblemId()); assertEquals(problem.getTitle(), event.getProblemTitle()); assertEquals(submission.getId(), event.getSubmissionId()); assertNull(verdict.getErrorMessage(), "Compile error message should be null if the compile succeeded."); } private void verifyCEPublished() { VerdictIssuedEvent event = captureVerdictIssuedEvent(); Verdict verdict = event.getVerdict(); assertEquals(problemId, event.getProblemId()); assertEquals(problem.getTitle(), event.getProblemTitle()); assertEquals(submission.getId(), event.getSubmissionId()); assertTrue(verdict.isCompileError()); assertNotNull(verdict.getErrorMessage(), "Compile error message should not be null if the compile failed."); } private void verifyTLEPublished() { VerdictIssuedEvent event = captureVerdictIssuedEvent(); Verdict verdict = event.getVerdict(); for (int i = 0; i < problem.numOfTestcases(); i++) { Judge judge = verdict.getJudges().get(i); Testcase testCase = problem.getTestcase(i); assertEquals(JudgeStatus.TLE, judge.getStatus(), event.toString()); assertEquals(0, judge.getGrade()); assertEquals(testCase.getName(), judge.getTestcaseName()); ProgramProfile profile = judge.getProgramProfile(); assertTrue(profile.getRuntime() > testCase.getTimeLimit()); } assertEquals(problemId, event.getProblemId()); assertEquals(problem.getTitle(), event.getProblemTitle()); assertEquals(submission.getId(), event.getSubmissionId()); assertNull(verdict.getErrorMessage(), "Compile error message should be null if the compile succeeded."); } private void verifyWAPublished() { VerdictIssuedEvent event = captureVerdictIssuedEvent(); Verdict verdict = event.getVerdict(); for (int i = 0; i < problem.numOfTestcases(); i++) { Judge 
judge = verdict.getJudges().get(i); Testcase testCase = problem.getTestcase(i); assertEquals(JudgeStatus.WA, judge.getStatus(), event.toString()); assertEquals(0, judge.getGrade()); assertEquals(testCase.getName(), judge.getTestcaseName()); } assertEquals(problemId, event.getProblemId()); assertEquals(problem.getTitle(), event.getProblemTitle()); assertEquals(submission.getId(), event.getSubmissionId()); assertNull(verdict.getErrorMessage(), "Compile error message should be null if the compile succeeded."); } private boolean submittedCodeExists(JudgeStatus status) { String fileName = format(submittedCodesHomeFormat, problem.getId(), status); return Files.exists(Paths.get(fileName)); } private void mockServiceDrivers(JudgeStatus judgeStatus) throws IOException { when(submissionServiceDriver.getSubmission(problemId, studentId, submission.getId())) .thenReturn(toViewModel(submission)); when(problemServiceDriver.getProblem(problem.getId())) .thenReturn(of(problem).map(ProblemView::toViewModel)); mockDownloadRequests(judgeStatus); } private void mockDownloadRequests(JudgeStatus status) throws IOException { mockDownloadSubmittedCodes(status); mockDownloadProvidedCodes(); mockDownloadTestcaseIOs(); } private void mockDownloadSubmittedCodes(JudgeStatus status) throws IOException { String submittedCodesHomePath = format(submittedCodesHomeFormat, problem.getId(), status); byte[] zippedSubmittedCodesBytes = zipDirectory(submittedCodesHomePath); when(submissionServiceDriver.downloadSubmittedCodes( problemId, studentId, submission.getId(), submission.getSubmittedCodesFileId())) .thenReturn(new FileResource(submittedCodesHomePath, zippedSubmittedCodesBytes.length, new ByteArrayInputStream(zippedSubmittedCodesBytes))); } private void mockDownloadProvidedCodes() throws IOException { String providedCodesHomePath = format(providedCodesHomeFormat, problem.getId()); byte[] zippedProvidedCodesBytes = zipDirectory(providedCodesHomePath); var languageEnv = problem.getLanguageEnv(CURRENTLY_ONLY_SUPPORT_C); when(problemServiceDriver.downloadProvidedCodes(problem.getId(), languageEnv.getName(), languageEnv.getProvidedCodesFileId().orElseThrow())) .thenReturn(new FileResource(providedCodesHomePath, zippedProvidedCodesBytes.length, new ByteArrayInputStream(zippedProvidedCodesBytes))); } private void mockDownloadTestcaseIOs() throws IOException { String testcaseIOsHomePath = format(testcaseIOsHomeFormat, problem.getId()); for (Testcase testcase : problem.getTestcases()) { byte[] zippedTestcaseIOBytes = zipDirectory(testcaseIOsHomePath + "/" + testcase.getName()); when(problemServiceDriver.downloadTestCaseIOs(problem.getId(), testcase.getId())) .thenReturn(new FileResource(testcaseIOsHomePath, zippedTestcaseIOBytes.length, new ByteArrayInputStream(zippedTestcaseIOBytes))); } } private static byte[] zipDirectory(String directoryPath) throws IOException { File tempZip = createTempFile("judge-girl", ".zip"); tempZip.deleteOnExit(); ByteArrayOutputStream baos = new ByteArrayOutputStream(); zipDirectoryContents(new File(directoryPath), baos); Files.write(tempZip.toPath(), baos.toByteArray()); return baos.toByteArray(); } private VerdictIssuedEvent captureVerdictIssuedEvent() { ArgumentCaptor<VerdictIssuedEvent> argumentCaptor = ArgumentCaptor.forClass(VerdictIssuedEvent.class); verify(eventPublisher).publish(argumentCaptor.capture()); return argumentCaptor.getValue(); } }
[ "\"JUDGER_TEST_PROBLEM_HOME\"" ]
[]
[ "JUDGER_TEST_PROBLEM_HOME" ]
[]
["JUDGER_TEST_PROBLEM_HOME"]
java
1
0
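The Java test driver above follows a mock-capture-assert cycle: the submission and problem service drivers are stubbed with Mockito, the judger runs, and the single VerdictIssuedEvent published through eventPublisher is captured with an ArgumentCaptor and checked test case by test case. For comparison only, here is a minimal sketch of the same capture pattern using Python's unittest.mock; FakeJudger and the event field names are hypothetical and not part of the project:

import unittest
from unittest.mock import MagicMock

class FakeJudger:
    """Hypothetical stand-in for the real judger: publishes one event per judge() call."""
    def __init__(self, publisher):
        self.publisher = publisher

    def judge(self, student_id, problem_id, submission_id):
        self.publisher.publish({"problemId": problem_id, "submissionId": submission_id})

class JudgePublishTest(unittest.TestCase):
    def test_judge_publishes_one_verdict_event(self):
        publisher = MagicMock()                      # stands in for eventPublisher
        FakeJudger(publisher).judge("studentId", "problemId", "submissionId")

        # Equivalent of Mockito's verify() + ArgumentCaptor: capture the published event.
        publisher.publish.assert_called_once()
        event = publisher.publish.call_args.args[0]
        self.assertEqual("problemId", event["problemId"])

if __name__ == "__main__":
    unittest.main()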
cmd/root.go
package cmd // Copyright © 2018 Everbridge, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. import ( "fmt" "os" "path/filepath" "github.com/Everbridge/generate-secure-pillar/pki" homedir "github.com/mitchellh/go-homedir" "github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/spf13/viper" tilde "gopkg.in/mattes/go-expand-tilde.v1" ) var logger = logrus.New() var inputFilePath string var outputFilePath = os.Stdout.Name() var cfgFile string var profile string var pgpKeyName string var publicKeyRing = "~/.gnupg/pubring.gpg" var privateKeyRing = "~/.gnupg/secring.gpg" var updateInPlace bool var topLevelElement string var recurseDir string var yamlPath string // rootCmd represents the base command when called without any subcommands var rootCmd = &cobra.Command{ Use: "generate-secure-pillar", Short: "Create and update encrypted content or decrypt encrypted content.", Example: ` # specify a config profile and create a new file $ generate-secure-pillar --profile dev create --name secret_name1 --value secret_value1 --name secret_name2 --value secret_value2 --outfile new.sls # create a new sls file $ generate-secure-pillar -k "Salt Master" create --name secret_name1 --value secret_value1 --name secret_name2 --value secret_value2 --outfile new.sls # add to the new file $ generate-secure-pillar -k "Salt Master" update --name new_secret_name --value new_secret_value --file new.sls # update an existing value $ generate-secure-pillar -k "Salt Master" update --name secret_name --value secret_value3 --file new.sls # encrypt all plain text values in a file $ generate-secure-pillar -k "Salt Master" encrypt all --file us1.sls --outfile us1.sls # or use --update flag $ generate-secure-pillar -k "Salt Master" encrypt all --file us1.sls --update # encrypt all plain text values in a file under the element 'secret_stuff' $ generate-secure-pillar -k "Salt Master" --element secret_stuff encrypt all --file us1.sls --outfile us1.sls # recurse through all sls files, encrypting all values $ generate-secure-pillar -k "Salt Master" encrypt recurse -d /path/to/pillar/secure/stuff # recurse through all sls files, decrypting all values (requires imported private key) $ generate-secure-pillar decrypt recurse -d /path/to/pillar/secure/stuff # decrypt a specific existing value (requires imported private key) $ generate-secure-pillar decrypt path --path "some:yaml:path" --file new.sls # decrypt all files and re-encrypt with given key (requires imported private key) $ generate-secure-pillar -k "New Salt 
Master Key" rotate -d /path/to/pillar/secure/stuff # show all PGP key IDs used in a file $ generate-secure-pillar keys all --file us1.sls # show all keys used in all files in a given directory $ generate-secure-pillar keys recurse -d /path/to/pillar/secure/stuff # show the PGP Key ID used for an element at a path in a file $ generate-secure-pillar keys path --path "some:yaml:path" --file new.sls `, Version: "1.0.546", } const all = "all" const recurse = "recurse" const path = "path" // Execute adds all child commands to the root command and sets flags appropriately. // This is called by main.main(). It only needs to happen once to the rootCmd. func Execute() { if err := rootCmd.Execute(); err != nil { fmt.Println(err) os.Exit(1) } } func init() { logger.Out = os.Stdout cobra.OnInitialize(initConfig) // respect the env var if set gpgHome := os.Getenv("GNUPGHOME") if gpgHome != "" { publicKeyRing = fmt.Sprintf("%s/pubring.gpg", gpgHome) privateKeyRing = fmt.Sprintf("%s/secring.gpg", gpgHome) } // check for GNUPG1 pubring file filePath, err := tilde.Expand(publicKeyRing) if err != nil { logger.Fatalf("Error with GNUPG pubring path: %s", err) } if _, err = os.Stat(filepath.Clean(filePath)); os.IsNotExist(err) { if err != nil { logger.Fatalf("Error finding GNUPG pubring file: %s", err) } } rootCmd.PersistentFlags().Bool("version", false, "print the version") rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.config/generate-secure-pillar/config.yaml)") rootCmd.PersistentFlags().StringVar(&profile, "profile", "", "config file (default is $HOME/.config/generate-secure-pillar/config.yaml)") rootCmd.PersistentFlags().StringVarP(&pgpKeyName, "pgp_key", "k", pgpKeyName, "PGP key name, email, or ID to use for encryption") rootCmd.PersistentFlags().StringVar(&publicKeyRing, "pubring", publicKeyRing, "PGP public keyring") rootCmd.PersistentFlags().StringVar(&privateKeyRing, "secring", privateKeyRing, "PGP private keyring") rootCmd.PersistentFlags().StringVarP(&topLevelElement, "element", "e", "", "Name of the top level element under which encrypted key/value pairs are kept") } // initConfig reads in config file and ENV variables if set. func initConfig() { if cfgFile != "" { // Use config file from the flag. viper.SetConfigFile(cfgFile) } else { // Find home directory. home, err := homedir.Dir() if err != nil { fmt.Println(err) os.Exit(1) } configPath := fmt.Sprintf("%s/.config/generate-secure-pillar/", home) dir := filepath.Clean(configPath) err = os.MkdirAll(dir, 0700) if err != nil { logger.Fatalf("error creating config file path: %s", err) } _, err = os.OpenFile(dir+"/config.yaml", os.O_RDONLY|os.O_CREATE, 0660) if err != nil { logger.Fatalf("Error creating config file: %s", err) } // set config in "~/.config/generate-secure-pillar/config.yaml". viper.AddConfigPath(configPath) viper.SetConfigName("config") viper.SetConfigType("yaml") } viper.AutomaticEnv() // read in environment variables that match // If a config file is found, read it in. 
err := viper.ReadInConfig() // Find and read the config file if err != nil { // Handle errors reading the config file logger.Fatalf("Fatal error config file: %s", err) } readProfile() } func getPki() pki.Pki { return pki.New(pgpKeyName, publicKeyRing, privateKeyRing) } func readProfile() { if viper.IsSet("profiles") { profiles := viper.Get("profiles") profName := rootCmd.Flag("profile").Value.String() if profName != "" || pgpKeyName == "" { for _, prof := range profiles.([]interface{}) { p := prof.(map[interface{}]interface{}) if p["default"] == true || profName == p["name"] { gpgHome := p["gnupg_home"].(string) if gpgHome != "" { publicKeyRing = fmt.Sprintf("%s/pubring.gpg", gpgHome) privateKeyRing = fmt.Sprintf("%s/secring.gpg", gpgHome) } if p["default_key"] != nil { pgpKeyName = p["default_key"].(string) } } } } } } // if we are getting stdin from a pipe we don't want // to output log info about it that could mess up parsing func stdinIsPiped() bool { fi, _ := os.Stdin.Stat() return ((fi.Mode() & os.ModeCharDevice) == 0) }
[ "\"GNUPGHOME\"" ]
[]
[ "GNUPGHOME" ]
[]
["GNUPGHOME"]
go
1
0
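In cmd/root.go above, the PGP keyring paths default to ~/.gnupg/pubring.gpg and ~/.gnupg/secring.gpg, are overridden when GNUPGHOME is set, and the public keyring must exist or the command aborts. A minimal sketch of that resolution logic, written in Python purely for illustration (the tool itself is Go):

import os

def resolve_keyrings():
    """Resolve PGP keyring paths the way cmd/root.go does: GNUPGHOME wins, else ~/.gnupg."""
    gpg_home = os.environ.get("GNUPGHOME", "")
    base = gpg_home if gpg_home else os.path.expanduser("~/.gnupg")
    pubring = os.path.join(base, "pubring.gpg")
    secring = os.path.join(base, "secring.gpg")
    if not os.path.exists(pubring):
        raise FileNotFoundError("GNUPG pubring file not found: %s" % pubring)
    return pubring, secring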
Jumping on the clouds: Revisited.py
''' Problem Statement: https://www.hackerrank.com/challenges/jumping-on-the-clouds-revisited/problem @Coded by TSG,2020 ''' import math import os import random import re import sys # Complete the jumpingOnClouds function below. def jumpingOnClouds(c, k): e=100 energy=0 i=0 while(i!=len(c)): if(c[i]==1): energy=e-3 e=energy else: energy=e-1 e=energy i+=k return energy if __name__ == '__main__': fptr = open(os.environ['OUTPUT_PATH'], 'w') nk = input().split() n = int(nk[0]) k = int(nk[1]) c = list(map(int, input().rstrip().split())) result = jumpingOnClouds(c, k) fptr.write(str(result) + '\n') fptr.close()
[]
[]
[ "OUTPUT_PATH" ]
[]
["OUTPUT_PATH"]
python
1
0
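The submission above advances with i += k and stops once i equals len(c), which is fine as long as k divides the number of clouds evenly (the inputs exercised here appear to satisfy that). A more defensive variant, sketched below rather than taken from the graded submission, wraps the index with modular arithmetic so the walk always terminates back at cloud 0 and charges energy for the cloud being landed on:

def jumping_on_clouds(c, k):
    """Start at cloud 0 with 100 energy; each jump costs 1, landing on a thunderhead (1) costs 2 more."""
    energy = 100
    n = len(c)
    i = 0
    while True:
        i = (i + k) % n           # circular track: wrap around instead of running off the end
        energy -= 1 + 2 * c[i]    # 1 per jump, plus 2 if the landing cloud is a thunderhead
        if i == 0:                # back at the starting cloud, game over
            break
    return energy

print(jumping_on_clouds([0, 0, 1, 0, 0, 1, 1, 0], 2))  # -> 92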
pipenv/utils.py
# -*- coding: utf-8 -*- import errno import os import re import hashlib import tempfile import sys import shutil import logging import click import crayons import delegator import parse import requests import six import stat import warnings try: from weakref import finalize except ImportError: try: from .vendor.backports.weakref import finalize except ImportError: class finalize(object): def __init__(self, *args, **kwargs): logging.warn('weakref.finalize unavailable, not cleaning...') def detach(self): return False from time import time logging.basicConfig(level=logging.ERROR) try: from urllib.parse import urlparse except ImportError: from urlparse import urlparse try: from pathlib import Path except ImportError: try: from .vendor.pathlib2 import Path except ImportError: pass from distutils.spawn import find_executable from contextlib import contextmanager from .patched.piptools.resolver import Resolver from .patched.piptools.repositories.pypi import PyPIRepository from .patched.piptools.scripts.compile import get_pip_command from .patched.piptools import logging as piptools_logging from .patched.piptools.exceptions import NoCandidateFound from .vendor.pip9.download import is_archive_file from .vendor.pip9.exceptions import DistributionNotFound from .vendor.pip9.index import Link from .vendor.pip9._vendor.requests.exceptions import HTTPError, ConnectionError from .pep508checker import lookup from .environments import PIPENV_MAX_ROUNDS, PIPENV_CACHE_DIR if six.PY2: class ResourceWarning(Warning): pass specifiers = [k for k in lookup.keys()] # List of version control systems we support. VCS_LIST = ('git', 'svn', 'hg', 'bzr') SCHEME_LIST = ('http://', 'https://', 'ftp://', 'ftps://', 'file://') requests = requests.Session() def get_requirement(dep): from .vendor.pip9.req.req_install import _strip_extras, Wheel from .vendor import requirements """Pre-clean requirement strings passed to the requirements parser. Ensures that we can accept both local and relative paths, file and VCS URIs, remote URIs, and package names, and that we pass only valid requirement strings to the requirements parser. Performs necessary modifications to requirements object if the user input was a local relative path. 
:param str dep: A requirement line :returns: :class:`requirements.Requirement` object """ path = None uri = None cleaned_uri = None editable = False dep_link = None # check for editable dep / vcs dep if dep.startswith('-e '): editable = True # Use the user supplied path as the written dependency dep = dep.split(' ', 1)[1] # Split out markers if they are present - similar to how pip does it # See pip9.req.req_install.InstallRequirement.from_line if not any(dep.startswith(uri_prefix) for uri_prefix in SCHEME_LIST): marker_sep = ';' else: marker_sep = '; ' if marker_sep in dep: dep, markers = dep.split(marker_sep, 1) markers = markers.strip() if not markers: markers = None else: markers = None # Strip extras from the requirement so we can make a properly parseable req dep, extras = _strip_extras(dep) # Only operate on local, existing, non-URI formatted paths which are installable if is_installable_file(dep): dep_path = Path(dep) dep_link = Link(dep_path.absolute().as_uri()) if dep_path.is_absolute() or dep_path.as_posix() == '.': path = dep_path.as_posix() else: path = get_converted_relative_path(dep) dep = dep_link.egg_fragment if dep_link.egg_fragment else dep_link.url_without_fragment elif is_vcs(dep): # Generate a Link object for parsing egg fragments dep_link = Link(dep) # Save the original path to store in the pipfile uri = dep_link.url # Construct the requirement using proper git+ssh:// replaced uris or names if available cleaned_uri = clean_git_uri(dep) dep = cleaned_uri if editable: dep = '-e {0}'.format(dep) req = [r for r in requirements.parse(dep)][0] # if all we built was the requirement name and still need everything else if req.name and not any([req.uri, req.path]): if dep_link: if dep_link.scheme.startswith('file') and path and not req.path: req.path = path req.local_file = True req.uri = None else: req.uri = dep_link.url_without_fragment # If the result is a local file with a URI and we have a local path, unset the URI # and set the path instead -- note that local files may have 'path' set by accident elif req.local_file and path and not req.vcs: req.path = path req.uri = None if dep_link and dep_link.is_wheel and not req.name: req.name = os.path.basename(Wheel(dep_link.path).name) elif req.vcs and req.uri and cleaned_uri and cleaned_uri != uri: req.uri = strip_ssh_from_git_uri(req.uri) req.line = strip_ssh_from_git_uri(req.line) req.editable = editable if markers: req.markers = markers if extras: # Bizarrely this is also what pip does... req.extras = [ r for r in requirements.parse('fakepkg{0}'.format(extras)) ][ 0 ].extras return req def cleanup_toml(tml): toml = tml.split('\n') new_toml = [] # Remove all empty lines from TOML. for line in toml: if line.strip(): new_toml.append(line) toml = '\n'.join(new_toml) new_toml = [] # Add newlines between TOML sections. for i, line in enumerate(toml.split('\n')): # Skip the first line. if line.startswith('['): if i > 0: # Insert a newline before the heading. new_toml.append('') new_toml.append(line) # adding new line at the end of the TOML file new_toml.append('') toml = '\n'.join(new_toml) return toml def parse_python_version(output): """Parse a Python version output returned by `python --version`. Return a dict with three keys: major, minor, and micro. Each value is a string containing a version part. Note: The micro part would be `'0'` if it's missing from the input string. """ version_pattern = re.compile(r''' ^ # Beginning of line. Python # Literally "Python". \s # Space. (?P<major>\d+) # Major = one or more digits. \. # Dot. 
(?P<minor>\d+) # Minor = one or more digits. (?: # Unnamed group for dot-micro. \. # Dot. (?P<micro>\d+) # Micro = one or more digit. )? # Micro is optional because pypa/pipenv#1893. .* # Trailing garbage. $ # End of line. ''', re.VERBOSE) match = version_pattern.match(output) if not match: return None return match.groupdict(default='0') def python_version(path_to_python): if not path_to_python: return None try: c = delegator.run([path_to_python, '--version'], block=False) except Exception: return None c.block() version = parse_python_version(c.out.strip() or c.err.strip()) try: version = u'{major}.{minor}.{micro}'.format(**version) except TypeError: return None return version def escape_grouped_arguments(s): """Prepares a string for the shell (on Windows too!) Only for use on grouped arguments (passed as a string to Popen) """ if s is None: return None # Additional escaping for windows paths if os.name == 'nt': s = "{}".format(s.replace("\\", "\\\\")) return '"' + s.replace("'", "'\\''") + '"' def clean_pkg_version(version): """Uses pip to prepare a package version string, from our internal version.""" return six.u(pep440_version(str(version).replace('==', ''))) class HackedPythonVersion(object): """A Beautiful hack, which allows us to tell pip which version of Python we're using.""" def __init__(self, python_version, python_path): self.python_version = python_version self.python_path = python_path def __enter__(self): os.environ['PIP_PYTHON_VERSION'] = str(self.python_version) os.environ['PIP_PYTHON_PATH'] = str(self.python_path) def __exit__(self, *args): # Restore original Python version information. del os.environ['PIP_PYTHON_VERSION'] def prepare_pip_source_args(sources, pip_args=None): if pip_args is None: pip_args = [] if sources: # Add the source to pip9. pip_args.extend(['-i', sources[0]['url']]) # Trust the host if it's not verified. if not sources[0].get('verify_ssl', True): pip_args.extend( [ '--trusted-host', urlparse(sources[0]['url']).netloc.split(':')[0], ] ) # Add additional sources as extra indexes. if len(sources) > 1: for source in sources[1:]: pip_args.extend(['--extra-index-url', source['url']]) # Trust the host if it's not verified. 
if not source.get('verify_ssl', True): pip_args.extend( [ '--trusted-host', urlparse(source['url']).hostname, ] ) return pip_args def actually_resolve_reps( deps, index_lookup, markers_lookup, project, sources, verbose, clear, pre ): from pip9 import basecommand, req from pip9._vendor import requests as pip_requests class PipCommand(basecommand.Command): """Needed for pip-tools.""" name = 'PipCommand' constraints = [] req_dir = tempfile.mkdtemp(prefix='pipenv-', suffix='-requirements') for dep in deps: if dep: if dep.startswith('-e '): constraint = req.InstallRequirement.from_editable( dep[len('-e '):] ) else: fd, t = tempfile.mkstemp( prefix='pipenv-', suffix='-requirement.txt', dir=req_dir ) with os.fdopen(fd, 'w') as f: f.write(dep) constraint = [ c for c in req.parse_requirements(t, session=pip_requests) ][ 0 ] # extra_constraints = [] if ' -i ' in dep: index_lookup[constraint.name] = project.get_source( url=dep.split(' -i ')[1] ).get( 'name' ) if constraint.markers: markers_lookup[constraint.name] = str( constraint.markers ).replace( '"', "'" ) constraints.append(constraint) rmtree(req_dir) pip_command = get_pip_command() pip_args = [] if sources: pip_args = prepare_pip_source_args(sources, pip_args) if verbose: print('Using pip: {0}'.format(' '.join(pip_args))) pip_options, _ = pip_command.parse_args(pip_args) session = pip_command._build_session(pip_options) pypi = PyPIRepository( pip_options=pip_options, use_json=False, session=session ) if verbose: logging.log.verbose = True piptools_logging.log.verbose = True resolved_tree = set() resolver = Resolver( constraints=constraints, repository=pypi, clear_caches=clear, prereleases=pre, ) # pre-resolve instead of iterating to avoid asking pypi for hashes of editable packages try: resolved_tree.update(resolver.resolve(max_rounds=PIPENV_MAX_ROUNDS)) except (NoCandidateFound, DistributionNotFound, HTTPError) as e: click.echo( '{0}: Your dependencies could not be resolved. You likely have a mismatch in your sub-dependencies.\n ' 'You can use {1} to bypass this mechanism, then run {2} to inspect the situation.' ''.format( crayons.red('Warning', bold=True), crayons.red('$ pipenv install --skip-lock'), crayons.red('$ pipenv graph'), ), err=True, ) click.echo(crayons.blue(str(e)), err=True) if 'no version found at all' in str(e): click.echo( crayons.blue( 'Please check your version specifier and version number. See PEP440 for more information.' ) ) raise RuntimeError return resolved_tree, resolver def venv_resolve_deps( deps, which, project, pre=False, verbose=False, clear=False, allow_global=False ): from . 
import resolver import json resolver = escape_grouped_arguments(resolver.__file__.rstrip('co')) cmd = '{0} {1} {2} {3} {4} {5}'.format( escape_grouped_arguments(which('python')), resolver, '--pre' if pre else '', '--verbose' if verbose else '', '--clear' if clear else '', '--system' if allow_global else '', ) os.environ['PIPENV_PACKAGES'] = '\n'.join(deps) c = delegator.run(cmd, block=True) del os.environ['PIPENV_PACKAGES'] try: assert c.return_code == 0 except AssertionError: if verbose: click.echo(c.out, err=True) click.echo(c.err, err=True) else: click.echo(c.err[int(len(c.err) / 2) - 1:], err=True) sys.exit(c.return_code) if verbose: click.echo(c.out.split('RESULTS:')[0], err=True) try: return json.loads(c.out.split('RESULTS:')[1].strip()) except IndexError: raise RuntimeError('There was a problem with locking.') def resolve_deps( deps, which, project, sources=None, verbose=False, python=False, clear=False, pre=False, allow_global=False, ): """Given a list of dependencies, return a resolved list of dependencies, using pip-tools -- and their hashes, using the warehouse API / pip9. """ index_lookup = {} markers_lookup = {} python_path = which('python', allow_global=allow_global) backup_python_path = sys.executable results = [] # First (proper) attempt: with HackedPythonVersion(python_version=python, python_path=python_path): try: resolved_tree, resolver = actually_resolve_reps( deps, index_lookup, markers_lookup, project, sources, verbose, clear, pre, ) except RuntimeError: # Don't exit here, like usual. resolved_tree = None # Second (last-resort) attempt: if resolved_tree is None: with HackedPythonVersion( python_version='.'.join([str(s) for s in sys.version_info[:3]]), python_path=backup_python_path, ): try: # Attempt to resolve again, with different Python version information, # particularly for particularly particular packages. resolved_tree, resolver = actually_resolve_reps( deps, index_lookup, markers_lookup, project, sources, verbose, clear, pre, ) except RuntimeError: sys.exit(1) for result in resolved_tree: if not result.editable: name = pep423_name(result.name) version = clean_pkg_version(result.specifier) index = index_lookup.get(result.name) if not markers_lookup.get(result.name): markers = str( result.markers ) if result.markers and 'extra' not in str( result.markers ) else None else: markers = markers_lookup.get(result.name) collected_hashes = [] if any('python.org' in source['url'] or 'pypi.org' in source['url'] for source in sources): try: # Grab the hashes from the new warehouse API. r = requests.get( 'https://pypi.org/pypi/{0}/json'.format(name), timeout=10, ) api_releases = r.json()['releases'] cleaned_releases = {} for api_version, api_info in api_releases.items(): cleaned_releases[ clean_pkg_version(api_version) ] = api_info for release in cleaned_releases[version]: collected_hashes.append(release['digests']['sha256']) collected_hashes = [ 'sha256:' + s for s in collected_hashes ] except (ValueError, KeyError, ConnectionError): if verbose: click.echo( '{0}: Error generating hash for {1}'.format( crayons.red('Warning', bold=True), name ) ) # Collect un-collectable hashes (should work with devpi). 
try: collected_hashes = collected_hashes + list( list(resolver.resolve_hashes([result]).items())[0][1] ) except (ValueError, KeyError, ConnectionError, IndexError): if verbose: print('Error generating hash for {}'.format(name)) collected_hashes = sorted(set(collected_hashes)) d = {'name': name, 'version': version, 'hashes': collected_hashes} if index: d.update({'index': index}) if markers: d.update({'markers': markers.replace('"', "'")}) results.append(d) return results def multi_split(s, split): """Splits on multiple given separators.""" for r in split: s = s.replace(r, '|') return [i for i in s.split('|') if len(i) > 0] def convert_deps_from_pip(dep): """"Converts a pip-formatted dependency to a Pipfile-formatted one.""" dependency = {} req = get_requirement(dep) extras = {'extras': req.extras} # File installs. if (req.uri or req.path or is_installable_file(req.name)) and not req.vcs: # Assign a package name to the file, last 7 of it's sha256 hex digest. if not req.uri and not req.path: req.path = os.path.abspath(req.name) hashable_path = req.uri if req.uri else req.path if not req.name: req.name = hashlib.sha256(hashable_path.encode('utf-8')).hexdigest() req.name = req.name[len(req.name) - 7:] # {path: uri} TOML (spec 4 I guess...) if req.uri: dependency[req.name] = {'file': hashable_path} else: dependency[req.name] = {'path': hashable_path} if req.extras: dependency[req.name].update(extras) # Add --editable if applicable if req.editable: dependency[req.name].update({'editable': True}) # VCS Installs. elif req.vcs: if req.name is None: raise ValueError( 'pipenv requires an #egg fragment for version controlled ' 'dependencies. Please install remote dependency ' 'in the form {0}#egg=<package-name>.'.format(req.uri) ) # Crop off the git+, etc part. if req.uri.startswith('{0}+'.format(req.vcs)): req.uri = req.uri[len(req.vcs) + 1:] dependency.setdefault(req.name, {}).update({req.vcs: req.uri}) # Add --editable, if it's there. if req.editable: dependency[req.name].update({'editable': True}) # Add subdirectory, if it's there if req.subdirectory: dependency[req.name].update({'subdirectory': req.subdirectory}) # Add the specifier, if it was provided. if req.revision: dependency[req.name].update({'ref': req.revision}) # Extras: e.g. #egg=requests[security] if req.extras: dependency[req.name].update({'extras': req.extras}) elif req.extras or req.specs or hasattr(req, 'markers'): specs = None # Comparison operators: e.g. Django>1.10 if req.specs: r = multi_split(dep, '!=<>~') specs = dep[len(r[0]):] dependency[req.name] = specs # Extras: e.g. requests[socks] if req.extras: dependency[req.name] = extras if specs: dependency[req.name].update({'version': specs}) if hasattr(req, 'markers'): if isinstance(dependency[req.name], six.string_types): dependency[req.name] = {'version': specs} dependency[req.name].update({'markers': req.markers}) # Bare dependencies: e.g. requests else: dependency[dep] = '*' # Cleanup when there's multiple values, e.g. -e. if len(dependency) > 1: for key in dependency.copy(): if not hasattr(dependency[key], 'keys'): del dependency[key] return dependency def is_star(val): return isinstance(val, six.string_types) and val == '*' def is_pinned(val): return isinstance(val, six.string_types) and val.startswith('==') def convert_deps_to_pip(deps, project=None, r=True, include_index=False): """"Converts a Pipfile-formatted dependency to a pip-formatted one.""" dependencies = [] for dep in deps.keys(): # Default (e.g. '>1.10'). 
extra = deps[dep] if isinstance(deps[dep], six.string_types) else '' version = '' index = '' # Get rid of '*'. if is_star(deps[dep]) or str(extra) == '{}': extra = '' hash = '' # Support for single hash (spec 1). if 'hash' in deps[dep]: hash = ' --hash={0}'.format(deps[dep]['hash']) # Support for multiple hashes (spec 2). if 'hashes' in deps[dep]: hash = '{0} '.format( ''.join( [' --hash={0} '.format(h) for h in deps[dep]['hashes']] ) ) # Support for extras (e.g. requests[socks]) if 'extras' in deps[dep]: extra = '[{0}]'.format(','.join(deps[dep]['extras'])) if 'version' in deps[dep]: if not is_star(deps[dep]['version']): version = deps[dep]['version'] # For lockfile format. if 'markers' in deps[dep]: specs = '; {0}'.format(deps[dep]['markers']) else: # For pipfile format. specs = [] for specifier in specifiers: if specifier in deps[dep]: if not is_star(deps[dep][specifier]): specs.append( '{0} {1}'.format(specifier, deps[dep][specifier]) ) if specs: specs = '; {0}'.format(' and '.join(specs)) else: specs = '' if include_index and not is_file(deps[dep]) and not is_vcs(deps[dep]): pip_src_args = [] if 'index' in deps[dep]: pip_src_args = [project.get_source(deps[dep]['index'])] else: pip_src_args = project.sources pip_args = prepare_pip_source_args(pip_src_args) index = ' '.join(pip_args) # Support for version control maybe_vcs = [vcs for vcs in VCS_LIST if vcs in deps[dep]] vcs = maybe_vcs[0] if maybe_vcs else None # Support for files. if 'file' in deps[dep]: extra = '{1}{0}'.format(extra, deps[dep]['file']).strip() # Flag the file as editable if it is a local relative path if 'editable' in deps[dep]: dep = '-e ' else: dep = '' # Support for paths. elif 'path' in deps[dep]: extra = '{1}{0}'.format(extra, deps[dep]['path']).strip() # Flag the file as editable if it is a local relative path if 'editable' in deps[dep]: dep = '-e ' else: dep = '' if vcs: extra = '{0}+{1}'.format(vcs, deps[dep][vcs]) # Support for @refs. if 'ref' in deps[dep]: extra += '@{0}'.format(deps[dep]['ref']) extra += '#egg={0}'.format(dep) # Support for subdirectory if 'subdirectory' in deps[dep]: extra += '&subdirectory={0}'.format(deps[dep]['subdirectory']) # Support for editable. if 'editable' in deps[dep]: # Support for --egg. dep = '-e ' else: dep = '' s = '{0}{1}{2}{3}{4} {5}'.format( dep, extra, version, specs, hash, index ).strip() dependencies.append(s) if not r: return dependencies # Write requirements.txt to tmp directory. f = tempfile.NamedTemporaryFile(suffix='-requirements.txt', delete=False) f.write('\n'.join(dependencies).encode('utf-8')) f.close() return f.name def mkdir_p(newdir): """works the way a good mkdir should :) - already exists, silently complete - regular file in the way, raise an exception - parent directory(ies) does not exist, make them as well From: http://code.activestate.com/recipes/82465-a-friendly-mkdir/ """ if os.path.isdir(newdir): pass elif os.path.isfile(newdir): raise OSError( "a file with the same name as the desired dir, '{0}', already exists.".format( newdir ) ) else: head, tail = os.path.split(newdir) if head and not os.path.isdir(head): mkdir_p(head) if tail: os.mkdir(newdir) def is_required_version(version, specified_version): """Check to see if there's a hard requirement for version number provided in the Pipfile. """ # Certain packages may be defined with multiple values. 
if isinstance(specified_version, dict): specified_version = specified_version.get('version', '') if specified_version.startswith('=='): return version.strip() == specified_version.split('==')[1].strip() return True def strip_ssh_from_git_uri(uri): """Return git+ssh:// formatted URI to git+git@ format""" if isinstance(uri, six.string_types): uri = uri.replace('git+ssh://', 'git+') return uri def clean_git_uri(uri): """Cleans VCS uris from pip9 format""" if isinstance(uri, six.string_types): # Add scheme for parsing purposes, this is also what pip does if uri.startswith('git+') and '://' not in uri: uri = uri.replace('git+', 'git+ssh://') return uri def is_editable(pipfile_entry): if hasattr(pipfile_entry, 'get'): return pipfile_entry.get('editable', False) and any( pipfile_entry.get(key) for key in ('file', 'path') + VCS_LIST ) return False def is_vcs(pipfile_entry): from .vendor import requirements """Determine if dictionary entry from Pipfile is for a vcs dependency.""" if hasattr(pipfile_entry, 'keys'): return any(key for key in pipfile_entry.keys() if key in VCS_LIST) elif isinstance(pipfile_entry, six.string_types): return bool( requirements.requirement.VCS_REGEX.match( clean_git_uri(pipfile_entry) ) ) return False def is_installable_file(path): """Determine if a path can potentially be installed""" from .vendor.pip9.utils import is_installable_dir from .vendor.pip9.utils.packaging import specifiers if hasattr(path, 'keys') and any( key for key in path.keys() if key in ['file', 'path'] ): path = urlparse(path['file']).path if 'file' in path else path['path'] if not isinstance(path, six.string_types) or path == '*': return False # If the string starts with a valid specifier operator, test if it is a valid # specifier set before making a path object (to avoid breaking windows) if any(path.startswith(spec) for spec in '!=<>~'): try: specifiers.SpecifierSet(path) # If this is not a valid specifier, just move on and try it as a path except specifiers.InvalidSpecifier: pass else: return False if not os.path.exists(os.path.abspath(path)): return False lookup_path = Path(path) absolute_path = '{0}'.format(lookup_path.absolute()) if lookup_path.is_dir() and is_installable_dir(absolute_path): return True elif lookup_path.is_file() and is_archive_file(absolute_path): return True return False def is_file(package): """Determine if a package name is for a File dependency.""" if hasattr(package, 'keys'): return any(key for key in package.keys() if key in ['file', 'path']) if os.path.exists(str(package)): return True for start in SCHEME_LIST: if str(package).startswith(start): return True return False def pep440_version(version): """Normalize version to PEP 440 standards""" from .vendor.pip9.index import parse_version # Use pip built-in version parser. return str(parse_version(version)) def pep423_name(name): """Normalize package name to PEP 423 style standard.""" name = name.lower() if any(i not in name for i in (VCS_LIST + SCHEME_LIST)): return name.replace('_', '-') else: return name def proper_case(package_name): """Properly case project name from pypi.org.""" # Hit the simple API. 
r = requests.get( 'https://pypi.org/pypi/{0}/json'.format(package_name), timeout=0.3, stream=True, ) if not r.ok: raise IOError( 'Unable to find package {0} in PyPI repository.'.format( package_name ) ) r = parse.parse('https://pypi.org/pypi/{name}/json', r.url) good_name = r['name'] return good_name def split_section(input_file, section_suffix, test_function): """ Split a pipfile or a lockfile section out by section name and test function :param dict input_file: A dictionary containing either a pipfile or lockfile :param str section_suffix: A string of the name of the section :param func test_function: A test function to test against the value in the key/value pair >>> split_section(my_lockfile, 'vcs', is_vcs) { 'default': { "six": { "hashes": [ "sha256:832dc0e10feb1aa2c68dcc57dbb658f1c7e65b9b61af69048abc87a2db00a0eb", "sha256:70e8a77beed4562e7f14fe23a786b54f6296e34344c23bc42f07b15018ff98e9" ], "version": "==1.11.0" } }, 'default-vcs': { "e1839a8": { "editable": true, "path": "." } } } """ pipfile_sections = ('packages', 'dev-packages') lockfile_sections = ('default', 'develop') if any(section in input_file for section in pipfile_sections): sections = pipfile_sections elif any(section in input_file for section in lockfile_sections): sections = lockfile_sections else: # return the original file if we can't find any pipfile or lockfile sections return input_file for section in sections: split_dict = {} entries = input_file.get(section, {}) for k in list(entries.keys()): if test_function(entries.get(k)): split_dict[k] = entries.pop(k) input_file['-'.join([section, section_suffix])] = split_dict return input_file def split_file(file_dict): """Split VCS and editable dependencies out from file.""" sections = { 'vcs': is_vcs, 'editable': lambda x: hasattr(x, 'keys') and x.get('editable'), } for k, func in sections.items(): file_dict = split_section(file_dict, k, func) return file_dict def merge_deps( file_dict, project, dev=False, requirements=False, ignore_hashes=False, blocking=False, only=False, ): """ Given a file_dict, merges dependencies and converts them to pip dependency lists. 
:param dict file_dict: The result of calling :func:`pipenv.utils.split_file` :param :class:`pipenv.project.Project` project: Pipenv project :param bool dev=False: Flag indicating whether dev dependencies are to be installed :param bool requirements=False: Flag indicating whether to use a requirements file :param bool ignore_hashes=False: :param bool blocking=False: :param bool only=False: :return: Pip-converted 3-tuples of [deps, requirements_deps] """ deps = [] requirements_deps = [] for section in list(file_dict.keys()): # Turn develop-vcs into ['develop', 'vcs'] section_name, suffix = section.rsplit( '-', 1 ) if '-' in section and not section == 'dev-packages' else ( section, None ) if not file_dict[section] or section_name not in ( 'dev-packages', 'packages', 'default', 'develop' ): continue is_dev = section_name in ('dev-packages', 'develop') if is_dev and not dev: continue if ignore_hashes: for k, v in file_dict[section]: if 'hash' in v: del v['hash'] # Block and ignore hashes for all suffixed sections (vcs/editable) no_hashes = True if suffix else ignore_hashes block = True if suffix else blocking include_index = True if not suffix else False converted = convert_deps_to_pip( file_dict[section], project, r=False, include_index=include_index ) deps.extend((d, no_hashes, block) for d in converted) if dev and is_dev and requirements: requirements_deps.extend((d, no_hashes, block) for d in converted) return deps, requirements_deps def recase_file(file_dict): """Recase file before writing to output.""" if 'packages' in file_dict or 'dev-packages' in file_dict: sections = ('packages', 'dev-packages') elif 'default' in file_dict or 'develop' in file_dict: sections = ('default', 'develop') for section in sections: file_section = file_dict.get(section, {}) # Try to properly case each key if we can. for key in list(file_section.keys()): try: cased_key = proper_case(key) except IOError: cased_key = key file_section[cased_key] = file_section.pop(key) return file_dict def get_windows_path(*args): """Sanitize a path for windows environments Accepts an arbitrary list of arguments and makes a clean windows path""" return os.path.normpath(os.path.join(*args)) def find_windows_executable(bin_path, exe_name): """Given an executable name, search the given location for an executable""" requested_path = get_windows_path(bin_path, exe_name) if os.path.exists(requested_path): return requested_path # Ensure we aren't adding two layers of file extensions exe_name = os.path.splitext(exe_name)[0] files = [ '{0}.{1}'.format(exe_name, ext) for ext in ['', 'py', 'exe', 'bat'] ] exec_paths = [get_windows_path(bin_path, f) for f in files] exec_files = [ filename for filename in exec_paths if os.path.isfile(filename) ] if exec_files: return exec_files[0] return find_executable(exe_name) def path_to_url(path): return Path(normalize_drive(os.path.abspath(path))).as_uri() def get_converted_relative_path(path, relative_to=os.curdir): """Given a vague relative path, return the path relative to the given location""" return os.path.join('.', os.path.relpath(path, start=relative_to)) def walk_up(bottom): """Mimic os.walk, but walk 'up' instead of down the directory tree. From: https://gist.github.com/zdavkeos/1098474 """ bottom = os.path.realpath(bottom) # Get files in current dir. 
try: names = os.listdir(bottom) except Exception: return dirs, nondirs = [], [] for name in names: if os.path.isdir(os.path.join(bottom, name)): dirs.append(name) else: nondirs.append(name) yield bottom, dirs, nondirs new_path = os.path.realpath(os.path.join(bottom, '..')) # See if we are at the top. if new_path == bottom: return for x in walk_up(new_path): yield x def find_requirements(max_depth=3): """Returns the path of a Pipfile in parent directories.""" i = 0 for c, d, f in walk_up(os.getcwd()): i += 1 if i < max_depth: if 'requirements.txt': r = os.path.join(c, 'requirements.txt') if os.path.isfile(r): return r raise RuntimeError('No requirements.txt found!') # Borrowed from pew to avoid importing pew which imports psutil # See https://github.com/berdario/pew/blob/master/pew/_utils.py#L82 @contextmanager def temp_environ(): """Allow the ability to set os.environ temporarily""" environ = dict(os.environ) try: yield finally: os.environ.clear() os.environ.update(environ) def is_valid_url(url): """Checks if a given string is an url""" pieces = urlparse(url) return all([pieces.scheme, pieces.netloc]) def download_file(url, filename): """Downloads file from url to a path with filename""" r = requests.get(url, stream=True) if not r.ok: raise IOError('Unable to download file') with open(filename, 'wb') as f: f.write(r.content) def need_update_check(): """Determines whether we need to check for updates.""" mkdir_p(PIPENV_CACHE_DIR) p = os.sep.join((PIPENV_CACHE_DIR, '.pipenv_update_check')) if not os.path.exists(p): return True out_of_date_time = time() - (24 * 60 * 60) if os.path.isfile(p) and os.path.getmtime(p) <= out_of_date_time: return True else: return False def touch_update_stamp(): """Touches PIPENV_CACHE_DIR/.pipenv_update_check""" mkdir_p(PIPENV_CACHE_DIR) p = os.sep.join((PIPENV_CACHE_DIR, '.pipenv_update_check')) try: os.utime(p, None) except OSError: with open(p, 'w') as fh: fh.write('') def normalize_drive(path): """Normalize drive in path so they stay consistent. This currently only affects local drives on Windows, which can be identified with either upper or lower cased drive names. The case is always converted to uppercase because it seems to be preferred. See: <https://github.com/pypa/pipenv/issues/1218> """ if os.name != 'nt' or not isinstance(path, six.string_types): return path drive, tail = os.path.splitdrive(path) # Only match (lower cased) local drives (e.g. 'c:'), not UNC mounts. if drive.islower() and len(drive) == 2 and drive[1] == ':': return '{}{}'.format(drive.upper(), tail) return path def is_readonly_path(fn): """Check if a provided path exists and is readonly. Permissions check is `bool(path.stat & stat.S_IREAD)` or `not os.access(path, os.W_OK)` """ if os.path.exists(fn): return (os.stat(fn).st_mode & stat.S_IREAD) or not os.access( fn, os.W_OK ) return False def set_write_bit(fn): if os.path.exists(fn): os.chmod(fn, stat.S_IWRITE | stat.S_IWUSR) return def rmtree(directory, ignore_errors=False): shutil.rmtree( directory, ignore_errors=ignore_errors, onerror=handle_remove_readonly ) def handle_remove_readonly(func, path, exc): """Error handler for shutil.rmtree. 
Windows source repo folders are read-only by default, so this error handler attempts to set them as writeable and then proceed with deletion.""" # Check for read-only attribute default_warning_message = 'Unable to remove file due to permissions restriction: {!r}' # split the initial exception out into its type, exception, and traceback exc_type, exc_exception, exc_tb = exc if is_readonly_path(path): # Apply write permission and call original function set_write_bit(path) try: func(path) except (OSError, IOError) as e: if e.errno in [errno.EACCES, errno.EPERM]: warnings.warn( default_warning_message.format(path), ResourceWarning ) return if exc_exception.errno in [errno.EACCES, errno.EPERM]: warnings.warn(default_warning_message.format(path), ResourceWarning) return raise class TemporaryDirectory(object): """Create and return a temporary directory. This has the same behavior as mkdtemp but can be used as a context manager. For example: with TemporaryDirectory() as tmpdir: ... Upon exiting the context, the directory and everything contained in it are removed. """ def __init__(self, suffix, prefix, dir=None): if 'RAM_DISK' in os.environ: import uuid name = uuid.uuid4().hex dir_name = os.path.join(os.environ['RAM_DISK'].strip(), name) os.mkdir(dir_name) self.name = dir_name else: self.name = tempfile.mkdtemp(suffix, prefix, dir) self._finalizer = finalize( self, self._cleanup, self.name, warn_message="Implicitly cleaning up {!r}".format(self), ) @classmethod def _cleanup(cls, name, warn_message): rmtree(name) warnings.warn(warn_message, ResourceWarning) def __repr__(self): return "<{} {!r}>".format(self.__class__.__name__, self.name) def __enter__(self): return self def __exit__(self, exc, value, tb): self.cleanup() def cleanup(self): if self._finalizer.detach(): rmtree(self.name)
[]
[]
[ "PIP_PYTHON_VERSION", "PIPENV_PACKAGES", "RAM_DISK", "PIP_PYTHON_PATH" ]
[]
["PIP_PYTHON_VERSION", "PIPENV_PACKAGES", "RAM_DISK", "PIP_PYTHON_PATH"]
python
4
0
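Among the helpers in pipenv/utils.py above, temp_environ() snapshots os.environ and restores it when the block exits, and HackedPythonVersion uses the same override-then-restore idea for PIP_PYTHON_VERSION and PIP_PYTHON_PATH. A standalone sketch of the pattern and how it is typically used (re-implemented here for illustration, not imported from pipenv):

import os
from contextlib import contextmanager

@contextmanager
def temp_environ():
    """Save a copy of os.environ and put it back no matter how the block exits."""
    saved = dict(os.environ)
    try:
        yield
    finally:
        os.environ.clear()
        os.environ.update(saved)

with temp_environ():
    os.environ["PIP_PYTHON_VERSION"] = "3.7.0"   # override visible only inside the block
    # ... run code that reads the overridden variable ...

# Outside the block, os.environ is exactly what it was before entering.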
config.py
import os basedir = os.path.abspath(os.path.dirname(__file__)) class Config(object): SECRET_KEY = os.environ.get('SECRET_KEY') or 'you-will-never-guess' SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \ 'sqlite:///' + os.path.join(basedir, 'app.db') SQLALCHEMY_TRACK_MODIFICATIONS = False
[]
[]
[ "SECRET_KEY", "DATABASE_URL" ]
[]
["SECRET_KEY", "DATABASE_URL"]
python
2
0
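The Flask-style Config class above evaluates os.environ.get('SECRET_KEY') and os.environ.get('DATABASE_URL') at class-definition time, so any override has to be in the environment before config is imported. A small illustration of that ordering; the database URL below is a made-up value:

import os

# Set the override before importing config, because the class body reads
# os.environ.get(...) as soon as the module is imported.
os.environ["DATABASE_URL"] = "postgresql://localhost/example_db"   # made-up value

from config import Config

print(Config.SQLALCHEMY_DATABASE_URI)  # -> postgresql://localhost/example_db
print(Config.SECRET_KEY)               # -> 'you-will-never-guess' unless SECRET_KEY is set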
tests/system_test.py
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # """System test library, provides tools for tests that start multiple processes, with special support for qdrouter processes. Features: - Create separate directories for each test. - Save logs, sub-process output, core files etc. - Automated clean-up after tests: kill sub-processes etc. - Tools to manipulate qdrouter configuration files. - Sundry other tools. """ from __future__ import unicode_literals from __future__ import division from __future__ import absolute_import from __future__ import print_function import errno, os, time, socket, random, subprocess, shutil, unittest, __main__, re, sys from datetime import datetime from subprocess import PIPE, STDOUT from copy import copy try: import queue as Queue # 3.x except ImportError: import Queue as Queue # 2.7 from threading import Thread from threading import Event import json import uuid is_python2 = sys.version_info[0] == 2 # DISPATCH-1443: for python < 2.7 use unittest2 since the default unittest for # older versions lacks features we need: # if is_python2 and sys.version_info[1] < 7: # python < 2.7: try: import unittest2 as unittest except ImportError: raise Exception("Python unittest2 not installed - see README") else: import unittest import proton from proton import Message from proton import Delivery from proton.handlers import MessagingHandler from proton.utils import BlockingConnection from proton.reactor import AtLeastOnce, Container from proton.reactor import AtMostOnce from qpid_dispatch.management.client import Node from qpid_dispatch_internal.compat import dict_iteritems, PY_STRING_TYPE from qpid_dispatch_internal.compat import PY_TEXT_TYPE # Optional modules MISSING_MODULES = [] try: import qpidtoollibs except ImportError as err: qpidtoollibs = None # pylint: disable=invalid-name MISSING_MODULES.append(str(err)) try: import qpid_messaging as qm except ImportError as err: qm = None # pylint: disable=invalid-name MISSING_MODULES.append(str(err)) def find_exe(program): """Find an executable in the system PATH""" def is_exe(fpath): """True if fpath is executable""" return os.path.isfile(fpath) and os.access(fpath, os.X_OK) mydir = os.path.split(program)[0] if mydir: if is_exe(program): return program else: for path in os.environ["PATH"].split(os.pathsep): exe_file = os.path.join(path, program) if is_exe(exe_file): return exe_file return None # The directory where this module lives. Used to locate static configuration files etc. 
DIR = os.path.dirname(__file__) def _check_requirements(): """If requirements are missing, return a message, else return empty string.""" missing = MISSING_MODULES required_exes = ['qdrouterd'] missing += ["No exectuable %s"%e for e in required_exes if not find_exe(e)] if missing: return "%s: %s"%(__name__, ", ".join(missing)) MISSING_REQUIREMENTS = _check_requirements() def retry_delay(deadline, delay, max_delay): """For internal use in retry. Sleep as required and return the new delay or None if retry should time out""" remaining = deadline - time.time() if remaining <= 0: return None time.sleep(min(delay, remaining)) return min(delay*2, max_delay) # Valgrind significantly slows down the response time of the router, so use a # long default timeout TIMEOUT = float(os.environ.get("QPID_SYSTEM_TEST_TIMEOUT", 60)) def retry(function, timeout=TIMEOUT, delay=.001, max_delay=1): """Call function until it returns a true value or timeout expires. Double the delay for each retry up to max_delay. Returns what function returns or None if timeout expires. """ deadline = time.time() + timeout while True: ret = function() if ret: return ret else: delay = retry_delay(deadline, delay, max_delay) if delay is None: return None def retry_exception(function, timeout=TIMEOUT, delay=.001, max_delay=1, exception_test=None): """Call function until it returns without exception or timeout expires. Double the delay for each retry up to max_delay. Calls exception_test with any exception raised by function, exception_test may itself raise an exception to terminate the retry. Returns what function returns if it succeeds before timeout. Raises last exception raised by function on timeout. """ deadline = time.time() + timeout while True: try: return function() except Exception as e: # pylint: disable=broad-except if exception_test: exception_test(e) delay = retry_delay(deadline, delay, max_delay) if delay is None: raise def get_local_host_socket(protocol_family='IPv4'): if protocol_family == 'IPv4': s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) host = '127.0.0.1' elif protocol_family == 'IPv6': s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM) host = '::1' return s, host def port_available(port, protocol_family='IPv4'): """Return true if connecting to host:port gives 'connection refused'.""" s, host = get_local_host_socket(protocol_family) available = False try: s.connect((host, port)) except socket.error as e: available = e.errno == errno.ECONNREFUSED except: pass s.close() return available def wait_port(port, protocol_family='IPv4', **retry_kwargs): """Wait up to timeout for port (on host) to be connectable. Takes same keyword arguments as retry to control the timeout""" def check(e): """Only retry on connection refused""" if not isinstance(e, socket.error) or not e.errno == errno.ECONNREFUSED: raise host = None def connect(): # macOS gives EINVAL for all connection attempts after a ECONNREFUSED # man 3 connect: "If connect() fails, the state of the socket is unspecified. [...]" s, host = get_local_host_socket(protocol_family) try: s.connect((host, port)) finally: s.close() try: retry_exception(connect, exception_test=check, **retry_kwargs) except Exception as e: raise Exception("wait_port timeout on host %s port %s: %s" % (host, port, e)) def wait_ports(ports, **retry_kwargs): """Wait up to timeout for all ports (on host) to be connectable. 
Takes same keyword arguments as retry to control the timeout""" for port, protocol_family in dict_iteritems(ports): wait_port(port=port, protocol_family=protocol_family, **retry_kwargs) def message(**properties): """Convenience to create a proton.Message with properties set""" m = Message() for name, value in dict_iteritems(properties): getattr(m, name) # Raise exception if not a valid message attribute. setattr(m, name, value) return m class Process(subprocess.Popen): """ Popen that can be torn down at the end of a TestCase and stores its output. """ # Expected states of a Process at teardown RUNNING = -1 # Still running EXIT_OK = 0 # Exit status 0 EXIT_FAIL = 1 # Exit status 1 unique_id = 0 @classmethod def unique(cls, name): cls.unique_id += 1 return "%s-%s" % (name, cls.unique_id) def __init__(self, args, name=None, expect=EXIT_OK, **kwargs): """ Takes same arguments as subprocess.Popen. Some additional/special args: @param expect: Raise error if process staus not as expected at end of test: L{RUNNING} - expect still running. L{EXIT_OK} - expect proces to have terminated with 0 exit status. L{EXIT_FAIL} - expect proces to have terminated with exit status 1. integer - expected return code @keyword stdout: Defaults to the file name+".out" @keyword stderr: Defaults to be the same as stdout """ self.name = name or os.path.basename(args[0]) self.args, self.expect = args, expect self.outdir = os.getcwd() self.outfile = os.path.abspath(self.unique(self.name)) self.torndown = False with open(self.outfile + '.out', 'w') as out: kwargs.setdefault('stdout', out) kwargs.setdefault('stderr', subprocess.STDOUT) try: super(Process, self).__init__(args, **kwargs) with open(self.outfile + '.cmd', 'w') as f: f.write("%s\npid=%s\n" % (' '.join(args), self.pid)) except Exception as e: raise Exception("subprocess.Popen(%s, %s) failed: %s: %s" % (args, kwargs, type(e).__name__, e)) def assert_running(self): """Assert that the process is still running""" assert self.poll() is None, "%s: exited" % ' '.join(self.args) def teardown(self): """Check process status and stop the process if necessary""" if self.torndown: return self.torndown = True def error(msg): with open(self.outfile + '.out') as f: raise RuntimeError("Process %s error: %s\n%s\n%s\n>>>>\n%s<<<<" % ( self.pid, msg, ' '.join(self.args), self.outfile + '.cmd', f.read())); status = self.poll() if status is None: # Still running self.terminate() if self.expect != None and self.expect != Process.RUNNING: error("still running") self.expect = 0 # Expect clean exit after terminate status = self.wait() if self.expect != None and self.expect != status: error("exit code %s, expected %s" % (status, self.expect)) def wait(self, timeout=None): """ Add support for a timeout when using Python 2 """ if timeout is None: return super(Process, self).wait() if is_python2: start = time.time() while True: rc = super(Process, self).poll() if rc is not None: return rc if time.time() - start >= timeout: raise Exception("Process did not terminate") time.sleep(0.1) else: return super(Process, self).wait(timeout=timeout) def communicate(self, input=None, timeout=None): """ Add support for a timeout when using Python 2 """ if timeout is None: return super(Process, self).communicate(input=input) if is_python2: self.wait(timeout=timeout) return super(Process, self).communicate(input=input) return super(Process, self).communicate(input=input, timeout=timeout) class Config(object): """Base class for configuration objects that provide a convenient way to create content for 
configuration files.""" def write(self, name, suffix=".conf"): """Write the config object to file name.suffix. Returns name.suffix.""" name = name+suffix with open(name, 'w') as f: f.write(str(self)) return name class Qdrouterd(Process): """Run a Qpid Dispatch Router Daemon""" class Config(list, Config): """ A router configuration. The Config class is a list of tuples in the following format: [ ('section-name', {attribute-map}), ...] where attribute-map is a dictionary of key+value pairs. Key is an attribute name (string), value can be any of [scalar | string | dict] When written to a configuration file to be loaded by the router: o) there is no ":' between the section-name and the opening brace o) attribute keys are separated by a ":" from their values o) attribute values that are scalar or string follow the ":" on the same line. o) attribute values do not have trailing commas o) The section-name and attribute keywords are written without enclosing quotes o) string type attribute values are not enclosed in quotes o) attribute values of type dict are written in their JSON representation. Fills in some default values automatically, see Qdrouterd.DEFAULTS """ DEFAULTS = { 'listener': {'host':'0.0.0.0', 'saslMechanisms':'ANONYMOUS', 'idleTimeoutSeconds': '120', 'authenticatePeer': 'no', 'role': 'normal'}, 'connector': {'host':'127.0.0.1', 'saslMechanisms':'ANONYMOUS', 'idleTimeoutSeconds': '120'}, 'router': {'mode': 'standalone', 'id': 'QDR'} } def sections(self, name): """Return list of sections named name""" return [p for n, p in self if n == name] @property def router_id(self): return self.sections("router")[0]["id"] def defaults(self): """Fill in default values in gconfiguration""" for name, props in self: if name in Qdrouterd.Config.DEFAULTS: for n,p in dict_iteritems(Qdrouterd.Config.DEFAULTS[name]): props.setdefault(n,p) def __str__(self): """Generate config file content. Calls default() first.""" def tabs(level): if level: return " " * level return "" def value(item, level): if isinstance(item, dict): result = "{\n" result += "".join(["%s%s: %s,\n" % (tabs(level + 1), json.dumps(k), json.dumps(v)) for k,v in item.items()]) result += "%s}" % tabs(level) return result return "%s" % item def attributes(e, level): assert(isinstance(e, dict)) # k = attribute name # v = string | scalar | dict return "".join(["%s%s: %s\n" % (tabs(level), k, value(v, level + 1)) for k, v in dict_iteritems(e)]) self.defaults() # top level list of tuples ('section-name', dict) return "".join(["%s {\n%s}\n"%(n, attributes(p, 1)) for n, p in self]) def __init__(self, name=None, config=Config(), pyinclude=None, wait=True, perform_teardown=True, cl_args=None, expect=Process.RUNNING): """ @param name: name used for for output files, default to id from config. 
@param config: router configuration @keyword wait: wait for router to be ready (call self.wait_ready()) """ cl_args = cl_args or [] self.config = copy(config) self.perform_teardown = perform_teardown if not name: name = self.config.router_id assert name # setup log and debug dump files self.dumpfile = os.path.abspath('%s-qddebug.txt' % name) self.config.sections('router')[0]['debugDumpFile'] = self.dumpfile default_log = [l for l in config if (l[0] == 'log' and l[1]['module'] == 'DEFAULT')] if not default_log: config.append( ('log', {'module':'DEFAULT', 'enable':'trace+', 'includeSource': 'true', 'outputFile':name+'.log'})) args = ['qdrouterd', '-c', config.write(name)] + cl_args env_home = os.environ.get('QPID_DISPATCH_HOME') if pyinclude: args += ['-I', pyinclude] elif env_home: args += ['-I', os.path.join(env_home, 'python')] args = os.environ.get('QPID_DISPATCH_RUNNER', '').split() + args super(Qdrouterd, self).__init__(args, name=name, expect=expect) self._management = None self._wait_ready = False if wait: self.wait_ready() @property def management(self): """Return a management agent proxy for this router""" if not self._management: self._management = Node.connect(self.addresses[0], timeout=TIMEOUT) return self._management def teardown(self): if self._management: try: self._management.close() except: pass self._management = None if not self.perform_teardown: return super(Qdrouterd, self).teardown() # check router's debug dump file for anything interesting (should be # empty) and dump it to stderr for perusal by organic lifeforms try: if os.stat(self.dumpfile).st_size > 0: with open(self.dumpfile) as f: sys.stderr.write("\nRouter %s debug dump file:\n" % self.config.router_id) sys.stderr.write(f.read()) sys.stderr.flush() except OSError: # failed to open file. This can happen when an individual test # spawns a temporary router (i.e. not created as part of the # TestCase setUpClass method) that gets cleaned up by the test. pass @property def ports_family(self): """ Return a dict of listener ports and the respective port family Example - { 23456: 'IPv4', 243455: 'IPv6' } """ ports_fam = {} for l in self.config.sections('listener'): if l.get('protocolFamily'): ports_fam[l['port']] = l['protocolFamily'] else: ports_fam[l['port']] = 'IPv4' return ports_fam @property def ports(self): """Return list of configured ports for all listeners""" return [l['port'] for l in self.config.sections('listener')] def _cfg_2_host_port(self, c): host = c['host'] port = c['port'] protocol_family = c.get('protocolFamily', 'IPv4') if protocol_family == 'IPv6': return "[%s]:%s" % (host, port) elif protocol_family == 'IPv4': return "%s:%s" % (host, port) raise Exception("Unknown protocol family: %s" % protocol_family) @property def addresses(self): """Return amqp://host:port addresses for all listeners""" cfg = self.config.sections('listener') return ["amqp://%s" % self._cfg_2_host_port(l) for l in cfg] @property def connector_addresses(self): """Return list of amqp://host:port for all connectors""" cfg = self.config.sections('connector') return ["amqp://%s" % self._cfg_2_host_port(c) for c in cfg] @property def hostports(self): """Return host:port for all listeners""" return [self._cfg_2_host_port(l) for l in self.config.sections('listener')] def is_connected(self, port, host='127.0.0.1'): """If router has a connection to host:port:identity return the management info. 
Otherwise return None""" try: ret_val = False response = self.management.query(type="org.apache.qpid.dispatch.connection") index_host = response.attribute_names.index('host') for result in response.results: outs = '%s:%s' % (host, port) if result[index_host] == outs: ret_val = True return ret_val except: return False def wait_address(self, address, subscribers=0, remotes=0, containers=0, count=1, **retry_kwargs ): """ Wait for an address to be visible on the router. @keyword subscribers: Wait till subscriberCount >= subscribers @keyword remotes: Wait till remoteCount >= remotes @keyword containers: Wait till containerCount >= remotes @keyword count: Wait until >= count matching addresses are found @param retry_kwargs: keyword args for L{retry} """ def check(): # TODO aconway 2014-06-12: this should be a request by name, not a query. # Need to rationalize addresses in management attributes. # endswith check is because of M0/L/R prefixes addrs = self.management.query( type='org.apache.qpid.dispatch.router.address', attribute_names=[u'name', u'subscriberCount', u'remoteCount', u'containerCount']).get_entities() addrs = [a for a in addrs if a['name'].endswith(address)] return (len(addrs) >= count and addrs[0]['subscriberCount'] >= subscribers and addrs[0]['remoteCount'] >= remotes and addrs[0]['containerCount'] >= containers) assert retry(check, **retry_kwargs) def get_host(self, protocol_family): if protocol_family == 'IPv4': return '127.0.0.1' elif protocol_family == 'IPv6': return '::1' else: return '127.0.0.1' def wait_ports(self, **retry_kwargs): wait_ports(self.ports_family, **retry_kwargs) def wait_connectors(self, **retry_kwargs): """ Wait for all connectors to be connected @param retry_kwargs: keyword args for L{retry} """ for c in self.config.sections('connector'): assert retry(lambda: self.is_connected(port=c['port'], host=self.get_host(c.get('protocolFamily'))), **retry_kwargs), "Port not connected %s" % c['port'] def wait_ready(self, **retry_kwargs): """Wait for ports and connectors to be ready""" if not self._wait_ready: self._wait_ready = True self.wait_ports(**retry_kwargs) self.wait_connectors(**retry_kwargs) return self def is_router_connected(self, router_id, **retry_kwargs): try: self.management.read(identity="router.node/%s" % router_id) # TODO aconway 2015-01-29: The above check should be enough, we # should not advertise a remote router in managment till it is fully # connected. However we still get a race where the router is not # actually ready for traffic. Investigate. # Meantime the following actually tests send-thru to the router. node = Node.connect(self.addresses[0], router_id, timeout=1) return retry_exception(lambda: node.query('org.apache.qpid.dispatch.router')) except: return False def wait_router_connected(self, router_id, **retry_kwargs): retry(lambda: self.is_router_connected(router_id), **retry_kwargs) class Tester(object): """Tools for use by TestCase - Create a directory for the test. - Utilities to create processes and servers, manage ports etc. - Clean up processes on teardown""" # Top level directory above any Tester directories. # CMake-generated configuration may be found here. 
top_dir = os.getcwd() # The root directory for Tester directories, under top_dir root_dir = os.path.abspath(__name__+'.dir') def __init__(self, id): """ @param id: module.class.method or False if no directory should be created """ self.directory = os.path.join(self.root_dir, *id.split('.')) if id else None self.cleanup_list = [] def rmtree(self): """Remove old test class results directory""" if self.directory: shutil.rmtree(os.path.dirname(self.directory), ignore_errors=True) def setup(self): """Called from test setup and class setup.""" if self.directory: os.makedirs(self.directory) os.chdir(self.directory) def teardown(self): """Clean up (tear-down, stop or close) objects recorded via cleanup()""" self.cleanup_list.reverse() errors = [] for obj in self.cleanup_list: try: for method in ["teardown", "tearDown", "stop", "close"]: cleanup = getattr(obj, method, None) if cleanup: cleanup() break except Exception as exc: errors.append(exc) if errors: raise RuntimeError("Errors during teardown: \n\n%s" % "\n\n".join([str(e) for e in errors])) def cleanup(self, x): """Record object x for clean-up during tear-down. x should have on of the methods teardown, tearDown, stop or close""" self.cleanup_list.append(x) return x def popen(self, *args, **kwargs): """Start a Process that will be cleaned up on teardown""" return self.cleanup(Process(*args, **kwargs)) def qdrouterd(self, *args, **kwargs): """Return a Qdrouterd that will be cleaned up on teardown""" return self.cleanup(Qdrouterd(*args, **kwargs)) port_range = (20000, 30000) next_port = random.randint(port_range[0], port_range[1]) @classmethod def get_port(cls, protocol_family='IPv4'): """Get an unused port""" def advance(): """Advance with wrap-around""" cls.next_port += 1 if cls.next_port >= cls.port_range[1]: cls.next_port = cls.port_range[0] start = cls.next_port while not port_available(cls.next_port, protocol_family): advance() if cls.next_port == start: raise Exception("No available ports in range %s", cls.port_range) p = cls.next_port advance() return p class TestCase(unittest.TestCase, Tester): # pylint: disable=too-many-public-methods """A TestCase that sets up its own working directory and is also a Tester.""" def __init__(self, test_method): unittest.TestCase.__init__(self, test_method) Tester.__init__(self, self.id()) @classmethod def setUpClass(cls): cls.maxDiff = None cls.tester = Tester('.'.join([cls.__module__, cls.__name__, 'setUpClass'])) cls.tester.rmtree() cls.tester.setup() @classmethod def tearDownClass(cls): if hasattr(cls, 'tester'): cls.tester.teardown() del cls.tester def setUp(self): # Python < 2.7 will call setUp on the system_test.TestCase class # itself as well as the subclasses. Ignore that. if self.__class__ is TestCase: return # Hack to support setUpClass on older python. # If the class has not already been set up, do it now. if not hasattr(self.__class__, 'tester'): try: self.setUpClass() except: if hasattr(self.__class__, 'tester'): self.__class__.tester.teardown() raise Tester.setup(self) def tearDown(self): # Python < 2.7 will call tearDown on the system_test.TestCase class # itself as well as the subclasses. Ignore that. if self.__class__ is TestCase: return Tester.teardown(self) # Hack to support tearDownClass on older versions of python. if hasattr(self.__class__, '_tear_down_class'): self.tearDownClass() def skipTest(self, reason): """Workaround missing unittest.TestCase.skipTest in python 2.6. 
The caller must return in order to end the test""" if hasattr(unittest.TestCase, 'skipTest'): unittest.TestCase.skipTest(self, reason) else: print("Skipping test %s: %s" % (self.id(), reason)) # Hack to support tearDownClass on older versions of python. # The default TestLoader sorts tests alphabetically so we insert # a fake tests that will run last to call tearDownClass. # NOTE: definitely not safe for a parallel test-runner. if not hasattr(unittest.TestCase, 'tearDownClass'): def test_zzzz_teardown_class(self): """Fake test to call tearDownClass""" if self.__class__ is not TestCase: self.__class__._tear_down_class = True def assert_fair(self, seq): avg = sum(seq)/len(seq) for i in seq: assert i > avg/2, "Work not fairly distributed: %s"%seq def assertIn(self, item, items): assert item in items, "%s not in %s" % (item, items) if not hasattr(unittest.TestCase, 'assertRegexpMatches'): def assertRegexpMatches(self, text, regexp, msg=None): """For python < 2.7: assert re.search(regexp, text)""" assert re.search(regexp, text), msg or "Can't find %r in '%s'" %(regexp, text) class SkipIfNeeded(object): """ Decorator class that can be used along with test methods to provide skip test behavior when using both python2.6 or a greater version. This decorator can be used in test methods and a boolean condition must be provided (skip parameter) to define whether or not the test will be skipped. """ def __init__(self, skip, reason): """ :param skip: if True the method wont be called :param reason: reason why test was skipped """ self.skip = skip self.reason = reason def __call__(self, f): def wrap(*args, **kwargs): """ Wraps original test method's invocation and dictates whether or not the test will be executed based on value (boolean) of the skip parameter. When running test with python < 2.7, if the "skip" parameter is true, the original method won't be called. If running python >= 2.7, then skipTest will be called with given "reason" and original method will be invoked. :param args: :return: """ instance = args[0] if self.skip: if sys.version_info < (2, 7): print("%s -> skipping (python<2.7) [%s] ..." % (f.__name__, self.reason)) return else: instance.skipTest(self.reason) return f(*args, **kwargs) return wrap def main_module(): """ Return the module name of the __main__ module - i.e. the filename with the path and .py extension stripped. Useful to run the tests in the current file but using the proper module prefix instead of '__main__', as follows: if __name__ == '__main__': unittest.main(module=main_module()) """ return os.path.splitext(os.path.basename(__main__.__file__))[0] class AsyncTestReceiver(MessagingHandler): """ A simple receiver that runs in the background and queues any received messages. Messages can be retrieved from this thread via the queue member. :param wait: block the constructor until the link has been fully established. 
:param recover_link: restart on remote link detach """ Empty = Queue.Empty def __init__(self, address, source, conn_args=None, container_id=None, wait=True, recover_link=False, msg_args={}): super(AsyncTestReceiver, self).__init__(**msg_args) self.address = address self.source = source self.conn_args = conn_args self.queue = Queue.Queue() self._conn = None self._container = Container(self) cid = container_id or "ATR-%s:%s" % (source, uuid.uuid4()) self._container.container_id = cid self._ready = Event() self._recover_link = recover_link self._recover_count = 0 self._stop_thread = False self._thread = Thread(target=self._main) self._thread.daemon = True self._thread.start() if wait and self._ready.wait(timeout=TIMEOUT) is False: raise Exception("Timed out waiting for receiver start") def _main(self): self._container.timeout = 5.0 self._container.start() while self._container.process(): if self._stop_thread: if self._conn: self._conn.close() self._conn = None def stop(self, timeout=TIMEOUT): self._stop_thread = True self._container.wakeup() self._thread.join(timeout=TIMEOUT) if self._thread.is_alive(): raise Exception("AsyncTestReceiver did not exit") def on_start(self, event): kwargs = {'url': self.address} if self.conn_args: kwargs.update(self.conn_args) self._conn = event.container.connect(**kwargs) def on_connection_opened(self, event): kwargs = {'source': self.source} rcv = event.container.create_receiver(event.connection, **kwargs) def on_link_opened(self, event): self._ready.set() def on_link_closing(self, event): event.link.close() if self._recover_link and not self._stop_thread: # lesson learned: the generated link name will be the same as the # old link (which is bad) so we specify a new one self._recover_count += 1 kwargs = {'source': self.source, 'name': "%s:%s" % (event.link.name, self._recover_count)} rcv = event.container.create_receiver(event.connection, **kwargs) def on_message(self, event): self.queue.put(event.message) def on_disconnected(self, event): # if remote terminates the connection kill the thread else it will spin # on the cpu if self._conn: self._conn.close() self._conn = None class AsyncTestSender(MessagingHandler): """ A simple sender that runs in the background and sends 'count' messages to a given target. 
""" class TestSenderException(Exception): def __init__(self, error=None): super(AsyncTestSender.TestSenderException, self).__init__(error) def __init__(self, address, target, count=1, message=None, container_id=None, presettle=False): super(AsyncTestSender, self).__init__(auto_accept=False, auto_settle=False) self.address = address self.target = target self.total = count self.presettle = presettle self.accepted = 0 self.released = 0 self.modified = 0 self.rejected = 0 self.sent = 0 self.error = None self.link_stats = None self._message = message or Message(body="test") self._container = Container(self) cid = container_id or "ATS-%s:%s" % (target, uuid.uuid4()) self._container.container_id = cid self._link_name = "%s-%s" % (cid, "tx") self._thread = Thread(target=self._main) self._thread.daemon = True self._thread.start() def _main(self): self._container.timeout = 5.0 self._container.start() while self._container.process(): self._check_if_done() def wait(self): # don't stop it - wait until everything is sent self._thread.join(timeout=TIMEOUT) assert not self._thread.is_alive(), "sender did not complete" if self.error: raise AsyncTestSender.TestSenderException(self.error) def on_start(self, event): self._conn = self._container.connect(self.address) def on_connection_opened(self, event): option = AtMostOnce if self.presettle else AtLeastOnce self._sender = self._container.create_sender(self._conn, target=self.target, options=option(), name=self._link_name) def on_sendable(self, event): if self.sent < self.total: self._sender.send(self._message) self.sent += 1 def _check_if_done(self): done = (self.sent == self.total and (self.presettle or (self.accepted + self.released + self.modified + self.rejected == self.sent))) if done and self._conn: self.link_stats = get_link_info(self._link_name, self.address) self._conn.close() self._conn = None def on_accepted(self, event): self.accepted += 1; event.delivery.settle() def on_released(self, event): # for some reason Proton 'helpfully' calls on_released even though the # delivery state is actually MODIFIED if event.delivery.remote_state == Delivery.MODIFIED: return self.on_modified(event) self.released += 1 event.delivery.settle() def on_modified(self, event): self.modified += 1 event.delivery.settle() def on_rejected(self, event): self.rejected += 1 event.delivery.settle() def on_link_error(self, event): self.error = "link error:%s" % str(event.link.remote_condition) if self._conn: self._conn.close() self._conn = None def on_disconnected(self, event): # if remote terminates the connection kill the thread else it will spin # on the cpu self.error = "connection to remote dropped" if self._conn: self._conn.close() self._conn = None class QdManager(object): """ A means to invoke qdmanage during a testcase """ def __init__(self, tester=None, address=None, timeout=TIMEOUT, router_id=None, edge_router_id=None): # 'tester' - can be 'self' when called in a test, # or an instance any class derived from Process (like Qdrouterd) self._tester = tester or Tester(None) self._timeout = timeout self._address = address self.router_id = router_id self.edge_router_id = edge_router_id self.router = [] if self.router_id: self.router = self.router + ['--router', self.router_id] elif self.edge_router_id: self.router = self.router + ['--edge-router', self.edge_router_id] def __call__(self, cmd, address=None, input=None, expect=Process.EXIT_OK, timeout=None): assert address or self._address, "address missing" p = self._tester.popen( ['qdmanage'] + cmd.split(' ') + self.router 
+ ['--bus', address or self._address, '--indent=-1', '--timeout', str(timeout or self._timeout)], stdin=PIPE, stdout=PIPE, stderr=STDOUT, expect=expect, universal_newlines=True) out = p.communicate(input)[0] try: p.teardown() except Exception as e: raise Exception("%s\n%s" % (e, out)) return out def create(self, long_type, kwargs): cmd = "CREATE --type=%s" % long_type for k, v in kwargs.items(): cmd += " %s=%s" % (k, v) return json.loads(self(cmd)) def update(self, long_type, kwargs, name=None, identity=None): cmd = 'UPDATE --type=%s' % long_type if identity is not None: cmd += " --identity=%s" % identity elif name is not None: cmd += " --name=%s" % name for k, v in kwargs.items(): cmd += " %s=%s" % (k, v) return json.loads(self(cmd)) def delete(self, long_type, name=None, identity=None): cmd = 'DELETE --type=%s' % long_type if identity is not None: cmd += " --identity=%s" % identity elif name is not None: cmd += " --name=%s" % name else: assert False, "name or identity not supplied!" self(cmd) def query(self, long_type): return json.loads(self('QUERY --type=%s' % long_type)) def get_log(self, limit=None): cmd = 'GET-LOG' if (limit): cmd += " limit=%s" % limit return json.loads(self(cmd)) class MgmtMsgProxy(object): """ Utility for creating and inspecting management messages """ class _Response(object): def __init__(self, status_code, status_description, body): self.status_code = status_code self.status_description = status_description if body.__class__ == dict and len(body.keys()) == 2 and 'attributeNames' in body.keys() and 'results' in body.keys(): results = [] names = body['attributeNames'] for result in body['results']: result_map = {} for i in range(len(names)): result_map[names[i]] = result[i] results.append(MgmtMsgProxy._Response(status_code, status_description, result_map)) self.attrs = {'results': results} else: self.attrs = body def __getattr__(self, key): return self.attrs[key] def __init__(self, reply_addr): self.reply_addr = reply_addr def response(self, msg): ap = msg.properties return self._Response(ap['statusCode'], ap['statusDescription'], msg.body) def query_router(self): ap = {'operation': 'QUERY', 'type': 'org.apache.qpid.dispatch.router'} return Message(properties=ap, reply_to=self.reply_addr) def query_connections(self): ap = {'operation': 'QUERY', 'type': 'org.apache.qpid.dispatch.connection'} return Message(properties=ap, reply_to=self.reply_addr) def query_links(self): ap = {'operation': 'QUERY', 'type': 'org.apache.qpid.dispatch.router.link'} return Message(properties=ap, reply_to=self.reply_addr) def query_link_routes(self): ap = {'operation': 'QUERY', 'type': 'org.apache.qpid.dispatch.router.config.linkRoute'} return Message(properties=ap, reply_to=self.reply_addr) def query_addresses(self): ap = {'operation': 'QUERY', 'type': 'org.apache.qpid.dispatch.router.address'} return Message(properties=ap, reply_to=self.reply_addr) def create_link_route(self, name, kwargs): ap = {'operation': 'CREATE', 'type': 'org.apache.qpid.dispatch.router.config.linkRoute', 'name': name} return Message(properties=ap, reply_to=self.reply_addr, body=kwargs) def delete_link_route(self, name): ap = {'operation': 'DELETE', 'type': 'org.apache.qpid.dispatch.router.config.linkRoute', 'name': name} return Message(properties=ap, reply_to=self.reply_addr) def create_connector(self, name, **kwargs): ap = {'operation': 'CREATE', 'type': 'org.apache.qpid.dispatch.connector', 'name': name} return Message(properties=ap, reply_to=self.reply_addr, body=kwargs) def delete_connector(self, name): ap = 
{'operation': 'DELETE', 'type': 'org.apache.qpid.dispatch.connector', 'name': name} return Message(properties=ap, reply_to=self.reply_addr) def query_conn_link_routes(self): ap = {'operation': 'QUERY', 'type': 'org.apache.qpid.dispatch.router.connection.linkRoute'} return Message(properties=ap, reply_to=self.reply_addr) def create_conn_link_route(self, name, kwargs): ap = {'operation': 'CREATE', 'type': 'org.apache.qpid.dispatch.router.connection.linkRoute', 'name': name} return Message(properties=ap, reply_to=self.reply_addr, body=kwargs) def delete_conn_link_route(self, name): ap = {'operation': 'DELETE', 'type': 'org.apache.qpid.dispatch.router.connection.linkRoute', 'name': name} return Message(properties=ap, reply_to=self.reply_addr) def read_conn_link_route(self, name): ap = {'operation': 'READ', 'type': 'org.apache.qpid.dispatch.router.connection.linkRoute', 'name': name} return Message(properties=ap, reply_to=self.reply_addr) class TestTimeout(object): """ A callback object for MessagingHandler class parent: A MessagingHandler with a timeout() method """ def __init__(self, parent): self.parent = parent def on_timer_task(self, event): self.parent.timeout() class PollTimeout(object): """ A callback object for MessagingHandler scheduled timers parent: A MessagingHandler with a poll_timeout() method """ def __init__(self, parent): self.parent = parent def on_timer_task(self, event): self.parent.poll_timeout() def get_link_info(name, address): """ Query the router at address for the status and statistics of the named link """ qdm = QdManager(address=address) rc = qdm.query('org.apache.qpid.dispatch.router.link') for item in rc: if item.get('name') == name: return item return None def has_mobile_dest_in_address_table(address, dest): qdm = QdManager(address=address) rc = qdm.query('org.apache.qpid.dispatch.router.address') has_dest = False for item in rc: if dest in item.get("name"): has_dest = True break return has_dest def get_inter_router_links(address): """ Return a list of all links with type="inter-router :param address: """ inter_router_links = [] qdm = QdManager(address=address) rc = qdm.query('org.apache.qpid.dispatch.router.link') for item in rc: if item.get("linkType") == "inter-router": inter_router_links.append(item) return inter_router_links class Timestamp(object): """ Time stamps for logging. """ def __init__(self): self.ts = datetime.now() def __str__(self): return self.ts.strftime("%Y-%m-%d %H:%M:%S.%f") class Logger(object): """ Record an event log for a self test. May print per-event or save events to be printed later. """ def __init__(self, title="Logger", print_to_console=False, save_for_dump=True): self.title = title self.print_to_console = print_to_console self.save_for_dump = save_for_dump self.logs = [] def log(self, msg): ts = Timestamp() if self.save_for_dump: self.logs.append( (ts, msg) ) if self.print_to_console: print("%s %s" % (ts, msg)) sys.stdout.flush() def dump(self): print(self) sys.stdout.flush() def __str__(self): lines = [] lines.append(self.title) for ts, msg in self.logs: lines.append("%s %s" % (ts, msg)) res = str('\n'.join(lines)) return res
[]
[]
[ "QPID_DISPATCH_HOME", "PATH", "QPID_SYSTEM_TEST_TIMEOUT", "QPID_DISPATCH_RUNNER" ]
[]
["QPID_DISPATCH_HOME", "PATH", "QPID_SYSTEM_TEST_TIMEOUT", "QPID_DISPATCH_RUNNER"]
python
4
0
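A minimal usage sketch for the Qpid Dispatch test harness in the row above, assuming the file is importable as system_test and that qdrouterd is on the PATH; the test class, router id and listener layout below are hypothetical illustrations, not part of the original file:

# Hypothetical test case built on the harness above (module name assumed).
from system_test import TestCase, Qdrouterd


class SingleRouterTest(TestCase):
    @classmethod
    def setUpClass(cls):
        super(SingleRouterTest, cls).setUpClass()
        port = cls.tester.get_port()                       # pick an unused local port
        config = Qdrouterd.Config([
            ('router', {'id': 'QDR.A', 'mode': 'standalone'}),
            ('listener', {'port': port, 'role': 'normal'}),
        ])
        # wait=True blocks in __init__ until wait_ready() sees the listener port up
        cls.router = cls.tester.qdrouterd('QDR.A', config, wait=True)

    def test_listener_address(self):
        # addresses is derived from the configured listeners
        self.assertTrue(self.router.addresses[0].startswith('amqp://'))

The Tester base class records the router for teardown, so the test only has to start it; cleanup happens automatically when the class is torn down.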
server/test/thred_multi.py
from threading import Thread
[]
[]
[]
[]
[]
python
null
null
null
test/backward_compatibility/try_hdfs.py
# BEGIN_COPYRIGHT # # Copyright 2009-2015 CRS4. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # END_COPYRIGHT """ Check that resetting the hdfs module after changing os.environ['HADOOP_CONF_DIR'] works (i.e., Pydoop references the correct HDFS service). Note that it does **NOT** work if you've already instantiated an hdfs handle, and this is NOT due to the caching system. """ import sys import os import argparse import pydoop.hdfs as hdfs def dump_status(fs): print "(host, port, user) = %r" % ((fs.host, fs.port, fs.user),) print "_CACHE = %r" % (fs._CACHE,) print "_ALIASES = %r" % (fs._ALIASES,) print def main(argv=sys.argv[1:]): parser = argparse.ArgumentParser(description=__doc__) parser.add_argument("--conf-dir", metavar="HADOOP_CONF_DIR") args = parser.parse_args(argv) if args.conf_dir: os.environ["HADOOP_CONF_DIR"] = os.path.abspath(args.conf_dir) hdfs.reset() fs = hdfs.hdfs() print "--- OPEN ---" dump_status(fs) print "cwd:", fs.working_directory() print fs.close() print "--- CLOSED ---" dump_status(fs) if __name__ == "__main__": main()
[]
[]
[ "HADOOP_CONF_DIR" ]
[]
["HADOOP_CONF_DIR"]
python
1
0
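The try_hdfs.py row above reduces to one pattern: change HADOOP_CONF_DIR and call hdfs.reset() before any hdfs handle has been created. A minimal sketch of that pattern, where the configuration directory is a hypothetical placeholder and a reachable HDFS service is assumed:

# Minimal sketch of the reset pattern exercised by try_hdfs.py; the
# configuration directory below is a hypothetical placeholder.
import os
import pydoop.hdfs as hdfs

os.environ["HADOOP_CONF_DIR"] = "/etc/hadoop/conf-alt"  # point at another cluster
hdfs.reset()                   # make Pydoop re-read the configuration
fs = hdfs.hdfs()               # this handle now targets the cluster described above
print("(host, port, user) = %r" % ((fs.host, fs.port, fs.user),))
fs.close()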
nemo/collections/nlp/models/language_modeling/megatron_lm_encoder_decoder_model.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import re from operator import itemgetter from typing import Any, Dict, Optional import torch import torch.nn as nn from omegaconf.dictconfig import DictConfig from pytorch_lightning.trainer.trainer import Trainer from nemo.collections.nlp.data.language_modeling.megatron.data_samplers import ( MegatronPretrainingRandomSampler, MegatronPretrainingSampler, ) from nemo.collections.nlp.models.language_modeling.megatron_base_model import MegatronBaseModel from nemo.collections.nlp.modules.common.megatron.clip_grads import clip_grad_norm_fp32 from nemo.collections.nlp.modules.common.megatron.token_level_encoder_decoder import ( MegatronTokenLevelEncoderDecoderModule, ) from nemo.collections.nlp.modules.common.megatron.utils import average_losses_across_data_parallel_group from nemo.collections.nlp.modules.common.tokenizer_utils import get_nmt_tokenizer from nemo.utils import AppState, logging try: from apex.transformer import parallel_state, tensor_parallel HAVE_APEX = True except (ImportError, ModuleNotFoundError): HAVE_APEX = False __all__ = ["MegatronLMEncoderDecoderModel"] class MegatronLMEncoderDecoderModel(MegatronBaseModel): """ Megatron encoder-decoder base class """ def __init__(self, cfg: DictConfig, trainer: Trainer): super().__init__(cfg, trainer=trainer) # build tokenizer (defaults to nemo supported tokenizers) self._build_tokenizer() # manipulate vocabulary (e.g., pad vocabulary for better efficiency) self._build_vocab() # TODO: create get_encoder_decoder_model()here for different losses (e..g, nll, vae, mim) self.enc_dec_model = MegatronTokenLevelEncoderDecoderModule( encoder_arch=cfg.encoder_arch, decoder_arch=cfg.decoder_arch, vocab_size=self.padded_vocab_size, hidden_size=cfg.hidden_size, max_position_embeddings=cfg.max_position_embeddings, num_layers=cfg.num_layers, num_attention_heads=cfg.num_attention_heads, apply_query_key_layer_scaling=cfg.get('apply_query_key_layer_scaling', True), kv_channels=cfg.get('kv_channels', None), ffn_hidden_size=cfg.ffn_hidden_size, num_tokentypes=0, parallel_output=True, pre_process=cfg.get('pre_process', True), post_process=cfg.get('post_process', True), init_method_std=cfg.get('init_method_std', 0.02), fp16_cross_entropy=cfg.get('fp16_lm_cross_entropy', False), use_cpu_initialization=cfg.get('use_cpu_initialization', False), hidden_dropout=cfg.get('hidden_dropout', 0.1), precision=cfg.get('precision', 16), fp32_residual_connection=cfg.get('fp32_residual_connection', False), activations_checkpoint_method=cfg.get('activations_checkpoint_method', None), activations_checkpoint_num_layers=cfg.get('activations_checkpoint_num_layers', 1), layernorm_epsilon=cfg.get('layernorm_epsilon', 1e-5), persist_layer_norm=cfg.get('persist_layer_norm', False), bias_gelu_fusion=True, onnx_safe=cfg.get('onnx_safe', False), ) def _build_tokenizer(self): """ Default tokenizer is based on available nemo tokenizers. 
Override this method to use an external tokenizer. All tokenizers are expected to provide compatible interface. Override default Encoder-decoder tokenizer to use legacy=True for sentencepiece. """ self.tokenizer = get_nmt_tokenizer( library=self._cfg.tokenizer.library, model_name=self._cfg.tokenizer.type, tokenizer_model=self.register_artifact("tokenizer_model", self._cfg.tokenizer.model), vocab_file=self.register_artifact("vocab_file", self._cfg.tokenizer.vocab_file), merges_file=self.register_artifact("merges_file", self._cfg.tokenizer.merge_file), legacy=True if self._cfg.tokenizer.library == 'sentencepiece' else False, ) def _build_vocab(self): """ Manipulate vocabulary (e.g., pad vocabulary for increased performance)/ """ # TODO: add config to allow to disable it? self.padded_vocab_size = self._vocab_size_with_padding( orig_vocab_size=self.tokenizer.vocab_size, make_vocab_size_divisible_by=self._cfg.get('make_vocab_size_divisible_by', 128), tensor_model_parallel_size=self._cfg.get('tensor_model_parallel_size', 1), ) def forward( self, encoder_input_ids, decoder_input_ids, encoder_attn_mask, decoder_attn_mask, tokentype_ids=None, lm_labels=None, enc_hidden_states=None, output_enc_hidden_only=False, ): ret_dict = self.enc_dec_model( enc_input_ids=encoder_input_ids, dec_input_ids=decoder_input_ids, enc_attn_mask=encoder_attn_mask, dec_attn_mask=decoder_attn_mask, tokentype_ids=tokentype_ids, labels=lm_labels, enc_hidden_states=enc_hidden_states, output_enc_hidden_only=output_enc_hidden_only, ) return ret_dict def training_step(self, batch, batch_idx): tokens_enc, tokens_dec, loss_mask, labels, enc_mask, dec_mask = self.process_batch(batch) tokens_loss = itemgetter("tokens_loss")( self(tokens_enc, tokens_dec, enc_mask, dec_mask, tokentype_ids=None, lm_labels=labels,) ) loss = self.loss_func(loss_mask, tokens_loss) self.log('train_loss', loss) # Reduced loss for logging. This averages the loss across all workers unlike "loss" above which is specific to a DDP rank. reduced_loss = average_losses_across_data_parallel_group([loss]) # cache reduced loss while accumulating gradients self._reduced_loss_buffer.append(reduced_loss[0]) if (batch_idx + 1) % self.trainer.accumulate_grad_batches == 0: # Reduced loss for logging. 
average_reduced_loss = sum(self._reduced_loss_buffer) / len(self._reduced_loss_buffer) self.log('reduced_train_loss', average_reduced_loss, prog_bar=True) lr = self._optimizer.param_groups[0]['lr'] self.log('lr', lr) self.log('global_step', self.trainer.global_step, prog_bar=True) self.log('consumed_samples', self.compute_consumed_samples(self.trainer.global_step), prog_bar=True) self._reduced_loss_buffer = [] return loss def validation_step(self, batch, batch_idx): tokens_enc, tokens_dec, loss_mask, labels, enc_mask, dec_mask = self.process_batch(batch) tokens_loss = itemgetter("tokens_loss")( self(tokens_enc, tokens_dec, enc_mask, dec_mask, tokentype_ids=None, lm_labels=labels,) ) loss = self.loss_func(loss_mask, tokens_loss) reduced_loss = average_losses_across_data_parallel_group([loss]) return reduced_loss def validation_epoch_end(self, outputs): averaged_loss = average_losses_across_data_parallel_group(outputs) self.log('val_loss', averaged_loss[0], prog_bar=True) self.log('consumed_samples', self.compute_consumed_samples(self.trainer.global_step)) def test_step(self, batch, batch_idx): return self.validation_step(batch, batch_idx) def test_epoch_end(self, outputs): averaged_loss = average_losses_across_data_parallel_group(outputs) logging.info(f'test_loss: {averaged_loss[0]}') def loss_func(self, loss_mask, tokens_loss): """ This function takes as input per-token loss and masks non-required values. """ losses = tokens_loss.view(-1).float() loss_mask = loss_mask.view(-1).float() # TODO: add nemo version here loss = torch.sum(losses * loss_mask) / loss_mask.sum() # sequence level nll return loss def process_batch(self, batch): """Build the batch.""" keys = ['text_enc', 'text_dec', 'labels', 'loss_mask', 'enc_mask', 'dec_mask'] datatype = torch.int64 data = batch data_b = tensor_parallel.broadcast_data(keys, data, datatype) # Unpack. tokens_enc = data_b['text_enc'].long() tokens_dec = data_b['text_dec'].long() labels = data_b['labels'].long() loss_mask = data_b['loss_mask'].float() enc_mask = data_b['enc_mask'] dec_mask = data_b['dec_mask'] return tokens_enc, tokens_dec, loss_mask, labels, enc_mask, dec_mask def build_train_valid_test_datasets(self): raise NotImplementedError("Please implement this method in child-class") def build_pretraining_data_loader(self, dataset, consumed_samples): """Buld dataloader given an input dataset.""" if dataset is None: return None # Megatron sampler if self._cfg.data.dataloader_type == 'single': batch_sampler = MegatronPretrainingSampler( total_samples=len(dataset), consumed_samples=consumed_samples, micro_batch_size=self._cfg.micro_batch_size, data_parallel_rank=parallel_state.get_data_parallel_rank(), data_parallel_size=parallel_state.get_data_parallel_world_size(), ) elif self._cfg.data.dataloader_type == 'cyclic': batch_sampler = MegatronPretrainingRandomSampler( total_samples=len(dataset), consumed_samples=consumed_samples, micro_batch_size=self._cfg.micro_batch_size, data_parallel_rank=parallel_state.get_data_parallel_rank(), data_parallel_size=parallel_state.get_data_parallel_world_size(), ) else: raise Exception('{} dataloader type is not supported.'.format(self._cfg.dataloader_type)) # Torch dataloader. 
return torch.utils.data.DataLoader( dataset, batch_sampler=batch_sampler, num_workers=self._cfg.data.num_workers, pin_memory=True, ) def setup(self, stage=None): """A PTL method to setup the training, validation and test datasets.""" if stage == 'predict': return if self._train_dl is not None and self._validation_dl is not None: return self.build_train_valid_test_datasets() self.setup_training_data(self._cfg.data) self.setup_validation_data(self._cfg.data) self.setup_test_data(self._cfg.data) def setup_training_data(self, cfg): if hasattr(self, '_train_ds'): resume_checkpoint_path = self.trainer.checkpoint_connector.resume_checkpoint_path if resume_checkpoint_path: consumed_samples = int( float(re.findall(r"consumed_samples\=([0-9]+.[0-9]+)", resume_checkpoint_path)[0]) ) else: consumed_samples = 0 self._train_dl = self.build_pretraining_data_loader(self._train_ds, consumed_samples) def setup_validation_data(self, cfg): if hasattr(self, '_validation_ds'): consumed_samples = 0 self._validation_dl = self.build_pretraining_data_loader(self._validation_ds, consumed_samples) def setup_test_data(self, cfg): if hasattr(self, '_test_ds'): consumed_samples = 0 self._test_dl = self.build_pretraining_data_loader(self._test_ds, consumed_samples) def compute_consumed_samples(self, global_step): app_state = AppState() consumed_samples = ( global_step * app_state.data_parallel_size * self._cfg.micro_batch_size * self.trainer.accumulate_grad_batches ) return int(consumed_samples) def configure_gradient_clipping(self, *args, **kwargs): """PTL hook to configure gradients. We use gradient clipping implementation from megatron-lm. """ clip_val = self.trainer.gradient_clip_val if clip_val is None: return clip_val = float(clip_val) if clip_val <= 0: return parameters = self.enc_dec_model.parameters() clip_grad_norm_fp32(parameters=parameters, max_norm=clip_val) def predict_step(self, batch: Any, batch_idx: int, dataloader_idx: Optional[int] = None) -> Any: request = batch response = self.complete(request) logging.info(f"response: {response}") return response def decode(self, tokens_enc, enc_mask, num_tokens_to_generate): # TODO: move method into a class inside MegatronTokenLevelEncoderDecoderModule (?) encoder_hidden_states = itemgetter("enc_output")( self( encoder_input_ids=tokens_enc, decoder_input_ids=None, encoder_attn_mask=enc_mask, decoder_attn_mask=None, tokentype_ids=None, lm_labels=None, enc_hidden_states=None, output_enc_hidden_only=True, ) ) predicted_tokens_dec = torch.LongTensor([self.tokenizer.bos_id]).unsqueeze(0).to(tokens_enc.device) for _ in range(num_tokens_to_generate): dec_mask = predicted_tokens_dec != self.tokenizer.pad_id token_logits = itemgetter("token_logits")( self( encoder_input_ids=tokens_enc, decoder_input_ids=predicted_tokens_dec, encoder_attn_mask=enc_mask, decoder_attn_mask=dec_mask, tokentype_ids=None, lm_labels=None, enc_hidden_states=encoder_hidden_states, output_enc_hidden_only=False, ) ) token_logits = tensor_parallel.gather_from_tensor_model_parallel_region(token_logits) # FIXME: already log softmax? 
log_probs, token_ids = torch.max(nn.functional.log_softmax(token_logits, dim=-1), dim=-1) predicted_tokens_dec = torch.cat([predicted_tokens_dec, token_ids[:, -1].unsqueeze(1)], 1) if token_ids[:, -1] == self.tokenizer.eos_id: break return predicted_tokens_dec, log_probs def complete(self, request: Dict): """ Autoregressively invokes language model in the inference mode Args: request: Dictionary with the following fields * prompt: a string which text the model should complete. * tokens_to_generate: how many tokens to generate while doing prompt completion. Returns: response: A python dictionary with the following fields * prompt: original text of the prompt * tokenized_prompt: list of (str) tokens from prompt * completion: a python dictionary with the following subfields: * tokens: a list of triples (token, token_id, log_prob) comprising completion * text: completion text (as a single string) """ response = {} self.freeze() # naive greedy slow loop # TODO: add option for BeamSearchDecoder response['prompt'] = request['prompt'][0] response['completion'] = {} tokens_enc = request['masked_sample'] response['masked_input'] = ' '.join(self.tokenizer.ids_to_tokens(tokens_enc[0])) enc_mask = tokens_enc != self.tokenizer.pad_id enc_mask = enc_mask < 0.5 predicted_tokens_ids, log_probs = self.decode(tokens_enc, enc_mask, int(request['tokens_to_generate'])) predicted_tokens_ids = predicted_tokens_ids.cpu().numpy()[0].tolist() log_probs = log_probs.cpu().numpy()[0].tolist() if self.tokenizer.eos_id in predicted_tokens_ids: idx = predicted_tokens_ids.index(self.tokenizer.eos_id) predicted_tokens_ids = predicted_tokens_ids[:idx] else: predicted_tokens_ids = [id for id in predicted_tokens_ids if id != self.tokenizer.pad_id] predicted_tokens_dec = self.tokenizer.ids_to_tokens(predicted_tokens_ids) response['completion']['text'] = self.tokenizer.tokens_to_text(predicted_tokens_dec) response['completion']['tokens'] = list(zip(predicted_tokens_ids, predicted_tokens_dec, log_probs)) self.unfreeze() return response def _vocab_size_with_padding(self, orig_vocab_size, make_vocab_size_divisible_by, tensor_model_parallel_size): """Pad vocab size so it is divisible by model parallel size and still having GPU friendly size.""" after = orig_vocab_size multiple = make_vocab_size_divisible_by * tensor_model_parallel_size while (after % multiple) != 0: after += 1 logging.info( f'Padded vocab_size: {after}, original vocab_size: {orig_vocab_size}, dummy tokens: {after - orig_vocab_size}.' ) return after def _enable_nvidia_optimizations(self): "These optimizations are present in NVIDIA NGC PyTorch Containers" # Version check nvidia_torch_version = os.getenv('NVIDIA_PYTORCH_VERSION', None) if nvidia_torch_version is not None: NVIDIA_TORCH_MAJOR = int(nvidia_torch_version.split('.')[0]) NVIDIA_TORCH_MINOR = int(nvidia_torch_version.split('.')[1]) # Apex Persistent layer norm is supported from Nvidia PyTorch container v21.11 if NVIDIA_TORCH_MAJOR < 21 or (NVIDIA_TORCH_MAJOR == 21 and NVIDIA_TORCH_MINOR < 11): self._cfg.persist_layer_norm = False if NVIDIA_TORCH_MAJOR >= 21 or (NVIDIA_TORCH_MAJOR == 21 and NVIDIA_TORCH_MINOR >= 11): # NVFUSER torch._C._jit_set_profiling_executor(True) torch._C._jit_set_profiling_mode(True) torch._C._jit_override_can_fuse_on_cpu(False) torch._C._jit_override_can_fuse_on_gpu(False) torch._C._jit_set_texpr_fuser_enabled(False) torch._C._jit_set_nvfuser_enabled(True) torch._C._debug_set_autodiff_subgraph_inlining(False) else: # Not a Nvidia container. 
Dependency check is on users pass def list_available_models(self): pass
[]
[]
[ "NVIDIA_PYTORCH_VERSION" ]
[]
["NVIDIA_PYTORCH_VERSION"]
python
1
0
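For the megatron_lm_encoder_decoder_model.py row above, the least obvious part is the shape of the request dictionary that complete() expects. The sketch below only illustrates that shape; the checkpoint path is a placeholder, and restore_from() plus tokenizer.text_to_ids() are assumptions about the surrounding NeMo API rather than anything defined in this file, so running it for real requires a matching checkpoint and environment:

# Hypothetical greedy-decoding call against complete(); the .nemo path is a placeholder.
import torch
from nemo.collections.nlp.models.language_modeling.megatron_lm_encoder_decoder_model import (
    MegatronLMEncoderDecoderModel,
)

model = MegatronLMEncoderDecoderModel.restore_from("/path/to/model.nemo")  # assumed loading API
model.eval()

prompt = "translate English to German: the house is small"
ids = model.tokenizer.text_to_ids(prompt)              # assumed tokenizer helper
request = {
    "prompt": [prompt],                                 # complete() reads prompt[0]
    "masked_sample": torch.LongTensor([ids]),           # used directly as encoder input ids
    "tokens_to_generate": 16,                           # greedy decoding budget
}
response = model.complete(request)
print(response["completion"]["text"])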
pkg/adapter/runtime.go
// +build !remoteclient package adapter import ( "bufio" "context" "io" "io/ioutil" "os" "text/template" "github.com/containers/libpod/cmd/podman/shared" "github.com/containers/buildah" "github.com/containers/buildah/imagebuildah" "github.com/containers/buildah/pkg/parse" "github.com/containers/image/docker/reference" "github.com/containers/image/types" "github.com/containers/libpod/cmd/podman/cliconfig" "github.com/containers/libpod/cmd/podman/libpodruntime" "github.com/containers/libpod/libpod" "github.com/containers/libpod/libpod/events" "github.com/containers/libpod/libpod/image" "github.com/containers/libpod/pkg/rootless" "github.com/containers/storage/pkg/archive" "github.com/pkg/errors" "k8s.io/api/core/v1" ) // LocalRuntime describes a typical libpod runtime type LocalRuntime struct { *libpod.Runtime Remote bool } // ContainerImage ... type ContainerImage struct { *image.Image } // Container ... type Container struct { *libpod.Container } // Pod encapsulates the libpod.Pod structure, helps with remote vs. local type Pod struct { *libpod.Pod } // Volume ... type Volume struct { *libpod.Volume } // VolumeFilter is for filtering volumes on the client type VolumeFilter func(*Volume) bool // GetRuntime returns a LocalRuntime struct with the actual runtime embedded in it func GetRuntime(ctx context.Context, c *cliconfig.PodmanCommand) (*LocalRuntime, error) { runtime, err := libpodruntime.GetRuntime(ctx, c) if err != nil { return nil, err } return &LocalRuntime{ Runtime: runtime, }, nil } // GetImages returns a slice of images in containerimages func (r *LocalRuntime) GetImages() ([]*ContainerImage, error) { var containerImages []*ContainerImage images, err := r.Runtime.ImageRuntime().GetImages() if err != nil { return nil, err } for _, i := range images { containerImages = append(containerImages, &ContainerImage{i}) } return containerImages, nil } // NewImageFromLocal returns a containerimage representation of a image from local storage func (r *LocalRuntime) NewImageFromLocal(name string) (*ContainerImage, error) { img, err := r.Runtime.ImageRuntime().NewFromLocal(name) if err != nil { return nil, err } return &ContainerImage{img}, nil } // LoadFromArchiveReference calls into local storage to load an image from an archive func (r *LocalRuntime) LoadFromArchiveReference(ctx context.Context, srcRef types.ImageReference, signaturePolicyPath string, writer io.Writer) ([]*ContainerImage, error) { var containerImages []*ContainerImage imgs, err := r.Runtime.ImageRuntime().LoadFromArchiveReference(ctx, srcRef, signaturePolicyPath, writer) if err != nil { return nil, err } for _, i := range imgs { ci := ContainerImage{i} containerImages = append(containerImages, &ci) } return containerImages, nil } // New calls into local storage to look for an image in local storage or to pull it func (r *LocalRuntime) New(ctx context.Context, name, signaturePolicyPath, authfile string, writer io.Writer, dockeroptions *image.DockerRegistryOptions, signingoptions image.SigningOptions, forcePull bool, label *string) (*ContainerImage, error) { img, err := r.Runtime.ImageRuntime().New(ctx, name, signaturePolicyPath, authfile, writer, dockeroptions, signingoptions, forcePull, label) if err != nil { return nil, err } return &ContainerImage{img}, nil } // RemoveImage calls into local storage and removes an image func (r *LocalRuntime) RemoveImage(ctx context.Context, img *ContainerImage, force bool) (string, error) { return r.Runtime.RemoveImage(ctx, img.Image, force) } // PruneImages is wrapper into PruneImages 
within the image pkg func (r *LocalRuntime) PruneImages(ctx context.Context, all bool) ([]string, error) { return r.ImageRuntime().PruneImages(ctx, all) } // Export is a wrapper to container export to a tarfile func (r *LocalRuntime) Export(name string, path string) error { ctr, err := r.Runtime.LookupContainer(name) if err != nil { return errors.Wrapf(err, "error looking up container %q", name) } return ctr.Export(path) } // Import is a wrapper to import a container image func (r *LocalRuntime) Import(ctx context.Context, source, reference string, changes []string, history string, quiet bool) (string, error) { return r.Runtime.Import(ctx, source, reference, changes, history, quiet) } // CreateVolume is a wrapper to create volumes func (r *LocalRuntime) CreateVolume(ctx context.Context, c *cliconfig.VolumeCreateValues, labels, opts map[string]string) (string, error) { var ( options []libpod.VolumeCreateOption volName string ) if len(c.InputArgs) > 0 { volName = c.InputArgs[0] options = append(options, libpod.WithVolumeName(volName)) } if c.Flag("driver").Changed { options = append(options, libpod.WithVolumeDriver(c.Driver)) } if len(labels) != 0 { options = append(options, libpod.WithVolumeLabels(labels)) } if len(options) != 0 { options = append(options, libpod.WithVolumeOptions(opts)) } newVolume, err := r.NewVolume(ctx, options...) if err != nil { return "", err } return newVolume.Name(), nil } // RemoveVolumes is a wrapper to remove volumes func (r *LocalRuntime) RemoveVolumes(ctx context.Context, c *cliconfig.VolumeRmValues) ([]string, error) { return r.Runtime.RemoveVolumes(ctx, c.InputArgs, c.All, c.Force) } // Push is a wrapper to push an image to a registry func (r *LocalRuntime) Push(ctx context.Context, srcName, destination, manifestMIMEType, authfile, signaturePolicyPath string, writer io.Writer, forceCompress bool, signingOptions image.SigningOptions, dockerRegistryOptions *image.DockerRegistryOptions, additionalDockerArchiveTags []reference.NamedTagged) error { newImage, err := r.ImageRuntime().NewFromLocal(srcName) if err != nil { return err } return newImage.PushImageToHeuristicDestination(ctx, destination, manifestMIMEType, authfile, signaturePolicyPath, writer, forceCompress, signingOptions, dockerRegistryOptions, nil) } // InspectVolumes returns a slice of volumes based on an arg list or --all func (r *LocalRuntime) InspectVolumes(ctx context.Context, c *cliconfig.VolumeInspectValues) ([]*Volume, error) { var ( volumes []*libpod.Volume err error ) if c.All { volumes, err = r.GetAllVolumes() } else { for _, v := range c.InputArgs { vol, err := r.GetVolume(v) if err != nil { return nil, err } volumes = append(volumes, vol) } } if err != nil { return nil, err } return libpodVolumeToVolume(volumes), nil } // Volumes returns a slice of localruntime volumes func (r *LocalRuntime) Volumes(ctx context.Context) ([]*Volume, error) { vols, err := r.GetAllVolumes() if err != nil { return nil, err } return libpodVolumeToVolume(vols), nil } // libpodVolumeToVolume converts a slice of libpod volumes to a slice // of localruntime volumes (same as libpod) func libpodVolumeToVolume(volumes []*libpod.Volume) []*Volume { var vols []*Volume for _, v := range volumes { newVol := Volume{ v, } vols = append(vols, &newVol) } return vols } // Build is the wrapper to build images func (r *LocalRuntime) Build(ctx context.Context, c *cliconfig.BuildValues, options imagebuildah.BuildOptions, dockerfiles []string) error { namespaceOptions, networkPolicy, err := 
parse.NamespaceOptions(c.PodmanCommand.Command) if err != nil { return errors.Wrapf(err, "error parsing namespace-related options") } usernsOption, idmappingOptions, err := parse.IDMappingOptions(c.PodmanCommand.Command, options.Isolation) if err != nil { return errors.Wrapf(err, "error parsing ID mapping options") } namespaceOptions.AddOrReplace(usernsOption...) systemContext, err := parse.SystemContextFromOptions(c.PodmanCommand.Command) if err != nil { return errors.Wrapf(err, "error building system context") } authfile := c.Authfile if len(c.Authfile) == 0 { authfile = os.Getenv("REGISTRY_AUTH_FILE") } systemContext.AuthFilePath = authfile commonOpts, err := parse.CommonBuildOptions(c.PodmanCommand.Command) if err != nil { return err } options.NamespaceOptions = namespaceOptions options.ConfigureNetwork = networkPolicy options.IDMappingOptions = idmappingOptions options.CommonBuildOpts = commonOpts options.SystemContext = systemContext if c.Flag("runtime").Changed { options.Runtime = r.GetOCIRuntimePath() } if c.Quiet { options.ReportWriter = ioutil.Discard } if rootless.IsRootless() { options.Isolation = buildah.IsolationOCIRootless } return r.Runtime.Build(ctx, options, dockerfiles...) } // PruneVolumes is a wrapper function for libpod PruneVolumes func (r *LocalRuntime) PruneVolumes(ctx context.Context) ([]string, []error) { return r.Runtime.PruneVolumes(ctx) } // SaveImage is a wrapper function for saving an image to the local filesystem func (r *LocalRuntime) SaveImage(ctx context.Context, c *cliconfig.SaveValues) error { source := c.InputArgs[0] additionalTags := c.InputArgs[1:] newImage, err := r.Runtime.ImageRuntime().NewFromLocal(source) if err != nil { return err } return newImage.Save(ctx, source, c.Format, c.Output, additionalTags, c.Quiet, c.Compress) } // LoadImage is a wrapper function for libpod PruneVolumes func (r *LocalRuntime) LoadImage(ctx context.Context, name string, cli *cliconfig.LoadValues) (string, error) { var ( writer io.Writer ) if !cli.Quiet { writer = os.Stderr } return r.Runtime.LoadImage(ctx, name, cli.Input, writer, cli.SignaturePolicy) } // IsImageNotFound checks if the error indicates that no image was found. 
func IsImageNotFound(err error) bool { if errors.Cause(err) == image.ErrNoSuchImage { return true } return false } // HealthCheck is a wrapper to same named function in libpod func (r *LocalRuntime) HealthCheck(c *cliconfig.HealthCheckValues) (libpod.HealthCheckStatus, error) { return r.Runtime.HealthCheck(c.InputArgs[0]) } // Events is a wrapper to libpod to obtain libpod/podman events func (r *LocalRuntime) Events(c *cliconfig.EventValues) error { var ( fromStart bool eventsError error ) tmpl, err := template.New("events").Parse(c.Format) if err != nil { return err } if len(c.Since) > 0 || len(c.Until) > 0 { fromStart = true } eventChannel := make(chan *events.Event) go func() { readOpts := events.ReadOptions{FromStart: fromStart, Stream: c.Stream, Filters: c.Filter, EventChannel: eventChannel, Since: c.Since, Until: c.Until} eventsError = r.Runtime.Events(readOpts) }() if eventsError != nil { return eventsError } if err != nil { return errors.Wrapf(err, "unable to tail the events log") } w := bufio.NewWriter(os.Stdout) for event := range eventChannel { if len(c.Format) > 0 { if err := tmpl.Execute(w, event); err != nil { return err } } else { if _, err := w.Write([]byte(event.ToHumanReadable())); err != nil { return err } } if _, err := w.Write([]byte("\n")); err != nil { return err } if err := w.Flush(); err != nil { return err } } return nil } // Diff shows the difference in two objects func (r *LocalRuntime) Diff(c *cliconfig.DiffValues, to string) ([]archive.Change, error) { return r.Runtime.GetDiff("", to) } // GenerateKube creates kubernetes email from containers and pods func (r *LocalRuntime) GenerateKube(c *cliconfig.GenerateKubeValues) (*v1.Pod, *v1.Service, error) { return shared.GenerateKube(c.InputArgs[0], c.Service, r.Runtime) } // GetPodsByStatus returns a slice of pods filtered by a libpod status func (r *LocalRuntime) GetPodsByStatus(statuses []string) ([]*libpod.Pod, error) { filterFunc := func(p *libpod.Pod) bool { state, _ := shared.GetPodStatus(p) for _, status := range statuses { if state == status { return true } } return false } pods, err := r.Runtime.Pods(filterFunc) if err != nil { return nil, err } return pods, nil } // GetVersion is an alias to satisfy interface{} func (r *LocalRuntime) GetVersion() (libpod.Version, error) { return libpod.GetVersion() }
[ "\"REGISTRY_AUTH_FILE\"" ]
[]
[ "REGISTRY_AUTH_FILE" ]
[]
["REGISTRY_AUTH_FILE"]
go
1
0
pwr/bowl/bowl_overlay.go
package bowl import ( "encoding/gob" "fmt" "io" "os" "path/filepath" "sort" "github.com/itchio/headway/state" "github.com/itchio/savior/filesource" "github.com/itchio/screw" "github.com/itchio/wharf/pwr/overlay" "github.com/itchio/lake" "github.com/itchio/lake/pools/fspool" "github.com/itchio/lake/tlc" "github.com/pkg/errors" ) var debugBrokenRename = os.Getenv("BOWL_DEBUG_BROKEN_RENAME") == "1" var overlayVerbose = os.Getenv("BOWL_OVERLAY_VERBOSE") == "1" func debugf(format string, args ...interface{}) { if overlayVerbose { fmt.Printf("[overlayBowl] %s\n", fmt.Sprintf(format, args...)) } } type overlayBowl struct { TargetContainer *tlc.Container TargetPool lake.Pool SourceContainer *tlc.Container OutputFolder string StageFolder string Consumer *state.Consumer stagePool *fspool.FsPool targetFilesByPath map[string]int64 // files we'll have to move transpositions []Transposition // files we'll have to patch using an overlay (indices in SourceContainer) overlayFiles []int64 // files we'll have to move from the staging folder to the dest moveFiles []int64 } type OverlayBowlCheckpoint struct { Transpositions []Transposition OverlayFiles []int64 MoveFiles []int64 } var _ Bowl = (*overlayBowl)(nil) type OverlayBowlParams struct { TargetContainer *tlc.Container SourceContainer *tlc.Container OutputFolder string StageFolder string Consumer *state.Consumer } func NewOverlayBowl(params OverlayBowlParams) (Bowl, error) { // input validation if params.TargetContainer == nil { return nil, errors.New("overlaybowl: TargetContainer must not be nil") } // TargetContainer gets modified for case-insensitive filesystems params.TargetContainer = params.TargetContainer.Clone() if params.SourceContainer == nil { return nil, errors.New("overlaybowl: SourceContainer must not be nil") } { if params.OutputFolder == "" { return nil, errors.New("overlaybowl: OutputFolder must not be nil") } stats, err := screw.Lstat(params.OutputFolder) if err != nil { return nil, errors.New("overlaybowl: OutputFolder must exist") } if !stats.IsDir() { return nil, errors.New("overlaybowl: OutputFolder must exist and be a directory") } } if params.StageFolder == "" { return nil, errors.New("overlaybowl: StageFolder must not be nil") } var err error err = screw.MkdirAll(params.StageFolder, 0755) if err != nil { return nil, errors.WithStack(err) } targetPool := fspool.New(params.TargetContainer, params.OutputFolder) stagePool := fspool.New(params.SourceContainer, params.StageFolder) targetFilesByPath := make(map[string]int64) for index, tf := range params.TargetContainer.Files { targetFilesByPath[tf.Path] = int64(index) } return &overlayBowl{ TargetContainer: params.TargetContainer, TargetPool: targetPool, SourceContainer: params.SourceContainer, OutputFolder: params.OutputFolder, StageFolder: params.StageFolder, Consumer: params.Consumer, stagePool: stagePool, targetFilesByPath: targetFilesByPath, }, nil } func (b *overlayBowl) Save() (*BowlCheckpoint, error) { c := &BowlCheckpoint{ Data: &OverlayBowlCheckpoint{ MoveFiles: b.moveFiles, OverlayFiles: b.overlayFiles, Transpositions: b.transpositions, }, } return c, nil } func (b *overlayBowl) Resume(c *BowlCheckpoint) error { if c == nil { return nil } if cc, ok := c.Data.(*OverlayBowlCheckpoint); ok { b.transpositions = cc.Transpositions b.moveFiles = cc.MoveFiles b.overlayFiles = cc.OverlayFiles } return nil } func (b *overlayBowl) GetWriter(sourceFileIndex int64) (EntryWriter, error) { sourceFile := b.SourceContainer.Files[sourceFileIndex] if sourceFile == nil { return nil, 
errors.Errorf("overlayBowl: unknown source file %d", sourceFileIndex) } if targetIndex, ok := b.targetFilesByPath[sourceFile.Path]; ok { debugf("returning overlay writer for '%s'", sourceFile.Path) // oh damn, that file already exists in the output - let's make an overlay b.markOverlay(sourceFileIndex) r, err := b.TargetPool.GetReadSeeker(targetIndex) if err != nil { return nil, errors.WithStack(err) } wPath := b.stagePool.GetPath(sourceFileIndex) return &overlayEntryWriter{path: wPath, readSeeker: r}, nil } // guess it's a new file! let's write it to staging anyway b.markMove(sourceFileIndex) debugf("returning move writer for '%s'", sourceFile.Path) wPath := b.stagePool.GetPath(sourceFileIndex) return &freshEntryWriter{path: wPath, file: sourceFile}, nil } func (b *overlayBowl) markOverlay(sourceFileIndex int64) { // make sure we don't double mark it for _, i := range b.overlayFiles { if i == sourceFileIndex { // oh cool it's already marked return } } // mark it b.overlayFiles = append(b.overlayFiles, sourceFileIndex) } func (b *overlayBowl) markMove(index int64) { // make sure we don't double mark it for _, i := range b.moveFiles { if i == index { // oh cool it's already marked return } } // mark it b.moveFiles = append(b.moveFiles, index) } func (b *overlayBowl) Transpose(t Transposition) error { // ok, say we resumed, maybe we already have a transposition for this source file? for i, tt := range b.transpositions { if tt.SourceIndex == t.SourceIndex { // and so we do! let's replace it. b.transpositions[i] = t return nil } } // if we didn't already have one, let's record it for when we commit b.transpositions = append(b.transpositions, t) return nil } func (b *overlayBowl) Commit() error { // oy, do we have work to do! var err error // - close the target pool, in case it still has a reader open! err = b.TargetPool.Close() if err != nil { return errors.WithStack(err) } // - same with stage pool, we might have it open for overlay purposes err = b.stagePool.Close() if err != nil { return errors.WithStack(err) } if screw.IsCaseInsensitiveFS() { // fix casing on-disk, reflect that on renames/etc. 
err = b.fixExistingCase() if err != nil { return err } } // - ensure dirs and symlinks err = b.ensureDirsAndSymlinks() if err != nil { return err } // - apply transpositions err = b.applyTranspositions() if err != nil { return errors.WithStack(err) } // - move files we need to move err = b.applyMoves() if err != nil { return errors.WithStack(err) } // - merge overlays err = b.applyOverlays() if err != nil { return errors.WithStack(err) } // - delete ghosts err = b.deleteGhosts() if err != nil { return errors.WithStack(err) } return nil } func (b *overlayBowl) Close() error { var err error err = b.TargetPool.Close() if err != nil { return errors.WithStack(err) } err = b.stagePool.Close() if err != nil { return errors.WithStack(err) } return nil } func (b *overlayBowl) fixExistingCase() error { fsp := fspool.New(b.SourceContainer, b.OutputFolder) var stats lake.CaseFixStats err := fsp.FixExistingCase(lake.CaseFixParams{ Stats: &stats, Consumer: b.Consumer, }) if err != nil { return err } for _, fix := range stats.Fixes { b.TargetContainer.ForEachEntry(func(e tlc.Entry) tlc.ForEachOutcome { if newPath, changed := fix.Apply(e.GetPath()); changed { e.SetPath(newPath) } // if strings.HasPrefix(e.GetPath(), fix.Old) { // e.SetPath(strings.Replace(e.GetPath(), fix.Old, fix.New, 1)) // } return tlc.ForEachContinue }) } return nil } func (b *overlayBowl) ensureDirsAndSymlinks() error { outputPath := b.OutputFolder processDir := func(dir *tlc.Dir) error { path := filepath.Join(outputPath, filepath.FromSlash(dir.Path)) stats, err := screw.Lstat(path) if err == nil { // did stat if stats.IsDir() { // good! return nil } // probably a file or a symlink, clear out err = screw.RemoveAll(path) if err != nil { return errors.WithStack(err) } } err = screw.MkdirAll(path, 0755) if err != nil { // If path is already a directory, MkdirAll does nothing and returns nil. // so if we get a non-nil error, we know it's serious business (permissions, etc.) return errors.WithStack(err) } return nil } for _, dir := range b.SourceContainer.Dirs { err := processDir(dir) if err != nil { return err } } // TODO: behave like github.com/itchio/savior for symlinks on windows ? processSymlink := func(symlink *tlc.Symlink) error { path := filepath.Join(outputPath, filepath.FromSlash(symlink.Path)) stats, err := screw.Lstat(path) if err == nil { // did stat if stats.Mode()&os.ModeSymlink == 0 { // not a symlink! 
clear out err = screw.RemoveAll(path) if err != nil { return errors.WithStack(err) } } } dest, err := screw.Readlink(path) if err != nil { if os.IsNotExist(err) { // symlink was missing debugf("Was missing, linking (%s) => (%s)\n", path, symlink.Dest) err = screw.Symlink(filepath.FromSlash(symlink.Dest), path) if err != nil { return errors.WithStack(err) } return nil } else { return errors.WithStack(err) } } // symlink is there if dest != filepath.FromSlash(symlink.Dest) { // wrong dest, fixing that err = screw.Remove(path) if err != nil { return errors.WithStack(err) } debugf("Was wrong path, removed and linking (%s) => (%s)\n", path, symlink.Dest) err = screw.Symlink(filepath.FromSlash(symlink.Dest), path) if err != nil { return errors.WithStack(err) } return nil } // existed, was symlink, and pointed to right file return nil } for _, symlink := range b.SourceContainer.Symlinks { err := processSymlink(symlink) if err != nil { return err } } return nil } type pathTranspo struct { TargetPath string OutputPath string } type mkdirBehavior int const ( mkdirBehaviorNever mkdirBehavior = 0xf8792 + iota mkdirBehaviorIfNeeded ) type transpoBehavior int const ( transpoBehaviorMove transpoBehavior = 0x1923 + iota transpoBehaviorCopy ) func (b *overlayBowl) applyTranspositions() error { transpositions := make(map[string][]*pathTranspo) outputPath := b.OutputFolder for _, t := range b.transpositions { targetFile := b.TargetContainer.Files[t.TargetIndex] sourceFile := b.SourceContainer.Files[t.SourceIndex] transpositions[targetFile.Path] = append(transpositions[targetFile.Path], &pathTranspo{ TargetPath: targetFile.Path, OutputPath: sourceFile.Path, }) } applyMultipleTranspositions := func(behavior transpoBehavior, targetPath string, group []*pathTranspo) error { // a file got duplicated! var noop *pathTranspo for _, transpo := range group { if targetPath == transpo.OutputPath { noop = transpo break } } for i, transpo := range group { if noop == nil { if i == 0 { // arbitrary pick first transposition as being the rename - do // all the others as copies first continue } } else if transpo == noop { // no need to copy for the noop continue } oldAbsolutePath := filepath.Join(outputPath, filepath.FromSlash(targetPath)) newAbsolutePath := filepath.Join(outputPath, filepath.FromSlash(transpo.OutputPath)) err := b.copy(oldAbsolutePath, newAbsolutePath, mkdirBehaviorIfNeeded) if err != nil { return errors.WithStack(err) } } if noop == nil { // we treated the first transpo as being the rename, gotta do it now transpo := group[0] oldAbsolutePath := filepath.Join(outputPath, filepath.FromSlash(targetPath)) newAbsolutePath := filepath.Join(outputPath, filepath.FromSlash(transpo.OutputPath)) switch behavior { case transpoBehaviorCopy: // no, wait, the target file is itself being patched, meaning it has a pending overlay. // in order for that overlay to apply cleanly, we must copy the file, not move it. // we should also not need mkdir, since we already ensured dirs and symlinks. err := b.copy(oldAbsolutePath, newAbsolutePath, mkdirBehaviorNever) if err != nil { return errors.WithStack(err) } case transpoBehaviorMove: err := b.move(oldAbsolutePath, newAbsolutePath) if err != nil { return errors.WithStack(err) } } } else { // muffin! 
} return nil } var cleanupRenames []*pathTranspo alreadyDone := make(map[string]bool) renameSeed := int64(0) for _, group := range transpositions { for _, transpo := range group { if transpo.TargetPath == transpo.OutputPath { // no-ops can't clash continue } if _, ok := transpositions[transpo.OutputPath]; ok { // transpo is doing A=>B, and another transpo is doing B=>C // instead, have transpo do A=>B2, the other do B=>C // then have a cleanup phase rename B2 to B renameSeed++ safePath := transpo.OutputPath + fmt.Sprintf(".butler-rename-%d", renameSeed) cleanupRenames = append(cleanupRenames, &pathTranspo{ TargetPath: safePath, OutputPath: transpo.OutputPath, }) transpo.OutputPath = safePath } } } overlayFilesByPath := make(map[string]bool) for _, overlayFileSourceIndex := range b.overlayFiles { f := b.SourceContainer.Files[overlayFileSourceIndex] overlayFilesByPath[f.Path] = true } for groupTargetPath, group := range transpositions { if alreadyDone[groupTargetPath] { continue } alreadyDone[groupTargetPath] = true behavior := transpoBehaviorMove _, hasPendingOverlay := overlayFilesByPath[groupTargetPath] if hasPendingOverlay { // if the target file is itself patched (it has a pending overlay), // then it must never be renamed to something else, only copied. behavior = transpoBehaviorCopy } if len(group) == 1 { transpo := group[0] if transpo.TargetPath == transpo.OutputPath { // file wasn't touched at all } else { // file was renamed oldAbsolutePath := filepath.Join(outputPath, filepath.FromSlash(transpo.TargetPath)) newAbsolutePath := filepath.Join(outputPath, filepath.FromSlash(transpo.OutputPath)) switch behavior { case transpoBehaviorCopy: // we should never need to mkdir, because we already ensured dirs and symlinks. err := b.copy(oldAbsolutePath, newAbsolutePath, mkdirBehaviorNever) if err != nil { return errors.WithStack(err) } case transpoBehaviorMove: err := b.move(oldAbsolutePath, newAbsolutePath) if err != nil { return errors.WithStack(err) } } } } else { err := applyMultipleTranspositions(behavior, groupTargetPath, group) if err != nil { return errors.WithStack(err) } } } for _, rename := range cleanupRenames { oldAbsolutePath := filepath.Join(outputPath, filepath.FromSlash(rename.TargetPath)) newAbsolutePath := filepath.Join(outputPath, filepath.FromSlash(rename.OutputPath)) err := b.move(oldAbsolutePath, newAbsolutePath) if err != nil { return errors.WithStack(err) } } return nil } func (b *overlayBowl) copy(oldAbsolutePath string, newAbsolutePath string, mkdirBehavior mkdirBehavior) error { debugf("cp '%s' '%s'", oldAbsolutePath, newAbsolutePath) if mkdirBehavior == mkdirBehaviorIfNeeded { err := screw.MkdirAll(filepath.Dir(newAbsolutePath), os.FileMode(0755)) if err != nil { return errors.WithStack(err) } } // fall back to copy + remove reader, err := screw.Open(oldAbsolutePath) if err != nil { return errors.WithStack(err) } defer reader.Close() stats, err := reader.Stat() if err != nil { return errors.WithStack(err) } writer, err := screw.OpenFile(newAbsolutePath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, stats.Mode()|tlc.ModeMask) if err != nil { return errors.WithStack(err) } defer writer.Close() _, err = io.Copy(writer, reader) if err != nil { return errors.WithStack(err) } return nil } func (b *overlayBowl) move(oldAbsolutePath string, newAbsolutePath string) error { debugf("mv '%s' '%s'", oldAbsolutePath, newAbsolutePath) err := screw.Remove(newAbsolutePath) if err != nil { if !os.IsNotExist(err) { return errors.WithStack(err) } } err = 
screw.MkdirAll(filepath.Dir(newAbsolutePath), os.FileMode(0755)) if err != nil { return errors.WithStack(err) } if debugBrokenRename { err = &os.PathError{} } else { err = screw.Rename(oldAbsolutePath, newAbsolutePath) } if err != nil { debugf("falling back to copy because of %s", err.Error()) if os.IsNotExist(err) { debugf("mhh our rename error was that old does not exist") } cErr := b.copy(oldAbsolutePath, newAbsolutePath, mkdirBehaviorNever) if cErr != nil { return cErr } cErr = screw.Remove(oldAbsolutePath) if cErr != nil { return cErr } } return nil } func (b *overlayBowl) applyMoves() error { for _, moveIndex := range b.moveFiles { file := b.SourceContainer.Files[moveIndex] if file == nil { return errors.Errorf("overlaybowl: applyMoves: no such file %d", moveIndex) } debugf("applying move '%s'", file.Path) nativePath := filepath.FromSlash(file.Path) stagePath := filepath.Join(b.StageFolder, nativePath) outputPath := filepath.Join(b.OutputFolder, nativePath) err := b.move(stagePath, outputPath) if err != nil { return errors.WithStack(err) } } return nil } func (b *overlayBowl) applyOverlays() error { ctx := &overlay.OverlayPatchContext{} handleOverlay := func(overlaySourceFileIndex int64) error { file := b.SourceContainer.Files[overlaySourceFileIndex] if file == nil { return errors.Errorf("overlaybowl: applyOverlays: no such file %d", overlaySourceFileIndex) } debugf("applying overlay '%s'", file.Path) nativePath := filepath.FromSlash(file.Path) stagePath := filepath.Join(b.StageFolder, nativePath) r, err := filesource.Open(stagePath) if err != nil { return errors.WithStack(err) } defer r.Close() outputPath := filepath.Join(b.OutputFolder, nativePath) w, err := screw.OpenFile(outputPath, os.O_WRONLY, os.FileMode(file.Mode|tlc.ModeMask)) if err != nil { return errors.WithStack(err) } defer w.Close() err = ctx.Patch(r, w) if err != nil { return errors.WithStack(err) } finalSize, err := w.Seek(0, io.SeekCurrent) if err != nil { return errors.WithStack(err) } err = w.Truncate(finalSize) if err != nil { return errors.WithStack(err) } return nil } for _, overlayIndex := range b.overlayFiles { err := handleOverlay(overlayIndex) if err != nil { return errors.WithStack(err) } } return nil } // ghosts // GhostKind determines what went missing: a file, a directory, or a symlink type GhostKind int const ( // GhostKindDir indicates that a directory has disappeared between two containers GhostKindDir GhostKind = iota + 0xfaf0 // GhostKindFile indicates that a file has disappeared between two containers GhostKindFile // GhostKindSymlink indicates that a symbolic link has disappeared between two containers GhostKindSymlink ) // A Ghost is a file, directory, or symlink, that has disappeared from one // container (target) to the next (source) type Ghost struct { Kind GhostKind Path string } func detectGhosts(sourceContainer *tlc.Container, targetContainer *tlc.Container) []Ghost { // first make a map of all the file paths in source, for later lookup sourceFileMap := make(map[string]bool) for _, f := range sourceContainer.Files { sourceFileMap[f.Path] = true } for _, s := range sourceContainer.Symlinks { sourceFileMap[s.Path] = true } for _, d := range sourceContainer.Dirs { sourceFileMap[d.Path] = true } // then walk through target container paths, if they're not in source, they were deleted var ghosts []Ghost for _, f := range targetContainer.Files { if !sourceFileMap[f.Path] { ghosts = append(ghosts, Ghost{ Kind: GhostKindFile, Path: f.Path, }) } } for _, s := range targetContainer.Symlinks { if 
!sourceFileMap[s.Path] { ghosts = append(ghosts, Ghost{ Kind: GhostKindSymlink, Path: s.Path, }) } } for _, d := range targetContainer.Dirs { if !sourceFileMap[d.Path] { ghosts = append(ghosts, Ghost{ Kind: GhostKindDir, Path: d.Path, }) } } return ghosts } type byDecreasingLength []Ghost func (s byDecreasingLength) Len() int { return len(s) } func (s byDecreasingLength) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s byDecreasingLength) Less(i, j int) bool { return len(s[j].Path) < len(s[i].Path) } func (b *overlayBowl) deleteGhosts() error { ghosts := detectGhosts(b.SourceContainer, b.TargetContainer) debugf("%d total ghosts", len(ghosts)) sort.Sort(byDecreasingLength(ghosts)) for _, ghost := range ghosts { debugf("ghost: %v", ghost) op := filepath.Join(b.OutputFolder, filepath.FromSlash(ghost.Path)) _, err := screw.Lstat(op) if err != nil { debugf("ghost already gone, bye bye! (%v)", err) continue } err = screw.Remove(op) if err == nil || os.IsNotExist(err) { // removed or already removed, good debugf("ghost removed or already gone '%s'", ghost.Path) } else { if ghost.Kind == GhostKindDir { // sometimes we can't delete directories, it's okay debugf("ghost dir left behind '%s'", ghost.Path) } else { return errors.WithStack(err) } } } return nil } // notifyWriteCloser type onCloseFunc func() error type notifyWriteCloser struct { w io.WriteCloser onClose onCloseFunc } var _ io.WriteCloser = (*notifyWriteCloser)(nil) func (nwc *notifyWriteCloser) Write(buf []byte) (int, error) { return nwc.w.Write(buf) } func (nwc *notifyWriteCloser) Close() (rErr error) { defer func() { if nwc.onClose != nil { cErr := nwc.onClose() if cErr != nil && rErr == nil { rErr = cErr } } }() err := nwc.w.Close() if err != nil { rErr = errors.WithStack(err) return } return } // overlayEntryWriter type overlayEntryWriter struct { path string readSeeker io.ReadSeeker file *os.File overlay overlay.OverlayWriter // this is how far into the source (new) file we are. // it doesn't correspond with `OverlayOffset`, which is // how many bytes of output the OverlayWriter has produced. sourceOffset int64 } type OverlayEntryWriterCheckpoint struct { // This offset is how many bytes we've written into the // overlay, not how many bytes into the new file we are. 
OverlayOffset int64 // This offset is how many bytes we've read from the target (old) file ReadOffset int64 } func (w *overlayEntryWriter) Tell() int64 { return w.sourceOffset } func (w *overlayEntryWriter) Save() (*WriterCheckpoint, error) { err := w.overlay.Flush() if err != nil { return nil, errors.WithStack(err) } err = w.file.Sync() if err != nil { return nil, errors.WithStack(err) } debugf("saving checkpoint: Offset = %d, ReadOffset = %d, OverlayOffset = %d", w.sourceOffset, w.overlay.ReadOffset(), w.overlay.OverlayOffset()) c := &WriterCheckpoint{ Offset: w.sourceOffset, Data: &OverlayEntryWriterCheckpoint{ ReadOffset: w.overlay.ReadOffset(), OverlayOffset: w.overlay.OverlayOffset(), }, } return c, nil } func (w *overlayEntryWriter) Resume(c *WriterCheckpoint) (int64, error) { err := screw.MkdirAll(filepath.Dir(w.path), 0755) if err != nil { return 0, errors.WithStack(err) } f, err := screw.OpenFile(w.path, os.O_CREATE|os.O_WRONLY, os.FileMode(0644)) if err != nil { return 0, errors.WithStack(err) } w.file = f if c != nil { // we might need to seek y'all cc, ok := c.Data.(*OverlayEntryWriterCheckpoint) if !ok { return 0, errors.New("invalid checkpoint for overlayEntryWriter") } // seek the reader first r := w.readSeeker _, err = r.Seek(cc.ReadOffset, io.SeekStart) if err != nil { return 0, errors.WithStack(err) } // now the writer _, err = f.Seek(cc.OverlayOffset, io.SeekStart) if err != nil { return 0, errors.WithStack(err) } w.sourceOffset = c.Offset debugf("making overlaywriter with ReadOffset %d, OverlayOffset %d", cc.ReadOffset, cc.OverlayOffset) w.overlay, err = overlay.NewOverlayWriter(r, cc.ReadOffset, f, cc.OverlayOffset) if err != nil { return 0, errors.WithStack(err) } } else { // the pool we got the readSeeker from doesn't need to give us a reader from 0, // so we need to seek here _, err = w.readSeeker.Seek(0, io.SeekStart) if err != nil { return 0, errors.WithStack(err) } r := w.readSeeker debugf("making overlaywriter with 0 ReadOffset and OverlayOffset") w.overlay, err = overlay.NewOverlayWriter(r, 0, f, 0) if err != nil { return 0, errors.WithStack(err) } } return w.sourceOffset, nil } func (w *overlayEntryWriter) Write(buf []byte) (int, error) { if w.overlay == nil { return 0, ErrUninitializedWriter } n, err := w.overlay.Write(buf) w.sourceOffset += int64(n) return n, err } func (w *overlayEntryWriter) Finalize() error { if w.overlay != nil { err := w.overlay.Finalize() if err != nil { return errors.WithMessage(err, "finalizing overlay writer") } w.overlay = nil } err := w.file.Sync() if err != nil { return errors.WithMessage(err, "syncing overlay patch file") } return nil } func (w *overlayEntryWriter) Close() error { return w.file.Close() } func init() { gob.Register(&OverlayEntryWriterCheckpoint{}) gob.Register(&OverlayBowlCheckpoint{}) }
[ "\"BOWL_DEBUG_BROKEN_RENAME\"", "\"BOWL_OVERLAY_VERBOSE\"" ]
[]
[ "BOWL_DEBUG_BROKEN_RENAME", "BOWL_OVERLAY_VERBOSE" ]
[]
["BOWL_DEBUG_BROKEN_RENAME", "BOWL_OVERLAY_VERBOSE"]
go
2
0
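Note on the per-record metadata above: each content blob is followed by the environment-variable tokens exactly as quoted in the source, the bare variable names, a JSON rendering of that same list, the language tag, and two counts. Below is a minimal sketch of how the bare-name and count fields could be derived from the quoted tokens; the helper name derive_const_args and the use of json.dumps are illustrative assumptions, not the corpus tooling itself.

    import json

    def derive_const_args(environment_tokens):
        """Strip source-level quoting from raw tokens and build the
        bare-name list, its JSON rendering, and the count (assumed layout)."""
        names = [token.strip('"') for token in environment_tokens]
        return {
            "names": names,
            "names_json": json.dumps(names),
            "count": len(names),
        }

    # Example with the tokens recorded for pwr/bowl/bowl_overlay.go:
    print(derive_const_args(['"BOWL_DEBUG_BROKEN_RENAME"', '"BOWL_OVERLAY_VERBOSE"']))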
src/tests/ftest/launch.py
#!/usr/bin/python2 """ (C) Copyright 2018-2019 Intel Corporation. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE The Government's rights to use, modify, reproduce, release, perform, display, or disclose this software are subject to the terms of the Apache License as provided in Contract No. B609815. Any reproduction of computer software, computer software documentation, or portions thereof marked with this legend must also reproduce the markings. """ from __future__ import print_function from argparse import ArgumentParser, RawDescriptionHelpFormatter import json import os import re import socket import subprocess from sys import version_info import time import yaml from ClusterShell.NodeSet import NodeSet from ClusterShell.Task import task_self try: # For python versions >= 3.2 from tempfile import TemporaryDirectory except ImportError: # Basic implementation of TemporaryDirectory for python versions < 3.2 from tempfile import mkdtemp from shutil import rmtree class TemporaryDirectory(object): # pylint: disable=too-few-public-methods """Create a temporary directory. When the last reference of this object goes out of scope the directory and its contents are removed. """ def __init__(self): """Initialize a TemporaryDirectory object.""" self.name = mkdtemp() def __del__(self): """Destroy a TemporaryDirectory object.""" rmtree(self.name) TEST_DAOS_SERVER_YAML = "daos_avocado_test.yaml" BASE_LOG_FILE_YAML = "./data/daos_server_baseline.yaml" SERVER_KEYS = ( "test_servers", ) CLIENT_KEYS = ( "test_clients", ) def get_build_environment(): """Obtain DAOS build environment variables from the .build_vars.json file. Returns: dict: a dictionary of DAOS build environment variable names and values """ build_vars_file = os.path.join( os.path.dirname(os.path.realpath(__file__)), "../../.build_vars.json") with open(build_vars_file) as vars_file: return json.load(vars_file) def get_temporary_directory(base_dir=None): """Get the temporary directory used by functional tests. Args: base_dir (str, optional): base installation directory. Defaults to None. Returns: str: the full path of the temporary directory """ if base_dir is None: base_dir = get_build_environment()["PREFIX"] if base_dir == "/usr": tmp_dir = os.getenv( "DAOS_TEST_SHARED_DIR", os.path.expanduser("~/daos_test")) else: tmp_dir = os.path.join(base_dir, "tmp") # Make sure the temporary directory exists to prevent pydaos import errors if not os.path.exists(tmp_dir): os.makedirs(tmp_dir) return tmp_dir def set_test_environment(): """Set up the test environment. Returns: None """ base_dir = get_build_environment()["PREFIX"] bin_dir = os.path.join(base_dir, "bin") sbin_dir = os.path.join(base_dir, "sbin") # /usr/sbin is not setup on non-root user for CI nodes. # SCM formatting tool mkfs.ext4 is located under # /usr/sbin directory. 
usr_sbin = os.path.sep + os.path.join("usr", "sbin") path = os.environ.get("PATH") # Get the default interface to use if OFI_INTERFACE is not set print("Detecting network devices") available_interfaces = {} # Find all the /sys/class/net interfaces on the launch node (excluding lo) net_path = os.path.join(os.path.sep, "sys", "class", "net") for device in sorted([dev for dev in os.listdir(net_path) if dev != "lo"]): # Get the interface state - only include active (up) interfaces with open(os.path.join(net_path, device, "operstate"), "r") as buffer: state = buffer.read().strip() # Get the interface speed - used to select the fastest available with open(os.path.join(net_path, device, "speed"), "r") as buffer: speed = int(buffer.read().strip()) print( " - {0:<5} (speed: {1:>6} state: {2})".format( device, speed, state)) # Only include the first active interface for each speed - first is # determined by an alphabetic sort: ib0 will be checked before ib1 if state.lower() == "up" and speed not in available_interfaces: available_interfaces[speed] = device try: # Select the fastest active interface available by sorting the speeds interface = available_interfaces[sorted(available_interfaces)[-1]] except IndexError: print( "Error obtaining a default interface from: {}".format( os.listdir(net_path))) exit(1) print( "Using {} as the default interface from {}".format( interface, available_interfaces)) # Update env definitions os.environ["PATH"] = ":".join([bin_dir, sbin_dir, usr_sbin, path]) os.environ["DAOS_SINGLETON_CLI"] = "1" os.environ["CRT_CTX_SHARE_ADDR"] = "1" os.environ["OFI_INTERFACE"] = os.environ.get("OFI_INTERFACE", interface) os.environ["CRT_ATTACH_INFO_PATH"] = get_temporary_directory(base_dir) # Python paths required for functional testing python_version = "python{}{}".format( version_info.major, "" if version_info.major > 2 else ".{}".format(version_info.minor)) required_python_paths = [ os.path.abspath("util/apricot"), os.path.abspath("util"), os.path.join(base_dir, "lib64", python_version, "site-packages"), ] # Check the PYTHONPATH env definition python_path = os.environ.get("PYTHONPATH") if python_path is None or python_path == "": # Use the required paths to define the PYTHONPATH env if it is not set os.environ["PYTHONPATH"] = ":".join(required_python_paths) else: # Append any missing required paths to the existing PYTHONPATH env defined_python_paths = [ os.path.abspath(os.path.expanduser(path)) for path in python_path.split(":")] for required_path in required_python_paths: if required_path not in defined_python_paths: python_path += ":" + required_path os.environ["PYTHONPATH"] = python_path def get_output(cmd): """Get the output of given command executed on this host. Args: cmd (str): command from which to obtain the output Returns: str: command output """ try: print("Running {}".format(cmd)) return subprocess.check_output( cmd, stderr=subprocess.STDOUT, shell=True) except subprocess.CalledProcessError as err: print("Error executing '{}':\n\t{}".format(cmd, err)) exit(1) def time_command(cmd): """Execute the command on this host and display its duration. Args: cmd (str): command to time Returns: int: return code of the command """ print("Running {}".format(cmd)) start_time = int(time.time()) return_code = subprocess.call(cmd, shell=True) end_time = int(time.time()) print("Total test time: {}s".format(end_time - start_time)) return return_code def spawn_commands(host_list, command, timeout=120): """Run the command on each specified host in parallel. 
Args: host_list (list): list of hosts command (str): command to run on each host timeout (int): number of seconds to wait for all jobs to complete Returns: bool: True if the command completed successfully (rc=0) on each specified host; False otherwise """ # Create a ClusterShell Task to run the command in parallel on the hosts nodes = NodeSet.fromlist(host_list) task = task_self() # task.set_info('debug', True) # Enable forwarding of the ssh authentication agent connection task.set_info("ssh_options", "-oForwardAgent=yes") print("Running on {}: {}".format(nodes, command)) task.run(command=command, nodes=nodes, timeout=timeout) # Create a dictionary of hosts for each unique return code results = {code: hosts for code, hosts in task.iter_retcodes()} # Determine if the command completed successfully across all the hosts status = len(results) == 1 and 0 in results if not status: print(" Errors detected running \"{}\":".format(command)) # Display the command output for code in sorted(results): output_data = list(task.iter_buffers(results[code])) if len(output_data) == 0: err_nodes = NodeSet.fromlist(results[code]) print(" {}: rc={}, output: <NONE>".format(err_nodes, code)) else: for output, o_hosts in output_data: n_set = NodeSet.fromlist(o_hosts) lines = str(output).splitlines() if len(lines) > 1: output = "\n {}".format("\n ".join(lines)) print(" {}: rc={}, output: {}".format(n_set, code, output)) # List any hosts that timed out timed_out = [str(hosts) for hosts in task.iter_keys_timeout()] if timed_out: print(" {}: timeout detected".format(NodeSet.fromlist(timed_out))) return status def find_values(obj, keys, key=None, val_type=list): """Find dictionary values of a certain type specified with certain keys. Args: obj (obj): a python object; initailly the dictionary to search keys (list): list of keys to find their matching list values key (str, optional): key to check for a match. Defaults to None. Returns: dict: a dictionary of each matching key and its value """ matches = {} if isinstance(obj, val_type) and isinstance(key, str) and key in keys: # Match found matches[key] = obj elif isinstance(obj, dict): # Recursively look for matches in each dictionary entry for key, val in obj.items(): matches.update(find_values(val, keys, key, val_type)) elif isinstance(obj, list): # Recursively look for matches in each list entry for item in obj: matches.update(find_values(item, keys, None, val_type)) return matches def get_test_list(tags): """Generate a list of tests and avocado tag filter from a list of tags. Args: tags (list): a list of tag or test file names Returns: (str, list): a tuple of the avacado tag filter and lists of tests """ test_tags = [] test_list = [] for tag in tags: if ".py" in tag: # Assume '.py' indicates a test and just add it to the list test_list.append(tag) else: # Otherwise it is assumed that this is a tag test_tags.append(" --filter-by-tags={}".format(tag)) # Add to the list of tests any test that matches the specified tags. If no # tags and no specific tests have been specified then all of the functional # tests will be added. if test_tags or not test_list: command = " | ".join([ "avocado list --paginator off{} ./".format(" ".join(test_tags)), r"sed -ne '/INSTRUMENTED/s/.* \([^:]*\):.*/\1/p'", "uniq"]) test_list.extend(get_output(command).splitlines()) return " ".join(test_tags), test_list def get_test_files(test_list, args, tmp_dir): """Get a list of the test scripts to run and their yaml files. 
Args: test_list (list): list of test scripts to run args (argparse.Namespace): command line arguments for this program tmp_dir (TemporaryDirectory): temporary directory object to use to write modified yaml files Returns: list: a list of dictionaries of each test script and yaml file; If there was an issue replacing a yaml host placeholder the yaml dictionary entry will be set to None. """ test_files = [{"py": test, "yaml": None} for test in test_list] for test_file in test_files: base, _ = os.path.splitext(test_file["py"]) test_file["yaml"] = replace_yaml_file( "{}.yaml".format(base), args, tmp_dir) return test_files def replace_yaml_file(yaml_file, args, tmp_dir): """Replace the server/client yaml file placeholders. Replace any server or client yaml file placeholder names with the host names provided by the command line arguments in a copy of the original test yaml file. If no replacements are specified return the original test yaml file. Args: yaml_file (str): test yaml file args (argparse.Namespace): command line arguments for this program tmp_dir (TemporaryDirectory): temporary directory object to use to write modified yaml files Returns: str: the test yaml file; None if the yaml file contains placeholders w/o replacements """ if args.test_servers: # Determine which placeholder names need to be replaced in this yaml by # getting the lists of hosts specified in the yaml file unique_hosts = {"servers": set(), "clients": set()} for key, placeholders in find_yaml_hosts(yaml_file).items(): if key in SERVER_KEYS: unique_hosts["servers"].update(placeholders) elif key in CLIENT_KEYS: # If no specific clients are specified use a specified server key = "clients" if args.test_clients else "servers" unique_hosts[key].update(placeholders) # Map the placeholder names to values provided by the user mapping_pairings = [("servers", args.test_servers.split(","))] if args.test_clients: mapping_pairings.append(("clients", args.test_clients.split(","))) mapping = { tmp: node_list[index] if index < len(node_list) else None for key, node_list in mapping_pairings for index, tmp in enumerate(sorted(unique_hosts[key]))} # Read in the contents of the yaml file to retain the !mux entries print("Reading {}".format(yaml_file)) with open(yaml_file) as yaml_buffer: file_str = yaml_buffer.read() # Apply the placeholder replacements missing_replacements = [] for placeholder, host in mapping.items(): if host: # Replace the host entries with their mapped values file_str = re.sub( "- {}".format(placeholder), "- {}".format(host), file_str) elif args.discard: # Discard any host entries without a replacement value file_str = re.sub(r"\s+- {}".format(placeholder), "", file_str) else: # Keep track of any placeholders without a replacement value missing_replacements.append(placeholder) if missing_replacements: # Report an error for all of the placeholders w/o a replacement print( "Error: Placeholders missing replacements in {}:\n {}".format( yaml_file, ", ".join(missing_replacements))) return None # Write the modified yaml file into a temporary file. Use the path to # ensure unique yaml files for tests with the same filename. yaml_name = get_test_category(yaml_file) yaml_file = os.path.join(tmp_dir.name, "{}.yaml".format(yaml_name)) print("Creating {}".format(yaml_file)) with open(yaml_file, "w") as yaml_buffer: yaml_buffer.write(file_str) # Return the untouched or modified yaml file return yaml_file def run_tests(test_files, tag_filter, args): """Run or display the test commands. 
Args: test_files (dict): a list of dictionaries of each test script/yaml file tag_filter (str): the avocado tag filter command line argument args (argparse.Namespace): command line arguments for this program Returns: int: a bitwise-or of all the return codes of each 'avocado run' command """ return_code = 0 # Determine the location of the avocado logs for archiving or renaming avocado_logs_dir = None if args.archive or args.rename: avocado_logs_dir = get_output( "avocado config | sed -ne '/logs_dir/s/.* *//p'").strip() avocado_logs_dir = os.path.expanduser(avocado_logs_dir) print("Avocado logs stored in {}".format(avocado_logs_dir)) # Create the base avocado run command command_list = [ "avocado", "run", "--ignore-missing-references on", "--show-job-log" if not args.sparse else "", "--html-job-result on", tag_filter ] # Run each test for test_file in test_files: if isinstance(test_file["yaml"], str): # Optionally clean the log files before running this test on the # servers and clients specified for this test if args.clean: if not clean_logs(test_file["yaml"], args): return 128 # Execute this test test_command_list = list(command_list) test_command_list.append("--mux-yaml {}".format(test_file["yaml"])) test_command_list.append("-- {}".format(test_file["py"])) return_code |= time_command( " ".join([item for item in test_command_list if item != ""])) # Optionally store all of the doas server and client log files # along with the test results if args.archive: archive_logs(avocado_logs_dir, test_file["yaml"], args) # Optionally rename the test results directory for this test if args.rename: rename_logs(avocado_logs_dir, test_file["py"]) else: # The test was not run due to an error replacing host placeholders # in the yaml file. Treat this like a failed avocado command. return_code |= 4 return return_code def get_yaml_data(yaml_file): """Get the contents of a yaml file as a dictionary. Args: yaml_file (str): yaml file to read Raises: Exception: if an error is encountered reading the yaml file Returns: dict: the contents of the yaml file """ yaml_data = {} if os.path.isfile(yaml_file): with open(yaml_file, "r") as open_file: try: file_data = open_file.read() yaml_data = yaml.safe_load(file_data.replace("!mux", "")) except yaml.YAMLError as error: print("Error reading {}: {}".format(yaml_file, error)) exit(1) return yaml_data def get_log_files(config_yaml, daos_files=None): """Get a list of DAOS files used by the specified yaml file. Args: config_yaml (str): yaml file defining log file locations daos_files (dict, optional): dictionary of default DAOS log files whose keys define which yaml log parameters to use to update the default values. Defaults to None. 
Returns: dict: a dictionary of DAOS file name keys and full path values """ # List of default DAOS files if daos_files is None: daos_core_test_dir = os.path.split( os.getenv("D_LOG_FILE", "/tmp/server.log"))[0] daos_files = { "log_file": "/tmp/server.log", "admin_log_file": "/tmp/daos_admin.log", "server_log_file": "/tmp/server.log", "agent_log_file": "/tmp/daos_agent.log", "control_log_file": "/tmp/daos_control.log", "helper_log_file": "/tmp/daos_admin.log", "socket_dir": "/tmp/daos_sockets", "debug_log_default": os.getenv("D_LOG_FILE", "/tmp/daos.log"), "test_variant_client_logs": "{}/*_client_daos.log".format(daos_core_test_dir), "test_variant_server_logs": "{}/*_server_daos.log".format(daos_core_test_dir), } # Determine the log file locations defined by the last run test print("Checking {} for daos log file locations".format(config_yaml)) yaml_data = get_yaml_data(config_yaml) # Replace any default log file with its yaml definition matches = find_values(yaml_data, daos_files.keys(), val_type=str) for key, value in matches.items(): if value != daos_files[key]: print( " Update found for {}: {} -> {}".format( key, daos_files[key], value)) daos_files[key] = value return daos_files def find_yaml_hosts(test_yaml): """Find the all the host values in the specified yaml file. Args: test_yaml (str): test yaml file Returns: dict: a dictionary of each host key and its host values """ return find_values(get_yaml_data(test_yaml), SERVER_KEYS + CLIENT_KEYS) def get_hosts_from_yaml(test_yaml, args): """Extract the list of hosts from the test yaml file. This host will be included in the list if no clients are explicitly called out in the test's yaml file. Args: test_yaml (str): test yaml file args (argparse.Namespace): command line arguments for this program Returns: list: a unique list of hosts specified in the test's yaml file """ host_set = set() if args.include_localhost: host_set.add(socket.gethostname().split(".")[0]) found_client_key = False for key, value in find_yaml_hosts(test_yaml).items(): host_set.update(value) if key in CLIENT_KEYS: found_client_key = True # Include this host as a client if no clients are specified if not found_client_key: host_set.add(socket.gethostname().split(".")[0]) return sorted(list(host_set)) def clean_logs(test_yaml, args): """Remove the test log files on each test host. Args: test_yaml (str): yaml file containing host names args (argparse.Namespace): command line arguments for this program """ # Use the default server yaml and then the test yaml to update the default # DAOS log file locations. This should simulate how the test defines which # log files it will use when it is run. log_files = get_log_files(test_yaml, get_log_files(BASE_LOG_FILE_YAML)) host_list = get_hosts_from_yaml(test_yaml, args) command = "sudo rm -fr {}".format(" ".join(log_files.values())) print("Cleaning logs on {}".format(host_list)) if not spawn_commands(host_list, command): print("Error cleaning logs, aborting") return False return True def archive_logs(avocado_logs_dir, test_yaml, args): """Copy all of the host test log files to the avocado results directory. 
Args: avocado_logs_dir ([type]): [description] test_yaml (str): yaml file containing host names args (argparse.Namespace): command line arguments for this program """ this_host = socket.gethostname().split(".")[0] log_files = get_log_files( os.path.join(get_temporary_directory(), TEST_DAOS_SERVER_YAML)) host_list = get_hosts_from_yaml(test_yaml, args) doas_logs_dir = os.path.join(avocado_logs_dir, "latest", "daos_logs") # Create a subdirectory in the avocado logs directory for this test print("Archiving host logs from {} in {}".format(host_list, doas_logs_dir)) get_output("mkdir {}".format(doas_logs_dir)) # Create a list of log files that are not directories non_dir_files = [ log_file for log_file in log_files.values() if os.path.splitext(os.path.basename(log_file))[1] != ""] # Copy any log files that exist on the test hosts and remove them from the # test host if the copy is successful. Attempt all of the commands and # report status at the end of the loop. Include a listing of the file # related to any failed command. commands = [ "set -eu", "rc=0", "copied=()", "for file in {}".format(" ".join(non_dir_files)), "do if [ -e $file ]", "then if scp $file {}:{}/${{file##*/}}-$(hostname -s)".format( this_host, doas_logs_dir), "then copied+=($file)", "if ! sudo rm -fr $file", "then ((rc++))", "ls -al $file", "fi", "else ((rc++))", "ls -al $file", "fi", "fi", "done", "echo Copied ${copied[@]:-no files}", "exit $rc", ] spawn_commands(host_list, "; ".join(commands)) def rename_logs(avocado_logs_dir, test_file): """Append the test name to its avocado job-results directory name. Args: avocado_logs_dir (str): avocado job-results directory test_file (str): the test python file """ test_name = get_test_category(test_file) test_logs_lnk = os.path.join(avocado_logs_dir, "latest") test_logs_dir = os.path.realpath(test_logs_lnk) new_test_logs_dir = "{}-{}".format(test_logs_dir, test_name) try: os.rename(test_logs_dir, new_test_logs_dir) os.remove(test_logs_lnk) os.symlink(new_test_logs_dir, test_logs_lnk) print("Renamed {} to {}".format(test_logs_dir, new_test_logs_dir)) except OSError as error: print( "Error renaming {} to {}: {}".format( test_logs_dir, new_test_logs_dir, error)) def get_test_category(test_file): """Get a category for the specified test using its path and name. Args: test_file (str): the test python file Returns: str: concatenation of the test path and base filename joined by dashes """ file_parts = os.path.split(test_file) return "-".join( [os.path.splitext(os.path.basename(part))[0] for part in file_parts]) def main(): """Launch DAOS functional tests.""" # Parse the command line arguments description = [ "DAOS functional test launcher", "", "Launches tests by specifying a test tag. 
For example:", "\tbadconnect --run pool connect tests that pass NULL ptrs, etc.", "\tbadevict --run pool client evict tests that pass NULL ptrs, " "etc.", "\tbadexclude --run pool target exclude tests that pass NULL ptrs, " "etc.", "\tbadparam --run tests that pass NULL ptrs, etc.", "\tbadquery --run pool query tests that pass NULL ptrs, etc.", "\tmulticreate --run tests that create multiple pools at once", "\tmultitarget --run tests that create pools over multiple servers", "\tpool --run all pool related tests", "\tpoolconnect --run all pool connection related tests", "\tpooldestroy --run all pool destroy related tests", "\tpoolevict --run all client pool eviction related tests", "\tpoolinfo --run all pool info retrieval related tests", "\tquick --run tests that complete quickly, with minimal " "resources", "", "Multiple tags can be specified:", "\ttag_a,tag_b -- run all tests with both tag_a and tag_b", "\ttag_a tag_b -- run all tests with either tag_a or tag_b", "", "Specifying no tags will run all of the available tests.", "", "Tests can also be launched by specifying a path to the python script " "instead of its tag.", "", "The placeholder server and client names in the yaml file can also be " "replaced with the following options:", "\tlaunch.py -ts node1,node2 -tc node3 <tag>", "\t - Use node[1-2] to run the daos server in each test", "\t - Use node3 to run the daos client in each test", "\tlaunch.py -ts node1,node2 <tag>", "\t - Use node[1-2] to run the daos server or client in each test", "\tlaunch.py -ts node1,node2 -d <tag>", "\t - Use node[1-2] to run the daos server or client in each test", "\t - Discard of any additional server or client placeholders for " "each test", "", "You can also specify the sparse flag -s to limit output to " "pass/fail.", "\tExample command: launch.py -s pool" ] parser = ArgumentParser( prog="launcher.py", formatter_class=RawDescriptionHelpFormatter, description="\n".join(description)) parser.add_argument( "-a", "--archive", action="store_true", help="archive host log files in the avocado job-results directory") parser.add_argument( "-c", "--clean", action="store_true", help="remove daos log files from the test hosts prior to the test") parser.add_argument( "-d", "--discard", action="store_true", help="when replacing server/client yaml file placeholders, discard " "any placeholders that do not end up with a replacement value") parser.add_argument( "-i", "--include_localhost", action="store_true", help="include the local host when cleaning and archiving") parser.add_argument( "-l", "--list", action="store_true", help="list the python scripts that match the specified tags") parser.add_argument( "-r", "--rename", action="store_true", help="rename the avocado test logs directory to include the test name") parser.add_argument( "-s", "--sparse", action="store_true", help="limit output to pass/fail") parser.add_argument( "tags", nargs="*", type=str, help="test category or file to run") parser.add_argument( "-tc", "--test_clients", action="store", help="comma-separated list of hosts to use as replacement values for " "client placeholders in each test's yaml file") parser.add_argument( "-ts", "--test_servers", action="store", help="comma-separated list of hosts to use as replacement values for " "server placeholders in each test's yaml file. 
If the " "'--test_clients' argument is not specified, this list of hosts " "will also be used to replace client placeholders.") args = parser.parse_args() print("Arguments: {}".format(args)) # Setup the user environment set_test_environment() # Process the tags argument to determine which tests to run tag_filter, test_list = get_test_list(args.tags) # Verify at least one test was requested if len(test_list) == 0: print("ERROR: No tests or tags found via {}".format(args.tags)) exit(1) # Display a list of the tests matching the tags print("Detected tests: \n{}".format(" \n".join(test_list))) if args.list: exit(0) # Create a temporary directory tmp_dir = TemporaryDirectory() # Create a dictionary of test and their yaml files test_files = get_test_files(test_list, args, tmp_dir) # Run all the tests status = run_tests(test_files, tag_filter, args) # Process the avocado run return codes and only treat job and command # failures as errors. ret_code = 0 if status == 0: print("All avocado tests passed!") else: if status & 1 == 1: print("Detected one or more avocado test failures!") if status & 8 == 8: print("Detected one or more interrupted avocado jobs!") if status & 2 == 2: print("ERROR: Detected one or more avocado job failures!") ret_code = 1 if status & 4 == 4: print("ERROR: Detected one or more failed avocado commands!") ret_code = 1 if status & 128 == 128: print("ERROR: Failed to clean logs in preparation for test run!") ret_code = 1 exit(ret_code) if __name__ == "__main__": main()
[]
[]
[ "DAOS_TEST_SHARED_DIR", "DAOS_SINGLETON_CLI", "CRT_CTX_SHARE_ADDR", "CRT_ATTACH_INFO_PATH", "OFI_INTERFACE", "D_LOG_FILE", "PATH", "PYTHONPATH" ]
[]
["DAOS_TEST_SHARED_DIR", "DAOS_SINGLETON_CLI", "CRT_CTX_SHARE_ADDR", "CRT_ATTACH_INFO_PATH", "OFI_INTERFACE", "D_LOG_FILE", "PATH", "PYTHONPATH"]
python
8
0
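In the launch.py record above, set_test_environment() chooses a default OFI_INTERFACE by scanning /sys/class/net, keeping only interfaces whose operstate is "up", and picking the first device (alphabetically) at the highest reported speed. A standalone sketch of that selection logic follows; the function name and the omission of error handling for unreadable speed files mirror assumptions about the record, not a verified extract.

    import os

    def pick_default_interface(net_path="/sys/class/net"):
        """Return the fastest interface that is currently up, or None."""
        fastest = {}  # speed -> first device at that speed, in alphabetical order
        for device in sorted(dev for dev in os.listdir(net_path) if dev != "lo"):
            with open(os.path.join(net_path, device, "operstate")) as handle:
                state = handle.read().strip()
            with open(os.path.join(net_path, device, "speed")) as handle:
                speed = int(handle.read().strip())
            if state.lower() == "up" and speed not in fastest:
                fastest[speed] = device
        return fastest[max(fastest)] if fastest else None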
hiren/settings.py
""" Django settings for hiren project. Generated by 'django-admin startproject' using Django 1.8.4. For more information on this file, see https://docs.djangoproject.com/en/1.8/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.8/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os import json from celery.schedules import crontab BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # load json file baby :D try: with open('config.json') as f: JSON_DATA = json.load(f) except FileNotFoundError: with open('config.sample.json') as f: JSON_DATA = json.load(f) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = os.environ.get('SECRET_KEY', JSON_DATA['secret_key']) # SECURITY WARNING: don't run with debug turned on in production! DEBUG = os.environ.get('DEBUG', False) ALLOWED_HOSTS = ['*'] # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'debug_toolbar', 'github' ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'django.middleware.security.SecurityMiddleware', ) ROOT_URLCONF = 'hiren.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': ['templates'], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'hiren.wsgi.application' # Database # https://docs.djangoproject.com/en/1.8/ref/settings/#databases if 'TRAVIS' in os.environ: DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': 'travisci', 'USER': 'postgres', 'PASSWORD': '', 'HOST': 'localhost', 'PORT': '', } } else: DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': 'hiren_github_management', 'USER': 'hiren', 'PASSWORD': 'hiren', 'HOST': 'localhost', 'PORT': '', } } # Internationalization # https://docs.djangoproject.com/en/1.8/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'Asia/Dhaka' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.8/howto/static-files/ STATIC_URL = '/static/' STATICFILES_FINDERS = ( "django.contrib.staticfiles.finders.FileSystemFinder", "django.contrib.staticfiles.finders.AppDirectoriesFinder" ) STATICFILES_DIRS = ( os.path.join(BASE_DIR, "static"), ) LOGIN_URL = '/' # CELERY STUFF BROKER_URL = 'redis://localhost:6379' CELERY_RESULT_BACKEND = 'redis://localhost:6379' CELERY_ACCEPT_CONTENT = ['application/json'] CELERY_TASK_SERIALIZER = 'json' CELERY_RESULT_SERIALIZER = 'json' CELERYBEAT_SCHEDULE = { 'add-every-30-seconds': { 'task': 'github.tasks.get_data', 'schedule': crontab(minute=0, hour='22'), # execute every day at 10 pm }, }
[]
[]
[ "SECRET_KEY", "DEBUG" ]
[]
["SECRET_KEY", "DEBUG"]
python
2
0
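The settings.py record reads SECRET_KEY and DEBUG with os.environ.get(); anything present in the environment comes back as a string, so DEBUG="False" would still evaluate as truthy. A hedged sketch of a stricter boolean parse is shown below; the helper name env_bool is an assumption for illustration and is not part of the project.

    import os

    def env_bool(name, default=False):
        """Interpret common true/false spellings of an environment variable."""
        value = os.environ.get(name)
        if value is None:
            return default
        return value.strip().lower() in ("1", "true", "yes", "on")

    DEBUG = env_bool("DEBUG", False)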
examples/service/sync/document/page/document_page_example.go
package main import ( "log" "os" "github.com/RJPearson94/twilio-sdk-go" v1 "github.com/RJPearson94/twilio-sdk-go/service/sync/v1" "github.com/RJPearson94/twilio-sdk-go/service/sync/v1/service/documents" "github.com/RJPearson94/twilio-sdk-go/session/credentials" ) var syncClient *v1.Sync func init() { creds, err := credentials.New(credentials.Account{ Sid: os.Getenv("TWILIO_ACCOUNT_SID"), AuthToken: os.Getenv("TWILIO_AUTH_TOKEN"), }) if err != nil { log.Panicf("%s", err.Error()) } syncClient = twilio.NewWithCredentials(creds).Sync.V1 } func main() { resp, err := syncClient. Service("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"). Documents. Page(&documents.DocumentsPageOptions{}) if err != nil { log.Panicf("%s", err.Error()) } log.Printf("%v document(s) found on page", len(resp.Documents)) }
[ "\"TWILIO_ACCOUNT_SID\"", "\"TWILIO_AUTH_TOKEN\"" ]
[]
[ "TWILIO_AUTH_TOKEN", "TWILIO_ACCOUNT_SID" ]
[]
["TWILIO_AUTH_TOKEN", "TWILIO_ACCOUNT_SID"]
go
2
0
common/_termcolor.py
# coding: utf-8 # Copyright (c) 2008-2011 Volvox Development Team # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # # Author: Konstantin Lepa <[email protected]> """ANSII Color formatting for output in terminal.""" from __future__ import print_function import os __ALL__ = [ 'colored', 'cprint' ] VERSION = (1, 1, 0) ATTRIBUTES = dict( list(zip([ 'bold', 'dark', '', 'underline', 'blink', '', 'reverse', 'concealed' ], list(range(1, 9)) )) ) del ATTRIBUTES[''] HIGHLIGHTS = dict( list(zip([ 'on_grey', 'on_red', 'on_green', 'on_yellow', 'on_blue', 'on_magenta', 'on_cyan', 'on_white' ], list(range(40, 48)) )) ) COLORS = dict( list(zip([ 'grey', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white', ], list(range(30, 38)) )) ) RESET = '\033[0m' def colored(text, color=None, on_color=None, attrs=None): """Colorize text. Available text colors: red, green, yellow, blue, magenta, cyan, white. Available text highlights: on_red, on_green, on_yellow, on_blue, on_magenta, on_cyan, on_white. Available attributes: bold, dark, underline, blink, reverse, concealed. Example: colored('Hello, World!', 'red', 'on_grey', ['blue', 'blink']) colored('Hello, World!', 'green') """ if os.getenv('ANSI_COLORS_DISABLED') is None: fmt_str = '\033[%dm%s' if color is not None: text = fmt_str % (COLORS[color], text) if on_color is not None: text = fmt_str % (HIGHLIGHTS[on_color], text) if attrs is not None: for attr in attrs: text = fmt_str % (ATTRIBUTES[attr], text) text += RESET return text def cprint(text, color=None, on_color=None, attrs=None, **kwargs): """Print colorize text. It accepts arguments of print function. 
""" print((colored(text, color, on_color, attrs)), **kwargs) if __name__ == '__main__': print('Current terminal type: %s' % node.getenv('TERM')) print('Test basic colors:') cprint('Grey color', 'grey') cprint('Red color', 'red') cprint('Green color', 'green') cprint('Yellow color', 'yellow') cprint('Blue color', 'blue') cprint('Magenta color', 'magenta') cprint('Cyan color', 'cyan') cprint('White color', 'white') print(('-' * 78)) print('Test highlights:') cprint('On grey color', on_color='on_grey') cprint('On red color', on_color='on_red') cprint('On green color', on_color='on_green') cprint('On yellow color', on_color='on_yellow') cprint('On blue color', on_color='on_blue') cprint('On magenta color', on_color='on_magenta') cprint('On cyan color', on_color='on_cyan') cprint('On white color', color='grey', on_color='on_white') print('-' * 78) print('Test attributes:') cprint('Bold grey color', 'grey', attrs=['bold']) cprint('Dark red color', 'red', attrs=['dark']) cprint('Underline green color', 'green', attrs=['underline']) cprint('Blink yellow color', 'yellow', attrs=['blink']) cprint('Reversed blue color', 'blue', attrs=['reverse']) cprint('Concealed Magenta color', 'magenta', attrs=['concealed']) cprint('Bold underline reverse cyan color', 'cyan', attrs=['bold', 'underline', 'reverse']) cprint('Dark blink concealed white color', 'white', attrs=['dark', 'blink', 'concealed']) print(('-' * 78)) print('Test mixing:') cprint('Underline red on grey color', 'red', 'on_grey', ['underline']) cprint('Reversed green on red color', 'green', 'on_red', ['reverse'])
[]
[]
[ "ANSI_COLORS_DISABLED" ]
[]
["ANSI_COLORS_DISABLED"]
python
1
0
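In the _termcolor.py record, colored() prefixes the text with one "\033[<code>m" sequence per color, highlight, and attribute, appends the reset sequence, and skips all of this when ANSI_COLORS_DISABLED is set. The short illustration below spells out the escape sequences produced for a bold red-on-grey string, using the code values from the record's tables (red = 31, on_grey = 40, bold = 1); it is a worked example, not an excerpt.

    text = "Hello"
    # color first, then highlight, then attribute, then reset, as in colored()
    wrapped = "\033[1m" + ("\033[40m" + ("\033[31m" + text)) + "\033[0m"
    print(repr(wrapped))  # '\x1b[1m\x1b[40m\x1b[31mHello\x1b[0m'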
qa/rpc-tests/p2p-acceptblock.py
#!/usr/bin/env python2 # # Distributed under the MIT/X11 software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # from test_framework.mininode import * from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * import time from test_framework.blocktools import create_block, create_coinbase ''' AcceptBlockTest -- test processing of unrequested blocks. Since behavior differs when receiving unrequested blocks from whitelisted peers versus non-whitelisted peers, this tests the behavior of both (effectively two separate tests running in parallel). Setup: two nodes, node0 and node1, not connected to each other. Node0 does not whitelist localhost, but node1 does. They will each be on their own chain for this test. We have one NodeConn connection to each, test_node and white_node respectively. The test: 1. Generate one block on each node, to leave IBD. 2. Mine a new block on each tip, and deliver to each node from node's peer. The tip should advance. 3. Mine a block that forks the previous block, and deliver to each node from corresponding peer. Node0 should not process this block (just accept the header), because it is unrequested and doesn't have more work than the tip. Node1 should process because this is coming from a whitelisted peer. 4. Send another block that builds on the forking block. Node0 should process this block but be stuck on the shorter chain, because it's missing an intermediate block. Node1 should reorg to this longer chain. 5. Send a duplicate of the block in #3 to Node0. Node0 should not process the block because it is unrequested, and stay on the shorter chain. 6. Send Node0 an inv for the height 3 block produced in #4 above. Node0 should figure out that Node0 has the missing height 2 block and send a getdata. 7. Send Node0 the missing block again. Node0 should process and the tip should advance. ''' # TestNode: bare-bones "peer". Used mostly as a conduit for a test to sending # p2p messages to a node, generating the messages in the main testing logic. class TestNode(NodeConnCB): def __init__(self): NodeConnCB.__init__(self) self.create_callback_map() self.connection = None def add_connection(self, conn): self.connection = conn # Track the last getdata message we receive (used in the test) def on_getdata(self, conn, message): self.last_getdata = message # Spin until verack message is received from the node. # We use this to signal that our test can begin. This # is called from the testing thread, so it needs to acquire # the global lock. def wait_for_verack(self): while True: with mininode_lock: if self.verack_received: return time.sleep(0.05) # Wrapper for the NodeConn's send_message function def send_message(self, message): self.connection.send_message(message) class AcceptBlockTest(BitcoinTestFramework): def add_options(self, parser): parser.add_option("--testbinary", dest="testbinary", default=os.getenv("MONALISACOIND", "monalisacoind"), help="monalisacoind binary to test") def setup_chain(self): initialize_chain_clean(self.options.tmpdir, 2) def setup_network(self): # Node0 will be used to test behavior of processing unrequested blocks # from peers which are not whitelisted, while Node1 will be used for # the whitelisted case. 
self.nodes = [] self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"], binary=self.options.testbinary)) self.nodes.append(start_node(1, self.options.tmpdir, ["-debug", "-whitelist=127.0.0.1"], binary=self.options.testbinary)) def run_test(self): # Setup the p2p connections and start up the network thread. test_node = TestNode() # connects to node0 (not whitelisted) white_node = TestNode() # connects to node1 (whitelisted) connections = [] connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node)) connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node)) test_node.add_connection(connections[0]) white_node.add_connection(connections[1]) NetworkThread().start() # Start up network handling in another thread # Test logic begins here test_node.wait_for_verack() white_node.wait_for_verack() # 1. Have both nodes mine a block (leave IBD) [ n.generate(1) for n in self.nodes ] tips = [ int ("0x" + n.getbestblockhash() + "L", 0) for n in self.nodes ] # 2. Send one block that builds on each tip. # This should be accepted. blocks_h2 = [] # the height 2 blocks on each node's chain for i in xrange(2): blocks_h2.append(create_block(tips[i], create_coinbase(), time.time()+1)) blocks_h2[i].solve() test_node.send_message(msg_block(blocks_h2[0])) white_node.send_message(msg_block(blocks_h2[1])) time.sleep(1) assert_equal(self.nodes[0].getblockcount(), 2) assert_equal(self.nodes[1].getblockcount(), 2) print "First height 2 block accepted by both nodes" # 3. Send another block that builds on the original tip. blocks_h2f = [] # Blocks at height 2 that fork off the main chain for i in xrange(2): blocks_h2f.append(create_block(tips[i], create_coinbase(), blocks_h2[i].nTime+1)) blocks_h2f[i].solve() test_node.send_message(msg_block(blocks_h2f[0])) white_node.send_message(msg_block(blocks_h2f[1])) time.sleep(1) # Give time to process the block for x in self.nodes[0].getchaintips(): if x['hash'] == blocks_h2f[0].hash: assert_equal(x['status'], "headers-only") for x in self.nodes[1].getchaintips(): if x['hash'] == blocks_h2f[1].hash: assert_equal(x['status'], "valid-headers") print "Second height 2 block accepted only from whitelisted peer" # 4. Now send another block that builds on the forking chain. blocks_h3 = [] for i in xrange(2): blocks_h3.append(create_block(blocks_h2f[i].sha256, create_coinbase(), blocks_h2f[i].nTime+1)) blocks_h3[i].solve() test_node.send_message(msg_block(blocks_h3[0])) white_node.send_message(msg_block(blocks_h3[1])) time.sleep(1) # Since the earlier block was not processed by node0, the new block # can't be fully validated. for x in self.nodes[0].getchaintips(): if x['hash'] == blocks_h3[0].hash: assert_equal(x['status'], "headers-only") # But this block should be accepted by node0 since it has more work. try: self.nodes[0].getblock(blocks_h3[0].hash) print "Unrequested more-work block accepted from non-whitelisted peer" except: raise AssertionError("Unrequested more work block was not processed") # Node1 should have accepted and reorged. assert_equal(self.nodes[1].getblockcount(), 3) print "Successfully reorged to length 3 chain from whitelisted peer" # 5. Test handling of unrequested block on the node that didn't process # Should still not be processed (even though it has a child that has more # work). 
test_node.send_message(msg_block(blocks_h2f[0])) # Here, if the sleep is too short, the test could falsely succeed (if the # node hasn't processed the block by the time the sleep returns, and then # the node processes it and incorrectly advances the tip). # But this would be caught later on, when we verify that an inv triggers # a getdata request for this block. time.sleep(1) assert_equal(self.nodes[0].getblockcount(), 2) print "Unrequested block that would complete more-work chain was ignored" # 6. Try to get node to request the missing block. # Poke the node with an inv for block at height 3 and see if that # triggers a getdata on block 2 (it should if block 2 is missing). with mininode_lock: # Clear state so we can check the getdata request test_node.last_getdata = None test_node.send_message(msg_inv([CInv(2, blocks_h3[0].sha256)])) time.sleep(1) with mininode_lock: getdata = test_node.last_getdata # Check that the getdata is for the right block assert_equal(len(getdata.inv), 1) assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256) print "Inv at tip triggered getdata for unprocessed block" # 7. Send the missing block for the third time (now it is requested) test_node.send_message(msg_block(blocks_h2f[0])) time.sleep(1) assert_equal(self.nodes[0].getblockcount(), 3) print "Successfully reorged to length 3 chain from non-whitelisted peer" [ c.disconnect_node() for c in connections ] if __name__ == '__main__': AcceptBlockTest().main()
[]
[]
[ "MONALISACOIND" ]
[]
["MONALISACOIND"]
python
1
0
tests/test_config.py
# Modified by SignalFx # Copyright (c) 2016-2018 Uber Technologies, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import import os import unittest import opentracing.tracer from jaeger_client import Config, ConstSampler, ProbabilisticSampler, RateLimitingSampler from jaeger_client.config import DEFAULT_THROTTLER_PORT from jaeger_client.metrics import MetricsFactory from jaeger_client.reporter import NullReporter from jaeger_client.senders import HTTPSender from jaeger_client import constants class ConfigTests(unittest.TestCase): def setUp(self): if Config.initialized(): Config._initialized = False def test_enabled(self): c = Config({'enabled': True}, service_name='x') assert c.enabled c = Config({'enabled': False}, service_name='x') assert not c.enabled def test_reporter_batch_size(self): c = Config({'reporter_batch_size': 12345}, service_name='x') assert c.reporter_batch_size == 12345 c = Config({}, service_name='x') assert c.reporter_batch_size == 10 def test_tags(self): os.environ['JAEGER_TAGS'] = 'a=b,c=d' c = Config({'tags': {'e': 'f'}}, service_name='x') assert c.tags == {'a': 'b', 'c': 'd', 'e': 'f'} c.create_tracer(NullReporter(), ConstSampler(True)) def test_no_sampler(self): c = Config({}, service_name='x') assert c.sampler is None def test_const_sampler(self): c = Config({'sampler': {'type': 'const', 'param': True}}, service_name='x') assert type(c.sampler) is ConstSampler assert c.sampler.decision c = Config({'sampler': {'type': 'const', 'param': False}}, service_name='x') assert type(c.sampler) is ConstSampler assert not c.sampler.decision def test_probabilistic_sampler(self): with self.assertRaises(Exception): cfg = {'sampler': {'type': 'probabilistic', 'param': 'xx'}} Config(cfg, service_name='x').sampler c = Config({'sampler': {'type': 'probabilistic', 'param': 0.5}}, service_name='x') assert type(c.sampler) is ProbabilisticSampler assert c.sampler.rate == 0.5 def test_rate_limiting_sampler(self): with self.assertRaises(Exception): cfg = {'sampler': {'type': 'rate_limiting', 'param': 'xx'}} Config(cfg, service_name='x').sampler c = Config({'sampler': {'type': 'rate_limiting', 'param': 1234}}, service_name='x') assert type(c.sampler) is RateLimitingSampler assert c.sampler.traces_per_second == 1234 def test_bad_sampler(self): c = Config({'sampler': {'type': 'bad-sampler'}}, service_name='x') with self.assertRaises(ValueError): c.sampler.is_sampled(0) def test_agent_reporting_host(self): c = Config({}, service_name='x') assert c.local_agent_reporting_host == 'localhost' c = Config({'local_agent': {'reporting_host': 'jaeger.local'}}, service_name='x') assert c.local_agent_reporting_host == 'jaeger.local' os.environ['JAEGER_AGENT_HOST'] = 'jaeger-env.local' c = Config({}, service_name='x') assert c.local_agent_reporting_host == 'jaeger-env.local' def test_max_tag_value_length(self): c = Config({}, service_name='x') assert c.max_tag_value_length == constants.MAX_TAG_VALUE_LENGTH c = Config({'max_tag_value_length': 333}, service_name='x') assert 
c.max_tag_value_length == 333 t = c.create_tracer(NullReporter(), ConstSampler(True)) assert t.max_tag_value_length == 333 def test_propagation(self): c = Config({}, service_name='x') assert c.propagation == {} c = Config({'propagation': 'b3'}, service_name='x') assert len(c.propagation) == 1 def test_throttler(self): c = Config({ 'throttler': {} }, service_name='x') assert not c.throttler_group() assert c.throttler_port == DEFAULT_THROTTLER_PORT assert c.throttler_refresh_interval == constants.DEFAULT_THROTTLER_REFRESH_INTERVAL c = Config({ 'throttler': { 'port': '1234', 'refresh_interval': '10' } }, service_name='x') assert c.throttler_group() assert c.throttler_port == 1234 assert c.throttler_refresh_interval == 10 c = Config({}, service_name='x') assert c.throttler_group() is None assert c.throttler_port is None assert c.throttler_refresh_interval is None def test_for_unexpected_config_entries(self): with self.assertRaises(Exception): Config({'unexpected': 'value'}, validate=True) def test_reporter_queue_size_valid(self): config = Config({'reporter_queue_size': 100}, service_name='x', validate=True) assert config.reporter_queue_size == 100 def test_missing_service_name(self): with self.assertRaises(ValueError): Config({}) def test_disable_metrics(self): config = Config({'metrics': False}, service_name='x') assert isinstance(config._metrics_factory, MetricsFactory) def test_initialize_tracer(self): c = Config({}, service_name='x') tracer = c.initialize_tracer() assert opentracing.tracer == tracer tracer.close() def test_default_local_agent_reporting_port(self): c = Config({}, service_name='x') assert c.local_agent_reporting_port == 6831 def test_generate_128bit_trace_id(self): c = Config({}, service_name='x') assert c.generate_128bit_trace_id is False c = Config({'generate_128bit_trace_id': True}, service_name='x') assert c.generate_128bit_trace_id is True os.environ['JAEGER_TRACEID_128BIT'] = 'true' c = Config({'generate_128bit_trace_id': False}, service_name='x') assert c.generate_128bit_trace_id is False c = Config({}, service_name='x') assert c.generate_128bit_trace_id is True os.environ.pop('JAEGER_TRACEID_128BIT') assert os.getenv('JAEGER_TRACEID_128BIT', None) is None def test_global_tracer_initializaion(self): c = Config({}, service_name='x') tracer = c.initialize_tracer() assert tracer attempt = c.initialize_tracer() assert attempt is None tracer.close() def test_jaeger_endpoint(self): c = Config({'jaeger_endpoint': 'some_endpoint'}, service_name='x') assert c.jaeger_endpoint == 'some_endpoint' os.environ['JAEGER_ENDPOINT'] = 'SomeEndpoint' c = Config({}, service_name='x') assert c.jaeger_endpoint == 'SomeEndpoint' del os.environ['JAEGER_ENDPOINT'] def test_jaeger_auth_token(self): c = Config({'jaeger_auth_token': 'some_token'}, service_name='x', validate=True) assert c.jaeger_auth_token == 'some_token' os.environ['JAEGER_AUTH_TOKEN'] = 'SomeToken' c = Config({}, service_name='x') assert c.jaeger_auth_token == 'SomeToken' del os.environ['JAEGER_AUTH_TOKEN'] def test_jaeger_user_and_password(self): c = Config({'jaeger_user': 'some_user', 'jaeger_password': 'some_password'}, service_name='x', validate=True) assert c.jaeger_user == 'some_user' assert c.jaeger_password == 'some_password' os.environ['JAEGER_USER'] = 'SomeUser' os.environ['JAEGER_PASSWORD'] = 'SomePassword' c = Config({}, service_name='x') assert c.jaeger_user == 'SomeUser' assert c.jaeger_password == 'SomePassword' del os.environ['JAEGER_USER'] del os.environ['JAEGER_PASSWORD'] def 
test_jaeger_auth_token_and_basic_mutually_exclusive(self): with self.assertRaises(ValueError) as e: Config({'jaeger_auth_token': 'some_token', 'jaeger_user': 'some_user', 'jaeger_password': 'some_password'}, service_name='x', validate=True) assert e.exception.args[0] == ('Cannot accept both jaeger_auth_token and ' 'jaeger_user/jaeger_password for authentication') def test_jaeger_user_and_password_required(self): for cfg in ({'jaeger_user': 'some_user'}, {'jaeger_password': 'some_password'}): with self.assertRaises(ValueError) as e: Config(cfg, service_name='x', validate=True) assert e.exception.args[0] == ('Must provide both jaeger_user and ' 'jaeger_password for authentication.') def test_specifying_jaeger_endpoint_creates_http_sender(self): c = Config({'jaeger_endpoint': 'some_endpoint'}, service_name='x') tracer = c.initialize_tracer() assert isinstance(tracer.reporter._sender, HTTPSender) tracer.close()
[]
[]
[ "JAEGER_TAGS", "JAEGER_AUTH_TOKEN", "JAEGER_TRACEID_128BIT", "JAEGER_USER", "JAEGER_PASSWORD", "JAEGER_AGENT_HOST", "JAEGER_ENDPOINT" ]
[]
["JAEGER_TAGS", "JAEGER_AUTH_TOKEN", "JAEGER_TRACEID_128BIT", "JAEGER_USER", "JAEGER_PASSWORD", "JAEGER_AGENT_HOST", "JAEGER_ENDPOINT"]
python
7
0
termcolor.py
# coding: utf-8
# Copyright (c) 2008-2011 Volvox Development Team
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Author: Konstantin Lepa <[email protected]>

"""ANSII Color formatting for output in terminal."""

from __future__ import print_function

import os
import re
import sys

__ALL__ = [ 'colored', 'cprint' ]

VERSION = (1, 1, 0)

ATTRIBUTES = dict(
    list(zip([
        'bold',
        'dark',
        '',
        'underline',
        'blink',
        '',
        'reverse',
        'concealed'
        ],
        list(range(1, 9))
        ))
    )
del ATTRIBUTES['']

ATTRIBUTES_RE = '\033\[(?:%s)m' % '|'.join(['%d' % v for v in ATTRIBUTES.values()])

HIGHLIGHTS = dict(
    list(zip([
        'on_grey',
        'on_red',
        'on_green',
        'on_yellow',
        'on_blue',
        'on_magenta',
        'on_cyan',
        'on_white'
        ],
        list(range(40, 48))
        ))
    )

HIGHLIGHTS_RE = '\033\[(?:%s)m' % '|'.join(['%d' % v for v in HIGHLIGHTS.values()])

COLORS = dict(
    list(zip([
        'grey',
        'red',
        'green',
        'yellow',
        'blue',
        'magenta',
        'cyan',
        'white',
        ],
        list(range(30, 38))
        ))
    )

COLORS_RE = '\033\[(?:%s)m' % '|'.join(['%d' % v for v in COLORS.values()])

RESET = '\033[0m'
RESET_RE = '\033\[0m'


def colored(text, color=None, on_color=None, attrs=None):
    """Colorize text, while stripping nested ANSI color sequences.

    Available text colors:
        red, green, yellow, blue, magenta, cyan, white.

    Available text highlights:
        on_red, on_green, on_yellow, on_blue, on_magenta, on_cyan, on_white.

    Available attributes:
        bold, dark, underline, blink, reverse, concealed.

    Example:
        colored('Hello, World!', 'red', 'on_grey', ['blue', 'blink'])
        colored('Hello, World!', 'green')
    """
    if os.getenv('ANSI_COLORS_DISABLED') is None and sys.stdout.isatty():
        fmt_str = '\033[%dm%s'
        if color is not None:
            text = re.sub(COLORS_RE + '(.*?)' + RESET_RE, r'\1', text)
            text = fmt_str % (COLORS[color], text)

        if on_color is not None:
            text = re.sub(HIGHLIGHTS_RE + '(.*?)' + RESET_RE, r'\1', text)
            text = fmt_str % (HIGHLIGHTS[on_color], text)

        if attrs is not None:
            text = re.sub(ATTRIBUTES_RE + '(.*?)' + RESET_RE, r'\1', text)
            for attr in attrs:
                text = fmt_str % (ATTRIBUTES[attr], text)

        return text + RESET
    else:
        return text


def cprint(text, color=None, on_color=None, attrs=None, **kwargs):
    """Print colorize text.

    It accepts arguments of print function.
    """
    print((colored(text, color, on_color, attrs)), **kwargs)


if __name__ == '__main__':
    print('Current terminal type: %s' % os.getenv('TERM'))
    print('Test basic colors:')
    cprint('Grey color', 'grey')
    cprint('Red color', 'red')
    cprint('Green color', 'green')
    cprint('Yellow color', 'yellow')
    cprint('Blue color', 'blue')
    cprint('Magenta color', 'magenta')
    cprint('Cyan color', 'cyan')
    cprint('White color', 'white')
    print(('-' * 78))

    print('Test highlights:')
    cprint('On grey color', on_color='on_grey')
    cprint('On red color', on_color='on_red')
    cprint('On green color', on_color='on_green')
    cprint('On yellow color', on_color='on_yellow')
    cprint('On blue color', on_color='on_blue')
    cprint('On magenta color', on_color='on_magenta')
    cprint('On cyan color', on_color='on_cyan')
    cprint('On white color', color='grey', on_color='on_white')
    print('-' * 78)

    print('Test attributes:')
    cprint('Bold grey color', 'grey', attrs=['bold'])
    cprint('Dark red color', 'red', attrs=['dark'])
    cprint('Underline green color', 'green', attrs=['underline'])
    cprint('Blink yellow color', 'yellow', attrs=['blink'])
    cprint('Reversed blue color', 'blue', attrs=['reverse'])
    cprint('Concealed Magenta color', 'magenta', attrs=['concealed'])
    cprint('Bold underline reverse cyan color', 'cyan', attrs=['bold', 'underline', 'reverse'])
    cprint('Dark blink concealed white color', 'white', attrs=['dark', 'blink', 'concealed'])
    print(('-' * 78))

    print('Test mixing:')
    cprint('Underline red on grey color', 'red', 'on_grey', ['underline'])
    cprint('Reversed green on red color', 'green', 'on_red', ['reverse'])
[]
[]
[ "ANSI_COLORS_DISABLED", "TERM" ]
[]
["ANSI_COLORS_DISABLED", "TERM"]
python
2
0
examples/stomp/python/stompest/sync/listener.py
#!/usr/bin/env python """ Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import os import sys import time from stompest.config import StompConfig from stompest.sync import Stomp user = os.getenv('APOLLO_USER') or 'admin' password = os.getenv('APOLLO_PASSWORD') or 'password' host = os.getenv('APOLLO_HOST') or 'localhost' port = int(os.getenv('APOLLO_PORT') or 61613) destination = sys.argv[1:2] or ['/topic/event'] destination = destination[0] config = StompConfig('tcp://%s:%d' % (host, port), login=user, passcode=password, version='1.1') client = Stomp(config) client.connect(host='mybroker') client.subscribe(destination=destination, headers={'id': 'required-for-STOMP-1.1'}) count = 0 start = time.time() while (not count) or client.canRead(0): client.receiveFrame() count += 1 diff = time.time() - start print 'Received %s frames in %f seconds' % (count, diff) client.disconnect(receipt='bye') client.receiveFrame() client.close()
[]
[]
[ "APOLLO_PASSWORD", "APOLLO_USER", "APOLLO_PORT", "APOLLO_HOST" ]
[]
["APOLLO_PASSWORD", "APOLLO_USER", "APOLLO_PORT", "APOLLO_HOST"]
python
4
0
lib-python/3/test/test_site.py
"""Tests for 'site'. Tests assume the initial paths in sys.path once the interpreter has begun executing have not been removed. """ import unittest from test.support import run_unittest, TESTFN, EnvironmentVarGuard from test.support import captured_stderr, check_impl_detail import builtins import os import sys import re import encodings import subprocess import sysconfig from copy import copy # Need to make sure to not import 'site' if someone specified ``-S`` at the # command-line. Detect this by just making sure 'site' has not been imported # already. if "site" in sys.modules: import site else: raise unittest.SkipTest("importation of site.py suppressed") if site.ENABLE_USER_SITE and not os.path.isdir(site.USER_SITE): # need to add user site directory for tests os.makedirs(site.USER_SITE) site.addsitedir(site.USER_SITE) class HelperFunctionsTests(unittest.TestCase): """Tests for helper functions. """ def setUp(self): """Save a copy of sys.path""" self.sys_path = sys.path[:] self.old_base = site.USER_BASE self.old_site = site.USER_SITE self.old_prefixes = site.PREFIXES self.old_vars = copy(sysconfig._CONFIG_VARS) def tearDown(self): """Restore sys.path""" sys.path[:] = self.sys_path site.USER_BASE = self.old_base site.USER_SITE = self.old_site site.PREFIXES = self.old_prefixes sysconfig._CONFIG_VARS = self.old_vars def test_makepath(self): # Test makepath() have an absolute path for its first return value # and a case-normalized version of the absolute path for its # second value. path_parts = ("Beginning", "End") original_dir = os.path.join(*path_parts) abs_dir, norm_dir = site.makepath(*path_parts) self.assertEqual(os.path.abspath(original_dir), abs_dir) if original_dir == os.path.normcase(original_dir): self.assertEqual(abs_dir, norm_dir) else: self.assertEqual(os.path.normcase(abs_dir), norm_dir) def test_init_pathinfo(self): dir_set = site._init_pathinfo() for entry in [site.makepath(path)[1] for path in sys.path if path and os.path.isdir(path)]: self.assertIn(entry, dir_set, "%s from sys.path not found in set returned " "by _init_pathinfo(): %s" % (entry, dir_set)) def pth_file_tests(self, pth_file): """Contain common code for testing results of reading a .pth file""" self.assertIn(pth_file.imported, sys.modules, "%s not in sys.modules" % pth_file.imported) self.assertIn(site.makepath(pth_file.good_dir_path)[0], sys.path) self.assertFalse(os.path.exists(pth_file.bad_dir_path)) def test_addpackage(self): # Make sure addpackage() imports if the line starts with 'import', # adds directories to sys.path for any line in the file that is not a # comment or import that is a valid directory name for where the .pth # file resides; invalid directories are not added pth_file = PthFile() pth_file.cleanup(prep=True) # to make sure that nothing is # pre-existing that shouldn't be try: pth_file.create() site.addpackage(pth_file.base_dir, pth_file.filename, set()) self.pth_file_tests(pth_file) finally: pth_file.cleanup() def make_pth(self, contents, pth_dir='.', pth_name=TESTFN): # Create a .pth file and return its (abspath, basename). 
pth_dir = os.path.abspath(pth_dir) pth_basename = pth_name + '.pth' pth_fn = os.path.join(pth_dir, pth_basename) pth_file = open(pth_fn, 'w', encoding='utf-8') self.addCleanup(lambda: os.remove(pth_fn)) pth_file.write(contents) pth_file.close() return pth_dir, pth_basename def test_addpackage_import_bad_syntax(self): # Issue 10642 pth_dir, pth_fn = self.make_pth("import bad)syntax\n") with captured_stderr() as err_out: site.addpackage(pth_dir, pth_fn, set()) self.assertRegex(err_out.getvalue(), "line 1") self.assertRegex(err_out.getvalue(), re.escape(os.path.join(pth_dir, pth_fn))) # XXX: the previous two should be independent checks so that the # order doesn't matter. The next three could be a single check # but my regex foo isn't good enough to write it. self.assertRegex(err_out.getvalue(), 'Traceback') self.assertRegex(err_out.getvalue(), r'import bad\)syntax') self.assertRegex(err_out.getvalue(), 'SyntaxError') def test_addpackage_import_bad_exec(self): # Issue 10642 pth_dir, pth_fn = self.make_pth("randompath\nimport nosuchmodule\n") with captured_stderr() as err_out: site.addpackage(pth_dir, pth_fn, set()) self.assertRegex(err_out.getvalue(), "line 2") self.assertRegex(err_out.getvalue(), re.escape(os.path.join(pth_dir, pth_fn))) # XXX: ditto previous XXX comment. self.assertRegex(err_out.getvalue(), 'Traceback') self.assertRegex(err_out.getvalue(), 'ImportError') @unittest.skipIf(sys.platform == "win32", "Windows does not raise an " "error for file paths containing null characters") def test_addpackage_import_bad_pth_file(self): # Issue 5258 pth_dir, pth_fn = self.make_pth("abc\x00def\n") with captured_stderr() as err_out: site.addpackage(pth_dir, pth_fn, set()) self.assertRegex(err_out.getvalue(), "line 1") self.assertRegex(err_out.getvalue(), re.escape(os.path.join(pth_dir, pth_fn))) # XXX: ditto previous XXX comment. 
self.assertRegex(err_out.getvalue(), 'Traceback') self.assertRegex(err_out.getvalue(), 'TypeError') def test_addsitedir(self): # Same tests for test_addpackage since addsitedir() essentially just # calls addpackage() for every .pth file in the directory pth_file = PthFile() pth_file.cleanup(prep=True) # Make sure that nothing is pre-existing # that is tested for try: pth_file.create() site.addsitedir(pth_file.base_dir, set()) self.pth_file_tests(pth_file) finally: pth_file.cleanup() @unittest.skipUnless(site.ENABLE_USER_SITE, "requires access to PEP 370 " "user-site (site.ENABLE_USER_SITE)") def test_s_option(self): usersite = site.USER_SITE self.assertIn(usersite, sys.path) env = os.environ.copy() rc = subprocess.call([sys.executable, '-c', 'import sys; sys.exit(%r in sys.path)' % usersite], env=env) self.assertEqual(rc, 1) env = os.environ.copy() rc = subprocess.call([sys.executable, '-s', '-c', 'import sys; sys.exit(%r in sys.path)' % usersite], env=env) self.assertEqual(rc, 0) env = os.environ.copy() env["PYTHONNOUSERSITE"] = "1" rc = subprocess.call([sys.executable, '-c', 'import sys; sys.exit(%r in sys.path)' % usersite], env=env) self.assertEqual(rc, 0) env = os.environ.copy() env["PYTHONUSERBASE"] = "/tmp" rc = subprocess.call([sys.executable, '-c', 'import sys, site; sys.exit(site.USER_BASE.startswith("/tmp"))'], env=env) self.assertEqual(rc, 1) def test_getuserbase(self): site.USER_BASE = None user_base = site.getuserbase() # the call sets site.USER_BASE self.assertEqual(site.USER_BASE, user_base) # let's set PYTHONUSERBASE and see if it uses it site.USER_BASE = None import sysconfig sysconfig._CONFIG_VARS = None with EnvironmentVarGuard() as environ: environ['PYTHONUSERBASE'] = 'xoxo' self.assertTrue(site.getuserbase().startswith('xoxo'), site.getuserbase()) def test_getusersitepackages(self): site.USER_SITE = None site.USER_BASE = None user_site = site.getusersitepackages() # the call sets USER_BASE *and* USER_SITE self.assertEqual(site.USER_SITE, user_site) self.assertTrue(user_site.startswith(site.USER_BASE), user_site) def test_getsitepackages(self): site.PREFIXES = ['xoxo'] dirs = site.getsitepackages() if sys.platform in ('os2emx', 'riscos'): self.assertEqual(len(dirs), 1) wanted = os.path.join('xoxo', 'Lib', 'site-packages') self.assertEqual(dirs[0], wanted) elif '__pypy__' in sys.builtin_module_names: self.assertEquals(len(dirs), 1) wanted = os.path.join('xoxo', 'site-packages') self.assertEquals(dirs[0], wanted) elif (sys.platform == "darwin" and sysconfig.get_config_var("PYTHONFRAMEWORK")): # OS X framework builds site.PREFIXES = ['Python.framework'] dirs = site.getsitepackages() self.assertEqual(len(dirs), 3) wanted = os.path.join('/Library', sysconfig.get_config_var("PYTHONFRAMEWORK"), sys.version[:3], 'site-packages') self.assertEqual(dirs[2], wanted) elif os.sep == '/': # OS X non-framwework builds, Linux, FreeBSD, etc self.assertEqual(len(dirs), 2) wanted = os.path.join('xoxo', 'lib', 'python' + sys.version[:3], 'site-packages') self.assertEqual(dirs[0], wanted) wanted = os.path.join('xoxo', 'lib', 'site-python') self.assertEqual(dirs[1], wanted) else: # other platforms self.assertEqual(len(dirs), 2) self.assertEqual(dirs[0], 'xoxo') wanted = os.path.join('xoxo', 'lib', 'site-packages') self.assertEqual(dirs[1], wanted) class PthFile(object): """Helper class for handling testing of .pth files""" def __init__(self, filename_base=TESTFN, imported="time", good_dirname="__testdir__", bad_dirname="__bad"): """Initialize instance variables""" self.filename = 
filename_base + ".pth" self.base_dir = os.path.abspath('') self.file_path = os.path.join(self.base_dir, self.filename) self.imported = imported self.good_dirname = good_dirname self.bad_dirname = bad_dirname self.good_dir_path = os.path.join(self.base_dir, self.good_dirname) self.bad_dir_path = os.path.join(self.base_dir, self.bad_dirname) def create(self): """Create a .pth file with a comment, blank lines, an ``import <self.imported>``, a line with self.good_dirname, and a line with self.bad_dirname. Creation of the directory for self.good_dir_path (based off of self.good_dirname) is also performed. Make sure to call self.cleanup() to undo anything done by this method. """ FILE = open(self.file_path, 'w') try: print("#import @bad module name", file=FILE) print("\n", file=FILE) print("import %s" % self.imported, file=FILE) print(self.good_dirname, file=FILE) print(self.bad_dirname, file=FILE) finally: FILE.close() os.mkdir(self.good_dir_path) def cleanup(self, prep=False): """Make sure that the .pth file is deleted, self.imported is not in sys.modules, and that both self.good_dirname and self.bad_dirname are not existing directories.""" if os.path.exists(self.file_path): os.remove(self.file_path) if prep: self.imported_module = sys.modules.get(self.imported) if self.imported_module: del sys.modules[self.imported] else: if self.imported_module: sys.modules[self.imported] = self.imported_module if os.path.exists(self.good_dir_path): os.rmdir(self.good_dir_path) if os.path.exists(self.bad_dir_path): os.rmdir(self.bad_dir_path) class ImportSideEffectTests(unittest.TestCase): """Test side-effects from importing 'site'.""" def setUp(self): """Make a copy of sys.path""" self.sys_path = sys.path[:] def tearDown(self): """Restore sys.path""" sys.path[:] = self.sys_path def test_abs_paths(self): # Make sure all imported modules have their __file__ and __cached__ # attributes as absolute paths. Arranging to put the Lib directory on # PYTHONPATH would cause the os module to have a relative path for # __file__ if abs_paths() does not get run. sys and builtins (the # only other modules imported before site.py runs) do not have # __file__ or __cached__ because they are built-in. parent = os.path.relpath(os.path.dirname(os.__file__)) env = os.environ.copy() env['PYTHONPATH'] = parent code = ('import os, sys', # use ASCII to avoid locale issues with non-ASCII directories 'os_file = os.__file__.encode("ascii", "backslashreplace")', r'sys.stdout.buffer.write(os_file + b"\n")', 'os_cached = os.__cached__.encode("ascii", "backslashreplace")', r'sys.stdout.buffer.write(os_cached + b"\n")') command = '\n'.join(code) # First, prove that with -S (no 'import site'), the paths are # relative. proc = subprocess.Popen([sys.executable, '-S', '-c', command], env=env, stdout=subprocess.PIPE) stdout, stderr = proc.communicate() self.assertEqual(proc.returncode, 0) os__file__, os__cached__ = stdout.splitlines()[:2] if check_impl_detail(cpython=True): self.assertFalse(os.path.isabs(os__file__)) self.assertFalse(os.path.isabs(os__cached__)) # Now, with 'import site', it works. 
proc = subprocess.Popen([sys.executable, '-c', command], env=env, stdout=subprocess.PIPE) stdout, stderr = proc.communicate() self.assertEqual(proc.returncode, 0) os__file__, os__cached__ = stdout.splitlines()[:2] self.assertTrue(os.path.isabs(os__file__)) self.assertTrue(os.path.isabs(os__cached__)) def test_no_duplicate_paths(self): # No duplicate paths should exist in sys.path # Handled by removeduppaths() site.removeduppaths() seen_paths = set() for path in sys.path: self.assertNotIn(path, seen_paths) seen_paths.add(path) def test_add_build_dir(self): # Test that the build directory's Modules directory is used when it # should be. # XXX: implement pass def test_setting_quit(self): # 'quit' and 'exit' should be injected into builtins self.assertTrue(hasattr(builtins, "quit")) self.assertTrue(hasattr(builtins, "exit")) def test_setting_copyright(self): # 'copyright' and 'credits' should be in builtins self.assertTrue(hasattr(builtins, "copyright")) self.assertTrue(hasattr(builtins, "credits")) def test_setting_help(self): # 'help' should be set in builtins self.assertTrue(hasattr(builtins, "help")) def test_aliasing_mbcs(self): if sys.platform == "win32": import locale if locale.getdefaultlocale()[1].startswith('cp'): for value in encodings.aliases.aliases.values(): if value == "mbcs": break else: self.fail("did not alias mbcs") def test_sitecustomize_executed(self): # If sitecustomize is available, it should have been imported. if "sitecustomize" not in sys.modules: try: import sitecustomize except ImportError: pass else: self.fail("sitecustomize not imported automatically") def test_main(): run_unittest(HelperFunctionsTests, ImportSideEffectTests) if __name__ == "__main__": test_main()
[]
[]
[]
[]
[]
python
0
0
internal/api/api.go
//This file contains the cli API and configs it package api import ( "fmt" "log" "os" "strconv" "time" "github.com/ddmin/logo-ls/internal/sysState" "github.com/pborman/getopt/v2" "golang.org/x/crypto/ssh/terminal" ) // flags with corresponding bit values // frequently used flags should be higher in the list // help (-?) and version (-V) not included const ( Flag_l uint = 1 << iota Flag_a Flag_alpha // sort in alphabetic order (default) Flag_i // stop printing of icons Flag_c // stop printing of colors Flag_D // stop printing of git status Flag_A Flag_h Flag_R Flag_r Flag_S Flag_t Flag_X Flag_s Flag_v Flag_U Flag_1 Flag_d Flag_o Flag_g Flag_G ) // flagVector has all the options set in it. Each bit represent an option. var FlagVector uint // time formate var timeFormate string func TimeFormate(t string) { timeFormate = t } func GetTimeFormate() string { return timeFormate } var FileList []string func Bootstrap() { getopt.SetParameters("[files ...]") // content flags f_a := getopt.BoolLong("all", 'a', "do not ignore entries starting with .") f_A := getopt.BoolLong("almost-all", 'A', "do not list implied . and ..") // disable Stuff f_D := getopt.BoolLong("git-status", 'D', "print git status of files") f_c := getopt.BoolLong("disable-color", 'c', "don't color icons, filenames and git status (use this to print to a file)") f_i := getopt.BoolLong("disable-icon", 'i', "don't print icons of the files") // display flags f_1 := getopt.Bool('1', "list one file per line.") f_d := getopt.BoolLong("directory", 'd', "list directories themselves, not their contents") f_l := getopt.Bool('l', "use a long listing format") f_o := getopt.Bool('o', "like -l, but do not list group information") f_g := getopt.Bool('g', "\nlike -l, but do not list owner") f_G := getopt.BoolLong("no-group", 'G', "in a long listing, don't print group names") f_h := getopt.BoolLong("human-readable", 'h', "with -l and -s, print sizes like 1K 234M 2G etc.") f_s := getopt.BoolLong("size", 's', "print the allocated size of each file, in blocks") // sorting flags f_S := getopt.Bool('S', "sort by file size, largest first") f_U := getopt.Bool('U', "do not sort; list entries in directory order") f_X := getopt.Bool('X', "sort alphabetically by entry extension") f_v := getopt.Bool('v', "natural sort of (version) numbers within text") f_t := getopt.Bool('t', "sort by modification time, newest first") f_r := getopt.BoolLong("reverse", 'r', "reverse order while sorting") f_R := getopt.BoolLong("recursive", 'R', "list subdirectories recursively") f_T := getopt.EnumLong("time-style", 'T', []string{"Stamp", "StampMilli", "Kitchen", "ANSIC", "UnixDate", "RubyDate", "RFC1123", "RFC1123Z", "RFC3339", "RFC822", "RFC822Z", "RFC850"}, "Stamp", "time/date format with -l; see time-style below") f_help := getopt.BoolLong("help", '?', "display this help and exit") f_V := getopt.BoolLong("version", 'V', "output version information and exit") // using getopt.Getopt instead of parse to provide custom err err := getopt.Getopt(nil) if err != nil { // code to handle error log.Printf("%v\nTry 'logo-ls -?' 
for more information.", err) sysState.ExitCode(sysState.Code_Serious) os.Exit(sysState.GetExitCode()) } // list of files/ dir FileList = getopt.Args() // set one of -A and -a priority -A > -a switch { case *f_A: FlagVector |= Flag_A case *f_a: FlagVector |= Flag_a } // set one of -S, -U, -X, -v, -t and alpha priority -S > -t > -X > -v > -U > alpha switch { case *f_S: FlagVector |= Flag_S case *f_t: FlagVector |= Flag_t case *f_X: FlagVector |= Flag_X case *f_v: FlagVector |= Flag_v case *f_U: FlagVector |= Flag_U default: FlagVector |= Flag_alpha } // set reverse (-r) flag if *f_r { FlagVector |= Flag_r } // set recursion (-R) flag if *f_R { FlagVector |= Flag_R } // set disable-git-status (-D) flag if *f_D { FlagVector |= Flag_D } // set disable-color (-c) flag if *f_c { FlagVector |= Flag_c } // set disable-icon (-i) flag if *f_i { FlagVector |= Flag_i } // set -1 flag if *f_1 { FlagVector |= Flag_1 } // set -d flag if *f_d { FlagVector |= Flag_d } // set -G flag if *f_G { FlagVector |= Flag_G } // set time formate switch *f_T { case "Stamp": timeFormate = time.Stamp case "StampMilli": timeFormate = time.StampMilli case "Kitchen": timeFormate = time.Kitchen case "ANSIC": timeFormate = time.ANSIC case "UnixDate": timeFormate = time.UnixDate case "RubyDate": timeFormate = time.RubyDate case "RFC1123": timeFormate = time.RFC1123 case "RFC1123Z": timeFormate = time.RFC1123Z case "RFC3339": timeFormate = time.RFC3339 case "RFC822": timeFormate = time.RFC822 case "RFC822Z": timeFormate = time.RFC822Z case "RFC850": timeFormate = time.RFC850 default: timeFormate = time.Stamp } // set -h flag if *f_h { FlagVector |= Flag_h } // set -s flag if *f_s { FlagVector |= Flag_s } // set one of -o, -g and -l priority -o > -g > -l switch { case *f_o: FlagVector |= Flag_o case *f_g: FlagVector |= Flag_g case *f_l: FlagVector |= Flag_l case *f_1: default: // screen width for custom tw w, _, e := terminal.GetSize(int(os.Stdout.Fd())) if e == nil { if w == 0 { // for systems that don’t support ‘TIOCGWINSZ’. w, _ = strconv.Atoi(os.Getenv("COLUMNS")) } sysState.TerminalWidth(w) } } // if f_help is provided print help and exit(0) if *f_help { fmt.Println("List information about the FILEs with ICONS and GIT STATUS (the current dir \nby default). 
Sort entries alphabetically if none of -tvSUX is specified.") getopt.PrintUsage(os.Stdout) fmt.Println("\nPossible value for --time-style (-T)") fmt.Printf(" %-11s %-32q\n", "ANSIC", "Mon Jan _2 15:04:05 2006") fmt.Printf(" %-11s %-32q\n", "UnixDate", "Mon Jan _2 15:04:05 MST 2006") fmt.Printf(" %-11s %-32q\n", "RubyDate", "Mon Jan 02 15:04:05 -0700 2006") fmt.Printf(" %-11s %-32q\n", "RFC822", "02 Jan 06 15:04 MST") fmt.Printf(" %-11s %-32q\n", "RFC822Z", "02 Jan 06 15:04 -0700") fmt.Printf(" %-11s %-32q\n", "RFC850", "Monday, 02-Jan-06 15:04:05 MST") fmt.Printf(" %-11s %-32q\n", "RFC1123", "Mon, 02 Jan 2006 15:04:05 MST") fmt.Printf(" %-11s %-32q\n", "RFC1123Z", "Mon, 02 Jan 2006 15:04:05 -0700") fmt.Printf(" %-11s %-32q\n", "RFC3339", "2006-01-02T15:04:05Z07:00") fmt.Printf(" %-11s %-32q\n", "Kitchen", "3:04PM") fmt.Printf(" %-11s %-32q [Default]\n", "Stamp", "Mon Jan _2 15:04:05") fmt.Printf(" %-11s %-32q\n", "StampMilli", "Jan _2 15:04:05.000") fmt.Println("\nExit status:") fmt.Println(" 0 if OK,") fmt.Println(" 1 if minor problems (e.g., cannot access subdirectory),") fmt.Println(" 2 if serious trouble (e.g., cannot access command-line argument).") os.Exit(sysState.GetExitCode()) } // if f_V is provided version will be printed and exit(0) if *f_V { fmt.Printf("logo-ls %s\nCopyright (c) 2020\nLicense MIT <https://opensource.org/licenses/MIT>.\nThis is free software: you are free to change and redistribute it.\nThere is NO WARRANTY, to the extent permitted by law.\n", "v1.3.6") os.Exit(sysState.GetExitCode()) } }
[ "\"COLUMNS\"" ]
[]
[ "COLUMNS" ]
[]
["COLUMNS"]
go
1
0
samples/vision/content_moderator_text_moderation_samples.py
import os.path
from pprint import pprint

from azure.cognitiveservices.vision.contentmoderator import ContentModeratorClient
from azure.cognitiveservices.vision.contentmoderator.models import (
    Screen
)
from msrest.authentication import CognitiveServicesCredentials

SUBSCRIPTION_KEY_ENV_NAME = "CONTENTMODERATOR_SUBSCRIPTION_KEY"

CONTENTMODERATOR_LOCATION = os.environ.get("CONTENTMODERATOR_LOCATION", "westcentralus")

TEXT_FOLDER = os.path.join(os.path.dirname(os.path.realpath(__file__)), "text_files")

# The number of minutes to delay after updating the search index before
# performing image match operations against the list.
LATENCY_DELAY = 0.5


def text_moderation(subscription_key):
    """TextModeration.

    This will moderate a given long text.
    """
    client = ContentModeratorClient(
        endpoint='https://'+CONTENTMODERATOR_LOCATION+'.api.cognitive.microsoft.com',
        credentials=CognitiveServicesCredentials(subscription_key)
    )

    # Screen the input text: check for profanity,
    # do autocorrect text, and check for personally identifying
    # information (PII)
    with open(os.path.join(TEXT_FOLDER, 'content_moderator_text_moderation.txt'), "rb") as text_fd:
        screen = client.text_moderation.screen_text(
            text_content_type="text/plain",
            text_content=text_fd,
            language="eng",
            autocorrect=True,
            pii=True
        )
        assert isinstance(screen, Screen)
        pprint(screen.as_dict())


if __name__ == "__main__":
    import sys, os.path
    sys.path.append(os.path.abspath(os.path.join(__file__, "..", "..")))
    from tools import execute_samples
    execute_samples(globals(), SUBSCRIPTION_KEY_ENV_NAME)
[]
[]
[ "CONTENTMODERATOR_LOCATION" ]
[]
["CONTENTMODERATOR_LOCATION"]
python
1
0
engine/worker/requirement.go
package main import ( "fmt" "net" "os" "os/exec" "path" "strconv" "strings" "time" "github.com/shirou/gopsutil/mem" "github.com/ovh/cds/sdk" "github.com/ovh/cds/sdk/log" ) var requirementCheckFuncs = map[string]func(w *currentWorker, r sdk.Requirement) (bool, error){ sdk.BinaryRequirement: checkBinaryRequirement, sdk.HostnameRequirement: checkHostnameRequirement, sdk.ModelRequirement: checkModelRequirement, sdk.NetworkAccessRequirement: checkNetworkAccessRequirement, sdk.PluginRequirement: checkPluginRequirement, sdk.ServiceRequirement: checkServiceRequirement, sdk.MemoryRequirement: checkMemoryRequirement, sdk.VolumeRequirement: checkVolumeRequirement, sdk.OSArchRequirement: checkOSArchRequirement, } func checkRequirements(w *currentWorker, a *sdk.Action, bookedJobID int64) (bool, []sdk.Requirement) { requirementsOK := true errRequirements := []sdk.Requirement{} log.Debug("checkRequirements> for JobID:%d model of worker: %s", bookedJobID, w.model.Name) log.Debug("requirements for %s >>> %+v\n", a.Name, a.Requirements) for _, r := range a.Requirements { ok, err := checkRequirement(w, r) if err != nil { log.Warning("checkQueue> error on checkRequirement %s", err) } if !ok { requirementsOK = false errRequirements = append(errRequirements, r) continue } } log.Debug("checkRequirements> checkRequirements:%t errRequirements:%v", requirementsOK, errRequirements) return requirementsOK, errRequirements } func checkRequirement(w *currentWorker, r sdk.Requirement) (bool, error) { check := requirementCheckFuncs[r.Type] if check == nil { return false, fmt.Errorf("checkRequirement> Unknown type of requirement: %s", r.Type) } return check(w, r) } func checkPluginRequirement(w *currentWorker, r sdk.Requirement) (bool, error) { var currentOS = strings.ToLower(sdk.GOOS) var currentARCH = strings.ToLower(sdk.GOARCH) binary, err := w.client.PluginGetBinaryInfos(r.Name, currentOS, currentARCH) if err != nil { return false, err } // then try to download the plugin pluginBinary := path.Join(w.basedir, binary.Name) if _, err := os.Stat(pluginBinary); os.IsNotExist(err) { log.Debug("Downloading the plugin %s", binary.Name) //If the file doesn't exist. Download it. fi, err := os.OpenFile(pluginBinary, os.O_CREATE|os.O_RDWR, os.FileMode(binary.Perm)) if err != nil { return false, err } log.Debug("Get the binary plugin %s", r.Name) if err := w.client.PluginGetBinary(r.Name, currentOS, currentARCH, fi); err != nil { _ = fi.Close() return false, err } //It's downloaded. 
Close the file _ = fi.Close() } else { log.Debug("plugin binary is in cache %s", pluginBinary) } return true, nil } // checkHostnameRequirement returns true if current hostname is a requirement func checkHostnameRequirement(w *currentWorker, r sdk.Requirement) (bool, error) { h, err := os.Hostname() if err != nil { return false, err } return h == r.Value, nil } // checkBinaryRequirement returns true is binary requirement is in worker's PATH func checkBinaryRequirement(w *currentWorker, r sdk.Requirement) (bool, error) { if _, err := exec.LookPath(r.Value); err != nil { // Return nil because the error contains 'Executable file not found', that's what we wanted return false, nil } return true, nil } func checkModelRequirement(w *currentWorker, r sdk.Requirement) (bool, error) { // if there is a model req and no model on worker -> return false if w.model.ID == 0 { return false, nil } modelName := strings.Split(r.Value, " ")[0] modelPath := strings.SplitN(modelName, "/", 2) if len(modelPath) == 2 { // if the requirement contains group info (myGroup/myModel) check that it match current worker model return modelName == fmt.Sprintf("%s/%s", w.model.Group.Name, w.model.Name), nil } isSharedInfra := w.model.Group.Name == sdk.SharedInfraGroupName && modelName == w.model.Name isSameName := modelName == w.model.Name // for backward compatibility with runs, if only the name match we considered that the model can be used, keep this condition until the workflow runs were not migrated. return isSharedInfra || isSameName, nil } func checkNetworkAccessRequirement(w *currentWorker, r sdk.Requirement) (bool, error) { conn, err := net.DialTimeout("tcp", r.Value, 10*time.Second) if err != nil { return false, nil } conn.Close() return true, nil } func checkServiceRequirement(w *currentWorker, r sdk.Requirement) (bool, error) { // service are supported only for Model Docker if w.model.Type != sdk.Docker { return false, nil } retry := 3 for attempt := 0; attempt < retry; attempt++ { ips, err := net.LookupIP(r.Name) if err != nil { log.Debug("Error checking requirement : %s", err) time.Sleep(2 * time.Second) continue } var s string for _, ip := range ips { s += s + ip.String() + " " } log.Info("Service requirement %s is ready %s", r.Name, s) return true, nil } return false, nil } func checkMemoryRequirement(w *currentWorker, r sdk.Requirement) (bool, error) { var totalMemory int64 neededMemory, err := strconv.ParseInt(r.Value, 10, 64) if err != nil { return false, err } switch w.model.Type { // Check env variables in a docker is safer than mem.VirtualMemory case sdk.Docker: var err error // Useful for provisioned worker memoryEnv := os.Getenv("CDS_MODEL_MEMORY") totalMemory, err = strconv.ParseInt(memoryEnv, 10, 64) if err != nil { return false, err } totalMemory = totalMemory * 1024 * 1024 default: v, err := mem.VirtualMemory() if err != nil { return false, err } totalMemory = int64(v.Total) } //Assuming memory is in megabytes //If we have more than 90% of neededMemory, lets do it return totalMemory >= (neededMemory*1024*1024)*90/100, nil } func checkVolumeRequirement(w *currentWorker, r sdk.Requirement) (bool, error) { // volume are supported only for Model Docker if w.model.Type != sdk.Docker { return false, nil } for _, v := range strings.Split(r.Value, ",") { if strings.HasPrefix(v, "destination=") { theMountedDir := strings.Split(v, "=")[1] if stat, err := os.Stat(theMountedDir); err != nil || !stat.IsDir() { return true, nil } } } return false, nil } func checkOSArchRequirement(w *currentWorker, r 
sdk.Requirement) (bool, error) { osarch := strings.Split(r.Value, "/") if len(osarch) != 2 { return false, fmt.Errorf("invalid requirement %s", r.Value) } return osarch[0] == strings.ToLower(sdk.GOOS) && osarch[1] == strings.ToLower(sdk.GOARCH), nil } // checkPluginDeployment returns true if current job: // - is not linked to a deployment integration // - is linked to a deployement integration, plugin well downloaded (in this func) and // requirements on the plugins are OK too func checkPluginDeployment(w *currentWorker, job sdk.WorkflowNodeJobRun) (bool, error) { var currentOS = strings.ToLower(sdk.GOOS) var currentARCH = strings.ToLower(sdk.GOARCH) var binary *sdk.GRPCPluginBinary if len(job.IntegrationPluginBinaries) == 0 { // current job is not linked to a deployment integration (in pipeline context) return true, nil } log.Debug("Checking plugins...(%#v)", job.IntegrationPluginBinaries) // first check OS and Architecture for _, b := range job.IntegrationPluginBinaries { if b.OS == currentOS && b.Arch == currentARCH { binary = &b break } } if binary == nil { return false, fmt.Errorf("%s %s not supported by this plugin", currentOS, currentARCH) } // then check plugin requirements for _, r := range binary.Requirements { ok, err := checkRequirement(w, r) if err != nil { log.Warning("checkQueue> error on checkRequirement %s", err) } if !ok { return false, fmt.Errorf("plugin requirement %s does not match", r.Name) } } // then try to download the plugin integrationPluginBinary := path.Join(w.basedir, binary.Name) if _, err := os.Stat(integrationPluginBinary); os.IsNotExist(err) { log.Debug("Downloading the plugin %s", binary.PluginName) //If the file doesn't exist. Download it. fi, err := os.OpenFile(integrationPluginBinary, os.O_CREATE|os.O_RDWR, os.FileMode(binary.Perm)) if err != nil { return false, err } if err := w.client.PluginGetBinary(binary.PluginName, currentOS, currentARCH, fi); err != nil { _ = fi.Close() return false, err } //It's downloaded. Close the file _ = fi.Close() } else { log.Debug("plugin binary is in cache") } log.Info("plugin successfully downloaded: %#v", binary.Name) return true, nil }
[ "\"CDS_MODEL_MEMORY\"" ]
[]
[ "CDS_MODEL_MEMORY" ]
[]
["CDS_MODEL_MEMORY"]
go
1
0
app_config.py
import datetime import os # --------------------------------------------------- # Host configuration # --------------------------------------------------- APPLICATION_ROOT='/ccbd24f370707c33603102adc7b77123' # The server name is used by Flask to limit access to the # served content to request to a particular domain. It # is also used by some authentication providers (in particular # OAuth providers) to advertise callback providers. If # not provided, it is assumed in these contexts to be # 'localhost:7000'. Be sure to specify this before deploying # into production. SERVER_NAME=None # The knowledge repository uses the secret key to sign user # sessions. If not specified, a unique secret key will be # generated every time the server starts up. If hosting # in a multi-server environment, or you want sessions # to persist accross server restarts, set this to something # static. #SECRET_KEY = None # Set DEPLOY_HTTPS to True if you want to enable encrypted # communication with Flask. When enabled, you must provide # your ssl certificate, which consists of a .crt and .key # file. # Note: Even if you set DEPLOY_HTTPS to True, you still need # to set the port to 443 manually. DEPLOY_HTTPS = False SSL_CERT = { 'cert': '/path/to/cert', 'key': '/path/to/key' } # --------------------------------------------------- # Debug configuration # --------------------------------------------------- DEBUG = False # --------------------------------------------------- # Database configuration # --------------------------------------------------- SQLALCHEMY_DATABASE_URI = os.environ['KR_APP_DB_URI'] SQLALCHEMY_ECHO = False SQLALCHEMY_TRACK_MODIFICATIONS = False # Should the database tables be automatically created DB_AUTO_CREATE = True # Should the database be automatically migrated when updates exist # Note: This is True by default if this configuration is not applied, # under the assumption that if you are concerned about this file # you are probably interested in minimising risk to stability and handling # database upgrades manually. Manual database migrations can be # performed using `knowledge_repo --repo <> db_upgrade ...`. DB_AUTO_UPGRADE = False KR_REPO_DB_PATH = "mysql+mysqlconnector://%s:%s@%s:%s/%s"%(os.environ['KR_REPO_DB_USER'],os.environ['KR_REPO_DB_PWD'],os.environ['KR_REPO_DB_URI'],os.environ['KR_REPO_DB_PORT'],os.environ['KR_REPO_DB_NAME']) # --------------------------------------------------- # Authentication configuration # --------------------------------------------------- # Authentication providers allow users to sign into the Knowledge Repo # in a variety of different ways. You can create your own subclass of # `KnowledgeAuthProvider` and add either the instance or identifier # used for that class below. # By default, the knowledge repo offers: # ['debug', 'oauth2', 'bitbucket', 'github', 'google'] AUTH_PROVIDERS = [] # If you are going to use a OAuth provider, you will need to specify client ids # and private tokens. This can be done by instantiating instances of # `OAuth2Provider` and adding them to the above list, or by specifying OAuth # connection properties as demonstrated below for the GitHub authenticator. OAUTH_GITHUB_CLIENT_ID = 'cca9f72b123039d23992' OAUTH_GITHUB_CLIENT_SECRET = '01750927087e549d1eebe1a3894f43ccd849b9ca' # To configure a generic OAuth provider that is not one of the presets # provided, you may use the provider 'oauth2' which creates an empty, # unconfigured OAuth2Provider. You must then override its configuration. 
# For example, for a self-managed Gitlab CE instance at gitlab.example.com: # OAUTH_OAUTH2_BASE_URL = 'https://gitlab.example.com/api/v4/' # OAUTH_OAUTH2_AUTHORIZATION_URL = 'https://gitlab.example.com/oauth/authorize' # OAUTH_OAUTH2_TOKEN_URL = 'https://gitlab.example.com/oauth/token' # OAUTH_OAUTH2_AUTO_REFRESH_URL = 'https://gitlab.example.com/oauth/token' # OAUTH_OAUTH2_SCOPES = 'api' # OAUTH_OAUTH2_USER_INFO_ENDPOINT = 'user' # OAUTH_OAUTH2_USER_INFO_MAPPING = { # 'identifier': 'username', # 'name': 'name', # 'avatar_uri': 'avatar_url' # } # OAUTH_OAUTH2_VERIFY_SSL_CERTS = '/path/to/certs/my.ca-bundle' # OAUTH_OAUTH2_CLIENT_ID = '<client id>' # OAUTH_OAUTH2_CLIENT_SECRET = '<client secret>' # The configuration OAUTH_<name>_VERIFY_SSL_CERTS is what is passed to the # 'verify' parameter in the Requests module, and can be used to disable # HTTPS verification (not recommended) or provide a custom CA bundle. See: # http://docs.python-requests.org/en/master/user/advanced/#ssl-cert-verification # You may also override the .validate() method of a KnowledgeAuthProvider # to perform an additional validation step before authenticating a user. # The following example checks whether a user has access to the git remote # of the local Knowledge Repository: # def OAUTH_OAUTH2_VALIDATE(provider, user): # # if provider.app.repository.git_has_remote: # # url_parts = ( # provider.app.repository.git_remote.url.split(':') # ) # # url_subparts = url_parts[1].split('/') # # if url_parts[0] == "[email protected]": # git_project = ( # url_subparts[0] + "%2F" + url_subparts[1].split('.')[0]) # elif ( # url_parts[0] == "https" # and url_subparts[2] == "gitlab.example.com" # ): # git_project = ( # url_subparts[3] + "%2F" + url_subparts[4].split('.')[0]) # else: # provider.app.logger.warning( # "User validation failed: unexpected git remote url [" # + provider.app.repository.git_remote.url + "]") # return False # # user_validate_url = provider.base_url + "projects/" + git_project # # resp = provider.oauth_client.get( # user_validate_url, # verify=OAUTH_OAUTH2_VERIFY_HTTPS) # # if resp.status_code == 200: # return True # else: # provider.app.logger.warning( # "User validation failed: validation URL [" # + user_validate_url + "] returned HTTP status [" # + str(resp.status_code) + "]") # You can also forgo a fully-fledged sign in process for users by hosting the # knowledge repository behind a proxy server that pre-authenticates users, and # adds the appropriate user identifier to the http headers of the request. If # enabled below, then they take precedence over any other forms of # authentication. If the call to `AUTH_MAP_REQUEST_HEADERS` results in a null # user identifier, then the authentication flow will fall back to use any of the # providers specified above. AUTH_USE_REQUEST_HEADERS = True # If using headers to authenticate, the following function should be implemented # to transform a dictionary of headers into a dictionary of user attributes. # Currently only 'identifier', 'avatar_uri', 'name' and 'email' are supported. # If this method returns `None`, or `identifier` is not supplied, then the # authorization flow will fall back to other authentication methods. 
def AUTH_MAP_REQUEST_HEADERS(cookies): import base64,json public_token = [cookies[key] for key in cookies.keys() if (key.startswith("CognitoIdentityServiceProvider") and key.endswith("idToken"))] if len(public_token)==0: return{ 'identifier' : 'Anonymous' , 'email' : 'Anonymous' } else: public_token = public_token[0] id_token = public_token.split('.')[1] id_token += "="*((4-len(id_token)%4)%4) token_str = base64.b64decode(id_token).decode('ascii') token = json.loads(token_str) return { 'identifier': token['name'], 'name': token['name'], 'email': token['email'] } # The following AUTH_USER_IDENTIFIER* configuration keys are deprecated and # will be removed in v0.9. #AUTH_USER_IDENTIFIER_REQUEST_HEADER = 'polly-auth' def AUTH_USER_IDENTIFIER_REQUEST_HEADER_MAPPING(identifier): # print('identifier:',identifier) return identifier # If the server desires to modify the attributes of the `User` object associated with # users logged in via any of the above authentication providers, it can do so via # this configuration key. This function will be run once at user login (if using # an `AuthenticationProvider`, and then at most once during any caching lifetime # period (as specified below). Note that attributes collected via # `AuthenticationProvider`s will not be updated after initial login (user must # relogin in order to reset those attributes). def AUTH_USER_ATTRIBUTE_SETTER(user): return user # The time to wait before re-checking user attributes with the above function # for users logged in via request headers. AUTH_USER_ATTRIBUTE_CACHE_LIFETIME = 24 * 60 * 60 # 1 day # Once a user is logged in via an authentication provider, they will remain # logged in via the use of cookies. By default, this cookie will last one year. # This is managed by `flask_login`, but is copied here for convenience. # For other options regarding sessions, please refer to: # https://flask-login.readthedocs.io/en/latest/#cookie-settings REMEMBER_COOKIE_DURATION = datetime.timedelta(days=365) # --------------------------------------------------- # Policy configuration # --------------------------------------------------- # This section configures various policy related to access control. # Should anonymous users be able to view the post indices POLICY_ANONYMOUS_VIEW_INDEX = True # Should anonymous users be able to view the content of posts POLICY_ANONYMOUS_VIEW_POST = True # Should anonymous users be able to view overall statistics POLICY_ANONYMOUS_VIEW_STATS = True # Should anonymous users be able to download posts (or their source) POLICY_ANONYMOUS_DOWNLOADS = False # --------------------------------------------------- # Repository configuration # --------------------------------------------------- # You may specify a function `prepare_repo` which configures # the repository upon which this server is running. This # takes place after the repository has been instantiated # and before the server is able to serve requests. It is # possible to do anything to the repository, including # substituting the repository for another one. # By default, repositories manage their own configurations, # but this can be risky as they may run arbitrary python code, # which opens a vector for malicious users to compromise # the server. If you want to avoid this risk, pass # the '--safe' (TODO!) option to `knowledge_repo` config and # manually configure the repository here. 
# For example, if your server instance is sitting atop # a meta-repository, it may make sense to update the meta-repository # configuration with that of one of its children. def prepare_repo(repo): return repo # --------------------------------------------------- # Repository Indexing configuration # --------------------------------------------------- # The Knowledge Repo updates the index of available posts on a regular basis. # If the database is not thread-safe (i.e. in the case of SQLite), then the # index will be updated on the main thread before every request that is more # than `INDEXING_INTERVAL` seconds after the last sync completed. Otherwise, # indexing will occur every `INDEXING_INTERVAL` seconds after the previous sync. # Syncing is designed to be compatible with multiple instances of the Knowledge # Repo connected to the same database, across multiple machines and/or # processes; and so a global indexing lock is employed. When a sync begins, # a sync lock is put in place and the responsible process is considered to be # the primary agent responsible for syncing until its last update is more than # `INDEXING_TIMEOUT` seconds old, at which point the lock is ceded to the next requesting # process. Note that `INDEXING_TIMEOUT` must be larger than `INDEXING_INTERVAL` # or strange things might begin to happen. INDEXING_INTERVAL = 60 * 1 # 1 minute INDEXING_TIMEOUT = 60 * 2 # 2 minutes # Whether an index operation should update repositories INDEXING_UPDATES_REPOSITORIES = True # Whether repositories should be updated even without a sync lock (in which case # the repositories will be updated on the sync timers, even if the relevant # process/thread does not have a lock on updating the index). This is useful in # the context of multiple Knowledge Repo servers working together to serve the # repositories across multiple machines, each of which requires repository syncing. # Disable this if (for some reason) you have multiple Knowledge Repo servers # running on the same machine, and you want to avoid potential clashes. This # key is ignored if `INDEXING_UPDATES_REPOSITORIES` is False INDEXING_UPDATES_REPOSITORIES_WITHOUT_LOCK = True # In some cases you may want to disable indexing entirely, which is currently # only ever used by the Knowledge Post previewer. Disabling the index means that # posts will not be discoverable, but if you know the path in the repository you can # view the post with a direct link. INDEXING_ENABLED = False # --------------------------------------------------- # Flask Mail Configuration # Refer to https://pythonhosted.org/flask-mail/ # Unless specified, upstream defaults are used as indicated, # provided that MAIL_SERVER is defined. # --------------------------------------------------- # MAIL_SERVER = 'localhost' # default = 'localhost' # MAIL_PORT = 25 # default = 25 # MAIL_USE_TLS = False # default = False # MAIL_USE_SSL = False # default = False # MAIL_DEBUG = False # default = app.debug # MAIL_USERNAME = None # default = None # MAIL_PASSWORD = None # default = None # MAIL_DEFAULT_SENDER = None # default = None # MAIL_MAX_EMAILS = None # default = None # MAIL_SUPPRESS_SEND = False # default = app.testing # MAIL_ASCII_ATTACHMENTS = False # default = False # -------------------------------------------------- # Web Editor Configuration # -------------------------------------------------- # The web editor can be limited to editing posts under # a limited set of parent directories by setting # WEB_EDITOR_PREFIXES to a list of supported path prefixes. # e.g.
['webposts', 'projects'] WEB_EDITOR_PREFIXES = ['webposts'] # --------------------------------------------------- # Tag configuration # --------------------------------------------------- # Posts with certain tags can be excluded from showing up # in the app. This can be useful for security purposes. EXCLUDED_TAGS = ['trial']
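A minimal, hedged sketch of how the `AUTH_MAP_REQUEST_HEADERS` hook above behaves (not part of the config file; the cookie name and token payload are invented). It builds an unsigned Cognito-style idToken whose middle segment is the base64-encoded JSON payload the hook decodes, and shows the anonymous fallback when no matching cookie is present:

import base64
import json

def _fake_id_token(payload):
    # Unsigned "header.payload.signature" shape; only the payload segment is read.
    body = base64.b64encode(json.dumps(payload).encode('ascii')).decode('ascii').rstrip('=')
    return 'e30.' + body + '.sig'

cookies = {
    'CognitoIdentityServiceProvider.example.idToken':
        _fake_id_token({'name': 'Jane Doe', 'email': 'jane@example.com'}),
}
print(AUTH_MAP_REQUEST_HEADERS(cookies))
# -> {'identifier': 'Jane Doe', 'name': 'Jane Doe', 'email': 'jane@example.com'}
print(AUTH_MAP_REQUEST_HEADERS({}))
# -> {'identifier': 'Anonymous', 'email': 'Anonymous'}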
[]
[]
[ "KR_REPO_DB_URI", "KR_APP_DB_URI", "KR_REPO_DB_PWD", "KR_REPO_DB_PORT", "KR_REPO_DB_USER", "KR_REPO_DB_NAME" ]
[]
["KR_REPO_DB_URI", "KR_APP_DB_URI", "KR_REPO_DB_PWD", "KR_REPO_DB_PORT", "KR_REPO_DB_USER", "KR_REPO_DB_NAME"]
python
6
0
cmd/hypershift/main.go
package main import ( goflag "flag" "fmt" "math/rand" "os" "runtime" "time" "github.com/openshift/origin/pkg/cmd/openshift-osinserver" "github.com/spf13/cobra" "github.com/spf13/pflag" genericapiserver "k8s.io/apiserver/pkg/server" utilflag "k8s.io/apiserver/pkg/util/flag" "k8s.io/apiserver/pkg/util/logs" "github.com/openshift/library-go/pkg/serviceability" "github.com/openshift/origin/pkg/cmd/openshift-apiserver" "github.com/openshift/origin/pkg/cmd/openshift-controller-manager" "github.com/openshift/origin/pkg/cmd/openshift-etcd" "github.com/openshift/origin/pkg/cmd/openshift-kube-apiserver" "github.com/openshift/origin/pkg/cmd/openshift-network-controller" "github.com/openshift/origin/pkg/version" ) func main() { stopCh := genericapiserver.SetupSignalHandler() rand.Seed(time.Now().UTC().UnixNano()) pflag.CommandLine.SetNormalizeFunc(utilflag.WordSepNormalizeFunc) pflag.CommandLine.AddGoFlagSet(goflag.CommandLine) logs.InitLogs() defer logs.FlushLogs() defer serviceability.BehaviorOnPanic(os.Getenv("OPENSHIFT_ON_PANIC"), version.Get())() defer serviceability.Profile(os.Getenv("OPENSHIFT_PROFILE")).Stop() if len(os.Getenv("GOMAXPROCS")) == 0 { runtime.GOMAXPROCS(runtime.NumCPU()) } command := NewHyperShiftCommand(stopCh) if err := command.Execute(); err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(1) } } func NewHyperShiftCommand(stopCh <-chan struct{}) *cobra.Command { cmd := &cobra.Command{ Use: "hypershift", Short: "Combined server command for OpenShift", Run: func(cmd *cobra.Command, args []string) { cmd.Help() os.Exit(1) }, } startEtcd, _ := openshift_etcd.NewCommandStartEtcdServer(openshift_etcd.RecommendedStartEtcdServerName, "hypershift", os.Stdout, os.Stderr) startEtcd.Deprecated = "will be removed in 3.10" startEtcd.Hidden = true cmd.AddCommand(startEtcd) startOpenShiftAPIServer := openshift_apiserver.NewOpenShiftAPIServerCommand(openshift_apiserver.RecommendedStartAPIServerName, "hypershift", os.Stdout, os.Stderr, stopCh) cmd.AddCommand(startOpenShiftAPIServer) startOpenShiftKubeAPIServer := openshift_kube_apiserver.NewOpenShiftKubeAPIServerServerCommand(openshift_kube_apiserver.RecommendedStartAPIServerName, "hypershift", os.Stdout, os.Stderr, stopCh) cmd.AddCommand(startOpenShiftKubeAPIServer) startOpenShiftControllerManager := openshift_controller_manager.NewOpenShiftControllerManagerCommand(openshift_controller_manager.RecommendedStartControllerManagerName, "hypershift", os.Stdout, os.Stderr) cmd.AddCommand(startOpenShiftControllerManager) startOpenShiftNetworkController := openshift_network_controller.NewOpenShiftNetworkControllerCommand(openshift_network_controller.RecommendedStartNetworkControllerName, "hypershift", os.Stdout, os.Stderr) cmd.AddCommand(startOpenShiftNetworkController) startOsin := openshift_osinserver.NewOpenShiftOsinServer(os.Stdout, os.Stderr, stopCh) startOsin.Deprecated = "will be removed in 4.0" startOsin.Hidden = true cmd.AddCommand(startOsin) return cmd }
[ "\"OPENSHIFT_ON_PANIC\"", "\"OPENSHIFT_PROFILE\"", "\"GOMAXPROCS\"" ]
[]
[ "OPENSHIFT_PROFILE", "OPENSHIFT_ON_PANIC", "GOMAXPROCS" ]
[]
["OPENSHIFT_PROFILE", "OPENSHIFT_ON_PANIC", "GOMAXPROCS"]
go
3
0
KayakMask/KayakMask/wsgi.py
""" WSGI config for KayakMask project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'KayakMask.settings') application = get_wsgi_application()
[]
[]
[]
[]
[]
python
0
0
cmd/pod-mutator/opts.go
package main import ( "github.com/liqotech/liqo/pkg/mutate" "os" ) const ( defaultCertFile = "/etc/ssl/liqo/cert.pem" defaultKeyFile = "/etc/ssl/liqo/key.pem" ) func setOptions(c *mutate.MutationConfig) { if c.KeyFile = os.Getenv("liqokey"); c.KeyFile == "" { c.KeyFile = defaultKeyFile } if c.CertFile = os.Getenv("liqocert"); c.CertFile == "" { c.CertFile = defaultCertFile } }
[ "\"liqokey\"", "\"liqocert\"" ]
[]
[ "liqocert", "liqokey" ]
[]
["liqocert", "liqokey"]
go
2
0
pkg/cluster/clusterLeaderElection.go
package cluster import ( "context" "fmt" "os" "os/signal" "path/filepath" "syscall" "time" "github.com/davecgh/go-spew/spew" "github.com/kube-vip/kube-vip/pkg/bgp" "github.com/kube-vip/kube-vip/pkg/k8s" "github.com/kube-vip/kube-vip/pkg/kubevip" "github.com/kube-vip/kube-vip/pkg/loadbalancer" "github.com/kube-vip/kube-vip/pkg/packet" "github.com/packethost/packngo" log "github.com/sirupsen/logrus" v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/leaderelection" "k8s.io/client-go/tools/leaderelection/resourcelock" watchtools "k8s.io/client-go/tools/watch" ) const plunderLock = "plndr-cp-lock" // Manager defines the manager of the load-balancing services type Manager struct { KubernetesClient *kubernetes.Clientset // This channel is used to signal a shutdown SignalChan chan os.Signal } // NewManager will create a new managing object func NewManager(path string, inCluster bool, port int) (*Manager, error) { var hostname string // If the path passed is empty and not running in the cluster, // attempt to look for a kubeconfig in the default HOME dir. if len(path) == 0 && !inCluster { path = filepath.Join(os.Getenv("HOME"), ".kube", "config") // We modify the config so that we can always speak to the correct host id, err := os.Hostname() if err != nil { return nil, err } hostname = fmt.Sprintf("%s:%v", id, port) } clientset, err := k8s.NewClientset(path, inCluster, hostname) if err != nil { return nil, fmt.Errorf("error creating a new k8s clientset: %v", err) } return &Manager{ KubernetesClient: clientset, }, nil } // StartCluster - Begins a running instance of the Leader Election cluster func (cluster *Cluster) StartCluster(c *kubevip.Config, sm *Manager, bgpServer *bgp.Server) error { id, err := os.Hostname() if err != nil { return err } log.Infof("Beginning cluster membership, namespace [%s], lock name [%s], id [%s]", c.Namespace, plunderLock, id) // we use the Lease lock type since edits to Leases are less common // and fewer objects in the cluster watch "all Leases".
lock := &resourcelock.LeaseLock{ LeaseMeta: metav1.ObjectMeta{ Name: plunderLock, Namespace: c.Namespace, }, Client: sm.KubernetesClient.CoordinationV1(), LockConfig: resourcelock.ResourceLockConfig{ Identity: id, }, } // use a Go context so we can tell the leaderelection code when we // want to step down ctx, cancel := context.WithCancel(context.Background()) defer cancel() // use a Go context so we can tell the arp loop code when we // want to step down ctxArp, cancelArp := context.WithCancel(context.Background()) defer cancelArp() // use a Go context so we can tell the dns loop code when we // want to step down ctxDNS, cancelDNS := context.WithCancel(context.Background()) defer cancelDNS() // listen for interrupts or the Linux SIGTERM signal and cancel // our context, which the leader election code will observe and // step down signalChan := make(chan os.Signal, 1) // Add Notification for Userland interrupt signal.Notify(signalChan, syscall.SIGINT) // Add Notification for SIGTERM (sent from Kubernetes) signal.Notify(signalChan, syscall.SIGTERM) go func() { <-signalChan log.Info("Received termination, signaling shutdown") // Cancel the context, which will in turn cancel the leadership cancel() // Cancel the arp context, which will in turn stop any broadcasts }() // (attempt to) Remove the virtual IP, in case it already exists err = cluster.Network.DeleteIP() if err != nil { log.Errorf("could not delete virtualIP: %v", err) } // Defer a function to check if the bgpServer has been created and if so attempt to close it defer func() { if bgpServer != nil { bgpServer.Close() } }() // If Packet is enabled then we can begin our preparation work var packetClient *packngo.Client if c.EnableMetal { if c.ProviderConfig != "" { key, project, err := packet.GetPacketConfig(c.ProviderConfig) if err != nil { log.Error(err) } else { // Set the environment variable with the key for the project os.Setenv("PACKET_AUTH_TOKEN", key) // Update the configuration with the project key c.MetalProjectID = project } } packetClient, err = packngo.NewClient() if err != nil { log.Error(err) } // We're using Packet with BGP, populate the Peer information from the API if c.EnableBGP { log.Infoln("Looking up the BGP configuration from packet") err = packet.BGPLookup(packetClient, c) if err != nil { log.Error(err) } } } if c.EnableBGP { // Let's start BGP log.Info("Starting the BGP server to advertise VIP routes to BGP peers") bgpServer, err = bgp.NewBGPServer(&c.BGPConfig) if err != nil { log.Error(err) } } // start the leader election code loop leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{ Lock: lock, // IMPORTANT: you MUST ensure that any code you have that // is protected by the lease must terminate **before** // you call cancel. Otherwise, you could have a background // loop still running and another process could // get elected before your background loop finished, violating // the stated goal of the lease.
ReleaseOnCancel: true, LeaseDuration: time.Duration(c.LeaseDuration) * time.Second, RenewDeadline: time.Duration(c.RenewDeadline) * time.Second, RetryPeriod: time.Duration(c.RetryPeriod) * time.Second, Callbacks: leaderelection.LeaderCallbacks{ OnStartedLeading: func(ctx context.Context) { // As we're leading lets start the vip service err = cluster.vipService(ctxArp, ctxDNS, c, sm, bgpServer, packetClient) if err != nil { log.Errorf("Error starting the VIP service on the leader [%s]", err) } }, OnStoppedLeading: func() { // we can do cleanup here log.Info("This node is becoming a follower within the cluster") // Stop the dns context cancelDNS() // Stop the Arp context if it is running cancelArp() // Stop the BGP server if bgpServer != nil { err = bgpServer.Close() if err != nil { log.Warnf("%v", err) } } err = cluster.Network.DeleteIP() if err != nil { log.Warnf("%v", err) } log.Fatal("lost leadership, restarting kube-vip") }, OnNewLeader: func(identity string) { // we're notified when new leader elected log.Infof("Node [%s] is assuming leadership of the cluster", identity) }, }, }) return nil } func (sm *Manager) NodeWatcher(lb *loadbalancer.IPVSLoadBalancer, port int) error { // Use a restartable watcher, as this should help in the event of etcd or timeout issues log.Infof("Kube-Vip is watching nodes for control-plane labels") labelSelector := metav1.LabelSelector{MatchLabels: map[string]string{"node-role.kubernetes.io/control-plane": ""}} listOptions := metav1.ListOptions{ LabelSelector: labels.Set(labelSelector.MatchLabels).String(), } rw, err := watchtools.NewRetryWatcher("1", &cache.ListWatch{ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { return sm.KubernetesClient.CoreV1().Nodes().Watch(context.Background(), listOptions) }, }) if err != nil { return fmt.Errorf("error creating label watcher: %s", err.Error()) } go func() { <-sm.SignalChan log.Info("Received termination, signaling shutdown") // Cancel the context rw.Stop() }() ch := rw.ResultChan() //defer rw.Stop() for event := range ch { // We need to inspect the event and get ResourceVersion out of it switch event.Type { case watch.Added, watch.Modified: node, ok := event.Object.(*v1.Node) if !ok { return fmt.Errorf("Unable to parse Kubernetes Node from Annotation watcher") } //Find the node IP address (this isn't foolproof) for x := range node.Status.Addresses { if node.Status.Addresses[x].Type == v1.NodeInternalIP { err = lb.AddBackend(node.Status.Addresses[x].Address, port) if err != nil { log.Errorf("Add IPVS backend [%v]", err) } } } case watch.Deleted: node, ok := event.Object.(*v1.Node) if !ok { return fmt.Errorf("Unable to parse Kubernetes Node from Annotation watcher") } //Find the node IP address (this isn't foolproof) for x := range node.Status.Addresses { if node.Status.Addresses[x].Type == v1.NodeInternalIP { err = lb.RemoveBackend(node.Status.Addresses[x].Address, port) if err != nil { log.Errorf("Del IPVS backend [%v]", err) } } } log.Infof("Node [%s] has been deleted", node.Name) case watch.Bookmark: // Un-used case watch.Error: log.Error("Error attempting to watch Kubernetes Nodes") // This round trip allows us to handle unstructured status errObject := apierrors.FromObject(event.Object) statusErr, ok := errObject.(*apierrors.StatusError) if !ok { log.Errorf(spew.Sprintf("Received an error which is not *metav1.Status but %#+v", event.Object)) } status := statusErr.ErrStatus log.Errorf("%v", status) default: } } log.Infoln("Exiting Annotations watcher") return nil }
[ "\"HOME\"" ]
[]
[ "HOME" ]
[]
["HOME"]
go
1
0
main.py
import os import logging import serial from picloud_client import HttpClient SERIAL_PORT = '/dev/ttyAMA0' SERIAL_RATE = 9600 PICLOUD_EVENT = 'home:thpl' PICLOUD_HTTP_URL = os.getenv('PICLOUD_HTTP_URL') assert PICLOUD_HTTP_URL PICLOUD_API_KEY = os.getenv('PICLOUD_API_KEY') assert PICLOUD_API_KEY logging.basicConfig( level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s', filename='/tmp/thpl-data-reporter.log', filemode='w') def main(): picloud = HttpClient( url=PICLOUD_HTTP_URL, api_key=PICLOUD_API_KEY, client_name='THPL-Data-Reporter') ser = serial.Serial(SERIAL_PORT, SERIAL_RATE) while True: try: sensor_json = ser.readline() picloud.publish(event=PICLOUD_EVENT, data=sensor_json) except Exception as e: logging.exception(e) raise e if __name__ == "__main__": main()
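For offline testing of the reporter loop above, a stand-in mirroring only the `HttpClient` surface actually used (constructor keywords and `publish`) could be swapped in; this is a sketch, not part of the picloud_client package:

import logging

class FakeHttpClient:
    """Hypothetical drop-in for picloud_client.HttpClient; logs instead of sending."""
    def __init__(self, url, api_key, client_name):
        self.url, self.api_key, self.client_name = url, api_key, client_name

    def publish(self, event, data):
        # Record the would-be publish call locally rather than hitting the network.
        logging.debug('publish %s -> %s: %r', event, self.url, data)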
[]
[]
[ "PICLOUD_API_KEY", "PICLOUD_HTTP_URL" ]
[]
["PICLOUD_API_KEY", "PICLOUD_HTTP_URL"]
python
2
0
tap_list_providers/parsers/thenook.py
"""HTML scraper for The Nook""" from decimal import Decimal import logging import os from bs4 import BeautifulSoup import requests import configurations from django.core.exceptions import ImproperlyConfigured, AppRegistryNotReady # boilerplate code necessary for launching outside manage.py try: from ..base import BaseTapListProvider except (ImproperlyConfigured, AppRegistryNotReady): os.environ['DJANGO_SETTINGS_MODULE'] = 'hsv_dot_beer.config' os.environ.setdefault("DJANGO_CONFIGURATION", "Local") configurations.setup() from ..base import BaseTapListProvider from beers.models import Manufacturer from taps.models import Tap LOG = logging.getLogger(__name__) class NookParser(BaseTapListProvider): """Parser for The Nook's static HTML page Note: because The Nook uses rows inside of columns instead of the more common reverse, we essentially have to look up the components we need separately and then zip() them together. """ provider_name = 'nook_html' NAME_COLUMN_ID = 'id9' STYLE_COLUMN_ID = 'id10' BREWERY_COLUMN_ID = 'id12' ABV_COLUMN_ID = 'id17' def __init__(self): self.parser = None super().__init__() def fetch_html(self, url): self.parser = BeautifulSoup(requests.get(url).content, 'html.parser') def dump_html(self): print(self.parser.prettify()) def get_names(self): return list( i.contents[0].replace('*', '').strip() for i in self.parser.find(id=self.NAME_COLUMN_ID).find_all('p') ) def get_abvs(self): return list( Decimal(i.contents[0].strip()) # weirdly, the ABVs are in spans for i in self.parser.find(id=self.ABV_COLUMN_ID).find_all('span') ) def get_manufacturers(self): return list( i.contents[0] for i in self.parser.find(id=self.BREWERY_COLUMN_ID).find_all('p') ) def get_styles(self): return list( i.contents[0].strip() for i in self.parser.find(id=self.STYLE_COLUMN_ID).find_all('p') ) def venue_details(self): return enumerate(zip( self.get_names(), self.get_manufacturers(), self.get_abvs(), self.get_styles(), )) def handle_venue(self, venue): url = venue.api_configuration.url self.fetch_html(url) taps = {tap.tap_number: tap for tap in venue.taps.all()} manufacturers = {mfg.name: mfg for mfg in Manufacturer.objects.filter( name__in=self.get_manufacturers() )} for index, (name, mfg, abv, style) in self.venue_details(): tap_number = index + 1 # 1. get the tap try: tap = taps[tap_number] except KeyError: tap = Tap(venue=venue, tap_number=tap_number) # 2. get the mfg try: manufacturer = manufacturers[mfg] except KeyError: manufacturer = self.get_manufacturer(name=mfg) manufacturers[manufacturer.name] = manufacturer # 3. get the beer beer = self.get_beer( name, manufacturer, abv=abv, style=style, ) if tap.beer_id != beer.id: tap.beer = beer # only save if beer changed so as not to disturb updated time LOG.debug('Saving %s on tap %s', beer, tap.tap_number) tap.save() else: LOG.debug( 'Not saving changes to beer %s on tap %s', beer, tap.tap_number, )
[]
[]
[ "DJANGO_SETTINGS_MODULE" ]
[]
["DJANGO_SETTINGS_MODULE"]
python
1
0
vendor/honnef.co/go/tools/internal/cache/cache.go
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package cache implements a build artifact cache. // // This package is a slightly modified fork of Go's // cmd/go/internal/cache package. package cache import ( "bytes" "github.com/studyzy/crypto/sha256" "encoding/hex" "errors" "fmt" "io" "io/ioutil" "os" "path/filepath" "strconv" "strings" "time" "honnef.co/go/tools/internal/renameio" ) // An ActionID is a cache action key, the hash of a complete description of a // repeatable computation (command line, environment variables, // input file contents, executable contents). type ActionID [HashSize]byte // An OutputID is a cache output key, the hash of an output of a computation. type OutputID [HashSize]byte // A Cache is a package cache, backed by a file system directory tree. type Cache struct { dir string now func() time.Time } // Open opens and returns the cache in the given directory. // // It is safe for multiple processes on a single machine to use the // same cache directory in a local file system simultaneously. // They will coordinate using operating system file locks and may // duplicate effort but will not corrupt the cache. // // However, it is NOT safe for multiple processes on different machines // to share a cache directory (for example, if the directory were stored // in a network file system). File locking is notoriously unreliable in // network file systems and may not suffice to protect the cache. // func Open(dir string) (*Cache, error) { info, err := os.Stat(dir) if err != nil { return nil, err } if !info.IsDir() { return nil, &os.PathError{Op: "open", Path: dir, Err: fmt.Errorf("not a directory")} } for i := 0; i < 256; i++ { name := filepath.Join(dir, fmt.Sprintf("%02x", i)) if err := os.MkdirAll(name, 0777); err != nil { return nil, err } } c := &Cache{ dir: dir, now: time.Now, } return c, nil } // fileName returns the name of the file corresponding to the given id. func (c *Cache) fileName(id [HashSize]byte, key string) string { return filepath.Join(c.dir, fmt.Sprintf("%02x", id[0]), fmt.Sprintf("%x", id)+"-"+key) } var errMissing = errors.New("cache entry not found") const ( // action entry file is "v1 <hex id> <hex out> <decimal size space-padded to 20 bytes> <unixnano space-padded to 20 bytes>\n" hexSize = HashSize * 2 entrySize = 2 + 1 + hexSize + 1 + hexSize + 1 + 20 + 1 + 20 + 1 ) // verify controls whether to run the cache in verify mode. // In verify mode, the cache always returns errMissing from Get // but then double-checks in Put that the data being written // exactly matches any existing entry. This provides an easy // way to detect program behavior that would have been different // had the cache entry been returned from Get. // // verify is enabled by setting the environment variable // GODEBUG=gocacheverify=1. var verify = false // DebugTest is set when GODEBUG=gocachetest=1 is in the environment. var DebugTest = false func init() { initEnv() } func initEnv() { verify = false debugHash = false debug := strings.Split(os.Getenv("GODEBUG"), ",") for _, f := range debug { if f == "gocacheverify=1" { verify = true } if f == "gocachehash=1" { debugHash = true } if f == "gocachetest=1" { DebugTest = true } } } // Get looks up the action ID in the cache, // returning the corresponding output ID and file size, if any. // Note that finding an output ID does not guarantee that the // saved file for that output ID is still available. 
func (c *Cache) Get(id ActionID) (Entry, error) { if verify { return Entry{}, errMissing } return c.get(id) } type Entry struct { OutputID OutputID Size int64 Time time.Time } // get is Get but does not respect verify mode, so that Put can use it. func (c *Cache) get(id ActionID) (Entry, error) { missing := func() (Entry, error) { return Entry{}, errMissing } f, err := os.Open(c.fileName(id, "a")) if err != nil { return missing() } defer f.Close() entry := make([]byte, entrySize+1) // +1 to detect whether f is too long if n, err := io.ReadFull(f, entry); n != entrySize || err != io.ErrUnexpectedEOF { return missing() } if entry[0] != 'v' || entry[1] != '1' || entry[2] != ' ' || entry[3+hexSize] != ' ' || entry[3+hexSize+1+hexSize] != ' ' || entry[3+hexSize+1+hexSize+1+20] != ' ' || entry[entrySize-1] != '\n' { return missing() } eid, entry := entry[3:3+hexSize], entry[3+hexSize:] eout, entry := entry[1:1+hexSize], entry[1+hexSize:] esize, entry := entry[1:1+20], entry[1+20:] //lint:ignore SA4006 See https://github.com/dominikh/go-tools/issues/465 etime, entry := entry[1:1+20], entry[1+20:] var buf [HashSize]byte if _, err := hex.Decode(buf[:], eid); err != nil || buf != id { return missing() } if _, err := hex.Decode(buf[:], eout); err != nil { return missing() } i := 0 for i < len(esize) && esize[i] == ' ' { i++ } size, err := strconv.ParseInt(string(esize[i:]), 10, 64) if err != nil || size < 0 { return missing() } i = 0 for i < len(etime) && etime[i] == ' ' { i++ } tm, err := strconv.ParseInt(string(etime[i:]), 10, 64) if err != nil || size < 0 { return missing() } c.used(c.fileName(id, "a")) return Entry{buf, size, time.Unix(0, tm)}, nil } // GetFile looks up the action ID in the cache and returns // the name of the corresponding data file. func (c *Cache) GetFile(id ActionID) (file string, entry Entry, err error) { entry, err = c.Get(id) if err != nil { return "", Entry{}, err } file = c.OutputFile(entry.OutputID) info, err := os.Stat(file) if err != nil || info.Size() != entry.Size { return "", Entry{}, errMissing } return file, entry, nil } // GetBytes looks up the action ID in the cache and returns // the corresponding output bytes. // GetBytes should only be used for data that can be expected to fit in memory. func (c *Cache) GetBytes(id ActionID) ([]byte, Entry, error) { entry, err := c.Get(id) if err != nil { return nil, entry, err } data, _ := ioutil.ReadFile(c.OutputFile(entry.OutputID)) if sha256.Sum256(data) != entry.OutputID { return nil, entry, errMissing } return data, entry, nil } // OutputFile returns the name of the cache file storing output with the given OutputID. func (c *Cache) OutputFile(out OutputID) string { file := c.fileName(out, "d") c.used(file) return file } // Time constants for cache expiration. // // We set the mtime on a cache file on each use, but at most one per mtimeInterval (1 hour), // to avoid causing many unnecessary inode updates. The mtimes therefore // roughly reflect "time of last use" but may in fact be older by at most an hour. // // We scan the cache for entries to delete at most once per trimInterval (1 day). // // When we do scan the cache, we delete entries that have not been used for // at least trimLimit (5 days). Statistics gathered from a month of usage by // Go developers found that essentially all reuse of cached entries happened // within 5 days of the previous reuse. See golang.org/issue/22990. 
const ( mtimeInterval = 1 * time.Hour trimInterval = 24 * time.Hour trimLimit = 5 * 24 * time.Hour ) // used makes a best-effort attempt to update mtime on file, // so that mtime reflects cache access time. // // Because the reflection only needs to be approximate, // and to reduce the amount of disk activity caused by using // cache entries, used only updates the mtime if the current // mtime is more than an hour old. This heuristic eliminates // nearly all of the mtime updates that would otherwise happen, // while still keeping the mtimes useful for cache trimming. func (c *Cache) used(file string) { info, err := os.Stat(file) if err == nil && c.now().Sub(info.ModTime()) < mtimeInterval { return } os.Chtimes(file, c.now(), c.now()) } // Trim removes old cache entries that are likely not to be reused. func (c *Cache) Trim() { now := c.now() // We maintain in dir/trim.txt the time of the last completed cache trim. // If the cache has been trimmed recently enough, do nothing. // This is the common case. data, _ := ioutil.ReadFile(filepath.Join(c.dir, "trim.txt")) t, err := strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64) if err == nil && now.Sub(time.Unix(t, 0)) < trimInterval { return } // Trim each of the 256 subdirectories. // We subtract an additional mtimeInterval // to account for the imprecision of our "last used" mtimes. cutoff := now.Add(-trimLimit - mtimeInterval) for i := 0; i < 256; i++ { subdir := filepath.Join(c.dir, fmt.Sprintf("%02x", i)) c.trimSubdir(subdir, cutoff) } // Ignore errors from here: if we don't write the complete timestamp, the // cache will appear older than it is, and we'll trim it again next time. renameio.WriteFile(filepath.Join(c.dir, "trim.txt"), []byte(fmt.Sprintf("%d", now.Unix()))) } // trimSubdir trims a single cache subdirectory. func (c *Cache) trimSubdir(subdir string, cutoff time.Time) { // Read all directory entries from subdir before removing // any files, in case removing files invalidates the file offset // in the directory scan. Also, ignore error from f.Readdirnames, // because we don't care about reporting the error and we still // want to process any entries found before the error. f, err := os.Open(subdir) if err != nil { return } names, _ := f.Readdirnames(-1) f.Close() for _, name := range names { // Remove only cache entries (xxxx-a and xxxx-d). if !strings.HasSuffix(name, "-a") && !strings.HasSuffix(name, "-d") { continue } entry := filepath.Join(subdir, name) info, err := os.Stat(entry) if err == nil && info.ModTime().Before(cutoff) { os.Remove(entry) } } } // putIndexEntry adds an entry to the cache recording that executing the action // with the given id produces an output with the given output id (hash) and size. func (c *Cache) putIndexEntry(id ActionID, out OutputID, size int64, allowVerify bool) error { // Note: We expect that for one reason or another it may happen // that repeating an action produces a different output hash // (for example, if the output contains a time stamp or temp dir name). // While not ideal, this is also not a correctness problem, so we // don't make a big deal about it. In particular, we leave the action // cache entries writable specifically so that they can be overwritten. // // Setting GODEBUG=gocacheverify=1 does make a big deal: // in verify mode we are double-checking that the cache entries // are entirely reproducible. As just noted, this may be unrealistic // in some cases but the check is also useful for shaking out real bugs. 
entry := []byte(fmt.Sprintf("v1 %x %x %20d %20d\n", id, out, size, time.Now().UnixNano())) if verify && allowVerify { old, err := c.get(id) if err == nil && (old.OutputID != out || old.Size != size) { // panic to show stack trace, so we can see what code is generating this cache entry. msg := fmt.Sprintf("go: internal cache error: cache verify failed: id=%x changed:<<<\n%s\n>>>\nold: %x %d\nnew: %x %d", id, reverseHash(id), out, size, old.OutputID, old.Size) panic(msg) } } file := c.fileName(id, "a") if err := ioutil.WriteFile(file, entry, 0666); err != nil { // TODO(bcmills): This Remove potentially races with another go command writing to file. // Can we eliminate it? os.Remove(file) return err } os.Chtimes(file, c.now(), c.now()) // mainly for tests return nil } // Put stores the given output in the cache as the output for the action ID. // It may read file twice. The content of file must not change between the two passes. func (c *Cache) Put(id ActionID, file io.ReadSeeker) (OutputID, int64, error) { return c.put(id, file, true) } // PutNoVerify is like Put but disables the verify check // when GODEBUG=goverifycache=1 is set. // It is meant for data that is OK to cache but that we expect to vary slightly from run to run, // like test output containing times and the like. func (c *Cache) PutNoVerify(id ActionID, file io.ReadSeeker) (OutputID, int64, error) { return c.put(id, file, false) } func (c *Cache) put(id ActionID, file io.ReadSeeker, allowVerify bool) (OutputID, int64, error) { // Compute output ID. h := sha256.New() if _, err := file.Seek(0, 0); err != nil { return OutputID{}, 0, err } size, err := io.Copy(h, file) if err != nil { return OutputID{}, 0, err } var out OutputID h.Sum(out[:0]) // Copy to cached output file (if not already present). if err := c.copyFile(file, out, size); err != nil { return out, size, err } // Add to cache index. return out, size, c.putIndexEntry(id, out, size, allowVerify) } // PutBytes stores the given bytes in the cache as the output for the action ID. func (c *Cache) PutBytes(id ActionID, data []byte) error { _, _, err := c.Put(id, bytes.NewReader(data)) return err } // copyFile copies file into the cache, expecting it to have the given // output ID and size, if that file is not present already. func (c *Cache) copyFile(file io.ReadSeeker, out OutputID, size int64) error { name := c.fileName(out, "d") info, err := os.Stat(name) if err == nil && info.Size() == size { // Check hash. if f, err := os.Open(name); err == nil { h := sha256.New() io.Copy(h, f) f.Close() var out2 OutputID h.Sum(out2[:0]) if out == out2 { return nil } } // Hash did not match. Fall through and rewrite file. } // Copy file to cache directory. mode := os.O_RDWR | os.O_CREATE if err == nil && info.Size() > size { // shouldn't happen but fix in case mode |= os.O_TRUNC } f, err := os.OpenFile(name, mode, 0666) if err != nil { return err } defer f.Close() if size == 0 { // File now exists with correct size. // Only one possible zero-length file, so contents are OK too. // Early return here makes sure there's a "last byte" for code below. return nil } // From here on, if any of the I/O writing the file fails, // we make a best-effort attempt to truncate the file f // before returning, to avoid leaving bad bytes in the file. // Copy file to f, but also into h to double-check hash. 
if _, err := file.Seek(0, 0); err != nil { f.Truncate(0) return err } h := sha256.New() w := io.MultiWriter(f, h) if _, err := io.CopyN(w, file, size-1); err != nil { f.Truncate(0) return err } // Check last byte before writing it; writing it will make the size match // what other processes expect to find and might cause them to start // using the file. buf := make([]byte, 1) if _, err := file.Read(buf); err != nil { f.Truncate(0) return err } h.Write(buf) sum := h.Sum(nil) if !bytes.Equal(sum, out[:]) { f.Truncate(0) return fmt.Errorf("file content changed underfoot") } // Commit cache file entry. if _, err := f.Write(buf); err != nil { f.Truncate(0) return err } if err := f.Close(); err != nil { // Data might not have been written, // but file may look like it is the right size. // To be extra careful, remove cached file. os.Remove(name) return err } os.Chtimes(name, c.now(), c.now()) // mainly for tests return nil }
[ "\"GODEBUG\"" ]
[]
[ "GODEBUG" ]
[]
["GODEBUG"]
go
1
0
pkg/cmd/pulumi/pulumi.go
// Copyright 2016-2018, Pulumi Corporation. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "bufio" "bytes" "encoding/json" "fmt" user "github.com/tweekmonster/luser" "net/http" "net/url" "os" "os/exec" "path/filepath" "regexp" "runtime" "strings" "time" "github.com/blang/semver" "github.com/djherbis/times" "github.com/docker/docker/pkg/term" "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/pulumi/pulumi/pkg/v2/backend/display" "github.com/pulumi/pulumi/pkg/v2/backend/filestate" "github.com/pulumi/pulumi/pkg/v2/backend/httpstate" "github.com/pulumi/pulumi/pkg/v2/backend/httpstate/client" "github.com/pulumi/pulumi/pkg/v2/version" "github.com/pulumi/pulumi/sdk/v2/go/common/diag" "github.com/pulumi/pulumi/sdk/v2/go/common/diag/colors" "github.com/pulumi/pulumi/sdk/v2/go/common/util/cmdutil" "github.com/pulumi/pulumi/sdk/v2/go/common/util/contract" "github.com/pulumi/pulumi/sdk/v2/go/common/util/httputil" "github.com/pulumi/pulumi/sdk/v2/go/common/util/logging" "github.com/pulumi/pulumi/sdk/v2/go/common/workspace" ) // NewPulumiCmd creates a new Pulumi Cmd instance. func NewPulumiCmd() *cobra.Command { var cwd string var logFlow bool var logToStderr bool var tracing string var tracingHeaderFlag string var profiling string var verbose int var color string updateCheckResult := make(chan *diag.Diag) cmd := &cobra.Command{ Use: "pulumi", Short: "Pulumi command line", Long: "Pulumi - Modern Infrastructure as Code\n" + "\n" + "To begin working with Pulumi, run the `pulumi new` command:\n" + "\n" + " $ pulumi new\n" + "\n" + "This will prompt you to create a new project for your cloud and language of choice.\n" + "\n" + "The most common commands from there are:\n" + "\n" + " - pulumi up : Deploy code and/or resource changes\n" + " - pulumi stack : Manage instances of your project\n" + " - pulumi config : Alter your stack's configuration or secrets\n" + " - pulumi destroy : Tear down your stack's resources entirely\n" + "\n" + "For more information, please visit the project page: https://www.pulumi.com/docs/", PersistentPreRun: cmdutil.RunFunc(func(cmd *cobra.Command, args []string) error { // We run this method for its side-effects. On windows, this will enable the windows terminal // to understand ANSI escape codes. _, _, _ = term.StdStreams() // If we fail before we start the async update check, go ahead and close the // channel since we know it will never receive a value. var waitForUpdateCheck bool defer func() { if !waitForUpdateCheck { close(updateCheckResult) } }() // For all commands, attempt to grab out the --color value provided so we // can set the GlobalColorization value to be used by any code that doesn't // get DisplayOptions passed in. 
cmdFlag := cmd.Flag("color") if cmdFlag != nil { err := cmdutil.SetGlobalColorization(cmdFlag.Value.String()) if err != nil { return err } } if cwd != "" { if err := os.Chdir(cwd); err != nil { return err } } logging.InitLogging(logToStderr, verbose, logFlow) cmdutil.InitTracing("pulumi-cli", "pulumi", tracing) if tracingHeaderFlag != "" { tracingHeader = tracingHeaderFlag } if profiling != "" { if err := cmdutil.InitProfiling(profiling); err != nil { logging.Warningf("could not initialize profiling: %v", err) } } if cmdutil.IsTruthy(os.Getenv("PULUMI_SKIP_UPDATE_CHECK")) { logging.V(5).Infof("skipping update check") } else { // Run the version check in parallel so that it doesn't block executing the command. // If there is a new version to report, we will do so after the command has finished. waitForUpdateCheck = true go func() { updateCheckResult <- checkForUpdate() close(updateCheckResult) }() } return nil }), PersistentPostRun: func(cmd *cobra.Command, args []string) { // Before exiting, if there is a new version of the CLI available, print it out. jsonFlag := cmd.Flag("json") isJSON := jsonFlag != nil && jsonFlag.Value.String() == "true" checkVersionMsg, ok := <-updateCheckResult if ok && checkVersionMsg != nil && !isJSON { cmdutil.Diag().Warningf(checkVersionMsg) } logging.Flush() cmdutil.CloseTracing() if profiling != "" { if err := cmdutil.CloseProfiling(profiling); err != nil { logging.Warningf("could not close profiling: %v", err) } } }, } cmd.PersistentFlags().StringVarP(&cwd, "cwd", "C", "", "Run pulumi as if it had been started in another directory") cmd.PersistentFlags().BoolVarP(&cmdutil.Emoji, "emoji", "e", runtime.GOOS == "darwin", "Enable emojis in the output") cmd.PersistentFlags().BoolVar(&filestate.DisableIntegrityChecking, "disable-integrity-checking", false, "Disable integrity checking of checkpoint files") cmd.PersistentFlags().BoolVar(&logFlow, "logflow", false, "Flow log settings to child processes (like plugins)") cmd.PersistentFlags().BoolVar(&logToStderr, "logtostderr", false, "Log to stderr instead of to files") cmd.PersistentFlags().BoolVar(&cmdutil.DisableInteractive, "non-interactive", false, "Disable interactive mode for all commands") cmd.PersistentFlags().StringVar(&tracing, "tracing", "", "Emit tracing to the specified endpoint. Use the `file:` scheme to write tracing data to a local file") cmd.PersistentFlags().StringVar(&profiling, "profiling", "", "Emit CPU and memory profiles and an execution trace to '[filename].[pid].{cpu,mem,trace}', respectively") cmd.PersistentFlags().IntVarP(&verbose, "verbose", "v", 0, "Enable verbose logging (e.g., v=3); anything >3 is very verbose") cmd.PersistentFlags().StringVar( &color, "color", "auto", "Colorize output. 
Choices are: always, never, raw, auto") // Common commands: // - Getting Started Commands: cmd.AddCommand(newNewCmd()) // - Deploy Commands: cmd.AddCommand(newUpCmd()) cmd.AddCommand(newPreviewCmd()) cmd.AddCommand(newDestroyCmd()) cmd.AddCommand(newWatchCmd()) // - Stack Management Commands: cmd.AddCommand(newStackCmd()) cmd.AddCommand(newConfigCmd()) // - Service Commands: cmd.AddCommand(newLoginCmd()) cmd.AddCommand(newLogoutCmd()) cmd.AddCommand(newWhoAmICmd()) // - Policy Management Commands: cmd.AddCommand(newPolicyCmd()) // - Advanced Commands: cmd.AddCommand(newCancelCmd()) cmd.AddCommand(newRefreshCmd()) cmd.AddCommand(newStateCmd()) // - Other Commands: cmd.AddCommand(newLogsCmd()) cmd.AddCommand(newPluginCmd()) cmd.AddCommand(newVersionCmd()) cmd.AddCommand(newHistoryCmd()) // Less common, and thus hidden, commands: cmd.AddCommand(newGenCompletionCmd(cmd)) cmd.AddCommand(newGenMarkdownCmd(cmd)) // We have a set of commands that are still experimental and that we add only when PULUMI_EXPERIMENTAL is set // to true. if hasExperimentalCommands() { // - Query Commands: cmd.AddCommand(newQueryCmd()) } // We have a set of options that are useful for developers of pulumi that we add when PULUMI_DEBUG_COMMANDS is // set to true. if hasDebugCommands() { cmd.PersistentFlags().StringVar(&tracingHeaderFlag, "tracing-header", "", "Include the tracing header with the given contents.") // - Diagnostic Commands: cmd.AddCommand(newViewTraceCmd()) // For legacy reasons, we make this command available also under PULUMI_DEBUG_COMMANDS, though // PULUMI_EXPERIMENTAL should be preferred. // - Query Commands: cmd.AddCommand(newQueryCmd()) } return cmd } // checkForUpdate checks to see if the CLI needs to be updated, and if so emits a warning, as well as information // as to how it can be upgraded. func checkForUpdate() *diag.Diag { curVer, err := semver.ParseTolerant(version.Version) if err != nil { logging.V(3).Infof("error parsing current version: %s", err) } // We don't care about warning for you to update if you have installed a developer version if isDevVersion(curVer) { return nil } latestVer, oldestAllowedVer, err := getCLIVersionInfo() if err != nil { logging.V(3).Infof("error fetching latest version information: %s", err) } if oldestAllowedVer.GT(curVer) { return diag.RawMessage("", getUpgradeMessage(latestVer, curVer)) } return nil } // getCLIVersionInfo returns information about the latest version of the CLI and the oldest version that should be // allowed without warning. It caches data from the server for a day. func getCLIVersionInfo() (semver.Version, semver.Version, error) { latest, oldest, err := getCachedVersionInfo() if err == nil { return latest, oldest, err } client := client.NewClient(httpstate.DefaultURL(), "", cmdutil.Diag()) latest, oldest, err = client.GetCLIVersionInfo(commandContext()) if err != nil { return semver.Version{}, semver.Version{}, err } brewLatest, isBrew, err := getLatestBrewFormulaVersion() if err != nil { logging.V(3).Infof("error determining if the running executable was installed with brew: %s", err) } if isBrew { // When consulting Homebrew for version info, we just use the latest version as the oldest allowed. latest, oldest = brewLatest, brewLatest } err = cacheVersionInfo(latest, oldest) if err != nil { logging.V(3).Infof("failed to cache version info: %s", err) } return latest, oldest, err } // cacheVersionInfo saves version information in a cache file to be looked up later. 
func cacheVersionInfo(latest semver.Version, oldest semver.Version) error { updateCheckFile, err := workspace.GetCachedVersionFilePath() if err != nil { return err } file, err := os.OpenFile(updateCheckFile, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0600) if err != nil { return err } defer contract.IgnoreClose(file) return json.NewEncoder(file).Encode(cachedVersionInfo{ LatestVersion: latest.String(), OldestWithoutWarning: oldest.String(), }) } // getCachedVersionInfo reads cached information about the newest CLI version, returning the newest version available // as well as the oldest version that should be allowed without warning the user they should upgrade. func getCachedVersionInfo() (semver.Version, semver.Version, error) { updateCheckFile, err := workspace.GetCachedVersionFilePath() if err != nil { return semver.Version{}, semver.Version{}, err } ts, err := times.Stat(updateCheckFile) if err != nil { return semver.Version{}, semver.Version{}, err } if time.Now().After(ts.ModTime().Add(24 * time.Hour)) { return semver.Version{}, semver.Version{}, errors.New("cached expired") } file, err := os.OpenFile(updateCheckFile, os.O_RDONLY, 0600) if err != nil { return semver.Version{}, semver.Version{}, err } defer contract.IgnoreClose(file) var cached cachedVersionInfo if err = json.NewDecoder(file).Decode(&cached); err != nil { return semver.Version{}, semver.Version{}, err } latest, err := semver.ParseTolerant(cached.LatestVersion) if err != nil { return semver.Version{}, semver.Version{}, err } oldest, err := semver.ParseTolerant(cached.OldestWithoutWarning) if err != nil { return semver.Version{}, semver.Version{}, err } return latest, oldest, err } // cachedVersionInfo is the on disk format of the version information the CLI caches between runs. type cachedVersionInfo struct { LatestVersion string `json:"latestVersion"` OldestWithoutWarning string `json:"oldestWithoutWarning"` } // getUpgradeMessage gets a message to display to a user instructing them they are out of date and how to move from // current to latest. func getUpgradeMessage(latest semver.Version, current semver.Version) string { cmd := getUpgradeCommand() msg := fmt.Sprintf("A new version of Pulumi is available. To upgrade from version '%s' to '%s', ", current, latest) if cmd != "" { msg += "run \n " + cmd + "\nor " } msg += "visit https://pulumi.com/docs/reference/install/ for manual instructions and release notes." return msg } // getUpgradeCommand returns a command that will upgrade the CLI to the newest version. If we cannot determine how // the CLI was installed, the empty string is returned.
func getUpgradeCommand() string { curUser, err := user.Current() if err != nil { return "" } exe, err := os.Executable() if err != nil { return "" } isBrew, err := isBrewInstall(exe) if err != nil { logging.V(3).Infof("error determining if the running executable was installed with brew: %s", err) } if isBrew { return "$ brew upgrade pulumi" } if filepath.Dir(exe) != filepath.Join(curUser.HomeDir, workspace.BookkeepingDir, "bin") { return "" } if runtime.GOOS != "windows" { return "$ curl -sSL https://get.pulumi.com | sh" } powershellCmd := `"%SystemRoot%\System32\WindowsPowerShell\v1.0\powershell.exe"` if _, err := exec.LookPath("powershell"); err == nil { powershellCmd = "powershell" } return "> " + powershellCmd + ` -NoProfile -InputFormat None -ExecutionPolicy Bypass -Command "iex ` + `((New-Object System.Net.WebClient).DownloadString('https://get.pulumi.com/install.ps1'))"` } // isBrewInstall returns true if the current running executable is running on macOS and was installed with brew. func isBrewInstall(exe string) (bool, error) { if runtime.GOOS != "darwin" { return false, nil } exePath, err := filepath.EvalSymlinks(exe) if err != nil { return false, err } brewBin, err := exec.LookPath("brew") if err != nil { return false, err } brewPrefixCmd := exec.Command(brewBin, "--prefix", "pulumi") var stdout bytes.Buffer var stderr bytes.Buffer brewPrefixCmd.Stdout = &stdout brewPrefixCmd.Stderr = &stderr if err = brewPrefixCmd.Run(); err != nil { if ee, ok := err.(*exec.ExitError); ok { ee.Stderr = stderr.Bytes() } return false, errors.Wrapf(err, "'brew --prefix pulumi' failed") } brewPrefixCmdOutput := strings.TrimSpace(stdout.String()) if brewPrefixCmdOutput == "" { return false, errors.New("trimmed output from 'brew --prefix pulumi' is empty") } brewPrefixPath, err := filepath.EvalSymlinks(brewPrefixCmdOutput) if err != nil { return false, err } brewPrefixExePath := filepath.Join(brewPrefixPath, "bin", "pulumi") return exePath == brewPrefixExePath, nil } func getLatestBrewFormulaVersion() (semver.Version, bool, error) { exe, err := os.Executable() if err != nil { return semver.Version{}, false, err } isBrew, err := isBrewInstall(exe) if err != nil { return semver.Version{}, false, err } if !isBrew { return semver.Version{}, false, nil } url, err := url.Parse("https://formulae.brew.sh/api/formula/pulumi.json") contract.AssertNoError(err) resp, err := httputil.DoWithRetry(&http.Request{ Method: http.MethodGet, URL: url, }, http.DefaultClient) if err != nil { return semver.Version{}, false, err } defer contract.IgnoreClose(resp.Body) type versions struct { Stable string `json:"stable"` } var formula struct { Versions versions `json:"versions"` } if err := json.NewDecoder(resp.Body).Decode(&formula); err != nil { return semver.Version{}, false, err } stable, err := semver.ParseTolerant(formula.Versions.Stable) if err != nil { return semver.Version{}, false, err } return stable, true, nil } func isDevVersion(s semver.Version) bool { if len(s.Pre) == 0 { return false } devStrings := regexp.MustCompile(`alpha|beta|dev|rc`) return !s.Pre[0].IsNum && devStrings.MatchString(s.Pre[0].VersionStr) } func confirmPrompt(prompt string, name string, opts display.Options) bool { if prompt != "" { fmt.Print( opts.Color.Colorize( fmt.Sprintf("%s%s%s\n", colors.SpecAttention, prompt, colors.Reset))) } fmt.Print( opts.Color.Colorize( fmt.Sprintf("%sPlease confirm that this is what you'd like to do by typing (%s\"%s\"%s):%s ", colors.SpecAttention, colors.SpecPrompt, name, colors.SpecAttention, colors.Reset))) 
reader := bufio.NewReader(os.Stdin) line, _ := reader.ReadString('\n') return strings.TrimSpace(line) == name }
[ "\"PULUMI_SKIP_UPDATE_CHECK\"" ]
[]
[ "PULUMI_SKIP_UPDATE_CHECK" ]
[]
["PULUMI_SKIP_UPDATE_CHECK"]
go
1
0
k8s-proxy/podinfo.py
# Copyright 2018 Datawire. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Emit information about the pod as a JSON blob """ import json import os import re import sys IGNORED_MOUNTS = [ r'/sys($|/.*)', r'/proc($|/.*)', r'/dev($|/.*)', r'/etc/hostname$', r'/etc/resolv.conf$', r'/etc/hosts$', r'/$', ] def get_mount_points(): "Returns a filtered list of mount-points" ret = [] ignore = re.compile('(' + '|'.join(IGNORED_MOUNTS) + ')') splitter = re.compile(r'\s+') try: with open('/proc/mounts', 'r') as mount_fp: for line in mount_fp: mount_point = splitter.split(line)[1] if not ignore.match(mount_point): ret.append(mount_point) except Exception as exc: print("//Failed to get mount points: {}".format(exc), file=sys.stderr) print("//Retrieved so far: {}".format(ret), file=sys.stderr) return [] return ret print( json.dumps( dict( env=dict(os.environ), hostname=open("/etc/hostname").read(), resolv=open("/etc/resolv.conf").read(), mountpoints=get_mount_points(), ) ) )
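A quick, assumed-context check of the mount-point filter above (it reuses `re` and `IGNORED_MOUNTS` from the script; the /proc/mounts lines are made up). Only mount points not matched by the ignore patterns survive:

sample_lines = [
    'proc /proc proc rw 0 0',
    'overlay / overlay rw 0 0',
    '/dev/sda1 /var/lib/data ext4 rw 0 0',
]
ignore = re.compile('(' + '|'.join(IGNORED_MOUNTS) + ')')
splitter = re.compile(r'\s+')
print([splitter.split(line)[1] for line in sample_lines
       if not ignore.match(splitter.split(line)[1])])
# -> ['/var/lib/data']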
[]
[]
[]
[]
[]
python
0
0
atomic_reactor/plugins/input_osv3.py
""" Copyright (c) 2015 Red Hat, Inc All rights reserved. This software may be modified and distributed under the terms of the BSD license. See the LICENSE file for details. Reads input from OpenShift v3 """ import json import os from atomic_reactor.plugin import InputPlugin from atomic_reactor.util import get_build_json, read_yaml from osbs.utils import RegistryURI class OSv3InputPlugin(InputPlugin): key = "osv3" def __init__(self, **kwargs): """ constructor """ # call parent constructor super(OSv3InputPlugin, self).__init__(**kwargs) def get_plugins_with_user_params(self, build_json, user_params): # get the reactor config map and derive an osbs instance from it from osbs.api import OSBS from osbs.conf import Configuration osbs_conf = Configuration(build_json_dir=json.loads(user_params).get('build_json_dir')) osbs = OSBS(osbs_conf, osbs_conf) return osbs.render_plugins_configuration(user_params) def get_value(self, name, default=None): return self.reactor_env.get(name, default) def remove_plugin(self, phase, target_plugin, reason): if phase in self.plugins_json: for index, plugin in enumerate(self.plugins_json[phase]): if plugin['name'] == target_plugin: self.log.info('%s: removing %s from phase %s', reason, target_plugin, phase) del self.plugins_json[phase][index] break def remove_koji_plugins(self): koji_map = self.get_value('koji', {}) if not koji_map.get('hub_url'): # bump_release is removed in PluginsConfiguration if no release value self.remove_plugin('prebuild_plugins', 'bump_release', 'no koji hub available') # inject_parent_image is removed in PluginsConfiguration if no parent image self.remove_plugin('prebuild_plugins', 'inject_parent_image', 'no koji hub available') self.remove_plugin('prebuild_plugins', 'koji_parent', 'no koji hub available') self.remove_plugin('postbuild_plugins', 'koji_upload', 'no koji hub available') self.remove_plugin('exit_plugins', 'koji_promote', 'no koji hub available') self.remove_plugin('exit_plugins', 'koji_import', 'no koji hub available') self.remove_plugin('exit_plugins', 'koji_tag_build', 'no koji hub available') # root and hub are required, so this check is probably redundant if not koji_map.get('root_url'): self.remove_plugin('prebuild_plugins', 'fetch_maven_artifacts', 'no koji root available') def remove_pulp_plugins(self): phases = ('postbuild_plugins', 'exit_plugins') pulp_registry = self.get_value('pulp') koji_hub = self.get_value('koji', {}).get('hub_url') for phase in phases: if not (pulp_registry and koji_hub): self.remove_plugin(phase, 'pulp_pull', 'no pulp or koji available') if not pulp_registry: self.remove_plugin('postbuild_plugins', 'pulp_push', 'no pulp available') self.remove_plugin('postbuild_plugins', 'pulp_sync', 'no pulp available') self.remove_plugin('postbuild_plugins', 'pulp_tag', 'no pulp available') self.remove_plugin('exit_plugins', 'delete_from_registry', 'no pulp available') self.remove_plugin('exit_plugins', 'pulp_publish', 'no pulp available') else: docker_registry = None all_registries = self.get_value('registries', {}) versions = self.get_value('content_versions', ['v1', 'v2']) for registry in all_registries: reguri = RegistryURI(registry.get('url')) if reguri.version == 'v2': # First specified v2 registry is the one we'll tell pulp # to sync from. Keep the http prefix -- pulp wants it. 
docker_registry = registry break if 'v1' not in versions: self.remove_plugin('postbuild_plugins', 'pulp_push', 'v1 content not enabled') if docker_registry: source_registry_str = self.get_value('source_registry', {}).get('url') perform_delete = (source_registry_str is None or RegistryURI(source_registry_str).uri != reguri.uri) if not perform_delete: self.remove_plugin('exit_plugins', 'delete_from_registry', 'no delete needed') else: self.remove_plugin('postbuild_plugins', 'pulp_sync', 'no V2 pulp available') self.remove_plugin('exit_plugins', 'delete_from_registry', 'no V2 pulp available') def remove_plugins_without_parameters(self): """ This used to be handled in BuildRequest, but with REACTOR_CONFIG, osbs-client doesn't have enough information. """ # Compatibility code for dockerfile_content plugin self.remove_plugin('prebuild_plugins', 'dockerfile_content', 'dockerfile_content is deprecated, please remove from config') if not self.reactor_env: return self.remove_koji_plugins() self.remove_pulp_plugins() if not self.get_value('odcs'): self.remove_plugin('prebuild_plugins', 'resolve_composes', 'no odcs available') if not self.get_value('smtp'): self.remove_plugin('exit_plugins', 'sendmail', 'no mailhost available') if not self.get_value('sources_command'): self.remove_plugin('prebuild_plugins', 'distgit_fetch_artefacts', 'no sources command') def run(self): """ each plugin has to implement this method -- it is used to run the plugin actually response from plugin is kept and used in json result response """ user_params = None build_json = get_build_json() git_url = os.environ['SOURCE_URI'] git_ref = os.environ.get('SOURCE_REF', None) image = os.environ['OUTPUT_IMAGE'] self.target_registry = os.environ.get('OUTPUT_REGISTRY', None) self.reactor_env = None try: user_params = os.environ['USER_PARAMS'] self.plugins_json = self.get_plugins_with_user_params(build_json, user_params) # if we get the USER_PARAMS, we'd better get the REACTOR_CONFIG too reactor_config_map = os.environ['REACTOR_CONFIG'] self.reactor_env = read_yaml(reactor_config_map, 'schemas/config.json') except KeyError: try: self.plugins_json = os.environ['ATOMIC_REACTOR_PLUGINS'] except KeyError: raise RuntimeError("No plugin configuration found!") self.plugins_json = json.loads(self.plugins_json) input_json = { 'source': { 'provider': 'git', 'uri': git_url, 'provider_params': {'git_commit': git_ref} }, 'image': image, 'openshift_build_selflink': build_json.get('metadata', {}).get('selfLink', None) } input_json.update(self.plugins_json) self.log.debug("build json: %s", input_json) self.remove_plugins_without_parameters() return input_json @classmethod def is_autousable(cls): return 'BUILD' in os.environ and 'SOURCE_URI' in os.environ and 'OUTPUT_IMAGE' in os.environ
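A hedged sketch of the minimal environment this plugin looks for (all values are placeholders; the exact JSON expected in BUILD is whatever `get_build_json` parses). With these variables set and `USER_PARAMS` absent, `is_autousable()` returns True and `run()` falls back to `ATOMIC_REACTOR_PLUGINS`:

import os

os.environ['BUILD'] = '{"metadata": {"selfLink": "/builds/example"}}'  # placeholder build JSON
os.environ['SOURCE_URI'] = 'https://git.example.com/repo.git'          # placeholder
os.environ['OUTPUT_IMAGE'] = 'registry.example.com/ns/image:latest'    # placeholder
os.environ['ATOMIC_REACTOR_PLUGINS'] = '{"prebuild_plugins": []}'      # placeholder plugin config
print(OSv3InputPlugin.is_autousable())  # -> True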
[]
[]
[ "OUTPUT_IMAGE", "REACTOR_CONFIG", "ATOMIC_REACTOR_PLUGINS", "SOURCE_URI", "USER_PARAMS", "SOURCE_REF", "OUTPUT_REGISTRY" ]
[]
["OUTPUT_IMAGE", "REACTOR_CONFIG", "ATOMIC_REACTOR_PLUGINS", "SOURCE_URI", "USER_PARAMS", "SOURCE_REF", "OUTPUT_REGISTRY"]
python
7
0
DeBERTa/apps/train.py
# Copyright (c) Microsoft, Inc. 2020 # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # # Author: [email protected] # Date: 01/25/2020 # """DeBERTa finetuning runner.""" import os from collections import OrderedDict, Mapping, Sequence import argparse import random import time import numpy as np import math import torch import json from torch.utils.data import DataLoader from ..deberta import GPT2Tokenizer from ..utils import * from ..utils import xtqdm as tqdm from .task_registry import tasks from ..training import DistributedTrainer, initialize_distributed, batch_to, set_random_seed,kill_children from ..data import DistributedBatchSampler, SequentialSampler, BatchSampler, AsyncDataLoader def create_model(args, num_labels, model_class_fn): # Prepare model rank = getattr(args, 'rank', 0) init_model = args.init_model if rank<1 else None model = model_class_fn(init_model, args.model_config, num_labels=num_labels, \ drop_out=args.cls_drop_out, \ pre_trained = args.pre_trained) if args.fp16: model = model.half() return model def train_model(args, model, device, train_data, eval_data): total_examples = len(train_data) num_train_steps = int(len(train_data)*args.num_train_epochs / args.train_batch_size) logger.info(" Training batch size = %d", args.train_batch_size) logger.info(" Num steps = %d", num_train_steps) def data_fn(trainer): return train_data, num_train_steps, None def eval_fn(trainer, model, device, tag): results = run_eval(trainer.args, model, device, eval_data, tag, steps=trainer.trainer_state.steps) eval_metric = np.mean([v[0] for k,v in results.items() if 'train' not in k]) return eval_metric def loss_fn(trainer, model, data): _, loss = model(**data) return loss.mean(), data['input_ids'].size(0) trainer = DistributedTrainer(args, model, device, data_fn, loss_fn = loss_fn, eval_fn = eval_fn, dump_interval = args.dump_interval) trainer.train() def merge_distributed(data_list, max_len=None): merged = [] def gather(data): data_chunks = [torch.zeros_like(data) for _ in range(args.world_size)] torch.distributed.all_gather(data_chunks, data) torch.cuda.synchronize() return data_chunks for data in data_list: if torch.distributed.is_initialized() and torch.distributed.get_world_size()>1: if isinstance(data, Sequence): data_chunks = [] for d in data: chunks_ = gather(d) data_ = torch.cat(chunks_) data_chunks.append(data_) merged.append(data_chunks) else: data_chunks = gather(data) merged.extend(data_chunks) else: merged.append(data) if not isinstance(merged[0], Sequence): merged = torch.cat(merged) if max_len is not None: return merged[:max_len] else: return merged else: data_list=[] for d in zip(*merged): data = torch.cat(d) if max_len is not None: data = data[:max_len] data_list.append(data) return data_list def calc_metrics(predicts, labels, eval_loss, eval_item, eval_results, args, name, prefix, steps, tag): tb_metrics = OrderedDict() result=OrderedDict() metrics_fn = eval_item.metrics_fn predict_fn = eval_item.predict_fn if metrics_fn is None: eval_metric = metric_accuracy(predicts, labels) else: metrics = metrics_fn(predicts, labels) result.update(metrics) critial_metrics = set(metrics.keys()) if eval_item.critial_metrics is None or len(eval_item.critial_metrics)==0 else eval_item.critial_metrics eval_metric = np.mean([v for k,v in metrics.items() if k in critial_metrics]) result['eval_loss'] = eval_loss result['eval_metric'] = eval_metric result['eval_samples'] = len(labels) if args.rank<=0: output_eval_file = 
os.path.join(args.output_dir, "eval_results_{}_{}.txt".format(name, prefix)) with open(output_eval_file, 'w', encoding='utf-8') as writer: logger.info("***** Eval results-{}-{} *****".format(name, prefix)) for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) tb_metrics[f'{name}/{key}'] = result[key] if predict_fn is not None: predict_fn(predicts, args.output_dir, name, prefix) else: output_predict_file = os.path.join(args.output_dir, "predict_results_{}_{}.txt".format(name, prefix)) np.savetxt(output_predict_file, predicts, delimiter='\t') output_label_file = os.path.join(args.output_dir, "predict_labels_{}_{}.txt".format(name, prefix)) np.savetxt(output_label_file, labels, delimiter='\t') if not eval_item.ignore_metric: eval_results[name]=(eval_metric, predicts, labels) _tag = tag + '/' if tag is not None else '' def _ignore(k): ig = ['/eval_samples', '/eval_loss'] for i in ig: if k.endswith(i): return True return False def run_eval(args, model, device, eval_data, prefix=None, tag=None, steps=None): # Run prediction for full data prefix = f'{tag}_{prefix}' if tag is not None else prefix eval_results=OrderedDict() eval_metric=0 no_tqdm = (True if os.getenv('NO_TQDM', '0')!='0' else False) or args.rank>0 for eval_item in eval_data: name = eval_item.name eval_sampler = SequentialSampler(len(eval_item.data)) batch_sampler = BatchSampler(eval_sampler, args.eval_batch_size) batch_sampler = DistributedBatchSampler(batch_sampler, rank=args.rank, world_size=args.world_size) eval_dataloader = DataLoader(eval_item.data, batch_sampler=batch_sampler, num_workers=args.workers) model.eval() eval_loss, eval_accuracy = 0, 0 nb_eval_steps, nb_eval_examples = 0, 0 predicts=[] labels=[] for batch in tqdm(AsyncDataLoader(eval_dataloader), ncols=80, desc='Evaluating: {}'.format(prefix), disable=no_tqdm): batch = batch_to(batch, device) with torch.no_grad(): logits, tmp_eval_loss = model(**batch) label_ids = batch['labels'].to(device) predicts.append(logits) labels.append(label_ids) eval_loss += tmp_eval_loss.mean().item() input_ids = batch['input_ids'] nb_eval_examples += input_ids.size(0) nb_eval_steps += 1 eval_loss = eval_loss / nb_eval_steps predicts = merge_distributed(predicts, len(eval_item.data)) labels = merge_distributed(labels, len(eval_item.data)) if isinstance(predicts, Sequence): for k,pred in enumerate(predicts): calc_metrics(pred.detach().cpu().numpy(), labels.detach().cpu().numpy(), eval_loss, eval_item, eval_results, args, name + f'@{k}', prefix, steps, tag) else: calc_metrics(predicts.detach().cpu().numpy(), labels.detach().cpu().numpy(), eval_loss, eval_item, eval_results, args, name, prefix, steps, tag) return eval_results def run_predict(args, model, device, eval_data, prefix=None): # Run prediction for full data eval_results=OrderedDict() eval_metric=0 for eval_item in eval_data: name = eval_item.name eval_sampler = SequentialSampler(len(eval_item.data)) batch_sampler = BatchSampler(eval_sampler, args.eval_batch_size) batch_sampler = DistributedBatchSampler(batch_sampler, rank=args.rank, world_size=args.world_size) eval_dataloader = DataLoader(eval_item.data, batch_sampler=batch_sampler, num_workers=args.workers) model.eval() predicts=None for batch in tqdm(AsyncDataLoader(eval_dataloader), ncols=80, desc='Evaluating: {}'.format(prefix), disable=args.rank>0): batch = batch_to(batch, device) with torch.no_grad(): logits, _ = model(**batch) if args.world_size>1: logits_all = [torch.zeros_like(logits) for _ in 
range(args.world_size)] torch.distributed.all_gather(logits_all, logits) torch.cuda.synchronize() logits = torch.cat(logits_all) logits = logits.detach().cpu().numpy() if predicts is None: predicts = np.copy(logits) else: predicts = np.append(predicts, logits, axis=0) predicts = predicts[:len(eval_item.data)] if args.rank<=0: output_test_file = os.path.join(args.output_dir, "test_logits_{}_{}.txt".format(name, prefix)) logger.info("***** Dump prediction results-{}-{} *****".format(name, prefix)) logger.info("Location: {}".format(output_test_file)) np.savetxt(output_test_file, predicts, delimiter='\t') predict_fn = eval_item.predict_fn if predict_fn: predict_fn(predicts, args.output_dir, name, prefix) def main(args): if not args.do_train and not args.do_eval and not args.do_predict: raise ValueError("At least one of `do_train` or `do_eval` or `do_predict` must be True.") os.makedirs(args.output_dir, exist_ok=True) task_name = args.task_name.lower() random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) tokenizer = GPT2Tokenizer() processor = tasks[task_name](tokenizer = tokenizer, max_seq_len = args.max_seq_length, data_dir = args.data_dir) label_list = processor.get_labels() eval_data = processor.eval_data(max_seq_len=args.max_seq_length) logger.info(" Evaluation batch size = %d", args.eval_batch_size) if args.do_predict: test_data = processor.test_data(max_seq_len=args.max_seq_length) logger.info(" Prediction batch size = %d", args.predict_batch_size) if args.do_train: train_data = processor.train_data(max_seq_len=args.max_seq_length, mask_gen = None, debug=args.debug) model_class_fn = processor.get_model_class_fn() model = create_model(args, len(label_list), model_class_fn) if args.do_train: with open(os.path.join(args.output_dir, 'model_config.json'), 'w', encoding='utf-8') as fs: fs.write(model.config.to_json_string() + '\n') logger.info("Model config {}".format(model.config)) device = initialize_distributed(args) if not isinstance(device, torch.device): return 0 model.to(device) if args.do_eval: run_eval(args, model, device, eval_data, prefix=args.tag) if args.do_train: train_model(args, model, device, train_data, eval_data) if args.do_predict: run_predict(args, model, device, test_data, prefix=args.tag) def build_argument_parser(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument("--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the .tsv files (or other data files) for the task.") parser.add_argument("--task_name", default=None, type=str, required=True, help="The name of the task to train.") parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the model checkpoints will be written.") ## Other parameters parser.add_argument("--max_seq_length", default=128, type=int, help="The maximum total input sequence length after WordPiece tokenization. 
\n" "Sequences longer than this will be truncated, and sequences shorter \n" "than this will be padded.") parser.add_argument("--do_train", default=False, action='store_true', help="Whether to run training.") parser.add_argument("--do_eval", default=False, action='store_true', help="Whether to run eval on the dev set.") parser.add_argument("--do_predict", default=False, action='store_true', help="Whether to run prediction on the test set.") parser.add_argument("--train_batch_size", default=32, type=int, help="Total batch size for training.") parser.add_argument("--eval_batch_size", default=32, type=int, help="Total batch size for eval.") parser.add_argument("--predict_batch_size", default=32, type=int, help="Total batch size for prediction.") parser.add_argument("--max_grad_norm", default=1, type=float, help="The clip threshold of global gradient norm") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--epsilon", default=1e-6, type=float, help="epsilon setting for Adam.") parser.add_argument("--adam_beta1", default=0.9, type=float, help="The beta1 parameter for Adam.") parser.add_argument("--adam_beta2", default=0.999, type=float, help="The beta2 parameter for Adam.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--warmup_proportion", default=0.1, type=float, help="Proportion of training to perform linear learning rate warmup for. " "E.g., 0.1 = 10%% of training.") parser.add_argument("--lr_schedule_ends", default=0, type=float, help="The ended learning rate scale for learning rate scheduling") parser.add_argument("--lr_schedule", default='warmup_linear', type=str, help="The learning rate scheduler used for traning. " "E.g. warmup_linear, warmup_linear_shift, warmup_cosine, warmup_constant. 
Default, warmup_linear") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument('--seed', type=int, default=1234, help="random seed for initialization") parser.add_argument('--accumulative_update', type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument('--fp16', default=False, type=boolean_string, help="Whether to use 16-bit float precision instead of 32-bit") parser.add_argument('--loss_scale', type=float, default=256, help='Loss scaling, positive power of 2 values can improve fp16 convergence.') parser.add_argument('--scale_steps', type=int, default=1000, help='The steps to wait to increase the loss scale.') parser.add_argument('--init_model', type=str, help="The model state file used to initialize the model weights.") parser.add_argument('--model_config', type=str, help="The config file of bert model.") parser.add_argument('--cls_drop_out', type=float, default=None, help="The config file model initialization and fine tuning.") parser.add_argument('--weight_decay', type=float, default=0.01, help="The weight decay rate") parser.add_argument('--tag', type=str, default='final', help="The tag name of current prediction/runs.") parser.add_argument("--dump_interval", default=10000, type=int, help="Interval steps for generating checkpoint.") parser.add_argument('--lookahead_k', default=-1, type=int, help="lookahead k parameter") parser.add_argument('--lookahead_alpha', default=0.5, type=float, help="lookahead alpha parameter") parser.add_argument('--with_radam', default=False, type=boolean_string, help="whether to use RAdam") parser.add_argument('--opt_type', type=str.lower, default='adam', choices=['adam', 'admax'], help="The optimizer to be used.") parser.add_argument('--workers', type=int, default=1, help="The workers to load data.") parser.add_argument('--debug', default=False, type=boolean_string, help="Whether to cache cooked binary features") parser.add_argument('--pre_trained', default=None, type=str, help="The path of pre-trained RoBERTa model") return parser if __name__ == "__main__": parser = build_argument_parser() args = parser.parse_args() logger = set_logger(args.task_name, os.path.join(args.output_dir, 'training_{}.log'.format(args.task_name))) logger.info(args) try: main(args) except Exception as ex: try: logger.exception(f'Uncatched exception happened during execution.') import atexit atexit._run_exitfuncs() except: pass kill_children() os._exit(-1)
[]
[]
[ "NO_TQDM" ]
[]
["NO_TQDM"]
python
1
0
pkg/controllers/genericissuer_controller.go
/* Copyright 2021 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package controllers import ( "context" "fmt" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/session" "github.com/go-logr/logr" api "github.com/jniebuhr/aws-pca-issuer/pkg/api/v1beta1" awspca "github.com/jniebuhr/aws-pca-issuer/pkg/aws" "github.com/jniebuhr/aws-pca-issuer/pkg/util" core "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" "os" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" ) var awsDefaultRegion = os.Getenv("AWS_REGION") // GenericIssuerReconciler reconciles both AWSPCAIssuer and AWSPCAClusterIssuer objects type GenericIssuerReconciler struct { client.Client Log logr.Logger Scheme *runtime.Scheme Recorder record.EventRecorder } // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. // // For more details, check Reconcile and its Result here: // - https://pkg.go.dev/sigs.k8s.io/[email protected]/pkg/reconcile func (r *GenericIssuerReconciler) Reconcile(ctx context.Context, req ctrl.Request, issuer api.GenericIssuer) (ctrl.Result, error) { log := r.Log.WithValues("genericissuer", req.NamespacedName) spec := issuer.GetSpec() err := validateIssuer(spec) if err != nil { log.Error(err, "failed to validate issuer") _ = r.setStatus(ctx, issuer, metav1.ConditionFalse, "Validation", "Failed to validate resource: %v", err) return ctrl.Result{}, err } config := aws.Config{} if spec.Region != "" { config.Region = aws.String(spec.Region) } if spec.SecretRef.Name != "" { secretNamespaceName := types.NamespacedName{ Namespace: spec.SecretRef.Namespace, Name: spec.SecretRef.Name, } secret := new(core.Secret) if err := r.Client.Get(ctx, secretNamespaceName, secret); err != nil { log.Error(err, "failed to retrieve AWS secret") _ = r.setStatus(ctx, issuer, metav1.ConditionFalse, "Error", "Failed to retrieve secret: %v", err) return ctrl.Result{}, err } accessKey, ok := secret.Data["AWS_ACCESS_KEY_ID"] if !ok { err := fmt.Errorf("secret does not contain AWS_ACCESS_KEY_ID") log.Error(err, "secret value AWS_ACCESS_KEY_ID was not found") _ = r.setStatus(ctx, issuer, metav1.ConditionFalse, "Error", "secret value AWS_ACCESS_KEY_ID was not found") return ctrl.Result{}, err } secretKey, ok := secret.Data["AWS_SECRET_ACCESS_KEY"] if !ok { err := fmt.Errorf("secret does not contain AWS_SECRET_ACCESS_KEY") log.Error(err, "secret value AWS_SECRET_ACCESS_KEY was not found") _ = r.setStatus(ctx, issuer, metav1.ConditionFalse, "Error", "secret value AWS_SECRET_ACCESS_KEY was not found") return ctrl.Result{}, err } config.Credentials = credentials.NewStaticCredentials(string(accessKey), string(secretKey), "") } sess, err := session.NewSession(&config) if err != nil { log.Error(err, "failed to create AWS session") _ = r.setStatus(ctx, issuer, 
metav1.ConditionFalse, "Error", "Failed to create AWS session") return ctrl.Result{}, err } awspca.StoreProvisioner(req.NamespacedName, awspca.NewProvisioner(sess, spec.Arn)) return ctrl.Result{}, r.setStatus(ctx, issuer, metav1.ConditionTrue, "Verified", "Issuer verified") } func (r *GenericIssuerReconciler) setStatus(ctx context.Context, issuer api.GenericIssuer, status metav1.ConditionStatus, reason, message string, args ...interface{}) error { log := r.Log.WithValues("genericissuer", issuer.GetName()) completeMessage := fmt.Sprintf(message, args...) util.SetIssuerCondition(log, issuer, api.ConditionTypeReady, status, reason, completeMessage) eventType := core.EventTypeNormal if status == metav1.ConditionFalse { eventType = core.EventTypeWarning } r.Recorder.Event(issuer, eventType, reason, completeMessage) return r.Client.Status().Update(ctx, issuer) } func validateIssuer(spec *api.AWSPCAIssuerSpec) error { switch { case spec.Arn == "": return fmt.Errorf("spec.arn cannot be empty") case spec.Region == "" && awsDefaultRegion == "": return fmt.Errorf("spec.region cannot be empty if no default region is specified") } return nil }
[ "\"AWS_REGION\"" ]
[]
[ "AWS_REGION" ]
[]
["AWS_REGION"]
go
1
0
gnupg/_util.py
# -*- coding: utf-8 -*- # # This file is part of python-gnupg, a Python interface to GnuPG. # Copyright © 2013 Isis Lovecruft, <[email protected]> 0xA3ADB67A2CDB8B35 # © 2013 Andrej B. # © 2013 LEAP Encryption Access Project # © 2008-2012 Vinay Sajip # © 2005 Steve Traugott # © 2004 A.M. Kuchling # # This program is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the Free # Software Foundation, either version 3 of the License, or (at your option) # any later version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the included LICENSE file for details. '''Extra utilities for python-gnupg.''' from __future__ import absolute_import from datetime import datetime from socket import gethostname from time import localtime from time import mktime import codecs import encodings import os import threading import random import re import string import sys # These are all the classes which are stream-like; they are used in # :func:`_is_stream`. _STREAMLIKE_TYPES = [] # These StringIO classes are actually utilised. try: import io from io import StringIO from io import BytesIO except ImportError: from cStringIO import StringIO else: # The io.IOBase type covers the above example for an open file handle in # Python3, as well as both io.BytesIO and io.StringIO. _STREAMLIKE_TYPES.append(io.IOBase) # The remaining StringIO classes which are imported are used to determine if a # object is a stream-like in :func:`_is_stream`. if 2 == sys.version_info[0]: # Import the StringIO class from the StringIO module since it is a # commonly used stream class. It is distinct from either of the # StringIO's that may be loaded in the above try/except clause, so the # name is prefixed with an underscore to distinguish it. from StringIO import StringIO as _StringIO_StringIO _STREAMLIKE_TYPES.append(_StringIO_StringIO) # Import the cStringIO module to test for the cStringIO stream types, # InputType and OutputType. See # http://stackoverflow.com/questions/14735295/to-check-an-instance-is-stringio import cStringIO as _cStringIO _STREAMLIKE_TYPES.append(_cStringIO.InputType) _STREAMLIKE_TYPES.append(_cStringIO.OutputType) # In Python2: # # >>> type(open('README.md', 'rb')) # <open file 'README.md', mode 'rb' at 0x7f9493951d20> # # whereas, in Python3, the `file` builtin doesn't exist and instead we get: # # >>> type(open('README.md', 'rb')) # <_io.BufferedReader name='README.md'> # # which is covered by the above addition of io.IOBase. _STREAMLIKE_TYPES.append(file) from . import _logger try: unicode _py3k = False try: isinstance(__name__, basestring) except NameError: msg = "Sorry, python-gnupg requires a Python version with proper" msg += " unicode support. Please upgrade to Python>=2.6." raise SystemExit(msg) except NameError: _py3k = True _running_windows = False if "win" in sys.platform: _running_windows = True ## Directory shortcuts: ## we don't want to use this one because it writes to the install dir: #_here = getabsfile(currentframe()).rsplit(os.path.sep, 1)[0] _here = os.path.join(os.getcwd(), 'gnupg') ## current dir _test = os.path.join(os.path.join(_here, 'test'), 'tmp') ## ./tests/tmp _user = os.environ.get('HOME') ## $HOME # Fix for Issue #74: we shouldn't expect that a $HOME directory is set in all # environs. 
https://github.com/isislovecruft/python-gnupg/issues/74 if not _user: _user = '/tmp/python-gnupg' try: os.makedirs(_user) except (OSError, IOError): _user = os.getcwd() # If we can't use $HOME, but we have (or can create) a # /tmp/python-gnupg/gnupghome directory, then we'll default to using # that. Otherwise, we'll use the current directory + /gnupghome. _user = os.path.sep.join([_user, 'gnupghome']) _ugpg = os.path.join(_user, '.gnupg') ## $HOME/.gnupg _conf = os.path.join(os.path.join(_user, '.config'), 'python-gnupg') ## $HOME/.config/python-gnupg ## Logger is disabled by default log = _logger.create_logger(0) #: Compiled regex for determining a GnuPG binary's version: _VERSION_STRING_REGEX = re.compile('(\d)*(\.)*(\d)*(\.)*(\d)*') def find_encodings(enc=None, system=False): """Find functions for encoding translations for a specific codec. :param str enc: The codec to find translation functions for. It will be normalized by converting to lowercase, excluding everything which is not ascii, and hyphens will be converted to underscores. :param bool system: If True, find encodings based on the system's stdin encoding, otherwise assume utf-8. :raises: :exc:LookupError if the normalized codec, ``enc``, cannot be found in Python's encoding translation map. """ if not enc: enc = 'utf-8' if system: if getattr(sys.stdin, 'encoding', None) is None: enc = sys.stdin.encoding log.debug("Obtained encoding from stdin: %s" % enc) else: enc = 'ascii' ## have to have lowercase to work, see ## http://docs.python.org/dev/library/codecs.html#standard-encodings enc = enc.lower() codec_alias = encodings.normalize_encoding(enc) codecs.register(encodings.search_function) coder = codecs.lookup(codec_alias) return coder if _py3k: def b(x): """See http://python3porting.com/problems.html#nicer-solutions""" coder = find_encodings() if isinstance(x, bytes): return coder.encode(x.decode(coder.name))[0] else: return coder.encode(x)[0] def s(x): if isinstance(x, str): return x elif isinstance(x, (bytes, bytearray)): return x.decode(find_encodings().name) else: raise NotImplemented else: def b(x): """See http://python3porting.com/problems.html#nicer-solutions""" return x def s(x): if isinstance(x, basestring): return x elif isinstance(x, (bytes, bytearray)): return x.decode(find_encodings().name) else: raise NotImplemented def binary(data): coder = find_encodings() if _py3k and isinstance(data, bytes): encoded = coder.encode(data.decode(coder.name))[0] elif _py3k and isinstance(data, str): encoded = coder.encode(data)[0] elif not _py3k and type(data) is not str: encoded = coder.encode(data)[0] else: encoded = data return encoded def author_info(name, contact=None, public_key=None): """Easy object-oriented representation of contributor info. :param str name: The contributor´s name. :param str contact: The contributor´s email address or contact information, if given. :param str public_key: The contributor´s public keyid, if given. """ return Storage(name=name, contact=contact, public_key=public_key) def _copy_data(instream, outstream): """Copy data from one stream to another. :type instream: :class:`io.BytesIO` or :class:`io.StringIO` or file :param instream: A byte stream or open file to read from. :param file outstream: The file descriptor of a tmpfile to write to. 
""" sent = 0 while True: if ((_py3k and isinstance(instream, str)) or (not _py3k and isinstance(instream, basestring))): data = instream[:1024] instream = instream[1024:] else: data = instream.read(1024) if len(data) == 0: break sent += len(data) encoded = binary(data) log.debug("Sending %d bytes of data..." % sent) log.debug("Encoded data (type %s):\n%s" % (type(encoded), encoded)) if not _py3k: try: outstream.write(encoded) except IOError as ioe: # Can get 'broken pipe' errors even when all data was sent if 'Broken pipe' in str(ioe): log.error('Error sending data: Broken pipe') else: log.exception(ioe) break else: log.debug("Wrote data type <type 'str'> to outstream.") else: try: outstream.write(bytes(encoded)) except TypeError as te: # XXX FIXME This appears to happen because # _threaded_copy_data() sometimes passes the `outstream` as an # object with type <_io.BufferredWriter> and at other times # with type <encodings.utf_8.StreamWriter>. We hit the # following error when the `outstream` has type # <encodings.utf_8.StreamWriter>. if not "convert 'bytes' object to str implicitly" in str(te): log.error(str(te)) try: outstream.write(encoded.decode()) except TypeError as yate: # We hit the "'str' does not support the buffer interface" # error in Python3 when the `outstream` is an io.BytesIO and # we try to write a str to it. We don't care about that # error, we'll just try again with bytes. if not "does not support the buffer interface" in str(yate): log.error(str(yate)) except IOError as ioe: # Can get 'broken pipe' errors even when all data was sent if 'Broken pipe' in str(ioe): log.error('Error sending data: Broken pipe') else: log.exception(ioe) break else: log.debug("Wrote data type <class 'str'> outstream.") except IOError as ioe: # Can get 'broken pipe' errors even when all data was sent if 'Broken pipe' in str(ioe): log.error('Error sending data: Broken pipe') else: log.exception(ioe) break else: log.debug("Wrote data type <class 'bytes'> to outstream.") try: outstream.close() except IOError as ioe: log.error("Unable to close outstream %s:\r\t%s" % (outstream, ioe)) else: log.debug("Closed outstream: %d bytes sent." % sent) def _create_if_necessary(directory): """Create the specified directory, if necessary. :param str directory: The directory to use. :rtype: bool :returns: True if no errors occurred and the directory was created or existed beforehand, False otherwise. """ if not os.path.isabs(directory): log.debug("Got non-absolute path: %s" % directory) directory = os.path.abspath(directory) if not os.path.isdir(directory): log.info("Creating directory: %s" % directory) try: os.makedirs(directory, 0x1C0) except OSError as ose: log.error(ose, exc_info=1) return False else: log.debug("Created directory.") return True def create_uid_email(username=None, hostname=None): """Create an email address suitable for a UID on a GnuPG key. :param str username: The username portion of an email address. If None, defaults to the username of the running Python process. :param str hostname: The FQDN portion of an email address. If None, the hostname is obtained from gethostname(2). :rtype: str :returns: A string formatted as <username>@<hostname>. 
""" if hostname: hostname = hostname.replace(' ', '_') if not username: try: username = os.environ['LOGNAME'] except KeyError: username = os.environ['USERNAME'] if not hostname: hostname = gethostname() uid = "%s@%s" % (username.replace(' ', '_'), hostname) else: username = username.replace(' ', '_') if (not hostname) and (username.find('@') == 0): uid = "%s@%s" % (username, gethostname()) elif hostname: uid = "%s@%s" % (username, hostname) else: uid = username return uid def _deprefix(line, prefix, callback=None): """Remove the prefix string from the beginning of line, if it exists. :param string line: A line, such as one output by GnuPG's status-fd. :param string prefix: A substring to remove from the beginning of ``line``. Case insensitive. :type callback: callable :param callback: Function to call if the prefix is found. The signature to callback will be only one argument, the ``line`` without the ``prefix``, i.e. ``callback(line)``. :rtype: string :returns: If the prefix was found, the ``line`` without the prefix is returned. Otherwise, the original ``line`` is returned. """ try: assert line.upper().startswith(u''.join(prefix).upper()) except AssertionError: log.debug("Line doesn't start with prefix '%s':\n%s" % (prefix, line)) return line else: newline = line[len(prefix):] if callback is not None: try: callback(newline) except Exception as exc: log.exception(exc) return newline def _find_binary(binary=None): """Find the absolute path to the GnuPG binary. Also run checks that the binary is not a symlink, and check that our process real uid has exec permissions. :param str binary: The path to the GnuPG binary. :raises: :exc:`~exceptions.RuntimeError` if it appears that GnuPG is not installed. :rtype: str :returns: The absolute path to the GnuPG binary to use, if no exceptions occur. """ found = None if binary is not None: if os.path.isabs(binary) and os.path.isfile(binary): return binary if not os.path.isabs(binary): try: found = _which(binary) log.debug("Found potential binary paths: %s" % '\n'.join([path for path in found])) found = found[0] except IndexError as ie: log.info("Could not determine absolute path of binary: '%s'" % binary) elif os.access(binary, os.X_OK): found = binary if found is None: try: found = _which('gpg', abspath_only=True, disallow_symlinks=True)[0] except IndexError as ie: log.error("Could not find binary for 'gpg'.") try: found = _which('gpg2')[0] except IndexError as ie: log.error("Could not find binary for 'gpg2'.") if found is None: raise RuntimeError("GnuPG is not installed!") return found def _has_readwrite(path): """ Determine if the real uid/gid of the executing user has read and write permissions for a directory or a file. :param str path: The path to the directory or file to check permissions for. :rtype: bool :returns: True if real uid/gid has read+write permissions, False otherwise. """ return os.access(path, os.R_OK ^ os.W_OK) def _is_file(filename): """Check that the size of the thing which is supposed to be a filename has size greater than zero, without following symbolic links or using :func:os.path.isfile. :param filename: An object to check. :rtype: bool :returns: True if **filename** is file-like, False otherwise. """ try: statinfo = os.lstat(filename) log.debug("lstat(%r) with type=%s gave us %r" % (repr(filename), type(filename), repr(statinfo))) if not (statinfo.st_size > 0): raise ValueError("'%s' appears to be an empty file!" 
% filename) except OSError as oserr: log.error(oserr) if filename == '-': log.debug("Got '-' for filename, assuming sys.stdin...") return True except (ValueError, TypeError, IOError) as err: log.error(err) else: return True return False def _is_stream(input): """Check that the input is a byte stream. :param input: An object provided for reading from or writing to. :rtype: bool :returns: True if :param:input is a stream, False if otherwise. """ return isinstance(input, tuple(_STREAMLIKE_TYPES)) def _is_string(thing): """Check that **thing** is a string. The definition of the latter depends upon the Python version. :param thing: The thing to check if it's a string. :rtype: bool :returns: ``True`` if **thing** is string (or unicode in Python2). """ if (_py3k and isinstance(thing, str)): return True if (not _py3k and isinstance(thing, basestring)): return True return False def _is_bytes(thing): """Check that **thing** is bytes. :param thing: The thing to check if it's bytes. :rtype: bool :returns: ``True`` if **thing** is bytes or a bytearray. """ if isinstance(thing, (bytes, bytearray)): return True return False def _is_list_or_tuple(instance): """Check that ``instance`` is a list or tuple. :param instance: The object to type check. :rtype: bool :returns: True if ``instance`` is a list or tuple, False otherwise. """ return isinstance(instance, (list, tuple,)) def _is_gpg1(version): """Returns True if using GnuPG version 1.x. :param tuple version: A tuple of three integers indication major, minor, and micro version numbers. """ (major, minor, micro) = _match_version_string(version) if major == 1: return True return False def _is_gpg2(version): """Returns True if using GnuPG version 2.x. :param tuple version: A tuple of three integers indication major, minor, and micro version numbers. """ (major, minor, micro) = _match_version_string(version) if major == 2: return True return False def _make_binary_stream(thing, encoding=None, armor=True): """Encode **thing**, then make it stream/file-like. :param thing: The thing to turn into a encoded stream. :rtype: ``io.BytesIO`` or ``io.StringIO``. :returns: The encoded **thing**, wrapped in an ``io.BytesIO`` (if available), otherwise wrapped in a ``io.StringIO``. """ if _py3k: if isinstance(thing, str): thing = thing.encode(encoding) else: if type(thing) is not str: thing = thing.encode(encoding) try: rv = BytesIO(thing) except NameError: rv = StringIO(thing) return rv def _make_passphrase(length=None, save=False, file=None): """Create a passphrase and write it to a file that only the user can read. This is not very secure, and should not be relied upon for actual key passphrases. :param int length: The length in bytes of the string to generate. :param file file: The file to save the generated passphrase in. If not given, defaults to 'passphrase-<the real user id>-<seconds since epoch>' in the top-level directory. """ if not length: length = 40 passphrase = _make_random_string(length) if save: ruid, euid, suid = os.getresuid() gid = os.getgid() now = mktime(localtime()) if not file: filename = str('passphrase-%s-%s' % uid, now) file = os.path.join(_repo, filename) with open(file, 'a') as fh: fh.write(passphrase) fh.flush() fh.close() os.chmod(file, stat.S_IRUSR | stat.S_IWUSR) os.chown(file, ruid, gid) log.warn("Generated passphrase saved to %s" % file) return passphrase def _make_random_string(length): """Returns a random lowercase, uppercase, alphanumerical string. :param int length: The length in bytes of the string to generate. 
""" chars = string.ascii_lowercase + string.ascii_uppercase + string.digits return ''.join(random.choice(chars) for x in range(length)) def _match_version_string(version): """Sort a binary version string into major, minor, and micro integers. :param str version: A version string in the form x.x.x """ matched = _VERSION_STRING_REGEX.match(version) g = matched.groups() major, minor, micro = int(g[0]), int(g[2]), int(g[4]) return (major, minor, micro) def _next_year(): """Get the date of today plus one year. :rtype: str :returns: The date of this day next year, in the format '%Y-%m-%d'. """ now = datetime.now().__str__() date = now.split(' ', 1)[0] year, month, day = date.split('-', 2) next_year = str(int(year)+1) return '-'.join((next_year, month, day)) def _now(): """Get a timestamp for right now, formatted according to ISO 8601.""" return datetime.isoformat(datetime.now()) def _separate_keyword(line): """Split the line, and return (first_word, the_rest).""" try: first, rest = line.split(None, 1) except ValueError: first = line.strip() rest = '' return first, rest def _threaded_copy_data(instream, outstream): """Copy data from one stream to another in a separate thread. Wraps ``_copy_data()`` in a :class:`threading.Thread`. :type instream: :class:`io.BytesIO` or :class:`io.StringIO` :param instream: A byte stream to read from. :param file outstream: The file descriptor of a tmpfile to write to. """ copy_thread = threading.Thread(target=_copy_data, args=(instream, outstream)) copy_thread.setDaemon(True) log.debug('%r, %r, %r', copy_thread, instream, outstream) copy_thread.start() return copy_thread def _utc_epoch(): """Get the seconds since epoch.""" return int(mktime(localtime())) def _which(executable, flags=os.X_OK, abspath_only=False, disallow_symlinks=False): """Borrowed from Twisted's :mod:twisted.python.proutils . Search PATH for executable files with the given name. On newer versions of MS-Windows, the PATHEXT environment variable will be set to the list of file extensions for files considered executable. This will normally include things like ".EXE". This fuction will also find files with the given name ending with any of these extensions. On MS-Windows the only flag that has any meaning is os.F_OK. Any other flags will be ignored. Note: This function does not help us prevent an attacker who can already manipulate the environment's PATH settings from placing malicious code higher in the PATH. It also does happily follows links. :param str name: The name for which to search. :param int flags: Arguments to L{os.access}. :rtype: list :returns: A list of the full paths to files found, in the order in which they were found. """ def _can_allow(p): if not os.access(p, flags): return False if abspath_only and not os.path.abspath(p): log.warn('Ignoring %r (path is not absolute)', p) return False if disallow_symlinks and os.path.islink(p): log.warn('Ignoring %r (path is a symlink)', p) return False return True result = [] exts = filter(None, os.environ.get('PATHEXT', '').split(os.pathsep)) path = os.environ.get('PATH', None) if path is None: return [] for p in os.environ.get('PATH', '').split(os.pathsep): p = os.path.join(p, executable) if _can_allow(p): result.append(p) for e in exts: pext = p + e if _can_allow(pext): result.append(pext) return result def _write_passphrase(stream, passphrase, encoding): """Write the passphrase from memory to the GnuPG process' stdin. 
:type stream: file, :class:`~io.BytesIO`, or :class:`~io.StringIO` :param stream: The input file descriptor to write the password to. :param str passphrase: The passphrase for the secret key material. :param str encoding: The data encoding expected by GnuPG. Usually, this is ``sys.getfilesystemencoding()``. """ passphrase = '%s\n' % passphrase passphrase = passphrase.encode(encoding) stream.write(passphrase) log.debug("Wrote passphrase on stdin.") class InheritableProperty(object): """Based on the emulation of PyProperty_Type() in Objects/descrobject.c""" def __init__(self, fget=None, fset=None, fdel=None, doc=None): self.fget = fget self.fset = fset self.fdel = fdel self.__doc__ = doc def __get__(self, obj, objtype=None): if obj is None: return self if self.fget is None: raise AttributeError("unreadable attribute") if self.fget.__name__ == '<lambda>' or not self.fget.__name__: return self.fget(obj) else: return getattr(obj, self.fget.__name__)() def __set__(self, obj, value): if self.fset is None: raise AttributeError("can't set attribute") if self.fset.__name__ == '<lambda>' or not self.fset.__name__: self.fset(obj, value) else: getattr(obj, self.fset.__name__)(value) def __delete__(self, obj): if self.fdel is None: raise AttributeError("can't delete attribute") if self.fdel.__name__ == '<lambda>' or not self.fdel.__name__: self.fdel(obj) else: getattr(obj, self.fdel.__name__)() class Storage(dict): """A dictionary where keys are stored as class attributes. For example, ``obj.foo`` can be used in addition to ``obj['foo']``: >>> o = Storage(a=1) >>> o.a 1 >>> o['a'] 1 >>> o.a = 2 >>> o['a'] 2 >>> del o.a >>> o.a None """ def __getattr__(self, key): try: return self[key] except KeyError as k: return None def __setattr__(self, key, value): self[key] = value def __delattr__(self, key): try: del self[key] except KeyError as k: raise AttributeError(k.args[0]) def __repr__(self): return '<Storage ' + dict.__repr__(self) + '>' def __getstate__(self): return dict(self) def __setstate__(self, value): for (k, v) in value.items(): self[k] = v
[]
[]
[ "USERNAME", "LOGNAME", "PATHEXT", "HOME", "PATH" ]
[]
["USERNAME", "LOGNAME", "PATHEXT", "HOME", "PATH"]
python
5
0
tensorflow/python/keras/distribute/distribute_coordinator_utils.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities related to distribute coordinator. The module is used only for utils to support legacy TF1 code path involving distribute coordinator, and is not expected to change in any way. This is subject to cleanup once TF1 is no longer supported. TODO(rchao): Remove this module once TF1 is not supported. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy import json import os import threading import time from tensorflow.core.protobuf import cluster_pb2 from tensorflow.core.protobuf import config_pb2 from tensorflow.python.client import session from tensorflow.python.platform import tf_logging as logging from tensorflow.python.training import monitored_session from tensorflow.python.training import server_lib _worker_context = threading.local() _thread_local = threading.local() def get_current_worker_context(): """Returns the current task context.""" try: return _worker_context.current except AttributeError: return None class _TaskType(object): PS = "ps" WORKER = "worker" CHIEF = "chief" EVALUATOR = "evaluator" CLIENT = "client" def _get_num_workers(cluster_spec): """Gets number of workers including chief.""" if not cluster_spec: return 0 return len(cluster_spec.as_dict().get(_TaskType.WORKER, [])) + len( cluster_spec.as_dict().get(_TaskType.CHIEF, [])) class _WorkerContext(object): """The worker context class. This context object provides configuration information for each task. One context manager with a worker context object will be created per invocation to the `worker_fn` where `get_current_worker_context` can be called to access the worker context object. """ def __init__(self, strategy, cluster_spec, task_type, task_id, session_config=None, rpc_layer="grpc", worker_barrier=None): """Initialize the worker context object. Args: strategy: a `DistributionStrategy` object. cluster_spec: a ClusterSpec object. It can be empty or None in the local training case. task_type: a string indicating the role of the corresponding task, such as "worker" or "ps". It can be None if it is local training or in-graph replicated training. task_id: an integer indicating id of the corresponding task. It can be None if it is local training or in-graph replicated training. session_config: an optional `tf.compat.v1.ConfigProto` object. rpc_layer: optional string specifying the RPC protocol for communication with worker masters. If None or empty, hosts in the `cluster_spec` will be used directly. worker_barrier: optional, the barrier object for worker synchronization. 
""" self._strategy = strategy self._cluster_spec = cluster_spec self._task_type = task_type self._task_id = task_id self._session_config = session_config self._worker_barrier = worker_barrier self._rpc_layer = rpc_layer self._master_target = self._get_master_target() self._num_workers = _get_num_workers(cluster_spec) self._is_chief_node = self._is_chief() def _debug_message(self): if self._cluster_spec: return "[cluster_spec: %r, task_type: %r, task_id: %r]" % ( self._cluster_spec, self.task_type, self.task_id) else: return "[local]" def __enter__(self): old_context = get_current_worker_context() if old_context: raise ValueError( "You cannot run distribute coordinator in a `worker_fn`.\t" + self._debug_message()) # pylint: disable=protected-access _worker_context.current = self def __exit__(self, unused_exception_type, unused_exception_value, unused_traceback): # pylint: disable=protected-access _worker_context.current = None def _get_master_target(self): """Return the master target for a task.""" # If cluster_spec is None or empty, we use local master. if not self._cluster_spec or self._task_type == _TaskType.EVALUATOR: return "" # If task_type is None, then it is in-graph replicated training. In this # case we use the chief or first worker's master target. if not self._task_type: if _TaskType.CHIEF in self._cluster_spec.jobs: task_type = _TaskType.CHIEF task_id = 0 else: assert _TaskType.WORKER in self._cluster_spec.jobs task_type = _TaskType.WORKER task_id = 0 else: task_type = self._task_type task_id = self._task_id prefix = "" if self._rpc_layer: prefix = self._rpc_layer + "://" return prefix + self._cluster_spec.job_tasks(task_type)[task_id or 0] def _is_chief(self): """Return whether the task is the chief worker.""" if (not self._cluster_spec or self._task_type in [_TaskType.CHIEF, _TaskType.EVALUATOR, None]): return True # If not local and chief not in the cluster_spec, use the first worker as # chief. if (_TaskType.CHIEF not in self._cluster_spec.jobs and self._task_type == _TaskType.WORKER and self._task_id == 0): return True return False def wait_for_other_workers(self): """Waits for other workers to reach the same call to this method. Raises: ValueError: if `worker_barrier` is not passed to the __init__ method. """ if not self._worker_barrier: # TODO(yuefengz): we should throw an error in independent worker mode. return self._worker_barrier.wait() def session_creator(self, scaffold=None, config=None, checkpoint_dir=None, checkpoint_filename_with_path=None, max_wait_secs=7200): """Returns a session creator. The returned session creator will be configured with the correct master target and session configs. It will also run either init ops or ready ops by querying the `strategy` object when `create_session` is called on it. Args: scaffold: A `Scaffold` used for gathering or building supportive ops. If not specified a default one is created. It's used to finalize the graph. config: `ConfigProto` proto used to configure the session. checkpoint_dir: A string. Optional path to a directory where to restore variables. checkpoint_filename_with_path: Full file name path to the checkpoint file. Only one of `checkpoint_dir` or `checkpoint_filename_with_path` can be specified. max_wait_secs: Maximum time to wait for the session to become available. Returns: a descendant of SessionCreator. 
""" if config: session_config = copy.deepcopy(config) session_config.MergeFrom(self._session_config) else: session_config = self._session_config if not self._strategy or self._strategy.extended.experimental_should_init: logging.info("Creating chief session creator with config: %r", config) return monitored_session.ChiefSessionCreator( scaffold, master=self.master_target, config=session_config, checkpoint_dir=checkpoint_dir, checkpoint_filename_with_path=checkpoint_filename_with_path) else: logging.info("Creating worker session creator with config: %r", config) return monitored_session.WorkerSessionCreator( scaffold, master=self.master_target, config=session_config, max_wait_secs=max_wait_secs) @property def session_config(self): return copy.deepcopy(self._session_config) @property def has_barrier(self): """Whether the barrier is set or not.""" return self._worker_barrier is not None @property def distributed_mode(self): """Whether it is distributed training or not.""" return bool(self._cluster_spec) and self._task_type != _TaskType.EVALUATOR @property def cluster_spec(self): """Returns a copy of the cluster_spec object.""" return copy.deepcopy(self._cluster_spec) @property def task_type(self): """Returns the role of the corresponding task.""" return self._task_type @property def task_id(self): """Returns the id or index of the corresponding task.""" return self._task_id @property def master_target(self): """Returns the session master for the corresponding task to connect to.""" return self._master_target @property def is_chief(self): """Returns whether the task is a chief node.""" return self._is_chief_node @property def num_workers(self): """Returns number of workers in the cluster, including chief.""" return self._num_workers @property def experimental_should_init(self): """Whether to run init ops.""" return self._strategy.extended.experimental_should_init @property def should_checkpoint(self): """Whether to save checkpoint.""" return self._strategy.extended.should_checkpoint @property def should_save_summary(self): """Whether to save summaries.""" return self._strategy.extended.should_save_summary def _run_single_worker(worker_fn, strategy, cluster_spec, task_type, task_id, session_config, rpc_layer="", worker_barrier=None, coord=None): """Runs a single worker by calling `worker_fn` under context.""" session_config = copy.deepcopy(session_config) strategy = copy.deepcopy(strategy) # If there is an EVALUATOR task, we run single-machine eval on that task. if task_type == _TaskType.EVALUATOR: # It is possible to not have a strategy object for EVALUATOR task. if strategy: strategy.configure(session_config) else: assert strategy strategy.configure(session_config, cluster_spec, task_type, task_id) context = _WorkerContext( strategy, cluster_spec, task_type, task_id, session_config=session_config, rpc_layer=rpc_layer, worker_barrier=worker_barrier) with context: if coord: with coord.stop_on_exception(): return worker_fn(strategy) else: return worker_fn(strategy) def _split_cluster_for_evaluator(cluster_spec, task_type): """Split the cluster for evaluator since it needn't talk to other tasks.""" # Splitting the cluster is important to prevent the evaluator from talking to # other tasks in the cluster. Since we allow evaluator not to use # distribution strategies and as a result ops in the evaluator task may have # unspecified devices. Those ops may end up on other tasks if we don't split # the cluster. 
# Note: if you bypass distribute coordinator and bring the cluster yourself, # you can equivalently set device filters to split clusters. This is already # done by distribution strategy's `update_config_proto` method. new_cluster_spec = normalize_cluster_spec(cluster_spec).as_dict() if task_type == _TaskType.EVALUATOR: assert _TaskType.EVALUATOR in new_cluster_spec new_cluster_spec = { _TaskType.EVALUATOR: new_cluster_spec[_TaskType.EVALUATOR] } else: new_cluster_spec.pop(_TaskType.EVALUATOR, None) return normalize_cluster_spec(new_cluster_spec) def _run_std_server(cluster_spec=None, task_type=None, task_id=None, session_config=None, rpc_layer=None, environment=None): """Runs a standard server.""" # Check if the Server is already running. If so, assert that no configuration # options have changed, and return the existing Server. This allows us to # call `run_distribute_coordinator` multiple times. if getattr(_thread_local, "server", None) is not None: assert _thread_local.cluster_spec == cluster_spec assert _thread_local.task_type == task_type assert _thread_local.task_id == task_id assert _thread_local.session_config_str == repr(session_config) assert _thread_local.rpc_layer == rpc_layer assert _thread_local.environment == environment return _thread_local.server else: # This method is not thread-safe. _thread_local.server_started = True _thread_local.cluster_spec = cluster_spec _thread_local.task_type = task_type _thread_local.task_id = task_id _thread_local.session_config_str = repr(session_config) _thread_local.rpc_layer = rpc_layer _thread_local.environment = environment assert cluster_spec target = cluster_spec.task_address(task_type, task_id) if rpc_layer: target = rpc_layer + "://" + target class _FakeServer(object): """A fake server that runs a master session.""" def start(self): # A tensorflow server starts when a remote session is created. logging.info( "Creating a remote session to start a TensorFlow server, " "target = %r, session_config=%r", target, session_config) session.Session(target=target, config=session_config) def join(self): while True: time.sleep(5) if environment == "google": server = _FakeServer() else: if session_config: logging.info( "Starting standard TensorFlow server, target = %r, session_config= " "%r", target, session_config) else: logging.info("Starting standard TensorFlow server, target = %r", target) cluster_spec = _split_cluster_for_evaluator(cluster_spec, task_type) server = server_lib.Server( cluster_spec, job_name=task_type, task_index=task_id, config=session_config, protocol=rpc_layer) server.start() _thread_local.server = server return server def _configure_session_config_for_std_servers(strategy, eval_strategy, session_config, cluster_spec, task_type, task_id): # pylint: disable=g-doc-args """Call strategy's `configure` to mutate the session_config. The session_config is currently needed as default config for a TensorFlow server. In the future, we should be able to remove this method and only pass the session config to a client session. """ if task_type == _TaskType.EVALUATOR: if eval_strategy: eval_strategy.configure(session_config=session_config) else: # The strategy may be shared in standalone client mode. strategy = copy.deepcopy(strategy) strategy.configure( session_config=session_config, cluster_spec=cluster_spec, task_type=task_type, task_id=task_id) # Remove the device filters specific to the strategy, so that the # TensorFlow server brought up with one strategy can be used by other # strategies. 
The device filters can be set in the client side as well. del session_config.device_filters[:] # TODO(yuefengz): propagate cluster_spec in the STANDALONE_CLIENT mode. # TODO(yuefengz): we may need a smart way to figure out whether the current task # is the special task when we support cluster_spec propagation. def run_distribute_coordinator(worker_fn, strategy, eval_fn=None, eval_strategy=None, cluster_spec=None, task_type=None, task_id=None, session_config=None, rpc_layer="grpc"): """Runs the coordinator for distributed TensorFlow. This function runs a split coordinator for distributed TensorFlow in its default mode, i.e the STANDALONE_CLIENT mode. Given a `cluster_spec` specifying server addresses and their roles in a cluster, this coordinator will figure out how to set them up, give the underlying function the right targets for master sessions via a scope object and coordinate their training. The cluster consisting of standard servers needs to be brought up either with the standard server binary or with a binary running distribute coordinator with `task_type` set to non-client type which will then turn into standard servers. In addition to be the distribute coordinator, this is also the source of configurations for each job in the distributed training. As there are multiple ways to configure a distributed TensorFlow cluster, its context object provides these configurations so that users or higher-level APIs don't have to figure out the configuration for each job by themselves. In the between-graph replicated training, this coordinator will create multiple threads and each calls the `worker_fn` which is supposed to create its own graph and connect to one worker master given by its context object. In the in-graph replicated training, it has only one thread calling this `worker_fn`. Another mode is the INDEPENDENT_WORKER mode where each server runs a distribute coordinator which will start a standard server and optionally runs `worker_fn` depending whether it is between-graph training or in-graph replicated training. The `strategy` object is expected to be a DistributionStrategy object which has implemented methods needed by distributed coordinator such as `configure(session_config, cluster_spec, task_type, task_id)` which configures the strategy object for a specific task and `experimental_should_init` property which instructs the distribute coordinator whether to run init ops for a task. The distribute coordinator will make a copy of the `strategy` object, call its `configure` method and pass it to `worker_fn` as an argument. The `worker_fn` defines the training logic and is called under its own worker context which can be accessed to via `get_current_worker_context`. A worker context provides access to configurations for each task, e.g. the task_type, task_id, master target and so on. Since `worker_fn` will be called in a thread and possibly multiple times, caller should be careful when it accesses global data. For example, it is unsafe to define flags in a `worker_fn` or to define different environment variables for different `worker_fn`s. The `worker_fn` for the between-graph replication is defined as if there is only one worker corresponding to the `worker_fn` and possibly ps jobs. For example, when training with parameter servers, it assigns variables to parameter servers and all other operations to that worker. In the in-graph replication case, the `worker_fn` has to define operations for all worker jobs. 
Using a distribution strategy can simplify the `worker_fn` by not having to worry about the replication and device assignment of variables and operations. This method is intended to be invoked by high-level APIs so that users don't have to explicitly call it to run this coordinator. For those who don't use high-level APIs, to change a program to use this coordinator, wrap everything in a the program after global data definitions such as commandline flag definition into the `worker_fn` and get task-specific configurations from the worker context. The `cluster_spec` can be either passed by the argument or parsed from the "TF_CONFIG" environment variable. Example of a TF_CONFIG: ``` cluster = {'chief': ['host0:2222'], 'ps': ['host1:2222', 'host2:2222'], 'worker': ['host3:2222', 'host4:2222', 'host5:2222']} os.environ['TF_CONFIG'] = json.dumps({'cluster': cluster}) ``` If `cluster_spec` is not given in any format, it becomes local training and this coordinator will connect to a local session. For evaluation, if "evaluator" exists in the cluster_spec, a separate thread will be created to call `eval_fn` with its `task_type` set to "evaluator". If `eval_fn` is not defined, fall back to `worker_fn`. This implies that evaluation will be done on a single machine if there is an "evaluator" task. If "evaluator" doesn't exist in the cluster_spec, it entirely depends on the `worker_fn` for how to do evaluation. Args: worker_fn: the function to be called. The function should accept a `strategy` object and will be given access to a context object via a context manager scope. strategy: a DistributionStrategy object specifying whether it should run between-graph replicated training or not, whether to run init ops, etc. This object will also be configured given `session_config`, `cluster_spec`, `task_type` and `task_id`. eval_fn: optional function for "evaluator" task. If `eval_fn` is not passed in but a "evaluator" task is found in the `cluster_spec`, the `worker_fn` will be used for this task. eval_strategy: optional DistributionStrategy object for "evaluator" task. cluster_spec: a dict, ClusterDef or ClusterSpec specifying servers and roles in a cluster. If not set or empty, fall back to local training. task_type: the current task type, optional if this is a client. task_id: the current task id, optional if this is a client. session_config: an optional `tf.compat.v1.ConfigProto` object which will be passed to `strategy`'s `configure` method and used to create a session. rpc_layer: optional string, the protocol for RPC, e.g. "grpc". Raises: ValueError: if `cluster_spec` is supplied but not a dict or a ClusterDef or a ClusterSpec. Returns: In the client job, return the value returned by `worker_fn` if it is in-graph replication or INDEPENDENT_WORKER mode; return None otherwise. """ tf_config = json.loads(os.environ.get("TF_CONFIG", "{}")) rpc_layer = tf_config.get("rpc_layer", rpc_layer) environment = tf_config.get("environment", None) if not cluster_spec: cluster_spec = tf_config.get("cluster", {}) task_env = tf_config.get("task", {}) if task_env: task_type = task_env.get("type", task_type) task_id = int(task_env.get("index", task_id)) if cluster_spec: # TODO(yuefengz): validate cluster_spec. 
cluster_spec = normalize_cluster_spec(cluster_spec) elif hasattr(strategy.extended, "_cluster_resolver"): cluster_resolver = strategy.extended._cluster_resolver # pylint: disable=protected-access task_type = cluster_resolver.task_type task_id = cluster_resolver.task_id rpc_layer = cluster_resolver.rpc_layer or rpc_layer environment = cluster_resolver.environment cluster_spec = cluster_resolver.cluster_spec() # Setting the session config is necessary for some strategies such as # CollectiveAllReduceStrategy. session_config = session_config or config_pb2.ConfigProto( allow_soft_placement=True) if cluster_spec: logging.info( "Running Distribute Coordinator with cluster_spec = %r, " "task_type = %r, task_id = %r, environment = %r, rpc_layer = %r", cluster_spec.as_dict(), task_type, task_id, environment, rpc_layer) if not cluster_spec: # `mode` is ignored in the local case. logging.info("Running local Distribute Coordinator.") _run_single_worker(worker_fn, strategy, None, None, None, session_config, rpc_layer) if eval_fn: _run_single_worker(eval_fn, eval_strategy, None, None, None, session_config, rpc_layer) else: logging.warning("Skipped evaluation since `eval_fn` is not passed in.") else: if not eval_fn: logging.warning("`eval_fn` is not passed in. The `worker_fn` will be " "used if an \"evaluator\" task exists in the cluster.") eval_fn = eval_fn or worker_fn if not eval_strategy: logging.warning("`eval_strategy` is not passed in. No distribution " "strategy will be used for evaluation.") # Every one starts a standard server, get session config from `configure` # method. _configure_session_config_for_std_servers(strategy, eval_strategy, session_config, cluster_spec, task_type, task_id) if (task_type != _TaskType.EVALUATOR and not getattr(strategy.extended, "_std_server_started", False)): # Right now, with eager mode, context is configured with a std server at # the very beginning while with graph mode the std server is started when # distribute coordinator is called. We should consolidate these two paths. server = _run_std_server( cluster_spec=cluster_spec, task_type=task_type, task_id=task_id, session_config=session_config, rpc_layer=rpc_layer, environment=environment) if task_type in [_TaskType.CHIEF, _TaskType.WORKER]: if strategy.extended.experimental_between_graph: # All jobs run `worker_fn` if between-graph. return _run_single_worker(worker_fn, strategy, cluster_spec, task_type, task_id, session_config, rpc_layer) else: # Only one node runs `worker_fn` if in-graph. context = _WorkerContext(strategy, cluster_spec, task_type, task_id) if context.is_chief: return _run_single_worker(worker_fn, strategy, cluster_spec, None, None, session_config, rpc_layer) else: server.join() elif task_type == _TaskType.EVALUATOR: return _run_single_worker(eval_fn, eval_strategy, cluster_spec, task_type, task_id, session_config, rpc_layer) else: if task_type != _TaskType.PS: raise ValueError("Unexpected task_type: %r" % task_type) server.join() def normalize_cluster_spec(cluster_spec): """Makes `cluster_spec` into a `ClusterSpec` object. Args: cluster_spec: a dict, ClusterDef or ClusterSpec object specifying the cluster configurations. Returns: a `ClusterSpec` object. Raises: ValueError: if `cluster_spec` is not a dict or a `ClusterSpec` or a `ClusterDef`. 
""" if isinstance(cluster_spec, (dict, cluster_pb2.ClusterDef)): return server_lib.ClusterSpec(cluster_spec) elif not isinstance(cluster_spec, server_lib.ClusterSpec): raise ValueError( "`cluster_spec' should be dict or a `tf.train.ClusterSpec` or a " "`tf.train.ClusterDef` object") return cluster_spec
[]
[]
[ "TF_CONFIG" ]
[]
["TF_CONFIG"]
python
1
0
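
The Python record above falls back to the TF_CONFIG environment variable (the constant listed in the argument columns for this file) and reads the `cluster`, `task.type`, `task.index`, `rpc_layer` and `environment` keys out of it. Below is a minimal sketch of producing such a value, written in Go for consistency with the other sketches in this collection; the key names come from the parsing code above, while the host addresses and the choice of a worker task are illustrative assumptions.

```
package main

import (
	"encoding/json"
	"os"
)

func main() {
	// Cluster layout follows the docstring example: chief, ps and worker jobs.
	tfConfig := map[string]interface{}{
		"cluster": map[string][]string{
			"chief":  {"host0:2222"},
			"ps":     {"host1:2222", "host2:2222"},
			"worker": {"host3:2222", "host4:2222", "host5:2222"},
		},
		// The coordinator reads task.type and task.index when task_type/task_id
		// are not passed in explicitly.
		"task":      map[string]interface{}{"type": "worker", "index": 1},
		"rpc_layer": "grpc",
	}
	encoded, err := json.Marshal(tfConfig)
	if err != nil {
		panic(err)
	}
	// run_distribute_coordinator parses this variable when no cluster_spec
	// argument is supplied.
	os.Setenv("TF_CONFIG", string(encoded))
}
```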
libcontainer/factory_linux.go
// +build linux package libcontainer import ( "encoding/json" "fmt" "os" "path/filepath" "regexp" "runtime/debug" "strconv" securejoin "github.com/cyphar/filepath-securejoin" "github.com/moby/sys/mountinfo" "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/opencontainers/runc/libcontainer/cgroups/fs" "github.com/opencontainers/runc/libcontainer/cgroups/fs2" "github.com/opencontainers/runc/libcontainer/cgroups/systemd" "github.com/opencontainers/runc/libcontainer/configs" "github.com/opencontainers/runc/libcontainer/configs/validate" "github.com/opencontainers/runc/libcontainer/intelrdt" "github.com/opencontainers/runc/libcontainer/utils" "github.com/pkg/errors" "golang.org/x/sys/unix" ) const ( stateFilename = "state.json" execFifoFilename = "exec.fifo" ) var idRegex = regexp.MustCompile(`^[\w+-\.]+$`) // InitArgs returns an options func to configure a LinuxFactory with the // provided init binary path and arguments. func InitArgs(args ...string) func(*LinuxFactory) error { return func(l *LinuxFactory) (err error) { if len(args) > 0 { // Resolve relative paths to ensure that its available // after directory changes. if args[0], err = filepath.Abs(args[0]); err != nil { return newGenericError(err, ConfigInvalid) } } l.InitArgs = args return nil } } func getUnifiedPath(paths map[string]string) string { path := "" for k, v := range paths { if path == "" { path = v } else if v != path { panic(errors.Errorf("expected %q path to be unified path %q, got %q", k, path, v)) } } // can be empty if path != "" { if filepath.Clean(path) != path || !filepath.IsAbs(path) { panic(errors.Errorf("invalid dir path %q", path)) } } return path } func systemdCgroupV2(l *LinuxFactory, rootless bool) error { l.NewCgroupsManager = func(config *configs.Cgroup, paths map[string]string) cgroups.Manager { return systemd.NewUnifiedManager(config, getUnifiedPath(paths), rootless) } return nil } // SystemdCgroups is an options func to configure a LinuxFactory to return // containers that use systemd to create and manage cgroups. func SystemdCgroups(l *LinuxFactory) error { if !systemd.IsRunningSystemd() { return fmt.Errorf("systemd not running on this host, can't use systemd as cgroups manager") } if cgroups.IsCgroup2UnifiedMode() { return systemdCgroupV2(l, false) } l.NewCgroupsManager = func(config *configs.Cgroup, paths map[string]string) cgroups.Manager { return systemd.NewLegacyManager(config, paths) } return nil } // RootlessSystemdCgroups is rootless version of SystemdCgroups. 
func RootlessSystemdCgroups(l *LinuxFactory) error { if !systemd.IsRunningSystemd() { return fmt.Errorf("systemd not running on this host, can't use systemd as cgroups manager") } if !cgroups.IsCgroup2UnifiedMode() { return fmt.Errorf("cgroup v2 not enabled on this host, can't use systemd (rootless) as cgroups manager") } return systemdCgroupV2(l, true) } func cgroupfs2(l *LinuxFactory, rootless bool) error { l.NewCgroupsManager = func(config *configs.Cgroup, paths map[string]string) cgroups.Manager { m, err := fs2.NewManager(config, getUnifiedPath(paths), rootless) if err != nil { panic(err) } return m } return nil } func cgroupfs(l *LinuxFactory, rootless bool) error { if cgroups.IsCgroup2UnifiedMode() { return cgroupfs2(l, rootless) } l.NewCgroupsManager = func(config *configs.Cgroup, paths map[string]string) cgroups.Manager { return fs.NewManager(config, paths, rootless) } return nil } // Cgroupfs is an options func to configure a LinuxFactory to return containers // that use the native cgroups filesystem implementation to create and manage // cgroups. func Cgroupfs(l *LinuxFactory) error { return cgroupfs(l, false) } // RootlessCgroupfs is an options func to configure a LinuxFactory to return // containers that use the native cgroups filesystem implementation to create // and manage cgroups. The difference between RootlessCgroupfs and Cgroupfs is // that RootlessCgroupfs can transparently handle permission errors that occur // during rootless container (including euid=0 in userns) setup (while still allowing cgroup usage if // they've been set up properly). func RootlessCgroupfs(l *LinuxFactory) error { return cgroupfs(l, true) } // IntelRdtfs is an options func to configure a LinuxFactory to return // containers that use the Intel RDT "resource control" filesystem to // create and manage Intel RDT resources (e.g., L3 cache, memory bandwidth). func IntelRdtFs(l *LinuxFactory) error { l.NewIntelRdtManager = func(config *configs.Config, id string, path string) intelrdt.Manager { return &intelrdt.IntelRdtManager{ Config: config, Id: id, Path: path, } } return nil } // TmpfsRoot is an option func to mount LinuxFactory.Root to tmpfs. func TmpfsRoot(l *LinuxFactory) error { mounted, err := mountinfo.Mounted(l.Root) if err != nil { return err } if !mounted { if err := unix.Mount("tmpfs", l.Root, "tmpfs", 0, ""); err != nil { return err } } return nil } // CriuPath returns an option func to configure a LinuxFactory with the // provided criupath func CriuPath(criupath string) func(*LinuxFactory) error { return func(l *LinuxFactory) error { l.CriuPath = criupath return nil } } // New returns a linux based container factory based in the root directory and // configures the factory with the provided option funcs. func New(root string, options ...func(*LinuxFactory) error) (Factory, error) { if root != "" { if err := os.MkdirAll(root, 0700); err != nil { return nil, newGenericError(err, SystemError) } } l := &LinuxFactory{ Root: root, InitPath: "/proc/self/exe", InitArgs: []string{os.Args[0], "init"}, Validator: validate.New(), CriuPath: "criu", } Cgroupfs(l) for _, opt := range options { if opt == nil { continue } if err := opt(l); err != nil { return nil, err } } return l, nil } // LinuxFactory implements the default factory interface for linux based systems. type LinuxFactory struct { // Root directory for the factory to store state. Root string // InitPath is the path for calling the init responsibilities for spawning // a container. 
InitPath string // InitArgs are arguments for calling the init responsibilities for spawning // a container. InitArgs []string // CriuPath is the path to the criu binary used for checkpoint and restore of // containers. CriuPath string // New{u,g}uidmapPath is the path to the binaries used for mapping with // rootless containers. NewuidmapPath string NewgidmapPath string // Validator provides validation to container configurations. Validator validate.Validator // NewCgroupsManager returns an initialized cgroups manager for a single container. NewCgroupsManager func(config *configs.Cgroup, paths map[string]string) cgroups.Manager // NewIntelRdtManager returns an initialized Intel RDT manager for a single container. NewIntelRdtManager func(config *configs.Config, id string, path string) intelrdt.Manager } func (l *LinuxFactory) Create(id string, config *configs.Config) (Container, error) { if l.Root == "" { return nil, newGenericError(fmt.Errorf("invalid root"), ConfigInvalid) } if err := l.validateID(id); err != nil { return nil, err } if err := l.Validator.Validate(config); err != nil { return nil, newGenericError(err, ConfigInvalid) } containerRoot, err := securejoin.SecureJoin(l.Root, id) if err != nil { return nil, err } if _, err := os.Stat(containerRoot); err == nil { return nil, newGenericError(fmt.Errorf("container with id exists: %v", id), IdInUse) } else if !os.IsNotExist(err) { return nil, newGenericError(err, SystemError) } if err := os.MkdirAll(containerRoot, 0711); err != nil { return nil, newGenericError(err, SystemError) } if err := os.Chown(containerRoot, unix.Geteuid(), unix.Getegid()); err != nil { return nil, newGenericError(err, SystemError) } c := &linuxContainer{ id: id, root: containerRoot, config: config, initPath: l.InitPath, initArgs: l.InitArgs, criuPath: l.CriuPath, newuidmapPath: l.NewuidmapPath, newgidmapPath: l.NewgidmapPath, cgroupManager: l.NewCgroupsManager(config.Cgroups, nil), } if intelrdt.IsCatEnabled() || intelrdt.IsMbaEnabled() { c.intelRdtManager = l.NewIntelRdtManager(config, id, "") } c.state = &stoppedState{c: c} return c, nil } func (l *LinuxFactory) Load(id string) (Container, error) { if l.Root == "" { return nil, newGenericError(fmt.Errorf("invalid root"), ConfigInvalid) } //when load, we need to check id is valid or not. 
if err := l.validateID(id); err != nil { return nil, err } containerRoot, err := securejoin.SecureJoin(l.Root, id) if err != nil { return nil, err } state, err := l.loadState(containerRoot, id) if err != nil { return nil, err } r := &nonChildProcess{ processPid: state.InitProcessPid, processStartTime: state.InitProcessStartTime, fds: state.ExternalDescriptors, } c := &linuxContainer{ initProcess: r, initProcessStartTime: state.InitProcessStartTime, id: id, config: &state.Config, initPath: l.InitPath, initArgs: l.InitArgs, criuPath: l.CriuPath, newuidmapPath: l.NewuidmapPath, newgidmapPath: l.NewgidmapPath, cgroupManager: l.NewCgroupsManager(state.Config.Cgroups, state.CgroupPaths), root: containerRoot, created: state.Created, } c.state = &loadedState{c: c} if err := c.refreshState(); err != nil { return nil, err } if intelrdt.IsCatEnabled() || intelrdt.IsMbaEnabled() { c.intelRdtManager = l.NewIntelRdtManager(&state.Config, id, state.IntelRdtPath) } return c, nil } func (l *LinuxFactory) Type() string { return "libcontainer" } // StartInitialization loads a container by opening the pipe fd from the parent to read the configuration and state // This is a low level implementation detail of the reexec and should not be consumed externally func (l *LinuxFactory) StartInitialization() (err error) { // Get the INITPIPE. envInitPipe := os.Getenv("_LIBCONTAINER_INITPIPE") pipefd, err := strconv.Atoi(envInitPipe) if err != nil { return fmt.Errorf("unable to convert _LIBCONTAINER_INITPIPE=%s to int: %s", envInitPipe, err) } pipe := os.NewFile(uintptr(pipefd), "pipe") defer pipe.Close() // Only init processes have FIFOFD. fifofd := -1 envInitType := os.Getenv("_LIBCONTAINER_INITTYPE") it := initType(envInitType) if it == initStandard { envFifoFd := os.Getenv("_LIBCONTAINER_FIFOFD") if fifofd, err = strconv.Atoi(envFifoFd); err != nil { return fmt.Errorf("unable to convert _LIBCONTAINER_FIFOFD=%s to int: %s", envFifoFd, err) } } var consoleSocket *os.File if envConsole := os.Getenv("_LIBCONTAINER_CONSOLE"); envConsole != "" { console, err := strconv.Atoi(envConsole) if err != nil { return fmt.Errorf("unable to convert _LIBCONTAINER_CONSOLE=%s to int: %s", envConsole, err) } consoleSocket = os.NewFile(uintptr(console), "console-socket") defer consoleSocket.Close() } // clear the current process's environment to clean any libcontainer // specific env vars. os.Clearenv() defer func() { // We have an error during the initialization of the container's init, // send it back to the parent process in the form of an initError. if werr := utils.WriteJSON(pipe, syncT{procError}); werr != nil { fmt.Fprintln(os.Stderr, err) return } if werr := utils.WriteJSON(pipe, newSystemError(err)); werr != nil { fmt.Fprintln(os.Stderr, err) return } }() defer func() { if e := recover(); e != nil { err = fmt.Errorf("panic from initialization: %v, %v", e, string(debug.Stack())) } }() i, err := newContainerInit(it, pipe, consoleSocket, fifofd) if err != nil { return err } // If Init succeeds, syscall.Exec will not return, hence none of the defers will be called. 
return i.Init() } func (l *LinuxFactory) loadState(root, id string) (*State, error) { stateFilePath, err := securejoin.SecureJoin(root, stateFilename) if err != nil { return nil, err } f, err := os.Open(stateFilePath) if err != nil { if os.IsNotExist(err) { return nil, newGenericError(fmt.Errorf("container %q does not exist", id), ContainerNotExists) } return nil, newGenericError(err, SystemError) } defer f.Close() var state *State if err := json.NewDecoder(f).Decode(&state); err != nil { return nil, newGenericError(err, SystemError) } return state, nil } func (l *LinuxFactory) validateID(id string) error { if !idRegex.MatchString(id) || string(os.PathSeparator)+id != utils.CleanPath(string(os.PathSeparator)+id) { return newGenericError(fmt.Errorf("invalid id format: %v", id), InvalidIdFormat) } return nil } // NewuidmapPath returns an option func to configure a LinuxFactory with the // provided .. func NewuidmapPath(newuidmapPath string) func(*LinuxFactory) error { return func(l *LinuxFactory) error { l.NewuidmapPath = newuidmapPath return nil } } // NewgidmapPath returns an option func to configure a LinuxFactory with the // provided .. func NewgidmapPath(newgidmapPath string) func(*LinuxFactory) error { return func(l *LinuxFactory) error { l.NewgidmapPath = newgidmapPath return nil } }
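
The factory above is configured entirely through option funcs (Cgroupfs, SystemdCgroups, InitArgs, CriuPath and so on) that New applies after its defaults. Below is a minimal usage sketch, assuming the runc libcontainer packages are importable as shown; the state root, the container id and the empty configs.Config are placeholders, and a real config needs a rootfs, namespaces and cgroup settings before Validate will accept it.

```
package main

import (
	"log"

	"github.com/opencontainers/runc/libcontainer"
	"github.com/opencontainers/runc/libcontainer/configs"
)

func main() {
	// New applies Cgroupfs by default, then each option func in order,
	// mirroring the constructor defined above.
	factory, err := libcontainer.New(
		"/run/example-containers", // state root (placeholder)
		libcontainer.Cgroupfs,
		libcontainer.InitArgs("/proc/self/exe", "init"),
		libcontainer.CriuPath("criu"),
	)
	if err != nil {
		log.Fatal(err)
	}

	// Create validates the id and config, then writes the container state dir.
	cfg := &configs.Config{} // placeholder; see note above
	container, err := factory.Create("example-id", cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer container.Destroy()
	log.Printf("created container %s", container.ID())
}
```

Reattaching later goes through the same state root: Load takes the id that Create was given and rebuilds the container from the saved state.json, as the Load method above shows.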
[ "\"_LIBCONTAINER_INITPIPE\"", "\"_LIBCONTAINER_INITTYPE\"", "\"_LIBCONTAINER_FIFOFD\"", "\"_LIBCONTAINER_CONSOLE\"" ]
[]
[ "_LIBCONTAINER_FIFOFD", "_LIBCONTAINER_INITTYPE", "_LIBCONTAINER_CONSOLE", "_LIBCONTAINER_INITPIPE" ]
[]
["_LIBCONTAINER_FIFOFD", "_LIBCONTAINER_INITTYPE", "_LIBCONTAINER_CONSOLE", "_LIBCONTAINER_INITPIPE"]
go
4
0
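
The environment variables listed for this file are consumed in StartInitialization as file-descriptor numbers plus an init type. The sketch below shows the parent side of that handshake in rough outline only: runc itself uses a socket pair and a more elaborate sync protocol, so treat the plain pipe, the fixed fd number and the "standard" init type here as illustrative assumptions.

```
package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	// The child's StartInitialization reads its bootstrap data from the fd
	// named by _LIBCONTAINER_INITPIPE (runc really uses a socket pair so the
	// child can also report errors back; a pipe keeps the sketch short).
	r, w, err := os.Pipe()
	if err != nil {
		panic(err)
	}

	cmd := exec.Command("/proc/self/exe", "init")
	// ExtraFiles[0] becomes fd 3 in the child (after stdin/stdout/stderr),
	// which is why the fd is advertised as "3".
	cmd.ExtraFiles = []*os.File{r}
	cmd.Env = append(os.Environ(),
		fmt.Sprintf("_LIBCONTAINER_INITPIPE=%d", 3),
		"_LIBCONTAINER_INITTYPE=standard",
	)
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	// The parent would now stream the container configuration into w and wait
	// for sync messages from the child; both are elided here.
	_ = w
	_ = cmd.Wait()
}
```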
pkg/proc/proc_test.go
package proc_test import ( "bytes" "flag" "fmt" "go/ast" "go/constant" "go/token" "io/ioutil" "net" "net/http" "os" "os/exec" "path/filepath" "reflect" "runtime" "strings" "testing" "time" "github.com/go-delve/delve/pkg/dwarf/frame" "github.com/go-delve/delve/pkg/goversion" "github.com/go-delve/delve/pkg/logflags" "github.com/go-delve/delve/pkg/proc" "github.com/go-delve/delve/pkg/proc/gdbserial" "github.com/go-delve/delve/pkg/proc/native" protest "github.com/go-delve/delve/pkg/proc/test" ) var normalLoadConfig = proc.LoadConfig{true, 1, 64, 64, -1, 0} var testBackend, buildMode string func init() { runtime.GOMAXPROCS(4) os.Setenv("GOMAXPROCS", "4") } func TestMain(m *testing.M) { flag.StringVar(&testBackend, "backend", "", "selects backend") flag.StringVar(&buildMode, "test-buildmode", "", "selects build mode") var logConf string flag.StringVar(&logConf, "log", "", "configures logging") flag.Parse() protest.DefaultTestBackend(&testBackend) if buildMode != "" && buildMode != "pie" { fmt.Fprintf(os.Stderr, "unknown build mode %q", buildMode) os.Exit(1) } logflags.Setup(logConf != "", logConf) os.Exit(protest.RunTestsWithFixtures(m)) } func withTestProcess(name string, t testing.TB, fn func(p proc.Process, fixture protest.Fixture)) { withTestProcessArgs(name, t, ".", []string{}, 0, fn) } func withTestProcessArgs(name string, t testing.TB, wd string, args []string, buildFlags protest.BuildFlags, fn func(p proc.Process, fixture protest.Fixture)) { if buildMode == "pie" { buildFlags |= protest.BuildModePIE } fixture := protest.BuildFixture(name, buildFlags) var p proc.Process var err error var tracedir string switch testBackend { case "native": p, err = native.Launch(append([]string{fixture.Path}, args...), wd, false, []string{}) case "lldb": p, err = gdbserial.LLDBLaunch(append([]string{fixture.Path}, args...), wd, false, []string{}) case "rr": protest.MustHaveRecordingAllowed(t) t.Log("recording") p, tracedir, err = gdbserial.RecordAndReplay(append([]string{fixture.Path}, args...), wd, true, []string{}) t.Logf("replaying %q", tracedir) default: t.Fatal("unknown backend") } if err != nil { t.Fatal("Launch():", err) } defer func() { p.Detach(true) if tracedir != "" { protest.SafeRemoveAll(tracedir) } }() fn(p, fixture) } func getRegisters(p proc.Process, t *testing.T) proc.Registers { regs, err := p.CurrentThread().Registers(false) if err != nil { t.Fatal("Registers():", err) } return regs } func dataAtAddr(thread proc.MemoryReadWriter, addr uint64) ([]byte, error) { data := make([]byte, 1) _, err := thread.ReadMemory(data, uintptr(addr)) return data, err } func assertNoError(err error, t testing.TB, s string) { if err != nil { _, file, line, _ := runtime.Caller(1) fname := filepath.Base(file) t.Fatalf("failed assertion at %s:%d: %s - %s\n", fname, line, s, err) } } func currentPC(p proc.Process, t *testing.T) uint64 { regs, err := p.CurrentThread().Registers(false) if err != nil { t.Fatal(err) } return regs.PC() } func currentLineNumber(p proc.Process, t *testing.T) (string, int) { pc := currentPC(p, t) f, l, _ := p.BinInfo().PCToLine(pc) return f, l } func assertLineNumber(p proc.Process, t *testing.T, lineno int, descr string) (string, int) { f, l := currentLineNumber(p, t) if l != lineno { _, callerFile, callerLine, _ := runtime.Caller(1) t.Fatalf("%s expected line :%d got %s:%d\n\tat %s:%d", descr, lineno, f, l, callerFile, callerLine) } return f, l } func TestExit(t *testing.T) { protest.AllowRecording(t) withTestProcess("continuetestprog", t, func(p proc.Process, fixture protest.Fixture) 
{ err := proc.Continue(p) pe, ok := err.(proc.ErrProcessExited) if !ok { t.Fatalf("Continue() returned unexpected error type %s", err) } if pe.Status != 0 { t.Errorf("Unexpected error status: %d", pe.Status) } if pe.Pid != p.Pid() { t.Errorf("Unexpected process id: %d", pe.Pid) } }) } func TestExitAfterContinue(t *testing.T) { protest.AllowRecording(t) withTestProcess("continuetestprog", t, func(p proc.Process, fixture protest.Fixture) { _, err := setFunctionBreakpoint(p, "main.sayhi") assertNoError(err, t, "setFunctionBreakpoint()") assertNoError(proc.Continue(p), t, "First Continue()") err = proc.Continue(p) pe, ok := err.(proc.ErrProcessExited) if !ok { t.Fatalf("Continue() returned unexpected error type %s", pe) } if pe.Status != 0 { t.Errorf("Unexpected error status: %d", pe.Status) } if pe.Pid != p.Pid() { t.Errorf("Unexpected process id: %d", pe.Pid) } }) } func setFunctionBreakpoint(p proc.Process, fname string) (*proc.Breakpoint, error) { addr, err := proc.FindFunctionLocation(p, fname, true, 0) if err != nil { return nil, err } return p.SetBreakpoint(addr, proc.UserBreakpoint, nil) } func setFileBreakpoint(p proc.Process, t *testing.T, fixture protest.Fixture, lineno int) *proc.Breakpoint { addr, err := proc.FindFileLocation(p, fixture.Source, lineno) if err != nil { t.Fatalf("FindFileLocation: %v", err) } bp, err := p.SetBreakpoint(addr, proc.UserBreakpoint, nil) if err != nil { t.Fatalf("SetBreakpoint: %v", err) } return bp } func TestHalt(t *testing.T) { stopChan := make(chan interface{}, 1) withTestProcess("loopprog", t, func(p proc.Process, fixture protest.Fixture) { _, err := setFunctionBreakpoint(p, "main.loop") assertNoError(err, t, "SetBreakpoint") assertNoError(proc.Continue(p), t, "Continue") if p, ok := p.(*native.Process); ok { for _, th := range p.ThreadList() { _, err := th.Registers(false) assertNoError(err, t, "Registers") } } resumeChan := make(chan struct{}, 1) go func() { <-resumeChan time.Sleep(100 * time.Millisecond) stopChan <- p.RequestManualStop() }() p.ResumeNotify(resumeChan) assertNoError(proc.Continue(p), t, "Continue") retVal := <-stopChan if err, ok := retVal.(error); ok && err != nil { t.Fatal() } // Loop through threads and make sure they are all // actually stopped, err will not be nil if the process // is still running. 
if p, ok := p.(*native.Process); ok { for _, th := range p.ThreadList() { if th, ok := th.(*native.Thread); ok { if !th.Stopped() { t.Fatal("expected thread to be stopped, but was not") } } _, err := th.Registers(false) assertNoError(err, t, "Registers") } } }) } func TestStep(t *testing.T) { protest.AllowRecording(t) withTestProcess("testprog", t, func(p proc.Process, fixture protest.Fixture) { helloworldaddr, err := proc.FindFunctionLocation(p, "main.helloworld", false, 0) assertNoError(err, t, "FindFunctionLocation") _, err = p.SetBreakpoint(helloworldaddr, proc.UserBreakpoint, nil) assertNoError(err, t, "SetBreakpoint()") assertNoError(proc.Continue(p), t, "Continue()") regs := getRegisters(p, t) rip := regs.PC() err = p.CurrentThread().StepInstruction() assertNoError(err, t, "Step()") regs = getRegisters(p, t) if rip >= regs.PC() { t.Errorf("Expected %#v to be greater than %#v", regs.PC(), rip) } }) } func TestBreakpoint(t *testing.T) { protest.AllowRecording(t) withTestProcess("testprog", t, func(p proc.Process, fixture protest.Fixture) { helloworldaddr, err := proc.FindFunctionLocation(p, "main.helloworld", false, 0) assertNoError(err, t, "FindFunctionLocation") bp, err := p.SetBreakpoint(helloworldaddr, proc.UserBreakpoint, nil) assertNoError(err, t, "SetBreakpoint()") assertNoError(proc.Continue(p), t, "Continue()") regs, err := p.CurrentThread().Registers(false) assertNoError(err, t, "Registers") pc := regs.PC() if bp.TotalHitCount != 1 { t.Fatalf("Breakpoint should be hit once, got %d\n", bp.TotalHitCount) } if pc-1 != bp.Addr && pc != bp.Addr { f, l, _ := p.BinInfo().PCToLine(pc) t.Fatalf("Break not respected:\nPC:%#v %s:%d\nFN:%#v \n", pc, f, l, bp.Addr) } }) } func TestBreakpointInSeparateGoRoutine(t *testing.T) { protest.AllowRecording(t) withTestProcess("testthreads", t, func(p proc.Process, fixture protest.Fixture) { fnentry, err := proc.FindFunctionLocation(p, "main.anotherthread", false, 0) assertNoError(err, t, "FindFunctionLocation") _, err = p.SetBreakpoint(fnentry, proc.UserBreakpoint, nil) assertNoError(err, t, "SetBreakpoint") assertNoError(proc.Continue(p), t, "Continue") regs, err := p.CurrentThread().Registers(false) assertNoError(err, t, "Registers") pc := regs.PC() f, l, _ := p.BinInfo().PCToLine(pc) if f != "testthreads.go" && l != 8 { t.Fatal("Program did not hit breakpoint") } }) } func TestBreakpointWithNonExistantFunction(t *testing.T) { withTestProcess("testprog", t, func(p proc.Process, fixture protest.Fixture) { _, err := p.SetBreakpoint(0, proc.UserBreakpoint, nil) if err == nil { t.Fatal("Should not be able to break at non existant function") } }) } func TestClearBreakpointBreakpoint(t *testing.T) { withTestProcess("testprog", t, func(p proc.Process, fixture protest.Fixture) { fnentry, err := proc.FindFunctionLocation(p, "main.sleepytime", false, 0) assertNoError(err, t, "FindFunctionLocation") bp, err := p.SetBreakpoint(fnentry, proc.UserBreakpoint, nil) assertNoError(err, t, "SetBreakpoint()") bp, err = p.ClearBreakpoint(fnentry) assertNoError(err, t, "ClearBreakpoint()") data, err := dataAtAddr(p.CurrentThread(), bp.Addr) assertNoError(err, t, "dataAtAddr") int3 := []byte{0xcc} if bytes.Equal(data, int3) { t.Fatalf("Breakpoint was not cleared data: %#v, int3: %#v", data, int3) } if countBreakpoints(p) != 0 { t.Fatal("Breakpoint not removed internally") } }) } type nextTest struct { begin, end int } func countBreakpoints(p proc.Process) int { bpcount := 0 for _, bp := range p.Breakpoints().M { if bp.ID >= 0 { bpcount++ } } return bpcount } type 
contFunc int const ( contContinue contFunc = iota contNext contStep contStepout ) type seqTest struct { cf contFunc pos int } func testseq(program string, contFunc contFunc, testcases []nextTest, initialLocation string, t *testing.T) { seqTestcases := make([]seqTest, len(testcases)+1) seqTestcases[0] = seqTest{contContinue, testcases[0].begin} for i := range testcases { if i > 0 { if testcases[i-1].end != testcases[i].begin { panic(fmt.Errorf("begin/end mismatch at index %d", i)) } } seqTestcases[i+1] = seqTest{contFunc, testcases[i].end} } testseq2(t, program, initialLocation, seqTestcases) } const traceTestseq2 = false func testseq2(t *testing.T, program string, initialLocation string, testcases []seqTest) { testseq2Args(".", []string{}, 0, t, program, initialLocation, testcases) } func testseq2Args(wd string, args []string, buildFlags protest.BuildFlags, t *testing.T, program string, initialLocation string, testcases []seqTest) { protest.AllowRecording(t) withTestProcessArgs(program, t, wd, args, buildFlags, func(p proc.Process, fixture protest.Fixture) { var bp *proc.Breakpoint var err error if initialLocation != "" { bp, err = setFunctionBreakpoint(p, initialLocation) } else if testcases[0].cf == contContinue { var pc uint64 pc, err = proc.FindFileLocation(p, fixture.Source, testcases[0].pos) assertNoError(err, t, "FindFileLocation()") bp, err = p.SetBreakpoint(pc, proc.UserBreakpoint, nil) } else { panic("testseq2 can not set initial breakpoint") } if traceTestseq2 { t.Logf("initial breakpoint %v", bp) } assertNoError(err, t, "SetBreakpoint()") regs, err := p.CurrentThread().Registers(false) assertNoError(err, t, "Registers") f, ln := currentLineNumber(p, t) for i, tc := range testcases { switch tc.cf { case contNext: if traceTestseq2 { t.Log("next") } assertNoError(proc.Next(p), t, "Next() returned an error") case contStep: if traceTestseq2 { t.Log("step") } assertNoError(proc.Step(p), t, "Step() returned an error") case contStepout: if traceTestseq2 { t.Log("stepout") } assertNoError(proc.StepOut(p), t, "StepOut() returned an error") case contContinue: if traceTestseq2 { t.Log("continue") } assertNoError(proc.Continue(p), t, "Continue() returned an error") if i == 0 { if traceTestseq2 { t.Log("clearing initial breakpoint") } _, err := p.ClearBreakpoint(bp.Addr) assertNoError(err, t, "ClearBreakpoint() returned an error") } } f, ln = currentLineNumber(p, t) regs, _ = p.CurrentThread().Registers(false) pc := regs.PC() if traceTestseq2 { t.Logf("at %#x %s:%d", pc, f, ln) } if ln != tc.pos { t.Fatalf("Program did not continue to correct next location expected %d was %s:%d (%#x) (testcase %d)", tc.pos, filepath.Base(f), ln, pc, i) } } if countBreakpoints(p) != 0 { t.Fatal("Not all breakpoints were cleaned up", len(p.Breakpoints().M)) } }) } func TestNextGeneral(t *testing.T) { var testcases []nextTest ver, _ := goversion.Parse(runtime.Version()) if ver.Major < 0 || ver.AfterOrEqual(goversion.GoVersion{1, 7, -1, 0, 0, ""}) { testcases = []nextTest{ {17, 19}, {19, 20}, {20, 23}, {23, 24}, {24, 26}, {26, 31}, {31, 23}, {23, 24}, {24, 26}, {26, 31}, {31, 23}, {23, 24}, {24, 26}, {26, 27}, {27, 28}, {28, 34}, } } else { testcases = []nextTest{ {17, 19}, {19, 20}, {20, 23}, {23, 24}, {24, 26}, {26, 31}, {31, 23}, {23, 24}, {24, 26}, {26, 31}, {31, 23}, {23, 24}, {24, 26}, {26, 27}, {27, 34}, } } testseq("testnextprog", contNext, testcases, "main.testnext", t) } func TestNextConcurrent(t *testing.T) { testcases := []nextTest{ {8, 9}, {9, 10}, {10, 11}, } protest.AllowRecording(t) 
withTestProcess("parallel_next", t, func(p proc.Process, fixture protest.Fixture) { bp, err := setFunctionBreakpoint(p, "main.sayhi") assertNoError(err, t, "SetBreakpoint") assertNoError(proc.Continue(p), t, "Continue") f, ln := currentLineNumber(p, t) initV := evalVariable(p, t, "n") initVval, _ := constant.Int64Val(initV.Value) _, err = p.ClearBreakpoint(bp.Addr) assertNoError(err, t, "ClearBreakpoint()") for _, tc := range testcases { g, err := proc.GetG(p.CurrentThread()) assertNoError(err, t, "GetG()") if p.SelectedGoroutine().ID != g.ID { t.Fatalf("SelectedGoroutine not CurrentThread's goroutine: %d %d", g.ID, p.SelectedGoroutine().ID) } if ln != tc.begin { t.Fatalf("Program not stopped at correct spot expected %d was %s:%d", tc.begin, filepath.Base(f), ln) } assertNoError(proc.Next(p), t, "Next() returned an error") f, ln = assertLineNumber(p, t, tc.end, "Program did not continue to the expected location") v := evalVariable(p, t, "n") vval, _ := constant.Int64Val(v.Value) if vval != initVval { t.Fatal("Did not end up on same goroutine") } } }) } func TestNextConcurrentVariant2(t *testing.T) { // Just like TestNextConcurrent but instead of removing the initial breakpoint we check that when it happens is for other goroutines testcases := []nextTest{ {8, 9}, {9, 10}, {10, 11}, } protest.AllowRecording(t) withTestProcess("parallel_next", t, func(p proc.Process, fixture protest.Fixture) { _, err := setFunctionBreakpoint(p, "main.sayhi") assertNoError(err, t, "SetBreakpoint") assertNoError(proc.Continue(p), t, "Continue") f, ln := currentLineNumber(p, t) initV := evalVariable(p, t, "n") initVval, _ := constant.Int64Val(initV.Value) for _, tc := range testcases { t.Logf("test case %v", tc) g, err := proc.GetG(p.CurrentThread()) assertNoError(err, t, "GetG()") if p.SelectedGoroutine().ID != g.ID { t.Fatalf("SelectedGoroutine not CurrentThread's goroutine: %d %d", g.ID, p.SelectedGoroutine().ID) } if ln != tc.begin { t.Fatalf("Program not stopped at correct spot expected %d was %s:%d", tc.begin, filepath.Base(f), ln) } assertNoError(proc.Next(p), t, "Next() returned an error") var vval int64 for { v := evalVariable(p, t, "n") for _, thread := range p.ThreadList() { proc.GetG(thread) } vval, _ = constant.Int64Val(v.Value) if bpstate := p.CurrentThread().Breakpoint(); bpstate.Breakpoint == nil { if vval != initVval { t.Fatal("Did not end up on same goroutine") } break } else { if vval == initVval { t.Fatal("Initial breakpoint triggered twice for the same goroutine") } assertNoError(proc.Continue(p), t, "Continue 2") } } f, ln = assertLineNumber(p, t, tc.end, "Program did not continue to the expected location") } }) } func TestNextFunctionReturn(t *testing.T) { testcases := []nextTest{ {13, 14}, {14, 15}, {15, 35}, } protest.AllowRecording(t) testseq("testnextprog", contNext, testcases, "main.helloworld", t) } func TestNextFunctionReturnDefer(t *testing.T) { var testcases []nextTest ver, _ := goversion.Parse(runtime.Version()) if ver.Major < 0 || ver.AfterOrEqual(goversion.GoVersion{1, 9, -1, 0, 0, ""}) { testcases = []nextTest{ {5, 6}, {6, 9}, {9, 10}, } } else { testcases = []nextTest{ {5, 8}, {8, 9}, {9, 10}, } } protest.AllowRecording(t) testseq("testnextdefer", contNext, testcases, "main.main", t) } func TestNextNetHTTP(t *testing.T) { testcases := []nextTest{ {11, 12}, {12, 13}, } withTestProcess("testnextnethttp", t, func(p proc.Process, fixture protest.Fixture) { go func() { // Wait for program to start listening. 
for { conn, err := net.Dial("tcp", "localhost:9191") if err == nil { conn.Close() break } time.Sleep(50 * time.Millisecond) } http.Get("http://localhost:9191") }() if err := proc.Continue(p); err != nil { t.Fatal(err) } f, ln := currentLineNumber(p, t) for _, tc := range testcases { if ln != tc.begin { t.Fatalf("Program not stopped at correct spot expected %d was %s:%d", tc.begin, filepath.Base(f), ln) } assertNoError(proc.Next(p), t, "Next() returned an error") f, ln = assertLineNumber(p, t, tc.end, "Program did not continue to correct next location") } }) } func TestRuntimeBreakpoint(t *testing.T) { withTestProcess("testruntimebreakpoint", t, func(p proc.Process, fixture protest.Fixture) { err := proc.Continue(p) if err != nil { t.Fatal(err) } regs, err := p.CurrentThread().Registers(false) assertNoError(err, t, "Registers") pc := regs.PC() f, l, _ := p.BinInfo().PCToLine(pc) if l != 10 { t.Fatalf("did not respect breakpoint %s:%d", f, l) } }) } func returnAddress(thread proc.Thread) (uint64, error) { locations, err := proc.ThreadStacktrace(thread, 2) if err != nil { return 0, err } if len(locations) < 2 { return 0, fmt.Errorf("no return address for function: %s", locations[0].Current.Fn.BaseName()) } return locations[1].Current.PC, nil } func TestFindReturnAddress(t *testing.T) { protest.AllowRecording(t) withTestProcess("testnextprog", t, func(p proc.Process, fixture protest.Fixture) { start, _, err := p.BinInfo().LineToPC(fixture.Source, 24) if err != nil { t.Fatal(err) } _, err = p.SetBreakpoint(start, proc.UserBreakpoint, nil) if err != nil { t.Fatal(err) } err = proc.Continue(p) if err != nil { t.Fatal(err) } addr, err := returnAddress(p.CurrentThread()) if err != nil { t.Fatal(err) } _, l, _ := p.BinInfo().PCToLine(addr) if l != 40 { t.Fatalf("return address not found correctly, expected line 40") } }) } func TestFindReturnAddressTopOfStackFn(t *testing.T) { protest.AllowRecording(t) withTestProcess("testreturnaddress", t, func(p proc.Process, fixture protest.Fixture) { fnName := "runtime.rt0_go" fnentry, err := proc.FindFunctionLocation(p, fnName, false, 0) assertNoError(err, t, "FindFunctionLocation") if _, err := p.SetBreakpoint(fnentry, proc.UserBreakpoint, nil); err != nil { t.Fatal(err) } if err := proc.Continue(p); err != nil { t.Fatal(err) } if _, err := returnAddress(p.CurrentThread()); err == nil { t.Fatal("expected error to be returned") } }) } func TestSwitchThread(t *testing.T) { protest.AllowRecording(t) withTestProcess("testnextprog", t, func(p proc.Process, fixture protest.Fixture) { // With invalid thread id err := p.SwitchThread(-1) if err == nil { t.Fatal("Expected error for invalid thread id") } pc, err := proc.FindFunctionLocation(p, "main.main", true, 0) if err != nil { t.Fatal(err) } _, err = p.SetBreakpoint(pc, proc.UserBreakpoint, nil) if err != nil { t.Fatal(err) } err = proc.Continue(p) if err != nil { t.Fatal(err) } var nt int ct := p.CurrentThread().ThreadID() for _, thread := range p.ThreadList() { if thread.ThreadID() != ct { nt = thread.ThreadID() break } } if nt == 0 { t.Fatal("could not find thread to switch to") } // With valid thread id err = p.SwitchThread(nt) if err != nil { t.Fatal(err) } if p.CurrentThread().ThreadID() != nt { t.Fatal("Did not switch threads") } }) } func TestCGONext(t *testing.T) { // Test if one can do 'next' in a cgo binary // On OSX with Go < 1.5 CGO is not supported due to: https://github.com/golang/go/issues/8973 if runtime.GOOS == "darwin" && strings.Contains(runtime.Version(), "1.4") { return } if 
os.Getenv("CGO_ENABLED") == "" { return } protest.AllowRecording(t) withTestProcess("cgotest", t, func(p proc.Process, fixture protest.Fixture) { pc, err := proc.FindFunctionLocation(p, "main.main", true, 0) if err != nil { t.Fatal(err) } _, err = p.SetBreakpoint(pc, proc.UserBreakpoint, nil) if err != nil { t.Fatal(err) } err = proc.Continue(p) if err != nil { t.Fatal(err) } err = proc.Next(p) if err != nil { t.Fatal(err) } }) } type loc struct { line int fn string } func (l1 *loc) match(l2 proc.Stackframe) bool { if l1.line >= 0 { if l1.line != l2.Call.Line { return false } } return l1.fn == l2.Call.Fn.Name } func TestStacktrace(t *testing.T) { stacks := [][]loc{ {{4, "main.stacktraceme"}, {8, "main.func1"}, {16, "main.main"}}, {{4, "main.stacktraceme"}, {8, "main.func1"}, {12, "main.func2"}, {17, "main.main"}}, } protest.AllowRecording(t) withTestProcess("stacktraceprog", t, func(p proc.Process, fixture protest.Fixture) { bp, err := setFunctionBreakpoint(p, "main.stacktraceme") assertNoError(err, t, "BreakByLocation()") for i := range stacks { assertNoError(proc.Continue(p), t, "Continue()") locations, err := proc.ThreadStacktrace(p.CurrentThread(), 40) assertNoError(err, t, "Stacktrace()") if len(locations) != len(stacks[i])+2 { t.Fatalf("Wrong stack trace size %d %d\n", len(locations), len(stacks[i])+2) } t.Logf("Stacktrace %d:\n", i) for i := range locations { t.Logf("\t%s:%d\n", locations[i].Call.File, locations[i].Call.Line) } for j := range stacks[i] { if !stacks[i][j].match(locations[j]) { t.Fatalf("Wrong stack trace pos %d\n", j) } } } p.ClearBreakpoint(bp.Addr) proc.Continue(p) }) } func TestStacktrace2(t *testing.T) { withTestProcess("retstack", t, func(p proc.Process, fixture protest.Fixture) { assertNoError(proc.Continue(p), t, "Continue()") locations, err := proc.ThreadStacktrace(p.CurrentThread(), 40) assertNoError(err, t, "Stacktrace()") if !stackMatch([]loc{{-1, "main.f"}, {16, "main.main"}}, locations, false) { for i := range locations { t.Logf("\t%s:%d [%s]\n", locations[i].Call.File, locations[i].Call.Line, locations[i].Call.Fn.Name) } t.Fatalf("Stack error at main.f()\n%v\n", locations) } assertNoError(proc.Continue(p), t, "Continue()") locations, err = proc.ThreadStacktrace(p.CurrentThread(), 40) assertNoError(err, t, "Stacktrace()") if !stackMatch([]loc{{-1, "main.g"}, {17, "main.main"}}, locations, false) { for i := range locations { t.Logf("\t%s:%d [%s]\n", locations[i].Call.File, locations[i].Call.Line, locations[i].Call.Fn.Name) } t.Fatalf("Stack error at main.g()\n%v\n", locations) } }) } func stackMatch(stack []loc, locations []proc.Stackframe, skipRuntime bool) bool { if len(stack) > len(locations) { return false } i := 0 for j := range locations { if i >= len(stack) { break } if skipRuntime { if locations[j].Call.Fn == nil || strings.HasPrefix(locations[j].Call.Fn.Name, "runtime.") { continue } } if !stack[i].match(locations[j]) { return false } i++ } return i >= len(stack) } func TestStacktraceGoroutine(t *testing.T) { mainStack := []loc{{14, "main.stacktraceme"}, {29, "main.main"}} if goversion.VersionAfterOrEqual(runtime.Version(), 1, 11) { mainStack[0].line = 15 } agoroutineStacks := [][]loc{ {{8, "main.agoroutine"}}, {{9, "main.agoroutine"}}, {{10, "main.agoroutine"}}, } protest.AllowRecording(t) withTestProcess("goroutinestackprog", t, func(p proc.Process, fixture protest.Fixture) { bp, err := setFunctionBreakpoint(p, "main.stacktraceme") assertNoError(err, t, "BreakByLocation()") assertNoError(proc.Continue(p), t, "Continue()") gs, _, err := 
proc.GoroutinesInfo(p, 0, 0) assertNoError(err, t, "GoroutinesInfo") agoroutineCount := 0 mainCount := 0 for i, g := range gs { locations, err := g.Stacktrace(40, false) if err != nil { // On windows we do not have frame information for goroutines doing system calls. t.Logf("Could not retrieve goroutine stack for goid=%d: %v", g.ID, err) continue } if stackMatch(mainStack, locations, false) { mainCount++ } found := false for _, agoroutineStack := range agoroutineStacks { if stackMatch(agoroutineStack, locations, true) { found = true } } if found { agoroutineCount++ } else { t.Logf("Non-goroutine stack: %d (%d)", i, len(locations)) for i := range locations { name := "" if locations[i].Call.Fn != nil { name = locations[i].Call.Fn.Name } t.Logf("\t%s:%d %s (%#x)\n", locations[i].Call.File, locations[i].Call.Line, name, locations[i].Current.PC) } } } if mainCount != 1 { t.Fatalf("Main goroutine stack not found %d", mainCount) } if agoroutineCount != 10 { t.Fatalf("Goroutine stacks not found (%d)", agoroutineCount) } p.ClearBreakpoint(bp.Addr) proc.Continue(p) }) } func TestKill(t *testing.T) { if testBackend == "lldb" { // k command presumably works but leaves the process around? return } withTestProcess("testprog", t, func(p proc.Process, fixture protest.Fixture) { if err := p.Detach(true); err != nil { t.Fatal(err) } if valid, _ := p.Valid(); valid { t.Fatal("expected process to have exited") } if runtime.GOOS == "linux" { _, err := os.Open(fmt.Sprintf("/proc/%d/", p.Pid())) if err == nil { t.Fatal("process has not exited", p.Pid()) } } }) } func testGSupportFunc(name string, t *testing.T, p proc.Process, fixture protest.Fixture) { bp, err := setFunctionBreakpoint(p, "main.main") assertNoError(err, t, name+": BreakByLocation()") assertNoError(proc.Continue(p), t, name+": Continue()") g, err := proc.GetG(p.CurrentThread()) assertNoError(err, t, name+": GetG()") if g == nil { t.Fatal(name + ": g was nil") } t.Logf(name+": g is: %v", g) p.ClearBreakpoint(bp.Addr) } func TestGetG(t *testing.T) { withTestProcess("testprog", t, func(p proc.Process, fixture protest.Fixture) { testGSupportFunc("nocgo", t, p, fixture) }) // On OSX with Go < 1.5 CGO is not supported due to: https://github.com/golang/go/issues/8973 if runtime.GOOS == "darwin" && strings.Contains(runtime.Version(), "1.4") { return } if os.Getenv("CGO_ENABLED") == "" { return } protest.AllowRecording(t) withTestProcess("cgotest", t, func(p proc.Process, fixture protest.Fixture) { testGSupportFunc("cgo", t, p, fixture) }) } func TestContinueMulti(t *testing.T) { protest.AllowRecording(t) withTestProcess("integrationprog", t, func(p proc.Process, fixture protest.Fixture) { bp1, err := setFunctionBreakpoint(p, "main.main") assertNoError(err, t, "BreakByLocation()") bp2, err := setFunctionBreakpoint(p, "main.sayhi") assertNoError(err, t, "BreakByLocation()") mainCount := 0 sayhiCount := 0 for { err := proc.Continue(p) if valid, _ := p.Valid(); !valid { break } assertNoError(err, t, "Continue()") if bp := p.CurrentThread().Breakpoint(); bp.ID == bp1.ID { mainCount++ } if bp := p.CurrentThread().Breakpoint(); bp.ID == bp2.ID { sayhiCount++ } } if mainCount != 1 { t.Fatalf("Main breakpoint hit wrong number of times: %d\n", mainCount) } if sayhiCount != 3 { t.Fatalf("Sayhi breakpoint hit wrong number of times: %d\n", sayhiCount) } }) } func TestBreakpointOnFunctionEntry(t *testing.T) { testseq2(t, "testprog", "main.main", []seqTest{{contContinue, 17}}) } func TestProcessReceivesSIGCHLD(t *testing.T) { protest.AllowRecording(t) 
withTestProcess("sigchldprog", t, func(p proc.Process, fixture protest.Fixture) { err := proc.Continue(p) _, ok := err.(proc.ErrProcessExited) if !ok { t.Fatalf("Continue() returned unexpected error type %v", err) } }) } func TestIssue239(t *testing.T) { withTestProcess("is sue239", t, func(p proc.Process, fixture protest.Fixture) { pos, _, err := p.BinInfo().LineToPC(fixture.Source, 17) assertNoError(err, t, "LineToPC()") _, err = p.SetBreakpoint(pos, proc.UserBreakpoint, nil) assertNoError(err, t, fmt.Sprintf("SetBreakpoint(%d)", pos)) assertNoError(proc.Continue(p), t, fmt.Sprintf("Continue()")) }) } func findFirstNonRuntimeFrame(p proc.Process) (proc.Stackframe, error) { frames, err := proc.ThreadStacktrace(p.CurrentThread(), 10) if err != nil { return proc.Stackframe{}, err } for _, frame := range frames { if frame.Current.Fn != nil && !strings.HasPrefix(frame.Current.Fn.Name, "runtime.") { return frame, nil } } return proc.Stackframe{}, fmt.Errorf("non-runtime frame not found") } func evalVariableOrError(p proc.Process, symbol string) (*proc.Variable, error) { var scope *proc.EvalScope var err error if testBackend == "rr" { var frame proc.Stackframe frame, err = findFirstNonRuntimeFrame(p) if err == nil { scope = proc.FrameToScope(p.BinInfo(), p.CurrentThread(), nil, frame) } } else { scope, err = proc.GoroutineScope(p.CurrentThread()) } if err != nil { return nil, err } return scope.EvalVariable(symbol, normalLoadConfig) } func evalVariable(p proc.Process, t testing.TB, symbol string) *proc.Variable { v, err := evalVariableOrError(p, symbol) if err != nil { _, file, line, _ := runtime.Caller(1) fname := filepath.Base(file) t.Fatalf("%s:%d: EvalVariable(%q): %v", fname, line, symbol, err) } return v } func setVariable(p proc.Process, symbol, value string) error { scope, err := proc.GoroutineScope(p.CurrentThread()) if err != nil { return err } return scope.SetVariable(symbol, value) } func TestVariableEvaluation(t *testing.T) { protest.AllowRecording(t) testcases := []struct { name string st reflect.Kind value interface{} length, cap int64 childrenlen int }{ {"a1", reflect.String, "foofoofoofoofoofoo", 18, 0, 0}, {"a11", reflect.Array, nil, 3, -1, 3}, {"a12", reflect.Slice, nil, 2, 2, 2}, {"a13", reflect.Slice, nil, 3, 3, 3}, {"a2", reflect.Int, int64(6), 0, 0, 0}, {"a3", reflect.Float64, float64(7.23), 0, 0, 0}, {"a4", reflect.Array, nil, 2, -1, 2}, {"a5", reflect.Slice, nil, 5, 5, 5}, {"a6", reflect.Struct, nil, 2, 0, 2}, {"a7", reflect.Ptr, nil, 1, 0, 1}, {"a8", reflect.Struct, nil, 2, 0, 2}, {"a9", reflect.Ptr, nil, 1, 0, 1}, {"baz", reflect.String, "bazburzum", 9, 0, 0}, {"neg", reflect.Int, int64(-1), 0, 0, 0}, {"f32", reflect.Float32, float64(float32(1.2)), 0, 0, 0}, {"c64", reflect.Complex64, complex128(complex64(1 + 2i)), 0, 0, 0}, {"c128", reflect.Complex128, complex128(2 + 3i), 0, 0, 0}, {"a6.Baz", reflect.Int, int64(8), 0, 0, 0}, {"a7.Baz", reflect.Int, int64(5), 0, 0, 0}, {"a8.Baz", reflect.String, "feh", 3, 0, 0}, {"a8", reflect.Struct, nil, 2, 0, 2}, {"i32", reflect.Array, nil, 2, -1, 2}, {"b1", reflect.Bool, true, 0, 0, 0}, {"b2", reflect.Bool, false, 0, 0, 0}, {"f", reflect.Func, "main.barfoo", 0, 0, 0}, {"ba", reflect.Slice, nil, 200, 200, 64}, } withTestProcess("testvariables", t, func(p proc.Process, fixture protest.Fixture) { assertNoError(proc.Continue(p), t, "Continue() returned an error") for _, tc := range testcases { v := evalVariable(p, t, tc.name) if v.Kind != tc.st { t.Fatalf("%s simple type: expected: %s got: %s", tc.name, tc.st, v.Kind.String()) } if 
v.Value == nil && tc.value != nil { t.Fatalf("%s value: expected: %v got: %v", tc.name, tc.value, v.Value) } else { switch v.Kind { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: x, _ := constant.Int64Val(v.Value) if y, ok := tc.value.(int64); !ok || x != y { t.Fatalf("%s value: expected: %v got: %v", tc.name, tc.value, v.Value) } case reflect.Float32, reflect.Float64: x, _ := constant.Float64Val(v.Value) if y, ok := tc.value.(float64); !ok || x != y { t.Fatalf("%s value: expected: %v got: %v", tc.name, tc.value, v.Value) } case reflect.Complex64, reflect.Complex128: xr, _ := constant.Float64Val(constant.Real(v.Value)) xi, _ := constant.Float64Val(constant.Imag(v.Value)) if y, ok := tc.value.(complex128); !ok || complex(xr, xi) != y { t.Fatalf("%s value: expected: %v got: %v", tc.name, tc.value, v.Value) } case reflect.String: if y, ok := tc.value.(string); !ok || constant.StringVal(v.Value) != y { t.Fatalf("%s value: expected: %v got: %v", tc.name, tc.value, v.Value) } } } if v.Len != tc.length { t.Fatalf("%s len: expected: %d got: %d", tc.name, tc.length, v.Len) } if v.Cap != tc.cap { t.Fatalf("%s cap: expected: %d got: %d", tc.name, tc.cap, v.Cap) } if len(v.Children) != tc.childrenlen { t.Fatalf("%s children len: expected %d got: %d", tc.name, tc.childrenlen, len(v.Children)) } } }) } func TestFrameEvaluation(t *testing.T) { protest.AllowRecording(t) withTestProcess("goroutinestackprog", t, func(p proc.Process, fixture protest.Fixture) { _, err := setFunctionBreakpoint(p, "main.stacktraceme") assertNoError(err, t, "setFunctionBreakpoint") assertNoError(proc.Continue(p), t, "Continue()") t.Logf("stopped on thread %d, goroutine: %#v", p.CurrentThread().ThreadID(), p.SelectedGoroutine()) // Testing evaluation on goroutines gs, _, err := proc.GoroutinesInfo(p, 0, 0) assertNoError(err, t, "GoroutinesInfo") found := make([]bool, 10) for _, g := range gs { frame := -1 frames, err := g.Stacktrace(10, false) if err != nil { t.Logf("could not stacktrace goroutine %d: %v\n", g.ID, err) continue } t.Logf("Goroutine %d", g.ID) logStacktrace(t, p.BinInfo(), frames) for i := range frames { if frames[i].Call.Fn != nil && frames[i].Call.Fn.Name == "main.agoroutine" { frame = i break } } if frame < 0 { t.Logf("Goroutine %d: could not find correct frame", g.ID) continue } scope, err := proc.ConvertEvalScope(p, g.ID, frame, 0) assertNoError(err, t, "ConvertEvalScope()") t.Logf("scope = %v", scope) v, err := scope.EvalVariable("i", normalLoadConfig) t.Logf("v = %v", v) if err != nil { t.Logf("Goroutine %d: %v\n", g.ID, err) continue } vval, _ := constant.Int64Val(v.Value) found[vval] = true } for i := range found { if !found[i] { t.Fatalf("Goroutine %d not found\n", i) } } // Testing evaluation on frames assertNoError(proc.Continue(p), t, "Continue() 2") g, err := proc.GetG(p.CurrentThread()) assertNoError(err, t, "GetG()") for i := 0; i <= 3; i++ { scope, err := proc.ConvertEvalScope(p, g.ID, i+1, 0) assertNoError(err, t, fmt.Sprintf("ConvertEvalScope() on frame %d", i+1)) v, err := scope.EvalVariable("n", normalLoadConfig) assertNoError(err, t, fmt.Sprintf("EvalVariable() on frame %d", i+1)) n, _ := constant.Int64Val(v.Value) t.Logf("frame %d n %d\n", i+1, n) if n != int64(3-i) { t.Fatalf("On frame %d value of n is %d (not %d)", i+1, n, 3-i) } } }) } func TestPointerSetting(t *testing.T) { withTestProcess("testvariables2", t, func(p proc.Process, fixture protest.Fixture) { assertNoError(proc.Continue(p), t, "Continue() returned an error") pval := func(n int64) { variable := 
evalVariable(p, t, "p1") c0val, _ := constant.Int64Val(variable.Children[0].Value) if c0val != n { t.Fatalf("Wrong value of p1, *%d expected *%d", c0val, n) } } pval(1) // change p1 to point to i2 scope, err := proc.GoroutineScope(p.CurrentThread()) assertNoError(err, t, "Scope()") i2addr, err := scope.EvalExpression("i2", normalLoadConfig) assertNoError(err, t, "EvalExpression()") assertNoError(setVariable(p, "p1", fmt.Sprintf("(*int)(0x%x)", i2addr.Addr)), t, "SetVariable()") pval(2) // change the value of i2 check that p1 also changes assertNoError(setVariable(p, "i2", "5"), t, "SetVariable()") pval(5) }) } func TestVariableFunctionScoping(t *testing.T) { withTestProcess("testvariables", t, func(p proc.Process, fixture protest.Fixture) { err := proc.Continue(p) assertNoError(err, t, "Continue() returned an error") evalVariable(p, t, "a1") evalVariable(p, t, "a2") // Move scopes, a1 exists here by a2 does not err = proc.Continue(p) assertNoError(err, t, "Continue() returned an error") evalVariable(p, t, "a1") _, err = evalVariableOrError(p, "a2") if err == nil { t.Fatalf("Can eval out of scope variable a2") } }) } func TestRecursiveStructure(t *testing.T) { protest.AllowRecording(t) withTestProcess("testvariables2", t, func(p proc.Process, fixture protest.Fixture) { assertNoError(proc.Continue(p), t, "Continue()") v := evalVariable(p, t, "aas") t.Logf("v: %v\n", v) }) } func TestIssue316(t *testing.T) { // A pointer loop that includes one interface should not send dlv into an infinite loop protest.AllowRecording(t) withTestProcess("testvariables2", t, func(p proc.Process, fixture protest.Fixture) { assertNoError(proc.Continue(p), t, "Continue()") evalVariable(p, t, "iface5") }) } func TestIssue325(t *testing.T) { // nil pointer dereference when evaluating interfaces to function pointers protest.AllowRecording(t) withTestProcess("testvariables2", t, func(p proc.Process, fixture protest.Fixture) { assertNoError(proc.Continue(p), t, "Continue()") iface2fn1v := evalVariable(p, t, "iface2fn1") t.Logf("iface2fn1: %v\n", iface2fn1v) iface2fn2v := evalVariable(p, t, "iface2fn2") t.Logf("iface2fn2: %v\n", iface2fn2v) }) } func TestBreakpointCounts(t *testing.T) { protest.AllowRecording(t) withTestProcess("bpcountstest", t, func(p proc.Process, fixture protest.Fixture) { addr, _, err := p.BinInfo().LineToPC(fixture.Source, 12) assertNoError(err, t, "LineToPC") bp, err := p.SetBreakpoint(addr, proc.UserBreakpoint, nil) assertNoError(err, t, "SetBreakpoint()") for { if err := proc.Continue(p); err != nil { if _, exited := err.(proc.ErrProcessExited); exited { break } assertNoError(err, t, "Continue()") } } t.Logf("TotalHitCount: %d", bp.TotalHitCount) if bp.TotalHitCount != 200 { t.Fatalf("Wrong TotalHitCount for the breakpoint (%d)", bp.TotalHitCount) } if len(bp.HitCount) != 2 { t.Fatalf("Wrong number of goroutines for breakpoint (%d)", len(bp.HitCount)) } for _, v := range bp.HitCount { if v != 100 { t.Fatalf("Wrong HitCount for breakpoint (%v)", bp.HitCount) } } }) } func BenchmarkArray(b *testing.B) { // each bencharr struct is 128 bytes, bencharr is 64 elements long protest.AllowRecording(b) b.SetBytes(int64(64 * 128)) withTestProcess("testvariables2", b, func(p proc.Process, fixture protest.Fixture) { assertNoError(proc.Continue(p), b, "Continue()") for i := 0; i < b.N; i++ { evalVariable(p, b, "bencharr") } }) } const doTestBreakpointCountsWithDetection = false func TestBreakpointCountsWithDetection(t *testing.T) { if !doTestBreakpointCountsWithDetection { return } m := map[int64]int64{} 
protest.AllowRecording(t) withTestProcess("bpcountstest", t, func(p proc.Process, fixture protest.Fixture) { addr, _, err := p.BinInfo().LineToPC(fixture.Source, 12) assertNoError(err, t, "LineToPC") bp, err := p.SetBreakpoint(addr, proc.UserBreakpoint, nil) assertNoError(err, t, "SetBreakpoint()") for { if err := proc.Continue(p); err != nil { if _, exited := err.(proc.ErrProcessExited); exited { break } assertNoError(err, t, "Continue()") } for _, th := range p.ThreadList() { if bp := th.Breakpoint(); bp.Breakpoint == nil { continue } scope, err := proc.GoroutineScope(th) assertNoError(err, t, "Scope()") v, err := scope.EvalVariable("i", normalLoadConfig) assertNoError(err, t, "evalVariable") i, _ := constant.Int64Val(v.Value) v, err = scope.EvalVariable("id", normalLoadConfig) assertNoError(err, t, "evalVariable") id, _ := constant.Int64Val(v.Value) m[id] = i } total := int64(0) for i := range m { total += m[i] + 1 } if uint64(total) != bp.TotalHitCount { t.Fatalf("Mismatched total count %d %d\n", total, bp.TotalHitCount) } } t.Logf("TotalHitCount: %d", bp.TotalHitCount) if bp.TotalHitCount != 200 { t.Fatalf("Wrong TotalHitCount for the breakpoint (%d)", bp.TotalHitCount) } if len(bp.HitCount) != 2 { t.Fatalf("Wrong number of goroutines for breakpoint (%d)", len(bp.HitCount)) } for _, v := range bp.HitCount { if v != 100 { t.Fatalf("Wrong HitCount for breakpoint (%v)", bp.HitCount) } } }) } func BenchmarkArrayPointer(b *testing.B) { // each bencharr struct is 128 bytes, benchparr is an array of 64 pointers to bencharr // each read will read 64 bencharr structs plus the 64 pointers of benchparr protest.AllowRecording(b) b.SetBytes(int64(64*128 + 64*8)) withTestProcess("testvariables2", b, func(p proc.Process, fixture protest.Fixture) { assertNoError(proc.Continue(p), b, "Continue()") for i := 0; i < b.N; i++ { evalVariable(p, b, "bencharr") } }) } func BenchmarkMap(b *testing.B) { // m1 contains 41 entries, each one has a value that's 2 int values (2* 8 bytes) and a string key // each string key has an average of 9 character // reading strings and the map structure imposes a overhead that we ignore here protest.AllowRecording(b) b.SetBytes(int64(41 * (2*8 + 9))) withTestProcess("testvariables2", b, func(p proc.Process, fixture protest.Fixture) { assertNoError(proc.Continue(p), b, "Continue()") for i := 0; i < b.N; i++ { evalVariable(p, b, "m1") } }) } func BenchmarkGoroutinesInfo(b *testing.B) { protest.AllowRecording(b) withTestProcess("testvariables2", b, func(p proc.Process, fixture protest.Fixture) { assertNoError(proc.Continue(p), b, "Continue()") for i := 0; i < b.N; i++ { p.Common().ClearAllGCache() _, _, err := proc.GoroutinesInfo(p, 0, 0) assertNoError(err, b, "GoroutinesInfo") } }) } func TestIssue262(t *testing.T) { // Continue does not work when the current breakpoint is set on a NOP instruction protest.AllowRecording(t) withTestProcess("issue262", t, func(p proc.Process, fixture protest.Fixture) { addr, _, err := p.BinInfo().LineToPC(fixture.Source, 11) assertNoError(err, t, "LineToPC") _, err = p.SetBreakpoint(addr, proc.UserBreakpoint, nil) assertNoError(err, t, "SetBreakpoint()") assertNoError(proc.Continue(p), t, "Continue()") err = proc.Continue(p) if err == nil { t.Fatalf("No error on second continue") } _, exited := err.(proc.ErrProcessExited) if !exited { t.Fatalf("Process did not exit after second continue: %v", err) } }) } func TestIssue305(t *testing.T) { // If 'next' hits a breakpoint on the goroutine it's stepping through // the internal breakpoints aren't 
cleared, preventing further use of // the 'next' command protest.AllowRecording(t) withTestProcess("issue305", t, func(p proc.Process, fixture protest.Fixture) { addr, _, err := p.BinInfo().LineToPC(fixture.Source, 5) assertNoError(err, t, "LineToPC()") _, err = p.SetBreakpoint(addr, proc.UserBreakpoint, nil) assertNoError(err, t, "SetBreakpoint()") assertNoError(proc.Continue(p), t, "Continue()") assertNoError(proc.Next(p), t, "Next() 1") assertNoError(proc.Next(p), t, "Next() 2") assertNoError(proc.Next(p), t, "Next() 3") assertNoError(proc.Next(p), t, "Next() 4") assertNoError(proc.Next(p), t, "Next() 5") }) } func TestPointerLoops(t *testing.T) { // Pointer loops through map entries, pointers and slices // Regression test for issue #341 protest.AllowRecording(t) withTestProcess("testvariables2", t, func(p proc.Process, fixture protest.Fixture) { assertNoError(proc.Continue(p), t, "Continue()") for _, expr := range []string{"mapinf", "ptrinf", "sliceinf"} { t.Logf("requesting %s", expr) v := evalVariable(p, t, expr) t.Logf("%s: %v\n", expr, v) } }) } func BenchmarkLocalVariables(b *testing.B) { protest.AllowRecording(b) withTestProcess("testvariables", b, func(p proc.Process, fixture protest.Fixture) { assertNoError(proc.Continue(p), b, "Continue() returned an error") scope, err := proc.GoroutineScope(p.CurrentThread()) assertNoError(err, b, "Scope()") for i := 0; i < b.N; i++ { _, err := scope.LocalVariables(normalLoadConfig) assertNoError(err, b, "LocalVariables()") } }) } func TestCondBreakpoint(t *testing.T) { protest.AllowRecording(t) withTestProcess("parallel_next", t, func(p proc.Process, fixture protest.Fixture) { addr, _, err := p.BinInfo().LineToPC(fixture.Source, 9) assertNoError(err, t, "LineToPC") bp, err := p.SetBreakpoint(addr, proc.UserBreakpoint, nil) assertNoError(err, t, "SetBreakpoint()") bp.Cond = &ast.BinaryExpr{ Op: token.EQL, X: &ast.Ident{Name: "n"}, Y: &ast.BasicLit{Kind: token.INT, Value: "7"}, } assertNoError(proc.Continue(p), t, "Continue()") nvar := evalVariable(p, t, "n") n, _ := constant.Int64Val(nvar.Value) if n != 7 { t.Fatalf("Stopped on wrong goroutine %d\n", n) } }) } func TestCondBreakpointError(t *testing.T) { protest.AllowRecording(t) withTestProcess("parallel_next", t, func(p proc.Process, fixture protest.Fixture) { addr, _, err := p.BinInfo().LineToPC(fixture.Source, 9) assertNoError(err, t, "LineToPC") bp, err := p.SetBreakpoint(addr, proc.UserBreakpoint, nil) assertNoError(err, t, "SetBreakpoint()") bp.Cond = &ast.BinaryExpr{ Op: token.EQL, X: &ast.Ident{Name: "nonexistentvariable"}, Y: &ast.BasicLit{Kind: token.INT, Value: "7"}, } err = proc.Continue(p) if err == nil { t.Fatalf("No error on first Continue()") } if err.Error() != "error evaluating expression: could not find symbol value for nonexistentvariable" && err.Error() != "multiple errors evaluating conditions" { t.Fatalf("Unexpected error on first Continue(): %v", err) } bp.Cond = &ast.BinaryExpr{ Op: token.EQL, X: &ast.Ident{Name: "n"}, Y: &ast.BasicLit{Kind: token.INT, Value: "7"}, } err = proc.Continue(p) if err != nil { if _, exited := err.(proc.ErrProcessExited); !exited { t.Fatalf("Unexpected error on second Continue(): %v", err) } } else { nvar := evalVariable(p, t, "n") n, _ := constant.Int64Val(nvar.Value) if n != 7 { t.Fatalf("Stopped on wrong goroutine %d\n", n) } } }) } func TestIssue356(t *testing.T) { // slice with a typedef does not get printed correctly protest.AllowRecording(t) withTestProcess("testvariables2", t, func(p proc.Process, fixture protest.Fixture) {
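// Run to the stopping point and verify that mainMenu, a slice with a named (typedef'd) element type,
// is still reported with reflect.Slice as its kind.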
assertNoError(proc.Continue(p), t, "Continue() returned an error") mmvar := evalVariable(p, t, "mainMenu") if mmvar.Kind != reflect.Slice { t.Fatalf("Wrong kind for mainMenu: %v\n", mmvar.Kind) } }) } func TestStepIntoFunction(t *testing.T) { withTestProcess("teststep", t, func(p proc.Process, fixture protest.Fixture) { // Continue until breakpoint assertNoError(proc.Continue(p), t, "Continue() returned an error") // Step into function assertNoError(proc.Step(p), t, "Step() returned an error") // We should now be inside the function. loc, err := p.CurrentThread().Location() if err != nil { t.Fatal(err) } if loc.Fn.Name != "main.callme" { t.Fatalf("expected to be within the 'callme' function, was in %s instead", loc.Fn.Name) } if !strings.Contains(loc.File, "teststep") { t.Fatalf("debugger stopped at incorrect location: %s:%d", loc.File, loc.Line) } if loc.Line != 8 { t.Fatalf("debugger stopped at incorrect line: %d", loc.Line) } }) } func TestIssue384(t *testing.T) { // Crash related to reading uninitialized memory, introduced by the memory prefetching optimization ver, _ := goversion.Parse(runtime.Version()) if ver.Major < 0 || ver.AfterOrEqual(goversion.GoVersion{1, 10, -1, 0, 0, ""}) { // go 1.10 emits DW_AT_decl_line and we won't be able to evaluate 'st' // which is declared after line 13. t.Skip("can not evaluate not-yet-declared variables with go 1.10") } protest.AllowRecording(t) withTestProcess("issue384", t, func(p proc.Process, fixture protest.Fixture) { start, _, err := p.BinInfo().LineToPC(fixture.Source, 13) assertNoError(err, t, "LineToPC()") _, err = p.SetBreakpoint(start, proc.UserBreakpoint, nil) assertNoError(err, t, "SetBreakpoint()") assertNoError(proc.Continue(p), t, "Continue()") evalVariable(p, t, "st") }) } func TestIssue332_Part1(t *testing.T) { // Next shouldn't step inside a function call protest.AllowRecording(t) withTestProcess("issue332", t, func(p proc.Process, fixture protest.Fixture) { start, _, err := p.BinInfo().LineToPC(fixture.Source, 8) assertNoError(err, t, "LineToPC()") _, err = p.SetBreakpoint(start, proc.UserBreakpoint, nil) assertNoError(err, t, "SetBreakpoint()") assertNoError(proc.Continue(p), t, "Continue()") assertNoError(proc.Next(p), t, "first Next()") locations, err := proc.ThreadStacktrace(p.CurrentThread(), 2) assertNoError(err, t, "Stacktrace()") if locations[0].Call.Fn == nil { t.Fatalf("Not on a function") } if locations[0].Call.Fn.Name != "main.main" { t.Fatalf("Not on main.main after Next: %s (%s:%d)", locations[0].Call.Fn.Name, locations[0].Call.File, locations[0].Call.Line) } if locations[0].Call.Line != 9 { t.Fatalf("Not on line 9 after Next: %s (%s:%d)", locations[0].Call.Fn.Name, locations[0].Call.File, locations[0].Call.Line) } }) } func TestIssue332_Part2(t *testing.T) { // Step should skip a function's prologue // In some parts of the prologue, for some functions, the FDE data is incorrect // which leads to 'next' and 'stack' failing with error "could not find FDE for PC: <garbage>" // because the incorrect FDE data leads to reading the wrong stack address as the return address protest.AllowRecording(t) withTestProcess("issue332", t, func(p proc.Process, fixture protest.Fixture) { start, _, err := p.BinInfo().LineToPC(fixture.Source, 8) assertNoError(err, t, "LineToPC()") _, err = p.SetBreakpoint(start, proc.UserBreakpoint, nil) assertNoError(err, t, "SetBreakpoint()") assertNoError(proc.Continue(p), t, "Continue()") // step until we enter changeMe for { assertNoError(proc.Step(p), t, "Step()") locations, err := 
proc.ThreadStacktrace(p.CurrentThread(), 2) assertNoError(err, t, "Stacktrace()") if locations[0].Call.Fn == nil { t.Fatalf("Not on a function") } if locations[0].Call.Fn.Name == "main.changeMe" { break } } regs, err := p.CurrentThread().Registers(false) assertNoError(err, t, "Registers()") pc := regs.PC() pcAfterPrologue, err := proc.FindFunctionLocation(p, "main.changeMe", true, -1) assertNoError(err, t, "FindFunctionLocation()") pcEntry, err := proc.FindFunctionLocation(p, "main.changeMe", false, 0) if err != nil { t.Fatalf("got error while finding function location: %v", err) } if pcAfterPrologue == pcEntry { t.Fatalf("main.changeMe and main.changeMe:0 are the same (%x)", pcAfterPrologue) } if pc != pcAfterPrologue { t.Fatalf("Step did not skip the prologue: current pc: %x, first instruction after prologue: %x", pc, pcAfterPrologue) } assertNoError(proc.Next(p), t, "first Next()") assertNoError(proc.Next(p), t, "second Next()") assertNoError(proc.Next(p), t, "third Next()") err = proc.Continue(p) if _, exited := err.(proc.ErrProcessExited); !exited { assertNoError(err, t, "final Continue()") } }) } func TestIssue396(t *testing.T) { withTestProcess("callme", t, func(p proc.Process, fixture protest.Fixture) { _, err := proc.FindFunctionLocation(p, "main.init", true, -1) assertNoError(err, t, "FindFunctionLocation()") }) } func TestIssue414(t *testing.T) { // Stepping until the program exits protest.AllowRecording(t) withTestProcess("math", t, func(p proc.Process, fixture protest.Fixture) { start, _, err := p.BinInfo().LineToPC(fixture.Source, 9) assertNoError(err, t, "LineToPC()") _, err = p.SetBreakpoint(start, proc.UserBreakpoint, nil) assertNoError(err, t, "SetBreakpoint()") assertNoError(proc.Continue(p), t, "Continue()") for { err := proc.Step(p) if err != nil { if _, exited := err.(proc.ErrProcessExited); exited { break } } assertNoError(err, t, "Step()") } }) } func TestPackageVariables(t *testing.T) { protest.AllowRecording(t) withTestProcess("testvariables", t, func(p proc.Process, fixture protest.Fixture) { err := proc.Continue(p) assertNoError(err, t, "Continue()") scope, err := proc.GoroutineScope(p.CurrentThread()) assertNoError(err, t, "Scope()") vars, err := scope.PackageVariables(normalLoadConfig) assertNoError(err, t, "PackageVariables()") failed := false for _, v := range vars { if v.Unreadable != nil && v.Unreadable.Error() != "no location attribute Location" { failed = true t.Logf("Unreadable variable %s: %v", v.Name, v.Unreadable) } } if failed { t.Fatalf("previous errors") } }) } func TestIssue149(t *testing.T) { ver, _ := goversion.Parse(runtime.Version()) if ver.Major > 0 && !ver.AfterOrEqual(goversion.GoVersion{1, 7, -1, 0, 0, ""}) { return } // setting breakpoint on break statement withTestProcess("break", t, func(p proc.Process, fixture protest.Fixture) { _, err := proc.FindFileLocation(p, fixture.Source, 8) assertNoError(err, t, "FindFileLocation()") }) } func TestPanicBreakpoint(t *testing.T) { protest.AllowRecording(t) withTestProcess("panic", t, func(p proc.Process, fixture protest.Fixture) { assertNoError(proc.Continue(p), t, "Continue()") bp := p.CurrentThread().Breakpoint() if bp.Breakpoint == nil || bp.Name != proc.UnrecoveredPanic { t.Fatalf("not on unrecovered-panic breakpoint: %v", bp) } }) } func TestCmdLineArgs(t *testing.T) { expectSuccess := func(p proc.Process, fixture protest.Fixture) { err := proc.Continue(p) bp := p.CurrentThread().Breakpoint() if bp.Breakpoint != nil && bp.Name == proc.UnrecoveredPanic { t.Fatalf("testing args failed on 
unrecovered-panic breakpoint: %v", bp) } exit, exited := err.(proc.ErrProcessExited) if !exited { t.Fatalf("Process did not exit: %v", err) } else { if exit.Status != 0 { t.Fatalf("process exited with invalid status %d", exit.Status) } } } expectPanic := func(p proc.Process, fixture protest.Fixture) { proc.Continue(p) bp := p.CurrentThread().Breakpoint() if bp.Breakpoint == nil || bp.Name != proc.UnrecoveredPanic { t.Fatalf("not on unrecovered-panic breakpoint: %v", bp) } } // make sure multiple arguments (including one with spaces) are passed to the binary correctly withTestProcessArgs("testargs", t, ".", []string{"test"}, 0, expectSuccess) withTestProcessArgs("testargs", t, ".", []string{"-test"}, 0, expectPanic) withTestProcessArgs("testargs", t, ".", []string{"test", "pass flag"}, 0, expectSuccess) // check that arguments with spaces are *only* passed correctly when correctly called withTestProcessArgs("testargs", t, ".", []string{"test pass", "flag"}, 0, expectPanic) withTestProcessArgs("testargs", t, ".", []string{"test", "pass", "flag"}, 0, expectPanic) withTestProcessArgs("testargs", t, ".", []string{"test pass flag"}, 0, expectPanic) // and that invalid cases (wrong arguments or no arguments) panic withTestProcess("testargs", t, expectPanic) withTestProcessArgs("testargs", t, ".", []string{"invalid"}, 0, expectPanic) withTestProcessArgs("testargs", t, ".", []string{"test", "invalid"}, 0, expectPanic) withTestProcessArgs("testargs", t, ".", []string{"invalid", "pass flag"}, 0, expectPanic) } func TestIssue462(t *testing.T) { // Stacktrace of Goroutine 0 fails with an error if runtime.GOOS == "windows" { return } withTestProcess("testnextnethttp", t, func(p proc.Process, fixture protest.Fixture) { go func() { // Wait for program to start listening. for { conn, err := net.Dial("tcp", "localhost:9191") if err == nil { conn.Close() break } time.Sleep(50 * time.Millisecond) } p.RequestManualStop() }() assertNoError(proc.Continue(p), t, "Continue()") _, err := proc.ThreadStacktrace(p.CurrentThread(), 40) assertNoError(err, t, "Stacktrace()") }) } func TestNextParked(t *testing.T) { protest.AllowRecording(t) withTestProcess("parallel_next", t, func(p proc.Process, fixture protest.Fixture) { bp, err := setFunctionBreakpoint(p, "main.sayhi") assertNoError(err, t, "SetBreakpoint()") // continue until a parked goroutine exists var parkedg *proc.G for parkedg == nil { err := proc.Continue(p) if _, exited := err.(proc.ErrProcessExited); exited { t.Log("could not find parked goroutine") return } assertNoError(err, t, "Continue()") gs, _, err := proc.GoroutinesInfo(p, 0, 0) assertNoError(err, t, "GoroutinesInfo()") // Search for a parked goroutine that we know for sure will have to be // resumed before the program can exit. This is a parked goroutine that: // 1. is executing main.sayhi // 2. 
hasn't called wg.Done yet for _, g := range gs { if g.Thread != nil { continue } frames, _ := g.Stacktrace(5, false) for _, frame := range frames { // line 11 is the line where wg.Done is called if frame.Current.Fn != nil && frame.Current.Fn.Name == "main.sayhi" && frame.Current.Line < 11 { parkedg = g break } } if parkedg != nil { break } } } assertNoError(p.SwitchGoroutine(parkedg.ID), t, "SwitchGoroutine()") p.ClearBreakpoint(bp.Addr) assertNoError(proc.Next(p), t, "Next()") if p.SelectedGoroutine().ID != parkedg.ID { t.Fatalf("Next did not continue on the selected goroutine, expected %d got %d", parkedg.ID, p.SelectedGoroutine().ID) } }) } func TestStepParked(t *testing.T) { protest.AllowRecording(t) withTestProcess("parallel_next", t, func(p proc.Process, fixture protest.Fixture) { bp, err := setFunctionBreakpoint(p, "main.sayhi") assertNoError(err, t, "SetBreakpoint()") // continue until a parked goroutine exists var parkedg *proc.G LookForParkedG: for { err := proc.Continue(p) if _, exited := err.(proc.ErrProcessExited); exited { t.Log("could not find parked goroutine") return } assertNoError(err, t, "Continue()") gs, _, err := proc.GoroutinesInfo(p, 0, 0) assertNoError(err, t, "GoroutinesInfo()") for _, g := range gs { if g.Thread == nil && g.CurrentLoc.Fn != nil && g.CurrentLoc.Fn.Name == "main.sayhi" { parkedg = g break LookForParkedG } } } t.Logf("Parked g is: %v\n", parkedg) frames, _ := parkedg.Stacktrace(20, false) for _, frame := range frames { name := "" if frame.Call.Fn != nil { name = frame.Call.Fn.Name } t.Logf("\t%s:%d in %s (%#x)", frame.Call.File, frame.Call.Line, name, frame.Current.PC) } assertNoError(p.SwitchGoroutine(parkedg.ID), t, "SwitchGoroutine()") p.ClearBreakpoint(bp.Addr) assertNoError(proc.Step(p), t, "Step()") if p.SelectedGoroutine().ID != parkedg.ID { t.Fatalf("Step did not continue on the selected goroutine, expected %d got %d", parkedg.ID, p.SelectedGoroutine().ID) } }) } func TestIssue509(t *testing.T) { fixturesDir := protest.FindFixturesDir() nomaindir := filepath.Join(fixturesDir, "nomaindir") cmd := exec.Command("go", "build", "-gcflags=-N -l", "-o", "debug") cmd.Dir = nomaindir assertNoError(cmd.Run(), t, "go build") exepath := filepath.Join(nomaindir, "debug") defer os.Remove(exepath) var err error switch testBackend { case "native": _, err = native.Launch([]string{exepath}, ".", false, []string{}) case "lldb": _, err = gdbserial.LLDBLaunch([]string{exepath}, ".", false, []string{}) default: t.Skip("test not valid for this backend") } if err == nil { t.Fatalf("expected error but none was generated") } if err != proc.ErrNotExecutable { t.Fatalf("expected error \"%v\" got \"%v\"", proc.ErrNotExecutable, err) } } func TestUnsupportedArch(t *testing.T) { ver, _ := goversion.Parse(runtime.Version()) if ver.Major < 0 || !ver.AfterOrEqual(goversion.GoVersion{1, 6, -1, 0, 0, ""}) || ver.AfterOrEqual(goversion.GoVersion{1, 7, -1, 0, 0, ""}) { // cross compile (with -N?) 
works only on select versions of go return } fixturesDir := protest.FindFixturesDir() infile := filepath.Join(fixturesDir, "math.go") outfile := filepath.Join(fixturesDir, "_math_debug_386") cmd := exec.Command("go", "build", "-gcflags=-N -l", "-o", outfile, infile) for _, v := range os.Environ() { if !strings.HasPrefix(v, "GOARCH=") { cmd.Env = append(cmd.Env, v) } } cmd.Env = append(cmd.Env, "GOARCH=386") out, err := cmd.CombinedOutput() if err != nil { t.Fatalf("go build failed: %v: %v", err, string(out)) } defer os.Remove(outfile) var p proc.Process switch testBackend { case "native": p, err = native.Launch([]string{outfile}, ".", false, []string{}) case "lldb": p, err = gdbserial.LLDBLaunch([]string{outfile}, ".", false, []string{}) default: t.Skip("test not valid for this backend") } switch err { case proc.ErrUnsupportedLinuxArch, proc.ErrUnsupportedWindowsArch, proc.ErrUnsupportedDarwinArch: // all good case nil: p.Detach(true) t.Fatal("Launch is expected to fail, but succeeded") default: t.Fatal(err) } } func TestIssue573(t *testing.T) { // calls to runtime.duffzero and runtime.duffcopy jump directly into the middle // of the function and the internal breakpoint set by StepInto may be missed. protest.AllowRecording(t) withTestProcess("issue573", t, func(p proc.Process, fixture protest.Fixture) { fentry, _ := proc.FindFunctionLocation(p, "main.foo", false, 0) _, err := p.SetBreakpoint(fentry, proc.UserBreakpoint, nil) assertNoError(err, t, "SetBreakpoint()") assertNoError(proc.Continue(p), t, "Continue()") assertNoError(proc.Step(p), t, "Step() #1") assertNoError(proc.Step(p), t, "Step() #2") // Bug exits here. assertNoError(proc.Step(p), t, "Step() #3") // Third step ought to be possible; program ought not have exited. }) } func TestTestvariables2Prologue(t *testing.T) { withTestProcess("testvariables2", t, func(p proc.Process, fixture protest.Fixture) { addrEntry, err := proc.FindFunctionLocation(p, "main.main", false, 0) assertNoError(err, t, "FindFunctionLocation - entrypoint") addrPrologue, err := proc.FindFunctionLocation(p, "main.main", true, 0) assertNoError(err, t, "FindFunctionLocation - postprologue") if addrEntry == addrPrologue { t.Fatalf("Prologue detection failed on testvariables2.go/main.main") } }) } func TestNextDeferReturnAndDirectCall(t *testing.T) { // Next should not step into a deferred function if it is called // directly, only if it is called through a panic or a deferreturn. // Here we test the case where the function is called by a deferreturn testseq("defercall", contNext, []nextTest{ {9, 10}, {10, 11}, {11, 12}, {12, 13}, {13, 28}}, "main.callAndDeferReturn", t) } func TestNextPanicAndDirectCall(t *testing.T) { // Next should not step into a deferred function if it is called // directly, only if it is called through a panic or a deferreturn. // Here we test the case where the function is called by a panic if goversion.VersionAfterOrEqual(runtime.Version(), 1, 11) { testseq("defercall", contNext, []nextTest{ {15, 16}, {16, 17}, {17, 18}, {18, 6}}, "main.callAndPanic2", t) } else { testseq("defercall", contNext, []nextTest{ {15, 16}, {16, 17}, {17, 18}, {18, 5}}, "main.callAndPanic2", t) } } func TestStepCall(t *testing.T) { testseq("testnextprog", contStep, []nextTest{ {34, 13}, {13, 14}}, "", t) } func TestStepCallPtr(t *testing.T) { // Tests that Step works correctly when calling functions with a // function pointer. 
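// The expected step sequence differs between Go 1.11+ and earlier toolchains, hence the two branches below.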
if goversion.VersionAfterOrEqual(runtime.Version(), 1, 11) { testseq("teststepprog", contStep, []nextTest{ {9, 10}, {10, 6}, {6, 7}, {7, 11}}, "", t) } else { testseq("teststepprog", contStep, []nextTest{ {9, 10}, {10, 5}, {5, 6}, {6, 7}, {7, 11}}, "", t) } } func TestStepReturnAndPanic(t *testing.T) { // Tests that Step works correctly when returning from functions // and when a deferred function is called when panic'ing. switch { case goversion.VersionAfterOrEqual(runtime.Version(), 1, 11): testseq("defercall", contStep, []nextTest{ {17, 6}, {6, 7}, {7, 18}, {18, 6}, {6, 7}}, "", t) case goversion.VersionAfterOrEqual(runtime.Version(), 1, 10): testseq("defercall", contStep, []nextTest{ {17, 5}, {5, 6}, {6, 7}, {7, 18}, {18, 5}, {5, 6}, {6, 7}}, "", t) case goversion.VersionAfterOrEqual(runtime.Version(), 1, 9): testseq("defercall", contStep, []nextTest{ {17, 5}, {5, 6}, {6, 7}, {7, 17}, {17, 18}, {18, 5}, {5, 6}, {6, 7}}, "", t) default: testseq("defercall", contStep, []nextTest{ {17, 5}, {5, 6}, {6, 7}, {7, 18}, {18, 5}, {5, 6}, {6, 7}}, "", t) } } func TestStepDeferReturn(t *testing.T) { // Tests that Step works correctly when a deferred function is // called during a return. if goversion.VersionAfterOrEqual(runtime.Version(), 1, 11) { testseq("defercall", contStep, []nextTest{ {11, 6}, {6, 7}, {7, 12}, {12, 13}, {13, 6}, {6, 7}, {7, 13}, {13, 28}}, "", t) } else { testseq("defercall", contStep, []nextTest{ {11, 5}, {5, 6}, {6, 7}, {7, 12}, {12, 13}, {13, 5}, {5, 6}, {6, 7}, {7, 13}, {13, 28}}, "", t) } } func TestStepIgnorePrivateRuntime(t *testing.T) { // Tests that Step will ignore calls to private runtime functions // (such as runtime.convT2E in this case) switch { case goversion.VersionAfterOrEqual(runtime.Version(), 1, 11): testseq("teststepprog", contStep, []nextTest{ {21, 14}, {14, 15}, {15, 22}}, "", t) case goversion.VersionAfterOrEqual(runtime.Version(), 1, 10): testseq("teststepprog", contStep, []nextTest{ {21, 13}, {13, 14}, {14, 15}, {15, 22}}, "", t) case goversion.VersionAfterOrEqual(runtime.Version(), 1, 7): testseq("teststepprog", contStep, []nextTest{ {21, 13}, {13, 14}, {14, 15}, {15, 14}, {14, 17}, {17, 22}}, "", t) default: testseq("teststepprog", contStep, []nextTest{ {21, 13}, {13, 14}, {14, 15}, {15, 17}, {17, 22}}, "", t) } } func TestIssue561(t *testing.T) { // Step fails to make progress when PC is at a CALL instruction // where a breakpoint is also set. 
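// Regression test: with a breakpoint set on the CALL at line 10, a single Step should land on line 5
// rather than making no progress.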
protest.AllowRecording(t) withTestProcess("issue561", t, func(p proc.Process, fixture protest.Fixture) { setFileBreakpoint(p, t, fixture, 10) assertNoError(proc.Continue(p), t, "Continue()") assertNoError(proc.Step(p), t, "Step()") assertLineNumber(p, t, 5, "wrong line number after Step,") }) } func TestStepOut(t *testing.T) { testseq2(t, "testnextprog", "main.helloworld", []seqTest{{contContinue, 13}, {contStepout, 35}}) } func TestStepConcurrentDirect(t *testing.T) { protest.AllowRecording(t) withTestProcess("teststepconcurrent", t, func(p proc.Process, fixture protest.Fixture) { pc, err := proc.FindFileLocation(p, fixture.Source, 37) assertNoError(err, t, "FindFileLocation()") bp, err := p.SetBreakpoint(pc, proc.UserBreakpoint, nil) assertNoError(err, t, "SetBreakpoint()") assertNoError(proc.Continue(p), t, "Continue()") _, err = p.ClearBreakpoint(bp.Addr) assertNoError(err, t, "ClearBreakpoint()") for _, b := range p.Breakpoints().M { if b.Name == proc.UnrecoveredPanic { _, err := p.ClearBreakpoint(b.Addr) assertNoError(err, t, "ClearBreakpoint(unrecovered-panic)") break } } gid := p.SelectedGoroutine().ID seq := []int{37, 38, 13, 15, 16, 38} i := 0 count := 0 for { anyerr := false if p.SelectedGoroutine().ID != gid { t.Errorf("Step switched to different goroutine %d %d\n", gid, p.SelectedGoroutine().ID) anyerr = true } f, ln := currentLineNumber(p, t) if ln != seq[i] { if i == 1 && ln == 40 { // loop exited break } frames, err := proc.ThreadStacktrace(p.CurrentThread(), 20) if err != nil { t.Errorf("Could not get stacktrace of goroutine %d\n", p.SelectedGoroutine().ID) } else { t.Logf("Goroutine %d (thread: %d):", p.SelectedGoroutine().ID, p.CurrentThread().ThreadID()) for _, frame := range frames { t.Logf("\t%s:%d (%#x)", frame.Call.File, frame.Call.Line, frame.Current.PC) } } t.Errorf("Program did not continue at expected location (%d) %s:%d [i %d count %d]", seq[i], f, ln, i, count) anyerr = true } if anyerr { t.FailNow() } i = (i + 1) % len(seq) if i == 0 { count++ } assertNoError(proc.Step(p), t, "Step()") } if count != 100 { t.Fatalf("Program did not loop expected number of times: %d", count) } }) } func TestStepConcurrentPtr(t *testing.T) { protest.AllowRecording(t) withTestProcess("teststepconcurrent", t, func(p proc.Process, fixture protest.Fixture) { pc, err := proc.FindFileLocation(p, fixture.Source, 24) assertNoError(err, t, "FindFileLocation()") _, err = p.SetBreakpoint(pc, proc.UserBreakpoint, nil) assertNoError(err, t, "SetBreakpoint()") for _, b := range p.Breakpoints().M { if b.Name == proc.UnrecoveredPanic { _, err := p.ClearBreakpoint(b.Addr) assertNoError(err, t, "ClearBreakpoint(unrecovered-panic)") break } } kvals := map[int]int64{} count := 0 for { err := proc.Continue(p) _, exited := err.(proc.ErrProcessExited) if exited { break } assertNoError(err, t, "Continue()") f, ln := currentLineNumber(p, t) if ln != 24 { for _, th := range p.ThreadList() { t.Logf("thread %d stopped on breakpoint %v", th.ThreadID(), th.Breakpoint()) } curbp := p.CurrentThread().Breakpoint() t.Fatalf("Program did not continue at expected location (24): %s:%d %#x [%v] (gid %d count %d)", f, ln, currentPC(p, t), curbp, p.SelectedGoroutine().ID, count) } gid := p.SelectedGoroutine().ID kvar := evalVariable(p, t, "k") k, _ := constant.Int64Val(kvar.Value) if oldk, ok := kvals[gid]; ok { if oldk >= k { t.Fatalf("Goroutine %d did not make progress?", gid) } } kvals[gid] = k assertNoError(proc.Step(p), t, "Step()") for p.Breakpoints().HasInternalBreakpoints() { if p.SelectedGoroutine().ID == gid 
{ t.Fatalf("step did not step into function call (but internal breakpoints still active?) (%d %d)", gid, p.SelectedGoroutine().ID) } assertNoError(proc.Continue(p), t, "Continue()") } if p.SelectedGoroutine().ID != gid { t.Fatalf("Step switched goroutines (wanted: %d got: %d)", gid, p.SelectedGoroutine().ID) } f, ln = assertLineNumber(p, t, 13, "Step did not step into function call") count++ if count > 50 { // this test could potentially go on for 10000 cycles, since that's // too slow we cut the execution after 50 cycles break } } if count == 0 { t.Fatalf("Breakpoint never hit") } }) } func TestStepOutDefer(t *testing.T) { protest.AllowRecording(t) withTestProcess("testnextdefer", t, func(p proc.Process, fixture protest.Fixture) { pc, err := proc.FindFileLocation(p, fixture.Source, 9) assertNoError(err, t, "FindFileLocation()") bp, err := p.SetBreakpoint(pc, proc.UserBreakpoint, nil) assertNoError(err, t, "SetBreakpoint()") assertNoError(proc.Continue(p), t, "Continue()") p.ClearBreakpoint(bp.Addr) assertLineNumber(p, t, 9, "wrong line number") assertNoError(proc.StepOut(p), t, "StepOut()") f, l, _ := p.BinInfo().PCToLine(currentPC(p, t)) if f == fixture.Source || l == 6 { t.Fatalf("wrong location %s:%d, expected to end somewhere in runtime", f, l) } }) } func TestStepOutDeferReturnAndDirectCall(t *testing.T) { // StepOut should not step into a deferred function if it is called // directly, only if it is called through a panic. // Here we test the case where the function is called by a deferreturn testseq2(t, "defercall", "", []seqTest{ {contContinue, 11}, {contStepout, 28}}) } const maxInstructionLength uint64 = 15 func TestStepOnCallPtrInstr(t *testing.T) { protest.AllowRecording(t) withTestProcess("teststepprog", t, func(p proc.Process, fixture protest.Fixture) { pc, err := proc.FindFileLocation(p, fixture.Source, 10) assertNoError(err, t, "FindFileLocation()") _, err = p.SetBreakpoint(pc, proc.UserBreakpoint, nil) assertNoError(err, t, "SetBreakpoint()") assertNoError(proc.Continue(p), t, "Continue()") found := false for { _, ln := currentLineNumber(p, t) if ln != 10 { break } regs, err := p.CurrentThread().Registers(false) assertNoError(err, t, "Registers()") pc := regs.PC() text, err := proc.Disassemble(p, nil, pc, pc+maxInstructionLength) assertNoError(err, t, "Disassemble()") if text[0].IsCall() { found = true break } assertNoError(p.StepInstruction(), t, "StepInstruction()") } if !found { t.Fatal("Could not find CALL instruction") } assertNoError(proc.Step(p), t, "Step()") if goversion.VersionAfterOrEqual(runtime.Version(), 1, 11) { assertLineNumber(p, t, 6, "Step continued to wrong line,") } else { assertLineNumber(p, t, 5, "Step continued to wrong line,") } }) } func TestIssue594(t *testing.T) { if runtime.GOOS == "darwin" && testBackend == "lldb" { // debugserver will receive an EXC_BAD_ACCESS for this, at that point // there is no way to reconvert this exception into a unix signal and send // it to the process. // This is a bug in debugserver/lldb: // https://bugs.llvm.org//show_bug.cgi?id=22868 return } // Exceptions that aren't caused by breakpoints should be propagated // back to the target. // In particular the target should be able to cause a nil pointer // dereference panic and recover from it. 
protest.AllowRecording(t) withTestProcess("issue594", t, func(p proc.Process, fixture protest.Fixture) { assertNoError(proc.Continue(p), t, "Continue()") var f string var ln int if testBackend == "rr" { frame, err := findFirstNonRuntimeFrame(p) assertNoError(err, t, "findFirstNonRuntimeFrame") f, ln = frame.Current.File, frame.Current.Line } else { f, ln = currentLineNumber(p, t) } if ln != 21 { t.Fatalf("Program stopped at %s:%d, expected :21", f, ln) } }) } func TestStepOutPanicAndDirectCall(t *testing.T) { // StepOut should not step into a deferred function if it is called // directly, only if it is called through a panic. // Here we test the case where the function is called by a panic if goversion.VersionAfterOrEqual(runtime.Version(), 1, 11) { testseq2(t, "defercall", "", []seqTest{ {contContinue, 17}, {contStepout, 6}}) } else { testseq2(t, "defercall", "", []seqTest{ {contContinue, 17}, {contStepout, 5}}) } } func TestWorkDir(t *testing.T) { wd := os.TempDir() // For Darwin `os.TempDir()` returns `/tmp` which is symlink to `/private/tmp`. if runtime.GOOS == "darwin" { wd = "/private/tmp" } protest.AllowRecording(t) withTestProcessArgs("workdir", t, wd, []string{}, 0, func(p proc.Process, fixture protest.Fixture) { addr, _, err := p.BinInfo().LineToPC(fixture.Source, 14) assertNoError(err, t, "LineToPC") p.SetBreakpoint(addr, proc.UserBreakpoint, nil) proc.Continue(p) v := evalVariable(p, t, "pwd") str := constant.StringVal(v.Value) if wd != str { t.Fatalf("Expected %s got %s\n", wd, str) } }) } func TestNegativeIntEvaluation(t *testing.T) { testcases := []struct { name string typ string value interface{} }{ {"ni8", "int8", int64(-5)}, {"ni16", "int16", int64(-5)}, {"ni32", "int32", int64(-5)}, } protest.AllowRecording(t) withTestProcess("testvariables2", t, func(p proc.Process, fixture protest.Fixture) { assertNoError(proc.Continue(p), t, "Continue()") for _, tc := range testcases { v := evalVariable(p, t, tc.name) if typ := v.RealType.String(); typ != tc.typ { t.Fatalf("Wrong type for variable %q: %q (expected: %q)", tc.name, typ, tc.typ) } if val, _ := constant.Int64Val(v.Value); val != tc.value { t.Fatalf("Wrong value for variable %q: %v (expected: %v)", tc.name, val, tc.value) } } }) } func TestIssue683(t *testing.T) { // Step panics when source file can not be found protest.AllowRecording(t) withTestProcess("issue683", t, func(p proc.Process, fixture protest.Fixture) { _, err := setFunctionBreakpoint(p, "main.main") assertNoError(err, t, "setFunctionBreakpoint()") assertNoError(proc.Continue(p), t, "First Continue()") for i := 0; i < 20; i++ { // eventually an error about the source file not being found will be // returned, the important thing is that we shouldn't panic err := proc.Step(p) if err != nil { break } } }) } func TestIssue664(t *testing.T) { protest.AllowRecording(t) withTestProcess("issue664", t, func(p proc.Process, fixture protest.Fixture) { setFileBreakpoint(p, t, fixture, 4) assertNoError(proc.Continue(p), t, "Continue()") assertNoError(proc.Next(p), t, "Next()") assertLineNumber(p, t, 5, "Did not continue to correct location,") }) } // Benchmarks (*Processs).Continue + (*Scope).FunctionArguments func BenchmarkTrace(b *testing.B) { protest.AllowRecording(b) withTestProcess("traceperf", b, func(p proc.Process, fixture protest.Fixture) { _, err := setFunctionBreakpoint(p, "main.PerfCheck") assertNoError(err, b, "setFunctionBreakpoint()") b.ResetTimer() for i := 0; i < b.N; i++ { assertNoError(proc.Continue(p), b, "Continue()") s, err := 
proc.GoroutineScope(p.CurrentThread()) assertNoError(err, b, "Scope()") _, err = s.FunctionArguments(proc.LoadConfig{false, 0, 64, 0, 3, 0}) assertNoError(err, b, "FunctionArguments()") } b.StopTimer() }) } func TestNextInDeferReturn(t *testing.T) { // runtime.deferreturn updates the G struct in a way that for one // instruction leaves the curg._defer field non-nil but with curg._defer.fn // field being nil. // We need to deal with this without panicing. protest.AllowRecording(t) withTestProcess("defercall", t, func(p proc.Process, fixture protest.Fixture) { _, err := setFunctionBreakpoint(p, "runtime.deferreturn") assertNoError(err, t, "setFunctionBreakpoint(runtime.deferreturn)") assertNoError(proc.Continue(p), t, "First Continue()") // Set a breakpoint on the deferred function so that the following loop // can not step out of the runtime.deferreturn and all the way to the // point where the target program panics. _, err = setFunctionBreakpoint(p, "main.sampleFunction") assertNoError(err, t, "setFunctionBreakpoint(main.sampleFunction)") for i := 0; i < 20; i++ { loc, err := p.CurrentThread().Location() assertNoError(err, t, "CurrentThread().Location()") t.Logf("at %#x %s:%d", loc.PC, loc.File, loc.Line) if loc.Fn != nil && loc.Fn.Name == "main.sampleFunction" { break } assertNoError(proc.Next(p), t, fmt.Sprintf("Next() %d", i)) } }) } func getg(goid int, gs []*proc.G) *proc.G { for _, g := range gs { if g.ID == goid { return g } } return nil } func TestStacktraceWithBarriers(t *testing.T) { // Go's Garbage Collector will insert stack barriers into stacks. // This stack barrier is inserted by overwriting the return address for the // stack frame with the address of runtime.stackBarrier. // The original return address is saved into the stkbar slice inside the G // struct. // In Go 1.9 stack barriers have been removed and this test must be disabled. if ver, _ := goversion.Parse(runtime.Version()); ver.Major < 0 || ver.AfterOrEqual(goversion.GoVersion{1, 9, -1, 0, 0, ""}) { return } // In Go 1.8 stack barriers are not inserted by default, this enables them. 
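// Enable stack barriers via GODEBUG=gcrescanstacks=1, restoring the previous GODEBUG value when the test ends.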
godebugOld := os.Getenv("GODEBUG") defer os.Setenv("GODEBUG", godebugOld) os.Setenv("GODEBUG", "gcrescanstacks=1") withTestProcess("binarytrees", t, func(p proc.Process, fixture protest.Fixture) { // We want to get a user goroutine with a stack barrier, to get that we execute the program until runtime.gcInstallStackBarrier is executed AND the goroutine it was executed onto contains a call to main.bottomUpTree _, err := setFunctionBreakpoint(p, "runtime.gcInstallStackBarrier") assertNoError(err, t, "setFunctionBreakpoint()") stackBarrierGoids := []int{} for len(stackBarrierGoids) == 0 { err := proc.Continue(p) if _, exited := err.(proc.ErrProcessExited); exited { t.Logf("Could not run test") return } assertNoError(err, t, "Continue()") gs, _, err := proc.GoroutinesInfo(p, 0, 0) assertNoError(err, t, "GoroutinesInfo()") for _, th := range p.ThreadList() { if bp := th.Breakpoint(); bp.Breakpoint == nil { continue } goidVar := evalVariable(p, t, "gp.goid") goid, _ := constant.Int64Val(goidVar.Value) if g := getg(int(goid), gs); g != nil { stack, err := g.Stacktrace(50, false) assertNoError(err, t, fmt.Sprintf("Stacktrace(goroutine = %d)", goid)) for _, frame := range stack { if frame.Current.Fn != nil && frame.Current.Fn.Name == "main.bottomUpTree" { stackBarrierGoids = append(stackBarrierGoids, int(goid)) break } } } } } if len(stackBarrierGoids) == 0 { t.Fatalf("Could not find a goroutine with stack barriers") } t.Logf("stack barrier goids: %v\n", stackBarrierGoids) assertNoError(proc.StepOut(p), t, "StepOut()") gs, _, err := proc.GoroutinesInfo(p, 0, 0) assertNoError(err, t, "GoroutinesInfo()") for _, goid := range stackBarrierGoids { g := getg(goid, gs) stack, err := g.Stacktrace(200, false) assertNoError(err, t, "Stacktrace()") // Check that either main.main or main.main.func1 appear in the // stacktrace of this goroutine, if we failed at resolving stack barriers // correctly the stacktrace will be truncated and neither main.main or // main.main.func1 will appear found := false for _, frame := range stack { if frame.Current.Fn == nil { continue } if name := frame.Current.Fn.Name; name == "main.main" || name == "main.main.func1" { found = true } } t.Logf("Stacktrace for %d:\n", goid) for _, frame := range stack { name := "<>" if frame.Current.Fn != nil { name = frame.Current.Fn.Name } t.Logf("\t%s [CFA: %x Ret: %x] at %s:%d", name, frame.Regs.CFA, frame.Ret, frame.Current.File, frame.Current.Line) } if !found { t.Logf("Truncated stacktrace for %d\n", goid) } } }) } func TestAttachDetach(t *testing.T) { if testBackend == "lldb" && runtime.GOOS == "linux" { bs, _ := ioutil.ReadFile("/proc/sys/kernel/yama/ptrace_scope") if bs == nil || strings.TrimSpace(string(bs)) != "0" { t.Logf("can not run TestAttachDetach: %v\n", bs) return } } if testBackend == "rr" { return } var buildFlags protest.BuildFlags if buildMode == "pie" { buildFlags |= protest.BuildModePIE } fixture := protest.BuildFixture("testnextnethttp", buildFlags) cmd := exec.Command(fixture.Path) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr assertNoError(cmd.Start(), t, "starting fixture") // wait for testnextnethttp to start listening t0 := time.Now() for { conn, err := net.Dial("tcp", "localhost:9191") if err == nil { conn.Close() break } time.Sleep(50 * time.Millisecond) if time.Since(t0) > 10*time.Second { t.Fatal("fixture did not start") } } var p proc.Process var err error switch testBackend { case "native": p, err = native.Attach(cmd.Process.Pid, []string{}) case "lldb": path := "" if runtime.GOOS == "darwin" { path = 
fixture.Path } p, err = gdbserial.LLDBAttach(cmd.Process.Pid, path, []string{}) default: err = fmt.Errorf("unknown backend %q", testBackend) } assertNoError(err, t, "Attach") go func() { time.Sleep(1 * time.Second) http.Get("http://localhost:9191") }() assertNoError(proc.Continue(p), t, "Continue") assertLineNumber(p, t, 11, "Did not continue to correct location,") assertNoError(p.Detach(false), t, "Detach") resp, err := http.Get("http://localhost:9191/nobp") assertNoError(err, t, "Page request after detach") bs, err := ioutil.ReadAll(resp.Body) assertNoError(err, t, "Reading /nobp page") if out := string(bs); !strings.Contains(out, "hello, world!") { t.Fatalf("/nobp page does not contain \"hello, world!\": %q", out) } cmd.Process.Kill() } func TestVarSum(t *testing.T) { protest.AllowRecording(t) withTestProcess("testvariables2", t, func(p proc.Process, fixture protest.Fixture) { assertNoError(proc.Continue(p), t, "Continue()") sumvar := evalVariable(p, t, "s1[0] + s1[1]") sumvarstr := constant.StringVal(sumvar.Value) if sumvarstr != "onetwo" { t.Fatalf("s1[0] + s1[1] == %q (expected \"onetwo\")", sumvarstr) } if sumvar.Len != int64(len(sumvarstr)) { t.Fatalf("sumvar.Len == %d (expected %d)", sumvar.Len, len(sumvarstr)) } }) } func TestPackageWithPathVar(t *testing.T) { protest.AllowRecording(t) withTestProcess("pkgrenames", t, func(p proc.Process, fixture protest.Fixture) { assertNoError(proc.Continue(p), t, "Continue()") evalVariable(p, t, "pkg.SomeVar") evalVariable(p, t, "pkg.SomeVar.X") }) } func TestEnvironment(t *testing.T) { protest.AllowRecording(t) os.Setenv("SOMEVAR", "bah") withTestProcess("testenv", t, func(p proc.Process, fixture protest.Fixture) { assertNoError(proc.Continue(p), t, "Continue()") v := evalVariable(p, t, "x") vv := constant.StringVal(v.Value) t.Logf("v = %q", vv) if vv != "bah" { t.Fatalf("value of v is %q (expected \"bah\")", vv) } }) } func getFrameOff(p proc.Process, t *testing.T) int64 { frameoffvar := evalVariable(p, t, "runtime.frameoff") frameoff, _ := constant.Int64Val(frameoffvar.Value) return frameoff } func TestRecursiveNext(t *testing.T) { protest.AllowRecording(t) testcases := []nextTest{ {6, 7}, {7, 10}, {10, 11}, {11, 17}, } testseq("increment", contNext, testcases, "main.Increment", t) withTestProcess("increment", t, func(p proc.Process, fixture protest.Fixture) { bp, err := setFunctionBreakpoint(p, "main.Increment") assertNoError(err, t, "setFunctionBreakpoint") assertNoError(proc.Continue(p), t, "Continue") _, err = p.ClearBreakpoint(bp.Addr) assertNoError(err, t, "ClearBreakpoint") assertNoError(proc.Next(p), t, "Next 1") assertNoError(proc.Next(p), t, "Next 2") assertNoError(proc.Next(p), t, "Next 3") frameoff0 := getFrameOff(p, t) assertNoError(proc.Step(p), t, "Step") frameoff1 := getFrameOff(p, t) if frameoff0 == frameoff1 { t.Fatalf("did not step into function?") } assertLineNumber(p, t, 6, "program did not continue to expected location,") assertNoError(proc.Next(p), t, "Next 4") assertLineNumber(p, t, 7, "program did not continue to expected location,") assertNoError(proc.StepOut(p), t, "StepOut") assertLineNumber(p, t, 11, "program did not continue to expected location,") frameoff2 := getFrameOff(p, t) if frameoff0 != frameoff2 { t.Fatalf("frame offset mismatch %x != %x", frameoff0, frameoff2) } }) } // TestIssue877 ensures that the environment variables starting with DYLD_ and LD_ // are passed when executing the binary on OSX via debugserver func TestIssue877(t *testing.T) { if runtime.GOOS != "darwin" && testBackend == "lldb" { 
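// The DYLD_/LD_ forwarding behavior only matters when launching through debugserver on macOS,
// so skip the lldb backend on other systems.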
return } if os.Getenv("TRAVIS") == "true" && runtime.GOOS == "darwin" { // Something changed on Travis side that makes the Go compiler fail if // DYLD_LIBRARY_PATH is set. t.Skip("broken") } const envval = "/usr/local/lib" os.Setenv("DYLD_LIBRARY_PATH", envval) withTestProcess("issue877", t, func(p proc.Process, fixture protest.Fixture) { assertNoError(proc.Continue(p), t, "Continue()") v := evalVariable(p, t, "dyldenv") vv := constant.StringVal(v.Value) t.Logf("v = %q", vv) if vv != envval { t.Fatalf("value of v is %q (expected %q)", vv, envval) } }) } func TestIssue893(t *testing.T) { // Test what happens when next is called immediately after launching the // executable, acceptable behaviors are: (a) no error, (b) no source at PC // error, (c) program runs to completion protest.AllowRecording(t) withTestProcess("increment", t, func(p proc.Process, fixture protest.Fixture) { err := proc.Next(p) if err == nil { return } if _, ok := err.(*frame.ErrNoFDEForPC); ok { return } if _, ok := err.(proc.ErrThreadBlocked); ok { return } if _, ok := err.(*proc.ErrNoSourceForPC); ok { return } if _, ok := err.(proc.ErrProcessExited); ok { return } assertNoError(err, t, "Next") }) } func TestStepInstructionNoGoroutine(t *testing.T) { protest.AllowRecording(t) withTestProcess("increment", t, func(p proc.Process, fixture protest.Fixture) { // Call StepInstruction immediately after launching the program, it should // work even though no goroutine is selected. assertNoError(p.StepInstruction(), t, "StepInstruction") }) } func TestIssue871(t *testing.T) { protest.AllowRecording(t) withTestProcess("issue871", t, func(p proc.Process, fixture protest.Fixture) { assertNoError(proc.Continue(p), t, "Continue") var scope *proc.EvalScope var err error if testBackend == "rr" { var frame proc.Stackframe frame, err = findFirstNonRuntimeFrame(p) if err == nil { scope = proc.FrameToScope(p.BinInfo(), p.CurrentThread(), nil, frame) } } else { scope, err = proc.GoroutineScope(p.CurrentThread()) } assertNoError(err, t, "scope") locals, err := scope.LocalVariables(normalLoadConfig) assertNoError(err, t, "LocalVariables") foundA, foundB := false, false for _, v := range locals { t.Logf("local %v", v) switch v.Name { case "a": foundA = true if v.Flags&proc.VariableEscaped == 0 { t.Errorf("variable a not flagged as escaped") } case "b": foundB = true } } if !foundA { t.Errorf("variable a not found") } if !foundB { t.Errorf("variable b not found") } }) } func TestShadowedFlag(t *testing.T) { if ver, _ := goversion.Parse(runtime.Version()); ver.Major >= 0 && !ver.AfterOrEqual(goversion.GoVersion{1, 9, -1, 0, 0, ""}) { return } withTestProcess("testshadow", t, func(p proc.Process, fixture protest.Fixture) { assertNoError(proc.Continue(p), t, "Continue") scope, err := proc.GoroutineScope(p.CurrentThread()) assertNoError(err, t, "GoroutineScope") locals, err := scope.LocalVariables(normalLoadConfig) assertNoError(err, t, "LocalVariables") foundShadowed := false foundNonShadowed := false for _, v := range locals { if v.Flags&proc.VariableShadowed != 0 { if v.Name != "a" { t.Errorf("wrong shadowed variable %s", v.Name) } foundShadowed = true if n, _ := constant.Int64Val(v.Value); n != 0 { t.Errorf("wrong value for shadowed variable a: %d", n) } } else { if v.Name != "a" { t.Errorf("wrong non-shadowed variable %s", v.Name) } foundNonShadowed = true if n, _ := constant.Int64Val(v.Value); n != 1 { t.Errorf("wrong value for non-shadowed variable a: %d", n) } } } if !foundShadowed { t.Error("could not find any shadowed variable") } if 
!foundNonShadowed { t.Error("could not find any non-shadowed variable") } }) } func TestAttachStripped(t *testing.T) { if testBackend == "lldb" && runtime.GOOS == "linux" { bs, _ := ioutil.ReadFile("/proc/sys/kernel/yama/ptrace_scope") if bs == nil || strings.TrimSpace(string(bs)) != "0" { t.Logf("can not run TestAttachStripped: %v\n", bs) return } } if testBackend == "rr" { return } if runtime.GOOS == "darwin" { t.Log("-s does not produce stripped executables on macOS") return } if buildMode != "" { t.Skip("not enabled with buildmode=PIE") } fixture := protest.BuildFixture("testnextnethttp", protest.LinkStrip) cmd := exec.Command(fixture.Path) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr assertNoError(cmd.Start(), t, "starting fixture") // wait for testnextnethttp to start listening t0 := time.Now() for { conn, err := net.Dial("tcp", "localhost:9191") if err == nil { conn.Close() break } time.Sleep(50 * time.Millisecond) if time.Since(t0) > 10*time.Second { t.Fatal("fixture did not start") } } var p proc.Process var err error switch testBackend { case "native": p, err = native.Attach(cmd.Process.Pid, []string{}) case "lldb": path := "" if runtime.GOOS == "darwin" { path = fixture.Path } p, err = gdbserial.LLDBAttach(cmd.Process.Pid, path, []string{}) default: t.Fatalf("unknown backend %q", testBackend) } t.Logf("error is %v", err) if err == nil { p.Detach(true) t.Fatalf("expected error after attach, got nothing") } else { cmd.Process.Kill() } os.Remove(fixture.Path) } func TestIssue844(t *testing.T) { // Conditional breakpoints should not prevent next from working if their // condition isn't met. withTestProcess("nextcond", t, func(p proc.Process, fixture protest.Fixture) { setFileBreakpoint(p, t, fixture, 9) condbp := setFileBreakpoint(p, t, fixture, 10) condbp.Cond = &ast.BinaryExpr{ Op: token.EQL, X: &ast.Ident{Name: "n"}, Y: &ast.BasicLit{Kind: token.INT, Value: "11"}, } assertNoError(proc.Continue(p), t, "Continue") assertNoError(proc.Next(p), t, "Next") assertLineNumber(p, t, 10, "continued to wrong location,") }) } func logStacktrace(t *testing.T, bi *proc.BinaryInfo, frames []proc.Stackframe) { for j := range frames { name := "?" if frames[j].Current.Fn != nil { name = frames[j].Current.Fn.Name } t.Logf("\t%#x %#x %#x %s at %s:%d\n", frames[j].Call.PC, frames[j].FrameOffset(), frames[j].FramePointerOffset(), name, filepath.Base(frames[j].Call.File), frames[j].Call.Line) if frames[j].TopmostDefer != nil { f, l, fn := bi.PCToLine(frames[j].TopmostDefer.DeferredPC) fnname := "" if fn != nil { fnname = fn.Name } t.Logf("\t\ttopmost defer: %#x %s at %s:%d\n", frames[j].TopmostDefer.DeferredPC, fnname, f, l) } for deferIdx, _defer := range frames[j].Defers { f, l, fn := bi.PCToLine(_defer.DeferredPC) fnname := "" if fn != nil { fnname = fn.Name } t.Logf("\t\t%d defer: %#x %s at %s:%d\n", deferIdx, _defer.DeferredPC, fnname, f, l) } } } // stacktraceCheck checks that all the functions listed in tc appear in // frames in the same order. // Checks that all the functions in tc starting with "C." or with "!" are in // a systemstack frame. // Returns a slice m where m[i] is the index in frames of the function tc[i] // or nil if any check fails. func stacktraceCheck(t *testing.T, tc []string, frames []proc.Stackframe) []int { m := make([]int, len(tc)) i, j := 0, 0 for i < len(tc) { tcname := tc[i] tcsystem := strings.HasPrefix(tcname, "C.") if tcname[0] == '!' { tcsystem = true tcname = tcname[1:] } for j < len(frames) { name := "?" 
if frames[j].Current.Fn != nil { name = frames[j].Current.Fn.Name } if name == tcname { m[i] = j if tcsystem != frames[j].SystemStack { t.Logf("system stack check failed for frame %d (expected %v got %v)", j, tcsystem, frames[j].SystemStack) t.Logf("expected: %v\n", tc) return nil } break } j++ } if j >= len(frames) { t.Logf("couldn't find frame %d %s", i, tc) t.Logf("expected: %v\n", tc) return nil } i++ } return m } func frameInFile(frame proc.Stackframe, file string) bool { for _, loc := range []proc.Location{frame.Current, frame.Call} { if !strings.HasSuffix(loc.File, "/"+file) && !strings.HasSuffix(loc.File, "\\"+file) { return false } if loc.Line <= 0 { return false } } return true } func TestCgoStacktrace(t *testing.T) { if runtime.GOOS == "windows" { ver, _ := goversion.Parse(runtime.Version()) if ver.Major > 0 && !ver.AfterOrEqual(goversion.GoVersion{1, 9, -1, 0, 0, ""}) { t.Skip("disabled on windows with go before version 1.9") } } if runtime.GOOS == "darwin" { ver, _ := goversion.Parse(runtime.Version()) if ver.Major > 0 && !ver.AfterOrEqual(goversion.GoVersion{1, 8, -1, 0, 0, ""}) { t.Skip("disabled on macOS with go before version 1.8") } } // Tests that: // a) we correctly identify the goroutine while we are executing cgo code // b) that we can stitch together the system stack (where cgo code // executes) and the normal goroutine stack // Each test case describes how the stack trace should appear after a // continue. The first function on each test case is the topmost function // that should be found on the stack, the actual stack trace can have more // frame than those listed here but all the frames listed must appear in // the specified order. testCases := [][]string{ []string{"main.main"}, []string{"C.helloworld_pt2", "C.helloworld", "main.main"}, []string{"main.helloWorldS", "main.helloWorld", "C.helloworld_pt2", "C.helloworld", "main.main"}, []string{"C.helloworld_pt4", "C.helloworld_pt3", "main.helloWorldS", "main.helloWorld", "C.helloworld_pt2", "C.helloworld", "main.main"}, []string{"main.helloWorld2", "C.helloworld_pt4", "C.helloworld_pt3", "main.helloWorldS", "main.helloWorld", "C.helloworld_pt2", "C.helloworld", "main.main"}} var gid int frameOffs := map[string]int64{} framePointerOffs := map[string]int64{} withTestProcess("cgostacktest/", t, func(p proc.Process, fixture protest.Fixture) { for itidx, tc := range testCases { assertNoError(proc.Continue(p), t, fmt.Sprintf("Continue at iteration step %d", itidx)) g, err := proc.GetG(p.CurrentThread()) assertNoError(err, t, fmt.Sprintf("GetG at iteration step %d", itidx)) if itidx == 0 { gid = g.ID } else { if gid != g.ID { t.Fatalf("wrong goroutine id at iteration step %d (expected %d got %d)", itidx, gid, g.ID) } } frames, err := g.Stacktrace(100, false) assertNoError(err, t, fmt.Sprintf("Stacktrace at iteration step %d", itidx)) t.Logf("iteration step %d", itidx) logStacktrace(t, p.BinInfo(), frames) m := stacktraceCheck(t, tc, frames) mismatch := (m == nil) for i, j := range m { if strings.HasPrefix(tc[i], "C.hellow") { if !frameInFile(frames[j], "hello.c") { t.Logf("position in %q is %s:%d (call %s:%d)", tc[i], frames[j].Current.File, frames[j].Current.Line, frames[j].Call.File, frames[j].Call.Line) mismatch = true break } } if frameOff, ok := frameOffs[tc[i]]; ok { if frameOff != frames[j].FrameOffset() { t.Logf("frame %s offset mismatch", tc[i]) } if framePointerOffs[tc[i]] != frames[j].FramePointerOffset() { t.Logf("frame %s pointer offset mismatch", tc[i]) } } else { frameOffs[tc[i]] = frames[j].FrameOffset() 
framePointerOffs[tc[i]] = frames[j].FramePointerOffset() } } // also check that ThreadStacktrace produces the same list of frames threadFrames, err := proc.ThreadStacktrace(p.CurrentThread(), 100) assertNoError(err, t, fmt.Sprintf("ThreadStacktrace at iteration step %d", itidx)) if len(threadFrames) != len(frames) { mismatch = true } else { for j := range frames { if frames[j].Current.File != threadFrames[j].Current.File || frames[j].Current.Line != threadFrames[j].Current.Line { t.Logf("stack mismatch between goroutine stacktrace and thread stacktrace") t.Logf("thread stacktrace:") logStacktrace(t, p.BinInfo(), threadFrames) mismatch = true break } } } if mismatch { t.Fatal("see previous loglines") } } }) } func TestCgoSources(t *testing.T) { if runtime.GOOS == "windows" { ver, _ := goversion.Parse(runtime.Version()) if ver.Major > 0 && !ver.AfterOrEqual(goversion.GoVersion{1, 9, -1, 0, 0, ""}) { t.Skip("disabled on windows with go before version 1.9") } } withTestProcess("cgostacktest/", t, func(p proc.Process, fixture protest.Fixture) { sources := p.BinInfo().Sources for _, needle := range []string{"main.go", "hello.c"} { found := false for _, k := range sources { if strings.HasSuffix(k, "/"+needle) || strings.HasSuffix(k, "\\"+needle) { found = true break } } if !found { t.Errorf("File %s not found", needle) } } }) } func TestSystemstackStacktrace(t *testing.T) { // check that we can follow a stack switch initiated by runtime.systemstack() withTestProcess("panic", t, func(p proc.Process, fixture protest.Fixture) { _, err := setFunctionBreakpoint(p, "runtime.startpanic_m") assertNoError(err, t, "setFunctionBreakpoint()") assertNoError(proc.Continue(p), t, "first continue") assertNoError(proc.Continue(p), t, "second continue") g, err := proc.GetG(p.CurrentThread()) assertNoError(err, t, "GetG") frames, err := g.Stacktrace(100, false) assertNoError(err, t, "stacktrace") logStacktrace(t, p.BinInfo(), frames) m := stacktraceCheck(t, []string{"!runtime.startpanic_m", "runtime.gopanic", "main.main"}, frames) if m == nil { t.Fatal("see previous loglines") } }) } func TestSystemstackOnRuntimeNewstack(t *testing.T) { // The bug being tested here manifests as follows: // - set a breakpoint somewhere or interrupt the program with Ctrl-C // - try to look at stacktraces of other goroutines // If one of the other goroutines is resizing its own stack the stack // command won't work for it. withTestProcess("binarytrees", t, func(p proc.Process, fixture protest.Fixture) { _, err := setFunctionBreakpoint(p, "main.main") assertNoError(err, t, "setFunctionBreakpoint(main.main)") assertNoError(proc.Continue(p), t, "first continue") g, err := proc.GetG(p.CurrentThread()) assertNoError(err, t, "GetG") mainGoroutineID := g.ID _, err = setFunctionBreakpoint(p, "runtime.newstack") assertNoError(err, t, "setFunctionBreakpoint(runtime.newstack)") for { assertNoError(proc.Continue(p), t, "second continue") g, err = proc.GetG(p.CurrentThread()) assertNoError(err, t, "GetG") if g.ID == mainGoroutineID { break } } frames, err := g.Stacktrace(100, false) assertNoError(err, t, "stacktrace") logStacktrace(t, p.BinInfo(), frames) m := stacktraceCheck(t, []string{"!runtime.newstack", "main.main"}, frames) if m == nil { t.Fatal("see previous loglines") } }) } func TestIssue1034(t *testing.T) { // The external linker on macOS produces an abbrev for DW_TAG_subprogram // without the "has children" flag, we should support this. 
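// Take a stacktrace through the cgo fixture and check that FunctionArguments on an outer frame
// reports no arguments.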
withTestProcess("cgostacktest/", t, func(p proc.Process, fixture protest.Fixture) { _, err := setFunctionBreakpoint(p, "main.main") assertNoError(err, t, "setFunctionBreakpoint()") assertNoError(proc.Continue(p), t, "Continue()") frames, err := p.SelectedGoroutine().Stacktrace(10, false) assertNoError(err, t, "Stacktrace") scope := proc.FrameToScope(p.BinInfo(), p.CurrentThread(), nil, frames[2:]...) args, _ := scope.FunctionArguments(normalLoadConfig) assertNoError(err, t, "FunctionArguments()") if len(args) > 0 { t.Fatalf("wrong number of arguments for frame %v (%d)", frames[2], len(args)) } }) } func TestIssue1008(t *testing.T) { // The external linker on macOS inserts "end of sequence" extended opcodes // in debug_line. which we should support correctly. withTestProcess("cgostacktest/", t, func(p proc.Process, fixture protest.Fixture) { _, err := setFunctionBreakpoint(p, "main.main") assertNoError(err, t, "setFunctionBreakpoint()") assertNoError(proc.Continue(p), t, "Continue()") loc, err := p.CurrentThread().Location() assertNoError(err, t, "CurrentThread().Location()") t.Logf("location %v\n", loc) if !strings.HasSuffix(loc.File, "/main.go") { t.Errorf("unexpected location %s:%d\n", loc.File, loc.Line) } if loc.Line > 31 { t.Errorf("unexpected location %s:%d (file only has 30 lines)\n", loc.File, loc.Line) } }) } func TestDeclLine(t *testing.T) { ver, _ := goversion.Parse(runtime.Version()) if ver.Major > 0 && !ver.AfterOrEqual(goversion.GoVersion{1, 10, -1, 0, 0, ""}) { t.Skip("go 1.9 and prior versions do not emit DW_AT_decl_line") } withTestProcess("decllinetest", t, func(p proc.Process, fixture protest.Fixture) { assertNoError(proc.Continue(p), t, "Continue") scope, err := proc.GoroutineScope(p.CurrentThread()) assertNoError(err, t, "GoroutineScope (1)") vars, err := scope.LocalVariables(normalLoadConfig) assertNoError(err, t, "LocalVariables (1)") if len(vars) != 1 { t.Fatalf("wrong number of variables %d", len(vars)) } assertNoError(proc.Continue(p), t, "Continue") scope, err = proc.GoroutineScope(p.CurrentThread()) assertNoError(err, t, "GoroutineScope (2)") scope.LocalVariables(normalLoadConfig) vars, err = scope.LocalVariables(normalLoadConfig) assertNoError(err, t, "LocalVariables (2)") if len(vars) != 2 { t.Fatalf("wrong number of variables %d", len(vars)) } }) } func TestIssue1137(t *testing.T) { withTestProcess("dotpackagesiface", t, func(p proc.Process, fixture protest.Fixture) { assertNoError(proc.Continue(p), t, "Continue()") v := evalVariable(p, t, "iface") assertNoError(v.Unreadable, t, "iface unreadable") v2 := evalVariable(p, t, "iface2") assertNoError(v2.Unreadable, t, "iface2 unreadable") }) } func TestIssue1101(t *testing.T) { // If a breakpoint is hit close to process death on a thread that isn't the // group leader the process could die while we are trying to stop it. // // This can be easily reproduced by having the goroutine that's executing // main.main (which will almost always run on the thread group leader) wait // for a second goroutine before exiting, then setting a breakpoint on the // second goroutine and stepping through it (see TestIssue1101 in // proc_test.go). // // When stepping over the return instruction of main.f the deferred // wg.Done() call will be executed which will cause the main goroutine to // resume and proceed to exit. 
Both the temporary breakpoint on wg.Done and // the temporary breakpoint on the return address of main.f will be in // close proximity to main.main calling os.Exit() and causing the death of // the thread group leader. withTestProcess("issue1101", t, func(p proc.Process, fixture protest.Fixture) { _, err := setFunctionBreakpoint(p, "main.f") assertNoError(err, t, "setFunctionBreakpoint()") assertNoError(proc.Continue(p), t, "Continue()") assertNoError(proc.Next(p), t, "Next() 1") assertNoError(proc.Next(p), t, "Next() 2") lastCmd := "Next() 3" exitErr := proc.Next(p) if exitErr == nil { lastCmd = "final Continue()" exitErr = proc.Continue(p) } if pexit, exited := exitErr.(proc.ErrProcessExited); exited { if pexit.Status != 2 && testBackend != "lldb" { // looks like there's a bug with debugserver on macOS that sometimes // will report exit status 0 instead of the proper exit status. t.Fatalf("process exited status %d (expected 2)", pexit.Status) } } else { assertNoError(exitErr, t, lastCmd) t.Fatalf("process did not exit after %s", lastCmd) } }) } func TestIssue1145(t *testing.T) { withTestProcess("sleep", t, func(p proc.Process, fixture protest.Fixture) { setFileBreakpoint(p, t, fixture, 18) assertNoError(proc.Continue(p), t, "Continue()") resumeChan := make(chan struct{}, 1) p.ResumeNotify(resumeChan) go func() { <-resumeChan time.Sleep(100 * time.Millisecond) p.RequestManualStop() }() assertNoError(proc.Next(p), t, "Next()") if p.Breakpoints().HasInternalBreakpoints() { t.Fatal("has internal breakpoints after manual stop request") } }) } func TestDisassembleGlobalVars(t *testing.T) { withTestProcess("teststepconcurrent", t, func(p proc.Process, fixture protest.Fixture) { mainfn := p.BinInfo().LookupFunc["main.main"] text, err := proc.Disassemble(p, nil, mainfn.Entry, mainfn.End) assertNoError(err, t, "Disassemble") found := false for i := range text { if strings.Index(text[i].Text(proc.IntelFlavour, p.BinInfo()), "main.v") > 0 { found = true break } } if !found { t.Fatalf("could not find main.v reference in disassembly") } }) } func checkFrame(frame proc.Stackframe, fnname, file string, line int, inlined bool) error { if frame.Call.Fn == nil || frame.Call.Fn.Name != fnname { return fmt.Errorf("wrong function name: %s", fnname) } if frame.Call.File != file || frame.Call.Line != line { return fmt.Errorf("wrong file:line %s:%d", frame.Call.File, frame.Call.Line) } if frame.Inlined != inlined { if inlined { return fmt.Errorf("not inlined") } else { return fmt.Errorf("inlined") } } return nil } func TestInlinedStacktraceAndVariables(t *testing.T) { if ver, _ := goversion.Parse(runtime.Version()); ver.Major >= 0 && !ver.AfterOrEqual(goversion.GoVersion{1, 10, -1, 0, 0, ""}) { // Versions of go before 1.10 do not have DWARF information for inlined calls t.Skip("inlining not supported") } firstCallCheck := &scopeCheck{ line: 7, ok: false, varChecks: []varCheck{ varCheck{ name: "a", typ: "int", kind: reflect.Int, hasVal: true, intVal: 3, }, varCheck{ name: "z", typ: "int", kind: reflect.Int, hasVal: true, intVal: 9, }, }, } secondCallCheck := &scopeCheck{ line: 7, ok: false, varChecks: []varCheck{ varCheck{ name: "a", typ: "int", kind: reflect.Int, hasVal: true, intVal: 4, }, varCheck{ name: "z", typ: "int", kind: reflect.Int, hasVal: true, intVal: 16, }, }, } withTestProcessArgs("testinline", t, ".", []string{}, protest.EnableInlining, func(p proc.Process, fixture protest.Fixture) { pcs := p.BinInfo().AllPCsForFileLine(fixture.Source, 7) if len(pcs) < 2 { t.Fatalf("expected at least two 
locations for %s:%d (got %d: %#x)", fixture.Source, 6, len(pcs), pcs) } for _, pc := range pcs { t.Logf("setting breakpoint at %#x\n", pc) _, err := p.SetBreakpoint(pc, proc.UserBreakpoint, nil) assertNoError(err, t, fmt.Sprintf("SetBreakpoint(%#x)", pc)) } // first inlined call assertNoError(proc.Continue(p), t, "Continue") frames, err := proc.ThreadStacktrace(p.CurrentThread(), 20) assertNoError(err, t, "ThreadStacktrace") t.Logf("Stacktrace:\n") for i := range frames { t.Logf("\t%s at %s:%d (%#x)\n", frames[i].Call.Fn.Name, frames[i].Call.File, frames[i].Call.Line, frames[i].Current.PC) } if err := checkFrame(frames[0], "main.inlineThis", fixture.Source, 7, true); err != nil { t.Fatalf("Wrong frame 0: %v", err) } if err := checkFrame(frames[1], "main.main", fixture.Source, 18, false); err != nil { t.Fatalf("Wrong frame 1: %v", err) } if avar, _ := constant.Int64Val(evalVariable(p, t, "a").Value); avar != 3 { t.Fatalf("value of 'a' variable is not 3 (%d)", avar) } if zvar, _ := constant.Int64Val(evalVariable(p, t, "z").Value); zvar != 9 { t.Fatalf("value of 'z' variable is not 9 (%d)", zvar) } if _, ok := firstCallCheck.checkLocalsAndArgs(p, t); !ok { t.Fatalf("exiting for past errors") } // second inlined call assertNoError(proc.Continue(p), t, "Continue") frames, err = proc.ThreadStacktrace(p.CurrentThread(), 20) assertNoError(err, t, "ThreadStacktrace (2)") t.Logf("Stacktrace 2:\n") for i := range frames { t.Logf("\t%s at %s:%d (%#x)\n", frames[i].Call.Fn.Name, frames[i].Call.File, frames[i].Call.Line, frames[i].Current.PC) } if err := checkFrame(frames[0], "main.inlineThis", fixture.Source, 7, true); err != nil { t.Fatalf("Wrong frame 0: %v", err) } if err := checkFrame(frames[1], "main.main", fixture.Source, 19, false); err != nil { t.Fatalf("Wrong frame 1: %v", err) } if avar, _ := constant.Int64Val(evalVariable(p, t, "a").Value); avar != 4 { t.Fatalf("value of 'a' variable is not 3 (%d)", avar) } if zvar, _ := constant.Int64Val(evalVariable(p, t, "z").Value); zvar != 16 { t.Fatalf("value of 'z' variable is not 9 (%d)", zvar) } if bvar, err := evalVariableOrError(p, "b"); err == nil { t.Fatalf("expected error evaluating 'b', but it succeeded instead: %v", bvar) } if _, ok := secondCallCheck.checkLocalsAndArgs(p, t); !ok { t.Fatalf("exiting for past errors") } }) } func TestInlineStep(t *testing.T) { if ver, _ := goversion.Parse(runtime.Version()); ver.Major >= 0 && !ver.AfterOrEqual(goversion.GoVersion{1, 10, -1, 0, 0, ""}) { // Versions of go before 1.10 do not have DWARF information for inlined calls t.Skip("inlining not supported") } testseq2Args(".", []string{}, protest.EnableInlining, t, "testinline", "", []seqTest{ {contContinue, 18}, {contStep, 6}, {contStep, 7}, {contStep, 18}, {contStep, 19}, }) } func TestInlineNext(t *testing.T) { if ver, _ := goversion.Parse(runtime.Version()); ver.Major >= 0 && !ver.AfterOrEqual(goversion.GoVersion{1, 10, -1, 0, 0, ""}) { // Versions of go before 1.10 do not have DWARF information for inlined calls t.Skip("inlining not supported") } testseq2Args(".", []string{}, protest.EnableInlining, t, "testinline", "", []seqTest{ {contContinue, 18}, {contStep, 6}, {contNext, 7}, {contNext, 18}, {contNext, 19}, }) } func TestInlineStepOver(t *testing.T) { if ver, _ := goversion.Parse(runtime.Version()); ver.Major >= 0 && !ver.AfterOrEqual(goversion.GoVersion{1, 10, -1, 0, 0, ""}) { // Versions of go before 1.10 do not have DWARF information for inlined calls t.Skip("inlining not supported") } testseq2Args(".", []string{}, protest.EnableInlining, t, 
"testinline", "", []seqTest{ {contContinue, 18}, {contNext, 19}, {contNext, 20}, }) } func TestInlineStepOut(t *testing.T) { if ver, _ := goversion.Parse(runtime.Version()); ver.Major >= 0 && !ver.AfterOrEqual(goversion.GoVersion{1, 10, -1, 0, 0, ""}) { // Versions of go before 1.10 do not have DWARF information for inlined calls t.Skip("inlining not supported") } testseq2Args(".", []string{}, protest.EnableInlining, t, "testinline", "", []seqTest{ {contContinue, 18}, {contStep, 6}, {contStepout, 18}, }) } func TestInlineFunctionList(t *testing.T) { // We should be able to list all functions, even inlined ones. if ver, _ := goversion.Parse(runtime.Version()); ver.Major >= 0 && !ver.AfterOrEqual(goversion.GoVersion{1, 10, -1, 0, 0, ""}) { // Versions of go before 1.10 do not have DWARF information for inlined calls t.Skip("inlining not supported") } withTestProcessArgs("testinline", t, ".", []string{}, protest.EnableInlining|protest.EnableOptimization, func(p proc.Process, fixture protest.Fixture) { var found bool for _, fn := range p.BinInfo().Functions { if strings.Contains(fn.Name, "inlineThis") { found = true break } } if !found { t.Fatal("inline function not returned") } }) } func TestInlineBreakpoint(t *testing.T) { // We should be able to set a breakpoint on the call site of an inlined function. if ver, _ := goversion.Parse(runtime.Version()); ver.Major >= 0 && !ver.AfterOrEqual(goversion.GoVersion{1, 10, -1, 0, 0, ""}) { // Versions of go before 1.10 do not have DWARF information for inlined calls t.Skip("inlining not supported") } withTestProcessArgs("testinline", t, ".", []string{}, protest.EnableInlining|protest.EnableOptimization, func(p proc.Process, fixture protest.Fixture) { pc, fn, err := p.BinInfo().LineToPC(fixture.Source, 17) if pc == 0 { t.Fatal("unable to get PC for inlined function call") } expectedFn := "main.main" if fn.Name != expectedFn { t.Fatalf("incorrect function returned, expected %s, got %s", expectedFn, fn.Name) } _, err = p.SetBreakpoint(pc, proc.UserBreakpoint, nil) if err != nil { t.Fatalf("unable to set breakpoint: %v", err) } }) } func TestIssue951(t *testing.T) { if ver, _ := goversion.Parse(runtime.Version()); ver.Major >= 0 && !ver.AfterOrEqual(goversion.GoVersion{1, 9, -1, 0, 0, ""}) { t.Skip("scopes not implemented in <=go1.8") } withTestProcess("issue951", t, func(p proc.Process, fixture protest.Fixture) { assertNoError(proc.Continue(p), t, "Continue()") scope, err := proc.GoroutineScope(p.CurrentThread()) assertNoError(err, t, "GoroutineScope") args, err := scope.FunctionArguments(normalLoadConfig) assertNoError(err, t, "FunctionArguments") t.Logf("%#v", args[0]) if args[0].Flags&proc.VariableShadowed == 0 { t.Error("argument is not shadowed") } vars, err := scope.LocalVariables(normalLoadConfig) assertNoError(err, t, "LocalVariables") shadowed, notShadowed := 0, 0 for i := range vars { t.Logf("var %d: %#v\n", i, vars[i]) if vars[i].Flags&proc.VariableShadowed != 0 { shadowed++ } else { notShadowed++ } } if shadowed != 1 || notShadowed != 1 { t.Errorf("Wrong number of shadowed/non-shadowed local variables: %d %d", shadowed, notShadowed) } }) } func TestDWZCompression(t *testing.T) { // If dwz is not available in the system, skip this test if _, err := exec.LookPath("dwz"); err != nil { t.Skip("dwz not installed") } withTestProcessArgs("dwzcompression", t, ".", []string{}, protest.EnableDWZCompression, func(p proc.Process, fixture protest.Fixture) { _, err := setFunctionBreakpoint(p, "C.fortytwo") assertNoError(err, t, "setFunctionBreakpoint()") 
assertNoError(proc.Continue(p), t, "first Continue()") val := evalVariable(p, t, "stdin") if val.RealType == nil { t.Errorf("Can't find type for \"stdin\" global variable") } }) } func TestMapLoadConfigWithReslice(t *testing.T) { // Check that load configuration is respected for resliced maps. withTestProcess("testvariables2", t, func(p proc.Process, fixture protest.Fixture) { zolotovLoadCfg := proc.LoadConfig{FollowPointers: true, MaxStructFields: -1, MaxVariableRecurse: 3, MaxStringLen: 10, MaxArrayValues: 10} assertNoError(proc.Continue(p), t, "First Continue()") scope, err := proc.GoroutineScope(p.CurrentThread()) assertNoError(err, t, "GoroutineScope") m1, err := scope.EvalExpression("m1", zolotovLoadCfg) assertNoError(err, t, "EvalVariable") t.Logf("m1 returned children %d (%d)", len(m1.Children)/2, m1.Len) expr := fmt.Sprintf("(*(*%q)(%d))[10:]", m1.DwarfType.String(), m1.Addr) t.Logf("expr %q\n", expr) m1cont, err := scope.EvalExpression(expr, zolotovLoadCfg) assertNoError(err, t, "EvalVariable") t.Logf("m1cont returned children %d", len(m1cont.Children)/2) if len(m1cont.Children) != 20 { t.Fatalf("wrong number of children returned %d\n", len(m1cont.Children)/2) } }) } func TestStepOutReturn(t *testing.T) { ver, _ := goversion.Parse(runtime.Version()) if ver.Major >= 0 && !ver.AfterOrEqual(goversion.GoVersion{1, 10, -1, 0, 0, ""}) { t.Skip("return variables aren't marked on 1.9 or earlier") } withTestProcess("stepoutret", t, func(p proc.Process, fixture protest.Fixture) { _, err := setFunctionBreakpoint(p, "main.stepout") assertNoError(err, t, "SetBreakpoint") assertNoError(proc.Continue(p), t, "Continue") assertNoError(proc.StepOut(p), t, "StepOut") ret := p.CurrentThread().Common().ReturnValues(normalLoadConfig) if len(ret) != 2 { t.Fatalf("wrong number of return values %v", ret) } stridx := 0 numidx := 1 if !goversion.VersionAfterOrEqual(runtime.Version(), 1, 12) { // in 1.11 and earlier the order of return values in DWARF is // unspecified, in 1.11 and later it follows the order of definition // specified by the user for i := range ret { if ret[i].Name == "str" { stridx = i numidx = 1 - i break } } } if ret[stridx].Name != "str" { t.Fatalf("(str) bad return value name %s", ret[stridx].Name) } if ret[stridx].Kind != reflect.String { t.Fatalf("(str) bad return value kind %v", ret[stridx].Kind) } if s := constant.StringVal(ret[stridx].Value); s != "return 47" { t.Fatalf("(str) bad return value %q", s) } if ret[numidx].Name != "num" { t.Fatalf("(num) bad return value name %s", ret[numidx].Name) } if ret[numidx].Kind != reflect.Int { t.Fatalf("(num) bad return value kind %v", ret[numidx].Kind) } if n, _ := constant.Int64Val(ret[numidx].Value); n != 48 { t.Fatalf("(num) bad return value %d", n) } }) } func TestOptimizationCheck(t *testing.T) { withTestProcess("continuetestprog", t, func(p proc.Process, fixture protest.Fixture) { fn := p.BinInfo().LookupFunc["main.main"] if fn.Optimized() { t.Fatalf("main.main is optimized") } }) if goversion.VersionAfterOrEqual(runtime.Version(), 1, 10) { withTestProcessArgs("continuetestprog", t, ".", []string{}, protest.EnableOptimization|protest.EnableInlining, func(p proc.Process, fixture protest.Fixture) { fn := p.BinInfo().LookupFunc["main.main"] if !fn.Optimized() { t.Fatalf("main.main is not optimized") } }) } } func TestIssue1264(t *testing.T) { // It should be possible to set a breakpoint condition that consists only // of evaluating a single boolean variable. 
withTestProcess("issue1264", t, func(p proc.Process, fixture protest.Fixture) { bp := setFileBreakpoint(p, t, fixture, 8) bp.Cond = &ast.Ident{Name: "equalsTwo"} assertNoError(proc.Continue(p), t, "Continue()") assertLineNumber(p, t, 8, "after continue") }) } func TestReadDefer(t *testing.T) { withTestProcess("deferstack", t, func(p proc.Process, fixture protest.Fixture) { assertNoError(proc.Continue(p), t, "Continue") frames, err := p.SelectedGoroutine().Stacktrace(10, true) assertNoError(err, t, "Stacktrace") logStacktrace(t, p.BinInfo(), frames) examples := []struct { frameIdx int topmostDefer string defers []string }{ // main.call3 (defers nothing, topmost defer main.f2) {0, "main.f2", []string{}}, // main.call2 (defers main.f2, main.f3, topmost defer main.f2) {1, "main.f2", []string{"main.f2", "main.f3"}}, // main.call1 (defers main.f1, main.f2, topmost defer main.f1) {2, "main.f1", []string{"main.f1", "main.f2"}}, // main.main (defers nothing) {3, "", []string{}}} defercheck := func(d *proc.Defer, deferName, tgt string, frameIdx int) { if d == nil { t.Fatalf("expected %q as %s of frame %d, got nothing", tgt, deferName, frameIdx) } if d.Unreadable != nil { t.Fatalf("expected %q as %s of frame %d, got unreadable defer: %v", tgt, deferName, frameIdx, d.Unreadable) } _, _, dfn := p.BinInfo().PCToLine(d.DeferredPC) if dfn == nil { t.Fatalf("expected %q as %s of frame %d, got %#x", tgt, deferName, frameIdx, d.DeferredPC) } if dfn.Name != tgt { t.Fatalf("expected %q as %s of frame %d, got %q", tgt, deferName, frameIdx, dfn.Name) } } for _, example := range examples { frame := &frames[example.frameIdx] if example.topmostDefer != "" { defercheck(frame.TopmostDefer, "topmost defer", example.topmostDefer, example.frameIdx) } if len(example.defers) != len(frames[example.frameIdx].Defers) { t.Fatalf("expected %d defers for %d, got %v", len(example.defers), example.frameIdx, frame.Defers) } for deferIdx := range example.defers { defercheck(frame.Defers[deferIdx], fmt.Sprintf("defer %d", deferIdx), example.defers[deferIdx], example.frameIdx) } } }) } func TestNextUnknownInstr(t *testing.T) { if !goversion.VersionAfterOrEqual(runtime.Version(), 1, 10) { t.Skip("versions of Go before 1.10 can't assemble the instruction VPUNPCKLWD") } withTestProcess("nodisasm/", t, func(p proc.Process, fixture protest.Fixture) { _, err := setFunctionBreakpoint(p, "main.asmFunc") assertNoError(err, t, "setFunctionBreakpoint()") assertNoError(proc.Continue(p), t, "Continue()") assertNoError(proc.Next(p), t, "Next()") }) } func TestReadDeferArgs(t *testing.T) { var tests = []struct { frame, deferCall int a, b int64 }{ {1, 1, 42, 61}, {2, 2, 1, -1}, } withTestProcess("deferstack", t, func(p proc.Process, fixture protest.Fixture) { assertNoError(proc.Continue(p), t, "Continue()") for _, test := range tests { scope, err := proc.ConvertEvalScope(p, -1, test.frame, test.deferCall) assertNoError(err, t, fmt.Sprintf("ConvertEvalScope(-1, %d, %d)", test.frame, test.deferCall)) if scope.Fn.Name != "main.f2" { t.Fatalf("expected function \"main.f2\" got %q", scope.Fn.Name) } avar, err := scope.EvalVariable("a", normalLoadConfig) if err != nil { t.Fatal(err) } bvar, err := scope.EvalVariable("b", normalLoadConfig) if err != nil { t.Fatal(err) } a, _ := constant.Int64Val(avar.Value) b, _ := constant.Int64Val(bvar.Value) if a != test.a { t.Errorf("value of argument 'a' at frame %d, deferred call %d: %d (expected %d)", test.frame, test.deferCall, a, test.a) } if b != test.b { t.Errorf("value of argument 'b' at frame %d, deferred call 
%d: %d (expected %d)", test.frame, test.deferCall, b, test.b) } } }) } func TestIssue1374(t *testing.T) { // Continue did not work when stopped at a breakpoint immediately after calling CallFunction. protest.MustSupportFunctionCalls(t, testBackend) withTestProcess("issue1374", t, func(p proc.Process, fixture protest.Fixture) { setFileBreakpoint(p, t, fixture, 7) assertNoError(proc.Continue(p), t, "First Continue") assertLineNumber(p, t, 7, "Did not continue to correct location (first continue),") assertNoError(proc.CallFunction(p, "getNum()", &normalLoadConfig, true), t, "Call") err := proc.Continue(p) if _, isexited := err.(proc.ErrProcessExited); !isexited { regs, _ := p.CurrentThread().Registers(false) f, l, _ := p.BinInfo().PCToLine(regs.PC()) t.Fatalf("expected process exited error got %v at %s:%d", err, f, l) } }) } func TestIssue1432(t *testing.T) { // Check that taking the address of a struct, casting it into a pointer to // the struct's type and then accessing a member field will still: // - perform auto-dereferencing on struct member access // - yield a Variable that's ultimately assignable (i.e. has an address) withTestProcess("issue1432", t, func(p proc.Process, fixture protest.Fixture) { assertNoError(proc.Continue(p), t, "Continue") svar := evalVariable(p, t, "s") t.Logf("%#x", svar.Addr) scope, err := proc.GoroutineScope(p.CurrentThread()) assertNoError(err, t, "GoroutineScope()") err = scope.SetVariable(fmt.Sprintf("(*\"main.s\")(%#x).i", svar.Addr), "10") assertNoError(err, t, "SetVariable") }) } func TestGoroutinesInfoLimit(t *testing.T) { withTestProcess("teststepconcurrent", t, func(p proc.Process, fixture protest.Fixture) { setFileBreakpoint(p, t, fixture, 37) assertNoError(proc.Continue(p), t, "Continue()") gcount := 0 nextg := 0 const goroutinesInfoLimit = 10 for nextg >= 0 { oldnextg := nextg var gs []*proc.G var err error gs, nextg, err = proc.GoroutinesInfo(p, nextg, goroutinesInfoLimit) assertNoError(err, t, fmt.Sprintf("GoroutinesInfo(%d, %d)", oldnextg, goroutinesInfoLimit)) gcount += len(gs) t.Logf("got %d goroutines\n", len(gs)) } t.Logf("number of goroutines: %d\n", gcount) gs, _, err := proc.GoroutinesInfo(p, 0, 0) assertNoError(err, t, "GoroutinesInfo(0, 0)") t.Logf("number of goroutines (full scan): %d\n", gcount) if len(gs) != gcount { t.Fatalf("mismatch in the number of goroutines %d %d\n", gcount, len(gs)) } }) } func TestIssue1469(t *testing.T) { withTestProcess("issue1469", t, func(p proc.Process, fixture protest.Fixture) { setFileBreakpoint(p, t, fixture, 13) assertNoError(proc.Continue(p), t, "Continue()") gid2thread := make(map[int][]proc.Thread) for _, thread := range p.ThreadList() { g, _ := proc.GetG(thread) if g == nil { continue } gid2thread[g.ID] = append(gid2thread[g.ID], thread) } for gid := range gid2thread { if len(gid2thread[gid]) > 1 { t.Logf("too many threads running goroutine %d", gid) for _, thread := range gid2thread[gid] { t.Logf("\tThread %d", thread.ThreadID()) frames, err := proc.ThreadStacktrace(thread, 20) if err != nil { t.Logf("\t\tcould not get stacktrace %v", err) } for _, frame := range frames { t.Logf("\t\t%#x at %s:%d (systemstack: %v)", frame.Call.PC, frame.Call.File, frame.Call.Line, frame.SystemStack) } } } } }) } func TestDeadlockBreakpoint(t *testing.T) { if buildMode == "pie" { t.Skip("See https://github.com/golang/go/issues/29322") } deadlockBp := proc.FatalThrow if !goversion.VersionAfterOrEqual(runtime.Version(), 1, 11) { deadlockBp = proc.UnrecoveredPanic } withTestProcess("testdeadlock", t, func(p 
proc.Process, fixture protest.Fixture) { assertNoError(proc.Continue(p), t, "Continue()") bp := p.CurrentThread().Breakpoint() if bp.Breakpoint == nil || bp.Name != deadlockBp { t.Fatalf("did not stop at deadlock breakpoint %v", bp) } }) } func TestListImages(t *testing.T) { pluginFixtures := protest.WithPlugins(t, "plugin1/", "plugin2/") withTestProcessArgs("plugintest", t, ".", []string{pluginFixtures[0].Path, pluginFixtures[1].Path}, 0, func(p proc.Process, fixture protest.Fixture) { assertNoError(proc.Continue(p), t, "first continue") plugin1Found := false t.Logf("Libraries before:") for _, image := range p.BinInfo().Images { t.Logf("\t%#v", image) if image.Path == pluginFixtures[0].Path { plugin1Found = true } } if !plugin1Found { t.Fatalf("Could not find plugin1") } assertNoError(proc.Continue(p), t, "second continue") plugin1Found, plugin2Found := false, false t.Logf("Libraries after:") for _, image := range p.BinInfo().Images { t.Logf("\t%#v", image) switch image.Path { case pluginFixtures[0].Path: plugin1Found = true case pluginFixtures[1].Path: plugin2Found = true } } if !plugin1Found { t.Fatalf("Could not find plugin1") } if !plugin2Found { t.Fatalf("Could not find plugin2") } }) }
[ "\"CGO_ENABLED\"", "\"CGO_ENABLED\"", "\"GODEBUG\"", "\"TRAVIS\"" ]
[]
[ "CGO_ENABLED", "GODEBUG", "TRAVIS" ]
[]
["CGO_ENABLED", "GODEBUG", "TRAVIS"]
go
3
0
xtrm/wsgi.py
# flake8: noqa
"""
WSGI config for xtrm project.

It exposes the WSGI callable as a module-level variable named ``application``.

For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os

os.environ.setdefault(
    "DJANGO_SETTINGS_MODULE", "xtrm.settings.production"
)

from django.core.wsgi import get_wsgi_application

application = get_wsgi_application()
[]
[]
[]
[]
[]
python
0
0
pkg/minikube/cluster/cluster_windows.go
/*
Copyright 2016 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cluster

import (
    "fmt"
    "os"
    "os/exec"
    "path/filepath"

    "github.com/golang/glog"
    "github.com/pkg/errors"
    "golang.org/x/sys/windows/registry"
)

func detectVBoxManageCmd() string {
    cmd := "VBoxManage"
    if p := os.Getenv("VBOX_INSTALL_PATH"); p != "" {
        if path, err := exec.LookPath(filepath.Join(p, cmd)); err == nil {
            return path
        }
    }

    if p := os.Getenv("VBOX_MSI_INSTALL_PATH"); p != "" {
        if path, err := exec.LookPath(filepath.Join(p, cmd)); err == nil {
            return path
        }
    }

    // Look in default installation path for VirtualBox version > 5
    if path, err := exec.LookPath(filepath.Join("C:\\Program Files\\Oracle\\VirtualBox", cmd)); err == nil {
        return path
    }

    // Look in windows registry
    if p, err := findVBoxInstallDirInRegistry(); err == nil {
        if path, err := exec.LookPath(filepath.Join(p, cmd)); err == nil {
            return path
        }
    }

    if path, err := exec.LookPath(cmd); err == nil {
        return path
    }

    return cmd
}

func findVBoxInstallDirInRegistry() (string, error) {
    registryKey, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Oracle\VirtualBox`, registry.QUERY_VALUE)
    if err != nil {
        errorMessage := fmt.Sprintf("Can't find VirtualBox registry entries, is VirtualBox really installed properly? %v", err)
        glog.Errorf(errorMessage)
        return "", errors.New(errorMessage)
    }
    defer registryKey.Close()

    installDir, _, err := registryKey.GetStringValue("InstallDir")
    if err != nil {
        errorMessage := fmt.Sprintf("Can't find InstallDir registry key within VirtualBox registries entries, is VirtualBox really installed properly? %v", err)
        glog.Errorf(errorMessage)
        return "", errors.New(errorMessage)
    }

    return installDir, nil
}
[ "\"VBOX_INSTALL_PATH\"", "\"VBOX_MSI_INSTALL_PATH\"" ]
[]
[ "VBOX_MSI_INSTALL_PATH", "VBOX_INSTALL_PATH" ]
[]
["VBOX_MSI_INSTALL_PATH", "VBOX_INSTALL_PATH"]
go
2
0
datalabeling/create_annotation_spec_set_test.py
#!/usr/bin/env python

# Copyright 2019 Google, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

import backoff
from google.api_core.exceptions import ServerError
import pytest

import create_annotation_spec_set
import testing_lib

PROJECT_ID = os.getenv('GCLOUD_PROJECT')


@pytest.fixture(scope='module')
def cleaner():
    resource_names = []

    yield resource_names

    for resource_name in resource_names:
        testing_lib.delete_annotation_spec_set(resource_name)


def test_create_annotation_spec_set(cleaner, capsys):

    @backoff.on_exception(
        backoff.expo, ServerError, max_time=testing_lib.RETRY_DEADLINE)
    def run_sample():
        return create_annotation_spec_set.create_annotation_spec_set(PROJECT_ID)

    response = run_sample()

    # For cleanup
    cleaner.append(response.name)

    out, _ = capsys.readouterr()
    assert 'The annotation_spec_set resource name:' in out
[]
[]
[ "GCLOUD_PROJECT" ]
[]
["GCLOUD_PROJECT"]
python
1
0
one_barangay/scripts/google_cloud_storage.py
"""File for Google Cloud Storage.""" import logging import os import urllib.parse from pathlib import Path import aiohttp from aiofile import AIOFile from gcloud.aio.storage import Storage from google.cloud import storage from one_barangay.local_settings import logger async def async_upload_to_bucket( filepath: str, file_obj, gcs_path: str, ): """Upload files to bucket. Args: filepath: str: The path to the file to be uploaded. file_obj: The file object from reading a file gcs_path: str: The target bucket name and sub-folder in GCS to upload to. (e.g. documents/photo) Returns: The path to the uploaded file. """ async with aiohttp.ClientSession() as session: gcs_storage = Storage(session=session) # skipcq gcs_filename = filepath.split("/")[-1] await gcs_storage.upload(gcs_path, gcs_filename, file_obj) return f"https://storage.googleapis.com/{gcs_path}/{urllib.parse.quote(gcs_filename)}" async def upload_to_gcs_runner( filepath: str, gcs_path: str, ): """Call the 'async_upload_to_bucket'. Args: filepath: str: The path to the file to be uploaded. gcs_path: str: The target bucket name and sub-folder in GCS. Returns: The path to the uploaded file. """ # target_bucket_name = target_bucket_name # bucket_folder = bucket_folder try: async with AIOFile(filepath, mode="rb") as afp: f = await afp.read() path = await async_upload_to_bucket(filepath, f, gcs_path) return path except FileNotFoundError as e: logger.exception("File not found. Make sure the file exists. %s", e) except OSError as e: logger.exception("File not uploaded. %s", e) def download_from_gcs( filename: str, target_bucket_name: str, bucket_folder: str, ): """Download file from Google Cloud Storage bucket. Args: filename: str: The name of file being downloaded. target_bucket_name: str: The bucket name from which to download to. bucket_folder: str: The folder from the bucket name from which to download to. Returns: None. """ try: storage_client = storage.Client(os.getenv("GOOGLE_PROJECT_ID")) bucket_name = storage_client.get_bucket(target_bucket_name) bucket = storage_client.get_bucket(bucket_name) path = os.path.join(bucket_folder, filename) base_dir = Path(__file__).resolve().parent.parent # TODO: Change to user location destination = os.path.join(base_dir, filename) blob = bucket.blob(path) blob.download_to_filename(destination) logging.info("%s downloaded to %s.", filename, destination) except FileNotFoundError as e: logger.exception("File not found. Make sure the file exists. %s", e) except OSError as e: logger.exception("%s not downloaded. %s", filename, e) # if __name__ == "__main__": # Sample Calls to Uploading to GCS # asyncio.run( # upload_to_gcs_runner( # "<your_absolute_filepath>" # ) # ) # Sample Calls to Downloading from GCS # download_from_gcs( # "kath.png", # str(os.getenv("GS_MEDIA_BUCKET_NAME")), # str(os.getenv("FILE_BUCKET_FOLDER")), # )
[]
[]
[ "GOOGLE_PROJECT_ID", "GS_MEDIA_BUCKET_NAME", "FILE_BUCKET_FOLDER" ]
[]
["GOOGLE_PROJECT_ID", "GS_MEDIA_BUCKET_NAME", "FILE_BUCKET_FOLDER"]
python
3
0
src/main.go
package main

import (
    "fmt"
    "log"
    "os"
    "strings"
    "time"

    "github.com/karrick/tparse"
    tb "gopkg.in/tucnak/telebot.v2"
)

func main() {
    b, err := tb.NewBot(tb.Settings{
        Token:  os.Getenv("TELEGRAM_BOT_TOKEN"),
        Poller: &tb.LongPoller{Timeout: 1 * time.Second},
    })

    if err != nil {
        log.Fatal(err)
        return
    }

    b.Handle("/remind", handleRemindMe(b))
    b.Start()
}

func handleRemindMe(bot *tb.Bot) func(*tb.Message) {
    return func(message *tb.Message) {
        messageContent := strings.TrimSpace(message.Text[len("/remind"):])
        remindMsg, err := decodeReminderMessage(messageContent)
        if err != nil {
            errorMessage := fmt.Sprintf(
                "Could not parse time for message \"%v\"", messageContent)
            log.Printf("Error parsing message %v: %v", messageContent, err.Error())
            trySendMessage(bot, message.Sender, errorMessage)
            return
        }

        log.Printf("Scheduled sending %v to %v", remindMsg, message.Sender)
        duration := time.Until(remindMsg.time)
        timer1 := time.NewTimer(duration)
        <-timer1.C
        trySendMessage(bot, message.Sender, remindMsg.content)
    }
}

func trySendMessage(bot *tb.Bot, sender *tb.User, message string) {
    log.Printf("Sending \"%v\" to \"%v\"", message, sender.ID)
    msg, err := bot.Send(sender, message)
    if err != nil {
        log.Printf("Failed to send message \"%v\" to \"%v\".", message, sender.ID)
    }
    log.Printf("Message \"%v\" sent to \"%v\"", msg.Text, sender.ID)
}

func decodeReminderMessage(message string) (*remindMessage, error) {
    splitResponse := strings.Fields(message)
    timeString := splitResponse[0]
    messageString := strings.Join(splitResponse[1:], " ")

    actual, err := tparse.ParseNow(time.RFC3339, timeString)
    if err != nil {
        return nil, err
    }

    remindMsg := remindMessage{time: actual, content: messageString}
    return &remindMsg, nil
}

type remindMessage struct {
    time    time.Time
    content string
}

func (remindMsg *remindMessage) String() string {
    return fmt.Sprintf("%v at %v", remindMsg.content, remindMsg.time)
}
[ "\"TELEGRAM_BOT_TOKEN\"" ]
[]
[ "TELEGRAM_BOT_TOKEN" ]
[]
["TELEGRAM_BOT_TOKEN"]
go
1
0
api/server.go
package main import ( "context" "os" "github.com/kelseyhightower/envconfig" "github.com/labstack/echo/v4" echoMiddleware "github.com/labstack/echo/v4/middleware" storecache "github.com/shellhub-io/shellhub/api/cache" "github.com/shellhub-io/shellhub/api/pkg/gateway" "github.com/shellhub-io/shellhub/api/routes" apiMiddleware "github.com/shellhub-io/shellhub/api/routes/middleware" "github.com/shellhub-io/shellhub/api/services" "github.com/shellhub-io/shellhub/api/store/mongo" requests "github.com/shellhub-io/shellhub/pkg/api/internalclient" "github.com/shellhub-io/shellhub/pkg/geoip" "github.com/shellhub-io/shellhub/pkg/middleware" "github.com/sirupsen/logrus" "github.com/spf13/cobra" mongodriver "go.mongodb.org/mongo-driver/mongo" "go.mongodb.org/mongo-driver/mongo/options" ) var serverCmd = &cobra.Command{ Use: "server", RunE: func(cmd *cobra.Command, args []string) error { return startServer() }, } // Provides the configuration for the API service. // The values are load from the system environment variables. type config struct { // MongoDB connection string (URI format) MongoURI string `envconfig:"mongo_uri" default:"mongodb://mongo:27017"` // Redis connection stirng (URI format) RedisURI string `envconfig:"redis_uri" default:"redis://redis:6379"` // Enable store cache StoreCache bool `envconfig:"store_cache" default:"false"` // Enable geoip feature GeoIP bool `envconfig:"geoip" default:"false"` } func startServer() error { if os.Getenv("SHELLHUB_ENV") == "development" { logrus.SetLevel(logrus.DebugLevel) } logrus.Info("Starting API server") e := echo.New() e.Use(middleware.Log) e.Use(echoMiddleware.RequestID()) // Populates configuration based on environment variables prefixed with 'API_' var cfg config if err := envconfig.Process("api", &cfg); err != nil { logrus.WithError(err).Fatal("Failed to load environment variables") } logrus.Info("Connecting to MongoDB") clientOptions := options.Client().ApplyURI(cfg.MongoURI) client, err := mongodriver.Connect(context.TODO(), clientOptions) if err != nil { logrus.WithError(err).Fatal("Failed to connect to MongoDB") } if err = client.Ping(context.TODO(), nil); err != nil { logrus.WithError(err).Fatal("Failed to ping MongoDB") } logrus.Info("Running database migrations") if err := mongo.ApplyMigrations(client.Database("main")); err != nil { logrus.WithError(err).Fatal("Failed to apply mongo migrations") } var cache storecache.Cache if cfg.StoreCache { logrus.Info("Using redis as store cache backend") cache, err = storecache.NewRedisCache(cfg.RedisURI) if err != nil { logrus.WithError(err).Error("Failed to configure redis store cache") } } else { logrus.Info("Store cache disabled") cache = storecache.NewNullCache() } requestClient := requests.NewClient() // apply dependency injection through project layers store := mongo.NewStore(client.Database("main"), cache) var locator geoip.Locator if cfg.GeoIP { logrus.Info("Using GeoIp for geolocation") locator, err = geoip.NewGeoLite2() if err != nil { logrus.WithError(err).Fatalln("Failed to init GeoIp") } } else { logrus.Info("GeoIp is disabled") locator = geoip.NewNullGeoLite() } service := services.NewService(store, nil, nil, cache, requestClient, locator) handler := routes.NewHandler(service) e.Use(func(next echo.HandlerFunc) echo.HandlerFunc { return func(c echo.Context) error { apicontext := gateway.NewContext(service, c) return next(apicontext) } }) // Public routes for external access through API gateway publicAPI := e.Group("/api") // Internal routes only accessible by other services in the local 
container network internalAPI := e.Group("/internal") internalAPI.GET(routes.AuthRequestURL, gateway.Handler(handler.AuthRequest), gateway.Middleware(routes.AuthMiddleware)) publicAPI.POST(routes.AuthDeviceURL, gateway.Handler(handler.AuthDevice)) publicAPI.POST(routes.AuthDeviceURLV2, gateway.Handler(handler.AuthDevice)) publicAPI.POST(routes.AuthUserURL, gateway.Handler(handler.AuthUser)) publicAPI.POST(routes.AuthUserURLV2, gateway.Handler(handler.AuthUser)) publicAPI.GET(routes.AuthUserURLV2, gateway.Handler(handler.AuthUserInfo)) internalAPI.GET(routes.AuthUserTokenURL, gateway.Handler(handler.AuthGetToken)) publicAPI.POST(routes.AuthPublicKeyURL, gateway.Handler(handler.AuthPublicKey)) publicAPI.GET(routes.AuthUserTokenURL, gateway.Handler(handler.AuthSwapToken)) publicAPI.PATCH(routes.UpdateUserDataURL, gateway.Handler(handler.UpdateUserData)) publicAPI.PATCH(routes.UpdateUserPasswordURL, gateway.Handler(handler.UpdateUserPassword)) publicAPI.PUT(routes.EditSessionRecordStatusURL, gateway.Handler(handler.EditSessionRecordStatus)) publicAPI.GET(routes.GetSessionRecordURL, gateway.Handler(handler.GetSessionRecord)) publicAPI.GET(routes.GetDeviceListURL, apiMiddleware.Authorize(gateway.Handler(handler.GetDeviceList))) publicAPI.GET(routes.GetDeviceURL, apiMiddleware.Authorize(gateway.Handler(handler.GetDevice))) publicAPI.DELETE(routes.DeleteDeviceURL, gateway.Handler(handler.DeleteDevice)) publicAPI.PATCH(routes.RenameDeviceURL, gateway.Handler(handler.RenameDevice)) internalAPI.POST(routes.OfflineDeviceURL, gateway.Handler(handler.OfflineDevice)) internalAPI.POST(routes.HeartbeatDeviceURL, gateway.Handler(handler.HeartbeatDevice)) internalAPI.GET(routes.LookupDeviceURL, gateway.Handler(handler.LookupDevice)) publicAPI.PATCH(routes.UpdateStatusURL, gateway.Handler(handler.UpdatePendingStatus)) publicAPI.POST(routes.CreateTagURL, gateway.Handler(handler.CreateDeviceTag)) publicAPI.DELETE(routes.RemoveTagURL, gateway.Handler(handler.RemoveDeviceTag)) publicAPI.PUT(routes.UpdateTagURL, gateway.Handler(handler.UpdateDeviceTag)) publicAPI.GET(routes.GetTagsURL, gateway.Handler(handler.GetTags)) publicAPI.PUT(routes.RenameTagURL, gateway.Handler(handler.RenameTag)) publicAPI.DELETE(routes.DeleteTagsURL, gateway.Handler(handler.DeleteTag)) publicAPI.GET(routes.GetSessionsURL, apiMiddleware.Authorize(gateway.Handler(handler.GetSessionList))) publicAPI.GET(routes.GetSessionURL, apiMiddleware.Authorize(gateway.Handler(handler.GetSession))) internalAPI.PATCH(routes.SetSessionAuthenticatedURL, gateway.Handler(handler.SetSessionAuthenticated)) internalAPI.POST(routes.CreateSessionURL, gateway.Handler(handler.CreateSession)) internalAPI.POST(routes.FinishSessionURL, gateway.Handler(handler.FinishSession)) internalAPI.POST(routes.KeepAliveSessionURL, gateway.Handler(handler.KeepAliveSession)) internalAPI.POST(routes.RecordSessionURL, gateway.Handler(handler.RecordSession)) publicAPI.GET(routes.PlaySessionURL, gateway.Handler(handler.PlaySession)) publicAPI.DELETE(routes.RecordSessionURL, gateway.Handler(handler.DeleteRecordedSession)) publicAPI.GET(routes.GetStatsURL, apiMiddleware.Authorize(gateway.Handler(handler.GetStats))) publicAPI.GET(routes.GetPublicKeysURL, gateway.Handler(handler.GetPublicKeys)) publicAPI.POST(routes.CreatePublicKeyURL, gateway.Handler(handler.CreatePublicKey)) publicAPI.PUT(routes.UpdatePublicKeyURL, gateway.Handler(handler.UpdatePublicKey)) publicAPI.DELETE(routes.DeletePublicKeyURL, gateway.Handler(handler.DeletePublicKey)) internalAPI.GET(routes.GetPublicKeyURL, 
gateway.Handler(handler.GetPublicKey)) internalAPI.POST(routes.CreatePrivateKeyURL, gateway.Handler(handler.CreatePrivateKey)) internalAPI.POST(routes.EvaluateKeyURL, gateway.Handler(handler.EvaluateKey)) publicAPI.POST(routes.AddPublicKeyTagURL, gateway.Handler(handler.AddPublicKeyTag)) publicAPI.DELETE(routes.RemovePublicKeyTagURL, gateway.Handler(handler.RemovePublicKeyTag)) publicAPI.PUT(routes.UpdatePublicKeyTagsURL, gateway.Handler(handler.UpdatePublicKeyTags)) publicAPI.GET(routes.ListNamespaceURL, gateway.Handler(handler.GetNamespaceList)) publicAPI.GET(routes.GetNamespaceURL, gateway.Handler(handler.GetNamespace)) publicAPI.POST(routes.CreateNamespaceURL, gateway.Handler(handler.CreateNamespace)) publicAPI.DELETE(routes.DeleteNamespaceURL, gateway.Handler(handler.DeleteNamespace)) publicAPI.PUT(routes.EditNamespaceURL, gateway.Handler(handler.EditNamespace)) publicAPI.POST(routes.AddNamespaceUserURL, gateway.Handler(handler.AddNamespaceUser)) publicAPI.DELETE(routes.RemoveNamespaceUserURL, gateway.Handler(handler.RemoveNamespaceUser)) publicAPI.PATCH(routes.EditNamespaceUserURL, gateway.Handler(handler.EditNamespaceUser)) e.Logger.Fatal(e.Start(":8080")) return nil }
[ "\"SHELLHUB_ENV\"" ]
[]
[ "SHELLHUB_ENV" ]
[]
["SHELLHUB_ENV"]
go
1
0
cmd/entrypoint/env.go
package entrypoint import ( "fmt" "os" "os/exec" "path" "strings" ) func GetAgentService() string { return env("AGENT_SERVICE", "") } func GetAmbassadorId() string { id := os.Getenv("AMBASSADOR_ID") if id != "" { return id } svc := GetAgentService() if svc != "" { return fmt.Sprintf("intercept-%s", svc) } return "default" } func GetAmbassadorNamespace() string { return env("AMBASSADOR_NAMESPACE", "default") } func GetAmbassadorFieldSelector() string { return env("AMBASSADOR_FIELD_SELECTOR", "") } func GetAmbassadorLabelSelector() string { return env("AMBASSADOR_LABEL_SELECTOR", "") } func GetAmbassadorRoot() string { return env("ambassador_root", "/ambassador") } func GetAmbassadorConfigBaseDir() string { return env("AMBASSADOR_CONFIG_BASE_DIR", GetAmbassadorRoot()) } func GetEnvoyDir() string { return env("ENVOY_DIR", path.Join(GetAmbassadorConfigBaseDir(), "envoy")) } func GetEnvoyBootstrapFile() string { return env("ENVOY_BOOTSTRAP_FILE", path.Join(GetAmbassadorConfigBaseDir(), "bootstrap-ads.json")) } func GetEnvoyBaseId() string { return env("AMBASSADOR_ENVOY_BASE_ID", "0") } func GetAppDir() string { return env("APPDIR", GetAmbassadorRoot()) } func GetConfigDir() string { return env("config_dir", path.Join(GetAmbassadorConfigBaseDir(), "ambassador-config")) } func GetSnapshotDir() string { return env("snapshot_dir", path.Join(GetAmbassadorConfigBaseDir(), "snapshots")) } func GetEnvoyConfigFile() string { return env("envoy_config_file", path.Join(GetEnvoyDir(), "envoy.json")) } func GetAmbassadorDebug() string { return env("AMBASSADOR_DEBUG", "") } func isDebug(name string) bool { return strings.Contains(GetAmbassadorDebug(), name) } func GetEnvoyFlags() []string { result := []string{"-c", GetEnvoyBootstrapFile(), "--base-id", GetEnvoyBaseId()} svc := GetAgentService() if svc != "" { result = append(result, "--drain-time-s", "1") } else { result = append(result, "--drain-time-s", env("AMBASSADOR_DRAIN_TIME", "600")) } if isDebug("envoy") { result = append(result, "-l", "debug") } else { result = append(result, "-l", "error") } return result } func GetDiagdBindAddress() string { return env("AMBASSADOR_DIAGD_BIND_ADDREASS", "") } func IsDiagdOnly() bool { return envbool("DIAGD_ONLY") } func GetDiagdBindPort() string { return env("AMBASSADOR_DIAGD_BIND_PORT", "8004") } func IsEnvoyAvailable() bool { _, err := exec.LookPath("envoy") return err == nil } func GetDiagdFlags() []string { result := []string{"--notices", path.Join(GetAmbassadorConfigBaseDir(), "notices.json")} if isDebug("diagd") { result = append(result, "--debug") } diagdBind := GetDiagdBindAddress() if diagdBind != "" { result = append(result, "--host", diagdBind) } // XXX: this was not in entrypoint.sh result = append(result, "--port", GetDiagdBindPort()) if IsDiagdOnly() { result = append(result, "--no-checks", "--no-envoy") } else { result = append(result, "--kick", fmt.Sprintf("kill -HUP %d", os.Getpid())) // XXX: this was not in entrypoint.sh if !IsEnvoyAvailable() { result = append(result, "--no-envoy") } } return result } func GetDiagdArgs() []string { return append([]string{GetSnapshotDir(), GetEnvoyBootstrapFile(), GetEnvoyConfigFile()}, GetDiagdFlags()...) 
} func IsAmbassadorSingleNamespace() bool { return envbool("AMBASSADOR_SINGLE_NAMESPACE") } func IsEdgeStack() bool { _, err := os.Stat("/ambassador/.edge_stack") if err == nil { return true } else if os.IsNotExist(err) { return false } else { panic(err) } } func GetLicenseSecretName() string { return env("AMBASSADOR_AES_SECRET_NAME", "ambassador-edge-stack") } func GetLicenseSecretNamespace() string { return env("AMBASSADOR_AES_SECRET_NAMESPACE", GetAmbassadorNamespace()) } func GetEventHost() string { return env("DEV_AMBASSADOR_EVENT_HOST", fmt.Sprintf("http://localhost:%s", GetDiagdBindPort())) } func GetEventPath() string { return env("DEV_AMBASSADOR_EVENT_PATH", fmt.Sprintf("_internal/v0")) } func GetSidecarHost() string { return env("DEV_AMBASSADOR_SIDECAR_HOST", "http://localhost:8500") } func GetSidecarPath() string { return env("DEV_AMBASSADOR_SIDECAR_PATH", "_internal/v0") } func GetEventUrl() string { return fmt.Sprintf("%s/%s/watt", GetEventHost(), GetEventPath()) } func GetSidecarUrl() string { return fmt.Sprintf("%s/%s/watt", GetSidecarHost(), GetSidecarPath()) } func IsKnativeEnabled() bool { return strings.ToLower(env("AMBASSADOR_KNATIVE_SUPPORT", "")) == "true" }
[ "\"AMBASSADOR_ID\"" ]
[]
[ "AMBASSADOR_ID" ]
[]
["AMBASSADOR_ID"]
go
1
0
NewsReader/settings.py
# -*- coding: utf-8 -*-
import os

os_env = os.environ


class Config(object):
    SECRET_KEY = os_env.get('NEWSREADER_SECRET', 'secret-key')  # TODO: Change me
    APP_DIR = os.path.abspath(os.path.dirname(__file__))  # This directory
    PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir))
    BCRYPT_LOG_ROUNDS = 13
    ASSETS_DEBUG = False
    DEBUG_TB_ENABLED = False  # Disable Debug toolbar
    DEBUG_TB_INTERCEPT_REDIRECTS = False
    CACHE_TYPE = 'simple'  # Can be "memcached", "redis", etc.


class ProdConfig(Config):
    """Production configuration."""
    ENV = 'prod'
    DEBUG = False
    SQLALCHEMY_DATABASE_URI = 'postgresql://localhost/example'  # TODO: Change me
    DEBUG_TB_ENABLED = False  # Disable Debug toolbar


class DevConfig(Config):
    """Development configuration."""
    ENV = 'dev'
    DEBUG = True
    DB_NAME = 'dev.db'  # Put the db file in project root
    DB_PATH = os.path.join(Config.PROJECT_ROOT, DB_NAME)
    SQLALCHEMY_DATABASE_URI = 'sqlite:///{0}'.format(DB_PATH)
    DEBUG_TB_ENABLED = True
    ASSETS_DEBUG = True  # Don't bundle/minify static assets
    CACHE_TYPE = 'simple'  # Can be "memcached", "redis", etc.


class TestConfig(Config):
    TESTING = True
    DEBUG = True
    SQLALCHEMY_DATABASE_URI = 'sqlite://'
    BCRYPT_LOG_ROUNDS = 1  # For faster tests
    WTF_CSRF_ENABLED = False  # Allows form testing
[]
[]
[]
[]
[]
python
0
0
pilot/pilot.go
package pilot import ( "bytes" "fmt" log "github.com/Sirupsen/logrus" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/client" "golang.org/x/net/context" "io" "io/ioutil" "os" "path/filepath" "sort" "strings" "sync" "text/template" "time" "path" "github.com/docker/docker/api/types/mount" ) /** Label: aliyun.log: /var/log/hello.log[:json][;/var/log/abc/def.log[:txt]] */ const LABEL_SERVICE_LOGS = "aliyun.logs." const ENV_SERVICE_LOGS = "aliyun_logs_" const SYMLINK_LOGS_BASE = "/acs/log/" const LABEL_PROJECT = "com.docker.compose.project" const LABEL_PROJECT_SWARM_MODE = "com.docker.stack.namespace" const LABEL_SERVICE = "com.docker.compose.service" const LABEL_SERVICE_SWARM_MODE = "com.docker.swarm.service.name" const LABEL_POD = "io.kubernetes.pod.name" const ERR_ALREADY_STARTED = "already started" type Pilot struct { mutex sync.Mutex tpl *template.Template base string dockerClient *client.Client reloadChan chan bool lastReload time.Time piloter Piloter } type Piloter interface { Name() string Start() error Reload() error Stop() error ConfHome() string ConfPathOf(container string) string OnDestroyEvent(container string) error } var NeedCreateSymlink = false func Run(tpl string, baseDir string) error { if os.Getenv("CREATE_SYMLINK") == "true" { NeedCreateSymlink = true } p, err := New(tpl, baseDir) if err != nil { panic(err) } return p.watch() } func New(tplStr string, baseDir string) (*Pilot, error) { tpl, err := template.New("pilot").Parse(tplStr) if err != nil { return nil, err } if os.Getenv("DOCKER_API_VERSION") == "" { os.Setenv("DOCKER_API_VERSION", "1.23") } client, err := client.NewEnvClient() if err != nil { return nil, err } piloter, _ := NewFluentdPiloter() if os.Getenv("PILOT_TYPE") == PILOT_FILEBEAT { piloter, _ = NewFilebeatPiloter() } return &Pilot{ dockerClient: client, tpl: tpl, base: baseDir, reloadChan: make(chan bool), piloter: piloter, }, nil } func (p *Pilot) watch() error { if err := p.processAllContainers(); err != nil { return err } err := p.piloter.Start() if err != nil && ERR_ALREADY_STARTED != err.Error() { return err } p.lastReload = time.Now() go p.doReload() ctx := context.Background() filter := filters.NewArgs() filter.Add("type", "container") options := types.EventsOptions{ Filters: filter, } msgs, errs := p.client().Events(ctx, options) for { select { case msg := <-msgs: if err := p.processEvent(msg); err != nil { log.Errorf("fail to process event: %v, %v", msg, err) } case err := <-errs: log.Warnf("error: %v", err) if err == io.EOF || err == io.ErrUnexpectedEOF { return nil } else { msgs, errs = p.client().Events(ctx, options) } } } } type LogConfig struct { Name string HostDir string ContainerDir string Format string FormatConfig map[string]string File string Tags map[string]string Target string TimeKey string TimeFormat string HostKey string } func (p *Pilot) cleanConfigs() error { confDir := fmt.Sprintf(p.piloter.ConfHome()) d, err := os.Open(confDir) if err != nil { return err } defer d.Close() names, err := d.Readdirnames(-1) if err != nil { return err } for _, name := range names { path := filepath.Join(confDir, name) stat, err := os.Stat(filepath.Join(confDir, name)) if err != nil { return err } if stat.Mode().IsRegular() { if err := os.Remove(path); err != nil { return err } } } return nil } func (p *Pilot) processAllContainers() error { p.mutex.Lock() defer p.mutex.Unlock() opts := types.ContainerListOptions{} containers, err := 
p.client().ContainerList(context.Background(), opts) if err != nil { return err } //clean config if err := p.cleanConfigs(); err != nil { return err } containerIDs := make(map[string]string, 0) for _, c := range containers { if _, ok := containerIDs[c.ID]; !ok { containerIDs[c.ID] = c.ID } if c.State == "removing" { continue } containerJSON, err := p.client().ContainerInspect(context.Background(), c.ID) if err != nil { return err } if err = p.newContainer(&containerJSON); err != nil { log.Errorf("fail to process container %s: %v", containerJSON.Name, err) } } return p.processAllVolumeSymlink(containerIDs) } func (p *Pilot) processAllVolumeSymlink(existingContainerIDs map[string]string) error { symlinkContainerIDs := p.listAllSymlinkContainer() for containerID := range symlinkContainerIDs { if _, ok := existingContainerIDs[containerID]; !ok { p.removeVolumeSymlink(containerID) } } return nil } func (p *Pilot) listAllSymlinkContainer() map[string]string { containerIDs := make(map[string]string, 0) linkBaseDir := path.Join(p.base, SYMLINK_LOGS_BASE) if _, err := os.Stat(linkBaseDir); err != nil && os.IsNotExist(err) { return containerIDs } projects := listSubDirectory(linkBaseDir) for _, project := range projects { projectPath := path.Join(linkBaseDir, project) services := listSubDirectory(projectPath) for _, service := range services { servicePath := path.Join(projectPath, service) containers := listSubDirectory(servicePath) for _, containerID := range containers { if _, ok := containerIDs[containerID]; !ok { containerIDs[containerID] = containerID } } } } return containerIDs } func listSubDirectory(path string) []string { subdirs := make([]string, 0) if _, err := os.Stat(path); os.IsNotExist(err) { return subdirs } files, err := ioutil.ReadDir(path) if err != nil { log.Warnf("read %s error: %v", path, err) return subdirs } for _, file := range files { if file.IsDir() { subdirs = append(subdirs, file.Name()) } } return subdirs } func putIfNotEmpty(store map[string]string, key, value string) { if key == "" || value == "" { return } store[key] = value } func container(containerJSON *types.ContainerJSON) map[string]string { labels := containerJSON.Config.Labels c := make(map[string]string) putIfNotEmpty(c, "docker_app", labels[LABEL_PROJECT]) putIfNotEmpty(c, "docker_app", labels[LABEL_PROJECT_SWARM_MODE]) putIfNotEmpty(c, "docker_service", labels[LABEL_SERVICE]) putIfNotEmpty(c, "docker_service", labels[LABEL_SERVICE_SWARM_MODE]) putIfNotEmpty(c, "k8s_pod", labels[LABEL_POD]) putIfNotEmpty(c, "docker_container", strings.TrimPrefix(containerJSON.Name, "/")) extension(c, containerJSON) return c } func (p *Pilot) newContainer(containerJSON *types.ContainerJSON) error { id := containerJSON.ID jsonLogPath := containerJSON.LogPath mounts := containerJSON.Mounts labels := containerJSON.Config.Labels env := containerJSON.Config.Env //logConfig.containerDir match types.mountPoint /** 场景: 1. 容器一个路径,中间有多级目录对应宿主机不同的目录 2. 
containerdir对应的目录不是直接挂载的,挂载的是它上级的目录 查找:从containerdir开始查找最近的一层挂载 */ container := container(containerJSON) for _, e := range env { if !strings.HasPrefix(e, ENV_SERVICE_LOGS) { continue } envLabel := strings.SplitN(e, "=", 2) if len(envLabel) == 2 { labelKey := strings.Replace(envLabel[0], "_", ".", -1) labels[labelKey] = envLabel[1] } } logConfigs, err := p.getLogConfigs(jsonLogPath, mounts, labels) if err != nil { return err } if len(logConfigs) == 0 { log.Debugf("%s has not log config, skip", id) return nil } // create symlink p.createVolumeSymlink(containerJSON) //pilot.findMounts(logConfigs, jsonLogPath, mounts) //生成配置 logConfig, err := p.render(id, container, logConfigs) if err != nil { return err } //TODO validate config before save //log.Debugf("container %s log config: %s", id, logConfig) if err = ioutil.WriteFile(p.piloter.ConfPathOf(id), []byte(logConfig), os.FileMode(0644)); err != nil { return err } p.tryReload() return nil } func (p *Pilot) tryReload() { select { case p.reloadChan <- true: default: log.Info("Another load is pending") } } func (p *Pilot) doReload() { log.Info("Reload gorouting is ready") for { <-p.reloadChan p.reload() } } func (p *Pilot) delContainer(id string) error { p.removeVolumeSymlink(id) // refactor in the future if p.piloter.Name() == PILOT_FLUENTD { clean := func() { log.Infof("Try removing log config %s", id) if err := os.Remove(p.piloter.ConfPathOf(id)); err != nil { log.Warnf("removing %s log config failure", id) return } p.tryReload() } time.AfterFunc(15*time.Minute, clean) return nil } else { return p.piloter.OnDestroyEvent(id) } } func (p *Pilot) client() *client.Client { return p.dockerClient } func (p *Pilot) processEvent(msg events.Message) error { containerId := msg.Actor.ID ctx := context.Background() switch msg.Action { case "start", "restart": log.Debugf("Process container start event: %s", containerId) if p.exists(containerId) { log.Debugf("%s is already exists.", containerId) return nil } containerJSON, err := p.client().ContainerInspect(ctx, containerId) if err != nil { return err } return p.newContainer(&containerJSON) case "destroy": log.Debugf("Process container destory event: %s", containerId) err := p.delContainer(containerId) if err != nil { log.Warnf("Process container destory event error: %s, %s", containerId, err.Error()) } } return nil } func (p *Pilot) hostDirOf(path string, mounts map[string]types.MountPoint) string { confPath := path for { if point, ok := mounts[path]; ok { if confPath == path { return point.Source } else { relPath, err := filepath.Rel(path, confPath) if err != nil { panic(err) } return fmt.Sprintf("%s/%s", point.Source, relPath) } } path = filepath.Dir(path) if path == "/" || path == "." 
			break
		}
	}
	return ""
}

func (p *Pilot) parseTags(tags string) (map[string]string, error) {
	tagMap := make(map[string]string)
	if tags == "" {
		return tagMap, nil
	}

	kvArray := strings.Split(tags, ",")
	for _, kv := range kvArray {
		arr := strings.Split(kv, "=")
		if len(arr) != 2 {
			return nil, fmt.Errorf("%s is not a valid k=v format", kv)
		}
		key := strings.TrimSpace(arr[0])
		value := strings.TrimSpace(arr[1])
		if key == "" || value == "" {
			return nil, fmt.Errorf("%s is not a valid k=v format", kv)
		}
		tagMap[key] = value
	}
	return tagMap, nil
}

func (p *Pilot) parseLogConfig(name string, info *LogInfoNode, jsonLogPath string, mounts map[string]types.MountPoint) (*LogConfig, error) {
	path := info.value
	if path == "" {
		return nil, fmt.Errorf("path for %s is empty", name)
	}

	tags := info.get("tags")
	tagMap, err := p.parseTags(tags)
	if err != nil {
		return nil, fmt.Errorf("parse tags for %s error: %v", name, err)
	}

	target := info.get("target")

	timeKey := info.get("time_key")
	if timeKey == "" {
		timeKey = "@timestamp"
	}

	timeFormat := info.get("time_format")
	if timeFormat == "" {
		timeFormat = "%Y-%m-%dT%H:%M:%S.%L"
	}

	hostKey := info.get("host_key")
	if hostKey == "" {
		hostKey = "host"
	}

	if path == "stdout" {
		logFile := filepath.Base(jsonLogPath)
		if p.piloter.Name() == PILOT_FILEBEAT {
			logFile = logFile + "*"
		}
		return &LogConfig{
			Name:         name,
			HostDir:      filepath.Join(p.base, filepath.Dir(jsonLogPath)),
			Format:       "json",
			File:         logFile,
			Tags:         tagMap,
			FormatConfig: map[string]string{"time_format": "%Y-%m-%dT%H:%M:%S.%NZ"},
			Target:       target,
			TimeKey:      timeKey,
			TimeFormat:   timeFormat,
			HostKey:      hostKey,
		}, nil
	}

	if !filepath.IsAbs(path) {
		return nil, fmt.Errorf("%s must be an absolute path for %s", path, name)
	}

	containerDir := filepath.Dir(path)
	file := filepath.Base(path)
	if file == "" {
		return nil, fmt.Errorf("%s must be a file path, not a directory, for %s", path, name)
	}

	hostDir := p.hostDirOf(containerDir, mounts)
	if hostDir == "" {
		return nil, fmt.Errorf("in log %s: %s is not mounted on the host", name, path)
	}

	format := info.children["format"]
	if format == nil {
		format = newLogInfoNode("none")
	}

	formatConfig, err := Convert(format)
	if err != nil {
		return nil, fmt.Errorf("in log %s: format error: %v", name, err)
	}

	// special handling for the regexp format
	if format.value == "regexp" {
		format.value = fmt.Sprintf("/%s/", formatConfig["pattern"])
		delete(formatConfig, "pattern")
	}

	return &LogConfig{
		Name:         name,
		ContainerDir: containerDir,
		Format:       format.value,
		File:         file,
		Tags:         tagMap,
		HostDir:      filepath.Join(p.base, hostDir),
		FormatConfig: formatConfig,
		Target:       target,
		TimeKey:      timeKey,
		TimeFormat:   timeFormat,
		HostKey:      hostKey,
	}, nil
}

// LogInfoNode stores log labels as a tree: each dot-separated label segment
// becomes a child node, with the label value held on the leaf.
type LogInfoNode struct {
	value    string
	children map[string]*LogInfoNode
}

func newLogInfoNode(value string) *LogInfoNode {
	return &LogInfoNode{
		value:    value,
		children: make(map[string]*LogInfoNode),
	}
}

func (node *LogInfoNode) insert(keys []string, value string) error {
	if len(keys) == 0 {
		return nil
	}
	key := keys[0]
	if len(keys) > 1 {
		if child, ok := node.children[key]; ok {
			child.insert(keys[1:], value)
		} else {
			return fmt.Errorf("%s has no parent node", key)
		}
	} else {
		child := newLogInfoNode(value)
		node.children[key] = child
	}
	return nil
}

func (node *LogInfoNode) get(key string) string {
	if child, ok := node.children[key]; ok {
		return child.value
	}
	return ""
}

// getLogConfigs builds a LogConfig for every label carrying the LABEL_SERVICE_LOGS prefix.
func (p *Pilot) getLogConfigs(jsonLogPath string, mounts []types.MountPoint, labels map[string]string) ([]*LogConfig, error) {
	var ret []*LogConfig

	mountsMap := make(map[string]types.MountPoint)
	for _, mount := range mounts {
		mountsMap[mount.Destination] = mount
	}
	var labelNames []string //sort keys
	for k := range labels {
		labelNames = append(labelNames, k)
	}
	sort.Strings(labelNames)

	root := newLogInfoNode("")
	for _, k := range labelNames {
		if !strings.HasPrefix(k, LABEL_SERVICE_LOGS) || strings.Count(k, ".") == 1 {
			continue
		}
		logLabel := strings.TrimPrefix(k, LABEL_SERVICE_LOGS)
		if err := root.insert(strings.Split(logLabel, "."), labels[k]); err != nil {
			return nil, err
		}
	}

	for name, node := range root.children {
		path := node.value
		if path != "stdout" && strings.Contains(path, ",") {
			paths := strings.Split(path, ",")
			hasTags := false
			childrenTags := ""
			if node.get("tags") != "" {
				hasTags = true
				childrenTags = node.children["tags"].value
			}
			for index, v := range paths {
				tags := fmt.Sprintf("stream=%s", v)
				vArray := strings.Split(v, ":")
				if len(vArray) == 2 {
					v = strings.TrimSpace(vArray[1])
					tags = fmt.Sprintf("%s=%s", strings.TrimSpace(vArray[0]), v)
				}
				if hasTags {
					node.children["tags"].value = fmt.Sprintf("%s,%s", childrenTags, tags)
				} else {
					node.insert([]string{"tags"}, tags)
				}
				if node.get("target") == "" {
					node.insert([]string{"target"}, name)
				}
				node.value = v
				logConfig, err := p.parseLogConfig(fmt.Sprintf("%s-%d", name, index), node, jsonLogPath, mountsMap)
				if err != nil {
					return nil, err
				}
				ret = append(ret, logConfig)
			}
		} else {
			logConfig, err := p.parseLogConfig(name, node, jsonLogPath, mountsMap)
			if err != nil {
				return nil, err
			}
			ret = append(ret, logConfig)
		}
	}
	return ret, nil
}

func (p *Pilot) exists(containId string) bool {
	if _, err := os.Stat(p.piloter.ConfPathOf(containId)); os.IsNotExist(err) {
		return false
	}
	return true
}

func (p *Pilot) render(containerId string, container map[string]string, configList []*LogConfig) (string, error) {
	for _, config := range configList {
		log.Infof("logs: %s = %v", containerId, config)
	}

	output := os.Getenv("FLUENTD_OUTPUT")
	if p.piloter.Name() == PILOT_FILEBEAT {
		output = os.Getenv("FILEBEAT_OUTPUT")
	}

	var buf bytes.Buffer
	context := map[string]interface{}{
		"containerId": containerId,
		"configList":  configList,
		"container":   container,
		"output":      output,
	}
	if err := p.tpl.Execute(&buf, context); err != nil {
		return "", err
	}
	return buf.String(), nil
}

func (p *Pilot) reload() error {
	p.mutex.Lock()
	defer p.mutex.Unlock()

	log.Infof("Reload %s", p.piloter.Name())
	interval := time.Now().Sub(p.lastReload)
	time.Sleep(30*time.Second - interval)
	log.Info("Start reloading")
	err := p.piloter.Reload()
	p.lastReload = time.Now()
	return err
}

func (p *Pilot) createVolumeSymlink(containerJSON *types.ContainerJSON) error {
	if !NeedCreateSymlink {
		return nil
	}

	linkBaseDir := path.Join(p.base, SYMLINK_LOGS_BASE)
	if _, err := os.Stat(linkBaseDir); err != nil && os.IsNotExist(err) {
		if err := os.MkdirAll(linkBaseDir, 0777); err != nil {
			log.Errorf("create %s error: %v", linkBaseDir, err)
		}
	}

	applicationInfo := container(containerJSON)
	containerLinkBaseDir := path.Join(linkBaseDir, applicationInfo["docker_app"], applicationInfo["docker_service"], containerJSON.ID)

	symlinks := make(map[string]string, 0)
	for _, mountPoint := range containerJSON.Mounts {
		if mountPoint.Type != mount.TypeVolume {
			continue
		}
		volume, err := p.client().VolumeInspect(context.Background(), mountPoint.Name)
		if err != nil {
			log.Errorf("inspect volume %s error: %v", mountPoint.Name, err)
			continue
		}
		symlink := path.Join(containerLinkBaseDir, volume.Name)
		if _, ok := symlinks[volume.Mountpoint]; !ok {
			symlinks[volume.Mountpoint] = symlink
		}
	}
	if len(symlinks) == 0 {
		return nil
	}

	if _, err := os.Stat(containerLinkBaseDir); err != nil &&
		os.IsNotExist(err) {
		if err := os.MkdirAll(containerLinkBaseDir, 0777); err != nil {
			log.Errorf("create %s error: %v", containerLinkBaseDir, err)
			return err
		}
	}

	for mountPoint, symlink := range symlinks {
		err := os.Symlink(path.Join(p.base, mountPoint), symlink)
		if err != nil && !os.IsExist(err) {
			log.Errorf("create symlink %s error: %v", symlink, err)
		}
	}
	return nil
}

func (p *Pilot) removeVolumeSymlink(containerId string) error {
	if !NeedCreateSymlink {
		return nil
	}

	linkBaseDir := path.Join(p.base, SYMLINK_LOGS_BASE)
	containerLinkDirs, _ := filepath.Glob(path.Join(linkBaseDir, "*", "*", containerId))
	if containerLinkDirs == nil {
		return nil
	}
	for _, containerLinkDir := range containerLinkDirs {
		if err := os.RemoveAll(containerLinkDir); err != nil {
			log.Warnf("remove error: %v", err)
		}
	}
	return nil
}
[ "\"CREATE_SYMLINK\"", "\"DOCKER_API_VERSION\"", "\"PILOT_TYPE\"", "\"FLUENTD_OUTPUT\"", "\"FILEBEAT_OUTPUT\"" ]
[]
[ "CREATE_SYMLINK", "DOCKER_API_VERSION", "FILEBEAT_OUTPUT", "FLUENTD_OUTPUT", "PILOT_TYPE" ]
[]
["CREATE_SYMLINK", "DOCKER_API_VERSION", "FILEBEAT_OUTPUT", "FLUENTD_OUTPUT", "PILOT_TYPE"]
go
5
0