filename (string, 4–198 chars) | content (string, 25–939k chars) | environment (list) | variablearg (list) | constarg (list) | variableargjson (string, 1 value) | constargjson (string, 2–3.9k chars) | lang (string, 3 values) | constargcount (float64, 0–129, nullable) | variableargcount (float64, 0–0, nullable) | sentence (string, 1 value) |
---|---|---|---|---|---|---|---|---|---|---|
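Each row below pairs a source file with the environment-variable accesses extracted from it. As a minimal, hypothetical sketch of how a single row could be inspected (the keys mirror the header above; how rows are loaded is assumed here, not specified by this dump):

```python
import json

# One dataset row as a plain Python dict (hypothetical loading step omitted).
# Values are abbreviated from the first row below.
row = {
    "filename": "cmd/explore/main.go",
    "content": "package main ...",               # full source text in the real row
    "environment": ["\"GITHUB_ACCESS_TOKEN\""],  # raw extracted tokens
    "variablearg": [],
    "constarg": ["GITHUB_ACCESS_TOKEN"],
    "constargjson": json.dumps(["GITHUB_ACCESS_TOKEN"]),
    "lang": "go",
    "constargcount": 1.0,
    "variableargcount": 0.0,
}

# Report which environment variables the file reads via constant names.
print(f"{row['filename']} ({row['lang']}): {int(row['constargcount'])} constant env arg(s)")
for name in row["constarg"]:
    print("  reads", name)
```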
cmd/explore/main.go
|
package main
import (
"fmt"
"os"
"github.com/willmadison/ossexplorer"
"github.com/willmadison/ossexplorer/cli"
"github.com/willmadison/ossexplorer/github"
"github.com/willmadison/ossexplorer/mocks"
)
func main() {
env := cli.Environment{
Stderr: os.Stderr,
Stdout: os.Stdout,
Stdin: os.Stdin,
}
accessToken := os.Getenv("GITHUB_ACCESS_TOKEN")
var explorer ossexplorer.Explorer
if accessToken == "" {
fmt.Fprintf(env.Stdout, "warning: no GITHUB_ACCESS_TOKEN found, defaulting to an in-memory OSS explorer.\n")
org := ossexplorer.Organization{Name: "InMemoryFakeOrg"}
explorer = mocks.NewStubExplorer(org, nil)
} else {
explorer = github.NewExplorer(accessToken)
}
os.Exit(cli.Run(explorer, env))
}
| environment: ["\"GITHUB_ACCESS_TOKEN\""] | variablearg: [] | constarg: ["GITHUB_ACCESS_TOKEN"] | variableargjson: [] | constargjson: ["GITHUB_ACCESS_TOKEN"] | lang: go | constargcount: 1 | variableargcount: 0 | sentence: |
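The row above illustrates what the extraction columns capture: os.Getenv("GITHUB_ACCESS_TOKEN") takes a string constant, so the name is recorded in constarg (and constargjson) while variablearg stays empty. A rough, hypothetical sketch of that kind of constant-argument extraction follows; it is a regex illustration only, not the pipeline actually used to build this dataset.

```python
import re

# Hypothetical helper: collect constant string arguments passed to os.Getenv
# in Go source. A real extractor would use a parser; this regex is illustrative.
GETENV_CONST = re.compile(r'os\.Getenv\(\s*"([^"]+)"\s*\)')

def extract_const_env_args(go_source):
    """Return constant environment-variable names read via os.Getenv."""
    return GETENV_CONST.findall(go_source)

sample = 'accessToken := os.Getenv("GITHUB_ACCESS_TOKEN")'
print(extract_const_env_args(sample))  # ['GITHUB_ACCESS_TOKEN']
```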
pkg/client/platform_configuration_security/platform_configuration_security_client.go
|
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// Code generated by go-swagger; DO NOT EDIT.
package platform_configuration_security
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"github.com/go-openapi/runtime"
strfmt "github.com/go-openapi/strfmt"
)
// New creates a new platform configuration security API client.
func New(transport runtime.ClientTransport, formats strfmt.Registry) *Client {
return &Client{transport: transport, formats: formats}
}
/*
Client for platform configuration security API
*/
type Client struct {
transport runtime.ClientTransport
formats strfmt.Registry
}
/*
CreateActiveDirectoryConfiguration creates active directory configuration
Creates a new Active Directory configuration.
*/
func (a *Client) CreateActiveDirectoryConfiguration(params *CreateActiveDirectoryConfigurationParams, authInfo runtime.ClientAuthInfoWriter) (*CreateActiveDirectoryConfigurationCreated, error) {
// TODO: Validate the params before sending
if params == nil {
params = NewCreateActiveDirectoryConfigurationParams()
}
result, err := a.transport.Submit(&runtime.ClientOperation{
ID: "create-active-directory-configuration",
Method: "POST",
PathPattern: "/platform/configuration/security/realms/active-directory",
ProducesMediaTypes: []string{""},
ConsumesMediaTypes: []string{""},
Schemes: []string{"https"},
Params: params,
Reader: &CreateActiveDirectoryConfigurationReader{formats: a.formats},
AuthInfo: authInfo,
Context: params.Context,
Client: params.HTTPClient,
})
if err != nil {
return nil, err
}
return result.(*CreateActiveDirectoryConfigurationCreated), nil
}
/*
CreateEnrollmentToken creates enrollment token
Creates an enrollment token.
*/
func (a *Client) CreateEnrollmentToken(params *CreateEnrollmentTokenParams, authInfo runtime.ClientAuthInfoWriter) (*CreateEnrollmentTokenOK, error) {
// TODO: Validate the params before sending
if params == nil {
params = NewCreateEnrollmentTokenParams()
}
result, err := a.transport.Submit(&runtime.ClientOperation{
ID: "create-enrollment-token",
Method: "POST",
PathPattern: "/platform/configuration/security/enrollment-tokens",
ProducesMediaTypes: []string{""},
ConsumesMediaTypes: []string{""},
Schemes: []string{"https"},
Params: params,
Reader: &CreateEnrollmentTokenReader{formats: a.formats},
AuthInfo: authInfo,
Context: params.Context,
Client: params.HTTPClient,
})
if err != nil {
return nil, err
}
return result.(*CreateEnrollmentTokenOK), nil
}
/*
CreateLdapConfiguration creates l d a p configuration
Creates a new LDAP configuration.
*/
func (a *Client) CreateLdapConfiguration(params *CreateLdapConfigurationParams, authInfo runtime.ClientAuthInfoWriter) (*CreateLdapConfigurationCreated, error) {
// TODO: Validate the params before sending
if params == nil {
params = NewCreateLdapConfigurationParams()
}
result, err := a.transport.Submit(&runtime.ClientOperation{
ID: "create-ldap-configuration",
Method: "POST",
PathPattern: "/platform/configuration/security/realms/ldap",
ProducesMediaTypes: []string{""},
ConsumesMediaTypes: []string{""},
Schemes: []string{"https"},
Params: params,
Reader: &CreateLdapConfigurationReader{formats: a.formats},
AuthInfo: authInfo,
Context: params.Context,
Client: params.HTTPClient,
})
if err != nil {
return nil, err
}
return result.(*CreateLdapConfigurationCreated), nil
}
/*
CreateSamlConfiguration creates s a m l configuration
Creates a new SAML configuration.
*/
func (a *Client) CreateSamlConfiguration(params *CreateSamlConfigurationParams, authInfo runtime.ClientAuthInfoWriter) (*CreateSamlConfigurationCreated, error) {
// TODO: Validate the params before sending
if params == nil {
params = NewCreateSamlConfigurationParams()
}
result, err := a.transport.Submit(&runtime.ClientOperation{
ID: "create-saml-configuration",
Method: "POST",
PathPattern: "/platform/configuration/security/realms/saml",
ProducesMediaTypes: []string{""},
ConsumesMediaTypes: []string{""},
Schemes: []string{"https"},
Params: params,
Reader: &CreateSamlConfigurationReader{formats: a.formats},
AuthInfo: authInfo,
Context: params.Context,
Client: params.HTTPClient,
})
if err != nil {
return nil, err
}
return result.(*CreateSamlConfigurationCreated), nil
}
/*
CreateSecurityDeployment creates security deployment
Creates a new security deployment configuration.
*/
func (a *Client) CreateSecurityDeployment(params *CreateSecurityDeploymentParams, authInfo runtime.ClientAuthInfoWriter) (*CreateSecurityDeploymentCreated, error) {
// TODO: Validate the params before sending
if params == nil {
params = NewCreateSecurityDeploymentParams()
}
result, err := a.transport.Submit(&runtime.ClientOperation{
ID: "create-security-deployment",
Method: "POST",
PathPattern: "/platform/configuration/security/deployment",
ProducesMediaTypes: []string{""},
ConsumesMediaTypes: []string{""},
Schemes: []string{"https"},
Params: params,
Reader: &CreateSecurityDeploymentReader{formats: a.formats},
AuthInfo: authInfo,
Context: params.Context,
Client: params.HTTPClient,
})
if err != nil {
return nil, err
}
return result.(*CreateSecurityDeploymentCreated), nil
}
/*
DeleteActiveDirectoryConfiguration deletes active directory configuration
Deletes a single Active Directory configuration.
*/
func (a *Client) DeleteActiveDirectoryConfiguration(params *DeleteActiveDirectoryConfigurationParams, authInfo runtime.ClientAuthInfoWriter) (*DeleteActiveDirectoryConfigurationOK, error) {
// TODO: Validate the params before sending
if params == nil {
params = NewDeleteActiveDirectoryConfigurationParams()
}
result, err := a.transport.Submit(&runtime.ClientOperation{
ID: "delete-active-directory-configuration",
Method: "DELETE",
PathPattern: "/platform/configuration/security/realms/active-directory/{realm_id}",
ProducesMediaTypes: []string{""},
ConsumesMediaTypes: []string{""},
Schemes: []string{"https"},
Params: params,
Reader: &DeleteActiveDirectoryConfigurationReader{formats: a.formats},
AuthInfo: authInfo,
Context: params.Context,
Client: params.HTTPClient,
})
if err != nil {
return nil, err
}
return result.(*DeleteActiveDirectoryConfigurationOK), nil
}
/*
DeleteEnrollmentToken deletes enrollment token
Revokes and deletes the enrollment token.
*/
func (a *Client) DeleteEnrollmentToken(params *DeleteEnrollmentTokenParams, authInfo runtime.ClientAuthInfoWriter) (*DeleteEnrollmentTokenOK, error) {
// TODO: Validate the params before sending
if params == nil {
params = NewDeleteEnrollmentTokenParams()
}
result, err := a.transport.Submit(&runtime.ClientOperation{
ID: "delete-enrollment-token",
Method: "DELETE",
PathPattern: "/platform/configuration/security/enrollment-tokens/{token}",
ProducesMediaTypes: []string{""},
ConsumesMediaTypes: []string{""},
Schemes: []string{"https"},
Params: params,
Reader: &DeleteEnrollmentTokenReader{formats: a.formats},
AuthInfo: authInfo,
Context: params.Context,
Client: params.HTTPClient,
})
if err != nil {
return nil, err
}
return result.(*DeleteEnrollmentTokenOK), nil
}
/*
DeleteLdapConfiguration deletes l d a p configuration
Deletes a single LDAP configuration.
*/
func (a *Client) DeleteLdapConfiguration(params *DeleteLdapConfigurationParams, authInfo runtime.ClientAuthInfoWriter) (*DeleteLdapConfigurationOK, error) {
// TODO: Validate the params before sending
if params == nil {
params = NewDeleteLdapConfigurationParams()
}
result, err := a.transport.Submit(&runtime.ClientOperation{
ID: "delete-ldap-configuration",
Method: "DELETE",
PathPattern: "/platform/configuration/security/realms/ldap/{realm_id}",
ProducesMediaTypes: []string{""},
ConsumesMediaTypes: []string{""},
Schemes: []string{"https"},
Params: params,
Reader: &DeleteLdapConfigurationReader{formats: a.formats},
AuthInfo: authInfo,
Context: params.Context,
Client: params.HTTPClient,
})
if err != nil {
return nil, err
}
return result.(*DeleteLdapConfigurationOK), nil
}
/*
DeleteSamlConfiguration deletes s a m l configuration
Deletes a single SAML configuration.
*/
func (a *Client) DeleteSamlConfiguration(params *DeleteSamlConfigurationParams, authInfo runtime.ClientAuthInfoWriter) (*DeleteSamlConfigurationOK, error) {
// TODO: Validate the params before sending
if params == nil {
params = NewDeleteSamlConfigurationParams()
}
result, err := a.transport.Submit(&runtime.ClientOperation{
ID: "delete-saml-configuration",
Method: "DELETE",
PathPattern: "/platform/configuration/security/realms/saml/{realm_id}",
ProducesMediaTypes: []string{""},
ConsumesMediaTypes: []string{""},
Schemes: []string{"https"},
Params: params,
Reader: &DeleteSamlConfigurationReader{formats: a.formats},
AuthInfo: authInfo,
Context: params.Context,
Client: params.HTTPClient,
})
if err != nil {
return nil, err
}
return result.(*DeleteSamlConfigurationOK), nil
}
/*
DisableSecurityDeployment disables the security deployment
Disables the existing security deployment configuration.
*/
func (a *Client) DisableSecurityDeployment(params *DisableSecurityDeploymentParams, authInfo runtime.ClientAuthInfoWriter) (*DisableSecurityDeploymentOK, error) {
// TODO: Validate the params before sending
if params == nil {
params = NewDisableSecurityDeploymentParams()
}
result, err := a.transport.Submit(&runtime.ClientOperation{
ID: "disable-security-deployment",
Method: "POST",
PathPattern: "/platform/configuration/security/deployment/_disable",
ProducesMediaTypes: []string{""},
ConsumesMediaTypes: []string{""},
Schemes: []string{"https"},
Params: params,
Reader: &DisableSecurityDeploymentReader{formats: a.formats},
AuthInfo: authInfo,
Context: params.Context,
Client: params.HTTPClient,
})
if err != nil {
return nil, err
}
return result.(*DisableSecurityDeploymentOK), nil
}
/*
EnableSecurityDeployment enables the security deployment
Enables the existing security deployment configuration.
*/
func (a *Client) EnableSecurityDeployment(params *EnableSecurityDeploymentParams, authInfo runtime.ClientAuthInfoWriter) (*EnableSecurityDeploymentOK, error) {
// TODO: Validate the params before sending
if params == nil {
params = NewEnableSecurityDeploymentParams()
}
result, err := a.transport.Submit(&runtime.ClientOperation{
ID: "enable-security-deployment",
Method: "POST",
PathPattern: "/platform/configuration/security/deployment/_enable",
ProducesMediaTypes: []string{""},
ConsumesMediaTypes: []string{""},
Schemes: []string{"https"},
Params: params,
Reader: &EnableSecurityDeploymentReader{formats: a.formats},
AuthInfo: authInfo,
Context: params.Context,
Client: params.HTTPClient,
})
if err != nil {
return nil, err
}
return result.(*EnableSecurityDeploymentOK), nil
}
/*
GetActiveDirectoryConfiguration gets active directory configuration
Retrieves a single Active Directory security realm configuration.
*/
func (a *Client) GetActiveDirectoryConfiguration(params *GetActiveDirectoryConfigurationParams, authInfo runtime.ClientAuthInfoWriter) (*GetActiveDirectoryConfigurationOK, error) {
// TODO: Validate the params before sending
if params == nil {
params = NewGetActiveDirectoryConfigurationParams()
}
result, err := a.transport.Submit(&runtime.ClientOperation{
ID: "get-active-directory-configuration",
Method: "GET",
PathPattern: "/platform/configuration/security/realms/active-directory/{realm_id}",
ProducesMediaTypes: []string{""},
ConsumesMediaTypes: []string{""},
Schemes: []string{"https"},
Params: params,
Reader: &GetActiveDirectoryConfigurationReader{formats: a.formats},
AuthInfo: authInfo,
Context: params.Context,
Client: params.HTTPClient,
})
if err != nil {
return nil, err
}
return result.(*GetActiveDirectoryConfigurationOK), nil
}
/*
GetEnrollmentTokens gets enrollment tokens
Retrieves a list of active enrollment tokens.
*/
func (a *Client) GetEnrollmentTokens(params *GetEnrollmentTokensParams, authInfo runtime.ClientAuthInfoWriter) (*GetEnrollmentTokensOK, error) {
// TODO: Validate the params before sending
if params == nil {
params = NewGetEnrollmentTokensParams()
}
result, err := a.transport.Submit(&runtime.ClientOperation{
ID: "get-enrollment-tokens",
Method: "GET",
PathPattern: "/platform/configuration/security/enrollment-tokens",
ProducesMediaTypes: []string{""},
ConsumesMediaTypes: []string{""},
Schemes: []string{"https"},
Params: params,
Reader: &GetEnrollmentTokensReader{formats: a.formats},
AuthInfo: authInfo,
Context: params.Context,
Client: params.HTTPClient,
})
if err != nil {
return nil, err
}
return result.(*GetEnrollmentTokensOK), nil
}
/*
GetLdapConfiguration gets l d a p configuration
Retrieves a single LDAP security realm configuration.
*/
func (a *Client) GetLdapConfiguration(params *GetLdapConfigurationParams, authInfo runtime.ClientAuthInfoWriter) (*GetLdapConfigurationOK, error) {
// TODO: Validate the params before sending
if params == nil {
params = NewGetLdapConfigurationParams()
}
result, err := a.transport.Submit(&runtime.ClientOperation{
ID: "get-ldap-configuration",
Method: "GET",
PathPattern: "/platform/configuration/security/realms/ldap/{realm_id}",
ProducesMediaTypes: []string{""},
ConsumesMediaTypes: []string{""},
Schemes: []string{"https"},
Params: params,
Reader: &GetLdapConfigurationReader{formats: a.formats},
AuthInfo: authInfo,
Context: params.Context,
Client: params.HTTPClient,
})
if err != nil {
return nil, err
}
return result.(*GetLdapConfigurationOK), nil
}
/*
GetSamlConfiguration gets s a m l configuration
Retrieves a single SAML security realm configuration.
*/
func (a *Client) GetSamlConfiguration(params *GetSamlConfigurationParams, authInfo runtime.ClientAuthInfoWriter) (*GetSamlConfigurationOK, error) {
// TODO: Validate the params before sending
if params == nil {
params = NewGetSamlConfigurationParams()
}
result, err := a.transport.Submit(&runtime.ClientOperation{
ID: "get-saml-configuration",
Method: "GET",
PathPattern: "/platform/configuration/security/realms/saml/{realm_id}",
ProducesMediaTypes: []string{""},
ConsumesMediaTypes: []string{""},
Schemes: []string{"https"},
Params: params,
Reader: &GetSamlConfigurationReader{formats: a.formats},
AuthInfo: authInfo,
Context: params.Context,
Client: params.HTTPClient,
})
if err != nil {
return nil, err
}
return result.(*GetSamlConfigurationOK), nil
}
/*
GetSecurityDeployment gets current security deployment
Retrieves the current security deployment.
*/
func (a *Client) GetSecurityDeployment(params *GetSecurityDeploymentParams, authInfo runtime.ClientAuthInfoWriter) (*GetSecurityDeploymentOK, error) {
// TODO: Validate the params before sending
if params == nil {
params = NewGetSecurityDeploymentParams()
}
result, err := a.transport.Submit(&runtime.ClientOperation{
ID: "get-security-deployment",
Method: "GET",
PathPattern: "/platform/configuration/security/deployment",
ProducesMediaTypes: []string{""},
ConsumesMediaTypes: []string{""},
Schemes: []string{"https"},
Params: params,
Reader: &GetSecurityDeploymentReader{formats: a.formats},
AuthInfo: authInfo,
Context: params.Context,
Client: params.HTTPClient,
})
if err != nil {
return nil, err
}
return result.(*GetSecurityDeploymentOK), nil
}
/*
GetSecurityRealmConfigurations lists security realm configurations
Retrieves a list of security realm configurations.
*/
func (a *Client) GetSecurityRealmConfigurations(params *GetSecurityRealmConfigurationsParams, authInfo runtime.ClientAuthInfoWriter) (*GetSecurityRealmConfigurationsOK, error) {
// TODO: Validate the params before sending
if params == nil {
params = NewGetSecurityRealmConfigurationsParams()
}
result, err := a.transport.Submit(&runtime.ClientOperation{
ID: "get-security-realm-configurations",
Method: "GET",
PathPattern: "/platform/configuration/security/realms",
ProducesMediaTypes: []string{""},
ConsumesMediaTypes: []string{""},
Schemes: []string{"https"},
Params: params,
Reader: &GetSecurityRealmConfigurationsReader{formats: a.formats},
AuthInfo: authInfo,
Context: params.Context,
Client: params.HTTPClient,
})
if err != nil {
return nil, err
}
return result.(*GetSecurityRealmConfigurationsOK), nil
}
/*
GetTLSCertificate gets TLS certificate
Retrieves a certificate in the TLS certificate chain.
*/
func (a *Client) GetTLSCertificate(params *GetTLSCertificateParams, authInfo runtime.ClientAuthInfoWriter) (*GetTLSCertificateOK, error) {
// TODO: Validate the params before sending
if params == nil {
params = NewGetTLSCertificateParams()
}
result, err := a.transport.Submit(&runtime.ClientOperation{
ID: "get-tls-certificate",
Method: "GET",
PathPattern: "/platform/configuration/security/tls/{service_name}",
ProducesMediaTypes: []string{""},
ConsumesMediaTypes: []string{""},
Schemes: []string{"https"},
Params: params,
Reader: &GetTLSCertificateReader{formats: a.formats},
AuthInfo: authInfo,
Context: params.Context,
Client: params.HTTPClient,
})
if err != nil {
return nil, err
}
return result.(*GetTLSCertificateOK), nil
}
/*
ReorderSecurityRealms reorders security realms
Reorder security realms.
*/
func (a *Client) ReorderSecurityRealms(params *ReorderSecurityRealmsParams, authInfo runtime.ClientAuthInfoWriter) (*ReorderSecurityRealmsOK, error) {
// TODO: Validate the params before sending
if params == nil {
params = NewReorderSecurityRealmsParams()
}
result, err := a.transport.Submit(&runtime.ClientOperation{
ID: "reorder-security-realms",
Method: "POST",
PathPattern: "/platform/configuration/security/realms/_reorder",
ProducesMediaTypes: []string{""},
ConsumesMediaTypes: []string{""},
Schemes: []string{"https"},
Params: params,
Reader: &ReorderSecurityRealmsReader{formats: a.formats},
AuthInfo: authInfo,
Context: params.Context,
Client: params.HTTPClient,
})
if err != nil {
return nil, err
}
return result.(*ReorderSecurityRealmsOK), nil
}
/*
SetTLSCertificate sets TLS certificate
Creates or updates the TLS certificate chain.
*/
func (a *Client) SetTLSCertificate(params *SetTLSCertificateParams, authInfo runtime.ClientAuthInfoWriter) (*SetTLSCertificateAccepted, error) {
// TODO: Validate the params before sending
if params == nil {
params = NewSetTLSCertificateParams()
}
result, err := a.transport.Submit(&runtime.ClientOperation{
ID: "set-tls-certificate",
Method: "POST",
PathPattern: "/platform/configuration/security/tls/{service_name}",
ProducesMediaTypes: []string{""},
ConsumesMediaTypes: []string{""},
Schemes: []string{"https"},
Params: params,
Reader: &SetTLSCertificateReader{formats: a.formats},
AuthInfo: authInfo,
Context: params.Context,
Client: params.HTTPClient,
})
if err != nil {
return nil, err
}
return result.(*SetTLSCertificateAccepted), nil
}
/*
UpdateActiveDirectoryConfiguration updates active directory configuration
Updates an existing Active Directory configuration.
*/
func (a *Client) UpdateActiveDirectoryConfiguration(params *UpdateActiveDirectoryConfigurationParams, authInfo runtime.ClientAuthInfoWriter) (*UpdateActiveDirectoryConfigurationOK, error) {
// TODO: Validate the params before sending
if params == nil {
params = NewUpdateActiveDirectoryConfigurationParams()
}
result, err := a.transport.Submit(&runtime.ClientOperation{
ID: "update-active-directory-configuration",
Method: "PUT",
PathPattern: "/platform/configuration/security/realms/active-directory/{realm_id}",
ProducesMediaTypes: []string{""},
ConsumesMediaTypes: []string{""},
Schemes: []string{"https"},
Params: params,
Reader: &UpdateActiveDirectoryConfigurationReader{formats: a.formats},
AuthInfo: authInfo,
Context: params.Context,
Client: params.HTTPClient,
})
if err != nil {
return nil, err
}
return result.(*UpdateActiveDirectoryConfigurationOK), nil
}
/*
UpdateLdapConfiguration updates l d a p configuration
Updates an existing LDAP configuration.
*/
func (a *Client) UpdateLdapConfiguration(params *UpdateLdapConfigurationParams, authInfo runtime.ClientAuthInfoWriter) (*UpdateLdapConfigurationOK, error) {
// TODO: Validate the params before sending
if params == nil {
params = NewUpdateLdapConfigurationParams()
}
result, err := a.transport.Submit(&runtime.ClientOperation{
ID: "update-ldap-configuration",
Method: "PUT",
PathPattern: "/platform/configuration/security/realms/ldap/{realm_id}",
ProducesMediaTypes: []string{""},
ConsumesMediaTypes: []string{""},
Schemes: []string{"https"},
Params: params,
Reader: &UpdateLdapConfigurationReader{formats: a.formats},
AuthInfo: authInfo,
Context: params.Context,
Client: params.HTTPClient,
})
if err != nil {
return nil, err
}
return result.(*UpdateLdapConfigurationOK), nil
}
/*
UpdateSamlConfiguration updates s a m l configuration
Updates an existing SAML configuration.
*/
func (a *Client) UpdateSamlConfiguration(params *UpdateSamlConfigurationParams, authInfo runtime.ClientAuthInfoWriter) (*UpdateSamlConfigurationOK, error) {
// TODO: Validate the params before sending
if params == nil {
params = NewUpdateSamlConfigurationParams()
}
result, err := a.transport.Submit(&runtime.ClientOperation{
ID: "update-saml-configuration",
Method: "PUT",
PathPattern: "/platform/configuration/security/realms/saml/{realm_id}",
ProducesMediaTypes: []string{""},
ConsumesMediaTypes: []string{""},
Schemes: []string{"https"},
Params: params,
Reader: &UpdateSamlConfigurationReader{formats: a.formats},
AuthInfo: authInfo,
Context: params.Context,
Client: params.HTTPClient,
})
if err != nil {
return nil, err
}
return result.(*UpdateSamlConfigurationOK), nil
}
// SetTransport changes the transport on the client
func (a *Client) SetTransport(transport runtime.ClientTransport) {
a.transport = transport
}
| environment: [] | variablearg: [] | constarg: [] | variableargjson: [] | constargjson: [] | lang: go | constargcount: null | variableargcount: null | sentence: null |
src/galileo/dht/StorageNode.java
|
/*
Copyright (c) 2013, Colorado State University
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are
disclaimed. In no event shall the copyright holder or contributors be liable for
any direct, indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused and on
any theory of liability, whether in contract, strict liability, or tort
(including negligence or otherwise) arising in any way out of the use of this
software, even if advised of the possibility of such damage.
*/
package galileo.dht;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Calendar;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.json.JSONArray;
import org.json.JSONObject;
import galileo.bmp.Bitmap;
import galileo.bmp.GeoavailabilityGrid;
import galileo.bmp.GeoavailabilityQuery;
import galileo.bmp.QueryTransform;
import galileo.comm.BlockRequest;
import galileo.comm.BlockResponse;
import galileo.comm.FilesystemAction;
import galileo.comm.FilesystemEvent;
import galileo.comm.FilesystemRequest;
import galileo.comm.GalileoEventMap;
import galileo.comm.MetadataEvent;
import galileo.comm.MetadataRequest;
import galileo.comm.MetadataResponse;
import galileo.comm.QueryEvent;
import galileo.comm.QueryRequest;
import galileo.comm.QueryResponse;
import galileo.comm.StorageEvent;
import galileo.comm.StorageRequest;
import galileo.comm.TemporalType;
import galileo.config.SystemConfig;
import galileo.dataset.Block;
import galileo.dataset.Metadata;
import galileo.dataset.SpatialProperties;
import galileo.dataset.SpatialRange;
import galileo.dataset.TemporalProperties;
import galileo.dht.hash.HashException;
import galileo.dht.hash.HashTopologyException;
import galileo.dht.hash.TemporalHash;
import galileo.event.Event;
import galileo.event.EventContext;
import galileo.event.EventHandler;
import galileo.event.EventReactor;
import galileo.fs.FileSystemException;
import galileo.fs.GeospatialFileSystem;
import galileo.net.ClientConnectionPool;
import galileo.net.MessageListener;
import galileo.net.NetworkDestination;
import galileo.net.PortTester;
import galileo.net.RequestListener;
import galileo.net.ServerMessageRouter;
import galileo.serialization.SerializationException;
import galileo.util.GeoHash;
import galileo.util.Version;
/**
* Primary communication component in the Galileo DHT. StorageNodes service
* client requests and communication from other StorageNodes to disseminate
* state information throughout the DHT.
*
* @author malensek
*/
public class StorageNode implements RequestListener {
private static final Logger logger = Logger.getLogger("galileo");
private StatusLine nodeStatus;
private String hostname; // The name of this host
private String canonicalHostname; // The fqdn of this host
private int port;
private String rootDir;
private String resultsDir;
private int numCores;
private File pidFile;
private File fsFile;
private NetworkInfo network;
private ServerMessageRouter messageRouter;
private ClientConnectionPool connectionPool;
private Map<String, GeospatialFileSystem> fsMap;
private GalileoEventMap eventMap = new GalileoEventMap();
private EventReactor eventReactor = new EventReactor(this, eventMap);
private List<ClientRequestHandler> requestHandlers;
private ConcurrentHashMap<String, QueryTracker> queryTrackers = new ConcurrentHashMap<>();
// private String sessionId;
public StorageNode() throws IOException {
try {
this.hostname = InetAddress.getLocalHost().getHostName();
this.canonicalHostname = InetAddress.getLocalHost().getCanonicalHostName();
} catch (UnknownHostException e) {
this.hostname = System.getenv("HOSTNAME");
if (hostname == null || hostname.length() == 0)
throw new UnknownHostException(
"Failed to identify host name of the storage node. Details follow: " + e.getMessage());
}
this.hostname = this.hostname.toLowerCase();
this.canonicalHostname = this.canonicalHostname.toLowerCase();
this.port = NetworkConfig.DEFAULT_PORT;
SystemConfig.reload();
this.rootDir = SystemConfig.getRootDir();
this.resultsDir = this.rootDir + "/.results";
this.nodeStatus = new StatusLine(SystemConfig.getRootDir() + "/status.txt");
this.fsFile = new File(SystemConfig.getRootDir() + "/storage-node.fs");
if (!this.fsFile.exists())
this.fsFile.createNewFile();
String pid = System.getProperty("pidFile");
if (pid != null) {
this.pidFile = new File(pid);
}
this.numCores = Runtime.getRuntime().availableProcessors();
this.requestHandlers = new CopyOnWriteArrayList<ClientRequestHandler>();
}
/**
* Begins Server execution. This method attempts to fail fast to provide
* immediate feedback to wrapper scripts or other user interface tools. Only
* once all the prerequisite components are initialized and in a sane state
* will the StorageNode begin accepting connections.
*/
public void start() throws Exception {
Version.printSplash();
/* First, make sure the port we're binding to is available. */
nodeStatus.set("Attempting to bind to port");
if (PortTester.portAvailable(port) == false) {
nodeStatus.set("Could not bind to port " + port + ".");
throw new IOException("Could not bind to port " + port);
}
/*
* Read the network configuration; if this is invalid, there is no need
* to execute the rest of this method.
*/
nodeStatus.set("Reading network configuration");
network = NetworkConfig.readNetworkDescription(SystemConfig.getNetworkConfDir());
// identifying the group of this storage node
boolean nodeFound = false;
for (NodeInfo node : network.getAllNodes()) {
String nodeName = node.getHostname();
if (nodeName.equals(this.hostname) || nodeName.equals(this.canonicalHostname)) {
nodeFound = true;
break;
}
}
if (!nodeFound)
throw new Exception("Failed to identify the group of the storage node. "
+ "Type 'hostname' in the terminal and make sure that it matches the "
+ "hostnames specified in the network configuration files.");
nodeStatus.set("Restoring filesystems");
File resultsDir = new File(this.resultsDir);
if (!resultsDir.exists())
resultsDir.mkdirs();
this.fsMap = new HashMap<>();
try (BufferedReader br = new BufferedReader(new FileReader(fsFile))) {
String jsonSource = br.readLine();
if (jsonSource != null && jsonSource.length() > 0) {
JSONObject fsJSON = new JSONObject(jsonSource);
for (String fsName : JSONObject.getNames(fsJSON)) {
try {
GeospatialFileSystem gfs = GeospatialFileSystem.restoreState(this, network,
fsJSON.getJSONObject(fsName));
this.fsMap.put(fsName, gfs);
logger.info("Successfully restored the filesystem - " + fsName);
} catch (Exception e) {
logger.log(Level.SEVERE, "could not restore filesystem - " + fsName, e);
}
}
}
} catch (IOException ioe) {
logger.log(Level.SEVERE, "Failed to restore filesystems", ioe);
}
/* Set up our Shutdown hook */
Runtime.getRuntime().addShutdownHook(new ShutdownHandler());
/* Pre-scheduler setup tasks */
connectionPool = new ClientConnectionPool();
connectionPool.addListener(eventReactor);
/* Start listening for incoming messages. */
messageRouter = new ServerMessageRouter();
messageRouter.addListener(eventReactor);
messageRouter.listen(port);
nodeStatus.set("Online");
/* Start processing the message loop */
while (true) {
try {
eventReactor.processNextEvent();
} catch (Exception e) {
logger.log(Level.SEVERE, "An exception occurred while processing next event. "
+ "Storage node is still up and running. Exception details follow:", e);
}
}
}
private void sendEvent(NodeInfo node, Event event) throws IOException {
connectionPool.sendMessage(node, eventReactor.wrapEvent(event));
}
@EventHandler
public void handleFileSystemRequest(FilesystemRequest request, EventContext context)
throws HashException, IOException, PartitionException {
String name = request.getName();
FilesystemAction action = request.getAction();
List<NodeInfo> nodes = network.getAllNodes();
FilesystemEvent event = new FilesystemEvent(name, action, request.getFeatureList(), request.getSpatialHint());
event.setPrecision(request.getPrecision());
event.setNodesPerGroup(request.getNodesPerGroup());
event.setTemporalType(request.getTemporalType());
for (NodeInfo node : nodes) {
logger.info("Requesting " + node + " to perform a file system action");
sendEvent(node, event);
}
}
@EventHandler
public void handleFileSystem(FilesystemEvent event, EventContext context) {
logger.log(Level.INFO,
"Performing action " + event.getAction().getAction() + " for file system " + event.getName());
if (event.getAction() == FilesystemAction.CREATE) {
GeospatialFileSystem fs = fsMap.get(event.getName());
if (fs == null) {
try {
fs = new GeospatialFileSystem(this, this.rootDir, event.getName(), event.getPrecision(),
event.getNodesPerGroup(), event.getTemporalValue(), this.network, event.getFeatures(),
event.getSpatialHint(), false);
fsMap.put(event.getName(), fs);
} catch (FileSystemException | SerializationException | IOException | PartitionException | HashException
| HashTopologyException e) {
logger.log(Level.SEVERE, "Could not initialize the Galileo File System!", e);
}
}
} else if (event.getAction() == FilesystemAction.DELETE) {
GeospatialFileSystem fs = fsMap.get(event.getName());
if (fs != null) {
fs.shutdown();
fsMap.remove(event.getName());
java.nio.file.Path directory = Paths.get(rootDir + File.separator + event.getName());
try {
Files.walkFileTree(directory, new SimpleFileVisitor<java.nio.file.Path>() {
@Override
public FileVisitResult visitFile(java.nio.file.Path file, BasicFileAttributes attrs)
throws IOException {
Files.delete(file);
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult postVisitDirectory(java.nio.file.Path dir, IOException exc)
throws IOException {
Files.delete(dir);
return FileVisitResult.CONTINUE;
}
});
} catch (IOException e) {
logger.log(Level.SEVERE, "Failed to delete the requested file System!", e);
}
}
}
persistFilesystems();
}
/**
* Handles a storage request from a client. This involves determining where
* the data belongs via a {@link Partitioner} implementation and then
* forwarding the data on to its destination.
*/
@EventHandler
public void handleStorageRequest(StorageRequest request, EventContext context)
throws HashException, IOException, PartitionException {
/* Determine where this block goes. */
Block file = request.getBlock();
String fsName = file.getFilesystem();
if (fsName != null) {
GeospatialFileSystem gfs = this.fsMap.get(fsName);
if (gfs != null) {
Metadata metadata = file.getMetadata();
Partitioner<Metadata> partitioner = gfs.getPartitioner();
NodeInfo node = partitioner.locateData(metadata);
logger.log(Level.INFO, "Storage destination: {0}", node);
StorageEvent store = new StorageEvent(file);
sendEvent(node, store);
} else {
logger.log(Level.WARNING, "No filesystem found for the specified name " + fsName + ". Request ignored");
}
} else {
logger.log(Level.WARNING, "No filesystem name specified to store the block. Request ignored");
}
}
@EventHandler
public void handleStorage(StorageEvent store, EventContext context) {
String fsName = store.getBlock().getFilesystem();
GeospatialFileSystem fs = fsMap.get(fsName);
if (fs != null) {
logger.log(Level.INFO, "Storing block " + store.getBlock() + " to filesystem " + fsName);
try {
fs.storeBlock(store.getBlock());
} catch (FileSystemException | IOException e) {
logger.log(Level.SEVERE, "Something went wrong while storing the block.", e);
}
} else {
logger.log(Level.SEVERE, "Requested file system(" + fsName + ") not found. Ignoring the block.");
}
}
private class ParallelReader implements Runnable {
private Block block;
private GeospatialFileSystem gfs;
private String blockPath;
public ParallelReader(GeospatialFileSystem gfs, String blockPath){
this.gfs = gfs;
this.blockPath = blockPath;
}
public Block getBlock(){
return this.block;
}
@Override
public void run(){
try {
this.block = gfs.retrieveBlock(blockPath);
if(blockPath.startsWith(resultsDir))
new File(blockPath).delete();
} catch (IOException | SerializationException e) {
logger.log(Level.SEVERE, "Failed to retrieve the block", e);
}
}
}
@EventHandler
public void handleBlockRequest(BlockRequest blockRequest, EventContext context) {
String fsName = blockRequest.getFilesystem();
GeospatialFileSystem fs = fsMap.get(fsName);
List<Block> blocks = new ArrayList<Block>();
if (fs != null) {
logger.log(Level.FINE, "Retrieving blocks " + blockRequest.getFilePaths() + " from filesystem " + fsName);
try {
List<String> blockPaths = blockRequest.getFilePaths();
if(blockPaths.size() > 1){
ExecutorService executor = Executors.newFixedThreadPool(Math.min(blockPaths.size(), 2 * numCores));
List<ParallelReader> readers = new ArrayList<>();
for(String blockPath : blockPaths){
ParallelReader pr = new ParallelReader(fs, blockPath);
readers.add(pr);
executor.execute(pr);
}
executor.shutdown();
executor.awaitTermination(10, TimeUnit.MINUTES);
for(ParallelReader reader : readers)
if(reader.getBlock() != null)
blocks.add(reader.getBlock());
} else {
ParallelReader pr = new ParallelReader(fs, blockPaths.get(0));
pr.run();
blocks.add(pr.getBlock());
}
context.sendReply(new BlockResponse(blocks.toArray(new Block[blocks.size()])));
} catch (IOException | InterruptedException e) {
logger.log(Level.SEVERE, "Something went wrong while retrieving the block.", e);
try {
context.sendReply(new BlockResponse(new Block[]{}));
} catch (IOException e1) {
logger.log(Level.SEVERE, "Failed to send response to the original client", e1);
}
}
} else {
logger.log(Level.SEVERE, "Requested file system(" + fsName + ") not found. Returning empty content.");
try {
context.sendReply(new BlockResponse(new Block[]{}));
} catch (IOException e) {
logger.log(Level.SEVERE, "Failed to send response to the original client", e);
}
}
}
/**
* Handles a meta request that seeks information regarding the galileo
* system.
*/
@EventHandler
public void handleMetadataRequest(MetadataRequest request, EventContext context) {
try {
logger.info("Meta Request: " + request.getRequest().getString("kind"));
if ("galileo#filesystem".equalsIgnoreCase(request.getRequest().getString("kind"))) {
JSONObject response = new JSONObject();
response.put("kind", "galileo#filesystem");
response.put("result", new JSONArray());
ClientRequestHandler reqHandler = new ClientRequestHandler(network.getAllDestinations(), context, this);
reqHandler.handleRequest(new MetadataEvent(request.getRequest()), new MetadataResponse(response));
this.requestHandlers.add(reqHandler);
} else if ("galileo#features".equalsIgnoreCase(request.getRequest().getString("kind"))) {
JSONObject response = new JSONObject();
response.put("kind", "galileo#features");
response.put("result", new JSONArray());
ClientRequestHandler reqHandler = new ClientRequestHandler(network.getAllDestinations(), context, this);
reqHandler.handleRequest(new MetadataEvent(request.getRequest()), new MetadataResponse(response));
this.requestHandlers.add(reqHandler);
} else if ("galileo#overview".equalsIgnoreCase(request.getRequest().getString("kind"))) {
JSONObject response = new JSONObject();
response.put("kind", "galileo#overview");
response.put("result", new JSONArray());
ClientRequestHandler reqHandler = new ClientRequestHandler(network.getAllDestinations(), context, this);
reqHandler.handleRequest(new MetadataEvent(request.getRequest()), new MetadataResponse(response));
this.requestHandlers.add(reqHandler);
} else {
JSONObject response = new JSONObject();
response.put("kind", request.getRequest().getString("kind"));
response.put("error", "invalid request");
context.sendReply(new MetadataResponse(response));
}
} catch (Exception e) {
JSONObject response = new JSONObject();
String kind = "unknown";
if (request.getRequest().has("kind"))
kind = request.getRequest().getString("kind");
response.put("kind", kind);
response.put("error", e.getMessage());
try {
context.sendReply(new MetadataResponse(response));
} catch (IOException e1) {
logger.log(Level.SEVERE, "Failed to send response to the original client", e);
}
}
}
@EventHandler
public void handleMetadata(MetadataEvent event, EventContext context) throws IOException {
if ("galileo#filesystem".equalsIgnoreCase(event.getRequest().getString("kind"))) {
JSONObject response = new JSONObject();
response.put("kind", "galileo#filesystem");
JSONArray result = new JSONArray();
for (String fsName : fsMap.keySet()) {
GeospatialFileSystem fs = fsMap.get(fsName);
result.put(fs.obtainState());
}
response.put("result", result);
context.sendReply(new MetadataResponse(response));
} else if ("galileo#overview".equalsIgnoreCase(event.getRequest().getString("kind"))) {
JSONObject request = event.getRequest();
JSONObject response = new JSONObject();
response.put("kind", "galileo#overview");
JSONArray result = new JSONArray();
if (request.has("filesystem") && request.get("filesystem") instanceof JSONArray) {
JSONArray fsNames = request.getJSONArray("filesystem");
for (int i = 0; i < fsNames.length(); i++) {
GeospatialFileSystem fs = fsMap.get(fsNames.getString(i));
if (fs != null) {
JSONArray overview = fs.getOverview();
JSONObject fsOverview = new JSONObject();
fsOverview.put(fsNames.getString(i), overview);
result.put(fsOverview);
} else {
JSONObject fsOverview = new JSONObject();
fsOverview.put(fsNames.getString(i), new JSONArray());
result.put(fsOverview);
}
}
}
response.put("result", result);
logger.info(response.toString());
context.sendReply(new MetadataResponse(response));
} else if ("galileo#features".equalsIgnoreCase(event.getRequest().getString("kind"))) {
JSONObject request = event.getRequest();
JSONObject response = new JSONObject();
response.put("kind", "galileo#features");
JSONArray result = new JSONArray();
if (request.has("filesystem") && request.get("filesystem") instanceof JSONArray) {
JSONArray fsNames = request.getJSONArray("filesystem");
for (int i = 0; i < fsNames.length(); i++) {
GeospatialFileSystem fs = fsMap.get(fsNames.getString(i));
if (fs != null) {
JSONArray features = fs.getFeaturesJSON();
JSONObject fsFeatures = new JSONObject();
fsFeatures.put(fsNames.getString(i), features);
result.put(fsFeatures);
} else {
JSONObject fsFeatures = new JSONObject();
fsFeatures.put(fsNames.getString(i), new JSONArray());
result.put(fsFeatures);
}
}
} else {
for (String fsName : fsMap.keySet()) {
GeospatialFileSystem fs = fsMap.get(fsName);
if (fs != null) {
JSONArray features = fs.getFeaturesJSON();
JSONObject fsFeatures = new JSONObject();
fsFeatures.put(fsName, features);
result.put(fsFeatures);
} else {
JSONObject fsFeatures = new JSONObject();
fsFeatures.put(fsName, new JSONArray());
result.put(fsFeatures);
}
}
}
response.put("result", result);
context.sendReply(new MetadataResponse(response));
} else {
JSONObject response = new JSONObject();
response.put("kind", event.getRequest().getString("kind"));
response.put("result", new JSONArray());
context.sendReply(new MetadataResponse(response));
}
}
/**
* Handles a query request from a client. Query requests result in a number
* of subqueries being performed across the Galileo network.
*
* @throws PartitionException
* @throws HashException
*/
@EventHandler
public void handleQueryRequest(QueryRequest request, EventContext context) {
String featureQueryString = request.getFeatureQueryString();
String metadataQueryString = request.getMetadataQueryString();
logger.log(Level.INFO, "Feature query request: {0}", featureQueryString);
logger.log(Level.INFO, "Metadata query request: {0}", metadataQueryString);
String queryId = String.valueOf(System.currentTimeMillis());
GeospatialFileSystem gfs = this.fsMap.get(request.getFilesystemName());
if (gfs != null) {
QueryResponse response = new QueryResponse(queryId, gfs.getFeaturesRepresentation(), new JSONObject());
Metadata data = new Metadata();
if (request.isTemporal()) {
String[] timeSplit = request.getTime().split("-");
int timeIndex = Arrays.asList(TemporalType.values()).indexOf(gfs.getTemporalType());
if (!timeSplit[timeIndex].contains("x")) {
logger.log(Level.INFO, "Temporal query: {0}", request.getTime());
Calendar c = Calendar.getInstance();
c.setTimeZone(TemporalHash.TIMEZONE);
int year = timeSplit[0].charAt(0) == 'x' ? c.get(Calendar.YEAR) : Integer.parseInt(timeSplit[0]);
int month = timeSplit[1].charAt(0) == 'x' ? c.get(Calendar.MONTH)
: Integer.parseInt(timeSplit[1]) - 1;
int day = timeSplit[2].charAt(0) == 'x' ? c.get(Calendar.DAY_OF_MONTH)
: Integer.parseInt(timeSplit[2]);
int hour = timeSplit[3].charAt(0) == 'x' ? c.get(Calendar.HOUR_OF_DAY)
: Integer.parseInt(timeSplit[3]);
c.set(year, month, day, hour, 0);
data.setTemporalProperties(new TemporalProperties(c.getTimeInMillis()));
}
}
if (request.isSpatial()) {
logger.log(Level.INFO, "Spatial query: {0}", request.getPolygon());
data.setSpatialProperties(new SpatialProperties(new SpatialRange(request.getPolygon())));
}
Partitioner<Metadata> partitioner = gfs.getPartitioner();
List<NodeInfo> nodes;
try {
nodes = partitioner.findDestinations(data);
logger.info("destinations: " + nodes);
QueryEvent qEvent = (request.hasFeatureQuery() || request.hasMetadataQuery())
? new QueryEvent(queryId, request.getFilesystemName(), request.getFeatureQuery(),
request.getMetadataQuery())
: (request.isSpatial())
? new QueryEvent(queryId, request.getFilesystemName(), request.getPolygon())
: new QueryEvent(queryId, request.getFilesystemName(), request.getTime());
if (request.isDryRun()) {
qEvent.enableDryRun();
response.setDryRun(true);
}
if (request.isSpatial())
qEvent.setPolygon(request.getPolygon());
if (request.isTemporal())
qEvent.setTime(request.getTime());
try {
ClientRequestHandler reqHandler = new ClientRequestHandler(new ArrayList<NetworkDestination>(nodes),
context, this);
reqHandler.handleRequest(qEvent, response);
this.requestHandlers.add(reqHandler);
} catch (IOException ioe) {
logger.log(Level.SEVERE,
"Failed to initialize a ClientRequestHandler. Sending unfinished response back to client",
ioe);
try {
context.sendReply(response);
} catch (IOException e) {
logger.log(Level.SEVERE, "Failed to send response back to original client", e);
}
}
} catch (HashException | PartitionException hepe) {
logger.log(Level.SEVERE,
"Failed to identify the destination nodes. Sending unfinished response back to client", hepe);
try {
context.sendReply(response);
} catch (IOException e) {
logger.log(Level.SEVERE, "Failed to send response back to original client", e);
}
}
} else {
try {
QueryResponse response = new QueryResponse(queryId, new JSONArray(), new JSONObject());
context.sendReply(response);
} catch (IOException ioe) {
logger.log(Level.SEVERE, "Failed to send response back to original client", ioe);
}
}
}
private String getResultFilePrefix(String queryId, String fsName, String blockIdentifier) {
return this.resultsDir + "/" + String.format("%s-%s-%s", fsName, queryId, blockIdentifier);
}
private class QueryProcessor implements Runnable {
private String blockPath;
private String pathPrefix;
private GeoavailabilityQuery geoQuery;
private GeoavailabilityGrid grid;
private GeospatialFileSystem gfs;
private Bitmap queryBitmap;
private List<String> resultPaths;
private long fileSize;
public QueryProcessor(GeospatialFileSystem gfs, String blockPath, GeoavailabilityQuery gQuery,
GeoavailabilityGrid grid, Bitmap queryBitmap, String pathPrefix) {
this.gfs = gfs;
this.blockPath = blockPath;
this.geoQuery = gQuery;
this.grid = grid;
this.queryBitmap = queryBitmap;
this.pathPrefix = pathPrefix;
}
@Override
public void run() {
try {
this.resultPaths = this.gfs.query(this.blockPath, this.geoQuery, this.grid, this.queryBitmap,
this.pathPrefix);
for (String resultPath : this.resultPaths)
this.fileSize += new File(resultPath).length();
} catch (IOException | InterruptedException e) {
logger.log(Level.SEVERE, "Something went wrong while querying the filesystem. No results obtained.");
}
}
public long getFileSize() {
return this.fileSize;
}
public List<String> getResultPaths() {
return this.resultPaths;
}
}
/**
* Handles an internal Query request (from another StorageNode)
*/
@EventHandler
public void handleQuery(QueryEvent event, EventContext context) {
long hostFileSize = 0;
long totalProcessingTime = 0;
long blocksProcessed = 0;
int totalNumPaths = 0;
JSONArray header = new JSONArray();
JSONObject blocksJSON = new JSONObject();
JSONArray resultsJSON = new JSONArray();
long processingTime = System.currentTimeMillis();
try {
logger.info(event.getFeatureQueryString());
logger.info(event.getMetadataQueryString());
String fsName = event.getFilesystemName();
GeospatialFileSystem fs = fsMap.get(fsName);
if (fs != null) {
header = fs.getFeaturesRepresentation();
Map<String, List<String>> blockMap = fs.listBlocks(event.getTime(), event.getPolygon(),
event.getMetadataQuery(), event.isDryRun());
if (event.isDryRun()) {
/*
* TODO: Make result of dryRun resemble the format of that
* of non-dry-run so that the end user can retrieve the
* blocks from the block paths
**/
JSONObject responseJSON = new JSONObject();
responseJSON.put("filesystem", event.getFilesystemName());
responseJSON.put("queryId", event.getQueryId());
for (String blockKey : blockMap.keySet()) {
blocksJSON.put(blockKey, new JSONArray(blockMap.get(blockKey)));
}
responseJSON.put("result", blocksJSON);
QueryResponse response = new QueryResponse(event.getQueryId(), header, responseJSON);
response.setDryRun(true);
context.sendReply(response);
return;
}
JSONArray filePaths = new JSONArray();
int totalBlocks = 0;
for (String blockKey : blockMap.keySet()) {
List<String> blocks = blockMap.get(blockKey);
totalBlocks += blocks.size();
for(String block : blocks){
filePaths.put(block);
hostFileSize += new File(block).length();
}
}
if (totalBlocks > 0) {
if (event.getFeatureQuery() != null || event.getPolygon() != null) {
hostFileSize = 0;
filePaths = new JSONArray();
// parallelism is capped at twice the number of available cores
ExecutorService executor = Executors.newFixedThreadPool(Math.min(totalBlocks, 2 * numCores));
List<QueryProcessor> queryProcessors = new ArrayList<>();
GeoavailabilityQuery geoQuery = new GeoavailabilityQuery(event.getFeatureQuery(),
event.getPolygon());
for (String blockKey : blockMap.keySet()) {
GeoavailabilityGrid blockGrid = new GeoavailabilityGrid(blockKey,
GeoHash.MAX_PRECISION * 2 / 3);
Bitmap queryBitmap = null;
if (geoQuery.getPolygon() != null)
queryBitmap = QueryTransform.queryToGridBitmap(geoQuery, blockGrid);
List<String> blocks = blockMap.get(blockKey);
for (String blockPath : blocks) {
QueryProcessor qp = new QueryProcessor(fs, blockPath, geoQuery, blockGrid, queryBitmap,
getResultFilePrefix(event.getQueryId(), fsName, blockKey + blocksProcessed));
blocksProcessed++;
queryProcessors.add(qp);
executor.execute(qp);
}
}
executor.shutdown();
boolean status = executor.awaitTermination(10, TimeUnit.MINUTES);
if (!status)
logger.log(Level.WARNING, "Executor terminated because of the specified timeout=10minutes");
for (QueryProcessor qp : queryProcessors) {
if (qp.getFileSize() > 0) {
hostFileSize += qp.getFileSize();
for (String resultPath : qp.getResultPaths())
filePaths.put(resultPath);
}
}
}
}
totalProcessingTime = System.currentTimeMillis() - processingTime;
totalNumPaths = filePaths.length();
JSONObject resultJSON = new JSONObject();
resultJSON.put("filePath", filePaths);
resultJSON.put("numPaths", totalNumPaths);
resultJSON.put("fileSize", hostFileSize);
resultJSON.put("hostName", this.canonicalHostname);
resultJSON.put("hostPort", this.port);
resultJSON.put("processingTime", totalProcessingTime);
resultsJSON.put(resultJSON);
} else {
logger.log(Level.SEVERE, "Requested file system(" + fsName
+ ") not found. Ignoring the query and returning empty results.");
}
} catch (Exception e) {
logger.log(Level.SEVERE,
"Something went wrong while querying the filesystem. No results obtained. Sending blank list to the client. Issue details follow:",
e);
}
JSONObject responseJSON = new JSONObject();
responseJSON.put("filesystem", event.getFilesystemName());
responseJSON.put("queryId", event.getQueryId());
if (hostFileSize == 0) {
responseJSON.put("result", new JSONArray());
responseJSON.put("hostFileSize", new JSONObject());
responseJSON.put("totalFileSize", 0);
responseJSON.put("totalNumPaths", 0);
responseJSON.put("hostProcessingTime", new JSONObject());
} else {
responseJSON.put("result", resultsJSON);
responseJSON.put("hostFileSize", new JSONObject().put(this.canonicalHostname, hostFileSize));
responseJSON.put("totalFileSize", hostFileSize);
responseJSON.put("totalNumPaths", totalNumPaths);
responseJSON.put("hostProcessingTime", new JSONObject().put(this.canonicalHostname, totalProcessingTime));
}
responseJSON.put("totalProcessingTime", totalProcessingTime);
responseJSON.put("totalBlocksProcessed", blocksProcessed);
QueryResponse response = new QueryResponse(event.getQueryId(), header, responseJSON);
try {
context.sendReply(response);
} catch (IOException ioe) {
logger.log(Level.SEVERE, "Failed to send response back to ClientRequestHandler", ioe);
}
}
@EventHandler
public void handleQueryResponse(QueryResponse response, EventContext context) throws IOException {
QueryTracker tracker = queryTrackers.get(response.getId());
if (tracker == null) {
logger.log(Level.WARNING, "Unknown query response received: {0}", response.getId());
return;
}
}
/**
* Triggered when the request is completed by the
* {@link ClientRequestHandler}
*/
@Override
public void onRequestCompleted(Event response, EventContext context, MessageListener requestHandler) {
try {
logger.info("Sending collective response to the client");
this.requestHandlers.remove(requestHandler);
context.sendReply(response);
} catch (IOException e) {
logger.log(Level.SEVERE, "Failed to send response to the client.", e);
} finally {
System.gc();
}
}
public void persistFilesystems() {
try (BufferedWriter bw = new BufferedWriter(new FileWriter(fsFile))) {
JSONObject fsJSON = new JSONObject();
for (String fsName : fsMap.keySet()) {
GeospatialFileSystem fs = fsMap.get(fsName);
fsJSON.put(fsName, fs.obtainState());
}
bw.write(fsJSON.toString());
} catch (IOException ioe) {
ioe.printStackTrace();
}
}
/**
* Handles cleaning up the system for a graceful shutdown.
*/
private class ShutdownHandler extends Thread {
@Override
public void run() {
/*
* The logging subsystem may have already shut down, so we revert to
* stdout for our final messages
*/
System.out.println("Initiated shutdown.");
try {
connectionPool.forceShutdown();
messageRouter.shutdown();
} catch (Exception e) {
e.printStackTrace();
}
nodeStatus.close();
if (pidFile != null && pidFile.exists()) {
pidFile.delete();
}
persistFilesystems();
for (GeospatialFileSystem fs : fsMap.values())
fs.shutdown();
System.out.println("Goodbye!");
}
}
/**
* Executable entrypoint for a Galileo DHT Storage Node
*/
public static void main(String[] args) {
try {
StorageNode node = new StorageNode();
node.start();
} catch (Exception e) {
logger.log(Level.SEVERE, "Could not start StorageNode.", e);
}
}
}
| environment: ["\"HOSTNAME\""] | variablearg: [] | constarg: ["HOSTNAME"] | variableargjson: [] | constargjson: ["HOSTNAME"] | lang: java | constargcount: 1 | variableargcount: 0 | sentence: |
psycho-server/wsgi.py
|
"""
WSGI config for psycho-server project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'psycho-server.settings.production')
application = get_wsgi_application()
| environment: [] | variablearg: [] | constarg: [] | variableargjson: [] | constargjson: [] | lang: python | constargcount: 0 | variableargcount: 0 | sentence: |
etc/gdocs.py
|
#!/usr/bin/env python
# _*_ coding:utf-8 _*_
from exceptions import KeyError
import os
import requests
class GoogleDoc(object):
"""
A class for accessing a Google document as an object.
Includes the bits necessary for accessing the document and auth and such.
For example:
doc = {
"key": "123456abcdef",
"file_name": "my_google_doc"
}
g = GoogleDoc(**doc)
g.get_auth()
g.get_document()
Will download your google doc to data/file_name.format.
"""
# You can update these values with kwargs.
# In fact, you better pass a key or else it won't work!
key = None
file_format = 'xlsx'
file_name = 'copy'
gid = None
# You can change these with kwargs but it's not recommended.
spreadsheet_url = 'https://spreadsheets.google.com/feeds/download/spreadsheets/Export?key=%(key)s&exportFormat=%(format)s'
if gid:
spreadsheet_url = spreadsheet_url + '&gid=%(gid)s'
new_spreadsheet_url = 'https://docs.google.com/spreadsheets/d/%(key)s/export?format=%(format)s&id=%(key)s'
if gid:
new_spreadsheet_url = new_spreadsheet_url + '&gid=%(gid)s'
auth = None
email = os.environ.get('APPS_GOOGLE_EMAIL', None)
password = os.environ.get('APPS_GOOGLE_PASS', None)
scope = "https://spreadsheets.google.com/feeds/"
service = "wise"
session = "1"
def __init__(self, **kwargs):
"""
Because sometimes, just sometimes, you need to update the class when you instantiate it.
In this case, we need, minimally, a document key.
"""
if kwargs:
if kwargs.items():
for key, value in kwargs.items():
setattr(self, key, value)
def get_auth(self):
"""
Gets an authorization token and adds it to the class.
"""
data = {}
if not self.email or not self.password:
raise KeyError("Error! You're missing some variables. You need to export APPS_GOOGLE_EMAIL and APPS_GOOGLE_PASS.")
else:
data['Email'] = self.email
data['Passwd'] = self.password
data['scope'] = self.scope
data['service'] = self.service
data['session'] = self.session
r = requests.post("https://www.google.com/accounts/ClientLogin", data=data)
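            # The ClientLogin response body is a short list of SID/LSID/Auth
            # lines; the token we need is on the Auth line.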
self.auth = r.content.split('\n')[2].split('Auth=')[1]
def get_document(self):
"""
Uses the authentication token to fetch a google doc.
"""
# Handle basically all the things that can go wrong.
if not self.auth:
raise KeyError("Error! You didn't get an auth token. Something very bad happened. File a bug?")
elif not self.key:
raise KeyError("Error! You forgot to pass a key to the class.")
else:
headers = {}
headers['Authorization'] = "GoogleLogin auth=%s" % self.auth
url_params = { 'key': self.key, 'format': self.file_format, 'gid': self.gid }
url = self.new_spreadsheet_url % url_params
r = requests.get(url, headers=headers)
if r.status_code != 200:
url = self.spreadsheet_url % url_params
r = requests.get(url, headers=headers)
if r.status_code != 200:
raise KeyError("Error! Your Google Doc does not exist.")
with open('data/%s.%s' % (self.file_name, self.file_format), 'wb') as writefile:
writefile.write(r.content)
|
[] |
[] |
[
"APPS_GOOGLE_PASS",
"APPS_GOOGLE_EMAIL"
] |
[]
|
["APPS_GOOGLE_PASS", "APPS_GOOGLE_EMAIL"]
|
python
| 2 | 0 | |
tests/test_objectStores.py
|
#Test object stores creation
from TestHelperSuperClass import testHelperSuperClass
import object_store_abstraction as undertest
import json
import os
'''
#Sample code used to create a store in an application:
objectStoreConfigJSON = readFromEnviroment(env, 'APIAPP_OBJECTSTORECONFIG', '{}', None)
objectStoreConfigDict = None
try:
if objectStoreConfigJSON != '{}':
objectStoreConfigDict = json.loads(objectStoreConfigJSON)
except Exception as err:
print(err) # for the repr
print(str(err)) # for just the message
print(err.args) # the arguments that the exception has been called with.
raise(InvalidObjectStoreConfigInvalidJSONException)
fns = {
'getCurDateTime': self.getCurDateTime,
'getPaginatedResult': self.getPaginatedResult
}
self.objectStore = createObjectStoreInstance(objectStoreConfigDict, fns)
'''
SKIPSQLALCHEMYTESTS=False
if ('SKIPSQLALCHEMYTESTS' in os.environ):
if os.environ["SKIPSQLALCHEMYTESTS"]=="Y":
SKIPSQLALCHEMYTESTS=True
#@TestHelperSuperClass.wipd
class test_objectStoresMemory(testHelperSuperClass):
def test_defaultCreation(self):
objectStoreConfigDict = None
a = undertest.createObjectStoreInstance(objectStoreConfigDict, self.getObjectStoreExternalFns())
if not isinstance(a,undertest.ObjectStore_Memory):
self.assertTrue(False,msg='Wrong type of object store created')
def test_memoryCreation(self):
a = "{\"Type\":\"Memory\"}"
objectStoreConfigDict = json.loads(a)
a = undertest.createObjectStoreInstance(objectStoreConfigDict, self.getObjectStoreExternalFns())
if not isinstance(a,undertest.ObjectStore_Memory):
self.assertTrue(False,msg='Wrong type of object store created')
def test_sqlAlchemyCreation(self):
if SKIPSQLALCHEMYTESTS:
print("Skipping SQLAlchemyTests")
return
a = "{\"Type\":\"SQLAlchemy\", \"connectionString\":\"mysql+pymysql://saas_user_man_user:[email protected]:10103/saas_user_man_rad\"}"
objectStoreConfigDict = json.loads(a)
a = undertest.createObjectStoreInstance(objectStoreConfigDict, self.getObjectStoreExternalFns())
if not isinstance(a,undertest.ObjectStore_SQLAlchemy):
self.assertTrue(False,msg='Wrong type of object store created')
def test_nonDictPassedToCreation(self):
with self.assertRaises(Exception) as context:
a = undertest.createObjectStoreInstance("Not A Dict", self.getObjectStoreExternalFns())
self.checkGotRightExceptionType(context,undertest.ObjectStoreConfigNotDictObjectExceptionClass)
def test_dictWithoutTypePassedToCreation(self):
objectStoreConfigDict = {'Som': 'dsds'}
with self.assertRaises(Exception) as context:
a = undertest.createObjectStoreInstance(objectStoreConfigDict, self.getObjectStoreExternalFns())
self.checkGotRightExceptionType(context,undertest.InvalidObjectStoreConfigMissingTypeClass)
def test_dictWithUnknownTypePassedToCreation(self):
objectStoreConfigDict = {'Type': 'SomeInvalidObjectStoreType'}
with self.assertRaises(Exception) as context:
a = undertest.createObjectStoreInstance(objectStoreConfigDict, self.getObjectStoreExternalFns())
self.checkGotRightExceptionType(context,undertest.InvalidObjectStoreConfigUnknownTypeClass)
|
[] |
[] |
[
"SKIPSQLALCHEMYTESTS"
] |
[]
|
["SKIPSQLALCHEMYTESTS"]
|
python
| 1 | 0 | |
soracom/generated/cmd/stats_beam_get.go
|
// Code generated by soracom-cli generate-cmd. DO NOT EDIT.
package cmd
import (
"fmt"
"net/url"
"os"
"github.com/spf13/cobra"
)
// StatsBeamGetCmdImsi holds value of 'imsi' option
var StatsBeamGetCmdImsi string
// StatsBeamGetCmdPeriod holds value of 'period' option
var StatsBeamGetCmdPeriod string
// StatsBeamGetCmdFrom holds value of 'from' option
var StatsBeamGetCmdFrom int64
// StatsBeamGetCmdTo holds value of 'to' option
var StatsBeamGetCmdTo int64
// StatsBeamGetCmdOutputJSONL indicates to output with jsonl format
var StatsBeamGetCmdOutputJSONL bool
func init() {
StatsBeamGetCmd.Flags().StringVar(&StatsBeamGetCmdImsi, "imsi", "", TRAPI("imsi"))
StatsBeamGetCmd.Flags().StringVar(&StatsBeamGetCmdPeriod, "period", "", TRAPI("Units of aggregate data. For minutes, the interval is around 5 minutes."))
StatsBeamGetCmd.Flags().Int64Var(&StatsBeamGetCmdFrom, "from", 0, TRAPI("Start time in unixtime for the aggregate data."))
StatsBeamGetCmd.Flags().Int64Var(&StatsBeamGetCmdTo, "to", 0, TRAPI("End time in unixtime for the aggregate data."))
StatsBeamGetCmd.Flags().BoolVar(&StatsBeamGetCmdOutputJSONL, "jsonl", false, TRCLI("cli.common_params.jsonl.short_help"))
StatsBeamCmd.AddCommand(StatsBeamGetCmd)
}
// StatsBeamGetCmd defines 'get' subcommand
var StatsBeamGetCmd = &cobra.Command{
Use: "get",
Short: TRAPI("/stats/beam/subscribers/{imsi}:get:summary"),
Long: TRAPI(`/stats/beam/subscribers/{imsi}:get:description`),
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) > 0 {
return fmt.Errorf("unexpected arguments passed => %v", args)
}
opt := &apiClientOptions{
BasePath: "/v1",
Language: getSelectedLanguage(),
}
ac := newAPIClient(opt)
if v := os.Getenv("SORACOM_VERBOSE"); v != "" {
ac.SetVerbose(true)
}
err := authHelper(ac, cmd, args)
if err != nil {
cmd.SilenceUsage = true
return err
}
param, err := collectStatsBeamGetCmdParams(ac)
if err != nil {
return err
}
body, err := ac.callAPI(param)
if err != nil {
cmd.SilenceUsage = true
return err
}
if body == "" {
return nil
}
if rawOutput {
_, err = os.Stdout.Write([]byte(body))
} else {
if StatsBeamGetCmdOutputJSONL {
return printStringAsJSONL(body)
}
return prettyPrintStringAsJSON(body)
}
return err
},
}
func collectStatsBeamGetCmdParams(ac *apiClient) (*apiParams, error) {
var parsedBody interface{}
var err error
err = checkIfRequiredStringParameterIsSupplied("imsi", "imsi", "path", parsedBody, StatsBeamGetCmdImsi)
if err != nil {
return nil, err
}
err = checkIfRequiredStringParameterIsSupplied("period", "period", "query", parsedBody, StatsBeamGetCmdPeriod)
if err != nil {
return nil, err
}
err = checkIfRequiredIntegerParameterIsSupplied("from", "from", "query", parsedBody, StatsBeamGetCmdFrom)
if err != nil {
return nil, err
}
err = checkIfRequiredIntegerParameterIsSupplied("to", "to", "query", parsedBody, StatsBeamGetCmdTo)
if err != nil {
return nil, err
}
return &apiParams{
method: "GET",
path: buildPathForStatsBeamGetCmd("/stats/beam/subscribers/{imsi}"),
query: buildQueryForStatsBeamGetCmd(),
noRetryOnError: noRetryOnError,
}, nil
}
func buildPathForStatsBeamGetCmd(path string) string {
escapedImsi := url.PathEscape(StatsBeamGetCmdImsi)
path = strReplace(path, "{"+"imsi"+"}", escapedImsi, -1)
return path
}
func buildQueryForStatsBeamGetCmd() url.Values {
result := url.Values{}
if StatsBeamGetCmdPeriod != "" {
result.Add("period", StatsBeamGetCmdPeriod)
}
if StatsBeamGetCmdFrom != 0 {
result.Add("from", sprintf("%d", StatsBeamGetCmdFrom))
}
if StatsBeamGetCmdTo != 0 {
result.Add("to", sprintf("%d", StatsBeamGetCmdTo))
}
return result
}
|
[
"\"SORACOM_VERBOSE\""
] |
[] |
[
"SORACOM_VERBOSE"
] |
[]
|
["SORACOM_VERBOSE"]
|
go
| 1 | 0 | |
setup.py
|
from setuptools import setup, find_packages
import os
# Check if OpenCV is installed and raise an error if it is not
# but don't do this if the ReadTheDocs systems tries to install
# the library, as that is configured to mock cv2 anyways
READ_THE_DOCS = (os.environ.get("READTHEDOCS", "False") == "True")
NO_CV2_INSTALLED_CHECK = (os.environ.get("IMGAUG_NO_CV2_INSTALLED_CHECK", "False") == "True")
if not READ_THE_DOCS and not NO_CV2_INSTALLED_CHECK:
try:
import cv2 # pylint: disable=locally-disabled, unused-import, line-too-long
except ImportError as e:
raise Exception("Could not find package 'cv2' (OpenCV). It cannot be automatically installed, so you will have to manually install it.")
long_description = """A library for image augmentation in machine learning experiments, particularly convolutional neural networks.
Supports augmentation of images and keypoints/landmarks in a variety of different ways."""
setup(
name="imgaug",
version="0.2.6",
author="Alexander Jung",
author_email="[email protected]",
url="https://github.com/aleju/imgaug",
download_url="https://github.com/aleju/imgaug/archive/0.2.6.tar.gz",
install_requires=["scipy", "scikit-image>=0.11.0", "numpy>=1.7.0", "six", "imageio"],
packages=find_packages(),
include_package_data=True,
license="MIT",
description="Image augmentation library for machine learning",
long_description=long_description,
keywords=["augmentation", "image", "deep learning", "neural network", "machine learning"]
)
|
[] |
[] |
[
"IMGAUG_NO_CV2_INSTALLED_CHECK",
"READTHEDOCS"
] |
[]
|
["IMGAUG_NO_CV2_INSTALLED_CHECK", "READTHEDOCS"]
|
python
| 2 | 0 | |
nike-riposte-integration-remote-tests/src/test/java/com/undefinedlabs/nikeriposteintegration/functionaltest/scope/RiposteProxyRouterEndpointIT.java
|
package com.undefinedlabs.nikeriposteintegration.functionaltest.scope;
import com.undefinedlabs.nikeriposteintegration.functionaltest.scope.utils.AbstractConcurrentIT;
import okhttp3.OkHttpClient;
import okhttp3.Request;
import okhttp3.Response;
import org.apache.commons.lang.StringUtils;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import static org.assertj.core.api.Assertions.assertThat;
public class RiposteProxyRouterEndpointIT extends AbstractConcurrentIT {
private static final Logger LOGGER = LoggerFactory.getLogger(RiposteProxyRouterEndpointIT.class);
@Test
public void should_invoke_riposte_proxy_router_endpoint_concurrently() throws IOException {
//Given
final OkHttpClient okHttpClient = new OkHttpClient.Builder()
.connectTimeout(90, TimeUnit.SECONDS)
.readTimeout(90, TimeUnit.SECONDS)
.writeTimeout(90, TimeUnit.SECONDS)
.build();
final String concurrentReqsStr = System.getenv("CONCURRENT_REQUESTS");
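        // Default to 5 concurrent requests unless CONCURRENT_REQUESTS is set to a numeric value.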
int concurrentReqs = (StringUtils.isNotEmpty(concurrentReqsStr) && StringUtils.isNumeric(concurrentReqsStr)) ? Integer.parseInt(concurrentReqsStr) : 5;
LOGGER.info("--- Testing Proxy Router Endpoint --> Sending " + concurrentReqs + " requests concurrently");
//When
final List<Future<Response>> futureResponses = IntStream.range(0, concurrentReqs).mapToObj((num) -> makeRequest(okHttpClient, new Request.Builder().url(props.nikeriposteintegrationHost+"/exampleProxy?num="+num).build())).collect(Collectors.toList());
final CompletableFuture<List<Response>> futureListResponses = CompletableFuture.allOf(futureResponses.toArray(new CompletableFuture[0])).thenApply(v -> futureResponses.stream().map(futureResponse -> {
try {
return futureResponse.get();
} catch (Exception e) {
throw new RuntimeException(e);
}
}).collect(Collectors.toList()));
//Then
try {
final List<Response> responses = futureListResponses.get();
for(Response response : responses) {
assertThat(response.isSuccessful()).isTrue();
}
} catch (Exception e){
throw new RuntimeException(e);
}
}
}
|
[
"\"CONCURRENT_REQUESTS\""
] |
[] |
[
"CONCURRENT_REQUESTS"
] |
[]
|
["CONCURRENT_REQUESTS"]
|
java
| 1 | 0 | |
java/yb-client/src/test/java/org/yb/client/TestUtils.java
|
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
// The following only applies to changes made to this file as part of YugaByte development.
//
// Portions Copyright (c) YugaByte, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations
// under the License.
//
package org.yb.client;
import org.apache.commons.io.FileUtils;
import org.junit.runner.Description;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.yb.BaseYBTest;
import org.yb.client.YBClient.Condition;
import org.yb.util.ConfForTesting;
import org.yb.util.EnvAndSysPropertyUtil;
import org.yb.util.RandomNumberUtil;
import org.yb.util.BuildTypeUtil;
import java.io.*;
import java.net.*;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;
import java.util.*;
import java.util.concurrent.atomic.AtomicBoolean;
public class TestUtils {
private static final Logger LOG = LoggerFactory.getLogger(TestUtils.class);
private static final String BIN_DIR_PROP = "binDir";
private static String ybRootDir = null;
public static final boolean IS_LINUX =
System.getProperty("os.name").toLowerCase().equals("linux");
private static final long startTimeMillis = System.currentTimeMillis();
private static final String defaultTestTmpDir =
"/tmp/ybtest-" + System.getProperty("user.name") + "-" + startTimeMillis + "-" +
new Random().nextInt(Integer.MAX_VALUE);
private static boolean isJenkins = System.getProperty("user.name").equals("jenkins");
private static final AtomicBoolean defaultTestTmpDirCleanupHookRegistered = new AtomicBoolean();
// The amount of time to wait for in addition to the ttl specified.
private static final long WAIT_FOR_TTL_EXTENSION_MS = 100;
private static PrintStream defaultStdOut = System.out;
private static PrintStream defaultStdErr = System.err;
public static final int MIN_PORT_TO_USE = 10000;
public static final int MAX_PORT_TO_USE = 32768;
// Set of ports for the network addresses being reserved.
private static final Map<InetAddress, Set<Integer>> reservedPorts = new HashMap<>();
/** Time to sleep in milliseconds waiting for conditions to be met. */
private static final int SLEEP_TIME_MS = 1000;
private static Path flagFileTmpPath = null;
private static final Object flagFilePathLock = new Object();
private static volatile String cppBinariesDir = null;
private static volatile String buildType = null;
/**
* When collecting the list of tests to run using the -DcollectTests option to the build, prefix
* each line describing a test with this.
*/
private static final String COLLECTED_TESTS_PREFIX = "YUGABYTE_JAVA_TEST: ";
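  // Illustrative only (hypothetical test name): a collected line would look like
  // "YUGABYTE_JAVA_TEST: org.yb.client.TestFoo#testBar".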
/**
* @return the path of the flags file to pass to daemon processes
* started by the tests
*/
public static String getFlagsPath() {
// If the flags are inside a JAR, extract them into our temporary
// test directory.
try {
// Somewhat unintuitively, createTempFile() actually creates the file,
// not just the path, so we have to use REPLACE_EXISTING below.
synchronized (flagFilePathLock) {
if (flagFileTmpPath == null || !Files.exists(flagFileTmpPath)) {
flagFileTmpPath = Files.createTempFile(
Paths.get(getBaseTmpDir()), "yb-flags", ".flags");
Files.copy(BaseYBClientTest.class.getResourceAsStream("/flags"), flagFileTmpPath,
StandardCopyOption.REPLACE_EXISTING);
}
}
return flagFileTmpPath.toAbsolutePath().toString();
} catch (IOException e) {
throw new RuntimeException("Unable to extract flags file into tmp", e);
}
}
/**
* Return the path portion of a file URL, after decoding the escaped
* components. This fixes issues when trying to build within a
* working directory with special characters.
*/
private static String urlToPath(URL u) {
try {
return URLDecoder.decode(u.getPath(), "UTF-8");
} catch (UnsupportedEncodingException e) {
throw new RuntimeException(e);
}
}
public static synchronized String findYbRootDir() {
if (ybRootDir != null) {
return ybRootDir;
}
final URL myUrl = BaseYBClientTest.class.getProtectionDomain().getCodeSource().getLocation();
final String pathToCode = urlToPath(myUrl);
final String currentDir = System.getProperty("user.dir");
// Try to find the YB directory root by navigating upward from either the source code location,
// or, if that does not work, from the current directory.
for (String initialPath : new String[] { pathToCode, currentDir }) {
// Cache the root dir so that we don't have to find it every time.
ybRootDir = findYbSrcRootContaining(initialPath);
if (ybRootDir != null) {
return ybRootDir;
}
}
throw new RuntimeException(
"Unable to find build dir! myUrl=" + myUrl + ", currentDir=" + currentDir);
}
private static String findYbSrcRootContaining(String initialPath) {
File currentPath = new File(initialPath);
while (currentPath != null) {
if (new File(currentPath, "yb_build.sh").exists() &&
new File(currentPath, "build-support").exists()) {
return currentPath.getAbsolutePath();
}
currentPath = currentPath.getParentFile();
}
return null;
}
/**
* @return the directory with YB daemons' web UI assets
*/
public static String getWebserverDocRoot() {
return TestUtils.findYbRootDir() + "/www";
}
public static String getBinDir() {
if (cppBinariesDir != null)
return cppBinariesDir;
String binDir = System.getProperty(BIN_DIR_PROP);
if (binDir != null) {
LOG.info("Using binary directory specified by property: {}",
binDir);
} else {
binDir = findYbRootDir() + "/build/latest/bin";
}
if (!new File(binDir).isDirectory()) {
String externalBinDir = findYbRootDir() + "__build/latest/bin";
if (new File(externalBinDir).isDirectory()) {
binDir = externalBinDir;
} else {
throw new RuntimeException(
"Directory that is supposed to contain YB C++ binaries not found in either of the " +
"following locations: " + binDir + ", " + externalBinDir);
}
}
cppBinariesDir = binDir;
return binDir;
}
public static String getBuildRootDir() {
return new File(getBinDir()).getParent();
}
/**
* @param binName the binary to look for (eg 'yb-tserver')
* @return the absolute path of that binary
* @throws FileNotFoundException if no such binary is found
*/
public static String findBinary(String binName) throws FileNotFoundException {
String binDir = getBinDir();
File candidate = new File(binDir, binName);
if (candidate.canExecute()) {
return candidate.getAbsolutePath();
}
throw new FileNotFoundException("Cannot find binary " + binName +
" in binary directory " + binDir);
}
public static String getBuildType() {
if (buildType != null)
return buildType;
try {
final File canonicalBuildDir = new File(getBinDir()).getParentFile().getCanonicalFile();
final String buildDirBasename = canonicalBuildDir.getName();
final String[] buildDirNameComponents = buildDirBasename.split("-");
assert buildDirNameComponents.length >= 3 :
"buildDirNameComponents is expected to have at least 3 components: " +
Arrays.asList(buildDirNameComponents) + ", canonicalBuildDir=" +
canonicalBuildDir.getPath();
buildType = buildDirNameComponents[0].toLowerCase();
LOG.info("Identified build type as '" + buildType + "' based on canonical build " +
"directory '" + canonicalBuildDir + "' and base name '" + buildDirBasename + "'");
return buildType;
} catch (IOException ex) {
throw new RuntimeException("Failed trying to get the build type", ex);
}
}
public static boolean isReleaseBuild() {
return TestUtils.getBuildType().equals("release");
}
/**
* @return the base directory within which we will store server data
*/
public static String getBaseTmpDir() {
String testTmpDir = System.getenv("TEST_TMPDIR");
if (testTmpDir == null) {
// If we are generating the temporary directory name here, we are responsible for deleting it
// unless told not to.
testTmpDir = new File(defaultTestTmpDir).getAbsolutePath();
if (!ConfForTesting.keepData() &&
defaultTestTmpDirCleanupHookRegistered.compareAndSet(false, true)) {
final File tmpDirToCleanUp = new File(testTmpDir);
Runtime.getRuntime().addShutdownHook(new Thread(() -> {
if (tmpDirToCleanUp.isDirectory()) {
try {
FileUtils.deleteDirectory(tmpDirToCleanUp);
} catch (IOException e) {
LOG.error("Failed to delete directory " + tmpDirToCleanUp + " recursively", e);
}
}
}));
}
}
File f = new File(testTmpDir);
if (!f.exists() && !f.mkdirs()) {
throw new RuntimeException("Could not create " + testTmpDir + ", not enough permissions?");
}
return f.getAbsolutePath();
}
/**
* Check if the given port is free on the given network interface.
*
* @param bindInterface the network interface to bind to
* @param port port to bind to
* @param logException whether to log an exception in case of failure to bind to the port
* (but not in case of a failure to close the server socket).
* @return true if the given port is free on the given interface
* @throws IOException
*/
public static boolean isPortFree(InetAddress bindInterface, int port, boolean logException)
throws IOException {
final int DEFAULT_BACKLOG = 50;
ServerSocket serverSocket;
try {
serverSocket = new ServerSocket(port, DEFAULT_BACKLOG, bindInterface);
} catch (IOException e) {
if (logException) {
LOG.error("Error trying to bind to " + bindInterface + ":" + port, e);
}
return false;
}
serverSocket.close();
return true;
}
/**
* Check if the port for the bind interface has already been reserved.
*/
private static boolean isReservedPort(InetAddress bindInterface, int port) {
return (reservedPorts.containsKey(bindInterface) &&
reservedPorts.get(bindInterface).contains(port));
}
/**
* Reserve the port for the bind interface.
*/
private static void reservePort(InetAddress bindInterface, int port) {
if (!reservedPorts.containsKey(bindInterface)) {
reservedPorts.put(bindInterface, new HashSet<>());
}
reservedPorts.get(bindInterface).add(port);
}
/**
* Clear all reserved ports.
*/
public static void clearReservedPorts() {
reservedPorts.clear();
}
/**
* Find a free port for the given bind interface, starting with the one passed. Keep in mind the
* time-of-check-time-of-use nature of this method, the returned port might become occupied
* after it was checked for availability.
* @return A currently usable port.
* @throws IOException if we can't close a socket we tried to open or if we run out of ports to
* try.
*/
public static int findFreePort(String bindInterface) throws IOException {
final InetAddress bindIp = InetAddress.getByName(bindInterface);
final int MAX_ATTEMPTS = 1000;
Random rng = RandomNumberUtil.getRandomGenerator();
for (int attempt = 0; attempt < MAX_ATTEMPTS; ++attempt) {
final int port = MIN_PORT_TO_USE + rng.nextInt(MAX_PORT_TO_USE - MIN_PORT_TO_USE);
if (!isReservedPort(bindIp, port) && isPortFree(bindIp, port, attempt == MAX_ATTEMPTS - 1)) {
reservePort(bindIp, port);
return port;
}
}
throw new IOException("Could not find a free port on interface " + bindInterface + " in " +
MAX_ATTEMPTS + " attempts");
}
public static void waitFor(Condition condition, long timeoutMs) throws Exception {
waitFor(condition, timeoutMs, SLEEP_TIME_MS);
}
public static void waitFor(Condition condition, long timeoutMs, int sleepTime) throws Exception {
timeoutMs = BuildTypeUtil.adjustTimeout(timeoutMs);
final long startTimeMs = System.currentTimeMillis();
while (System.currentTimeMillis() - startTimeMs < timeoutMs && !condition.get()) {
Thread.sleep(sleepTime);
}
if (!condition.get()) {
throw new Exception(String.format("Operation timed out after %dms", timeoutMs));
}
}
private static String getHorizontalLine() {
final StringBuilder horizontalLine = new StringBuilder();
for (int i = 0; i < 100; ++i) {
horizontalLine.append("-");
}
return horizontalLine.toString();
}
public static String HORIZONTAL_LINE = getHorizontalLine();
public static void printHeading(PrintStream out, String msg) {
out.println("\n" + HORIZONTAL_LINE + "\n" + msg + "\n" + HORIZONTAL_LINE + "\n");
}
public static String formatTestDescrition(String className, String methodName) {
return "class=\"" + className + "\", method=\"" + methodName + "\"";
}
public static String getClassAndMethodStr(Description description) {
return formatTestDescrition(description.getClassName(), description.getMethodName());
}
/**
* Tries to connect to the given host and port until the provided timeout has expired.
* @param host host to connect to.
* @param port port to connect to.
* @param timeoutMs timeout in milliseconds to wait for a successful connection.
* @throws Exception
*/
public static void waitForServer(String host, int port, long timeoutMs) throws Exception {
TestUtils.waitFor(() -> {
try {
new Socket(host, port);
} catch (IOException ie) {
return false;
}
return true;
}, timeoutMs);
}
/**
   * Adjust the given timeout (which should already have been corrected for build type using
   * {@link org.yb.util.Timeouts#adjustTimeoutSecForBuildType}) according to user overrides such
   * as {@code YB_MIN_TEST_TIMEOUT_SEC}.
* @param timeoutSec the test timeout in seconds to be adjusted
* @return the adjusted timeout
*/
public static long finalizeTestTimeoutSec(long timeoutSec) {
String minTimeoutStr =
EnvAndSysPropertyUtil.getEnvVarOrSystemProperty("YB_MIN_TEST_TIMEOUT_SEC", "0");
LOG.info("minTimeoutStr=" + minTimeoutStr);
long minTestTimeoutSec;
if (minTimeoutStr.toLowerCase().equals("inf")) {
minTestTimeoutSec = Integer.MAX_VALUE;
} else {
minTestTimeoutSec = Long.valueOf(minTimeoutStr);
}
if (minTestTimeoutSec <= 0) {
return timeoutSec;
}
// The lower bound on the timeout in seconds is minTestTimeoutSec, as specified by the user.
return Math.max(timeoutSec, minTestTimeoutSec);
}
/**
* Waits for the given ttl (in msec) to expire.
* @param ttl the ttl (in msec) to wait for expiry.
* @throws Exception
*/
public static void waitForTTL(long ttl) throws Exception {
Thread.sleep(ttl + WAIT_FOR_TTL_EXTENSION_MS);
}
public static void reportCollectedTest(
String packageAndClassName, String methodNameAndParameters) {
System.out.println(COLLECTED_TESTS_PREFIX + packageAndClassName + "#" +
methodNameAndParameters);
}
public static void logAndSleepMs(int ms, String msg) throws InterruptedException {
LOG.info("Sleeping for " + ms + " milliseconds: " + msg);
Thread.sleep(ms);
LOG.info("Finished sleeping for " + ms + " milliseconds: " + msg);
}
public static String getTestReportFilePrefix() {
Class testClass;
try {
testClass = Class.forName(BaseYBTest.getCurrentTestClassName());
} catch (ClassNotFoundException e) {
throw new RuntimeException(e);
}
String testClassesDir = testClass.getProtectionDomain().getCodeSource().getLocation().getPath();
if (testClassesDir.endsWith("/")) {
testClassesDir = testClassesDir.substring(0, testClassesDir.length() - 1);
}
if (!testClassesDir.endsWith("test-classes")) {
throw new RuntimeException(
"Found class " + testClass + " in directory " + testClassesDir + ", expected it to be " +
"in a 'test-classes' directory");
}
final String defaultSurefireReportsDir =
new File(new File(testClassesDir).getParent(), "surefire-reports").getPath();
File surefireDir = new File(System.getProperty("yb.surefire.reports.directory",
defaultSurefireReportsDir));
if (!surefireDir.isDirectory()) {
LOG.warn("Directory " + surefireDir + " does not exist, attempting to create");
if (!surefireDir.mkdirs() && !surefireDir.isDirectory()) {
LOG.warn("Still could not create directory " + surefireDir);
throw new RuntimeException(
"Surefire report directory '" + surefireDir +
"' does not exist and could not be created");
}
}
return new File(
surefireDir,
testClass.getName() + "." + BaseYBTest.getCurrentTestMethodName() + "."
).toString();
}
public static void resetDefaultStdOutAndErr() {
System.setOut(defaultStdOut);
System.setErr(defaultStdErr);
}
/**
* @param arr integer parameters
* @return the first of the given numbers that is positive
*/
public static int getFirstPositiveNumber(int... arr) {
for (int value : arr) {
if (value > 0)
return value;
}
if (arr.length > 0) {
return arr[arr.length - 1];
}
throw new IllegalArgumentException("No numbers given to firstPositiveNumber");
}
public static <T> List<T> joinLists(List<T> a, List<T> b) {
List<T> joinedList = new ArrayList();
if (a != null) {
joinedList.addAll(a);
}
if (b != null) {
joinedList.addAll(b);
}
return joinedList;
}
public static boolean isJenkins() {
return isJenkins;
}
}
|
[
"\"TEST_TMPDIR\""
] |
[] |
[
"TEST_TMPDIR"
] |
[]
|
["TEST_TMPDIR"]
|
java
| 1 | 0 | |
bin/nets/old/convolutional_nn.py
|
#!/usr/bin/env python3
# -----------------------------
# convolution to compare images
# -----------------------------
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import tensorflow as tf
import numpy as np
np.set_printoptions(threshold=np.nan)
from scipy import signal as sig
from PIL import Image as im
def main():
print("\nconvolution --- image evaluation\n")
# ----------------------- data ------------------------ #
# ----------------------------------------------------- #
# original_images: 96x96 image w/ int in [0,255] #
# reconstructed_images: 96x96 image w/ float in [0,255] #
# comparison_images: 96x96 image w/ float in [0,1) #
# ----------------------------------------------------- #
original_images = np.loadtxt("data/orig_3pics.txt")
reconstructed_images = np.loadtxt("data/recon_3pics.txt")
comparison_images = np.loadtxt("data/ssim_3pics.txt")
# data is now a 3 X 96 X 96 array (3 square 96px images)
original_images = original_images.reshape(3,96,96)
reconstructed_images = reconstructed_images.reshape(3,96,96)
comparison_images = comparison_images.reshape(3,96,96)
    # these are copies of the data, but with each entry being its own list
    # I made two copies because I have been doing stuff with the non-dimensioned version separately
original_images_dim1 = original_images.reshape(3,96,96,1)
reconstructed_images_dim1 = reconstructed_images.reshape(3,96,96,1)
comparison_images_dim1 = comparison_images.reshape(3,96,96,1)
# start of the tf stuff
sess = tf.Session()
width = 96
height = 96
    # this placeholder will receive the image data from outside tf and turn it into a tensor
x_image = tf.placeholder(tf.float32, shape = [None, width, height, 1])
# these are the variables that will be learned, initial values not too important
filter_conv = tf.Variable(tf.truncated_normal([5,5,1,1]))
bias_conv = tf.Variable(tf.constant(0.1))
# the convolution operation, strides is how much it travels between each dot product.
# ----------------------------------------------------------------------------------------#
## NOTE: this is actually dope of tensor flow. when we specify the padding as same, then #
## it automagically chooses the right number of zeros to pad in order to give the output #
## the same size as the input. so that is take care of for us. you can check this by #
## changing the size of the filter. the output of the results.shape function will always #
## be 96,96,3,1. #
# ----------------------------------------------------------------------------------------#
convolution = tf.nn.conv2d(x_image, filter_conv, strides=[1,1,1,1], padding='SAME') + bias_conv
# running the operation --- we run it on the original and the reconstructed
init = tf.global_variables_initializer()
sess.run(init)
result_original = sess.run(convolution, feed_dict = {x_image: original_images_dim1})
result_recon = sess.run(convolution, feed_dict = {x_image: reconstructed_images_dim1})
    # flattening out the images, because we aren't using the square structure anymore
    ## this process combines the original and reconstructed convolutions into one array
    ## of length 18432 (96*96*2). This is to use the two images combined for our MLP training.
    ## NOTE: I am sure there is a more efficient way to do this
result_original = tf.reshape(result_original, [3, 9216])
result_recon = tf.reshape(result_recon, [3, 9216])
result_combined1 = tf.concat([result_original[0], result_recon[0]], 0)
result_combined2 = tf.concat([result_original[1], result_recon[1]], 0)
result_combined3 = tf.concat([result_original[2], result_recon[2]], 0)
result_combined1 = tf.reshape(result_combined1, [1, 18432])
result_combined2 = tf.reshape(result_combined2, [1, 18432])
result_combined3 = tf.reshape(result_combined3, [1, 18432])
result_total = tf.concat([result_combined1, result_combined2, result_combined3], 0)
# print(result_total.shape)
# this is the start of the MLP aspect of the network.
## x is the input from our combined result of the convolution
## y_ is the output, which is an array holding the resulting values
x = tf.placeholder(tf.float32, shape=[None, 18432])
    y_ = tf.placeholder(tf.float32, shape=[None, 9216])  # 9216 = 96*96, matching the flattened comparison images
    # variables to be learned
    weights = tf.Variable(tf.zeros([18432, 9216], tf.float32))
    bias = tf.Variable(tf.zeros([9216], tf.float32))
sess.run(tf.global_variables_initializer())
# operations --- sigmoid normalizes the result
# apply_weights_op = tf.matmul(x, weight)
# add_bias_op = tf.add(apply_weights_op, bias)
# activation_op = tf.nn.sigmoid(add_bias_op)
y = tf.nn.sigmoid(tf.matmul(x, weights) + bias)
number_epochs = 1000
learning_rate = .0001
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
y_1 = comparison_images
y_1 = y_1.reshape(3,1,9216)
    # looking at the images --- I just did this because I was curious what the images were.
    # if you want to see them, just uncomment the image_view.show() line
    # you can see the reconstruction by switching which one is commented out. Pretty cool stuff.
image = np.asarray(original_images[1], dtype='uint8')
# image = np.asarray(reconstructed_images[1], dtype='uint8')
image_view = im.fromarray(image, 'L')
# image_view.save("images/test.png")
# image_view.show()
if __name__ == '__main__':
main()
|
[] |
[] |
[
"TF_CPP_MIN_LOG_LEVEL"
] |
[]
|
["TF_CPP_MIN_LOG_LEVEL"]
|
python
| 1 | 0 | |
tests/smoke/test_movielens.py
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
import pytest
from tempfile import TemporaryDirectory
from reco_utils.dataset.movielens import (
load_pandas_df,
load_spark_df,
load_item_df,
download_movielens,
extract_movielens,
)
try:
from pyspark.sql.types import (
StructType,
StructField,
IntegerType,
StringType,
FloatType,
DoubleType,
)
from pyspark.sql.functions import col
from reco_utils.common.spark_utils import start_or_get_spark
except ImportError:
pass # skip this import if we are in pure python environment
@pytest.mark.smoke
@pytest.mark.parametrize(
"size, num_samples, num_movies, movie_example, title_example, genres_example, year_example",
[
(
"100k",
100000,
1682,
1,
"Toy Story (1995)",
"Animation|Children's|Comedy",
"1995",
)
],
)
def test_load_pandas_df(
size,
num_samples,
num_movies,
movie_example,
title_example,
genres_example,
year_example,
):
"""Test MovieLens dataset load into pd.DataFrame
"""
# Test if correct data are loaded and local_cache_path works
with TemporaryDirectory() as tmp_dir:
# Test if can handle different size of header columns
header = ["a"]
df = load_pandas_df(size=size, local_cache_path=tmp_dir, header=header)
assert len(df) == num_samples
assert len(df.columns) == max(
len(header), 2
) # Should load at least 2 columns, user and item
# Test title, genres, and released year load
header = ["a", "b", "c", "d", "e"]
with pytest.warns(Warning):
df = load_pandas_df(
size=size,
local_cache_path=tmp_dir,
header=header,
title_col="Title",
genres_col="Genres",
year_col="Year",
)
assert len(df) == num_samples
assert (
len(df.columns) == 7
) # 4 header columns (user, item, rating, timestamp) and 3 feature columns
assert "e" not in df.columns # only the first 4 header columns are used
# Get two records of the same items and check if the item-features are the same.
head = df.loc[df["b"] == movie_example][:2]
title = head["Title"].values
assert title[0] == title[1]
assert title[0] == title_example
genres = head["Genres"].values
assert genres[0] == genres[1]
assert genres[0] == genres_example
year = head["Year"].values
assert year[0] == year[1]
assert year[0] == year_example
# Test if raw-zip file, rating file, and item file are cached
assert len(os.listdir(tmp_dir)) == 3
# Test default arguments
df = load_pandas_df(size)
assert len(df) == num_samples
assert len(df.columns) == 4
@pytest.mark.smoke
@pytest.mark.parametrize(
"size, num_movies, movie_example, title_example, genres_example, year_example",
[("100k", 1682, 1, "Toy Story (1995)", "Animation|Children's|Comedy", "1995")],
)
def test_load_item_df(
size, num_movies, movie_example, title_example, genres_example, year_example
):
"""Test movielens item data load (not rating data)
"""
with TemporaryDirectory() as tmp_dir:
df = load_item_df(
size, local_cache_path=tmp_dir, movie_col=None, title_col="title"
)
assert len(df) == num_movies
assert len(df.columns) == 1 # Only title column should be loaded
assert df["title"][0] == title_example
# Test title and genres
df = load_item_df(
size, local_cache_path=tmp_dir, movie_col="item", genres_col="genres"
)
assert len(df) == num_movies
        assert len(df.columns) == 2  # movie_col and genres_col
assert df["item"][0] == movie_example
assert df["genres"][0] == genres_example
# Test release year
df = load_item_df(size, local_cache_path=tmp_dir, year_col="year")
assert len(df) == num_movies
        assert len(df.columns) == 2  # movie_col (default) and year_col
assert df["year"][0] == year_example
@pytest.mark.smoke
@pytest.mark.spark
@pytest.mark.parametrize(
"size, num_samples, num_movies, movie_example, title_example, genres_example, year_example",
[
(
"100k",
100000,
1682,
1,
"Toy Story (1995)",
"Animation|Children's|Comedy",
"1995",
)
],
)
def test_load_spark_df(
size,
num_samples,
num_movies,
movie_example,
title_example,
genres_example,
year_example,
):
"""Test MovieLens dataset load into pySpark.DataFrame
"""
spark = start_or_get_spark("MovieLensLoaderTesting")
# Test if correct data are loaded and local_cache_path works
with TemporaryDirectory() as tmp_dir:
# Test if can handle different size of header columns
header = ["1", "2"]
schema = StructType([StructField("u", IntegerType())])
with pytest.warns(Warning):
# Test if schema is used when both schema and header are provided
df = load_spark_df(
spark, size=size, local_cache_path=tmp_dir, header=header, schema=schema
)
assert df.count() == num_samples
assert len(df.columns) == len(schema)
# Test title, genres, and released year load
header = ["a", "b", "c", "d", "e"]
with pytest.warns(Warning):
df = load_spark_df(
spark,
size=size,
local_cache_path=tmp_dir,
header=header,
title_col="Title",
genres_col="Genres",
year_col="Year",
)
assert df.count() == num_samples
assert (
len(df.columns) == 7
) # 4 header columns (user, item, rating, timestamp) and 3 feature columns
assert "e" not in df.columns # only the first 4 header columns are used
# Get two records of the same items and check if the item-features are the same.
head = df.filter(col("b") == movie_example).limit(2)
title = head.select("Title").collect()
assert title[0][0] == title[1][0]
assert title[0][0] == title_example
genres = head.select("Genres").collect()
assert genres[0][0] == genres[1][0]
assert genres[0][0] == genres_example
year = head.select("Year").collect()
assert year[0][0] == year[1][0]
assert year[0][0] == year_example
# Test if raw-zip file, rating file, and item file are cached
assert len(os.listdir(tmp_dir)) == 3
# Test default arguments
df = load_spark_df(spark, size)
assert df.count() == num_samples
assert len(df.columns) == 4
@pytest.mark.smoke
@pytest.mark.parametrize("size", ["100k"])
def test_download_and_extract_movielens(size):
"""Test movielens data download and extract
"""
with TemporaryDirectory() as tmp_dir:
zip_path = os.path.join(tmp_dir, "ml.zip")
download_movielens(size, dest_path=zip_path)
assert len(os.listdir(tmp_dir)) == 1
assert os.path.exists(zip_path)
rating_path = os.path.join(tmp_dir, "rating.dat")
item_path = os.path.join(tmp_dir, "item.dat")
extract_movielens(
size, rating_path=rating_path, item_path=item_path, zip_path=zip_path
)
assert len(os.listdir(tmp_dir)) == 3
assert os.path.exists(rating_path)
assert os.path.exists(item_path)
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
examples/stock/history/getHistoricData.go
|
package example
import (
"fmt"
"os"
"go.m3o.com/stock"
)
// Get the historic open-close for a given day
func GetHistoricData() {
stockService := stock.NewStockService(os.Getenv("M3O_API_TOKEN"))
rsp, err := stockService.History(&stock.HistoryRequest{
Date: "2020-10-01",
Stock: "AAPL",
})
fmt.Println(rsp, err)
}
|
[
"\"M3O_API_TOKEN\""
] |
[] |
[
"M3O_API_TOKEN"
] |
[]
|
["M3O_API_TOKEN"]
|
go
| 1 | 0 | |
server_app.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import asyncio
import binascii
import json
import logging
import math
import os
import numpy as np
import geopandas as gpd
import rasterio as rio
import rtree
import shlex
import spacy
import subprocess
import sys
import tempfile
import uuid
import uvloop
import gc
from aiohttp import web
from concurrent.futures import ProcessPoolExecutor
from datetime import datetime
from functools import partial
from pyproj import Proj, transform
from rasterio.features import shapes as rio_shapes
from rasterio import merge as rio_merge
from shapely.geometry import Polygon, shape, mapping
from shapely.ops import unary_union
from glob import glob
def idx_generator_func(bounds):
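    """Yield (id, bounds, object) tuples in the form expected by rtree bulk loading."""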
for i, bound in enumerate(bounds):
yield (i, bound, i)
def make_index(bounds):
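    """Build an R-tree spatial index over an iterable of feature bounds."""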
return rtree.index.Index(
[z for z in idx_generator_func(bounds)],
Interleaved=True,
)
async def handler_activity_features(request):
"""
Returns a GeoJSON FeatureCollection
containing specific features for the requested activity
    (such as 'ski areas', 'ski lifts' and 'pistes' for the activity "ski").
"""
app = request.app
category = request.match_info['category']
if category not in app['allowed_activity']:
return web.Response(text='Error')
app['logger'].info(
'Requested features for activity "{}" : found {} features'
.format(category, len(app['layer_activity_{}'.format(category)]))
)
result = app['layer_activity_{}'.format(category)].to_json()
return web.Response(text=result)
async def handler_features_post(request):
"""
Returns a GeoJSON FeatureCollection
containing features for the requested `category`
and intersecting with the requested posted feature geometry.
"""
app = request.app
category = request.match_info['category']
posted_data = await request.post()
_geom = posted_data.get('geometry')
geom = shape(json.loads(_geom))
xmin, ymin, xmax, ymax = geom.bounds
app['logger'].info(
'Requested {} within {}...'
.format(category, (xmin, ymin, xmax, ymax)))
async with app['lock']:
if category not in app['allowed_category']:
return web.Response(text='Error')
ix_within = list(
app['index_{}'.format(category)]
.intersection((xmin, ymin, xmax, ymax)))
temp = app['layer_{}'.format(category)].iloc[ix_within]
result = temp[temp.geometry.intersects(geom)].to_json()
app['logger'].info(
'...found {} {} features'
.format(category, len(ix_within)))
return web.Response(text=result)
async def handler_features(request):
"""
Returns a GeoJSON FeatureCollection
containing features for the requested `category`
within the requested `bbox`.
"""
app = request.app
category = request.match_info['category']
bbox = request.match_info['bbox']
app['logger'].info(
'Requested {} within {}...'
.format(category, bbox))
async with app['lock']:
if category not in app['allowed_category']:
return web.Response(text='Error')
xmin, ymin, xmax, ymax = list(map(float, bbox.split(',')))
ix_within = list(
app['index_{}'.format(category)]
.intersection((xmin, ymin, xmax, ymax)))
result = app['layer_{}'.format(category)].iloc[ix_within].to_json()
app['logger'].info(
'...found {} {} features'
.format(category, len(ix_within)))
return web.Response(text=result)
async def index(request):
"""Handler for the index page."""
return web.FileResponse('./dist/index.html')
def compute_binary_predicate(_op, _geoms1, _geoms2):
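    """
    Evaluate the shapely binary predicate named by `_op` (e.g. 'intersects')
    for every pair of geometries from the two GeoJSON arrays and return the
    boolean matrix as a JSON-encoded nested dict.
    """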
geoms1 = [shape(i) for i in json.loads(_geoms1)]
geoms2 = [shape(i) for i in json.loads(_geoms2)]
result = {}
for ix1, g1 in enumerate(geoms1):
result[ix1] = {}
for ix2, g2 in enumerate(geoms2):
result[ix1][ix2] = getattr(g1, _op)(g2)
return json.dumps(result)
def compute_op_geom(_op, _geoms, options):
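    """
    Apply the geometric operation named by `_op` (unary_union, intersection,
    symmetric_difference or buffer) to an array of GeoJSON geometries; the
    buffer case reprojects to EPSG:2154 so distances are expressed in meters.
    Returns the resulting geometry as GeoJSON.
    """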
geoms = [shape(i) for i in json.loads(_geoms)]
if _op == 'unary_union':
res = unary_union(geoms)
elif _op == 'intersection':
res = geoms[0]
for _geom in geoms[1:]:
res = _geom.intersection(res)
elif _op == 'symmetric_difference':
res = geoms[0].symmetric_difference(geoms[1])
elif _op == 'buffer':
geo_serie = gpd.GeoSeries(
geoms,
crs='+proj=longlat +datum=WGS84 +no_defs ',
).to_crs(epsg=2154)
if options['dist'] and int(options['dist']) != 0:
res = unary_union(
geo_serie.buffer(float(options['dist']))
.boundary.buffer(float(options['uncertainty']))
.to_crs('+proj=longlat +datum=WGS84 +no_defs ')
.values
)
else:
res = unary_union(
geo_serie
.buffer(float(options['uncertainty']))
.to_crs('+proj=longlat +datum=WGS84 +no_defs ')
.values
)
return json.dumps(mapping(res))
async def handler_geom_op(request):
"""
    Handles binary predicates and geometric operations (buffer, unary union,
    intersection, symmetric difference) to be performed on arrays of GeoJSON geometries.
"""
_op = request.match_info['op']
if _op in request.app['allowed_binary_predicate']:
posted_data = await request.post()
_geoms1 = posted_data.get('geoms1')
_geoms2 = posted_data.get('geoms2')
result = await request.app.loop.run_in_executor(
request.app["ProcessPool"],
compute_binary_predicate,
_op,
_geoms1,
_geoms2,
)
return web.Response(text=result)
elif _op in request.app['allowed_geom_operation']:
posted_data = await request.post()
_geoms = posted_data.get('geoms')
options = {
'dist': posted_data.get('distance'),
'uncertainty': posted_data.get('uncertainty'),
} if _op == 'buffer' else None
result = await request.app.loop.run_in_executor(
request.app["ProcessPool"],
compute_op_geom,
_op,
_geoms,
options,
)
return web.Response(text=result)
else:
return web.Response(
text=json.dumps({
'message': (
'Error : binary predicate or geometric operation '
f'\'{_op}\' not found.'
),
})
)
async def handler_clue(request):
"""
Handles clues in natural language to extract part of speech and named
entities if any.
"""
posted_data = await request.post()
clue_nl = posted_data.get('clue_nl')
doc = request.app['nlp'](clue_nl)
part_of_speech = [
(x.orth_, x.pos_, x.lemma_)
for x in [
y for y in doc if not y.is_stop and y.pos_ != 'PUNCT']
]
named_entities = [(X.text, X.label_) for X in doc.ents]
return web.Response(
text=json.dumps({
"part_of_speech": part_of_speech,
"named_entities": named_entities,
})
)
async def handle_404(request, response):
return web.Response(text="ERROR 404 !")
async def error_middleware(app, handler):
async def middleware_handler(request):
try:
response = await handler(request)
if response.status == 404:
return await handle_404(request, response)
return response
except web.HTTPException as ex:
if ex.status == 404:
return await handle_404(request, ex)
raise
return middleware_handler
def get_extent_proj(path):
with rio.open(path) as f:
crs = f.read_crs()
bounds = f.bounds
return {
'path': path,
'crs_epsg': crs.to_epsg(),
'crs_string': Proj(crs.to_string()).srs,
'w': math.ceil(bounds[0]),
's': math.ceil(bounds[1]),
'e': math.floor(bounds[2]),
'n': math.floor(bounds[3]),
'ewres': f.res[0],
'nsres': f.res[1],
}
def init_grass(info_dem):
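    """
    Locate the local GRASS installation, create a temporary GRASS location
    matching the DEM's CRS, register the DEM as an external raster and set
    the computational region. Returns the gisbase/gisdb/location/mapset paths.
    """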
grass_bin = 'grass'
startcmd = grass_bin + ' --config path'
p = subprocess.Popen(
startcmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
out, err = p.communicate()
if p.returncode != 0:
raise ValueError(
'Failed to load GRASS\nStdout: {}\nStderr: {}\n'
.format(out.decode(), err.decode()))
gisbase = out.strip(b'\n').decode()
os.environ['GISBASE'] = gisbase
sys.path.append(os.path.join(gisbase, 'etc', 'python'))
gisdb = os.path.join(tempfile.gettempdir(), 'grassdata')
try:
os.stat(gisdb)
except FileNotFoundError:
os.mkdir(gisdb)
location = binascii.hexlify(os.urandom(12)).decode()
location_path = os.path.join(gisdb, location)
mapset = 'PERMANENT'
startcmd = ' '.join([
grass_bin,
'-c epsg:{}'.format(info_dem['crs_epsg']),
'-e',
location_path,
])
print('Starting grass with command: `' + startcmd + '`')
p = subprocess.Popen(
startcmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
out, err = p.communicate()
if p.returncode != 0:
raise ValueError(
'Failed to load GRASS\nStdout: {}\nStderr: {}\n'
.format(out.decode(), err.decode()))
print('Created location ', location_path)
import grass.script as grass
import grass.script.setup as gsetup
gsetup.init(gisbase, gisdb, location, mapset)
grass.message('--- GRASS GIS 7: Current GRASS GIS 7 environment:')
print(grass.gisenv())
grass.message('--- GRASS GIS 7: Setting projection info:')
_out_proj = grass.read_command(
'g.proj',
flags='c',
epsg=info_dem['crs_epsg'],
)
print(_out_proj)
grass.message('--- GRASS GIS 7: Loading DEM file:')
res = grass.read_command(
'r.external',
flags='o',
input=info_dem['path'],
band=1,
output="rast_5cb08c8150bbc7",
)
print(res)
grass.message('--- GRASS GIS 7: Defining the region...')
grass.read_command(
'g.region',
n=info_dem['n'],
s=info_dem['s'],
e=info_dem['e'],
w=info_dem['w'],
nsres=info_dem['nsres'],
ewres=info_dem['ewres'],
)
return {
"gisbase": gisbase,
"gisdb": gisdb,
"location": location,
"mapset": mapset,
}
def _validate_number(h):
# Will raise a ValueError if 'h' isn't / can't be converted
# to 'float' :
float(h)
return h
def _validate_datetime(year, month, day, hour, minute):
# In order to raise a ValueError if one of them
# isn't (or cannot be converted to) an 'int' :
int(year) + int(month) + int(day) + int(hour) + int(minute)
return (year, month, day, hour, minute)
def _validate_region(region_coords, info_dem):
in_proj = Proj(info_dem['crs_string'])
out_proj = Proj(init='epsg:4326')
_to_projected = partial(transform, out_proj, in_proj)
if region_coords is None:
return None
_coords = list(map(lambda x: float(x), region_coords.split(',')))
_coords[0], _coords[2] = _to_projected(_coords[0], _coords[2])
_coords[1], _coords[3] = _to_projected(_coords[1], _coords[3])
if _coords[0] <= info_dem['w'] or _coords[0] >= info_dem['e'] \
or _coords[2] >= info_dem['n'] or _coords[2] <= info_dem['s']:
raise ValueError(
'Requested region {} is outside the allowed region '
'(xmin={}, xmax={}, ymin={}, ymax={})'
.format(
_coords,
info_dem['w'],
info_dem['e'],
info_dem['s'],
info_dem['n'],
))
return {
'w': str(_coords[0]),
'e': str(_coords[1]),
's': str(_coords[2]),
'n': str(_coords[3]),
}
def _validate_one_position(_coords, info_dem):
in_proj = Proj(info_dem['crs_string'])
out_proj = Proj(init='epsg:4326')
_to_projected = partial(transform, out_proj, in_proj)
_coords = _to_projected(_coords[1], _coords[0])
if _coords[1] >= info_dem['n'] or _coords[1] <= info_dem['s'] \
or _coords[0] >= info_dem['e'] or _coords[0] <= info_dem['w']:
raise ValueError(
'Requested point {} is outside the allowed region '
'(xmin={}, xmax={}, ymin={}, ymax={})'
.format(
_coords,
info_dem['w'],
info_dem['e'],
info_dem['s'],
info_dem['n'],
))
return '{},{}'.format(*_coords)
def _validate_coordinates(coords, info_dem):
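    """
    Accept either a single coordinate pair or a parenthesised list of pairs,
    reproject each position into the DEM's CRS and raise ValueError for
    positions outside the DEM extent.
    """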
if coords.startswith('(') and coords.endswith(')'):
_coords_list = [
list(map(lambda x: float(x), c.split(',')))
for c in coords[1:-1].split('),(')
]
return [
_validate_one_position(_coords, info_dem)
for _coords in _coords_list
]
else:
_coords = list(map(lambda x: float(x), coords.split(',')))
return _validate_one_position(_coords, info_dem)
async def interviz_wrapper(request):
try:
c = _validate_coordinates(
request.rel_url.query['coordinates'],
request.app['info_dem'],
)
h1 = _validate_number(request.rel_url.query['height1'])
h2 = _validate_number(request.rel_url.query['height2'])
region = _validate_region(
request.rel_url.query.get('region', None),
request.app['info_dem'],
)
except Exception as e:
return web.Response(
text=json.dumps({"message": "Error : {}".format(e)}))
if isinstance(c, list):
res = await request.app.loop.run_in_executor(
request.app["ProcessPool"],
interviz_multiple,
request.app['path_info'],
request.app['info_dem'],
c,
h1,
h2,
region,
)
else:
res = await request.app.loop.run_in_executor(
request.app["ProcessPool"],
interviz,
request.app['path_info'],
request.app['info_dem'],
c,
h1,
h2,
region,
)
return web.Response(text=res)
def interviz_multiple(path_info, info_dem, coords_list, height1, height2, region):
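    """
    Compute one r.viewshed per observer position, merge the resulting rasters
    and return the visible area as a GeoJSON FeatureCollection in EPSG:4326.
    """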
import grass.script as GRASS
try:
if region:
GRASS.read_command(
'g.region',
n=region['n'],
s=region['s'],
e=region['e'],
w=region['w'],
nsres=info_dem['nsres'],
ewres=info_dem['ewres'],
)
results_layers = []
for i, coordinates in enumerate(coords_list):
uid = str(uuid.uuid4()).replace('-', '')
grass_name = "output_{}".format(uid)
output_name = os.path.join(path_info['gisdb'], '.'.join([uid, 'tif']))
results_layers.append(output_name)
GRASS.message(
'--- GRASS GIS 7: Computing viewshed {}/{}'
.format(i + 1, len(coords_list))
)
res = GRASS.read_command(
'r.viewshed',
input='rast_5cb08c8150bbc7',
coordinates=coordinates,
observer_elevation=height1,
target_elevation=height2,
# max_distance=max_distance,
refraction_coeff="0.14286",
memory="1000",
flags='b',
output=grass_name,
)
print(res)
GRASS.message(
'--- GRASS GIS 7: Saving resulting raster layer')
res = GRASS.read_command(
'r.out.gdal',
input=grass_name,
output=output_name,
format="GTiff",
createopt="TFW=YES,COMPRESS=LZW",
)
print(res)
GRASS.message(
'--- GRASS GIS 7: Remove temporary result raster from GRASS')
res = GRASS.read_command(
'g.remove',
flags='f',
type='raster',
name=grass_name,
)
print(res)
if region:
GRASS.read_command(
'g.region',
n=info_dem['n'],
s=info_dem['s'],
e=info_dem['e'],
w=info_dem['w'],
nsres=info_dem['nsres'],
ewres=info_dem['ewres'],
)
except Exception as e:
return json.dumps({"message": "Error : {}".format(e)})
datasets = [rio.open(path_layer, 'r') for path_layer in results_layers]
res, out_trans = rio_merge.merge(datasets, indexes=1)
epsg_value = datasets[0].crs.to_epsg()
results = [{
'properties': {'visibility': v},
'geometry': s,
'type': 'Feature',
} for i, (s, v) in enumerate(rio_shapes(
res, mask=None, transform=datasets[0].transform)) if v == 1.0]
with open('/tmp/{}.geojson'.format(uid), 'w') as f:
f.write(json.dumps({"type": "FeatureCollection", "features": results}))
for ds, path_layer in zip(datasets, results_layers):
ds.close()
os.remove(path_layer)
p = subprocess.Popen(
shlex.split(
'ogr2ogr -s_srs "EPSG:{}" -t_srs "EPSG:4326" '
'-f GeoJSON /dev/stdout /tmp/{}.geojson'.format(epsg_value, uid)),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
out, err = p.communicate()
os.remove('/tmp/{}.geojson'.format(uid))
if p.returncode != 0:
print('Error: ', err)
return json.dumps({"message": "Error : {}".format(err)})
return out.decode()
def interviz(path_info, info_dem, coordinates, height1, height2, region):
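"""Compute a single binary viewshed with r.viewshed, export it with
r.out.gdal, vectorise the visible cells and return GeoJSON in EPSG:4326."""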
import grass.script as GRASS
try:
uid = str(uuid.uuid4()).replace('-', '')
grass_name = "output_{}".format(uid)
output_name = os.path.join(path_info['gisdb'], '.'.join([uid, 'tif']))
if region:
GRASS.read_command(
'g.region',
n=region['n'],
s=region['s'],
e=region['e'],
w=region['w'],
nsres=info_dem['nsres'],
ewres=info_dem['ewres'],
)
GRASS.message(
'--- GRASS GIS 7: Computing viewshed')
res = GRASS.read_command(
'r.viewshed',
input='rast_5cb08c8150bbc7',
coordinates=coordinates,
observer_elevation=height1,
target_elevation=height2,
# max_distance=max_distance,
refraction_coeff="0.14286",
memory="1000",
flags='b',
output=grass_name,
)
print(res)
if region:
GRASS.read_command(
'g.region',
n=info_dem['n'],
s=info_dem['s'],
e=info_dem['e'],
w=info_dem['w'],
nsres=info_dem['nsres'],
ewres=info_dem['ewres'],
)
GRASS.message(
'--- GRASS GIS 7: Saving resulting raster layer')
res = GRASS.read_command(
'r.out.gdal',
input=grass_name,
output=output_name,
format="GTiff",
createopt="TFW=YES,COMPRESS=LZW",
)
print(res)
GRASS.message(
'--- GRASS GIS 7: Remove temporary result raster from GRASS')
res = GRASS.read_command(
'g.remove',
flags='f',
type='raster',
name=grass_name,
)
print(res)
except Exception as e:
return json.dumps({"message": "Error : {}".format(e)})
with rio.open(output_name) as src:
epsg_value = src.crs.to_epsg()
image = src.read(1)
results = [{
'properties': {'visibility': v},
'geometry': s,
'type': 'Feature',
} for i, (s, v) in enumerate(rio_shapes(
image, mask=None, transform=src.transform)) if v == 1.0]
with open('/tmp/{}.geojson'.format(uid), 'w') as f:
f.write(json.dumps({"type": "FeatureCollection", "features": results}))
p = subprocess.Popen(
shlex.split(
'ogr2ogr -s_srs "EPSG:{}" -t_srs "EPSG:4326" '
'-f GeoJSON /dev/stdout /tmp/{}.geojson'.format(epsg_value, uid)),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
out, err = p.communicate()
os.remove('/tmp/{}.geojson'.format(uid))
os.remove(output_name)
if p.returncode != 0:
print('Error: ', err)
return json.dumps({"message": "Error : {}".format(err)})
return out.decode()
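# /sunmask expects year/month/day/hour/minute query parameters plus an
# optional region and timezone; the optional `sun` flag asks for sunlit areas
# (the complement of the cast shadow) instead of the shadow itself.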
async def sunmask_wrapper(request):
try:
datetime = _validate_datetime(
request.rel_url.query['year'],
request.rel_url.query['month'],
request.rel_url.query['day'],
request.rel_url.query['hour'],
request.rel_url.query['minute'],
)
region = _validate_region(
request.rel_url.query.get('region', None),
request.app['info_dem'],
)
timezone = _validate_number(request.rel_url.query.get('timezone', '1'))
if not 0 <= int(timezone) <= 25:
raise ValueError('Invalid timezone')
sun = request.rel_url.query.get('sun', False)
if isinstance(sun, str):
if sun.lower() == 'false':
sun = False
else:
sun = True
except Exception as e:
return web.Response(
text=json.dumps({"message": "Error : {}".format(e)}))
res = await request.app.loop.run_in_executor(
request.app["ProcessPool"],
sunmask,
request.app['path_info'],
request.app['info_dem'],
datetime,
region,
timezone,
sun,
)
return web.Response(text=res)
def sunmask(path_info, info_dem, d, region, tz, sun):
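"""Compute cast-shadow areas with r.sunmask for the given date/time and
return them as GeoJSON in EPSG:4326; if `sun` is true, return the
complement of the shadow within the requested region instead."""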
import grass.script as GRASS
try:
uid = str(uuid.uuid4()).replace('-', '')
grass_name = "output_{}".format(uid)
output_name = os.path.join(path_info['gisdb'], '.'.join([uid, 'tif']))
if region:
GRASS.message(
'--- GRASS GIS 7: Reducing the region')
GRASS.read_command(
'g.region',
n=region['n'],
s=region['s'],
e=region['e'],
w=region['w'],
nsres=info_dem['nsres'],
ewres=info_dem['ewres'],
)
GRASS.message(
'--- GRASS GIS 7: Computing sunmask')
res = GRASS.read_command(
'r.sunmask',
elevation='rast_5cb08c8150bbc7',
year=d[0],
month=d[1],
day=d[2],
hour=d[3],
minute=d[4],
timezone=tz,
output=grass_name,
)
print(res)
GRASS.message(
'--- GRASS GIS 7: Saving resulting raster layer')
res = GRASS.read_command(
'r.out.gdal',
input=grass_name,
output=output_name,
format="GTiff",
createopt="TFW=YES,COMPRESS=LZW",
)
print(res)
GRASS.message(
'--- GRASS GIS 7: Remove temporary result raster from GRASS')
res = GRASS.read_command(
'g.remove',
flags='f',
type='raster',
name=grass_name,
)
print(res)
if region:
GRASS.message(
'--- GRASS GIS 7: Restoring the region')
GRASS.read_command(
'g.region',
n=info_dem['n'],
s=info_dem['s'],
e=info_dem['e'],
w=info_dem['w'],
nsres=info_dem['nsres'],
ewres=info_dem['ewres'],
)
except Exception as e:
return json.dumps({"message": "Error : {}".format(e)})
with rio.open(output_name) as src:
epsg_value = src.crs.to_epsg()
image = src.read(1)
results = [{
'properties': {'shadow': v},
'geometry': s,
'type': 'Feature',
} for i, (s, v) in enumerate(rio_shapes(
image, mask=None, transform=src.transform)) if v == 1.0]
# In this case we want the difference between the region and the
# computed areas of cast shadow
if sun:
region = Polygon([
(float(region['w']), float(region['s'])),
(float(region['e']), float(region['s'])),
(float(region['e']), float(region['n'])),
(float(region['w']), float(region['n'])),
(float(region['w']), float(region['s']))
])
shadow_union = unary_union([shape(ft['geometry']) for ft in results])
results = [{
'type': 'Feature',
'geometry': mapping(region.difference(shadow_union)),
'properties': {'sun': 1.0}
}]
with open('/tmp/{}.geojson'.format(uid), 'w') as f:
f.write(json.dumps({"type": "FeatureCollection", "features": results}))
p = subprocess.Popen(
shlex.split(
'ogr2ogr -s_srs "EPSG:{}" -t_srs "EPSG:4326" '
'-f GeoJSON /dev/stdout /tmp/{}.geojson'.format(epsg_value, uid)),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
out, err = p.communicate()
os.remove('/tmp/{}.geojson'.format(uid))
os.remove(output_name)
if p.returncode != 0:
print('Error: ', err)
return json.dumps({"message": "Error : {}".format(err)})
return out.decode()
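# /sun takes the same date/time parameters; the date is converted to a day of
# year and a decimal hour before being handed over to r.sun.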
async def sun_wrapper(request):
try:
dt = _validate_datetime(
request.rel_url.query['year'],
request.rel_url.query['month'],
request.rel_url.query['day'],
request.rel_url.query['hour'],
request.rel_url.query['minute'],
)
day = datetime(
int(dt[0]),
int(dt[1]),
int(dt[2]),
).timetuple().tm_yday
time = float(dt[3]) + (float(dt[4]) / 60)
region = _validate_region(
request.rel_url.query.get('region', None),
request.app['info_dem'],
)
timezone = _validate_number(request.rel_url.query.get('timezone', '1'))
if not 0 <= int(timezone) <= 25:
raise ValueError('Invalid timezone')
is_sun = request.rel_url.query.get('sun', False)
if isinstance(is_sun, str):
if is_sun.lower() == 'false':
is_sun = False
else:
is_sun = True
except Exception as e:
return web.Response(
text=json.dumps({"message": "Error : {}".format(e)}))
res = await request.app.loop.run_in_executor(
request.app["ProcessPool"],
sun,
request.app['path_info'],
request.app['info_dem'],
day,
time,
region,
timezone,
is_sun,
)
return web.Response(text=res)
def sun(path_info, info_dem, day, time, region, tz, is_sun):
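"""Compute solar incidence with r.sun for the given day of year and time,
split the result into sunlit / non-sunlit polygons and return GeoJSON in
EPSG:4326."""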
import grass.script as GRASS
try:
uid = str(uuid.uuid4()).replace('-', '')
grass_name = "output_{}".format(uid)
output_name = os.path.join(path_info['gisdb'], '.'.join([uid, 'tif']))
if region:
GRASS.message(
'--- GRASS GIS 7: Reducing the region')
GRASS.read_command(
'g.region',
n=region['n'],
s=region['s'],
e=region['e'],
w=region['w'],
nsres=info_dem['nsres'],
ewres=info_dem['ewres'],
)
GRASS.message(
'--- GRASS GIS 7: Computing longitude map')
GRASS.read_command(
'r.latlong',
flags='l',
input='rast_5cb08c8150bbc7',
output='rast_long_5cb08c8150bbc7',
)
GRASS.message(
'--- GRASS GIS 7: Computing sun incidence')
res = GRASS.read_command(
'r.sun',
elevation='rast_5cb08c8150bbc7',
long='rast_long_5cb08c8150bbc7',
day=day,
time=time,
civil_time=tz,
incidout=grass_name,
nprocs=2,
)
print(res)
GRASS.message(
'--- GRASS GIS 7: Saving resulting raster layer')
res = GRASS.read_command(
'r.out.gdal',
input=grass_name,
output=output_name,
format="GTiff",
createopt="TFW=YES,COMPRESS=LZW",
)
print(res)
GRASS.message(
'--- GRASS GIS 7: Remove temporary result raster from GRASS')
res = GRASS.read_command(
'g.remove',
flags='f',
type='raster',
name=grass_name,
)
print(res)
res = GRASS.read_command(
'g.remove',
flags='f',
type='raster',
name='rast_long_5cb08c8150bbc7',
)
print(res)
if region:
GRASS.message(
'--- GRASS GIS 7: Restoring the region')
GRASS.read_command(
'g.region',
n=info_dem['n'],
s=info_dem['s'],
e=info_dem['e'],
w=info_dem['w'],
nsres=info_dem['nsres'],
ewres=info_dem['ewres'],
)
except Exception as e:
return json.dumps({"message": "Error : {}".format(e)})
with rio.open(output_name) as src:
epsg_value = src.crs.to_epsg()
image = src.read(1)
image = np.nan_to_num(image)
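# Collapse incidence values into (near-)binary form: cells with an incidence
# of 1.0 or more are clamped to 1.0 so they can be filtered as "sunlit"
# during vectorisation below.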
image[image >= 1.0] = 1.0
if is_sun:
results = [{
'properties': {'sun': v},
'geometry': s,
'type': 'Feature',
} for i, (s, v) in enumerate(rio_shapes(
image, mask=None, transform=src.transform)) if v == 1.0]
else:
results = [{
'properties': {'sun': v},
'geometry': s,
'type': 'Feature',
} for i, (s, v) in enumerate(rio_shapes(
image, mask=None, transform=src.transform)) if v != 1.0]
with open('/tmp/{}.geojson'.format(uid), 'w') as f:
f.write(json.dumps({"type": "FeatureCollection", "features": results}))
p = subprocess.Popen(
shlex.split(
'ogr2ogr -s_srs "EPSG:{}" -t_srs "EPSG:4326" '
'-f GeoJSON /dev/stdout /tmp/{}.geojson'.format(epsg_value, uid)),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
out, err = p.communicate()
os.remove('/tmp/{}.geojson'.format(uid))
if p.returncode != 0:
print('Error: ', err)
return json.dumps({"message": "Error : {}".format(err)})
return out.decode()
async def make_app(loop, info_dem, addr='0.0.0.0', port='8008'):
logging.basicConfig(level=logging.INFO)
app = web.Application(
loop=loop,
client_max_size=17408**2,
middlewares=[error_middleware],
)
app['logger'] = logging.getLogger("features.main")
app['path_info'] = init_grass(info_dem)
app['info_dem'] = info_dem
app.add_routes([
web.get('/sun', sun_wrapper),
web.get('/sunmask', sunmask_wrapper),
web.get('/viewshed', interviz_wrapper),
web.get('/activity-features/{category}', handler_activity_features),
web.get('/features/{category}/{bbox}', handler_features),
web.post('/features/{category}', handler_features_post),
web.post('/parse-clue', handler_clue),
web.post('/{op}', handler_geom_op),
web.get('/', index),
web.static('/', 'dist/'),
])
handler = app.make_handler()
srv = await loop.create_server(handler, addr, port)
return srv, app, handler
def main(prefix_data='data/osm/'):
filename = glob('data/elevation/*.tif')
info_dem = get_extent_proj(filename[0])
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
loop = asyncio.get_event_loop()
asyncio.set_event_loop(loop)
srv, app, handler = loop.run_until_complete(make_app(loop, info_dem))
app['allowed_binary_predicate'] = {
'intersects',
'equals',
'contains',
'crosses',
'overlaps',
'touches',
'within',
}
app['allowed_geom_operation'] = {
'buffer',
'intersection',
'difference',
'symmetric_difference',
'unary_union',
}
app['allowed_category'] = {
'RIVER',
'LAKE',
'RESERVOIR',
'ROAD',
'PATHWAY',
'POWERLINE',
'PISTE',
'PEAK',
'COL',
'SKILIFT',
'CITY',
'TOWN',
'VILLAGE',
}
app['allowed_activity'] = {
'ski',
'randonnee',
'speleologie',
'escalade',
'vtt',
}
app['lock'] = asyncio.Lock()
app['logger'].info('Opening OSM layers in memory...')
app['layer_RIVER'] = gpd.read_file(
os.path.join(prefix_data, 'eaux_courantes_choucas.geojson'))
app['layer_LAKE'] = gpd.read_file(
os.path.join(prefix_data, 'water_lake_choucas.geojson'))
app['layer_RESERVOIR'] = gpd.read_file(
os.path.join(prefix_data, 'water_reservoir_choucas.geojson'))
app['layer_ROAD'] = gpd.read_file(
os.path.join(prefix_data, 'routes_choucas.geojson'))
app['layer_PATHWAY'] = gpd.read_file(
os.path.join(prefix_data, 'sentiers_choucas.geojson'))
app['layer_POWERLINE'] = gpd.read_file(
os.path.join(prefix_data, 'powerline_choucas.geojson'))
app['layer_PISTE'] = gpd.read_file(
os.path.join(prefix_data, 'pistes_choucas.geojson'))
app['layer_PEAK'] = gpd.read_file(
os.path.join(prefix_data, 'peak_choucas.geojson'))
app['layer_COL'] = gpd.read_file(
os.path.join(prefix_data, 'col_choucas.geojson'))
app['layer_SKILIFT'] = gpd.read_file(
os.path.join(prefix_data, 'cable_skilift_choucas.geojson'))
app['layer_CITY'] = gpd.read_file(
os.path.join(prefix_data, 'city_choucas.geojson'))
app['layer_TOWN'] = gpd.read_file(
os.path.join(prefix_data, 'town_choucas.geojson'))
app['layer_VILLAGE'] = gpd.read_file(
os.path.join(prefix_data, 'village_choucas.geojson'))
# Specific layers related to the activity of the victim
app['layer_activity_ski'] = gpd.read_file(
os.path.join(
prefix_data,
'domaine_station_remontee_ski_choucas_large.geojson'))
app['layer_activity_speleologie'] = gpd.read_file(
os.path.join(
prefix_data,
'cave_entrance_speleologie_choucas_large.geojson'))
app['layer_activity_escalade'] = gpd.read_file(
os.path.join(
prefix_data,
'sport_climbing_escalade_choucas_large.geojson'))
app['layer_activity_vtt'] = gpd.read_file(
os.path.join(
prefix_data,
'mtb_scale_vtt_choucas_large.geojson'))
app['logger'].info('Creating spatial index for OSM layers...')
for lyr_name in app['allowed_category']:
app['index_{}'.format(lyr_name)] = make_index(
[g.bounds for g in app['layer_{}'.format(lyr_name)].geometry])
app['logger'].info('Loading spaCy model for French...')
app['nlp'] = spacy.load('fr_core_news_sm')
app['ProcessPool'] = ProcessPoolExecutor(1)
app['logger'].info('Serving on ' + str(srv.sockets[0].getsockname()))
try:
loop.run_forever()
except KeyboardInterrupt:
pass
finally:
srv.close()
loop.run_until_complete(srv.wait_closed())
loop.run_until_complete(app.shutdown())
loop.run_until_complete(handler.shutdown(60.0))
loop.run_until_complete(app.cleanup())
loop.close()
if __name__ == '__main__':
gc.disable()
main()
| [] | [] | ["GISBASE"] | [] | ["GISBASE"] | python | 1 | 0 | |
share/qt/extract_strings_qt.py
|
#!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
import operator
import os
import sys
OUT_CPP="qt/masterstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *master_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("master-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
| [] | [] | ["XGETTEXT"] | [] | ["XGETTEXT"] | python | 1 | 0 | |
common.go
|
package main
import (
"io/ioutil"
"os"
"path/filepath"
"github.com/codegangsta/cli"
"github.com/golang/glog"
"github.com/golang/protobuf/proto"
)
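// defaultConfigPath returns the path to `file` under $HOME/.config/fproxy,
// unless an explicit path is given as the first command-line argument.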
func defaultConfigPath(c *cli.Context, file string) string {
path := filepath.Join(os.Getenv("HOME"), ".config/fproxy", file)
if args := c.Args(); len(args) > 0 {
path = args[0]
}
return path
}
// Returns the first element of args, or defaultArg if args is an empty slice.
func firstOrDefault(args []string, defaultArg string) string {
if len(args) > 0 {
return args[0]
}
return defaultArg
}
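// readConfig loads a text-format protobuf from configPath into msg and exits
// fatally if the file cannot be read or parsed.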
func readConfig(configPath string, msg proto.Message) {
protoBytes, err := ioutil.ReadFile(configPath)
if err != nil {
glog.Fatalf("Could not read config at path %v: %v", configPath, err)
}
err = proto.UnmarshalText(string(protoBytes), msg)
if err != nil {
glog.Fatalf("Could not parse config at path %v: %v", configPath, err)
}
}
| ["\"HOME\""] | [] | ["HOME"] | [] | ["HOME"] | go | 1 | 0 | |
Scripts/CFListScraper.py
|
from selenium.webdriver import Firefox
from selenium.webdriver.firefox.options import Options
from time import sleep
import os
from dotenv import load_dotenv
load_dotenv()
HANDLE = os.getenv('CF_HANDLE')
PASSWORD = os.getenv('CF_PASSWORD')
CFLIST_BASE_URL='https://codeforces.com/list/'
CF_LOGIN='https://codeforces.com/enter'
class ScrapeList():
def __init__(self):
opts=Options()
opts.add_argument("--headless")
self.browser = Firefox(firefox_options=opts)
self.browser.get(CF_LOGIN)
sleep(1)
self.browser.find_element_by_id('handleOrEmail').send_keys(HANDLE)
self.browser.find_element_by_id('password').send_keys(PASSWORD)
self.browser.find_element_by_class_name('submit').click()
print("Logged in and Ready:")
def list_scrape(self,key: str):
URL = CFLIST_BASE_URL + key
self.browser.get(URL)
if self.browser.current_url != URL :
return ''
names = self.browser.find_elements_by_css_selector('.rated-user')
ans = ';'.join(name.get_attribute('text') for name in names)
return ans
def __del__(self):
self.browser.close()
print("Browser closed:")
| [] | [] | ["CF_PASSWORD", "CF_HANDLE"] | [] | ["CF_PASSWORD", "CF_HANDLE"] | python | 2 | 0 | |
dali_tf_plugin/dali_tf_plugin_utils.py
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import os
import re
import sys
import platform
import fnmatch
# Find file matching `pattern` in `path`
def find(pattern, path):
result = []
for root, dirs, files in os.walk(path):
for name in files:
if fnmatch.fnmatch(name, pattern):
result.append(os.path.join(root, name))
return result
# Get path to python module `module_name`
def get_module_path(module_name):
module_path = ''
for d in sys.path:
possible_path = os.path.join(d, module_name)
# skip current dir as this is plugin dir
if os.path.isdir(possible_path) and len(d) != 0:
module_path = possible_path
break
return module_path
# Get compiler version used to build tensorflow
def get_tf_compiler_version():
tensorflow_path = get_module_path('tensorflow')
tensorflow_libs = find('libtensorflow_framework*so*', tensorflow_path)
if not tensorflow_libs:
return ''
lib = tensorflow_libs[0]
cmd = 'strings -a ' + lib + ' | grep "GCC: ("'
s = str(subprocess.check_output(cmd, shell=True))
version = re.search("GCC:\s*\(.*\)\s*(\d+.\d+).\d+", s).group(1)
return version
# Get current tensorflow version
def get_tf_version():
try:
import pkg_resources
s = pkg_resources.get_distribution("tensorflow-gpu").version
except:
# pkg_resources.get_distribution doesn't work well with conda installed packages
try:
import tensorflow as tf
s = tf.__version__
except:
return ""
version = re.search("(\d+.\d+).\d+", s).group(1)
return version
# Get C++ compiler
def get_cpp_compiler():
return os.environ.get('CXX') or 'g++'
# Get C++ compiler version
def get_cpp_compiler_version():
cmd = get_cpp_compiler() + ' --version | head -1 | grep "[c|g]++ ("'
s = str(subprocess.check_output(cmd, shell=True).strip())
version = re.search("[g|c]\+\+\s*\(.*\)\s*(\d+.\d+).\d+", s).group(1)
return version
# Runs `which` program
def which(program):
try:
return subprocess.check_output('which ' + program, shell=True).strip()
except:
return None
# Checks whether we are inside a conda env
def is_conda_env():
return True if os.environ.get('CONDA_PREFIX') else False
# Get compile and link flags for installed tensorflow
def get_tf_build_flags():
tf_cflags = ''
tf_lflags = ''
try:
import tensorflow as tensorflow
tf_cflags=" ".join(tensorflow.sysconfig.get_compile_flags())
tf_lflags=" ".join(tensorflow.sysconfig.get_link_flags())
except:
tensorflow_path = get_module_path('tensorflow')
if tensorflow_path != '':
tf_cflags=" ".join(["-I" + tensorflow_path + "/include", "-I" + tensorflow_path + "/include/external/nsync/public", "-D_GLIBCXX_USE_CXX11_ABI=0"])
tf_lflags=" ".join(["-L" + tensorflow_path, "-ltensorflow_framework"])
if tf_cflags == '' and tf_lflags == '':
raise ImportError('Could not find Tensorflow. Tensorflow must be installed before installing nvidia-dali-tf-plugin')
return (tf_cflags, tf_lflags)
# Get compile and link flags for installed DALI
def get_dali_build_flags():
dali_cflags = ''
dali_lflags = ''
try:
import nvidia.dali.sysconfig as dali_sc
dali_lib_path = dali_sc.get_lib_dir()
dali_cflags=" ".join(dali_sc.get_compile_flags())
dali_lflags=" ".join(dali_sc.get_link_flags())
except:
dali_path = get_module_path('nvidia/dali')
if dali_path != '':
dali_cflags=" ".join(["-I" + dali_path + "/include", "-D_GLIBCXX_USE_CXX11_ABI=0"])
dali_lflags=" ".join(["-L" + dali_path, "-ldali"])
if dali_cflags == '' and dali_lflags == '':
raise ImportError('Could not find DALI.')
return (dali_cflags, dali_lflags)
# Get compile and link flags for installed CUDA
def get_cuda_build_flags():
cuda_cflags = ''
cuda_lflags = ''
cuda_home = os.environ.get('CUDA_HOME')
if not cuda_home:
cuda_home = '/usr/local/cuda'
cuda_cflags=" ".join(["-I" + cuda_home + "/include"])
cuda_lflags=" ".join([])
return (cuda_cflags, cuda_lflags)
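# Pick the newest available prebuilt plugin whose major version matches the
# requested TF version and whose minor version does not exceed it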
def find_available_prebuilt_tf(requested_version, available_libs):
req_ver_first, req_ver_second = [int(v) for v in requested_version.split('.', 2)]
selected_ver = None
for file in available_libs:
re_match = re.search(".*(\d+)_(\d+).*", file)
if re_match is None:
continue
ver_first, ver_second = [int(v) for v in re_match.groups()]
if ver_first == req_ver_first:
if ver_second <= req_ver_second and (selected_ver is None or selected_ver < (ver_first, ver_second)):
selected_ver = (ver_first, ver_second)
return '.'.join([str(v) for v in selected_ver]) if selected_ver is not None else None
| [] | [] | ["CXX", "CONDA_PREFIX", "CUDA_HOME"] | [] | ["CXX", "CONDA_PREFIX", "CUDA_HOME"] | python | 3 | 0 | |
vvs_app/master_window.py
|
import os
import subprocess
import sys
from qtpy.QtGui import *
from qtpy.QtWidgets import *
from qtpy.QtCore import *
from nodeeditor.node_editor_widget import NodeEditorWidget
from nodeeditor.utils import loadStylesheets
from nodeeditor.node_editor_window import NodeEditorWindow
from vvs_app.editor_settings_wnd import SettingsWidget
from vvs_app.master_editor_wnd import MasterEditorWnd
from vvs_app.master_designer_wnd import MasterDesignerWnd
from vvs_app.editor_node_list import NodeList
from vvs_app.editor_files_wdg import FilesWDG
from vvs_app.editor_user_nodes_list import UserNodesList
from vvs_app.editor_properties_list import PropertiesList
from vvs_app.global_switches import *
from nodeeditor.utils import dumpException
# from vvs_app.nodes_configuration import FUNCTIONS
# Enabling edge validators
from nodeeditor.node_edge import Edge
from nodeeditor.node_edge_validators import (
edge_cannot_connect_two_outputs_or_two_inputs,
edge_cannot_connect_input_and_output_of_same_node
)
# Edge.registerEdgeValidator(edge_validator_debug)
from vvs_app.master_node import MasterNode
from vvs_app.nodes.nodes_configuration import register_Node
Edge.registerEdgeValidator(edge_cannot_connect_two_outputs_or_two_inputs)
Edge.registerEdgeValidator(edge_cannot_connect_input_and_output_of_same_node)
# images for the dark skin
DEBUG = False
class MasterWindow(NodeEditorWindow):
def initUI(self):
# self.qss_theme = "qss/nodeeditor-light.qss"
self.settingsWidget = None
self.qss_theme = self.global_switches.themes[self.global_switches.switches_Dict["Appearance"]["Theme"][0]] # ["Theme"][0]
self.stylesheet_filename = os.path.join(os.path.dirname(__file__), self.qss_theme)
loadStylesheets(
os.path.join(os.path.dirname(__file__), self.qss_theme), self.stylesheet_filename)
self.global_switches.update_font_size(self.global_switches.switches_Dict["Appearance"]["Font Size"])
self.empty_icon = QIcon(".")
if DEBUG: print("Registered nodes:")
self.stackedDisplay = QStackedWidget()
self.graphs_parent_wdg = QMdiArea()
self.CreateLibraryWnd()
# Create Node Designer Window
self.node_designer = MasterDesignerWnd(self)
self.stackedDisplay.addWidget(self.graphs_parent_wdg)
self.stackedDisplay.addWidget(self.node_designer)
self.graphs_parent_wdg.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded)
self.graphs_parent_wdg.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
self.graphs_parent_wdg.setTabsClosable(True)
self.graphs_parent_wdg.setTabsMovable(True)
self.graphs_parent_wdg.subWindowActivated.connect(self.active_graph_switched)
self.setCentralWidget(self.stackedDisplay)
self.windowMapper = QSignalMapper(self)
self.windowMapper.mapped[QWidget].connect(self.setActiveSubWindow)
# Create Welcome Screen and allow user to set the project Directory
self.create_welcome_screen()
# Create Nodes List
self.create_functions_dock()
# Create Files Dock
self.create_files_dock()
# Create Details List Window
self.create_properties_dock()
# Create Variable List
self.create_user_nodes_dock()
self.createActions()
self.create_menus()
self.createStatusBar()
self.update_menus()
self.readSettings()
self.CreateToolBar()
self.setWindowTitle("Vision Visual Scripting")
self.update_libraries_wnd()
self.library_menu.setEnabled(False)
self.node_designer_menu.setEnabled(False)
self.set_actions_shortcuts()
def create_welcome_screen(self):
Elayout = QVBoxLayout()
Elayout.setAlignment(Qt.AlignCenter)
Elayout.setSpacing(20)
self.empty_screen = QWidget()
self.empty_screen.setLayout(Elayout)
user_text = QLabel("Select Your Project Directory...")
user_text.setFont(QFont("Roboto", 14))
w_image = QPixmap("icons/Dark/VVS_White2.png" if self.global_switches.switches_Dict["Appearance"]['Theme'][0] == 'Dark' else "icons/light/VVS_White2.png")
welcome_image = QLabel()
welcome_image.setPixmap(w_image)
self.brows_btn = QPushButton("Brows..")
Elayout.addWidget(welcome_image)
Elayout.addItem(QSpacerItem(120, 120))
Elayout.addWidget(user_text)
Elayout.addWidget(self.brows_btn)
self.stackedDisplay.addWidget(self.empty_screen)
self.switch_display(Welcome=True)
def CreateOfflineDir(self):
self.Offline_Dir = f"C:/Users/{os.getlogin()}/AppData/Roaming/VVS/Offline Library"
if os.path.exists(self.Offline_Dir):
pass
else:
self.Offline_Dir = os.getenv('AppData') + "/VVS/Offline Library"
os.makedirs(self.Offline_Dir)
self.library_offline_list.setRootIndex(self.Model.index(self.Offline_Dir))
def CreateLibraryWnd(self):
self.librariesDock = QDockWidget("Libraries")
self.library_subwnd = QTabWidget()
self.librariesDock.setWidget(self.library_subwnd)
self.librariesDock.setFeatures(self.librariesDock.DockWidgetMovable)
self.addDockWidget(Qt.RightDockWidgetArea, self.librariesDock)
offline_Vlayout = QVBoxLayout()
offline_Vlayout.setContentsMargins(0, 0, 0, 0)
self.Model = QFileSystemModel()
self.Model.setRootPath("")
self.library_offline_list = QTreeView()
self.library_offline_list.setSelectionMode(QAbstractItemView.ExtendedSelection)
self.library_offline_list.setModel(self.Model)
self.library_offline_list.setSortingEnabled(True)
self.library_offline_list.setColumnWidth(0, 130)
self.library_offline_list.sortByColumn(0, Qt.AscendingOrder)
self.library_offline_list.hideColumn(1)
self.library_offline_list.hideColumn(2)
self.library_offline_list.setStyleSheet("color: white")
self.library_offline_list.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.MinimumExpanding)
offline_Vlayout.addWidget(self.library_offline_list)
self.library_online_list = QListWidget()
topVlayout = QVBoxLayout()
search_bar_layout = QHBoxLayout()
self.search_line_edit = QLineEdit()
self.search_btn = QPushButton()
search_bar_layout.addWidget(self.search_line_edit)
search_bar_layout.addWidget(self.search_btn)
self.search_btn.setMaximumSize(30, 30)
self.search_btn.setIcon(QIcon("icons/Light/search.png"))
self.search_line_edit.setMinimumHeight(30)
topVlayout.addLayout(search_bar_layout)
topVlayout.addWidget(self.library_online_list)
online_widget = QWidget()
online_widget.setLayout(topVlayout)
offline_widget = QWidget()
offline_widget.setLayout(offline_Vlayout)
self.library_subwnd.addTab(offline_widget, " Offline ")
self.library_subwnd.addTab(online_widget, " Online ")
self.CreateOfflineDir()
self.library_offline_list.clicked.connect(self.ViewSelectedFiles)
def ViewSelectedFiles(self):
all_files = []
selected_files = self.library_offline_list.selectedIndexes()
for file_name in selected_files:
file_path = self.Model.filePath(file_name)
if file_path.endswith(".json"):
if not all_files.__contains__(file_path):
all_files.append(file_path)
# print(all_files)
self.on_file_open(all_files)
def active_graph_switched(self):
self.update_menus()
if self.currentNodeEditor():
self.VEStackedWdg.setCurrentWidget(self.currentNodeEditor().scene.user_nodes_wdg)
def switch_display(self, Welcome=False, Editor=False, Designer=False, Library=False):
# Use the Argument To Force Activate the Specified Window
if Editor:
self.stackedDisplay.setCurrentIndex(0)
self.library_btn.setChecked(False)
self.node_editor_btn.setChecked(True)
self.node_designer_btn.setChecked(False)
return
if Library:
self.stackedDisplay.setCurrentIndex(0)
self.library_btn.setChecked(True)
self.node_editor_btn.setChecked(False)
self.node_designer_btn.setChecked(False)
return
elif Designer:
self.stackedDisplay.setCurrentIndex(1)
self.library_btn.setChecked(False)
self.node_editor_btn.setChecked(False)
self.node_designer_btn.setChecked(True)
self.node_editor_menu.setEnabled(False)
self.library_menu.setEnabled(False)
self.toolbar_library.setChecked(False)
self.librariesDock.setVisible(self.toolbar_library.isChecked())
return
elif Welcome:
self.stackedDisplay.setCurrentIndex(2)
return
def CreateToolBar(self):
# Create Tools self.tools_bar
self.tools_bar = QToolBar("Tools", self)
self.tools_bar.setIconSize(QSize(20, 20))
self.tools_bar.setFloatable(False)
# Add self.tools_bar To Main Window
self.addToolBar(self.tools_bar)
# Add and connect self.settingsBtn
self.settingsBtn = QAction(QIcon(self.global_switches.get_icon("settings.png")), "&Open Settings Window", self)
self.settingsBtn.setIconText("settings.png")
self.settingsBtn.setCheckable(True)
self.settingsBtn.triggered.connect(self.onSettingsOpen)
self.settingsBtn.setShortcut(QKeySequence(self.global_switches.switches_Dict["Key Mapping"]["Settings Window"]))
self.tools_bar.addAction(self.settingsBtn)
self.actions_list["Settings Window"] = self.settingsBtn
# Add Separator
self.tools_bar.addSeparator()
# Add and connect self.node_editor_btn
self.node_editor_btn = QAction(QIcon(self.global_switches.get_icon("edit.png")), "&Node Editor", self)
self.node_editor_btn.setIconText("edit.png")
self.node_editor_btn.setCheckable(True)
self.node_editor_btn.triggered.connect(self.activate_editor_mode)
self.tools_bar.addAction(self.node_editor_btn)
self.actions_list["Node Editor Window"] = self.node_editor_btn
# Add and connect self.node_designer_btn
self.node_designer_btn = QAction(QIcon(self.global_switches.get_icon("node design.png")), "&Node Designer", self)
self.node_designer_btn.setIconText("node design.png")
self.node_designer_btn.setEnabled(False)
self.node_designer_btn.setCheckable(True)
self.node_designer_btn.triggered.connect(self.activate_designer_mode)
self.tools_bar.addAction(self.node_designer_btn)
self.actions_list["Node Designer Window"] = self.node_designer_btn
# Add and connect self.library_btn
self.library_btn = QAction(QIcon(self.global_switches.get_icon("library.png")), "&Library", self)
self.library_btn.setIconText("library.png")
self.library_btn.setCheckable(True)
self.library_btn.triggered.connect(self.activate_library_mode)
self.library_btn.setShortcut(QKeySequence("`"))
self.tools_bar.addAction(self.library_btn)
self.actions_list["Library Window"] = self.library_btn
# Add Separator
self.tools_bar.addSeparator()
# # Add Separator
# self.tools_bar.addSeparator()
def onSettingsOpen(self):
if self.settingsWidget:
if self.settingsWidget.isHidden():
self.settingsWidget.show()
self.settingsBtn.setChecked(True)
else:
self.settingsWidget.hide()
else:
self.settingsWidget = SettingsWidget(masterRef=self)
self.global_switches.update_font_size(self.global_switches.switches_Dict["Appearance"]["Font Size"])
self.settingsWidget.show()
self.settingsBtn.setChecked(True)
self.settingsWidget.setWindowTitle("Settings")
self.settingsWidget.setGeometry(300, 150, 1200, 800)
def closeEvent(self, event):
self.graphs_parent_wdg.closeAllSubWindows()
if self.graphs_parent_wdg.currentSubWindow():
event.ignore()
else:
self.writeSettings()
event.accept()
# hacky fix for PyQt 5.14.x
import sys
sys.exit(0)
def createActions(self):
super().createActions()
self.actClose = QAction("Cl&ose", self, statusTip="Close the active window", triggered=self.graphs_parent_wdg.closeActiveSubWindow)
self.actCloseAll = QAction("Close &All", self, statusTip="Close all the windows", triggered=self.graphs_parent_wdg.closeAllSubWindows)
self.actTile = QAction("&Tile", self, statusTip="Tile the windows", triggered=self.graphs_parent_wdg.tileSubWindows)
self.actCascade = QAction("&Cascade", self, statusTip="Cascade the windows", triggered=self.graphs_parent_wdg.cascadeSubWindows)
self.actNext = QAction("Ne&xt", self, shortcut=QKeySequence.NextChild, statusTip="Move the focus to the next window", triggered=self.graphs_parent_wdg.activateNextSubWindow)
self.actPrevious = QAction("Pre&vious", self, shortcut=QKeySequence.PreviousChild, statusTip="Move the focus to the previous window", triggered=self.graphs_parent_wdg.activatePreviousSubWindow)
self.actSeparator = QAction(self)
self.actSeparator.setSeparator(True)
self.actAbout = QAction("&About", self, statusTip="Show the application's About box", triggered=self.about)
self.actDoc = QAction("&Documentation", self, triggered=self.open_doc)
self.actions_list = {"New Graph": self.actNew,
"Open": self.actOpen,
"Set Project Location": self.actSetProjectDir,
"Save": self.actSave,
"Save As": self.actSaveAs,
"Exit": self.actExit,
"Undo": self.actUndo,
"Redo": self.actRedo,
"Cut": self.actCut,
"Copy": self.actCopy,
"Paste": self.actPaste,
"Delete": self.actDelete,
"Select All": self.actSelectAll,
}
def set_actions_shortcuts(self):
shortcuts = self.global_switches.switches_Dict["Key Mapping"]
for act in self.actions_list:
if shortcuts.__contains__(act):
self.actions_list[act].setShortcut(shortcuts[act])
def open_doc(self):
subprocess.Popen('hh.exe "VVS-Help.chm"')
def currentNodeEditor(self):
""" we're returning NodeEditorWidget here... """
activeSubWindow = self.graphs_parent_wdg.activeSubWindow()
if activeSubWindow:
return activeSubWindow.widget()
else:
return None
def on_new_graph_tab(self):
# Overrides Node Editor Window > actNew action
try:
subwnd = self.new_graph_tab()
all_names = []
for item in self.graphs_parent_wdg.subWindowList():
all_names.append(item.widget().windowTitle())
self.files_widget.new_graph_name(subwnd, all_names)
except Exception as e:
dumpException(e)
def on_file_open(self, all_files=False):
if all_files == False:
file_names, filter = QFileDialog.getOpenFileNames(self, 'Open graph from file',
self.files_widget.Project_Directory,
self.getFileDialogFilter())
else:
file_names = all_files
try:
for file_name in file_names:
if file_name:
if self.findMdiChild(file_name):
subwnd = self.findMdiChild(file_name)
self.graphs_parent_wdg.setActiveSubWindow(subwnd)
else:
# We need to create new subWindow and open the file
subwnd = self.new_graph_tab()
node_editor = subwnd.widget()
if node_editor.fileLoad(file_name):
self.statusBar().showMessage("File %s loaded" % file_name, 5000)
node_editor.setWindowTitle(os.path.splitext(os.path.basename(file_name))[0])
else:
node_editor.close()
except Exception as e:
dumpException(e)
def create_menus(self):
super().create_menus()
self.node_editor_menu = self.menuBar().addMenu("&Node Editor")
self.library_menu = self.menuBar().addMenu("&Library")
self.node_designer_menu = self.menuBar().addMenu("&Node Designer")
self.update_window_menu()
self.helpMenu = self.menuBar().addMenu("&Help")
self.helpMenu.addAction(self.actDoc)
self.helpMenu.addAction(self.actAbout)
self.editMenu.aboutToShow.connect(self.update_edit_menu)
def update_menus(self):
# print("update Menus")
active = self.currentNodeEditor()
hasMdiChild = (active is not None)
# Update File Menu
self.actSave.setEnabled(hasMdiChild)
self.actSaveAs.setEnabled(hasMdiChild)
# Update Node Editor Menu
self.actSelectAll.setEnabled(hasMdiChild)
self.actClose.setEnabled(hasMdiChild)
self.actCloseAll.setEnabled(hasMdiChild)
self.actTile.setEnabled(hasMdiChild)
self.actCascade.setEnabled(hasMdiChild)
self.actNext.setEnabled(hasMdiChild)
self.actPrevious.setEnabled(hasMdiChild)
self.actSeparator.setVisible(hasMdiChild)
# Update Edit Menu
self.update_edit_menu()
def update_edit_menu(self):
try:
# print("update Edit Menu")
active = self.currentNodeEditor()
hasMdiChild = (active is not None)
self.actPaste.setEnabled(hasMdiChild)
self.actCut.setEnabled(hasMdiChild and active.hasSelectedItems())
self.actSelectAll.setEnabled(hasMdiChild)
self.actCopy.setEnabled(hasMdiChild and active.hasSelectedItems())
self.actDelete.setEnabled(hasMdiChild and active.hasSelectedItems())
self.actUndo.setEnabled(hasMdiChild and active.canUndo())
self.actRedo.setEnabled(hasMdiChild and active.canRedo())
except Exception as e:
dumpException(e)
def update_window_menu(self):
self.toolbar_library = self.library_menu.addAction("Libraries Window")
self.toolbar_library.setCheckable(True)
self.toolbar_library.triggered.connect(self.update_libraries_wnd)
self.toolbar_library.setChecked(False)
self.toolbar_properties = self.node_editor_menu.addAction("Properties Window")
self.toolbar_properties.setCheckable(True)
self.toolbar_properties.triggered.connect(self.update_properties_wnd)
self.toolbar_properties.setChecked(True)
self.toolbar_files = self.node_editor_menu.addAction("Project Files Window")
self.toolbar_files.setCheckable(True)
self.toolbar_files.triggered.connect(self.update_files_wnd)
self.toolbar_files.setChecked(True)
self.toolbar_events_vars = self.node_editor_menu.addAction("Variables & Events Window")
self.toolbar_events_vars.setCheckable(True)
self.toolbar_events_vars.triggered.connect(self.update_events_vars_wnd)
self.toolbar_events_vars.setChecked(True)
self.toolbar_functions = self.node_editor_menu.addAction("Functions Window")
self.toolbar_functions.setCheckable(True)
self.toolbar_functions.triggered.connect(self.update_functions_wnd)
self.toolbar_functions.setChecked(True)
self.node_editor_menu.addSeparator()
self.node_editor_menu.addAction(self.actClose)
self.node_editor_menu.addAction(self.actCloseAll)
self.node_editor_menu.addSeparator()
self.node_editor_menu.addAction(self.actTile)
# self.windowMenu.addAction(self.actCascade)
self.node_editor_menu.addSeparator()
self.node_editor_menu.addAction(self.actNext)
self.node_editor_menu.addAction(self.actPrevious)
self.node_editor_menu.addAction(self.actSeparator)
windows = self.graphs_parent_wdg.subWindowList()
self.actSeparator.setVisible(len(windows) != 0)
for i, window in enumerate(windows):
child = window.widget()
text = "%d %s" % (i + 1, child.getUserFriendlyFilename())
if i < 9:
text = '&' + text
action = self.node_editor_menu.addAction(text)
action.setCheckable(True)
action.setChecked(child is self.currentNodeEditor())
action.triggered.connect(self.windowMapper.map)
self.windowMapper.setMapping(action, window)
def update_functions_wnd(self):
self.toolbar_functions.setChecked(self.toolbar_functions.isChecked())
self.functionsDock.setVisible(self.toolbar_functions.isChecked())
def update_events_vars_wnd(self):
self.toolbar_events_vars.setChecked(self.toolbar_events_vars.isChecked())
self.varsEventsDock.setVisible(self.toolbar_events_vars.isChecked())
def update_properties_wnd(self):
self.toolbar_properties.setChecked(self.toolbar_properties.isChecked())
self.proprietiesDock.setVisible(self.toolbar_properties.isChecked())
def update_libraries_wnd(self):
self.toolbar_library.setChecked(self.toolbar_library.isChecked())
self.librariesDock.setVisible(self.toolbar_library.isChecked())
def update_files_wnd(self):
self.toolbar_files.setChecked(self.toolbar_files.isChecked())
self.filesDock.setVisible(self.toolbar_files.isChecked())
def activate_editor_mode(self):
if self.graphs_parent_wdg.subWindowList():
self.switch_display(Editor=True)
else:
self.switch_display(Welcome=True)
self.node_editor_menu.setEnabled(True)
self.library_menu.setEnabled(False)
self.toolbar_library.setChecked(False)
self.librariesDock.setVisible(self.toolbar_library.isChecked())
self.toolbar_functions.setChecked(True)
self.functionsDock.setVisible(self.toolbar_functions.isChecked())
self.toolbar_files.setChecked(True)
self.filesDock.setVisible(self.toolbar_files.isChecked())
self.toolbar_properties.setChecked(True)
self.proprietiesDock.setVisible(self.toolbar_properties.isChecked())
def activate_designer_mode(self):
self.switch_display(Designer=True)
def activate_library_mode(self):
if self.graphs_parent_wdg.subWindowList():
self.switch_display(Library=True)
else:
self.switch_display(Welcome=True)
# Handle buttons state
self.node_editor_menu.setEnabled(False)
self.library_menu.setEnabled(True)
self.toolbar_library.setChecked(True)
self.librariesDock.setVisible(self.toolbar_library.isChecked())
self.toolbar_files.setChecked(False)
self.filesDock.setVisible(self.toolbar_files.isChecked())
def create_functions_dock(self):
self.functionsDock = QDockWidget("Functions")
self.nodesListWidget = NodeList()
self.functionsDock.setWidget(self.nodesListWidget)
self.functionsDock.setFeatures(self.functionsDock.DockWidgetMovable)
self.addDockWidget(Qt.LeftDockWidgetArea, self.functionsDock)
def create_files_dock(self):
self.brows_btn.clicked.connect(self.files_widget.set_project_folder)
# self.files_widget.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Maximum)
self.filesDock = QDockWidget("Project Files")
self.filesDock.setWidget(self.files_widget)
self.filesDock.setFeatures(self.filesDock.DockWidgetMovable)
self.addDockWidget(Qt.RightDockWidgetArea, self.filesDock)
def create_properties_dock(self):
self.proprietiesWdg = PropertiesList(master_ref=self)
self.proprietiesDock = QDockWidget("Properties")
self.proprietiesDock.setWidget(self.proprietiesWdg)
self.proprietiesDock.setFeatures(self.proprietiesDock.DockWidgetMovable)
self.addDockWidget(Qt.RightDockWidgetArea, self.proprietiesDock)
def create_user_nodes_dock(self):
self.varsEventsDock = QDockWidget("Variables & Events")
self.VEStackedWdg = QStackedWidget()
self.VEStackedWdg.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.MinimumExpanding)
self.varsEventsDock.setWidget(self.VEStackedWdg)
self.varsEventsDock.setFeatures(self.varsEventsDock.DockWidgetMovable)
self.addDockWidget(Qt.LeftDockWidgetArea, self.varsEventsDock)
def delete_user_nodes_wgd(self, ref):
self.VEStackedWdg.removeWidget(ref)
def createStatusBar(self):
self.statusBar().showMessage("Ready")
def before_window_close(self):
self.proprietiesWdg.clear()
def on_before_save_file(self):
self.proprietiesWdg.clear()
def new_graph_tab(self):
# This Check Prevents The Parent graph from opening in Cascade view-mode
if not self.graphs_parent_wdg.subWindowList():
self.switch_display(Editor=True)
node_editor = MasterEditorWnd(masterRef=self)
VEL = UserNodesList(scene=node_editor.scene, propertiesWdg=self.proprietiesWdg)
self.VEStackedWdg.addWidget(VEL)
self.VEStackedWdg.setCurrentWidget(VEL)
node_editor.scene.user_nodes_wdg = VEL
# node_editor.scene.masterRef = self
# node_editor.scene.history.masterRef = self
subwnd = QMdiSubWindow()
subwnd.setAttribute(Qt.WA_DeleteOnClose, True)
subwnd.setWidget(node_editor)
self.graphs_parent_wdg.addSubWindow(subwnd)
subwnd.setWindowIcon(self.empty_icon)
node_editor.scene.addItemSelectedListener(self.update_edit_menu)
node_editor.scene.addItemsDeselectedListener(self.update_edit_menu)
node_editor.scene.history.addHistoryModifiedListener(self.update_edit_menu)
node_editor.addCloseEventListener(self.on_sub_wnd_close)
self.graphs_parent_wdg.setViewMode(QMdiArea.TabbedView)
subwnd.show()
return subwnd
def on_sub_wnd_close(self, widget, event):
existing = self.findMdiChild(widget.filename)
self.graphs_parent_wdg.setActiveSubWindow(existing)
if self.ask_save():
event.accept()
self.delete_user_nodes_wgd(widget.scene.user_nodes_wdg)
if (len(self.graphs_parent_wdg.subWindowList())-1) == 0:
self.switch_display(Welcome=True)
else:
self.switch_display(Editor=True)
self.before_window_close()
else:
event.ignore()
def findMdiChild(self, filename):
for window in self.graphs_parent_wdg.subWindowList():
if window.widget().filename == filename:
return window
return None
def setActiveSubWindow(self, window):
if window:
self.graphs_parent_wdg.setActiveSubWindow(window)
def get_QWidget_content(self, widget):
if [QKeySequenceEdit].__contains__(type(widget)):
return widget.keySequence().toString()
elif [QSpinBox, QDoubleSpinBox].__contains__(type(widget)):
return widget.value()
elif [QLineEdit, QLabel].__contains__(type(widget)):
return widget.text()
elif [QTextEdit].__contains__(type(widget)):
return widget.toPlainText()
elif [QRadioButton, QCheckBox].__contains__(type(widget)):
return widget.isChecked()
elif [QComboBox].__contains__(type(widget)):
current = widget.currentText()
widget.removeItem(widget.currentIndex())
content_list = [current]
for index in range(widget.__len__()):
content_list.append(widget.itemText(index))
widget.clear()
widget.addItems(content_list)
return content_list
else:
print(widget, "Widget Not Supported")
return None
def set_QWidget_content(self, widget, new_value):
if [QKeySequenceEdit].__contains__(type(widget)):
widget.setKeySequence(new_value)
elif [QSpinBox, QDoubleSpinBox].__contains__(type(widget)):
widget.setValue(new_value)
elif [QLineEdit, QLabel, QTextEdit].__contains__(type(widget)):
widget.setText(new_value)
elif [QRadioButton, QCheckBox].__contains__(type(widget)):
widget.setChecked(new_value)
elif [QComboBox].__contains__(type(widget)):
widget.addItems(new_value)
else:
print(widget, "Widget Not Supported")
def about(self):
QMessageBox.about(self, "About Calculator NodeEditor Example",
"The <b>Calculator NodeEditor</b> example demonstrates how to write multiple "
"document interface applications using PyQt5 and NodeEditor. For more information visit: "
"<a href='https://www.blenderfreak.com/'>www.BlenderFreak.com</a>")
| [] | [] | ["AppData"] | [] | ["AppData"] | python | 1 | 0 | |
src/main/java/dev/jbang/cli/App.java
|
package dev.jbang.cli;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;
import java.nio.file.StandardOpenOption;
import java.nio.file.attribute.PosixFilePermission;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import dev.jbang.Cache;
import dev.jbang.Settings;
import dev.jbang.catalog.CatalogUtil;
import dev.jbang.dependencies.DependencyUtil;
import dev.jbang.net.JdkManager;
import dev.jbang.source.RunContext;
import dev.jbang.source.Source;
import dev.jbang.util.UnpackUtil;
import dev.jbang.util.Util;
import picocli.CommandLine;
@CommandLine.Command(name = "app", description = "Manage scripts installed on the user's PATH as commands.", subcommands = {
AppInstall.class, AppList.class,
AppUninstall.class, AppSetup.class })
public class App {
public static void deleteCommandFiles(String name) {
try (Stream<Path> files = Files.list(Settings.getConfigBinDir())) {
files .filter(f -> f.getFileName().toString().equals(name)
|| f.getFileName().toString().startsWith(name + "."))
.forEach(f -> Util.deletePath(f, true));
} catch (IOException e) {
// Ignore
}
}
}
@CommandLine.Command(name = "install", description = "Install a script as a command.")
class AppInstall extends BaseCommand {
private static final String jbangUrl = "https://www.jbang.dev/releases/latest/download/jbang.zip";
@CommandLine.Option(names = {
"--native" }, description = "Enable native build/run")
boolean benative;
@CommandLine.Option(names = {
"--force" }, description = "Force re-installation")
boolean force;
@CommandLine.Option(names = { "--name" }, description = "A name for the command")
String name;
@CommandLine.Parameters(paramLabel = "scriptRef", description = "A file or URL to a Java code file or an alias")
String scriptRef;
@Override
public Integer doCall() {
boolean installed = false;
try {
if (scriptRef.equals("jbang")) {
if (name != null && !"jbang".equals(name)) {
throw new IllegalArgumentException(
"It's not possible to install jbang with a different name");
}
Util.setFresh(true);// TODO: workaround as url cache is not honoring changed redirects
installed = installJbang(force);
} else {
if ("jbang".equals(name)) {
throw new IllegalArgumentException("jbang is a reserved name.");
}
if (name != null && !CatalogUtil.isValidName(name)) {
throw new IllegalArgumentException("Not a valid command name: '" + name + "'");
}
installed = install(name, scriptRef, force, benative);
}
if (installed) {
if (AppSetup.needsSetup()) {
return AppSetup.setup(AppSetup.guessWithJava(), false, false);
}
}
} catch (IOException e) {
throw new ExitException(EXIT_INTERNAL_ERROR, "Could not install command", e);
}
return EXIT_OK;
}
public static boolean install(String name, String scriptRef, boolean force, boolean benative) throws IOException {
Path binDir = Settings.getConfigBinDir();
if (!force && name != null && existScripts(binDir, name)) {
Util.infoMsg("A script with name '" + name + "' already exists, use '--force' to install anyway.");
return false;
}
RunContext ctx = RunContext.empty();
Source src = Source.forResource(scriptRef, ctx);
if (name == null) {
name = CatalogUtil.nameFromRef(ctx.getOriginalRef());
if (!force && existScripts(binDir, name)) {
Util.infoMsg("A script with name '" + name + "' already exists, use '--force' to install anyway.");
return false;
}
}
if (ctx.getAlias() == null && !DependencyUtil.looksLikeAGav(scriptRef) && !src.getResourceRef().isURL()) {
scriptRef = src.getResourceRef().getFile().getAbsolutePath();
}
installScripts(name, scriptRef, benative);
Util.infoMsg("Command installed: " + name);
return true;
}
private static boolean existScripts(Path binDir, String name) {
return Files.exists(binDir.resolve(name)) || Files.exists(binDir.resolve(name + ".cmd"))
|| Files.exists(binDir.resolve(name + ".ps1"));
}
private static void installScripts(String name, String scriptRef, boolean benative) throws IOException {
Path binDir = Settings.getConfigBinDir();
binDir.toFile().mkdirs();
if (Util.isWindows()) {
installCmdScript(binDir.resolve(name + ".cmd"), scriptRef, benative);
installPSScript(binDir.resolve(name + ".ps1"), scriptRef, benative);
} else {
installShellScript(binDir.resolve(name), scriptRef, benative);
}
}
private static void installShellScript(Path file, String scriptRef, boolean benative) throws IOException {
List<String> lines = Arrays.asList(
"#!/bin/sh",
"exec jbang run" + (benative ? " --native " : " ") + scriptRef + " \"$@\"");
Files.write(file, lines, StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.CREATE);
setExecutable(file);
}
private static void setExecutable(Path file) {
final Set<PosixFilePermission> permissions;
try {
permissions = Files.getPosixFilePermissions(file);
permissions.add(PosixFilePermission.OWNER_EXECUTE);
permissions.add(PosixFilePermission.GROUP_EXECUTE);
Files.setPosixFilePermissions(file, permissions);
} catch (UnsupportedOperationException | IOException e) {
throw new ExitException(EXIT_GENERIC_ERROR, "Couldn't mark script as executable: " + file, e);
}
}
private static void installCmdScript(Path file, String scriptRef, boolean benative) throws IOException {
List<String> lines = Arrays.asList(
"@echo off",
"jbang run" + (benative ? " --native " : " ") + scriptRef + " %*");
Files.write(file, lines, StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.CREATE);
}
private static void installPSScript(Path file, String scriptRef, boolean benative) throws IOException {
List<String> lines = Collections.singletonList(
"jbang run" + (benative ? " --native " : " ") + scriptRef + " $args");
Files.write(file, lines, StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.CREATE);
}
public static boolean installJbang(boolean force) throws IOException {
Path binDir = Settings.getConfigBinDir();
boolean managedJbang = Files.exists(binDir.resolve("jbang.jar"));
if (!force && (managedJbang || Util.searchPath("jbang") != null)) {
Util.infoMsg("jbang is already available, re-run with --force to install anyway.");
return false;
}
if (force || !managedJbang) {
// Download Jbang and unzip to ~/.jbang/bin/
Util.infoMsg("Downloading and installing jbang...");
Path zipFile = Util.downloadAndCacheFile(jbangUrl);
Path urlsDir = Settings.getCacheDir(Cache.CacheClass.urls);
Util.deletePath(urlsDir.resolve("jbang"), true);
UnpackUtil.unpack(zipFile, urlsDir);
App.deleteCommandFiles("jbang");
Path fromDir = urlsDir.resolve("jbang").resolve("bin");
copyJbangFiles(fromDir, binDir);
} else {
Util.infoMsg("jbang is already installed.");
}
return true;
}
private static void copyJbangFiles(Path from, Path to) throws IOException {
to.toFile().mkdirs();
try (Stream<Path> files = Files.list(from)) {
files .map(from::relativize)
.forEach(f -> {
try {
Path fromp = from.resolve(f);
Path top = to.resolve(f);
if (Util.isWindows() && f.endsWith("jbang.jar") && Files.isRegularFile(top)) {
top = Paths.get("jbang.jar.new");
}
Files.copy(fromp, top, StandardCopyOption.REPLACE_EXISTING,
StandardCopyOption.COPY_ATTRIBUTES);
} catch (IOException e) {
throw new ExitException(EXIT_GENERIC_ERROR, "Could not copy " + f.toString(), e);
}
});
}
}
}
@CommandLine.Command(name = "list", description = "Lists installed commands.")
class AppList extends BaseCommand {
@Override
public Integer doCall() {
listCommandFiles().forEach(System.out::println);
return EXIT_OK;
}
private static List<String> listCommandFiles() {
try (Stream<Path> files = Files.list(Settings.getConfigBinDir())) {
return files
.map(AppList::baseFileName)
.distinct()
.sorted()
.collect(Collectors.toList());
} catch (IOException e) {
return Collections.emptyList();
}
}
private static String baseFileName(Path file) {
String nm = file.getFileName().toString();
int p = nm.lastIndexOf('.');
if (p > 0) {
nm = nm.substring(0, p);
}
return nm;
}
}
@CommandLine.Command(name = "uninstall", description = "Removes a previously installed command.")
class AppUninstall extends BaseCommand {
@CommandLine.Parameters(paramLabel = "name", index = "0", description = "The name of the command", arity = "1")
String name;
@Override
public Integer doCall() {
if (commandFilesExist(name)) {
App.deleteCommandFiles(name);
Util.infoMsg("Command removed: " + name);
return EXIT_OK;
} else {
Util.infoMsg("Command not found: " + name);
return EXIT_INVALID_INPUT;
}
}
private static boolean commandFilesExist(String name) {
try (Stream<Path> files = Files.list(Settings.getConfigBinDir())) {
return files.anyMatch(f -> f.getFileName().toString().equals(name)
|| f.getFileName().toString().startsWith(name + "."));
} catch (IOException e) {
return false;
}
}
}
@CommandLine.Command(name = "setup", description = "Make jbang commands available for the user.")
class AppSetup extends BaseCommand {
@CommandLine.Option(names = {
"--java" }, description = "Add Jbang's Java to the user's environment as well", negatable = true)
Boolean java;
@CommandLine.Option(names = {
"--force" }, description = "Force setup to be performed even when existing configuration has been detected")
boolean force;
@Override
public Integer doCall() {
boolean withJava;
if (java == null) {
withJava = guessWithJava();
} else {
withJava = java;
}
return setup(withJava, force, true);
}
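	/**
	 * Returns true when Jbang's bin folder is not yet on the user's PATH.
	 */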
public static boolean needsSetup() {
String envPath = System.getenv("PATH");
Path binDir = Settings.getConfigBinDir();
return !envPath.toLowerCase().contains(binDir.toString().toLowerCase());
}
/**
* Makes a best guess if JAVA_HOME should be set by us or not. Returns true if
* no JAVA_HOME is set and javac wasn't found on the PATH and we have at least
* one managed JDK installed by us. Otherwise it returns false.
*/
public static boolean guessWithJava() {
boolean withJava;
int v = JdkManager.getDefaultJdk();
String javaHome = System.getenv("JAVA_HOME");
Path javacCmd = Util.searchPath("javac");
withJava = (v > 0
&& (javaHome == null
|| javaHome.isEmpty()
|| javaHome.toLowerCase().startsWith(Settings.getConfigDir().toString().toLowerCase()))
&& (javacCmd == null || javacCmd.startsWith(Settings.getConfigBinDir())));
return withJava;
}
public static int setup(boolean withJava, boolean force, boolean chatty) {
Path jdkHome = null;
if (withJava) {
int v = JdkManager.getDefaultJdk();
if (v < 0) {
Util.infoMsg("No default JDK set, use 'jbang jdk default <version>' to set one.");
return EXIT_UNEXPECTED_STATE;
}
jdkHome = Settings.getCurrentJdkDir();
}
Path binDir = Settings.getConfigBinDir();
binDir.toFile().mkdirs();
boolean changed = false;
String cmd = "";
// Permanently add Jbang's bin folder to the user's PATH
if (Util.isWindows()) {
String env = "";
if (withJava) {
String newPath = jdkHome.resolve("bin") + ";";
env += " ; [Environment]::SetEnvironmentVariable('Path', '" + newPath + "' + " +
"[Environment]::GetEnvironmentVariable('Path', [EnvironmentVariableTarget]::User), " +
"[EnvironmentVariableTarget]::User)";
env += " ; [Environment]::SetEnvironmentVariable('JAVA_HOME', '" + jdkHome + "', " +
"[EnvironmentVariableTarget]::User)";
}
if (force || needsSetup()) {
// Create the command to change the user's PATH
String newPath = binDir + ";";
env += " ; [Environment]::SetEnvironmentVariable('Path', '" + newPath + "' + " +
"[Environment]::GetEnvironmentVariable('Path', [EnvironmentVariableTarget]::User), " +
"[EnvironmentVariableTarget]::User)";
}
if (!env.isEmpty()) {
if (Util.isUsingPowerShell()) {
cmd = "{ " + env + " }";
} else {
cmd = "powershell -NoProfile -ExecutionPolicy Bypass -NonInteractive -Command \"" + env + "\" & ";
}
changed = true;
}
} else {
if (force || needsSetup() || withJava) {
// Update shell startup scripts
Path bashRcFile = getHome().resolve(".bashrc");
changed = changeScript(binDir, jdkHome, bashRcFile) || changed;
Path zshRcFile = getHome().resolve(".zshrc");
changed = changeScript(binDir, jdkHome, zshRcFile) || changed;
}
}
if (changed) {
Util.infoMsg("Setting up Jbang environment...");
} else if (chatty) {
Util.infoMsg("Jbang environment is already set up.");
}
if (Util.isWindows()) {
if (changed) {
if (Util.isUsingPowerShell()) {
System.err.println("Please start a new PowerShell for changes to take effect");
} else {
System.err.println("Please open a new CMD window for changes to take effect");
}
}
} else {
if (changed) {
System.out.println("Please start a new Shell for changes to take effect");
}
}
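// Print the generated command and return EXIT_EXECUTE so that the calling
// wrapper script (rather than this JVM) applies the environment changes.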
if (!cmd.isEmpty()) {
System.out.println(cmd);
return EXIT_EXECUTE;
} else {
return EXIT_OK;
}
}
private static boolean changeScript(Path binDir, Path javaHome, Path bashFile) {
try {
// Detect if Jbang has already been set up before
boolean jbangFound = Files.exists(bashFile)
&& Files.lines(bashFile)
.anyMatch(ln -> ln.trim().startsWith("#") && ln.toLowerCase().contains("jbang"));
if (!jbangFound) {
// Add lines to add Jbang to PATH
String lines = "\n# Add Jbang to environment\n" +
"alias j!=jbang\n";
if (javaHome != null) {
lines += "export PATH=\"" + toHomePath(binDir) + ":" + toHomePath(javaHome.resolve("bin"))
+ ":$PATH\"\n" +
"export JAVA_HOME=" + toHomePath(javaHome) + "\n";
} else {
lines += "export PATH=\"" + toHomePath(binDir) + ":$PATH\"\n";
}
Files.write(bashFile, lines.getBytes(), StandardOpenOption.APPEND, StandardOpenOption.CREATE);
Util.verboseMsg("Added Jbang setup lines " + bashFile);
return true;
}
} catch (IOException e) {
Util.verboseMsg("Couldn't change script: " + bashFile, e);
}
return false;
}
private static Path getHome() {
return Paths.get(System.getProperty("user.home"));
}
private static String toHomePath(Path path) {
Path home = getHome();
if (path.startsWith(home)) {
if (Util.isWindows()) {
return "%userprofile%\\" + home.relativize(path);
} else {
return "$HOME/" + home.relativize(path);
}
} else {
return path.toString();
}
}
}
|
[
"\"PATH\"",
"\"JAVA_HOME\""
] |
[] |
[
"JAVA_HOME",
"PATH"
] |
[]
|
["JAVA_HOME", "PATH"]
|
java
| 2 | 0 | |
scripts/config.py
|
#!/usr/bin/env python
# Copyright (c) 2011 Stanford University
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR(S) DISCLAIM ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHORS BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
This module defines a collection of variables that specify site-specific
configuration information such as names of RAMCloud hosts and the location
of RAMCloud binaries. This should be the only file you have to modify to
run RAMCloud scripts at your site.
"""
from common import captureSh
import os
import re
import subprocess
import sys
__all__ = ['coordinator_port', 'default_disk1','default_disk2', 'git_branch',
'hosts', 'obj_dir', 'obj_path', 'scripts_path', 'second_backup_port',
'server_port', 'top_path']
# git_branch is the name of the current git branch, which is used
# for purposes such as computing objDir.
try:
git_branch = re.search('^refs/heads/(.*)$',
captureSh('git symbolic-ref -q HEAD 2>/dev/null'))
except subprocess.CalledProcessError:
git_branch = None
else:
git_branch = git_branch.group(1)
# obj_dir is the name of the directory containing binaries for the current
# git branch (it's just a single name such as "obj.master", not a full path)
if git_branch == None:
obj_dir = 'obj'
else:
obj_dir = 'obj.%s' % git_branch
# The full path name of the directory containing this script file.
scripts_path = os.path.dirname(os.path.abspath(__file__))
# The full pathname of the parent of scripts_path (the top-level directory
# of a RAMCloud source tree).
top_path = os.path.abspath(scripts_path + '/..')
# Add /usr/local/lib to LD_LIBRARY_PATH if it isn't already there (this was
# needed for CentOS 5.5, but should probably be deleted now).
try:
ld_library_path = os.environ['LD_LIBRARY_PATH'].split(':')
except KeyError:
ld_library_path = []
if '/usr/local/lib' not in ld_library_path:
ld_library_path.insert(0, '/usr/local/lib')
os.environ['LD_LIBRARY_PATH'] = ':'.join(ld_library_path)
# All of the hosts available for servers or clients; each entry
# consists of a name for the host (for ssh), an IP address
# to use for creating service locators, and an id for generating
# Ethernet addresses.
hosts = []
for i in range(1, 61):
hosts.append(('rc%02d' % i,
'192.168.1.%d' % (100 + i),
i))
# Host on which old master is run for running recoveries.
# Need not be a member of hosts
old_master_host = ('rcmaster', '192.168.1.1', 81)
# Full path to the directory containing RAMCloud executables.
obj_path = '%s/%s' % (top_path, obj_dir)
# Ports (for TCP, etc.) to use for each kind of server.
coordinator_port = 12246
server_port = 12247
second_backup_port = 12248
# Command-line argument specifying where the first backup on each
# server should store its segment replicas.
default_disk1 = '-f /dev/sda2'
# Command-line argument specifying where the second backup should
# store its segment replicas.
default_disk2 = '-f /dev/sdb2'
# Try to include local overrides.
try:
from localconfig import *
except:
pass
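# Example of a (hypothetical) localconfig.py, placed on the Python path next to
# this script, that overrides some of the defaults above:
#   hosts = [('myrc%02d' % i, '10.0.0.%d' % (10 + i), i) for i in range(1, 5)]
#   default_disk1 = '-f /dev/sdc2'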
|
[] |
[] |
[
"LD_LIBRARY_PATH"
] |
[]
|
["LD_LIBRARY_PATH"]
|
python
| 1 | 0 | |
moto/awslambda/models.py
|
from __future__ import unicode_literals
import base64
import time
from collections import defaultdict
import copy
import datetime
from gzip import GzipFile
import docker
import docker.errors
import hashlib
import io
import logging
import os
import json
import re
import zipfile
import uuid
import tarfile
import calendar
import threading
import weakref
import requests.exceptions
from boto3 import Session
from moto.awslambda.policy import Policy
from moto.core import BaseBackend, CloudFormationModel
from moto.core.exceptions import RESTError
from moto.iam.models import iam_backend
from moto.iam.exceptions import IAMNotFoundException
from moto.core.utils import unix_time_millis
from moto.s3.models import s3_backend
from moto.logs.models import logs_backends
from moto.s3.exceptions import MissingBucket, MissingKey
from moto import settings
from .exceptions import (
CrossAccountNotAllowed,
InvalidRoleFormat,
InvalidParameterValueException,
)
from .utils import (
make_function_arn,
make_function_ver_arn,
make_layer_arn,
make_layer_ver_arn,
split_layer_arn,
)
from moto.sqs import sqs_backends
from moto.dynamodb2 import dynamodb_backends2
from moto.dynamodbstreams import dynamodbstreams_backends
from moto.core import ACCOUNT_ID
from moto.utilities.docker_utilities import DockerModel, parse_image_ref
logger = logging.getLogger(__name__)
try:
from tempfile import TemporaryDirectory
except ImportError:
from backports.tempfile import TemporaryDirectory
docker_3 = docker.__version__[0] >= "3"
def zip2tar(zip_bytes):
with TemporaryDirectory() as td:
tarname = os.path.join(td, "data.tar")
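# Zip entries carry local timestamps while the tar mtimes below are treated as
# UTC epoch seconds, so compute the local-to-UTC offset once and subtract it.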
timeshift = int(
(datetime.datetime.now() - datetime.datetime.utcnow()).total_seconds()
)
with zipfile.ZipFile(io.BytesIO(zip_bytes), "r") as zipf, tarfile.TarFile(
tarname, "w"
) as tarf:
for zipinfo in zipf.infolist():
if zipinfo.filename[-1] == "/": # is_dir() is py3.6+
continue
tarinfo = tarfile.TarInfo(name=zipinfo.filename)
tarinfo.size = zipinfo.file_size
tarinfo.mtime = calendar.timegm(zipinfo.date_time) - timeshift
infile = zipf.open(zipinfo.filename)
tarf.addfile(tarinfo, infile)
with open(tarname, "rb") as f:
tar_data = f.read()
return tar_data
class _VolumeRefCount:
__slots__ = "refcount", "volume"
def __init__(self, refcount, volume):
self.refcount = refcount
self.volume = volume
class _DockerDataVolumeContext:
_data_vol_map = defaultdict(
lambda: _VolumeRefCount(0, None)
) # {sha256: _VolumeRefCount}
_lock = threading.Lock()
def __init__(self, lambda_func):
self._lambda_func = lambda_func
self._vol_ref = None
@property
def name(self):
return self._vol_ref.volume.name
def __enter__(self):
# See if volume is already known
print("Entering Docker Data Volume")
with self.__class__._lock:
print("in Lock")
print("Geting ref")
self._vol_ref = self.__class__._data_vol_map[self._lambda_func.code_sha_256]
print("Increasing Count")
self._vol_ref.refcount += 1
if self._vol_ref.refcount > 1:
return self
print("Checking if the volume already exists")
# See if the volume already exists
for vol in self._lambda_func.docker_client.volumes.list():
if vol.name == self._lambda_func.code_sha_256:
self._vol_ref.volume = vol
return self
print("Doesn't exist!")
# It doesn't exist so we need to create it
self._vol_ref.volume = self._lambda_func.docker_client.volumes.create(
self._lambda_func.code_sha_256
)
print("Creating volume")
if docker_3:
volumes = {self.name: {"bind": "/tmp/data", "mode": "rw"}}
else:
volumes = {self.name: "/tmp/data"}
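# Populate the freshly created volume by mounting it into a short-lived
# "alpine" container and pushing the function code into it as a tar archive.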
self._lambda_func.docker_client.images.pull(
":".join(parse_image_ref("alpine"))
)
print("Running container with volumes:")
print(volumes)
container = self._lambda_func.docker_client.containers.run(
"alpine", "sleep 100", volumes=volumes, detach=True
)
try:
tar_bytes = zip2tar(self._lambda_func.code_bytes)
container.put_archive("/tmp/data", tar_bytes)
finally:
container.remove(force=True)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
with self.__class__._lock:
self._vol_ref.refcount -= 1
if self._vol_ref.refcount == 0:
try:
self._vol_ref.volume.remove()
except docker.errors.APIError as e:
if e.status_code != 409:
raise
raise # multiple processes trying to use same volume?
def _zipfile_content(zipfile):
# more hackery to handle unicode/bytes/str in python3 and python2 -
# argh!
try:
to_unzip_code = base64.b64decode(bytes(zipfile, "utf-8"))
except Exception:
to_unzip_code = base64.b64decode(zipfile)
return to_unzip_code, len(to_unzip_code), hashlib.sha256(to_unzip_code).hexdigest()
def _validate_s3_bucket_and_key(data):
key = None
try:
# FIXME: does not validate bucket region
key = s3_backend.get_object(data["S3Bucket"], data["S3Key"])
except MissingBucket:
if do_validate_s3():
raise InvalidParameterValueException(
"Error occurred while GetObject. S3 Error Code: NoSuchBucket. S3 Error Message: The specified bucket does not exist"
)
except MissingKey:
if do_validate_s3():
raise ValueError(
"InvalidParameterValueException",
"Error occurred while GetObject. S3 Error Code: NoSuchKey. S3 Error Message: The specified key does not exist.",
)
return key
class Permission(CloudFormationModel):
def __init__(self, region):
self.region = region
@staticmethod
def cloudformation_name_type():
return "Permission"
@staticmethod
def cloudformation_type():
return "AWS::Lambda::Permission"
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
properties = cloudformation_json["Properties"]
backend = lambda_backends[region_name]
fn = backend.get_function(properties["FunctionName"])
fn.policy.add_statement(raw=json.dumps(properties))
return Permission(region=region_name)
class LayerVersion(CloudFormationModel):
def __init__(self, spec, region):
# required
self.region = region
self.name = spec["LayerName"]
self.content = spec["Content"]
# optional
self.description = spec.get("Description", "")
self.compatible_runtimes = spec.get("CompatibleRuntimes", [])
self.license_info = spec.get("LicenseInfo", "")
# auto-generated
self.created_date = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
self.version = None
self._attached = False
self._layer = None
if "ZipFile" in self.content:
self.code_bytes, self.code_size, self.code_sha_256 = _zipfile_content(
self.content["ZipFile"]
)
else:
key = _validate_s3_bucket_and_key(self.content)
if key:
self.code_bytes = key.value
self.code_size = key.size
self.code_sha_256 = hashlib.sha256(key.value).hexdigest()
@property
def arn(self):
if self.version:
return make_layer_ver_arn(self.region, ACCOUNT_ID, self.name, self.version)
raise ValueError("Layer version is not set")
def attach(self, layer, version):
self._attached = True
self._layer = layer
self.version = version
def get_layer_version(self):
return {
"Version": self.version,
"LayerVersionArn": self.arn,
"CreatedDate": self.created_date,
"CompatibleRuntimes": self.compatible_runtimes,
"Description": self.description,
"LicenseInfo": self.license_info,
}
@staticmethod
def cloudformation_name_type():
return "LayerVersion"
@staticmethod
def cloudformation_type():
return "AWS::Lambda::LayerVersion"
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
properties = cloudformation_json["Properties"]
optional_properties = (
"Description",
"CompatibleRuntimes",
"LicenseInfo",
)
# required
spec = {
"Content": properties["Content"],
"LayerName": resource_name,
}
for prop in optional_properties:
if prop in properties:
spec[prop] = properties[prop]
backend = lambda_backends[region_name]
layer_version = backend.publish_layer_version(spec)
return layer_version
class Layer(object):
def __init__(self, name, region):
self.region = region
self.name = name
self.layer_arn = make_layer_arn(region, ACCOUNT_ID, self.name)
self._latest_version = 0
self.layer_versions = {}
def attach_version(self, layer_version):
self._latest_version += 1
layer_version.attach(self, self._latest_version)
self.layer_versions[str(self._latest_version)] = layer_version
def to_dict(self):
return {
"LayerName": self.name,
"LayerArn": self.layer_arn,
"LatestMatchingVersion": self.layer_versions[
str(self._latest_version)
].get_layer_version(),
}
class LambdaFunction(CloudFormationModel, DockerModel):
def __init__(self, spec, region, validate_s3=True, version=1):
DockerModel.__init__(self)
# required
self.region = region
self.code = spec["Code"]
self.function_name = spec["FunctionName"]
self.handler = spec["Handler"]
self.role = spec["Role"]
self.run_time = spec["Runtime"]
self.logs_backend = logs_backends[self.region]
self.environment_vars = spec.get("Environment", {}).get("Variables", {})
self.policy = None
self.state = "Active"
self.reserved_concurrency = spec.get("ReservedConcurrentExecutions", None)
# optional
self.description = spec.get("Description", "")
self.memory_size = spec.get("MemorySize", 128)
self.publish = spec.get("Publish", False) # this is ignored currently
self.timeout = spec.get("Timeout", 3)
self.layers = self._get_layers_data(spec.get("Layers", []))
self.logs_group_name = "/aws/lambda/{}".format(self.function_name)
# this isn't finished yet. it needs to find out the VpcId value
self._vpc_config = spec.get(
"VpcConfig", {"SubnetIds": [], "SecurityGroupIds": []}
)
# auto-generated
self.version = version
self.last_modified = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
if "ZipFile" in self.code:
self.code_bytes, self.code_size, self.code_sha_256 = _zipfile_content(
self.code["ZipFile"]
)
# TODO: we should be putting this in a lambda bucket
self.code["UUID"] = str(uuid.uuid4())
self.code["S3Key"] = "{}-{}".format(self.function_name, self.code["UUID"])
else:
key = _validate_s3_bucket_and_key(self.code)
if key:
self.code_bytes = key.value
self.code_size = key.size
self.code_sha_256 = hashlib.sha256(key.value).hexdigest()
else:
self.code_bytes = ""
self.code_size = 0
self.code_sha_256 = ""
self.function_arn = make_function_arn(
self.region, ACCOUNT_ID, self.function_name
)
self.tags = dict()
def set_version(self, version):
self.function_arn = make_function_ver_arn(
self.region, ACCOUNT_ID, self.function_name, version
)
self.version = version
self.last_modified = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
@property
def vpc_config(self):
config = self._vpc_config.copy()
if config["SecurityGroupIds"]:
config.update({"VpcId": "vpc-123abc"})
return config
@property
def physical_resource_id(self):
return self.function_name
def __repr__(self):
return json.dumps(self.get_configuration())
def _get_layers_data(self, layers_versions_arns):
backend = lambda_backends[self.region]
layer_versions = [
backend.layers_versions_by_arn(layer_version)
for layer_version in layers_versions_arns
]
if not all(layer_versions):
raise ValueError(
"InvalidParameterValueException",
"One or more LayerVersion does not exist {0}".format(
layers_versions_arns
),
)
return [{"Arn": lv.arn, "CodeSize": lv.code_size} for lv in layer_versions]
def get_configuration(self):
config = {
"CodeSha256": self.code_sha_256,
"CodeSize": self.code_size,
"Description": self.description,
"FunctionArn": self.function_arn,
"FunctionName": self.function_name,
"Handler": self.handler,
"LastModified": self.last_modified,
"MemorySize": self.memory_size,
"Role": self.role,
"Runtime": self.run_time,
"State": self.state,
"Timeout": self.timeout,
"Version": str(self.version),
"VpcConfig": self.vpc_config,
"Layers": self.layers,
}
if self.environment_vars:
config["Environment"] = {"Variables": self.environment_vars}
return config
def get_code(self):
code = {
"Code": {
"Location": "s3://awslambda-{0}-tasks.s3-{0}.amazonaws.com/{1}".format(
self.region, self.code["S3Key"]
),
"RepositoryType": "S3",
},
"Configuration": self.get_configuration(),
}
if self.reserved_concurrency:
code.update(
{
"Concurrency": {
"ReservedConcurrentExecutions": self.reserved_concurrency
}
}
)
return code
def update_configuration(self, config_updates):
for key, value in config_updates.items():
if key == "Description":
self.description = value
elif key == "Handler":
self.handler = value
elif key == "MemorySize":
self.memory_size = value
elif key == "Role":
self.role = value
elif key == "Runtime":
self.run_time = value
elif key == "Timeout":
self.timeout = value
elif key == "VpcConfig":
self._vpc_config = value
elif key == "Environment":
self.environment_vars = value["Variables"]
elif key == "Layers":
self.layers = self._get_layers_data(value)
return self.get_configuration()
def update_function_code(self, updated_spec):
if "DryRun" in updated_spec and updated_spec["DryRun"]:
return self.get_configuration()
if "ZipFile" in updated_spec:
self.code["ZipFile"] = updated_spec["ZipFile"]
# using the "hackery" from __init__ because it seems to work
# TODOs and FIXMEs included, because they'll need to be fixed
# in both places now
try:
to_unzip_code = base64.b64decode(
bytes(updated_spec["ZipFile"], "utf-8")
)
except Exception:
to_unzip_code = base64.b64decode(updated_spec["ZipFile"])
self.code_bytes = to_unzip_code
self.code_size = len(to_unzip_code)
self.code_sha_256 = hashlib.sha256(to_unzip_code).hexdigest()
# TODO: we should be putting this in a lambda bucket
self.code["UUID"] = str(uuid.uuid4())
self.code["S3Key"] = "{}-{}".format(self.function_name, self.code["UUID"])
elif "S3Bucket" in updated_spec and "S3Key" in updated_spec:
key = None
try:
# FIXME: does not validate bucket region
key = s3_backend.get_object(
updated_spec["S3Bucket"], updated_spec["S3Key"]
)
except MissingBucket:
if do_validate_s3():
raise ValueError(
"InvalidParameterValueException",
"Error occurred while GetObject. S3 Error Code: NoSuchBucket. S3 Error Message: The specified bucket does not exist",
)
except MissingKey:
if do_validate_s3():
raise ValueError(
"InvalidParameterValueException",
"Error occurred while GetObject. S3 Error Code: NoSuchKey. S3 Error Message: The specified key does not exist.",
)
if key:
self.code_bytes = key.value
self.code_size = key.size
self.code_sha_256 = hashlib.sha256(key.value).hexdigest()
self.code["S3Bucket"] = updated_spec["S3Bucket"]
self.code["S3Key"] = updated_spec["S3Key"]
return self.get_configuration()
@staticmethod
def convert(s):
try:
return str(s, encoding="utf-8")
except Exception:
return s
def _invoke_lambda(self, code, event=None, context=None):
# Create the LogGroup if necessary, to write the result to
self.logs_backend.ensure_log_group(self.logs_group_name, [])
print("Invoking Lambda")
# TODO: context not yet implemented
if event is None:
event = dict()
if context is None:
context = {}
output = None
try:
# TODO: I believe we can keep the container running and feed events as needed
# also need to hook it up to the other services so it can make kws/s3 etc calls
# Should get invoke_id /RequestId from invocation
env_vars = {
"_HANDLER": self.handler,
"AWS_EXECUTION_ENV": "AWS_Lambda_{}".format(self.run_time),
"AWS_LAMBDA_FUNCTION_TIMEOUT": self.timeout,
"AWS_LAMBDA_FUNCTION_NAME": self.function_name,
"AWS_LAMBDA_FUNCTION_MEMORY_SIZE": self.memory_size,
"AWS_LAMBDA_FUNCTION_VERSION": self.version,
"AWS_REGION": self.region,
"AWS_ACCESS_KEY_ID": "role-account-id",
"AWS_SECRET_ACCESS_KEY": "role-secret-key",
"AWS_SESSION_TOKEN": "session-token",
}
print("Updating Env Vars")
env_vars.update(self.environment_vars)
container = exit_code = None
print("Setting log config")
log_config = docker.types.LogConfig(type=docker.types.LogConfig.types.JSON)
print("With volume")
with _DockerDataVolumeContext(self) as data_vol:
print("Inside, data vol:")
print(data_vol)
try:
self.docker_client.ping() # Verify Docker is running
run_kwargs = (
dict(links={"motoserver": "motoserver"})
if settings.TEST_SERVER_MODE
else {}
)
image_ref = "lambci/lambda:{}".format(self.run_time)
self.docker_client.images.pull(":".join(parse_image_ref(image_ref)))
print("Running Container")
print("Volumes: %s" % "{}:/var/task".format(data_vol.name))
container = self.docker_client.containers.run(
image_ref,
[self.handler, json.dumps(event)],
remove=False,
mem_limit="{}m".format(self.memory_size),
volumes=["{}:/var/task".format(data_vol.name)],
environment=env_vars,
detach=True,
log_config=log_config,
**run_kwargs
)
finally:
print("In Finally")
if container:
try:
exit_code = container.wait(timeout=300)
except requests.exceptions.ReadTimeout:
exit_code = -1
container.stop()
container.kill()
else:
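# docker-py >= 3 returns a dict from wait(); extract the integer status code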
if docker_3:
exit_code = exit_code["StatusCode"]
print("After status code")
output = container.logs(stdout=False, stderr=True)
output += container.logs(stdout=True, stderr=False)
container.remove()
output = output.decode("utf-8")
# Send output to "logs" backend
invoke_id = uuid.uuid4().hex
log_stream_name = "{date.year}/{date.month:02d}/{date.day:02d}/[{version}]{invoke_id}".format(
date=datetime.datetime.utcnow(),
version=self.version,
invoke_id=invoke_id,
)
self.logs_backend.create_log_stream(self.logs_group_name, log_stream_name)
log_events = [
{"timestamp": unix_time_millis(), "message": line}
for line in output.splitlines()
]
self.logs_backend.put_log_events(
self.logs_group_name, log_stream_name, log_events, None
)
if exit_code != 0:
raise Exception("lambda invoke failed output: {}".format(output))
# We only care about the response from the lambda
# Which is the last line of the output, according to https://github.com/lambci/docker-lambda/issues/25
resp = output.splitlines()[-1]
logs = os.linesep.join(
[line for line in self.convert(output).splitlines()[:-1]]
)
return resp, False, logs
except docker.errors.DockerException as e:
# Docker itself is probably not running - there will be no Lambda-logs to handle
return "error running docker: {}".format(e), True, ""
except BaseException as e:
logs = os.linesep.join(
[line for line in self.convert(output).splitlines()[:-1]]
)
return "error running lambda: {}".format(e), True, logs
def invoke(self, body, request_headers, response_headers):
if body:
body = json.loads(body)
# Get the invocation type:
res, errored, logs = self._invoke_lambda(code=self.code, event=body)
inv_type = request_headers.get("x-amz-invocation-type", "RequestResponse")
if inv_type == "RequestResponse":
encoded = base64.b64encode(logs.encode("utf-8"))
response_headers["x-amz-log-result"] = encoded.decode("utf-8")
result = res.encode("utf-8")
else:
result = res
if errored:
response_headers["x-amz-function-error"] = "Handled"
return result
@staticmethod
def cloudformation_name_type():
return "FunctionName"
@staticmethod
def cloudformation_type():
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-function.html
return "AWS::Lambda::Function"
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
properties = cloudformation_json["Properties"]
optional_properties = (
"Description",
"MemorySize",
"Publish",
"Timeout",
"VpcConfig",
"Environment",
"ReservedConcurrentExecutions",
)
# required
spec = {
"Code": properties["Code"],
"FunctionName": resource_name,
"Handler": properties["Handler"],
"Role": properties["Role"],
"Runtime": properties["Runtime"],
}
# NOTE: Not doing `properties.get(k, DEFAULT)` to avoid duplicating the
# default logic
for prop in optional_properties:
if prop in properties:
spec[prop] = properties[prop]
# when ZipFile is present in CloudFormation, per the official docs,
# the code is a plaintext code snippet of up to 4096 bytes.
# this snippet converts this plaintext code to a proper base64-encoded ZIP file.
if "ZipFile" in properties["Code"]:
spec["Code"]["ZipFile"] = base64.b64encode(
cls._create_zipfile_from_plaintext_code(spec["Code"]["ZipFile"])
)
backend = lambda_backends[region_name]
fn = backend.create_function(spec)
return fn
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == "Arn":
return make_function_arn(self.region, ACCOUNT_ID, self.function_name)
raise UnformattedGetAttTemplateException()
@classmethod
def update_from_cloudformation_json(
cls, new_resource_name, cloudformation_json, original_resource, region_name
):
updated_props = cloudformation_json["Properties"]
original_resource.update_configuration(updated_props)
original_resource.update_function_code(updated_props["Code"])
return original_resource
@staticmethod
def _create_zipfile_from_plaintext_code(code):
zip_output = io.BytesIO()
zip_file = zipfile.ZipFile(zip_output, "w", zipfile.ZIP_DEFLATED)
zip_file.writestr("lambda_function.zip", code)
zip_file.close()
zip_output.seek(0)
return zip_output.read()
def delete(self, region):
lambda_backends[region].delete_function(self.function_name)
class EventSourceMapping(CloudFormationModel):
def __init__(self, spec):
# required
self.function_name = spec["FunctionName"]
self.event_source_arn = spec["EventSourceArn"]
# optional
self.batch_size = spec.get("BatchSize")
self.starting_position = spec.get("StartingPosition", "TRIM_HORIZON")
self.enabled = spec.get("Enabled", True)
self.starting_position_timestamp = spec.get("StartingPositionTimestamp", None)
self.function_arn = spec["FunctionArn"]
self.uuid = str(uuid.uuid4())
self.last_modified = time.mktime(datetime.datetime.utcnow().timetuple())
def _get_service_source_from_arn(self, event_source_arn):
return event_source_arn.split(":")[2].lower()
def _validate_event_source(self, event_source_arn):
valid_services = ("dynamodb", "kinesis", "sqs")
service = self._get_service_source_from_arn(event_source_arn)
return True if service in valid_services else False
@property
def event_source_arn(self):
return self._event_source_arn
@event_source_arn.setter
def event_source_arn(self, event_source_arn):
if not self._validate_event_source(event_source_arn):
raise ValueError(
"InvalidParameterValueException", "Unsupported event source type"
)
self._event_source_arn = event_source_arn
@property
def batch_size(self):
return self._batch_size
@batch_size.setter
def batch_size(self, batch_size):
batch_size_service_map = {
"kinesis": (100, 10000),
"dynamodb": (100, 1000),
"sqs": (10, 10),
}
source_type = self._get_service_source_from_arn(self.event_source_arn)
batch_size_for_source = batch_size_service_map[source_type]
if batch_size is None:
self._batch_size = batch_size_for_source[0]
elif batch_size > batch_size_for_source[1]:
error_message = "BatchSize {} exceeds the max of {}".format(
batch_size, batch_size_for_source[1]
)
raise ValueError("InvalidParameterValueException", error_message)
else:
self._batch_size = int(batch_size)
def get_configuration(self):
return {
"UUID": self.uuid,
"BatchSize": self.batch_size,
"EventSourceArn": self.event_source_arn,
"FunctionArn": self.function_arn,
"LastModified": self.last_modified,
"LastProcessingResult": "",
"State": "Enabled" if self.enabled else "Disabled",
"StateTransitionReason": "User initiated",
}
def delete(self, region_name):
lambda_backend = lambda_backends[region_name]
lambda_backend.delete_event_source_mapping(self.uuid)
@staticmethod
def cloudformation_name_type():
return None
@staticmethod
def cloudformation_type():
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-eventsourcemapping.html
return "AWS::Lambda::EventSourceMapping"
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
properties = cloudformation_json["Properties"]
lambda_backend = lambda_backends[region_name]
return lambda_backend.create_event_source_mapping(properties)
@classmethod
def update_from_cloudformation_json(
cls, new_resource_name, cloudformation_json, original_resource, region_name
):
properties = cloudformation_json["Properties"]
event_source_uuid = original_resource.uuid
lambda_backend = lambda_backends[region_name]
return lambda_backend.update_event_source_mapping(event_source_uuid, properties)
@classmethod
def delete_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
properties = cloudformation_json["Properties"]
lambda_backend = lambda_backends[region_name]
esms = lambda_backend.list_event_source_mappings(
event_source_arn=properties["EventSourceArn"],
function_name=properties["FunctionName"],
)
for esm in esms:
if esm.uuid == resource_name:
esm.delete(region_name)
@property
def physical_resource_id(self):
return self.uuid
class LambdaVersion(CloudFormationModel):
def __init__(self, spec):
self.version = spec["Version"]
def __repr__(self):
return str(self.logical_resource_id)
@staticmethod
def cloudformation_name_type():
return None
@staticmethod
def cloudformation_type():
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-version.html
return "AWS::Lambda::Version"
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
properties = cloudformation_json["Properties"]
function_name = properties["FunctionName"]
func = lambda_backends[region_name].publish_function(function_name)
spec = {"Version": func.version}
return LambdaVersion(spec)
class LambdaStorage(object):
def __init__(self):
# Format 'func_name' {'alias': {}, 'versions': []}
self._functions = {}
self._arns = weakref.WeakValueDictionary()
def _get_latest(self, name):
return self._functions[name]["latest"]
def _get_version(self, name, version):
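# Published Lambda versions start at 1 while the internal list is 0-based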
index = version - 1
try:
return self._functions[name]["versions"][index]
except IndexError:
return None
def _get_alias(self, name, alias):
return self._functions[name]["alias"].get(alias, None)
def get_function_by_name(self, name, qualifier=None):
if name not in self._functions:
return None
if qualifier is None:
return self._get_latest(name)
try:
return self._get_version(name, int(qualifier))
except ValueError:
return self._functions[name]["latest"]
def list_versions_by_function(self, name):
if name not in self._functions:
return None
latest = copy.copy(self._functions[name]["latest"])
latest.function_arn += ":$LATEST"
return [latest] + self._functions[name]["versions"]
def get_arn(self, arn):
return self._arns.get(arn, None)
def get_function_by_name_or_arn(self, input, qualifier=None):
return self.get_function_by_name(input, qualifier) or self.get_arn(input)
def put_function(self, fn):
"""
:param fn: Function
:type fn: LambdaFunction
"""
valid_role = re.match(InvalidRoleFormat.pattern, fn.role)
if valid_role:
account = valid_role.group(2)
if account != ACCOUNT_ID:
raise CrossAccountNotAllowed()
try:
iam_backend.get_role_by_arn(fn.role)
except IAMNotFoundException:
raise InvalidParameterValueException(
"The role defined for the function cannot be assumed by Lambda."
)
else:
raise InvalidRoleFormat(fn.role)
if fn.function_name in self._functions:
self._functions[fn.function_name]["latest"] = fn
else:
self._functions[fn.function_name] = {
"latest": fn,
"versions": [],
"alias": weakref.WeakValueDictionary(),
}
# instantiate a new policy for this version of the lambda
fn.policy = Policy(fn)
self._arns[fn.function_arn] = fn
def publish_function(self, name):
if name not in self._functions:
return None
if not self._functions[name]["latest"]:
return None
new_version = len(self._functions[name]["versions"]) + 1
fn = copy.copy(self._functions[name]["latest"])
fn.set_version(new_version)
self._functions[name]["versions"].append(fn)
self._arns[fn.function_arn] = fn
return fn
def del_function(self, name_or_arn, qualifier=None):
function = self.get_function_by_name_or_arn(name_or_arn)
if function:
name = function.function_name
if not qualifier:
# Something may still be referencing this, so delete all of its ARNs
latest = self._functions[name]["latest"].function_arn
del self._arns[latest]
for fn in self._functions[name]["versions"]:
del self._arns[fn.function_arn]
del self._functions[name]
return True
elif qualifier == "$LATEST":
self._functions[name]["latest"] = None
# If there are no functions left
if (
not self._functions[name]["versions"]
and not self._functions[name]["latest"]
):
del self._functions[name]
return True
else:
fn = self.get_function_by_name(name, qualifier)
if fn:
self._functions[name]["versions"].remove(fn)
# If there are no functions left
if (
not self._functions[name]["versions"]
and not self._functions[name]["latest"]
):
del self._functions[name]
return True
return False
def all(self):
result = []
for function_group in self._functions.values():
if function_group["latest"] is not None:
result.append(function_group["latest"])
result.extend(function_group["versions"])
return result
class LayerStorage(object):
def __init__(self):
self._layers = {}
self._arns = weakref.WeakValueDictionary()
def put_layer_version(self, layer_version):
"""
:param layer_version: LayerVersion
"""
if layer_version.name not in self._layers:
self._layers[layer_version.name] = Layer(
layer_version.name, layer_version.region
)
self._layers[layer_version.name].attach_version(layer_version)
def list_layers(self):
return [layer.to_dict() for layer in self._layers.values()]
def get_layer_versions(self, layer_name):
if layer_name in self._layers:
return list(iter(self._layers[layer_name].layer_versions.values()))
return []
def get_layer_version_by_arn(self, layer_version_arn):
split_arn = split_layer_arn(layer_version_arn)
if split_arn.layer_name in self._layers:
return self._layers[split_arn.layer_name].layer_versions.get(
split_arn.version, None
)
return None
class LambdaBackend(BaseBackend):
def __init__(self, region_name):
self._lambdas = LambdaStorage()
self._event_source_mappings = {}
self._layers = LayerStorage()
self.region_name = region_name
def reset(self):
region_name = self.region_name
self.__dict__ = {}
self.__init__(region_name)
def create_function(self, spec):
function_name = spec.get("FunctionName", None)
if function_name is None:
raise RESTError("InvalidParameterValueException", "Missing FunctionName")
fn = LambdaFunction(spec, self.region_name, version="$LATEST")
self._lambdas.put_function(fn)
if spec.get("Publish"):
ver = self.publish_function(function_name)
fn.version = ver.version
return fn
def create_event_source_mapping(self, spec):
required = ["EventSourceArn", "FunctionName"]
for param in required:
if not spec.get(param):
raise RESTError(
"InvalidParameterValueException", "Missing {}".format(param)
)
# Validate function name
func = self._lambdas.get_function_by_name_or_arn(spec.get("FunctionName", ""))
if not func:
raise RESTError("ResourceNotFoundException", "Invalid FunctionName")
# Validate queue
for queue in sqs_backends[self.region_name].queues.values():
if queue.queue_arn == spec["EventSourceArn"]:
if queue.lambda_event_source_mappings.get("func.function_arn"):
# TODO: Correct exception?
raise RESTError(
"ResourceConflictException", "The resource already exists."
)
if queue.fifo_queue:
raise RESTError(
"InvalidParameterValueException",
"{} is FIFO".format(queue.queue_arn),
)
else:
spec.update({"FunctionArn": func.function_arn})
esm = EventSourceMapping(spec)
self._event_source_mappings[esm.uuid] = esm
# Set backend function on queue
queue.lambda_event_source_mappings[esm.function_arn] = esm
return esm
for stream in json.loads(
dynamodbstreams_backends[self.region_name].list_streams()
)["Streams"]:
if stream["StreamArn"] == spec["EventSourceArn"]:
spec.update({"FunctionArn": func.function_arn})
esm = EventSourceMapping(spec)
self._event_source_mappings[esm.uuid] = esm
table_name = stream["TableName"]
table = dynamodb_backends2[self.region_name].get_table(table_name)
table.lambda_event_source_mappings[esm.function_arn] = esm
return esm
raise RESTError("ResourceNotFoundException", "Invalid EventSourceArn")
def publish_layer_version(self, spec):
required = ["LayerName", "Content"]
for param in required:
if not spec.get(param):
raise RESTError(
"InvalidParameterValueException", "Missing {}".format(param)
)
layer_version = LayerVersion(spec, self.region_name)
self._layers.put_layer_version(layer_version)
return layer_version
def list_layers(self):
return self._layers.list_layers()
def get_layer_versions(self, layer_name):
return self._layers.get_layer_versions(layer_name)
def layers_versions_by_arn(self, layer_version_arn):
return self._layers.get_layer_version_by_arn(layer_version_arn)
def publish_function(self, function_name):
return self._lambdas.publish_function(function_name)
def get_function(self, function_name_or_arn, qualifier=None):
return self._lambdas.get_function_by_name_or_arn(
function_name_or_arn, qualifier
)
def list_versions_by_function(self, function_name):
return self._lambdas.list_versions_by_function(function_name)
def get_event_source_mapping(self, uuid):
return self._event_source_mappings.get(uuid)
def delete_event_source_mapping(self, uuid):
return self._event_source_mappings.pop(uuid)
def update_event_source_mapping(self, uuid, spec):
esm = self.get_event_source_mapping(uuid)
if not esm:
return False
for key, value in spec.items():
if key == "FunctionName":
func = self._lambdas.get_function_by_name_or_arn(spec[key])
esm.function_arn = func.function_arn
elif key == "BatchSize":
esm.batch_size = spec[key]
elif key == "Enabled":
esm.enabled = spec[key]
esm.last_modified = time.mktime(datetime.datetime.utcnow().timetuple())
return esm
def list_event_source_mappings(self, event_source_arn, function_name):
esms = list(self._event_source_mappings.values())
if event_source_arn:
esms = list(filter(lambda x: x.event_source_arn == event_source_arn, esms))
if function_name:
esms = list(filter(lambda x: x.function_name == function_name, esms))
return esms
def get_function_by_arn(self, function_arn):
return self._lambdas.get_arn(function_arn)
def delete_function(self, function_name, qualifier=None):
return self._lambdas.del_function(function_name, qualifier)
def list_functions(self):
return self._lambdas.all()
def send_sqs_batch(self, function_arn, messages, queue_arn):
success = True
for message in messages:
func = self.get_function_by_arn(function_arn)
result = self._send_sqs_message(func, message, queue_arn)
if not result:
success = False
return success
def _send_sqs_message(self, func, message, queue_arn):
event = {
"Records": [
{
"messageId": message.id,
"receiptHandle": message.receipt_handle,
"body": message.body,
"attributes": {
"ApproximateReceiveCount": "1",
"SentTimestamp": "1545082649183",
"SenderId": "AIDAIENQZJOLO23YVJ4VO",
"ApproximateFirstReceiveTimestamp": "1545082649185",
},
"messageAttributes": {},
"md5OfBody": "098f6bcd4621d373cade4e832627b4f6",
"eventSource": "aws:sqs",
"eventSourceARN": queue_arn,
"awsRegion": self.region_name,
}
]
}
request_headers = {}
response_headers = {}
func.invoke(json.dumps(event), request_headers, response_headers)
return "x-amz-function-error" not in response_headers
def send_sns_message(self, function_name, message, subject=None, qualifier=None):
event = {
"Records": [
{
"EventVersion": "1.0",
"EventSubscriptionArn": "arn:aws:sns:EXAMPLE",
"EventSource": "aws:sns",
"Sns": {
"SignatureVersion": "1",
"Timestamp": "1970-01-01T00:00:00.000Z",
"Signature": "EXAMPLE",
"SigningCertUrl": "EXAMPLE",
"MessageId": "95df01b4-ee98-5cb9-9903-4c221d41eb5e",
"Message": message,
"MessageAttributes": {
"Test": {"Type": "String", "Value": "TestString"},
"TestBinary": {"Type": "Binary", "Value": "TestBinary"},
},
"Type": "Notification",
"UnsubscribeUrl": "EXAMPLE",
"TopicArn": "arn:aws:sns:EXAMPLE",
"Subject": subject or "TestInvoke",
},
}
]
}
func = self._lambdas.get_function_by_name_or_arn(function_name, qualifier)
func.invoke(json.dumps(event), {}, {})
def send_dynamodb_items(self, function_arn, items, source):
event = {
"Records": [
{
"eventID": item.to_json()["eventID"],
"eventName": "INSERT",
"eventVersion": item.to_json()["eventVersion"],
"eventSource": item.to_json()["eventSource"],
"awsRegion": self.region_name,
"dynamodb": item.to_json()["dynamodb"],
"eventSourceARN": source,
}
for item in items
]
}
func = self._lambdas.get_arn(function_arn)
return func.invoke(json.dumps(event), {}, {})
def send_log_event(
self, function_arn, filter_name, log_group_name, log_stream_name, log_events
):
data = {
"messageType": "DATA_MESSAGE",
"owner": ACCOUNT_ID,
"logGroup": log_group_name,
"logStream": log_stream_name,
"subscriptionFilters": [filter_name],
"logEvents": log_events,
}
output = io.BytesIO()
with GzipFile(fileobj=output, mode="w") as f:
f.write(json.dumps(data, separators=(",", ":")).encode("utf-8"))
payload_gz_encoded = base64.b64encode(output.getvalue()).decode("utf-8")
event = {"awslogs": {"data": payload_gz_encoded}}
func = self._lambdas.get_arn(function_arn)
return func.invoke(json.dumps(event), {}, {})
def list_tags(self, resource):
return self.get_function_by_arn(resource).tags
def tag_resource(self, resource, tags):
fn = self.get_function_by_arn(resource)
if not fn:
return False
fn.tags.update(tags)
return True
def untag_resource(self, resource, tagKeys):
fn = self.get_function_by_arn(resource)
if fn:
for key in tagKeys:
try:
del fn.tags[key]
except KeyError:
pass
# Don't care
return True
return False
def add_permission(self, function_name, raw):
fn = self.get_function(function_name)
fn.policy.add_statement(raw)
def remove_permission(self, function_name, sid, revision=""):
fn = self.get_function(function_name)
fn.policy.del_statement(sid, revision)
def get_policy(self, function_name):
fn = self.get_function(function_name)
return fn.policy.get_policy()
def get_policy_wire_format(self, function_name):
fn = self.get_function(function_name)
return fn.policy.wire_format()
def update_function_code(self, function_name, qualifier, body):
fn = self.get_function(function_name, qualifier)
if fn:
if body.get("Publish", False):
fn = self.publish_function(function_name)
config = fn.update_function_code(body)
return config
else:
return None
def update_function_configuration(self, function_name, qualifier, body):
fn = self.get_function(function_name, qualifier)
return fn.update_configuration(body) if fn else None
def invoke(self, function_name, qualifier, body, headers, response_headers):
fn = self.get_function(function_name, qualifier)
if fn:
payload = fn.invoke(body, headers, response_headers)
response_headers["Content-Length"] = str(len(payload))
return payload
else:
return None
def put_function_concurrency(self, function_name, reserved_concurrency):
fn = self.get_function(function_name)
fn.reserved_concurrency = reserved_concurrency
return fn.reserved_concurrency
def delete_function_concurrency(self, function_name):
fn = self.get_function(function_name)
fn.reserved_concurrency = None
return fn.reserved_concurrency
def get_function_concurrency(self, function_name):
fn = self.get_function(function_name)
return fn.reserved_concurrency
def do_validate_s3():
return os.environ.get("VALIDATE_LAMBDA_S3", "") in ["", "1", "true"]
lambda_backends = {}
for region in Session().get_available_regions("lambda"):
lambda_backends[region] = LambdaBackend(region)
for region in Session().get_available_regions("lambda", partition_name="aws-us-gov"):
lambda_backends[region] = LambdaBackend(region)
for region in Session().get_available_regions("lambda", partition_name="aws-cn"):
lambda_backends[region] = LambdaBackend(region)
|
[] |
[] |
[
"VALIDATE_LAMBDA_S3"
] |
[]
|
["VALIDATE_LAMBDA_S3"]
|
python
| 1 | 0 | |
setup/experiment.py
|
import redis
import os
import logging
import time
import datetime
import json
import uuid
#logger = logging.getLogger('google_experiment')
#logger.setLevel(logging.INFO)
TOPIC_PRODUCE = ('TOPIC_PRODUCE' in os.environ and os.environ['TOPIC_PRODUCE']) or "population-objects"
WORKER_HEARTBEAT_INTERVAL = 10
r = redis.StrictRedis(host=os.environ['REDIS_HOST'], port=6379, db=0)
redis_ready = False
while not redis_ready:
try:
redis_ready = r.ping()
except:
print("waiting for redis")
time.sleep(3)
print("setup:redis alive")
def new_populations(env,conf ,number_of_pops, n_individuals, dim, lb, ub ):
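"""Create number_of_pops copies of env, each with a fresh random population of
n_individuals chromosomes drawn uniformly from [lb, ub]. Each copy is assigned
to GA or PSO based on experiment['ga_worker_ratio'] and gets randomized GA
crossover/mutation probabilities from the configured ranges."""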
import random
message_list = []
for _ in range(number_of_pops):
new_env = dict(env)
new_env["population"] = [{"chromosome": [random.uniform(lb,ub) for _ in range(dim)], "id": None, "fitness": {"DefaultContext": 0.0}} for _ in range(n_individuals)]
new_env["message_id"] = str (uuid.uuid4())
if random.random() > env["experiment"]["ga_worker_ratio"]:
new_env["algorithm"] = "PSO"
else:
new_env["algorithm"] = "GA"
new_env['params']['GA']['crossover']['CXPB'] = random.uniform(conf['CXPB_RND'][0],conf['CXPB_RND'][1])
new_env['params']['GA']['mutation']['MUTPB'] = random.uniform(conf['MUTPB_RND'][0],conf['MUTPB_RND'][1])
message_list.append(new_env)
return message_list
def experiment(conf):
print("conf", conf)
for function in conf['FUNCTIONS']:
for dim in conf['DIMENSIONS'] :
for instance in conf['INSTANCES']:
env = {"problem":
{"name": "BBOB",
"instance": instance,
"error": 1e-8,
"function": function,
"dim": dim,
"search_space": [-5, 5],
"problem_id": "%s-%s-%s-%s" % ( conf['EXPERIMENT_ID'] , function, instance, dim ),
"max_iterations": conf["DIM_CONFIGURATION"][str(dim)]['MAX_ITERATIONS'] * conf["DIM_CONFIGURATION"][str(dim)]['MESSAGES_GA'] },
"population": [],
"population_size": conf["DIM_CONFIGURATION"][str(dim)]['POP_SIZE'],
"id": "1",
"algorithm": None,
"params": { "GA" :
{ "crossover": {"type": "cxTwoPoint", "CXPB_RND": conf["CXPB_RND"] },
"mutation": {"MUTPB_RND":conf["MUTPB_RND"], "indpb": 0.05, "sigma": 0.5, "type": "mutGaussian", "mu": 0},
"selection": {"type": "tools.selTournament", "tournsize": 2},
"iterations": conf["DIM_CONFIGURATION"][str(dim)]['NGEN'],
},
"PSO":
# According to https://sci2s.ugr.es/sites/default/files/files/TematicWebSites/EAMHCO/contributionsGECCO09/p2269-elabd.pdf
{ "Vmax": 5,
"wMax": 0.9,
"wMin" : 0.2,
"c1":2,
"c2":2,
"iterations": conf["DIM_CONFIGURATION"][str(dim)]['NGEN']
}
},
"experiment":
{"type": "benchmark", "experiment_id": conf['EXPERIMENT_ID'], "ga_worker_ratio":conf['GA_WORKER_RATIO'] }}
#Initialize pops
_messages = new_populations(env,conf ,conf["DIM_CONFIGURATION"][str(dim)]['MESSAGES_GA'] , conf["DIM_CONFIGURATION"][str(dim)]['POP_SIZE'],env["problem"]["dim"], env["problem"]["search_space"][0], env["problem"]["search_space"][1])
print("messages created")
print("Checking redis with ping")
while not r.ping():
print("ping",r.ping())
time.sleep(1)
for data in _messages:
json_data = json.dumps(data)
# Data must be a bytestring
message = json_data.encode('utf-8')
print("message")
print("sending to",TOPIC_PRODUCE )
result = r.rpush(TOPIC_PRODUCE, message)
print("rpush", result)
print(function,dim, instance )
print ("populations sent to workers")
print("sending problem to controller")
r.rpush("experiment_queue",json.dumps(env))
# Block until finished
print("waiting for problem to finish")
experiment_finished = r.blpop("experiment_finished", 0)
print (experiment_finished, "Done")
#Return
print ("Begin Message Loop")
def pull_conf(time_out=WORKER_HEARTBEAT_INTERVAL):
# Pop a configuration message from the setup queue.
# This is a blocking operation; blpop returns a tuple (queue_name, payload)
# or None on timeout.
message = r.blpop("setup_queue", time_out)
if message:
config_json = message[1]
config = json.loads(config_json)
return config
else:
return ""
if __name__ == '__main__':
while True:
config = pull_conf()
if (config):
experiment(config)
else:
pass
|
[] |
[] |
[
"REDIS_HOST",
"TOPIC_PRODUCE"
] |
[]
|
["REDIS_HOST", "TOPIC_PRODUCE"]
|
python
| 2 | 0 | |
pybind11_mkdoc/mkdoc_lib.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Syntax: mkdoc.py [-I<path> ..] [.. a list of header files ..]
#
# Extract documentation from C++ header files to use it in Python bindings
#
import os
import sys
import platform
import re
import textwrap
from clang import cindex
from clang.cindex import CursorKind
from collections import OrderedDict
from glob import glob
from threading import Thread, Semaphore
from multiprocessing import cpu_count
from .doxygen_trans import DoxygenTranslator
RECURSE_LIST = [
CursorKind.TRANSLATION_UNIT,
CursorKind.NAMESPACE,
CursorKind.CLASS_DECL,
CursorKind.STRUCT_DECL,
CursorKind.ENUM_DECL,
CursorKind.CLASS_TEMPLATE
]
PRINT_LIST = [
CursorKind.CLASS_DECL,
CursorKind.STRUCT_DECL,
CursorKind.ENUM_DECL,
CursorKind.ENUM_CONSTANT_DECL,
CursorKind.CLASS_TEMPLATE,
CursorKind.FUNCTION_DECL,
CursorKind.FUNCTION_TEMPLATE,
CursorKind.CONVERSION_FUNCTION,
CursorKind.CXX_METHOD,
CursorKind.CONSTRUCTOR,
CursorKind.FIELD_DECL
]
PREFIX_BLACKLIST = [
CursorKind.TRANSLATION_UNIT
]
CPP_OPERATORS = {
'<=': 'le', '>=': 'ge', '==': 'eq', '!=': 'ne', '[]': 'array',
'+=': 'iadd', '-=': 'isub', '*=': 'imul', '/=': 'idiv', '%=':
'imod', '&=': 'iand', '|=': 'ior', '^=': 'ixor', '<<=': 'ilshift',
'>>=': 'irshift', '++': 'inc', '--': 'dec', '<<': 'lshift', '>>':
'rshift', '&&': 'land', '||': 'lor', '!': 'lnot', '~': 'bnot',
'&': 'band', '|': 'bor', '+': 'add', '-': 'sub', '*': 'mul', '/':
'div', '%': 'mod', '<': 'lt', '>': 'gt', '=': 'assign', '()': 'call'
}
CPP_OPERATORS = OrderedDict(
sorted(CPP_OPERATORS.items(), key=lambda t: -len(t[0])))
job_count = cpu_count()
job_semaphore = Semaphore(job_count)
errors_detected = False
class NoFilenamesError(ValueError):
pass
def d(s):
return s if isinstance(s, str) else s.decode('utf8')
def sanitize_name(name):
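"""Turn a (possibly qualified) C++ name into a valid C identifier: template
parameters become T0, T1, ..., operators get readable names, template argument
lists are dropped, any remaining non-alphanumeric character becomes '_' and the
result is prefixed with '__doc_'."""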
name = re.sub(r'type-parameter-0-([0-9]+)', r'T\1', name)
for k, v in CPP_OPERATORS.items():
name = name.replace('operator%s' % k, 'operator_%s' % v)
name = re.sub('<.*>', '', name)
name = ''.join([ch if ch.isalnum() else '_' for ch in name])
name = re.sub('_$', '', name)
return '__doc_' + name
def process_comment(comment):
result = ''
# Remove C++ comment syntax
leading_spaces = float('inf')
for s in comment.expandtabs(tabsize=4).splitlines():
s = s.strip()
if s.startswith('/*'):
s = s[2:].lstrip('*')
elif s.endswith('*/'):
s = s[:-2].rstrip('*')
elif s.startswith('///'):
s = s[3:]
if s.startswith('*'):
s = s[1:]
if len(s) > 0:
leading_spaces = min(leading_spaces, len(s) - len(s.lstrip()))
result += s + '\n'
# could textwrap.dedent help here?
if leading_spaces != float('inf'):
result2 = ""
for s in result.splitlines():
result2 += s[leading_spaces:] + '\n'
result = result2
translation = DoxygenTranslator(return_includes_type_tag=True)(result)
# TODO: Re-flow text without messing up the new format
return translation
def extract(filename, node, prefix, output):
if not (node.location.file is None or
os.path.samefile(d(node.location.file.name), filename)):
return 0
if node.kind in RECURSE_LIST:
sub_prefix = prefix
if node.kind not in PREFIX_BLACKLIST:
if len(sub_prefix) > 0:
sub_prefix += '_'
sub_prefix += d(node.spelling)
for i in node.get_children():
extract(filename, i, sub_prefix, output)
if node.kind in PRINT_LIST:
comment = d(node.raw_comment) if node.raw_comment is not None else ''
comment = process_comment(comment)
sub_prefix = prefix
if len(sub_prefix) > 0:
sub_prefix += '_'
if len(node.spelling) > 0:
name = sanitize_name(sub_prefix + d(node.spelling))
output.append((name, filename, comment))
class ExtractionThread(Thread):
def __init__(self, filename, parameters, output):
Thread.__init__(self)
self.filename = filename
self.parameters = parameters
self.output = output
job_semaphore.acquire()
def run(self):
global errors_detected
print('Processing "%s" ..' % self.filename, file=sys.stderr)
try:
index = cindex.Index(
cindex.conf.lib.clang_createIndex(False, True))
tu = index.parse(self.filename, self.parameters)
extract(self.filename, tu.cursor, '', self.output)
except BaseException:
errors_detected = True
raise
finally:
job_semaphore.release()
def read_args(args):
parameters = []
filenames = []
if "-x" not in args:
parameters.extend(['-x', 'c++'])
if not any(it.startswith("-std=") for it in args):
parameters.append('-std=c++11')
parameters.append('-Wno-pragma-once-outside-header')
if platform.system() == 'Darwin':
dev_path = '/Applications/Xcode.app/Contents/Developer/'
lib_dir = dev_path + 'Toolchains/XcodeDefault.xctoolchain/usr/lib/'
sdk_dir = dev_path + 'Platforms/MacOSX.platform/Developer/SDKs'
libclang = lib_dir + 'libclang.dylib'
if os.path.exists(libclang):
cindex.Config.set_library_path(os.path.dirname(libclang))
if os.path.exists(sdk_dir):
sysroot_dir = os.path.join(sdk_dir, next(os.walk(sdk_dir))[1][0])
parameters.append('-isysroot')
parameters.append(sysroot_dir)
elif platform.system() == 'Linux':
# ctypes.util.find_library does not find `libclang` for all clang
# versions and distributions. LLVM switched to a monolithic setup
# that includes everything under /usr/lib/llvm-{version_number}/
# We therefore glob for the library and select the highest version
if 'LIBCLANG_PATH' in os.environ:
cindex.Config.set_library_file(os.environ['LIBCLANG_PATH'])
else:
library_file = sorted(glob("/usr/lib/llvm-*/lib/libclang.so.1"), reverse=True)[0]
cindex.Config.set_library_file(library_file)
# clang doesn't find its own base includes by default on Linux,
# but different distros install them in different paths.
# Try to autodetect, preferring the highest numbered version.
def folder_version(d):
return [int(ver) for ver in re.findall(r'(?<!lib)(?<!\d)\d+', d)]
llvm_dir = max((
path
for libdir in ['lib64', 'lib', 'lib32']
for path in glob('/usr/%s/llvm-*' % libdir)
if os.path.isdir(path)
), default=None, key=folder_version)
if llvm_dir:
if '-stdlib=libc++' in args:
parameters.extend(['-isystem', os.path.join(llvm_dir, 'include', 'c++', 'v1')])
clang_include_dir = max(
glob(os.path.join(llvm_dir, 'lib', 'clang', '*')
), default=None, key=folder_version)
parameters.extend(['-isystem', clang_include_dir])
parameters.extend(['-isystem', '/usr/include/%s-linux-gnu' % platform.machine(),
'-isystem', '/usr/include'])
else:
clang_include_dir = max((
path
for libdir in ['lib64', 'lib', 'lib32']
for path in glob('/usr/%s/clang/*/include' % libdir)
if os.path.isdir(path)
), default=None, key=folder_version)
if clang_include_dir:
parameters.extend(['-isystem', clang_include_dir])
for item in args:
if item.startswith('-'):
parameters.append(item)
else:
filenames.append(item)
if len(filenames) == 0:
raise NoFilenamesError("args parameter did not contain any filenames")
return parameters, filenames
def extract_all(args):
parameters, filenames = read_args(args)
output = []
for filename in filenames:
thr = ExtractionThread(filename, parameters, output)
thr.start()
print('Waiting for jobs to finish ..', file=sys.stderr)
for i in range(job_count):
job_semaphore.acquire()
return output
def write_header(comments, out_file=sys.stdout):
print('''/*
This file contains docstrings for use in the Python bindings.
Do not edit! They were automatically extracted by pybind11_mkdoc.
*/
#define __EXPAND(x) x
#define __COUNT(_1, _2, _3, _4, _5, _6, _7, COUNT, ...) COUNT
#define __VA_SIZE(...) __EXPAND(__COUNT(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1))
#define __CAT1(a, b) a ## b
#define __CAT2(a, b) __CAT1(a, b)
#define __DOC1(n1) __doc_##n1
#define __DOC2(n1, n2) __doc_##n1##_##n2
#define __DOC3(n1, n2, n3) __doc_##n1##_##n2##_##n3
#define __DOC4(n1, n2, n3, n4) __doc_##n1##_##n2##_##n3##_##n4
#define __DOC5(n1, n2, n3, n4, n5) __doc_##n1##_##n2##_##n3##_##n4##_##n5
#define __DOC6(n1, n2, n3, n4, n5, n6) __doc_##n1##_##n2##_##n3##_##n4##_##n5##_##n6
#define __DOC7(n1, n2, n3, n4, n5, n6, n7) __doc_##n1##_##n2##_##n3##_##n4##_##n5##_##n6##_##n7
#define DOC(...) __EXPAND(__EXPAND(__CAT2(__DOC, __VA_SIZE(__VA_ARGS__)))(__VA_ARGS__))
#if defined(__GNUG__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-variable"
#endif
''', file=out_file)
name_ctr = 1
name_prev = None
for name, _, comment in list(sorted(comments, key=lambda x: (x[0], x[1]))):
if name == name_prev:
name_ctr += 1
name = name + "_%i" % name_ctr
else:
name_prev = name
name_ctr = 1
print('\nstatic const char *%s =%sR"doc(%s)doc";' %
(name, '\n' if '\n' in comment else ' ', comment), file=out_file)
print('''
#if defined(__GNUG__)
#pragma GCC diagnostic pop
#endif
''', file=out_file)
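# Hedged usage sketch (added for illustration, not part of the original script):
# the header emitted by write_header() is meant to be included from pybind11
# binding code, where the DOC(...) macro defined above expands to the matching
# __doc_* variable. The names below (docstrings.h, ns::Widget, module handle m)
# are hypothetical.
#
#   #include "docstrings.h"  // file generated by this script
#   py::class_<ns::Widget>(m, "Widget", DOC(ns, Widget))
#       .def("run", &ns::Widget::run, DOC(ns, Widget, run));
#
# DOC(ns, Widget, run) has three arguments, so __VA_SIZE selects __DOC3 and the
# call expands to __doc_ns_Widget_run.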
def mkdoc(args, output=None):
comments = extract_all(args)
if errors_detected:
return
if output:
try:
with open(output, 'w') as out_file:
write_header(comments, out_file)
except:
# In the event of an error, don't leave a partially-written
# output file.
try:
os.unlink(output)
except:
pass
raise
else:
write_header(comments)
|
[] |
[] |
[
"LIBCLANG_PATH"
] |
[]
|
["LIBCLANG_PATH"]
|
python
| 1 | 0 | |
integrationtest/vm/multihosts/ha/test_2nfs_vm_ha_force_stop_host_vm_status.py
|
'''
New integration test: force stop the host where a KVM VM with HA enabled is located,
start the host again, and check that the VM comes up running on another host.
This test is specific to the 2-NFS case.
@author: SyZhao
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.host_operations as host_ops
import zstackwoodpecker.zstack_test.zstack_test_vm as test_vm_header
import zstackwoodpecker.header.vm as vm_header
import zstackwoodpecker.operations.ha_operations as ha_ops
import zstackwoodpecker.operations.vm_operations as vm_ops
import apibinding.inventory as inventory
import time
import os
vm = None
host_uuid = None
host_ip = None
max_attempts = None
storagechecker_timeout = None
test_stub = test_lib.lib_get_test_stub()
test_host = None
vm_name = "nfs_vm_ha"
def test():
global vm
global host_uuid
global test_host
global host_ip
global max_attempts
global storagechecker_timeout
allow_ps_list = [inventory.NFS_PRIMARY_STORAGE_TYPE]
test_lib.skip_test_when_ps_type_not_in_list(allow_ps_list)
if test_lib.lib_get_ha_enable() != 'true':
test_util.test_skip("vm ha not enabled. Skip test")
test_lib.lib_skip_if_ps_num_is_not_eq_number(2)
vm_creation_option = test_util.VmOption()
image_name = os.environ.get('imageName_net')
image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
l3_name = os.environ.get('l3VlanNetworkName1')
l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
test_lib.clean_up_all_vr()
mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
conditions = res_ops.gen_query_conditions('status', '=', 'Connected', conditions)
conditions = res_ops.gen_query_conditions('managementIp', '!=', mn_ip, conditions)
host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
vm_creation_option.set_host_uuid(host_uuid)
vm_creation_option.set_l3_uuids([l3_net_uuid])
vm_creation_option.set_image_uuid(image_uuid)
vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
vm_creation_option.set_name(vm_name)
vm = test_vm_header.ZstackTestVm()
vm.set_creation_option(vm_creation_option)
vm.create()
test_stub.ensure_host_has_no_vr(host_uuid)
host_ip = test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp
test_util.test_logger("host %s is disconnecting" %(host_ip))
ha_ops.set_vm_instance_ha_level(vm.get_vm().uuid, "NeverStop")
host_list = test_stub.get_sce_hosts(test_lib.all_scenario_config, test_lib.scenario_file)
for host in host_list:
if host.ip_ == host_ip:
test_host = host
break
if not test_host:
test_util.test_fail('there is no host with ip %s in scenario file.' %(host_ip))
test_stub.stop_host(test_host, test_lib.all_scenario_config, 'cold')
vm_stop_time = None
cond = res_ops.gen_query_conditions('name', '=', vm_name)
cond = res_ops.gen_query_conditions('uuid', '=', vm.vm.uuid, cond)
for i in range(0, 180):
vm_stop_time = i
if res_ops.query_resource(res_ops.VM_INSTANCE, cond)[0].state == "Stopped":
test_stub.start_host(test_host, test_lib.all_scenario_config)
test_stub.recover_host_vlan(test_host, test_lib.all_scenario_config, test_lib.deploy_config)
conditions = res_ops.gen_query_conditions('managementIp', '=', host_ip)
kvm_host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
host_ops.reconnect_host(kvm_host_uuid)
break
time.sleep(1)
for i in range(vm_stop_time, 180):
if res_ops.query_resource(res_ops.VM_INSTANCE, cond)[0].state == "Running":
break
time.sleep(1)
else:
test_util.test_fail("vm has not been changed to running as expected within 180s.")
vm.update()
if test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp == host_ip:
test_util.test_fail("VM is expected to start running on another host")
vm.destroy()
test_util.test_pass('Test checking 2nfs vm ha running on other host Success.')
#Will be called only if exception happens in test().
def error_cleanup():
global vm
if vm:
try:
vm.destroy()
except:
pass
def env_recover():
global test_host
global host_ip
try:
test_stub.start_host(test_host, test_lib.all_scenario_config)
test_stub.recover_host_vlan(test_host, test_lib.all_scenario_config, test_lib.deploy_config)
conditions = res_ops.gen_query_conditions('managementIp', '=', host_ip)
kvm_host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
host_ops.reconnect_host(kvm_host_uuid)
except:
pass
|
[] |
[] |
[
"l3VlanNetworkName1",
"imageName_net"
] |
[]
|
["l3VlanNetworkName1", "imageName_net"]
|
python
| 2 | 0 | |
.buildbot/jenkins-build-project.py
|
#!/usr/bin/env python3
import datetime
import docker
import fasteners
import git
import hashlib
import json
import logging
import math
import os
import re
import requests
import sys
logging.basicConfig(
level = logging.INFO, format = '[%(asctime)s] %(levelname)s: %(message)s')
# Set parallel jobs based on both CPU count and memory size,
# because using CPU count alone can exhaust memory and get
# Jenkins killed. For example, we may have 64 CPUs (128 threads)
# but only 32GB memory, so spawning off 128 cc/c++ processes
# would quickly exhaust the memory.
#
# Algorithm: NPROC = min(2, # of CPUs) if memory < 8GB, otherwise
#            NPROC = min(memory / 4, # of CPUs)
MEMORY_IN_GB = (os.sysconf('SC_PAGE_SIZE') *
os.sysconf('SC_PHYS_PAGES') / (1024.**3))
NPROC = str(math.ceil(min(max(2, MEMORY_IN_GB/4), os.cpu_count())))
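# Illustrative worked example (an added sketch; the machine sizes are
# hypothetical): with 64 CPUs and 32GB memory, MEMORY_IN_GB/4 == 8, so
# NPROC = ceil(min(max(2, 8), 64)) = 8; with only 4GB memory, max(2, 1) == 2,
# so NPROC = min(2, 64) = 2, matching the algorithm described above.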
READ_CHUNK_SIZE = 1024*1024
cpu_arch = os.getenv('CPU_ARCH')
docker_pushpull_rwlock = os.getenv('DOCKER_PUSHPULL_RWLOCK')
docker_daemon_socket = os.getenv('DOCKER_DAEMON_SOCKET')
docker_registry_host_name = os.getenv('DOCKER_REGISTRY_HOST_NAME')
docker_registry_user_name = os.getenv('DOCKER_REGISTRY_USER_NAME')
docker_registry_login_name = os.getenv('DOCKER_REGISTRY_LOGIN_NAME')
docker_registry_login_token = os.getenv('DOCKER_REGISTRY_LOGIN_TOKEN')
github_repo_name = os.getenv('GITHUB_REPO_NAME')
github_repo_name2 = os.getenv('GITHUB_REPO_NAME').replace('-', '_')
github_pr_baseref = os.getenv('GITHUB_PR_BASEREF')
github_pr_baseref2 = os.getenv('GITHUB_PR_BASEREF').lower()
github_pr_number = os.getenv('GITHUB_PR_NUMBER')
github_pr_number2 = os.getenv('GITHUB_PR_NUMBER2')
docker_static_image_name = (github_repo_name + '-llvm-static' +
('.' + github_pr_baseref2
if github_pr_baseref != 'main' else ''))
docker_shared_image_name = (github_repo_name + '-llvm-shared' +
('.' + github_pr_baseref2
if github_pr_baseref != 'main' else ''))
docker_dev_image_name = (github_repo_name + '-dev' +
('.' + github_pr_baseref2
if github_pr_baseref != 'main' else ''))
docker_usr_image_name = (github_repo_name +
('.' + github_pr_baseref2
if github_pr_baseref != 'main' else ''))
LLVM_PROJECT_IMAGE = { 'dev': docker_static_image_name,
'usr': docker_shared_image_name }
PROJECT_IMAGE = { 'dev': docker_dev_image_name,
'usr': docker_usr_image_name }
PROJECT_DOCKERFILE = { 'dev': 'docker/Dockerfile.' + github_repo_name + '-dev',
'usr': 'docker/Dockerfile.' + github_repo_name }
PROJECT_LABELS = [ github_repo_name2 + '_sha1',
github_repo_name2 + '_sha1_date',
github_repo_name2 + '_dockerfile_sha1' ]
GITHUB_REPO_NAME = github_repo_name.upper()
GITHUB_REPO_NAME2 = github_repo_name2.upper()
DOCKER_DIST_MANIFESTS = {
'v1': 'application/vnd.docker.distribution.manifest.v1+json',
'v2': 'application/vnd.docker.distribution.manifest.v2+json' }
docker_rwlock = fasteners.InterProcessReaderWriterLock(docker_pushpull_rwlock)
docker_api = docker.APIClient(base_url=docker_daemon_socket)
# Validate whether the commit sha1 date is a valid UTC ISO 8601 date
def valid_sha1_date(sha1_date):
try:
datetime.datetime.strptime(sha1_date, '%Y-%m-%dT%H:%M:%SZ')
return True
except:
return False
# Compute sha1 of a file
def compute_file_sha1(file_name):
sha1sum = hashlib.sha1()
try:
with open(file_name, 'rb') as f:
for data in iter(lambda: f.read(READ_CHUNK_SIZE), b''):
sha1sum.update(data)
return sha1sum.hexdigest()
except:
return ''
# Get project repo commit sha1 and date we are expecting to build
# from the local pull request repo.
def get_proj_repo_info(image_type, local_repo):
repo = git.Repo(local_repo)
exp_proj_repo_sha1 = repo.head.commit.hexsha
exp_proj_repo_sha1_date = datetime.datetime.utcfromtimestamp(
repo.head.commit.committed_date).isoformat() + 'Z'
exp_proj_repo_dockerfile_sha1 = compute_file_sha1(
PROJECT_DOCKERFILE[image_type])
# Labels used to filter local images
exp_proj_repo_filter = { 'label': [
github_repo_name2 + '_sha1=' + exp_proj_repo_sha1,
github_repo_name2 + '_dockerfile_sha1=' + exp_proj_repo_dockerfile_sha1 ] }
logging.info('%s expected', PROJECT_IMAGE[image_type])
logging.info('commit sha1: %s', exp_proj_repo_sha1)
logging.info('commit date: %s', exp_proj_repo_sha1_date)
logging.info('dockerfile sha1: %s', exp_proj_repo_dockerfile_sha1)
logging.info('image filter: %s', exp_proj_repo_filter)
return { github_repo_name2 + '_sha1': exp_proj_repo_sha1,
github_repo_name2 + '_sha1_date': exp_proj_repo_sha1_date,
github_repo_name2 + '_dockerfile_sha1': exp_proj_repo_dockerfile_sha1,
github_repo_name2 + '_filter': exp_proj_repo_filter }
# Make REST call to get the v1 or v2 manifest of an image from
# private docker registry
def get_image_manifest_private(host_name, user_name, image_name, image_tag,
schema_version, login_name, login_token):
resp = requests.get(
url = ('https://' + host_name + '/v2/' +
(user_name + '/' if user_name else '') +
image_name + '/manifests/' + image_tag),
headers = { 'Accept': DOCKER_DIST_MANIFESTS[schema_version] },
auth = (login_name, login_token))
resp.raise_for_status()
return resp
# Make REST call to get the access token to operate on an image in
# public docker registry
def get_access_token(user_name, image_name, action, login_name, login_token):
resp = requests.get(
url = ('https://auth.docker.io/token' +
'?service=registry.docker.io' +
'&scope=repository:' +
(user_name + '/' if user_name else '') + image_name + ':'+ action),
auth = (login_name, login_token))
resp.raise_for_status()
return resp.json()['token']
# Make REST call to get the v1 or v2 manifest of an image from
# public docker registry
def get_image_manifest_public(user_name, image_name, image_tag,
schema_version, login_name, login_token, access_token = None):
# Get access token if not passed in
if not access_token:
access_token = get_access_token(
user_name, image_name, 'pull', login_name, login_token)
# Get manifest
resp = requests.get(
url = ('https://registry-1.docker.io/v2/' +
(user_name + '/' if user_name else '') +
image_name + '/manifests/' + image_tag),
headers={ 'Accept': DOCKER_DIST_MANIFESTS[schema_version],
'Authorization': 'Bearer ' + access_token })
resp.raise_for_status()
return resp
# Get the labels of a docker image in the docker registry.
# python docker SDK does not support this so we have to make
# our own REST calls.
def get_remote_image_labels(host_name, user_name, image_name, image_tag,
image_labels, login_name, login_token):
try:
# Get manifest, only v1 schema has labels so accept v1 only
resp = (
# private docker registry
get_image_manifest_private(host_name, user_name, image_name, image_tag,
'v1', login_name, login_token)
if host_name else
# public docker registry
get_image_manifest_public(user_name, image_name, image_tag,
'v1', login_name, login_token))
image_full = ((host_name + '/' if host_name else '') +
(user_name + '/' if user_name else '') +
image_name + ':' + image_tag)
# v1Compatibility is a quoted JSON string, not a JSON object
manifest = json.loads(resp.json()['history'][0]['v1Compatibility'])
logging.info('remote image %s labels: %s',
image_full, manifest['config']['Labels'])
labels = manifest['config']['Labels']
if (labels):
labels_ok = True
for label in image_labels:
if not labels[label]:
labels_ok = False
break
if labels_ok:
return labels
raise Exception('remote image ' + image_full +
' does not exist or has invalid labels')
except:
logging.info(sys.exc_info()[1])
return ''
# Remove all the containers depending on a (dangling) image.
def remove_dependent_containers(image):
containers = docker_api.containers(
filters = { 'ancestor': image }, all = True, quiet = True)
for container in containers:
try:
container_info = docker_api.inspect_container(container['Id'])
logging.info('Removing Id:%s', container['Id'])
logging.info(' Image %s', container_info['Image'])
logging.info(' Cmd %s', str(container_info['Config']['Cmd']))
logging.info(' Labels %s', str(container_info['Config']['Labels']))
docker_api.remove_container(container['Id'], v = True, force = True)
except:
logging.info(sys.exc_info()[1])
logging.info('errors ignored while removing dependent containers')
# Build project dev and user images.
def build_private_project(image_type, exp):
host_name = docker_registry_host_name
user_name = docker_registry_user_name
login_name = docker_registry_login_name
login_token = docker_registry_login_token
base_image_name = LLVM_PROJECT_IMAGE[image_type]
base_image_repo = ((host_name + '/' if host_name else '') +
(user_name + '/' if user_name else '') +
base_image_name)
base_image_tag = github_pr_number.lower()
image_name = PROJECT_IMAGE[image_type]
image_repo = ((host_name + '/' if host_name else '') +
(user_name + '/' if user_name else '') +
image_name)
image_tag = github_pr_number.lower()
image_full = image_repo + ':' + image_tag
image_arch = image_repo + ':' + cpu_arch
image_filter = exp[github_repo_name2 + '_filter']
image_labels = PROJECT_LABELS
# First look for a local project image for the pull request that
# was built by a previous build job. We can use it if it has the
# expected project repo sha1, which means that the repo hasn't changed.
# This is useful for situations where we trigger the build by the
# "{test|publish} this please" comment phrase for various testing
# purposes without actually changing the repo itself, e.g.,
# testing different Jenkins job configurations.
#
# Note that, unlike the case with llvm-project images, we don't need
    # to check the dockerfile sha1 used to build the onnx-mlir images
# because the dockerfile is part of onnx-mlir. If we changed it, then
# onnx-mlir commit sha1 would have changed.
id = docker_api.images(name = image_full, filters = image_filter,
all = False, quiet = True)
    # If a local usable project image was not found, see if we can
# pull one from the registry.
if not id:
# Acquire read lock to pull the arch image. This is to serialize
# against other PR merges trying to push (writers) the arch image.
# PR builds trying to pull (readers) the arch image can go concurrently.
logging.info('acquiring read lock for pulling %s', image_arch)
docker_rwlock.acquire_read_lock()
try:
labels = get_remote_image_labels(host_name, user_name, image_name, cpu_arch,
image_labels, login_name, login_token)
# Image in registry has expected onnx-mlir commit sha1, pull and
# tag it with pull request number for our private use.
if (labels and
labels[github_repo_name2 + '_sha1'] == exp[github_repo_name2 + '_sha1']):
for line in docker_api.pull(image_repo, tag = cpu_arch,
stream = True, decode = True):
print((line['id']+': '
if 'id' in line and 'progress' not in line else '') +
(line['status'] + '\n'
if 'status' in line and 'progress' not in line else ''),
end='', flush=True)
# Tag pulled arch image with pull request number then remove
# the arch image
docker_api.tag(image_arch, image_repo, image_tag, force = True)
docker_api.remove_image(image_arch, force = True)
# For logging purpose only
id = docker_api.images(name = image_full,
all = False, quiet = True)
logging.info('image %s (%s) tagged', image_full, id[0][0:19])
return
except:
logging.info(sys.exc_info()[1])
# Remove arch image and release lock regardless of exception or not
finally:
docker_rwlock.release_read_lock()
logging.info('released read lock for pulling %s', image_arch)
# Build project locally if one of the following is true
#
# - image in registry does not exist
# - pull image failed
# - image in registry has a project repo commit sha1 different
# from what we expect
#
layer_sha256 = ''
for line in docker_api.build(
path = '.',
dockerfile = PROJECT_DOCKERFILE[image_type],
tag = image_repo + ':' + image_tag,
decode = True,
rm = True,
buildargs = {
'BASE_IMAGE': base_image_repo + ':' + base_image_tag,
'NPROC': NPROC,
GITHUB_REPO_NAME2 + '_SHA1': exp[github_repo_name2 + '_sha1'],
GITHUB_REPO_NAME2 + '_SHA1_DATE': exp[github_repo_name2 + '_sha1_date'],
GITHUB_REPO_NAME2 + '_DOCKERFILE_SHA1': exp[github_repo_name2 + '_dockerfile_sha1'],
GITHUB_REPO_NAME2 + '_PR_NUMBER': github_pr_number,
GITHUB_REPO_NAME2 + '_PR_NUMBER2': github_pr_number2
}):
if 'stream' in line:
# Keep track of the latest successful image layer
            m = re.match(r'^\s*---> ([0-9a-f]+)$', line['stream'])
if m:
layer_sha256 = m.group(1)
print(line['stream'], end='', flush=True)
if 'error' in line:
# Tag the latest successful image layer for easier debugging
if layer_sha256:
image_layer = 'sha256:' + layer_sha256
remove_dependent_containers(image_layer)
logging.info('tagging %s -> %s', image_layer, image_full)
docker_api.tag(image_layer, image_repo, image_tag, force=True)
else:
logging.info('no successful image layer for tagging')
raise Exception(line['error'])
id = docker_api.images(name=image_full, all=False, quiet=True)
logging.info('image %s (%s) built', image_full, id[0][0:19])
    # Found usable local image
else:
logging.info('image %s (%s) found', image_full, id[0][0:19])
def main():
build_private_project('dev', get_proj_repo_info('dev', '.'))
build_private_project('usr', get_proj_repo_info('usr', '.'))
if __name__ == "__main__":
main()
|
[] |
[] |
[
"GITHUB_PR_NUMBER",
"GITHUB_REPO_NAME",
"GITHUB_PR_BASEREF",
"CPU_ARCH",
"DOCKER_REGISTRY_LOGIN_NAME",
"DOCKER_DAEMON_SOCKET",
"DOCKER_PUSHPULL_RWLOCK",
"DOCKER_REGISTRY_LOGIN_TOKEN",
"GITHUB_PR_NUMBER2",
"DOCKER_REGISTRY_HOST_NAME",
"DOCKER_REGISTRY_USER_NAME"
] |
[]
|
["GITHUB_PR_NUMBER", "GITHUB_REPO_NAME", "GITHUB_PR_BASEREF", "CPU_ARCH", "DOCKER_REGISTRY_LOGIN_NAME", "DOCKER_DAEMON_SOCKET", "DOCKER_PUSHPULL_RWLOCK", "DOCKER_REGISTRY_LOGIN_TOKEN", "GITHUB_PR_NUMBER2", "DOCKER_REGISTRY_HOST_NAME", "DOCKER_REGISTRY_USER_NAME"]
|
python
| 11 | 0 | |
Section-4/services/hello-message-golang-v1/main.go
|
package main
import (
"fmt"
"net/http"
"os"
"strconv"
"time"
)
func main() {
http.HandleFunc("/", indexHandler)
http.HandleFunc("/api/message/index", indexMsgHandler)
http.HandleFunc("/api/message/hello", helloMsgHandler)
fmt.Println("Listen and Serve Hello Message Golang v1")
http.ListenAndServe(port(), nil)
}
func port() string {
port := os.Getenv("PORT")
if len(port) == 0 {
port = "8080"
}
return ":" + port
}
func indexHandler(w http.ResponseWriter, r *http.Request) {
if r.URL.Path != "/" {
http.NotFound(w, r)
return
}
sleeps, ok := r.URL.Query()["sleep"]
if ok {
i, err := strconv.Atoi(sleeps[0])
if err == nil {
time.Sleep(time.Duration(i) * time.Second)
}
}
w.WriteHeader(http.StatusNoContent)
}
func indexMsgHandler(w http.ResponseWriter, r *http.Request) {
fmt.Println("Handler func /api/message/index called.")
sleeps, ok := r.URL.Query()["sleep"]
if ok {
i, err := strconv.Atoi(sleeps[0])
if err == nil {
time.Sleep(time.Duration(i) * time.Second)
}
}
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, "Default Index Message from Golang (v1).")
}
func helloMsgHandler(w http.ResponseWriter, r *http.Request) {
fmt.Println("Handler func /api/message/hello called.")
sleeps, ok := r.URL.Query()["sleep"]
if ok {
i, err := strconv.Atoi(sleeps[0])
if err == nil {
time.Sleep(time.Duration(i) * time.Second)
}
}
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, "Hello Istio Message from Golang (v1).")
}
|
[
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
go
| 1 | 0 | |
src/main/java/SpeechToTextWebsocketsDemo.java
|
import com.github.catalystcode.fortis.speechtotext.Transcriber;
import com.github.catalystcode.fortis.speechtotext.config.OutputFormat;
import com.github.catalystcode.fortis.speechtotext.config.SpeechServiceConfig;
import com.github.catalystcode.fortis.speechtotext.config.SpeechType;
import org.apache.log4j.BasicConfigurator;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
import java.util.Locale;
public class SpeechToTextWebsocketsDemo {
static {
BasicConfigurator.configure();
Logger.getRootLogger().setLevel(Level.INFO);
}
public static void main(String[] args) throws Exception {
final String subscriptionKey = System.getenv("OXFORD_SPEECH_TOKEN");
final SpeechType speechType = SpeechType.CONVERSATION;
final OutputFormat outputFormat = OutputFormat.SIMPLE;
final String audioPath = args[0];
final Locale locale = new Locale(args.length > 1 ? args[1] : "en-US");
final String audioType = args.length > 2 ? args[2] : audioPath;
SpeechServiceConfig config = new SpeechServiceConfig(subscriptionKey, speechType, outputFormat, locale);
try (InputStream audioStream = openStream(audioPath)) {
Transcriber.create(audioType, config).transcribe(audioStream, SpeechToTextWebsocketsDemo::onPhrase, SpeechToTextWebsocketsDemo::onHypothesis);
}
}
private static InputStream openStream(String audioPath) throws IOException {
InputStream inputStream = audioPath.startsWith("http://") || audioPath.startsWith("https://")
? new URL(audioPath).openConnection().getInputStream()
: new FileInputStream(audioPath);
return new BufferedInputStream(inputStream);
}
private static void onPhrase(String phrase) {
System.out.println("Phrase: " + phrase);
}
private static void onHypothesis(String hypothesis) {
System.out.println("Hypothesis: " + hypothesis);
}
}
|
[
"\"OXFORD_SPEECH_TOKEN\""
] |
[] |
[
"OXFORD_SPEECH_TOKEN"
] |
[]
|
["OXFORD_SPEECH_TOKEN"]
|
java
| 1 | 0 | |
ambari-agent/src/main/python/ambari_agent/AmbariConfig.py
|
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import ConfigParser
import StringIO
import hostname
import json
from NetUtil import NetUtil
import os
from ambari_commons import OSConst
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
content = """
[server]
hostname=localhost
url_port=8440
secured_url_port=8441
[agent]
prefix={ps}tmp{ps}ambari-agent
tmp_dir={ps}tmp{ps}ambari-agent{ps}tmp
data_cleanup_interval=86400
data_cleanup_max_age=2592000
data_cleanup_max_size_MB = 100
ping_port=8670
cache_dir={ps}var{ps}lib{ps}ambari-agent{ps}cache
[services]
[python]
custom_actions_dir = {ps}var{ps}lib{ps}ambari-agent{ps}resources{ps}custom_actions
[command]
maxretries=2
sleepBetweenRetries=1
[security]
keysdir={ps}tmp{ps}ambari-agent
server_crt=ca.crt
passphrase_env_var_name=AMBARI_PASSPHRASE
[heartbeat]
state_interval = 6
dirs={ps}etc{ps}hadoop,{ps}etc{ps}hadoop{ps}conf,{ps}var{ps}run{ps}hadoop,{ps}var{ps}log{ps}hadoop
log_lines_count=300
""".format(ps=os.sep)
servicesToPidNames = {
'GLUSTERFS' : 'glusterd.pid$',
'NAMENODE': 'hadoop-{USER}-namenode.pid$',
'SECONDARY_NAMENODE': 'hadoop-{USER}-secondarynamenode.pid$',
'DATANODE': 'hadoop-{USER}-datanode.pid$',
'JOBTRACKER': 'hadoop-{USER}-jobtracker.pid$',
'TASKTRACKER': 'hadoop-{USER}-tasktracker.pid$',
'RESOURCEMANAGER': 'yarn-{USER}-resourcemanager.pid$',
'NODEMANAGER': 'yarn-{USER}-nodemanager.pid$',
'HISTORYSERVER': 'mapred-{USER}-historyserver.pid$',
'JOURNALNODE': 'hadoop-{USER}-journalnode.pid$',
'ZKFC': 'hadoop-{USER}-zkfc.pid$',
'OOZIE_SERVER': 'oozie.pid',
'ZOOKEEPER_SERVER': 'zookeeper_server.pid',
'FLUME_SERVER': 'flume-node.pid',
'TEMPLETON_SERVER': 'templeton.pid',
'GANGLIA_SERVER': 'gmetad.pid',
'GANGLIA_MONITOR': 'gmond.pid',
'HBASE_MASTER': 'hbase-{USER}-master.pid',
'HBASE_REGIONSERVER': 'hbase-{USER}-regionserver.pid',
'HCATALOG_SERVER': 'webhcat.pid',
'KERBEROS_SERVER': 'kadmind.pid',
'HIVE_SERVER': 'hive-server.pid',
'HIVE_METASTORE': 'hive.pid',
'MYSQL_SERVER': 'mysqld.pid',
'HUE_SERVER': '/var/run/hue/supervisor.pid',
'WEBHCAT_SERVER': 'webhcat.pid',
}
# Each service whose pid depends on a user should provide a user mapping
servicesToLinuxUser = {
'NAMENODE': 'hdfs_user',
'SECONDARY_NAMENODE': 'hdfs_user',
'DATANODE': 'hdfs_user',
'JOURNALNODE': 'hdfs_user',
'ZKFC': 'hdfs_user',
'JOBTRACKER': 'mapred_user',
'TASKTRACKER': 'mapred_user',
'RESOURCEMANAGER': 'yarn_user',
'NODEMANAGER': 'yarn_user',
'HISTORYSERVER': 'mapred_user',
'HBASE_MASTER': 'hbase_user',
'HBASE_REGIONSERVER': 'hbase_user',
}
pidPathVars = [
{'var' : 'glusterfs_pid_dir_prefix',
'defaultValue' : '/var/run'},
{'var' : 'hadoop_pid_dir_prefix',
'defaultValue' : '/var/run/hadoop'},
{'var' : 'hadoop_pid_dir_prefix',
'defaultValue' : '/var/run/hadoop'},
{'var' : 'ganglia_runtime_dir',
'defaultValue' : '/var/run/ganglia/hdp'},
{'var' : 'hbase_pid_dir',
'defaultValue' : '/var/run/hbase'},
{'var' : 'zk_pid_dir',
'defaultValue' : '/var/run/zookeeper'},
{'var' : 'oozie_pid_dir',
'defaultValue' : '/var/run/oozie'},
{'var' : 'hcat_pid_dir',
'defaultValue' : '/var/run/webhcat'},
{'var' : 'hive_pid_dir',
'defaultValue' : '/var/run/hive'},
{'var' : 'mysqld_pid_dir',
'defaultValue' : '/var/run/mysqld'},
{'var' : 'hcat_pid_dir',
'defaultValue' : '/var/run/webhcat'},
{'var' : 'yarn_pid_dir_prefix',
'defaultValue' : '/var/run/hadoop-yarn'},
{'var' : 'mapred_pid_dir_prefix',
'defaultValue' : '/var/run/hadoop-mapreduce'},
]
class AmbariConfig:
TWO_WAY_SSL_PROPERTY = "security.server.two_way_ssl"
SERVER_CONNECTION_INFO = "{0}/connection_info"
CONNECTION_PROTOCOL = "https"
config = None
net = None
def __init__(self):
global content
self.config = ConfigParser.RawConfigParser()
self.net = NetUtil()
self.config.readfp(StringIO.StringIO(content))
def get(self, section, value, default=None):
try:
return self.config.get(section, value)
except ConfigParser.Error, err:
if default:
return default
raise err
def set(self, section, option, value):
self.config.set(section, option, value)
def add_section(self, section):
self.config.add_section(section)
def setConfig(self, customConfig):
self.config = customConfig
def getConfig(self):
return self.config
@staticmethod
@OsFamilyFuncImpl(OSConst.WINSRV_FAMILY)
def getConfigFile():
if 'AMBARI_AGENT_CONF_DIR' in os.environ:
return os.path.join(os.environ['AMBARI_AGENT_CONF_DIR'], "ambari-agent.ini")
else:
return "ambari-agent.ini"
@staticmethod
@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
def getConfigFile():
if 'AMBARI_AGENT_CONF_DIR' in os.environ:
return os.path.join(os.environ['AMBARI_AGENT_CONF_DIR'], "ambari-agent.ini")
else:
return os.path.join(os.sep, "etc", "ambari-agent", "conf", "ambari-agent.ini")
@staticmethod
def getLogFile():
if 'AMBARI_AGENT_LOG_DIR' in os.environ:
return os.path.join(os.environ['AMBARI_AGENT_LOG_DIR'], "ambari-agent.log")
else:
return os.path.join(os.sep, "var", "log", "ambari-agent", "ambari-agent.log")
@staticmethod
def getOutFile():
if 'AMBARI_AGENT_OUT_DIR' in os.environ:
return os.path.join(os.environ['AMBARI_AGENT_OUT_DIR'], "ambari-agent.out")
else:
return os.path.join(os.sep, "var", "log", "ambari-agent", "ambari-agent.out")
def has_option(self, section, option):
return self.config.has_option(section, option)
def remove_option(self, section, option):
return self.config.remove_option(section, option)
def load(self, data):
self.config = ConfigParser.RawConfigParser(data)
def read(self, filename):
self.config.read(filename)
def getServerOption(self, url, name, default=None):
status, response = self.net.checkURL(url)
if status is True:
try:
data = json.loads(response)
if name in data:
return data[name]
except:
pass
return default
def get_api_url(self):
return "%s://%s:%s" % (self.CONNECTION_PROTOCOL,
hostname.server_hostname(self),
self.get('server', 'url_port'))
def isTwoWaySSLConnection(self):
req_url = self.get_api_url()
response = self.getServerOption(self.SERVER_CONNECTION_INFO.format(req_url), self.TWO_WAY_SSL_PROPERTY, 'false')
if response is None:
return False
elif response.lower() == "true":
return True
else:
return False
def updateConfigServerHostname(configFile, new_host):
# update agent config file
agent_config = ConfigParser.ConfigParser()
agent_config.read(configFile)
server_host = agent_config.get('server', 'hostname')
if new_host is not None and server_host != new_host:
print "Updating server host from " + server_host + " to " + new_host
agent_config.set('server', 'hostname', new_host)
with (open(configFile, "wb")) as new_agent_config:
agent_config.write(new_agent_config)
def main():
print AmbariConfig().config
if __name__ == "__main__":
main()
|
[] |
[] |
[
"AMBARI_AGENT_LOG_DIR",
"AMBARI_AGENT_CONF_DIR",
"AMBARI_AGENT_OUT_DIR"
] |
[]
|
["AMBARI_AGENT_LOG_DIR", "AMBARI_AGENT_CONF_DIR", "AMBARI_AGENT_OUT_DIR"]
|
python
| 3 | 0 | |
src/waldur_mastermind/marketplace_script/processors.py
|
from waldur_mastermind.marketplace import processors
from .utils import DockerExecutorMixin
"""
It is expected that the offering's plugin_options field is a dict with the following structure:
language: python
environ:
USERNAME: admin
PASSWORD: secret
create:
import os
print("Creating resource ", os.environ.get('RESOURCE_NAME'))
update:
import os
print("Updating resource ", os.environ.get('RESOURCE_NAME'))
delete:
import os
print("Deleting resource ", os.environ.get('RESOURCE_NAME'))
pull:
import os
print("Pulling resource ", os.environ.get('RESOURCE_NAME'))
"""
class CreateProcessor(DockerExecutorMixin, processors.AbstractCreateResourceProcessor):
hook_type = 'create'
def send_request(self, user):
output = super().send_request(user)
        # Return the last line of the output as the backend_id of the created resource
if output:
return output.split()[-1]
class UpdateProcessor(DockerExecutorMixin, processors.AbstractUpdateResourceProcessor):
hook_type = 'update'
def send_request(self, user):
super().send_request(user)
return True
class DeleteProcessor(DockerExecutorMixin, processors.AbstractDeleteResourceProcessor):
hook_type = 'delete'
def send_request(self, user, resource):
super().send_request(user, resource)
return True
|
[] |
[] |
[
"RESOURCE_NAME"
] |
[]
|
["RESOURCE_NAME"]
|
python
| 1 | 0 | |
test/parallel/test_tensorflow.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
# Modifications copyright (C) 2018 Uber Technologies, Inc.
# Modifications copyright (C) 2019 Intel Corporation
# Modifications copyright (C) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for horovod.tensorflow.mpi_ops."""
from distutils.version import LooseVersion
import itertools
import numpy as np
import os
import tensorflow as tf
from horovod.tensorflow.util import _executing_eagerly
from tensorflow.python.framework import ops
import warnings
import horovod.tensorflow as hvd
from common import mpi_env_rank_and_size
if hasattr(tf, 'ConfigProto'):
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
if hasattr(tf, 'config') and hasattr(tf.config, 'experimental') \
and hasattr(tf.config.experimental, 'set_memory_growth'):
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
else:
# Specifies the config to use with eager execution. Does not preclude
# tests from running in the graph mode.
tf.enable_eager_execution(config=config)
ccl_supported_types = set([tf.uint8, tf.int32, tf.int64, tf.float32, tf.float64])
_IS_TF2 = LooseVersion(tf.__version__) >= LooseVersion('2.0.0')
class TensorFlowTests(tf.test.TestCase):
"""
Tests for ops in horovod.tensorflow.
"""
def __init__(self, *args, **kwargs):
super(TensorFlowTests, self).__init__(*args, **kwargs)
warnings.simplefilter('module')
if hasattr(tf, 'contrib') and hasattr(tf.contrib, 'eager'):
self.tfe = tf.contrib.eager
else:
self.tfe = tf
def evaluate(self, tensors):
if _executing_eagerly():
return self._eval_helper(tensors)
sess = ops.get_default_session()
if sess is None:
with self.test_session(config=config) as sess:
return sess.run(tensors)
else:
return sess.run(tensors)
def assign(self, variables, values):
if _executing_eagerly():
for var, val in zip(variables, values):
var.assign(val)
else:
sess = ops.get_default_session()
if sess is None:
with self.test_session(config=config) as sess:
for var, val in zip(variables, values):
var.load(val, sess)
else:
for var, val in zip(variables, values):
var.load(val, sess)
def random_uniform(self, *args, **kwargs):
if hasattr(tf, 'random') and hasattr(tf.random, 'set_seed'):
tf.random.set_seed(1234)
return tf.random.uniform(*args, **kwargs)
else:
tf.set_random_seed(1234)
return tf.random_uniform(*args, **kwargs)
def filter_supported_types(self, types):
if 'CCL_ROOT' in os.environ:
types = [t for t in types if t in ccl_supported_types]
return types
def test_horovod_rank(self):
"""Test that the rank returned by hvd.rank() is correct."""
mpi_rank, _ = mpi_env_rank_and_size()
gloo_rank = int(os.getenv('HOROVOD_RANK', -1))
        # The MPI rank may not match the gloo rank; we need to figure out
        # which one we are using to run the test.
is_mpi = gloo_rank == -1
hvd.init()
rank = hvd.rank()
if is_mpi:
assert mpi_rank == rank
else:
assert gloo_rank == rank
def test_horovod_size(self):
"""Test that the size returned by hvd.size() is correct."""
_, mpi_size = mpi_env_rank_and_size()
gloo_size = int(os.getenv('HOROVOD_SIZE', -1))
        # The MPI size may not match the gloo size; we need to figure out
        # which one we are using to run the test.
is_mpi = gloo_size == -1
hvd.init()
size = hvd.size()
if is_mpi:
assert mpi_size == size
else:
assert gloo_size == size
def test_horovod_rank_op(self):
"""Test that the rank returned by hvd.rank_op() is correct."""
hvd.init()
rank = self.evaluate(hvd.rank_op())
self.assertTrue(rank == hvd.rank(),
"hvd.rank_op produces incorrect results")
def test_horovod_local_rank_op(self):
"""Test that the local rank returned by hvd.local_rank_op() is correct."""
hvd.init()
local_rank = self.evaluate(hvd.local_rank_op())
self.assertTrue(local_rank == hvd.local_rank(),
"hvd.local_rank_op produces incorrect results")
def test_horovod_size_op(self):
"""Test that the size returned by hvd.size_op() is correct."""
hvd.init()
size = self.evaluate(hvd.size_op())
self.assertTrue(size == hvd.size(),
"hvd.size_op produces incorrect results")
def test_horovod_local_size_op(self):
"""Test that the local size returned by hvd.local_size_op() is correct."""
hvd.init()
local_size = self.evaluate(hvd.local_size_op())
self.assertTrue(local_size == hvd.local_size(),
"hvd.local_size_op produces incorrect results")
def test_horovod_allreduce_cpu(self):
"""Test on CPU that the allreduce correctly sums 1D, 2D, 3D tensors."""
hvd.init()
size = hvd.size()
dtypes = self.filter_supported_types([tf.int32, tf.int64, tf.float16, tf.float32, tf.float64])
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
with tf.device("/cpu:0"):
tensor = self.random_uniform(
[17] * dim, -100, 100, dtype=dtype)
summed = hvd.allreduce(tensor, average=False)
multiplied = tensor * size
max_difference = tf.reduce_max(tf.abs(summed - multiplied))
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in [tf.int32, tf.int64]:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
self.skipTest("Horovod cluster too large for precise multiplication comparison")
diff = self.evaluate(max_difference)
self.assertTrue(diff <= threshold, "hvd.allreduce produces incorrect results")
def test_horovod_allreduce_average_cpu(self):
"""Test on CPU that the allreduce correctly sums 1D, 2D, 3D tensors."""
hvd.init()
size = hvd.size()
dtypes = self.filter_supported_types([tf.int32, tf.int64, tf.float16, tf.float32, tf.float64])
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
with tf.device("/cpu:0"):
tensor = self.random_uniform(
[17] * dim, -100, 100, dtype=dtype)
averaged = hvd.allreduce(tensor, average=True)
max_difference = tf.reduce_max(tf.abs(tf.cast(averaged, dtype=dtype) - tensor))
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in [tf.int32, tf.int64]:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
self.skipTest("Horovod cluster too large for precise multiplication comparison")
diff = self.evaluate(max_difference)
self.assertTrue(diff <= threshold, "hvd.allreduce produces incorrect results")
def test_horovod_allreduce_cpu_fused(self):
"""Test on CPU that the allreduce correctly sums 1D, 2D, 3D tensors
with Tensor Fusion."""
hvd.init()
size = hvd.size()
dtypes = self.filter_supported_types([tf.int32, tf.int64, tf.float16, tf.float32, tf.float64])
dims = [1, 2, 3]
tests = []
for dtype, dim in itertools.product(dtypes, dims):
with tf.device("/cpu:0"):
tensor = self.random_uniform(
[17] * dim, -100, 100, dtype=dtype)
summed = hvd.allreduce(tensor, average=False)
multiplied = tensor * size
max_difference = tf.reduce_max(tf.abs(summed - multiplied))
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in [tf.int32, tf.int64]:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
self.skipTest("Horovod cluster too large for precise multiplication comparison")
test = max_difference <= threshold
tests.append(test)
self.assertTrue(self.evaluate(tf.reduce_all(tests)),
"hvd.allreduce produces incorrect results")
# Note: TF does not support FP64 op attributes so scaling factor is cast to FP32
# by op and loses precision. We skip FP64 version of pre/postscale tests for this reason.
# See https://github.com/tensorflow/tensorflow/pull/39452 for PR to resolve this limitation.
def test_horovod_allreduce_cpu_prescale(self):
"""Test on CPU that the allreduce correctly sums 1D, 2D, 3D tensors
with prescaling"""
hvd.init()
size = hvd.size()
dtypes = self.filter_supported_types([tf.int32, tf.int64, tf.float16, tf.float32])
int_types = [tf.int32, tf.int64]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
with tf.device("/cpu:0"):
np.random.seed(1234)
factor = np.random.uniform()
tensor = self.random_uniform(
[17] * dim, -100, 100, dtype=dtype)
summed = hvd.allreduce(tensor, average=False,
prescale_factor=factor)
# Scaling done in FP64 math for integer types, FP32 math for FP16 on CPU
tensor = tf.cast(tensor, tf.float32 if dtype == tf.float16 else
tf.float64 if dtype in int_types else dtype)
factor = tf.convert_to_tensor(factor, tf.float32 if dtype == tf.float16 else
tf.float64 if dtype in int_types else dtype)
multiplied = tf.cast(factor * tensor, dtype) * size
max_difference = tf.reduce_max(tf.abs(summed - multiplied))
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in int_types:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
diff = self.evaluate(max_difference)
self.assertTrue(diff <= threshold,
"hvd.allreduce produces incorrect results")
def test_horovod_allreduce_cpu_postscale(self):
"""Test on CPU that the allreduce correctly sums 1D, 2D, 3D tensors
with postscaling"""
hvd.init()
size = hvd.size()
dtypes = self.filter_supported_types([tf.int32, tf.int64, tf.float16, tf.float32])
int_types = [tf.int32, tf.int64]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
with tf.device("/cpu:0"):
np.random.seed(1234)
factor = np.random.uniform()
tensor = self.random_uniform(
[17] * dim, -100, 100, dtype=dtype)
summed = hvd.allreduce(tensor, average=False,
postscale_factor=factor)
multiplied = tensor * size
# Scaling done in FP64 math for integer types, FP32 math for FP16 on CPU
multiplied = tf.cast(multiplied, tf.float32 if dtype == tf.float16 else
tf.float64 if dtype in int_types else dtype)
factor = tf.convert_to_tensor(factor, tf.float32 if dtype == tf.float16 else
tf.float64 if dtype in int_types else dtype)
multiplied = tf.cast(factor * multiplied, dtype)
max_difference = tf.reduce_max(tf.abs(summed - multiplied))
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in int_types:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
diff = self.evaluate(max_difference)
self.assertTrue(diff <= threshold,
"hvd.allreduce produces incorrect results")
def test_horovod_allreduce_gpu(self):
"""Test that the allreduce works on GPUs."""
# Only do this test if there are GPUs available.
if not tf.test.is_gpu_available(cuda_only=True):
self.skipTest(("No GPUs available"))
if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):
# Skip if compiled with CUDA but without HOROVOD_GPU_OPERATIONS.
self.skipTest("Not compiled with HOROVOD_GPU_OPERATIONS")
hvd.init()
local_rank = hvd.local_rank()
size = hvd.size()
dtypes = [tf.int32, tf.int64, tf.float16, tf.float32, tf.float64]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
with tf.device("/gpu:%d" % local_rank):
tensor = self.random_uniform(
[17] * dim, -100, 100, dtype=dtype)
summed = hvd.allreduce(tensor, average=False)
multiplied = tensor * size
max_difference = tf.reduce_max(tf.abs(summed - multiplied))
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in [tf.int32, tf.int64]:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
self.skipTest("Horovod cluster too large for precise multiplication comparison")
diff = self.evaluate(max_difference)
self.assertTrue(diff <= threshold, "hvd.allreduce on GPU produces incorrect results")
def test_horovod_allreduce_average_gpu(self):
"""Test that the allreduce with average works on GPUs."""
# Only do this test if there are GPUs available.
if not tf.test.is_gpu_available(cuda_only=True):
self.skipTest(("No GPUs available"))
if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):
# Skip if compiled with CUDA but without HOROVOD_GPU_OPERATIONS.
self.skipTest("Not compiled with HOROVOD_GPU_OPERATIONS")
hvd.init()
local_rank = hvd.local_rank()
size = hvd.size()
dtypes = [tf.int32, tf.int64, tf.float16, tf.float32, tf.float64]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
with tf.device("/gpu:%d" % local_rank):
tensor = self.random_uniform(
[17] * dim, -100, 100, dtype=dtype)
averaged = hvd.allreduce(tensor, average=True)
max_difference = tf.reduce_max(tf.abs(tf.cast(averaged, dtype=dtype) - tensor))
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in [tf.int32, tf.int64]:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
self.skipTest("Horovod cluster too large for precise multiplication comparison")
diff = self.evaluate(max_difference)
self.assertTrue(diff <= threshold, "hvd.allreduce on GPU produces incorrect results")
def test_horovod_allreduce_gpu_fused(self):
"""Test that the allreduce works on GPUs with Tensor Fusion.
This test will crash badly if used with an MPI implementation that does
not support GPU memory transfers directly, as it will call MPI_Send on
a GPU data pointer."""
# Only do this test if there are GPUs available.
if not tf.test.is_gpu_available(cuda_only=True):
self.skipTest(("No GPUs available"))
if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):
# Skip if compiled with CUDA but without HOROVOD_GPU_OPERATIONS.
self.skipTest("Not compiled with HOROVOD_GPU_OPERATIONS")
hvd.init()
local_rank = hvd.local_rank()
size = hvd.size()
dtypes = [tf.int32, tf.int64, tf.float16, tf.float32, tf.float64]
dims = [1, 2, 3]
tests = []
for dtype, dim in itertools.product(dtypes, dims):
with tf.device("/gpu:%d" % local_rank):
tensor = self.random_uniform(
[17] * dim, -100, 100, dtype=dtype)
summed = hvd.allreduce(tensor, average=False)
multiplied = tensor * size
max_difference = tf.reduce_max(tf.abs(summed - multiplied))
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in [tf.int32, tf.int64]:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
self.skipTest("Horovod cluster too large for precise multiplication comparison")
test = max_difference <= threshold
tests.append(test)
self.assertTrue(self.evaluate(tf.reduce_all(tests)),
"hvd.allreduce produces incorrect results")
def test_horovod_allreduce_multi_gpu(self):
"""Test that the allreduce works on multiple GPUs.
This test will crash badly if used with an MPI implementation that does
not support GPU memory transfers directly, as it will call MPI_Send on
a GPU data pointer."""
# Only do this test if there are GPUs available.
if not tf.test.is_gpu_available(cuda_only=True):
self.skipTest(("No GPUs available"))
# Only do this test if there are enough GPUs available.
if len(tf.config.experimental.list_physical_devices('GPU')) < 2:
self.skipTest(("Too few GPUs available"))
if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):
# Skip if compiled with CUDA but without HOROVOD_GPU_OPERATIONS.
self.skipTest("Not compiled with HOROVOD_GPU_OPERATIONS")
hvd.init()
local_rank = hvd.local_rank()
size = hvd.size()
iter = 0
gpu_ids = [local_rank * 2, local_rank * 2 + 1]
dtypes = [tf.int32, tf.int64, tf.float16, tf.float32, tf.float64]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
iter += 1
with tf.device("/gpu:%d" % gpu_ids[(iter + local_rank) % 2]):
tensor = self.random_uniform(
[17] * dim, -100, 100, dtype=dtype)
summed = hvd.allreduce(tensor, average=False)
multiplied = tensor * size
max_difference = tf.reduce_max(tf.abs(summed - multiplied))
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in [tf.int32, tf.int64]:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
self.skipTest("Horovod cluster too large for precise multiplication comparison")
diff = self.evaluate(max_difference)
self.assertTrue(diff <= threshold,
"hvd.allreduce on GPU produces incorrect results")
def test_horovod_allreduce_gpu_prescale(self):
"""Test on GPU that the allreduce correctly sums 1D, 2D, 3D tensors
with prescaling"""
# Only do this test if there are GPUs available.
if not tf.test.is_gpu_available(cuda_only=True):
return
if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):
# Skip if compiled with CUDA but without HOROVOD_GPU_ALLREDUCE.
return
hvd.init()
size = hvd.size()
local_rank = hvd.local_rank()
dtypes = self.filter_supported_types([tf.int32, tf.int64, tf.float16, tf.float32])
int_types = [tf.int32, tf.int64]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
with tf.device("/gpu:%s" % local_rank):
np.random.seed(1234)
factor = np.random.uniform()
tensor = self.random_uniform(
[17] * dim, -100, 100, dtype=dtype)
summed = hvd.allreduce(tensor, average=False,
prescale_factor=factor)
# Scaling done in FP64 math for integer types.
tensor = tf.cast(tensor, tf.float64 if dtype in int_types else dtype)
factor = tf.convert_to_tensor(factor, tf.float64 if dtype in int_types else dtype)
multiplied = tf.cast(factor * tensor, dtype) * size
max_difference = tf.reduce_max(tf.abs(summed - multiplied))
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in int_types:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
diff = self.evaluate(max_difference)
self.assertTrue(diff <= threshold,
"hvd.allreduce produces incorrect results")
def test_horovod_allreduce_gpu_postscale(self):
"""Test on GPU that the allreduce correctly sums 1D, 2D, 3D tensors
with postscaling"""
# Only do this test if there are GPUs available.
if not tf.test.is_gpu_available(cuda_only=True):
return
if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):
# Skip if compiled with CUDA but without HOROVOD_GPU_ALLREDUCE.
return
hvd.init()
size = hvd.size()
local_rank = hvd.local_rank()
dtypes = self.filter_supported_types([tf.int32, tf.int64, tf.float16, tf.float32])
int_types = [tf.int32, tf.int64]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
with tf.device("/gpu:%s" % local_rank):
np.random.seed(1234)
factor = np.random.uniform()
tensor = self.random_uniform(
[17] * dim, -100, 100, dtype=dtype)
summed = hvd.allreduce(tensor, average=False,
postscale_factor=factor)
multiplied = tensor * size
# Scaling done in FP64 math for integer types.
multiplied = tf.cast(multiplied, tf.float64 if dtype in int_types else dtype)
factor = tf.convert_to_tensor(factor, tf.float64 if dtype in int_types else dtype)
multiplied = tf.cast(factor * multiplied, dtype)
max_difference = tf.reduce_max(tf.abs(summed - multiplied))
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in int_types:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
diff = self.evaluate(max_difference)
self.assertTrue(diff <= threshold,
"hvd.allreduce produces incorrect results")
def test_horovod_allreduce_error(self):
"""Test that the allreduce raises an error if different ranks try to
send tensors of different rank or dimension."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
# Same rank, different dimension
dims = [17 + rank] * 3
tensor = self.random_uniform(dims, -1.0, 1.0)
with self.assertRaises(tf.errors.FailedPreconditionError):
self.evaluate(hvd.allreduce(tensor))
# Same number of elements, different rank
if rank == 0:
dims = [17, 23 * 57]
else:
dims = [17, 23, 57]
tensor = self.random_uniform(dims, -1.0, 1.0)
with self.assertRaises(tf.errors.FailedPreconditionError):
self.evaluate(hvd.allreduce(tensor))
def test_horovod_allreduce_type_error(self):
"""Test that the allreduce raises an error if different ranks try to
send tensors of different type."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
# Same rank, different dimension
dims = [17] * 3
tensor = tf.ones(dims,
dtype=tf.int32 if rank % 2 == 0 else tf.float32)
with self.assertRaises(tf.errors.FailedPreconditionError):
self.evaluate(hvd.allreduce(tensor))
def test_horovod_allreduce_cpu_gpu_error(self):
"""Test that the allreduce raises an error if different ranks try to
perform reduction on CPU and GPU."""
# Only do this test if there are GPUs available.
if not tf.test.is_gpu_available(cuda_only=True):
self.skipTest(("No GPUs available"))
if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):
# Skip if compiled with CUDA but without HOROVOD_GPU_OPERATIONS.
self.skipTest("Not compiled with HOROVOD_GPU_OPERATIONS")
hvd.init()
local_rank = hvd.local_rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
device = "/gpu:%d" % local_rank if local_rank % 2 == 0 else "/cpu:0"
with tf.device(device):
# Same rank, different dimension
dims = [17] * 3
tensor = tf.ones(dims, dtype=tf.int32)
with self.assertRaises(tf.errors.FailedPreconditionError):
self.evaluate(hvd.allreduce(tensor))
def test_horovod_allreduce_grad_cpu(self):
"""Test the correctness of the allreduce gradient on CPU."""
hvd.init()
size = hvd.size()
# As of TensorFlow v1.9, gradients are not supported on
# integer tensors
dtypes = [tf.float32, tf.float64]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
with tf.device("/cpu:0"):
if _executing_eagerly():
tensor = self.tfe.Variable(self.random_uniform(
[5] * dim, -100, 100, dtype=dtype))
with tf.GradientTape() as tape:
summed = hvd.allreduce(tensor, average=False)
else:
tensor = self.random_uniform(
[5] * dim, -100, 100, dtype=dtype)
summed = hvd.allreduce(tensor, average=False)
grad_ys = tf.ones([5] * dim)
if _executing_eagerly():
grad_out = tape.gradient(summed, tensor, grad_ys)
else:
grad = tf.gradients(summed, tensor, grad_ys)[0]
grad_out = self.evaluate(grad)
expected = np.ones([5] * dim) * size
err = np.linalg.norm(expected - grad_out)
self.assertLess(err, 0.00000001,
"gradient %s differs from expected %s, "
"error: %s" % (grad_out, expected, str(err)))
def test_horovod_allreduce_average_grad_cpu(self):
"""Test the correctness of the allreduce with average gradient on CPU."""
hvd.init()
size = hvd.size()
# As of TensorFlow v1.9, gradients are not supported on
# integer tensors
dtypes = [tf.float32, tf.float64]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
with tf.device("/cpu:0"):
if _executing_eagerly():
tensor = self.tfe.Variable(self.random_uniform(
[5] * dim, -100, 100, dtype=dtype))
with tf.GradientTape() as tape:
averaged = hvd.allreduce(tensor, average=True)
else:
tensor = self.random_uniform(
[5] * dim, -100, 100, dtype=dtype)
averaged = hvd.allreduce(tensor, average=True)
grad_ys = tf.ones([5] * dim, dtype=dtype)
if _executing_eagerly():
grad_out = tape.gradient(averaged, tensor, grad_ys)
else:
grad = tf.gradients(averaged, tensor, grad_ys)[0]
grad_out = self.evaluate(grad)
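            # With average=True the forward pass divides by size, which cancels
            # the size-fold summation in the backward pass, so the gradient is all ones.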
expected = np.ones([5] * dim)
err = np.linalg.norm(expected - grad_out)
self.assertLess(err, 0.00000001,
"gradient %s differs from expected %s, "
"error: %s" % (grad_out, expected, str(err)))
def test_horovod_allreduce_grad_gpu(self):
"""Test the correctness of the allreduce gradient on GPU."""
# Only do this test if there are GPUs available.
if not tf.test.is_gpu_available(cuda_only=True):
self.skipTest(("No GPUs available"))
if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):
# Skip if compiled with CUDA but without HOROVOD_GPU_OPERATIONS.
self.skipTest("Not compiled with HOROVOD_GPU_OPERATIONS")
hvd.init()
local_rank = hvd.local_rank()
size = hvd.size()
# As of TensorFlow v1.9, gradients are not supported on
# integer tensors
dtypes = [tf.float32, tf.float64]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
with tf.device("/gpu:%d" % local_rank):
if _executing_eagerly():
tensor = self.tfe.Variable(
self.random_uniform([5] * dim, -100, 100, dtype=dtype))
with tf.GradientTape() as tape:
summed = hvd.allreduce(tensor, average=False)
else:
tensor = self.random_uniform([5] * dim, -100, 100, dtype=dtype)
summed = hvd.allreduce(tensor, average=False)
grad_ys = tf.ones([5] * dim)
if _executing_eagerly():
grad_out = tape.gradient(summed, tensor, grad_ys)
else:
grad = tf.gradients(summed, tensor, grad_ys)[0]
grad_out = self.evaluate(grad)
expected = np.ones([5] * dim) * size
err = np.linalg.norm(expected - grad_out)
self.assertLess(err, 0.00000001,
"gradient %s differs from expected %s, "
"error: %s" % (grad_out, expected, str(err)))
def test_horovod_allreduce_average_grad_gpu(self):
"""Test the correctness of the allreduce with average gradient on GPU."""
# Only do this test if there are GPUs available.
if not tf.test.is_gpu_available(cuda_only=True):
self.skipTest(("No GPUs available"))
if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):
# Skip if compiled with CUDA but without HOROVOD_GPU_OPERATIONS.
self.skipTest("Not compiled with HOROVOD_GPU_OPERATIONS")
hvd.init()
local_rank = hvd.local_rank()
size = hvd.size()
# As of TensorFlow v1.9, gradients are not supported on
# integer tensors
dtypes = [tf.float32, tf.float64]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
with tf.device("/gpu:%d" % local_rank):
if _executing_eagerly():
tensor = self.tfe.Variable(
self.random_uniform([5] * dim, -100, 100, dtype=dtype))
with tf.GradientTape() as tape:
averaged = hvd.allreduce(tensor, average=True)
else:
tensor = self.random_uniform([5] * dim, -100, 100, dtype=dtype)
averaged = hvd.allreduce(tensor, average=True)
grad_ys = tf.ones([5] * dim, dtype=dtype)
if _executing_eagerly():
grad_out = tape.gradient(averaged, tensor, grad_ys)
else:
grad = tf.gradients(averaged, tensor, grad_ys)[0]
grad_out = self.evaluate(grad)
expected = np.ones([5] * dim)
err = np.linalg.norm(expected - grad_out)
self.assertLess(err, 0.00000001,
"gradient %s differs from expected %s, "
"error: %s" % (grad_out, expected, str(err)))
def test_horovod_allgather_cpu(self):
"""Test that the allgather correctly gathers 1D, 2D, 3D tensors."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
dtypes = [tf.uint8, tf.int8, tf.uint16, tf.int16,
tf.int32, tf.int64, tf.float16, tf.float32,
tf.float64, tf.bool]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
tensor = tf.ones([17] * dim) * rank
if dtype == tf.bool:
tensor = tensor % 2
tensor = tf.cast(tensor, dtype=dtype)
with tf.device("/cpu:0"):
gathered = hvd.allgather(tensor)
gathered_tensor = self.evaluate(gathered)
self.assertEqual(list(gathered_tensor.shape),
[17 * size] + [17] * (dim - 1))
for i in range(size):
rank_tensor = tf.slice(gathered_tensor,
[i * 17] + [0] * (dim - 1),
[17] + [-1] * (dim - 1))
self.assertEqual(list(rank_tensor.shape), [17] * dim)
# tf.equal() does not support tf.uint16 as of TensorFlow 1.2,
# so need to cast rank_tensor to tf.int32.
if dtype != tf.bool:
value = i
else:
value = i % 2
self.assertTrue(
self.evaluate(tf.reduce_all(
tf.equal(tf.cast(rank_tensor, tf.int32), value))),
"hvd.allgather produces incorrect gathered tensor")
def test_horovod_allgather_gpu(self):
"""Test that the allgather correctly gathers 1D, 2D, 3D tensors."""
# Only do this test if there are GPUs available.
if not tf.test.is_gpu_available(cuda_only=True):
self.skipTest(("No GPUs available"))
if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):
# Skip if compiled with CUDA but without HOROVOD_GPU_OPERATIONS.
self.skipTest("Not compiled with HOROVOD_GPU_OPERATIONS")
hvd.init()
rank = hvd.rank()
local_rank = hvd.local_rank()
size = hvd.size()
dtypes = [tf.uint8, tf.int8, tf.uint16, tf.int16,
tf.int32, tf.int64, tf.float16, tf.float32,
tf.float64, tf.bool]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
tensor = tf.ones([17] * dim) * rank
if dtype == tf.bool:
tensor = tensor % 2
tensor = tf.cast(tensor, dtype=dtype)
with tf.device("/gpu:%d" % local_rank):
gathered = hvd.allgather(tensor)
gathered_tensor = self.evaluate(gathered)
self.assertEqual(list(gathered_tensor.shape),
[17 * size] + [17] * (dim - 1))
for i in range(size):
rank_tensor = tf.slice(gathered_tensor,
[i * 17] + [0] * (dim - 1),
[17] + [-1] * (dim - 1))
self.assertEqual(list(rank_tensor.shape), [17] * dim)
# tf.equal() does not support tf.uint16 as of TensorFlow 1.2,
# so need to cast rank_tensor to tf.int32.
if dtype != tf.bool:
value = i
else:
value = i % 2
self.assertTrue(
self.evaluate(tf.reduce_all(
tf.equal(tf.cast(rank_tensor, tf.int32), value))),
"hvd.allgather produces incorrect gathered tensor")
def test_horovod_allgather_fused_cpu(self):
"""Test that the allgather correctly gathers 1D, 2D, 3D tensors
with Tensor Fusion."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
dtypes = [tf.uint8, tf.int8, tf.uint16, tf.int16,
tf.int32, tf.int64, tf.float16, tf.float32,
tf.float64, tf.bool]
dims = [1, 2, 3]
tests = []
shape_tests = []
for dtype, dim in itertools.product(dtypes, dims):
tensor = tf.ones([17] * dim) * rank
if dtype == tf.bool:
tensor = tensor % 2
tensor = tf.cast(tensor, dtype=dtype)
with tf.device("/cpu:0"):
gathered = hvd.allgather(tensor)
shape_tests.append(
tf.reduce_all(tf.equal(tf.shape(gathered),
[17 * size] + [17] * (dim - 1))))
for i in range(size):
rank_tensor = tf.slice(gathered,
[i * 17] + [0] * (dim - 1),
[17] + [-1] * (dim - 1))
if dtype != tf.bool:
value = i
else:
value = i % 2
# tf.equal() does not support tf.uint16 as of TensorFlow 1.2,
# so need to cast rank_tensor to tf.int32.
tests.append(
tf.reduce_all(
tf.equal(tf.cast(rank_tensor, tf.int32), value)))
shape_tests_passed, value_tests_passed = \
self.evaluate([tf.reduce_all(shape_tests), tf.reduce_all(tests)])
self.assertTrue(shape_tests_passed,
"hvd.allgather produces incorrect gathered tensor")
self.assertTrue(value_tests_passed,
"hvd.allgather produces incorrect gathered tensor")
def test_horovod_allgather_fused_gpu(self):
"""Test that the allgather correctly gathers 1D, 2D, 3D tensors
with Tensor Fusion."""
# Only do this test if there are GPUs available.
if not tf.test.is_gpu_available(cuda_only=True):
self.skipTest(("No GPUs available"))
if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):
# Skip if compiled with CUDA but without HOROVOD_GPU_OPERATIONS.
self.skipTest("Not compiled with HOROVOD_GPU_OPERATIONS")
hvd.init()
rank = hvd.rank()
local_rank = hvd.local_rank()
size = hvd.size()
dtypes = [tf.uint8, tf.int8, tf.uint16, tf.int16,
tf.int32, tf.int64, tf.float16, tf.float32,
tf.float64, tf.bool]
dims = [1, 2, 3]
tests = []
shape_tests = []
for dtype, dim in itertools.product(dtypes, dims):
tensor = tf.ones([17] * dim) * rank
if dtype == tf.bool:
tensor = tensor % 2
tensor = tf.cast(tensor, dtype=dtype)
with tf.device("/gpu:%d" % local_rank):
gathered = hvd.allgather(tensor)
shape_tests.append(
tf.reduce_all(tf.equal(tf.shape(gathered),
[17 * size] + [17] * (dim - 1))))
for i in range(size):
rank_tensor = tf.slice(gathered,
[i * 17] + [0] * (dim - 1),
[17] + [-1] * (dim - 1))
if dtype != tf.bool:
value = i
else:
value = i % 2
# tf.equal() does not support tf.uint16 as of TensorFlow 1.2,
# so need to cast rank_tensor to tf.int32.
tests.append(
tf.reduce_all(
tf.equal(tf.cast(rank_tensor, tf.int32), value)))
shape_tests_passed, value_tests_passed = \
self.evaluate([tf.reduce_all(shape_tests), tf.reduce_all(tests)])
self.assertTrue(shape_tests_passed,
"hvd.allgather produces incorrect gathered tensor")
self.assertTrue(value_tests_passed,
"hvd.allgather produces incorrect gathered tensor")
def test_horovod_allgather_variable_size_fused_cpu(self):
"""Test that the allgather correctly gathers 1D, 2D, 3D tensors with
Tensor Fusion, even if those tensors have different sizes along the
first dim."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
dtypes = [tf.uint8, tf.int8, tf.uint16, tf.int16,
tf.int32, tf.int64, tf.float16, tf.float32,
tf.float64, tf.bool]
dims = [1, 2, 3]
tests = []
shape_tests = []
for dtype, dim in itertools.product(dtypes, dims):
# Support tests up to MPI Size of 35
if size > 35:
break
tensor_sizes = [17, 32, 81, 12, 15, 23, 22] * 5
tensor_sizes = tensor_sizes[:size]
tensor = tf.ones([tensor_sizes[rank]] + [17] * (dim - 1)) * rank
if dtype == tf.bool:
tensor = tensor % 2
tensor = tf.cast(tensor, dtype=dtype)
with tf.device("/cpu:0"):
gathered = hvd.allgather(tensor)
shape_tests.append(
tf.reduce_all(tf.equal(tf.shape(gathered),
[sum(tensor_sizes)] + [17] * (dim - 1))))
for i in range(size):
rank_size = [tensor_sizes[i]] + [17] * (dim - 1)
rank_tensor = tf.slice(
gathered, [sum(tensor_sizes[:i])] + [0] * (dim - 1),
rank_size)
self.assertEqual(list(rank_tensor.shape), rank_size)
if dtype != tf.bool:
value = i
else:
value = i % 2
# tf.equal() does not support tf.uint16 as of TensorFlow 1.2,
# so need to cast rank_tensor to tf.int32.
tests.append(tf.reduce_all(
tf.equal(tf.cast(rank_tensor, tf.int32), value)))
shape_tests_passed, value_tests_passed = \
self.evaluate([tf.reduce_all(shape_tests), tf.reduce_all(tests)])
self.assertTrue(shape_tests_passed,
"hvd.allgather produces incorrect gathered tensor")
self.assertTrue(value_tests_passed,
"hvd.allgather produces incorrect gathered tensor")
def test_horovod_allgather_variable_size_fused_gpu(self):
"""Test that the allgather correctly gathers 1D, 2D, 3D tensors with
Tensor Fusion, even if those tensors have different sizes along the
first dim."""
# Only do this test if there are GPUs available.
if not tf.test.is_gpu_available(cuda_only=True):
self.skipTest(("No GPUs available"))
hvd.init()
rank = hvd.rank()
        local_rank = hvd.local_rank()
size = hvd.size()
dtypes = [tf.uint8, tf.int8, tf.uint16, tf.int16,
tf.int32, tf.int64, tf.float16, tf.float32,
tf.float64, tf.bool]
dims = [1, 2, 3]
tests = []
shape_tests = []
for dtype, dim in itertools.product(dtypes, dims):
# Support tests up to MPI Size of 35
if size > 35:
break
tensor_sizes = [17, 32, 81, 12, 15, 23, 22] * 5
tensor_sizes = tensor_sizes[:size]
tensor = tf.ones([tensor_sizes[rank]] + [17] * (dim - 1)) * rank
if dtype == tf.bool:
tensor = tensor % 2
tensor = tf.cast(tensor, dtype=dtype)
with tf.device("/gpu:%d" % local_rank):
gathered = hvd.allgather(tensor)
shape_tests.append(
tf.reduce_all(tf.equal(tf.shape(gathered),
[sum(tensor_sizes)] + [17] * (dim - 1))))
for i in range(size):
rank_size = [tensor_sizes[i]] + [17] * (dim - 1)
rank_tensor = tf.slice(
gathered, [sum(tensor_sizes[:i])] + [0] * (dim - 1),
rank_size)
self.assertEqual(list(rank_tensor.shape), rank_size)
if dtype != tf.bool:
value = i
else:
value = i % 2
# tf.equal() does not support tf.uint16 as of TensorFlow 1.2,
# so need to cast rank_tensor to tf.int32.
tests.append(tf.reduce_all(
tf.equal(tf.cast(rank_tensor, tf.int32), value)))
shape_tests_passed, value_tests_passed = \
self.evaluate([tf.reduce_all(shape_tests), tf.reduce_all(tests)])
self.assertTrue(shape_tests_passed,
"hvd.allgather produces incorrect gathered tensor")
self.assertTrue(value_tests_passed,
"hvd.allgather produces incorrect gathered tensor")
def test_horovod_allgather_variable_size_gpu(self):
"""Test that the allgather correctly gathers 1D, 2D, 3D tensors,
even if those tensors have different sizes along the first dim."""
# Only do this test if there are GPUs available.
if not tf.test.is_gpu_available(cuda_only=True):
self.skipTest(("No GPUs available"))
hvd.init()
rank = hvd.rank()
        local_rank = hvd.local_rank()
size = hvd.size()
dtypes = [tf.uint8, tf.int8, tf.uint16, tf.int16,
tf.int32, tf.int64, tf.float16, tf.float32,
tf.float64, tf.bool]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
# Support tests up to MPI Size of 35
if size > 35:
break
tensor_sizes = [17, 32, 81, 12, 15, 23, 22] * 5
tensor_sizes = tensor_sizes[:size]
tensor = tf.ones([tensor_sizes[rank]] + [17] * (dim - 1)) * rank
if dtype == tf.bool:
tensor = tensor % 2
tensor = tf.cast(tensor, dtype=dtype)
with tf.device("/gpu:%d" % local_rank):
gathered = hvd.allgather(tensor)
gathered_tensor = self.evaluate(gathered)
expected_size = sum(tensor_sizes)
self.assertEqual(list(gathered_tensor.shape),
[expected_size] + [17] * (dim - 1))
for i in range(size):
rank_size = [tensor_sizes[i]] + [17] * (dim - 1)
rank_tensor = tf.slice(
gathered, [sum(tensor_sizes[:i])] + [0] * (dim - 1),
rank_size)
self.assertEqual(list(rank_tensor.shape), rank_size)
# tf.equal() does not support tf.uint16 as of TensorFlow 1.2,
# so need to cast rank_tensor to tf.int32.
if dtype != tf.bool:
value = i
else:
value = i % 2
self.assertTrue(
self.evaluate(tf.reduce_all(
tf.equal(tf.cast(rank_tensor, tf.int32), value))),
"hvd.allgather produces incorrect gathered tensor")
def test_horovod_allgather_variable_size_cpu(self):
"""Test that the allgather correctly gathers 1D, 2D, 3D tensors,
even if those tensors have different sizes along the first dim."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
dtypes = [tf.uint8, tf.int8, tf.uint16, tf.int16,
tf.int32, tf.int64, tf.float16, tf.float32,
tf.float64, tf.bool]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
# Support tests up to MPI Size of 35
if size > 35:
break
tensor_sizes = [17, 32, 81, 12, 15, 23, 22] * 5
tensor_sizes = tensor_sizes[:size]
tensor = tf.ones([tensor_sizes[rank]] + [17] * (dim - 1)) * rank
if dtype == tf.bool:
tensor = tensor % 2
tensor = tf.cast(tensor, dtype=dtype)
with tf.device("/cpu:0"):
gathered = hvd.allgather(tensor)
gathered_tensor = self.evaluate(gathered)
expected_size = sum(tensor_sizes)
self.assertEqual(list(gathered_tensor.shape),
[expected_size] + [17] * (dim - 1))
for i in range(size):
rank_size = [tensor_sizes[i]] + [17] * (dim - 1)
rank_tensor = tf.slice(
gathered, [sum(tensor_sizes[:i])] + [0] * (dim - 1),
rank_size)
self.assertEqual(list(rank_tensor.shape), rank_size)
# tf.equal() does not support tf.uint16 as of TensorFlow 1.2,
# so need to cast rank_tensor to tf.int32.
if dtype != tf.bool:
value = i
else:
value = i % 2
self.assertTrue(
self.evaluate(tf.reduce_all(
tf.equal(tf.cast(rank_tensor, tf.int32), value))),
"hvd.allgather produces incorrect gathered tensor")
def test_horovod_allgather_error(self):
"""Test that the allgather returns an error if any dimension besides
the first is different among the tensors being gathered."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
tensor_size = [17] * 3
tensor_size[1] = 10 * (rank + 1)
tensor = tf.ones(tensor_size, dtype=tf.float32) * rank
with self.assertRaises(tf.errors.FailedPreconditionError):
self.evaluate(hvd.allgather(tensor))
def test_horovod_allgather_type_error(self):
"""Test that the allgather returns an error if the types being gathered
differ among the processes"""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
tensor_size = [17] * 3
dtype = tf.int32 if rank % 2 == 0 else tf.float32
tensor = tf.ones(tensor_size, dtype=dtype) * rank
with self.assertRaises(tf.errors.FailedPreconditionError):
self.evaluate(hvd.allgather(tensor))
def test_horovod_allgather_grad_cpu(self):
"""Test the correctness of the allgather gradient on CPU."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# As of TensorFlow v1.9, gradients are not supported on
# integer tensors
dtypes = [tf.float32, tf.float64]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
tensor_sizes = [3, 2, 7, 4, 6, 8, 10] * 5
tensor_sizes = tensor_sizes[:size]
with tf.device("/cpu:0"):
if _executing_eagerly():
with tf.GradientTape() as tape:
tensor = self.tfe.Variable(
tf.ones([tensor_sizes[rank]] + [17] * (dim - 1)) * rank)
if dtype == tf.bool:
tensor = tensor % 2
tensor = tf.cast(tensor, dtype=dtype)
gathered = hvd.allgather(tensor)
grad_list = []
for r, tensor_size in enumerate(tensor_sizes):
g = tf.ones([tensor_size] + [17] * (dim - 1)) * r
grad_list.append(g)
grad_ys = tf.concat(grad_list, axis=0)
grad_out = tape.gradient(gathered, tensor, grad_ys)
else:
tensor = tf.ones([tensor_sizes[rank]] + [17] * (dim - 1)) * rank
if dtype == tf.bool:
tensor = tensor % 2
tensor = tf.cast(tensor, dtype=dtype)
gathered = hvd.allgather(tensor)
grad_list = []
for r, tensor_size in enumerate(tensor_sizes):
g = tf.ones([tensor_size] + [17] * (dim - 1)) * r
grad_list.append(g)
grad_ys = tf.concat(grad_list, axis=0)
grad = tf.gradients(gathered, tensor, grad_ys)[0]
grad_out = self.evaluate(grad)
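            # The allgather gradient allreduces (sums) the upstream gradient across
            # all `size` workers and slices out this rank's segment, which grad_ys
            # fills with the value `rank`, hence the expected rank * size below.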
expected = np.ones(
[tensor_sizes[rank]] + [17] * (dim - 1)
) * rank * size
err = np.linalg.norm(expected - grad_out)
self.assertLess(err, 0.00000001,
"gradient %s differs from expected %s, "
"error: %s" %
(grad_out, expected, str(err)))
def test_horovod_allgather_grad_gpu(self):
"""Test the correctness of the allgather gradient on GPU."""
# Only do this test if there are GPUs available.
if not tf.test.is_gpu_available(cuda_only=True):
self.skipTest(("No GPUs available"))
if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):
# Skip if compiled with CUDA but without HOROVOD_GPU_OPERATIONS.
self.skipTest("Not compiled with HOROVOD_GPU_OPERATIONS")
hvd.init()
rank = hvd.rank()
local_rank = hvd.local_rank()
size = hvd.size()
# As of TensorFlow v1.9, gradients are not supported on
# integer tensors
dtypes = [tf.float32, tf.float64]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
tensor_sizes = [3, 2, 7, 4, 6, 8, 10] * 5
tensor_sizes = tensor_sizes[:size]
with tf.device("/gpu:%d" % local_rank):
if _executing_eagerly():
with tf.GradientTape() as tape:
tensor = self.tfe.Variable(
tf.ones([tensor_sizes[rank]] + [17] * (dim - 1)) * rank)
if dtype == tf.bool:
tensor = tensor % 2
tensor = tf.cast(tensor, dtype=dtype)
gathered = hvd.allgather(tensor)
grad_list = []
for r, tensor_size in enumerate(tensor_sizes):
g = tf.ones([tensor_size] + [17] * (dim - 1)) * r
grad_list.append(g)
grad_ys = tf.concat(grad_list, axis=0)
grad_out = tape.gradient(gathered, tensor, grad_ys)
else:
tensor = tf.ones([tensor_sizes[rank]] + [17] * (dim - 1)) * rank
if dtype == tf.bool:
tensor = tensor % 2
tensor = tf.cast(tensor, dtype=dtype)
gathered = hvd.allgather(tensor)
grad_list = []
for r, tensor_size in enumerate(tensor_sizes):
g = tf.ones([tensor_size] + [17] * (dim - 1)) * r
grad_list.append(g)
grad_ys = tf.concat(grad_list, axis=0)
grad = tf.gradients(gathered, tensor, grad_ys)[0]
grad_out = self.evaluate(grad)
expected = np.ones(
[tensor_sizes[rank]] + [17] * (dim - 1)
) * rank * size
err = np.linalg.norm(expected - grad_out)
self.assertLess(err, 0.00000001,
"gradient %s differs from expected %s, "
"error: %s" %
(grad_out, expected, str(err)))
def test_horovod_broadcast_cpu(self):
"""Test that the broadcast correctly broadcasts 1D, 2D, 3D tensors on CPU."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
dtypes = [tf.uint8, tf.int8, tf.uint16, tf.int16,
tf.int32, tf.int64, tf.float16, tf.float32,
tf.float64, tf.bool]
dims = [1, 2, 3]
root_ranks = list(range(size))
for dtype, dim, root_rank in itertools.product(dtypes, dims, root_ranks):
with tf.device("/cpu:0"):
tensor = tf.ones([17] * dim) * rank
root_tensor = tf.ones([17] * dim) * root_rank
if dtype == tf.bool:
tensor = tensor % 2
root_tensor = root_tensor % 2
tensor = tf.cast(tensor, dtype=dtype)
root_tensor = tf.cast(root_tensor, dtype=dtype)
broadcasted_tensor = hvd.broadcast(tensor, root_rank)
self.assertTrue(
self.evaluate(tf.reduce_all(tf.equal(
tf.cast(root_tensor, tf.int32), tf.cast(broadcasted_tensor, tf.int32)))),
"hvd.broadcast produces incorrect broadcasted tensor")
def test_horovod_broadcast_gpu(self):
"""Test that the broadcast correctly broadcasts 1D, 2D, 3D tensors on GPU."""
# Only do this test if there are GPUs available.
if not tf.test.is_gpu_available(cuda_only=True):
self.skipTest(("No GPUs available"))
if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):
# Skip if compiled with CUDA but without HOROVOD_GPU_OPERATIONS.
self.skipTest("Not compiled with HOROVOD_GPU_OPERATIONS")
hvd.init()
rank = hvd.rank()
local_rank = hvd.local_rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
dtypes = [tf.uint8, tf.int8, tf.uint16, tf.int16,
tf.int32, tf.int64, tf.float16, tf.float32,
tf.float64, tf.bool]
dims = [1, 2, 3]
root_ranks = list(range(size))
for dtype, dim, root_rank in itertools.product(dtypes, dims, root_ranks):
tensor = tf.ones([17] * dim) * rank
root_tensor = tf.ones([17] * dim) * root_rank
if dtype == tf.bool:
tensor = tensor % 2
root_tensor = root_tensor % 2
tensor = tf.cast(tensor, dtype=dtype)
root_tensor = tf.cast(root_tensor, dtype=dtype)
with tf.device("/gpu:%d" % local_rank):
broadcasted_tensor = hvd.broadcast(tensor, root_rank)
self.assertTrue(
self.evaluate(tf.reduce_all(tf.equal(
tf.cast(root_tensor, tf.int32), tf.cast(broadcasted_tensor, tf.int32)))),
"hvd.broadcast produces incorrect broadcasted tensor")
def test_horovod_broadcast_error(self):
"""Test that the broadcast returns an error if any dimension besides
the first is different among the tensors being broadcasted."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
tensor_size = [17] * 3
tensor_size[1] = 10 * (rank + 1)
tensor = tf.ones(tensor_size, dtype=tf.float32) * rank
with self.assertRaises(tf.errors.FailedPreconditionError):
self.evaluate(hvd.broadcast(tensor, 0))
def test_horovod_broadcast_type_error(self):
"""Test that the broadcast returns an error if the types being broadcasted
differ among the processes"""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
tensor_size = [17] * 3
dtype = tf.int32 if rank % 2 == 0 else tf.float32
tensor = tf.ones(tensor_size, dtype=dtype) * rank
with self.assertRaises(tf.errors.FailedPreconditionError):
self.evaluate(hvd.broadcast(tensor, 0))
def test_horovod_broadcast_rank_error(self):
"""Test that the broadcast returns an error if different ranks
specify different root rank."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
tensor = tf.ones([17] * 3, dtype=tf.float32)
with self.assertRaises(tf.errors.FailedPreconditionError):
self.evaluate(hvd.broadcast(tensor, rank))
def test_horovod_broadcast_grad_cpu(self):
"""Test the correctness of the broadcast gradient on CPU."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
# As of TensorFlow v1.9, gradients are not supported on
# integer tensors
dtypes = [tf.float32, tf.float64]
dims = [1, 2, 3]
root_ranks = list(range(size))
for dtype, dim, root_rank in itertools.product(dtypes, dims, root_ranks):
with tf.device("/cpu:0"):
if _executing_eagerly():
tensor = self.tfe.Variable(tf.ones([5] * dim) * rank)
else:
tensor = tf.ones([5] * dim) * rank
if dtype == tf.bool:
tensor = tensor % 2
if _executing_eagerly():
with tf.GradientTape() as tape:
tensor = tf.cast(tensor, dtype=dtype)
broadcasted_tensor = hvd.broadcast(tensor, root_rank)
grad_out = tape.gradient(broadcasted_tensor, tensor)
else:
tensor = tf.cast(tensor, dtype=dtype)
broadcasted_tensor = hvd.broadcast(tensor, root_rank)
grad_ys = tf.ones([5] * dim)
grad = tf.gradients(broadcasted_tensor, tensor, grad_ys)[0]
grad_out = self.evaluate(grad)
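            # The broadcast gradient is allreduced back to the root rank: the root
            # accumulates `size` copies of the all-ones gradient, other ranks get 0.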
c = size if rank == root_rank else 0
expected = np.ones([5] * dim) * c
err = np.linalg.norm(expected - grad_out)
self.assertLess(err, 0.00000001,
"gradient %s differs from expected %s, "
"error: %s" % (grad_out, expected, str(err)))
def test_horovod_broadcast_grad_gpu(self):
"""Test the correctness of the broadcast gradient on GPU."""
# Only do this test if there are GPUs available.
if not tf.test.is_gpu_available(cuda_only=True):
self.skipTest(("No GPUs available"))
if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):
# Skip if compiled with CUDA but without HOROVOD_GPU_OPERATIONS.
self.skipTest("Not compiled with HOROVOD_GPU_OPERATIONS")
hvd.init()
rank = hvd.rank()
local_rank = hvd.local_rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
# As of TensorFlow v1.9, gradients are not supported on
# integer tensors
dtypes = [tf.float32, tf.float64]
dims = [1, 2, 3]
root_ranks = list(range(size))
for dtype, dim, root_rank in itertools.product(dtypes, dims, root_ranks):
with tf.device("/gpu:%d" % local_rank):
if _executing_eagerly():
tensor = self.tfe.Variable(tf.ones([5] * dim) * rank)
else:
tensor = tf.ones([5] * dim) * rank
if dtype == tf.bool:
tensor = tensor % 2
if _executing_eagerly():
with tf.GradientTape() as tape:
tensor = tf.cast(tensor, dtype=dtype)
broadcasted_tensor = hvd.broadcast(tensor, root_rank)
grad_out = tape.gradient(broadcasted_tensor, tensor)
else:
tensor = tf.cast(tensor, dtype=dtype)
broadcasted_tensor = hvd.broadcast(tensor, root_rank)
grad_ys = tf.ones([5] * dim)
grad = tf.gradients(broadcasted_tensor, tensor, grad_ys)[0]
grad_out = self.evaluate(grad)
c = size if rank == root_rank else 0
expected = np.ones([5] * dim) * c
err = np.linalg.norm(expected - grad_out)
self.assertLess(err, 0.00000001,
"gradient %s differs from expected %s, "
"error: %s" % (grad_out, expected, str(err)))
def test_horovod_alltoall_cpu(self):
"""Test that the alltoall correctly distributes 1D, 2D, and 3D tensors."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
dtypes = [tf.uint8, tf.int8, tf.uint16, tf.int16,
tf.int32, tf.int64, tf.float16, tf.float32,
tf.float64]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
with tf.device("/cpu:0"):
vals = []
for i in range(size):
vals += [i] * (rank+1)
tensor = tf.convert_to_tensor(vals, dtype=dtype)
for _ in range(dim - 1):
tensor = tf.expand_dims(tensor, axis=1)
tensor = tf.concat([tensor, tensor], axis=1)
splits = tf.convert_to_tensor([rank+1] * size, dtype=tf.int32)
collected = hvd.alltoall(tensor, splits)
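                # Rank r sends (r+1) copies of value d to destination d, so every
                # rank receives only its own rank value; the total count is
                # sum(r+1 for r in range(size)) = size*(size+1)//2, times the
                # 2**(dim-1) duplication from the concat loop above.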
self.assertTrue(
self.evaluate(tf.reduce_all(
tf.equal(tf.cast(collected, tf.int32), rank))),
"hvd.alltoall produces incorrect collected tensor")
self.assertTrue(
self.evaluate(tf.equal(tf.size(collected), size * (size + 1) // 2 * 2**(dim - 1))),
"hvd.alltoall collected wrong number of values")
def test_horovod_alltoall_gpu(self):
"""Test that the alltoall correctly distributes 1D, 2D, and 3D tensors on GPU."""
# Only do this test if there are GPUs available.
if not tf.test.is_gpu_available(cuda_only=True):
self.skipTest(("No GPUs available"))
if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):
# Skip if compiled with CUDA but without HOROVOD_GPU_OPERATIONS.
self.skipTest("Not compiled with HOROVOD_GPU_OPERATIONS")
# This test does not apply if NCCL version < 2.7.0
if hvd.nccl_built() and hvd.nccl_built() < 2700:
self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
hvd.init()
rank = hvd.rank()
local_rank = hvd.local_rank()
size = hvd.size()
dtypes = [tf.uint8, tf.int8, tf.uint16, tf.int16,
tf.int32, tf.int64, tf.float16, tf.float32,
tf.float64]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
with tf.device("/gpu:%s" % local_rank):
vals = []
for i in range(size):
vals += [i] * (rank+1)
tensor = tf.convert_to_tensor(vals, dtype=dtype)
for _ in range(dim - 1):
tensor = tf.expand_dims(tensor, axis=1)
tensor = tf.concat([tensor, tensor], axis=1)
splits = tf.convert_to_tensor([rank+1] * size, dtype=tf.int32)
collected = hvd.alltoall(tensor, splits)
self.assertTrue(
self.evaluate(tf.reduce_all(
tf.equal(tf.cast(collected, tf.int32), rank))),
"hvd.alltoall produces incorrect collected tensor")
self.assertTrue(
self.evaluate(tf.equal(tf.size(collected), size * (size + 1) // 2 * 2**(dim - 1))),
"hvd.alltoall collected wrong number of values")
def test_horovod_alltoall_equal_split_cpu(self):
"""Test that the alltoall correctly distributes 1D tensors with default splitting."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
dtypes = [tf.uint8, tf.int8, tf.uint16, tf.int16,
tf.int32, tf.int64, tf.float16, tf.float32,
tf.float64]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
with tf.device("/cpu:0"):
vals = []
for i in range(size):
vals += [i] * (rank+1)
tensor = tf.convert_to_tensor(vals, dtype=dtype)
for _ in range(dim - 1):
tensor = tf.expand_dims(tensor, axis=1)
tensor = tf.concat([tensor, tensor], axis=1)
collected = hvd.alltoall(tensor)
self.assertTrue(
self.evaluate(tf.reduce_all(
tf.equal(tf.cast(collected, tf.int32), rank))),
"hvd.alltoall produces incorrect collected tensor")
self.assertTrue(
self.evaluate(tf.equal(tf.size(collected), size * (size + 1) // 2 * 2**(dim - 1))),
"hvd.alltoall collected wrong number of values")
def test_horovod_alltoall_equal_split_gpu(self):
"""Test that the alltoall correctly distributes 1D tensors with default splitting on GPU."""
# Only do this test if there are GPUs available.
if not tf.test.is_gpu_available(cuda_only=True):
self.skipTest(("No GPUs available"))
if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):
# Skip if compiled with CUDA but without HOROVOD_GPU_OPERATIONS.
self.skipTest("Not compiled with HOROVOD_GPU_OPERATIONS")
# This test does not apply if NCCL version < 2.7.0
if hvd.nccl_built() and hvd.nccl_built() < 2700:
self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
hvd.init()
rank = hvd.rank()
local_rank = hvd.local_rank()
size = hvd.size()
dtypes = [tf.uint8, tf.int8, tf.uint16, tf.int16,
tf.int32, tf.int64, tf.float16, tf.float32,
tf.float64]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
with tf.device("/gpu:%s" % local_rank):
vals = []
for i in range(size):
vals += [i] * (rank+1)
tensor = tf.convert_to_tensor(vals, dtype=dtype)
for _ in range(dim - 1):
tensor = tf.expand_dims(tensor, axis=1)
tensor = tf.concat([tensor, tensor], axis=1)
collected = hvd.alltoall(tensor)
self.assertTrue(
self.evaluate(tf.reduce_all(
tf.equal(tf.cast(collected, tf.int32), rank))),
"hvd.alltoall produces incorrect collected tensor")
self.assertTrue(
self.evaluate(tf.equal(tf.size(collected), size * (size + 1) // 2 * 2**(dim - 1))),
"hvd.alltoall collected wrong number of values")
def test_horovod_alltoall_empty_cpu(self):
"""Test that the alltoall correctly deals with an empty input tensor."""
hvd.init()
size = hvd.size()
dtypes = [tf.uint8, tf.int8, tf.uint16, tf.int16,
tf.int32, tf.int64, tf.float16, tf.float32,
tf.float64]
for dtype in dtypes:
with tf.device("/cpu:0"):
vals = [[] for i in range(size)]
tensor = tf.convert_to_tensor(vals, dtype=dtype)
collected = hvd.alltoall(tensor)
self.assertTrue(
self.evaluate(tf.equal(tf.size(collected), 0)),
"hvd.alltoall collected wrong number of values")
def test_horovod_alltoall_empty_gpu(self):
"""Test that the alltoall correctly deals with an empty input tensor."""
# ncclGroupEnd failed: invalid usage
# Only do this test if there are GPUs available.
if not tf.test.is_gpu_available(cuda_only=True):
self.skipTest("No GPUs available")
if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):
# Skip if compiled with CUDA but without HOROVOD_GPU_OPERATIONS.
self.skipTest("Not compiled with HOROVOD_GPU_OPERATIONS")
# This test does not apply if NCCL version < 2.7.0
if hvd.nccl_built() and hvd.nccl_built() < 2700:
self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
hvd.init()
local_rank = hvd.local_rank()
size = hvd.size()
dtypes = [tf.uint8, tf.int8, tf.uint16, tf.int16,
tf.int32, tf.int64, tf.float16, tf.float32,
tf.float64]
for dtype in dtypes:
with tf.device("/gpu:%s" % local_rank):
vals = [[] for i in range(size)]
tensor = tf.convert_to_tensor(vals, dtype=dtype)
collected = hvd.alltoall(tensor)
self.assertTrue(
self.evaluate(tf.equal(tf.size(collected), 0)),
"hvd.alltoall collected wrong number of values")
def test_horovod_alltoall_one_rank_sends_nothing_cpu(self):
"""Test where one rank sends nothing in an alltoall."""
hvd.init()
size = hvd.size()
rank = hvd.rank()
if hvd.size() < 2:
self.skipTest("Only one worker available")
dtypes = [tf.uint8, tf.int8, tf.uint16, tf.int16,
tf.int32, tf.int64, tf.float16, tf.float32,
tf.float64]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
with tf.device("/cpu:0"):
if rank == 1:
splits = tf.convert_to_tensor([0] * size, dtype=tf.int32)
vals = []
tensor = tf.convert_to_tensor(vals, dtype=dtype)
tensor = tf.reshape(tensor, shape=[0] + (dim-1)*[2])
else:
splits = tf.convert_to_tensor([rank + 1] * size, dtype=tf.int32)
vals = []
for i in range(size):
vals += [i] * (rank + 1)
tensor = tf.convert_to_tensor(vals, dtype=dtype)
for _ in range(dim - 1):
tensor = tf.expand_dims(tensor, axis=1)
tensor = tf.concat([tensor, tensor], axis=1)
collected = hvd.alltoall(tensor, splits, name="a2a")
self.assertTrue(
self.evaluate(tf.reduce_all(
tf.equal(tf.cast(collected, tf.int32), rank))),
"hvd.alltoall produces incorrect collected tensor")
self.assertTrue(
self.evaluate(tf.equal(tf.size(collected), size * (size + 1) // 2 * 2**(dim - 1)
- (1+1) * 2 ** (dim-1) # subtract missing rank 1 contributions
)),
"hvd.alltoall collected wrong number of values")
def test_horovod_alltoall_one_rank_sends_nothing_gpu(self):
"""Test where one rank sends nothing in an alltoall."""
# Only do this test if there are GPUs available.
if not tf.test.is_gpu_available(cuda_only=True):
self.skipTest("No GPUs available")
if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):
# Skip if compiled with CUDA but without HOROVOD_GPU_OPERATIONS.
self.skipTest("Not compiled with HOROVOD_GPU_OPERATIONS")
# This test does not apply if NCCL version < 2.7.0
if hvd.nccl_built() and hvd.nccl_built() < 2700:
self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
hvd.init()
local_rank = hvd.local_rank()
size = hvd.size()
rank = hvd.rank()
if hvd.size() < 2:
self.skipTest("Only one worker available")
dtypes = [tf.uint8, tf.int8, tf.uint16, tf.int16,
tf.int32, tf.int64, tf.float16, tf.float32,
tf.float64]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
with tf.device("/gpu:%s" % local_rank):
if rank == 1:
splits = tf.convert_to_tensor([0] * size, dtype=tf.int32)
vals = []
tensor = tf.convert_to_tensor(vals, dtype=dtype)
tensor = tf.reshape(tensor, shape=[0] + (dim-1)*[2])
else:
splits = tf.convert_to_tensor([rank + 1] * size, dtype=tf.int32)
vals = []
for i in range(size):
vals += [i] * (rank + 1)
tensor = tf.convert_to_tensor(vals, dtype=dtype)
for _ in range(dim - 1):
tensor = tf.expand_dims(tensor, axis=1)
tensor = tf.concat([tensor, tensor], axis=1)
collected = hvd.alltoall(tensor, splits, name="a2a")
self.assertTrue(
self.evaluate(tf.reduce_all(
tf.equal(tf.cast(collected, tf.int32), rank))),
"hvd.alltoall produces incorrect collected tensor")
self.assertTrue(
self.evaluate(tf.equal(tf.size(collected), size * (size + 1) // 2 * 2**(dim - 1)
- (1+1) * 2 ** (dim-1) # subtract missing rank 1 contributions
)),
"hvd.alltoall collected wrong number of values")
def test_horovod_alltoall_one_rank_receives_nothing_cpu(self):
"""Test where one rank receives nothing in an alltoall."""
hvd.init()
size = hvd.size()
rank = hvd.rank()
if hvd.size() < 2:
self.skipTest("Only one worker available")
dtypes = [tf.uint8, tf.int8, tf.uint16, tf.int16,
tf.int32, tf.int64, tf.float16, tf.float32,
tf.float64]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
with tf.device("/cpu:0"):
# send nothing to rank 0
splits = tf.convert_to_tensor([0] + [rank + 1] * (size - 1), dtype=tf.int32)
vals = []
for i in range(1, size):
vals += [i] * (rank + 1)
tensor = tf.convert_to_tensor(vals, dtype=dtype)
for _ in range(dim - 1):
tensor = tf.expand_dims(tensor, axis=1)
tensor = tf.concat([tensor, tensor], axis=1)
collected = hvd.alltoall(tensor, splits, name="a2a")
self.assertTrue(
self.evaluate(tf.reduce_all(
tf.equal(tf.cast(collected, tf.int32), rank))),
"hvd.alltoall produces incorrect collected tensor")
if rank == 0:
expected_size = 0
else:
expected_size = size * (size + 1) // 2 * 2**(dim - 1)
self.assertTrue(
self.evaluate(tf.equal(tf.size(collected), expected_size)),
"hvd.alltoall collected wrong number of values")
def test_horovod_alltoall_one_rank_receives_nothing_gpu(self):
"""Test where one rank receives nothing in an alltoall."""
# ncclGroupEnd failed: invalid usage
# Only do this test if there are GPUs available.
if not tf.test.is_gpu_available(cuda_only=True):
self.skipTest("No GPUs available")
if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):
# Skip if compiled with CUDA but without HOROVOD_GPU_OPERATIONS.
self.skipTest("Not compiled with HOROVOD_GPU_OPERATIONS")
# This test does not apply if NCCL version < 2.7.0
if hvd.nccl_built() and hvd.nccl_built() < 2700:
self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
hvd.init()
size = hvd.size()
rank = hvd.rank()
local_rank = hvd.local_rank()
if hvd.size() < 2:
self.skipTest("Only one worker available")
dtypes = [tf.uint8, tf.int8, tf.uint16, tf.int16,
tf.int32, tf.int64, tf.float16, tf.float32,
tf.float64]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
with tf.device("/gpu:%s" % local_rank):
# send nothing to rank 0
splits = tf.convert_to_tensor([0] + [rank + 1] * (size - 1), dtype=tf.int32)
vals = []
for i in range(1, size):
vals += [i] * (rank + 1)
tensor = tf.convert_to_tensor(vals, dtype=dtype)
for _ in range(dim - 1):
tensor = tf.expand_dims(tensor, axis=1)
tensor = tf.concat([tensor, tensor], axis=1)
collected = hvd.alltoall(tensor, splits, name="a2a")
self.assertTrue(
self.evaluate(tf.reduce_all(
tf.equal(tf.cast(collected, tf.int32), rank))),
"hvd.alltoall produces incorrect collected tensor")
if rank == 0:
expected_size = 0
else:
expected_size = size * (size + 1) // 2 * 2**(dim - 1)
self.assertTrue(
self.evaluate(tf.equal(tf.size(collected), expected_size)),
"hvd.alltoall collected wrong number of values")
def test_horovod_alltoall_zero_splits_cpu(self):
"""Test alltoall with some ranks not participating / splits set to zero."""
hvd.init()
if hvd.size() == 1:
self.skipTest("Only one worker available")
active_ranks = range(0, hvd.size() // 2)
silent_ranks = range(hvd.size() // 2, hvd.size())
active_splits = [1 if r in active_ranks else 0 for r in range(hvd.size())]
active_shape = [sum(active_splits), 4]
silent_splits = [0] * hvd.size()
silent_shape = [0, 4]
with tf.device("/cpu:0"):
if hvd.rank() in active_ranks:
source_tensor = tf.fill(active_shape, value=tf.cast(hvd.rank(), tf.int32))
splits = tf.convert_to_tensor(active_splits)
else:
source_tensor = tf.fill(silent_shape, value=tf.cast(hvd.rank(), tf.int32))
splits = tf.convert_to_tensor(silent_splits)
collected = hvd.alltoall(source_tensor, splits, name="alltoall_zero_splits")
result = self.evaluate(collected)
print(hvd.rank(), "result.shape", result.shape)
print(hvd.rank(), "result", result)
if hvd.rank() in active_ranks:
expected_result_shape = active_shape
else:
expected_result_shape = silent_shape
self.assertSequenceEqual(result.shape, expected_result_shape)
if hvd.rank() in active_ranks:
for r_idx, r in enumerate(active_ranks):
self.assertTrue(np.all(result[r_idx, ...] == r))
else:
self.assertLen(result, 0)
def test_horovod_alltoall_zero_splits_gpu(self):
"""Test alltoall with some ranks not participating / splits set to zero."""
# ncclCommInitRank failed: invalid usage
hvd.init()
if hvd.size() == 1:
self.skipTest("Only one worker available")
# Only do this test if there are GPUs available.
if not tf.test.is_gpu_available(cuda_only=True):
self.skipTest("No GPUs available")
if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):
# Skip if compiled with CUDA but without HOROVOD_GPU_OPERATIONS.
self.skipTest("Not compiled with HOROVOD_GPU_OPERATIONS")
# This test does not apply if NCCL version < 2.7.0
if hvd.nccl_built() and hvd.nccl_built() < 2700:
self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
active_ranks = range(0, hvd.size() // 2)
silent_ranks = range(hvd.size() // 2, hvd.size())
active_splits = [1 if r in active_ranks else 0 for r in range(hvd.size())]
active_shape = [sum(active_splits), 4]
silent_splits = [0] * hvd.size()
silent_shape = [0, 4]
with tf.device("/gpu:%s" % hvd.local_rank()):
if hvd.rank() in active_ranks:
source_tensor = tf.fill(active_shape, value=tf.cast(hvd.rank(), tf.int32))
splits = tf.convert_to_tensor(active_splits)
else:
source_tensor = tf.fill(silent_shape, value=tf.cast(hvd.rank(), tf.int32))
splits = tf.convert_to_tensor(silent_splits)
collected = hvd.alltoall(source_tensor, splits, name="alltoall_zero_splits")
result = self.evaluate(collected)
print(hvd.rank(), "result.shape", result.shape)
print(hvd.rank(), "result", result)
if hvd.rank() in active_ranks:
expected_result_shape = active_shape
else:
expected_result_shape = silent_shape
self.assertSequenceEqual(result.shape, expected_result_shape)
if hvd.rank() in active_ranks:
for r_idx, r in enumerate(active_ranks):
self.assertTrue(np.all(result[r_idx, ...] == r))
else:
self.assertLen(result, 0)
def test_horovod_alltoall_type_error(self):
"""Test that the alltoall returns an error if the tensor types differ
across the processes."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
with tf.device("/cpu:0"):
if rank % 2:
tensor = tf.ones([size], dtype=tf.int32)
else:
tensor = tf.ones([size], dtype=tf.float32)
with self.assertRaises(tf.errors.FailedPreconditionError):
self.evaluate(hvd.alltoall(tensor))
def test_horovod_alltoall_equal_split_length_error(self):
"""Test that the alltoall with default splitting returns an error if the tensor length is not a multiple
of the number of workers."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
with tf.device("/cpu:0"):
tensor = tf.ones([size + 1], dtype=tf.float32)
with self.assertRaises(tf.errors.InvalidArgumentError):
self.evaluate(hvd.alltoall(tensor))
def test_horovod_alltoall_splits_error(self):
"""Test that the alltoall returns an error if the sum of the splits entries exceeds
the first dimension of the input tensor."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
with tf.device("/cpu:0"):
tensor = tf.ones([size-1], dtype=tf.float32)
splits = tf.ones([size], dtype=tf.int32)
with self.assertRaises(tf.errors.InvalidArgumentError):
                self.evaluate(hvd.alltoall(tensor, splits))
def test_horovod_alltoall_rank_error(self):
"""Test that the alltoall returns an error if any dimension besides
the first is different among the tensors being processed."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
tensor_size = [2 * size] * 3
tensor_size[1] = 10 * (rank + 1)
with tf.device("/cpu:0"):
tensor = tf.ones(tensor_size)
with self.assertRaises(tf.errors.FailedPreconditionError):
self.evaluate(hvd.alltoall(tensor))
def test_horovod_alltoall_grad_cpu(self):
"""Test the correctness of the alltoall gradient on CPU."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# As of TensorFlow v1.9, gradients are not supported on
# integer tensors
dtypes = [tf.float32, tf.float64]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
with tf.device("/cpu:0"):
vals = []
for i in range(size):
vals += [i] * (rank+1)
tensor = tf.convert_to_tensor(vals, dtype=dtype)
for _ in range(dim - 1):
tensor = tf.expand_dims(tensor, axis=1)
tensor = tf.concat([tensor, tensor], axis=1)
if _executing_eagerly():
tensor = self.tfe.Variable(tensor)
splits = tf.convert_to_tensor([rank + 1] * size, dtype=tf.int32)
with tf.GradientTape() as tape:
collected = hvd.alltoall(tensor, splits)
else:
splits = tf.convert_to_tensor([rank + 1] * size, dtype=tf.int32)
collected = hvd.alltoall(tensor, splits)
grad_ys = tf.ones(tf.shape(collected))
if _executing_eagerly():
grad_out = tape.gradient(collected, tensor, grad_ys)
else:
grad = tf.gradients(collected, tensor, grad_ys)[0]
grad_out = self.evaluate(grad)
expected = np.ones(tensor.get_shape().as_list())
err = np.linalg.norm(expected - grad_out)
self.assertLess(err, 0.00000001,
"gradient %s differs from expected %s, "
"error: %s" % (grad_out, expected, str(err)))
def test_horovod_alltoall_grad_gpu(self):
"""Test the correctness of the alltoall gradient on GPU."""
# Only do this test if there are GPUs available.
if not tf.test.is_gpu_available(cuda_only=True):
self.skipTest(("No GPUs available"))
if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):
# Skip if compiled with CUDA but without HOROVOD_GPU_OPERATIONS.
self.skipTest("Not compiled with HOROVOD_GPU_OPERATIONS")
# This test does not apply if NCCL version < 2.7.0
if hvd.nccl_built() and hvd.nccl_built() < 2700:
self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
hvd.init()
rank = hvd.rank()
local_rank = hvd.local_rank()
size = hvd.size()
# As of TensorFlow v1.9, gradients are not supported on
# integer tensors
dtypes = [tf.float32, tf.float64]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
with tf.device("/gpu:%s" % local_rank):
vals = []
for i in range(size):
vals += [i] * (rank+1)
tensor = tf.convert_to_tensor(vals, dtype=dtype)
for _ in range(dim - 1):
tensor = tf.expand_dims(tensor, axis=1)
tensor = tf.concat([tensor, tensor], axis=1)
if _executing_eagerly():
tensor = self.tfe.Variable(tensor)
splits = tf.convert_to_tensor([rank + 1] * size, dtype=tf.int32)
with tf.GradientTape() as tape:
collected = hvd.alltoall(tensor, splits)
else:
splits = tf.convert_to_tensor([rank + 1] * size, dtype=tf.int32)
collected = hvd.alltoall(tensor, splits)
grad_ys = tf.ones(tf.shape(collected))
if _executing_eagerly():
grad_out = tape.gradient(collected, tensor, grad_ys)
else:
grad = tf.gradients(collected, tensor, grad_ys)[0]
grad_out = self.evaluate(grad)
expected = np.ones(tensor.get_shape().as_list())
err = np.linalg.norm(expected - grad_out)
self.assertLess(err, 0.00000001,
"gradient %s differs from expected %s, "
"error: %s" % (grad_out, expected, str(err)))
def test_horovod_alltoall_equal_split_grad_cpu(self):
"""Test the correctness of the alltoall gradient with default splitting on CPU."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# As of TensorFlow v1.9, gradients are not supported on
# integer tensors
dtypes = [tf.float32, tf.float64]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
with tf.device("/cpu:0"):
vals = []
for i in range(size):
vals += [i] * (rank+1)
tensor = tf.convert_to_tensor(vals, dtype=dtype)
for _ in range(dim - 1):
tensor = tf.expand_dims(tensor, axis=1)
tensor = tf.concat([tensor, tensor], axis=1)
if _executing_eagerly():
tensor = self.tfe.Variable(tensor)
with tf.GradientTape() as tape:
collected = hvd.alltoall(tensor)
else:
collected = hvd.alltoall(tensor)
grad_ys = tf.ones(tf.shape(collected))
if _executing_eagerly():
grad_out = tape.gradient(collected, tensor, grad_ys)
else:
grad = tf.gradients(collected, tensor, grad_ys)[0]
grad_out = self.evaluate(grad)
expected = np.ones(tensor.get_shape().as_list())
err = np.linalg.norm(expected - grad_out)
self.assertLess(err, 0.00000001,
"gradient %s differs from expected %s, "
"error: %s" % (grad_out, expected, str(err)))
def test_horovod_alltoall_equal_split_grad_gpu(self):
"""Test the correctness of the alltoall gradient with default splitting on GPU."""
# Only do this test if there are GPUs available.
if not tf.test.is_gpu_available(cuda_only=True):
self.skipTest(("No GPUs available"))
if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):
# Skip if compiled with CUDA but without HOROVOD_GPU_OPERATIONS.
self.skipTest("Not compiled with HOROVOD_GPU_OPERATIONS")
# This test does not apply if NCCL version < 2.7.0
if hvd.nccl_built() and hvd.nccl_built() < 2700:
self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
hvd.init()
rank = hvd.rank()
local_rank = hvd.local_rank()
size = hvd.size()
# As of TensorFlow v1.9, gradients are not supported on
# integer tensors
dtypes = [tf.float32, tf.float64]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
with tf.device("/gpu:%s" % local_rank):
vals = []
for i in range(size):
vals += [i] * (rank+1)
tensor = tf.convert_to_tensor(vals, dtype=dtype)
for _ in range(dim - 1):
tensor = tf.expand_dims(tensor, axis=1)
tensor = tf.concat([tensor, tensor], axis=1)
if _executing_eagerly():
tensor = self.tfe.Variable(tensor)
with tf.GradientTape() as tape:
collected = hvd.alltoall(tensor)
else:
collected = hvd.alltoall(tensor)
grad_ys = tf.ones(tf.shape(collected))
if _executing_eagerly():
grad_out = tape.gradient(collected, tensor, grad_ys)
else:
grad = tf.gradients(collected, tensor, grad_ys)[0]
grad_out = self.evaluate(grad)
expected = np.ones(tensor.get_shape().as_list())
err = np.linalg.norm(expected - grad_out)
self.assertLess(err, 0.00000001,
"gradient %s differs from expected %s, "
"error: %s" % (grad_out, expected, str(err)))
def test_horovod_broadcast_eager_mode_error(self):
"""Test that tries to broadcast tensorflow global variables
in eager execution mode. This call should raise a RuntimeError."""
if not hvd.util._executing_eagerly():
self.skipTest("Only in eager execution mode")
with self.assertRaises(RuntimeError):
hvd.broadcast_global_variables(root_rank=0)
def test_horovod_broadcast_graph_mode(self):
"""Test that tries to broadcast tensorflow global variables
in graph execution mode. This call should not raise any exception."""
if hvd.util._executing_eagerly():
self.skipTest("Not in eager execution mode")
hvd.broadcast_global_variables(root_rank=0)
def test_compression_fp16(self):
valid_dtypes = [tf.float16, tf.float32, tf.float64]
invalid_dtypes = [tf.uint8, tf.int8, tf.uint16, tf.int16,
tf.int32, tf.int64, tf.bool]
tensor_size = [17] * 3
compression = hvd.Compression.fp16
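        # fp16 compression casts floating-point tensors to float16 for transport
        # and back to the original dtype on decompress; other dtypes pass through
        # unchanged, which is what the assertions below verify.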
for dtype in valid_dtypes:
tensor = tf.ones(tensor_size, dtype=dtype)
tensor_compressed, ctx = compression.compress(tensor)
self.assertEqual(tensor_compressed.dtype, tf.float16)
tensor_decompressed = compression.decompress(tensor_compressed, ctx)
self.assertEqual(tensor_decompressed.dtype, dtype)
actual = self.evaluate(tensor_decompressed)
expected = np.ones(tensor_size)
err = np.linalg.norm(expected - actual)
self.assertLess(err, 0.00000001)
for dtype in invalid_dtypes:
tensor = tf.ones(tensor_size, dtype=dtype)
tensor_compressed, ctx = compression.compress(tensor)
self.assertEqual(tensor_compressed.dtype, dtype)
tensor_decompressed = compression.decompress(tensor_compressed, ctx)
self.assertEqual(tensor_decompressed.dtype, dtype)
actual = self.evaluate(tensor_decompressed)
expected = np.ones(tensor_size)
err = np.linalg.norm(expected - actual)
self.assertLess(err, 0.00000001)
def test_broadcast_object(self):
hvd.init()
with tf.device("/cpu:0"):
expected_obj = {
'hello': 123,
0: [1, 2]
}
obj = expected_obj if hvd.rank() == 0 else {}
obj = hvd.broadcast_object(obj, root_rank=0)
self.assertDictEqual(obj, expected_obj)
def test_broadcast_object_fn(self):
if hvd._executing_eagerly() or _IS_TF2:
# Only for TF 1.0 in graph mode
return
hvd.init()
with tf.device("/cpu:0"):
expected_obj = {
'hello': 123,
0: [1, 2]
}
obj = expected_obj if hvd.rank() == 0 else {}
bcast = hvd.broadcast_object_fn(root_rank=0)
obj = bcast(obj)
self.assertDictEqual(obj, expected_obj)
def test_allgather_object(self):
hvd.init()
with tf.device("/cpu:0"):
d = {'metric_val_1': hvd.rank()}
if hvd.rank() == 1:
d['metric_val_2'] = 42
results = hvd.allgather_object(d)
expected = [{'metric_val_1': i} for i in range(hvd.size())]
if hvd.size() > 1:
expected[1] = {'metric_val_1': 1, 'metric_val_2': 42}
self.assertEqual(len(results), hvd.size())
self.assertListEqual(results, expected)
def test_elastic_state(self):
if not hvd._executing_eagerly() and _IS_TF2:
# Only support TF 2.0 in eager mode
return
hvd.init()
with tf.device("/cpu:0"):
v = 1.0 if hvd.rank() == 0 else 2.0
weights1 = [
np.array([[v, v], [v, v]]),
np.array([v, v])
]
vars1 = [tf.Variable(arr) for arr in weights1]
weights2 = [
np.array([[1.0, 2.0], [3.0, 4.0]]),
np.array([0.0, 0.0])
]
if not hvd._executing_eagerly():
init = tf.global_variables_initializer()
self.evaluate(init)
state = hvd.elastic.TensorFlowState(vars1, batch=20 + hvd.rank(), epoch=10 + hvd.rank())
state.sync()
weights1 = [np.ones_like(w) for w in weights1]
# After sync, all values should match the root rank
for w in self.evaluate(vars1):
self.assertAllClose(w, np.ones_like(w))
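            # The state's attributes are broadcast from rank 0 as well, so batch
            # and epoch take the root's values (20 + 0 and 10 + 0).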
assert state.batch == 20
assert state.epoch == 10
# Partially modify then restore
self.assign(vars1, weights2)
state.batch = 21
state.epoch = 11
state.restore()
for w1, w2 in zip(self.evaluate(vars1), weights1):
self.assertAllClose(w1, w2)
assert state.batch == 20
assert state.epoch == 10
# Partially modify then commit
self.assign(vars1, weights2)
state.batch = 21
state.epoch = 11
state.commit()
state.restore()
for w1, w2 in zip(self.evaluate(vars1), weights2):
self.assertAllClose(w1, w2)
assert state.batch == 21
assert state.epoch == 11
def test_horovod_join_allreduce(self):
"""Test that the hvd.join with allreduce works on GPUs."""
# Only do this test if there are GPUs available.
if not tf.test.is_gpu_available(cuda_only=True):
self.skipTest(("No GPUs available"))
if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):
# Skip if compiled with CUDA but without HOROVOD_GPU_ALLREDUCE.
self.skipTest("Not compiled with HOROVOD_GPU_ALLREDUCE")
hvd.init()
local_rank = hvd.local_rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
dtypes = [tf.int32, tf.int64, tf.float16, tf.float32, tf.float64]
dims = [1, 2, 3]
first_join_ranks = [0, 1]
for dtype, dim, first_join_rank in itertools.product(dtypes, dims, first_join_ranks):
with tf.device("/gpu:%d" % local_rank):
tensor = self.random_uniform(
[17] * dim, -100, 100, dtype=dtype)
if local_rank == first_join_rank:
self.evaluate(hvd.join())
else:
summed = hvd.allreduce(tensor, average=False)
multiplied = tensor * (size-1)
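                    # The test relies on every rank generating the same pseudo-random
                    # tensor; the rank that called join() contributes nothing, so the
                    # sum over the remaining size-1 workers equals tensor * (size - 1).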
max_difference = tf.reduce_max(tf.abs(summed - multiplied))
if size <= 3 or dtype in [tf.int32, tf.int64]:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
return
diff = self.evaluate(max_difference)
self.evaluate(hvd.join())
self.assertTrue(diff <= threshold,
"hvd.join with hvd.allreduce on GPU produces incorrect results")
def test_horovod_syncbn_gpu(self):
"""Test that the SyncBatchNormalization implementation is correct on GPU."""
# Only do this test if there are GPUs available.
if not tf.test.is_gpu_available(cuda_only=True):
self.skipTest(("No GPUs available"))
hvd.init()
with tf.device("/gpu:%d" % hvd.local_rank()):
x_list = [
tf.convert_to_tensor(np.stack([
np.array([
[r, r + 1],
[r * 2, r * 2 + 1],
[r * 3, r * 3 + 1],
[r * 4, r * 4 + 1]
], dtype=np.float32)
for r in range(hvd.size())
]), np.float32),
tf.convert_to_tensor(np.stack([
np.array([
[r + 1],
[r * 2 + 1],
[r * 3 + 1],
[r * 4 + 1]
], dtype=np.float32)
for r in range(hvd.size())
]), np.float32),
]
for x in x_list:
bn = tf.keras.layers.BatchNormalization(axis=1, fused=False)
sync_bn = hvd.SyncBatchNormalization(axis=1)
bn_func = bn.apply(x, training=True)
sync_bn_func = sync_bn.apply(tf.expand_dims(x[hvd.rank()], 0), training=True)
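            # Each worker feeds only its own slice of the batch to the sync BN
            # layer; with synchronized statistics the output should match plain
            # batch norm applied to the full batch in a single process.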
try:
init = tf.global_variables_initializer()
except AttributeError:
init = tf.compat.v1.global_variables_initializer()
self.evaluate(init)
bn_out = self.evaluate(bn_func)
sync_bn_out = self.evaluate(sync_bn_func)
self.assertAllClose(sync_bn_out, np.expand_dims(bn_out[hvd.rank()], 0))
self.assertAllClose(self.evaluate(sync_bn.moving_mean), self.evaluate(bn.moving_mean))
self.assertAllClose(self.evaluate(sync_bn.moving_variance), self.evaluate(bn.moving_variance))
def test_horovod_syncbn_cpu(self):
"""Test that the SyncBatchNormalization implementation is correct on CPU."""
hvd.init()
with tf.device("/cpu:0"):
x_list = [
tf.convert_to_tensor(np.stack([
np.array([
[r, r + 1],
[r * 2, r * 2 + 1],
[r * 3, r * 3 + 1],
[r * 4, r * 4 + 1]
], dtype=np.float32)
for r in range(hvd.size())
]), np.float32),
tf.convert_to_tensor(np.stack([
np.array([
[r + 1],
[r * 2 + 1],
[r * 3 + 1],
[r * 4 + 1]
], dtype=np.float32)
for r in range(hvd.size())
]), np.float32),
]
for x in x_list:
bn = tf.keras.layers.BatchNormalization(axis=1, fused=False)
sync_bn = hvd.SyncBatchNormalization(axis=1)
bn_func = bn.apply(x, training=True)
sync_bn_func = sync_bn.apply(tf.expand_dims(x[hvd.rank()], 0), training=True)
try:
init = tf.global_variables_initializer()
except AttributeError:
init = tf.compat.v1.global_variables_initializer()
self.evaluate(init)
bn_out = self.evaluate(bn_func)
sync_bn_out = self.evaluate(sync_bn_func)
self.assertAllClose(sync_bn_out, np.expand_dims(bn_out[hvd.rank()], 0))
self.assertAllClose(self.evaluate(sync_bn.moving_mean), self.evaluate(bn.moving_mean))
self.assertAllClose(self.evaluate(sync_bn.moving_variance), self.evaluate(bn.moving_variance))
from tensorflow.python.framework.test_util import run_all_in_graph_and_eager_modes
run_all_in_graph_and_eager_modes(TensorFlowTests)
if __name__ == '__main__':
tf.test.main()
|
[] |
[] |
[
"HOROVOD_MIXED_INSTALL",
"HOROVOD_SIZE",
"HOROVOD_RANK"
] |
[]
|
["HOROVOD_MIXED_INSTALL", "HOROVOD_SIZE", "HOROVOD_RANK"]
|
python
| 3 | 0 | |
cmd/duckeye/main.go
|
package main
import (
"log"
"net/http"
"os"
"knative.dev/pkg/controller"
"knative.dev/pkg/injection"
"knative.dev/pkg/injection/sharedmain"
"knative.dev/pkg/signals"
"github.com/pierdipi/duckeye/pkg/clusterducktype"
discoveryclusterducktype "knative.dev/discovery/pkg/client/injection/informers/discovery/v1alpha1/clusterducktype"
)
func main() {
ctx := signals.NewContext()
config := sharedmain.ParseAndGetConfigOrDie()
ctx, informers := injection.Default.SetupInformers(ctx, config)
if err := controller.StartInformers(ctx.Done(), informers...); err != nil {
log.Fatal("Failed to start informers", err)
}
clusterDuckTypeHandler := clusterducktype.New(discoveryclusterducktype.Get(ctx).Lister())
s := http.NewServeMux()
// Serve website
s.Handle("/", http.FileServer(http.Dir("/var/run/ko/")))
// Serve ClusterDuckType
s.Handle("/clusterducktypes", clusterDuckTypeHandler)
s.Handle("/clusterducktypes/", clusterDuckTypeHandler)
if err := http.ListenAndServe(":"+os.Getenv("PORT"), s); err != nil {
log.Fatal(err)
}
}
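// Example invocation (assumed, not part of the original source):
//
//	PORT=8080 ./duckeye
//
// serves the static site at http://localhost:8080/ and the duck type listings
// at /clusterducktypes.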
|
[
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
go
| 1 | 0 | |
docs/conf.py
|
# -*- coding: utf-8 -*-
#
# elasticsearch-lua documentation build configuration file, created by
# sphinx-quickstart on Sun Jul 17 00:42:41 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
import os
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'elasticsearch-lua'
copyright = u'2016, Dhaval Kapil'
author = u'Dhaval Kapil'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1.0'
# The full version, including alpha/beta/rc tags.
release = u'alpha'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
def setup(app):
app.add_stylesheet('css/custom.css')
else:
html_context = {
'css_files': [
'https://media.readthedocs.org/css/sphinx_rtd_theme.css',
'https://media.readthedocs.org/css/readthedocs-doc-embed.css',
'_static/css/custom.css',
],
}
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'elasticsearch-lua valpha'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' users can customize the `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'elasticsearch-luadoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'elasticsearch-lua.tex', u'elasticsearch-lua Documentation',
u'Dhaval Kapil', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'elasticsearch-lua', u'elasticsearch-lua Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'elasticsearch-lua', u'elasticsearch-lua Documentation',
author, 'elasticsearch-lua', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
|
[] |
[] |
[
"READTHEDOCS"
] |
[]
|
["READTHEDOCS"]
|
python
| 1 | 0 | |
backend/pasatiempos_29783/wsgi.py
|
"""
WSGI config for pasatiempos_29783 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pasatiempos_29783.settings")
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
pkg/util/goleak_util_test.go
|
package util
import (
"os"
"testing"
"go.uber.org/goleak"
)
func TestMain(m *testing.M) {
if os.Getenv("CORTEX_TEST_GOLEAK") == "1" {
goleak.VerifyTestMain(m)
}
}
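// To enable the leak check locally (illustrative command, matching the guard above):
//
//	CORTEX_TEST_GOLEAK=1 go test ./pkg/util/...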
|
[
"\"CORTEX_TEST_GOLEAK\""
] |
[] |
[
"CORTEX_TEST_GOLEAK"
] |
[]
|
["CORTEX_TEST_GOLEAK"]
|
go
| 1 | 0 | |
keras_retinanet/bin/evaluate.py
|
#!/usr/bin/env python
"""
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import os
import sys
import numpy as np
import yaml
import keras
import tensorflow as tf
# Allow relative imports when being executed as script.
if __name__ == "__main__" and __package__ is None:
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
import keras_retinanet.bin # noqa: F401
__package__ = "keras_retinanet.bin"
# Change these to absolute imports if you copy this script outside the keras_retinanet package.
from keras_retinanet import models
from keras_retinanet.preprocessing.csv_generator import CSVGenerator
from keras_retinanet.preprocessing.pascal_voc import PascalVocGenerator
from keras_retinanet.utils.eval import evaluate
from keras_retinanet.utils.keras_version import check_keras_version
from keras_retinanet.models.retinanet import AnchorParameters
def get_session():
""" Construct a modified tf session.
"""
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
return tf.Session(config=config)
#def get_absolute_name(name,prefix):
# return name if os.path.exists(name) else os.path.join(prefix,name)
def get_anchors_params(anchors_in=None):
    if anchors_in:
        with open(anchors_in, 'r') as anchors_file:
            anchors_params = yaml.safe_load(anchors_file)
        anchors_params.update(ratios=np.array(anchors_params['ratios'], keras.backend.floatx()))
        anchors_params.update(scales=np.array(anchors_params['scales'], keras.backend.floatx()))
else:
#just use the default params.
anchors_params = {'sizes':AnchorParameters.default.sizes,
'ratios':AnchorParameters.default.ratios,
'scales':AnchorParameters.default.scales,
'strides':AnchorParameters.default.strides}
return anchors_params
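# Illustrative anchors YAML layout expected by get_anchors_params (field names match
# the defaults above; the concrete values below are only an example):
#
#   sizes:   [32, 64, 128, 256, 512]
#   strides: [8, 16, 32, 64, 128]
#   ratios:  [0.5, 1.0, 2.0]
#   scales:  [1.0, 1.26, 1.59]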
def create_generator(args):
""" Create generators for evaluation.
"""
if args.dataset_type == 'coco':
# import here to prevent unnecessary dependency on cocoapi
from ..preprocessing.coco import CocoGenerator
validation_generator = CocoGenerator(
args.coco_path,
'val2017',
image_min_side=args.image_min_side,
image_max_side=args.image_max_side
)
elif args.dataset_type == 'pascal':
validation_generator = PascalVocGenerator(
args.pascal_path,
'test',
image_min_side=args.image_min_side,
image_max_side=args.image_max_side
)
elif args.dataset_type == 'csv':
validation_generator = CSVGenerator(
args.annotations,
args.classes,
image_min_side=args.image_min_side,
image_max_side=args.image_max_side,
)
else:
raise ValueError('Invalid data type received: {}'.format(args.dataset_type))
return validation_generator
def parse_args(args):
""" Parse the arguments.
"""
parser = argparse.ArgumentParser(description='Evaluation script for a RetinaNet network.')
subparsers = parser.add_subparsers(help='Arguments for specific dataset types.', dest='dataset_type')
subparsers.required = True
coco_parser = subparsers.add_parser('coco')
coco_parser.add_argument('coco_path', help='Path to dataset directory (ie. /tmp/COCO).')
pascal_parser = subparsers.add_parser('pascal')
pascal_parser.add_argument('pascal_path', help='Path to dataset directory (ie. /tmp/VOCdevkit).')
csv_parser = subparsers.add_parser('csv')
csv_parser.add_argument('annotations', help='Path to CSV file containing annotations for evaluation.')
csv_parser.add_argument('classes', help='Path to a CSV file containing class label mapping.')
parser.add_argument('model', help='Path to RetinaNet model.')
parser.add_argument("--convert-model", help='Convert the model to an inference model (ie. the input is a training model).', action='store_true')
parser.add_argument('--backbone', help='The backbone of the model.', default='resnet50')
parser.add_argument('--gpu', help='Id of the GPU to use (as reported by nvidia-smi).')
parser.add_argument('--score-threshold', help='Threshold on score to filter detections with (defaults to 0.05).', default=0.05, type=float)
parser.add_argument('--iou-threshold', help='IoU Threshold to count for a positive detection (defaults to 0.5).', default=0.5, type=float)
parser.add_argument('--max-detections', help='Max Detections per image (defaults to 100).', default=100, type=int)
parser.add_argument('--save-path', help='Path for saving images with detections (doesn\'t work for COCO).')
parser.add_argument('--image-min-side', help='Rescale the image so the smallest side is min_side.', type=int, default=800)
parser.add_argument('--image-max-side', help='Rescale the image if the largest side is larger than max_side.', type=int, default=1333)
parser.add_argument('--anchors', help='Load anchors parameters by a yaml file.',default=None)
return parser.parse_args(args)
def main(args=None):
# parse arguments
if args is None:
args = sys.argv[1:]
args = parse_args(args)
# make sure keras is the minimum required version
check_keras_version()
# optionally choose specific GPU
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
keras.backend.tensorflow_backend.set_session(get_session())
# make save path if it doesn't exist
if args.save_path is not None and not os.path.exists(args.save_path):
os.makedirs(args.save_path)
if not args.anchors:
        #automatically search the snapshot path for the anchors configuration file.
        #if it doesn't exist, then default anchor parameters are assumed.
anchors_path = os.path.join(os.path.dirname(args.model),"anchors.yaml")
anchors_path = anchors_path if os.path.exists(anchors_path) else None
else:
anchors_path = args.anchors
anchors_dict = get_anchors_params(anchors_path)
anchors_params = AnchorParameters(**anchors_dict)
# create the generator
    #(It's ok not to update anchor args, as we only use the generator to load images and annotations.)
generator = create_generator(args)
# load the model
print('Loading model, this may take a second...')
model = models.load_model(args.model, backbone_name=args.backbone, convert=args.convert_model,anchor_parameters = anchors_params)
# print model summary
# print(model.summary())
# start evaluation
if args.dataset_type == 'coco':
from ..utils.coco_eval import evaluate_coco
evaluate_coco(generator, model, args.score_threshold)
else:
average_precisions = evaluate(
generator,
model,
iou_threshold=args.iou_threshold,
score_threshold=args.score_threshold,
max_detections=args.max_detections,
save_path=args.save_path
)
# print evaluation
present_classes = 0
precision = 0
for label, (average_precision, num_annotations) in average_precisions.items():
print('{:.0f} instances of class'.format(num_annotations),
generator.label_to_name(label), 'with average precision: {:.4f}'.format(average_precision))
if num_annotations > 0:
present_classes += 1
precision += average_precision
print('mAP: {:.4f}'.format(precision / present_classes))
if __name__ == '__main__':
main()
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
Project/src/code.myceliUs.com/CodeGen/Generator.go
|
package main
import (
"encoding/xml"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"strings"
"code.myceliUs.com/Utility"
"code.myceliUs.com/XML_Schemas"
)
/**
* Read the content of a CMOF file.
*/
func ReadCMOF(reader io.Reader) (*XML_Schemas.CMOF_Document, error) {
var doc *XML_Schemas.CMOF_Document
doc = new(XML_Schemas.CMOF_Document)
if err := xml.NewDecoder(reader).Decode(doc); err != nil {
return nil, err
}
return doc, nil
}
/**
* Load the content into various maps (CMOF.go)
*/
func loadCMOF(inputPath string) {
cmofFilePath, err := filepath.Abs(inputPath)
if err != nil {
log.Println(err)
os.Exit(1)
}
	// Open the CMOF file.
file, err := os.Open(cmofFilePath)
if err != nil {
log.Println(err)
os.Exit(1)
}
defer file.Close()
	// Read the CMOF document.
cmofDocument, err := ReadCMOF(file)
if err != nil {
log.Println(err)
os.Exit(1)
}
loadDocument(cmofDocument)
}
/**
* Read the content of XSD file.
*/
func ReadXSD(reader io.Reader) (*XML_Schemas.XSD_Schema, error) {
var doc *XML_Schemas.XSD_Schema
doc = new(XML_Schemas.XSD_Schema)
if err := xml.NewDecoder(reader).Decode(doc); err != nil {
return nil, err
}
return doc, nil
}
/**
* Load the content of the file into various maps (XSD.go)
*/
func loadXSD(inputPath string) {
xsdFilePath, err := filepath.Abs(inputPath)
if err != nil {
log.Println(err)
os.Exit(1)
}
	// Open the XSD file.
file, err := os.Open(xsdFilePath)
if err != nil {
log.Println(err)
os.Exit(1)
}
defer file.Close()
	// Read the XSD document.
xsdDocument, err := ReadXSD(file)
if err != nil {
log.Println(err)
os.Exit(1)
}
loadSchema(xsdDocument)
}
var (
outputPath string
)
func WriteClassFile(outputPath, packName string, className string, classCode string) {
goPath := os.Getenv("GOPATH")
classCode = "// +build " + packName + "\n\n" + classCode
path := goPath + "/src/" + outputPath + packName
path = strings.Replace(path, "/", string(os.PathSeparator), -1)
path = strings.Replace(path, "\\", string(os.PathSeparator), -1)
if !strings.HasSuffix(path, string(os.PathSeparator)) {
path += string(os.PathSeparator)
}
if !Utility.Exists(path) {
os.MkdirAll(path, 0777)
}
log.Println("Create go file for class: ", path+className+".go")
if !Utility.Exists(path + className + ".go") {
err := ioutil.WriteFile(path+className+".go", []byte(classCode), 0x777)
if err != nil {
panic(err)
}
}
}
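// Illustrative example: WriteClassFile("code.myceliUs.com/CargoWebServer/Cargo/Entities/", "BPMN20", "Process", code)
// would write $GOPATH/src/code.myceliUs.com/CargoWebServer/Cargo/Entities/BPMN20/Process.go, prefixed with a
// "// +build BPMN20" tag, and only if that file does not already exist ("BPMN20" is just an assumed pack name).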
func generateCode(packName string) {
// The interface...
generateGoInterfacesCode(packName)
// The class
generateGoClassCode(packName)
// The enumeration
generateGoEnumerationCode(packName)
}
func initMaps() {
// CMOF maps
classesMap = make(map[string]*XML_Schemas.CMOF_OwnedMember)
enumMap = make(map[string]*XML_Schemas.CMOF_OwnedMember)
packagesMembers = make(map[string][]string)
membersPackage = make(map[string]string)
superClassMembers = make(map[string]map[string]*XML_Schemas.CMOF_OwnedMember)
associationsSrcMap = make(map[string]*XML_Schemas.CMOF_OwnedMember)
associationsDstMap = make(map[string][]string)
cmofPrimitiveTypesMap = make(map[string]string)
superClassesLst = make([]string, 0)
abstractClassLst = make([]string, 0)
compositionEntityCreatorMap = make(map[string][]string)
// XSD MAPS
elementsNameMap = make(map[string]*XML_Schemas.XSD_Element)
elementsTypeMap = make(map[string]*XML_Schemas.XSD_Element)
complexTypesMap = make(map[string]*XML_Schemas.XSD_ComplexType)
simpleTypesMap = make(map[string]*XML_Schemas.XSD_SimpleType)
xsdPrimitiveTypesMap = make(map[string]string)
substitutionGroupMap = make(map[string][]string)
aliasElementType = make(map[string]string)
// Factory
factoryImports = make([]string, 0)
// That map will contain initialisation function for a given data type
initFunctions = make(map[string]string)
serializationFunctions = make(map[string]string)
}
/**
 * Generate the code for the files in the given path.
*/
func generate(inputPath string, packName string, xmlRootElementName string) {
// Clear the maps.
initMaps()
// Set the output...
outputPath = "code.myceliUs.com/CargoWebServer/Cargo/Entities/"
path := inputPath + "\\" + packName
path = strings.Replace(path, "/", string(os.PathSeparator), -1)
path = strings.Replace(path, "\\", string(os.PathSeparator), -1)
loadCMOF(path + ".cmof")
loadXSD(path + ".xsd")
// generate the code
generateCode(packName)
	// generate the factory if there is a list of entities.
if len(xmlRootElementName) > 0 {
generateGoXmlFactory(xmlRootElementName, "Entities", outputPath, packName)
}
// generate the entity.
generateEntity(packName)
}
|
[
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
src/main/java/com/revature/util/DBConnector.java
|
package com.revature.util;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
public class DBConnector {
private static Connection conn = null;
private DBConnector() {
super();
}
public static Connection getConnection() {
try {
if(conn != null && !conn.isClosed()) {
return conn;
}
} catch(SQLException e) {
e.printStackTrace();
System.out.println("FAILED TO REUSE A CONNECTION");
return null;
}
String url = System.getenv("DB_URL");
String username = System.getenv("DB_USERNAME");
String password = System.getenv("DB_PASSWORD");
try {
conn = DriverManager.getConnection(url, username, password);
} catch(SQLException e) {
e.printStackTrace();
System.out.println("FAILED TO GET A CONNECTION!");
return null;
}
return conn;
}
}
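// Illustrative usage (assumes DB_URL, DB_USERNAME and DB_PASSWORD are set in the environment):
//
//   Connection conn = DBConnector.getConnection();
//   if (conn != null) {
//       // run queries; the same connection is reused until it is closed
//   }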
|
[
"\"DB_URL\"",
"\"DB_USERNAME\"",
"\"DB_PASSWORD\""
] |
[] |
[
"DB_USERNAME",
"DB_PASSWORD",
"DB_URL"
] |
[]
|
["DB_USERNAME", "DB_PASSWORD", "DB_URL"]
|
java
| 3 | 0 | |
Tests/configure_and_test_integration_instances.py
|
from __future__ import print_function
import argparse
import ast
import json
import logging
import os
import subprocess
import sys
import uuid
import zipfile
from datetime import datetime
from distutils.version import LooseVersion
from enum import IntEnum
from pprint import pformat
from threading import Thread
from time import sleep
from typing import List, Tuple
from urllib.parse import quote_plus
import demisto_client
from demisto_sdk.commands.test_content.constants import SSH_USER
from ruamel import yaml
from Tests.Marketplace.search_and_install_packs import search_and_install_packs_and_their_dependencies, \
upload_zipped_packs, install_all_content_packs_for_nightly
from Tests.scripts.utils.log_util import install_logging
from Tests.test_content import extract_filtered_tests, get_server_numeric_version
from Tests.test_integration import __get_integration_config, __test_integration_instance, disable_all_integrations
from Tests.tools import run_with_proxy_configured
from Tests.update_content_data import update_content
from demisto_sdk.commands.common.constants import FileType
from demisto_sdk.commands.common.tools import run_threads_list, run_command, get_yaml, \
str2bool, format_version, find_type
from demisto_sdk.commands.test_content.mock_server import MITMProxy, run_with_mock, RESULT
from demisto_sdk.commands.test_content.tools import update_server_configuration, is_redhat_instance
from demisto_sdk.commands.validate.validate_manager import ValidateManager
MARKET_PLACE_MACHINES = ('master',)
SKIPPED_PACKS = ['NonSupported', 'ApiModules']
NO_PROXY = ','.join([
'oproxy.demisto.ninja',
'oproxy-dev.demisto.ninja',
])
NO_PROXY_CONFIG = {'python.pass.extra.keys': f'--env##no_proxy={NO_PROXY}'} # noqa: E501
DOCKER_HARDENING_CONFIGURATION = {
'docker.cpu.limit': '1.0',
'docker.run.internal.asuser': 'true',
'limit.docker.cpu': 'true',
'python.pass.extra.keys': f'--memory=1g##--memory-swap=-1##--pids-limit=256##--ulimit=nofile=1024:8192##--env##no_proxy={NO_PROXY}', # noqa: E501
'powershell.pass.extra.keys': f'--env##no_proxy={NO_PROXY}',
}
DOCKER_HARDENING_CONFIGURATION_FOR_PODMAN = {
'docker.run.internal.asuser': 'true'
}
MARKET_PLACE_CONFIGURATION = {
'content.pack.verify': 'false',
'marketplace.initial.sync.delay': '0',
'content.pack.ignore.missing.warnings.contentpack': 'true'
}
AVOID_DOCKER_IMAGE_VALIDATION = {
'content.validate.docker.images': 'false'
}
ID_SET_PATH = './artifacts/id_set.json'
class Running(IntEnum):
CI_RUN = 0
WITH_OTHER_SERVER = 1
WITH_LOCAL_SERVER = 2
class Server:
def __init__(self, internal_ip, port, user_name, password):
self.__ssh_client = None
self.__client = None
self.internal_ip = internal_ip
self.ssh_tunnel_port = port
self.user_name = user_name
self.password = password
def __str__(self):
return self.internal_ip
@property
def client(self):
if self.__client is None:
self.__client = self.reconnect_client()
return self.__client
def reconnect_client(self):
self.__client = demisto_client.configure(f'https://localhost:{self.ssh_tunnel_port}',
verify_ssl=False,
username=self.user_name,
password=self.password)
return self.__client
def add_server_configuration(self, config_dict, error_msg, restart=False):
update_server_configuration(self.client, config_dict, error_msg)
if restart:
self.exec_command('sudo systemctl restart demisto')
def exec_command(self, command):
subprocess.check_output(f'ssh {SSH_USER}@{self.internal_ip} {command}'.split(),
stderr=subprocess.STDOUT)
def get_id_set(id_set_path) -> dict:
"""
Used to collect the ID set so it can be passed to the Build class on init.
:return: ID set as a dict if it exists.
"""
if os.path.isfile(id_set_path):
return get_json_file(id_set_path)
class Build:
# START CHANGE ON LOCAL RUN #
content_path = f'{os.getenv("HOME")}/project' if os.getenv('CIRCLECI') else os.getenv('CI_PROJECT_DIR')
test_pack_target = f'{os.getenv("HOME")}/project/Tests' if os.getenv('CIRCLECI') else f'{os.getenv("CI_PROJECT_DIR")}/Tests' # noqa
key_file_path = 'Use in case of running with non local server'
run_environment = Running.CI_RUN
env_results_path = f'{os.getenv("ARTIFACTS_FOLDER")}/env_results.json'
DEFAULT_SERVER_VERSION = '99.99.98'
# END CHANGE ON LOCAL RUN #
def __init__(self, options):
self._proxy = None
self.git_sha1 = options.git_sha1
self.branch_name = options.branch
self.ci_build_number = options.build_number
self.is_nightly = options.is_nightly
self.ami_env = options.ami_env
self.server_to_port_mapping, self.server_numeric_version = self.get_servers(options.ami_env)
self.secret_conf = get_json_file(options.secret)
self.username = options.user if options.user else self.secret_conf.get('username')
self.password = options.password if options.password else self.secret_conf.get('userPassword')
self.servers = [Server(internal_ip,
port,
self.username,
self.password) for internal_ip, port in self.server_to_port_mapping.items()]
self.is_private = options.is_private
conf = get_json_file(options.conf)
self.tests = conf['tests']
self.skipped_integrations_conf = conf['skipped_integrations']
self.unmockable_integrations = conf['unmockable_integrations']
id_set_path = options.id_set_path if options.id_set_path else ID_SET_PATH
self.id_set = get_id_set(id_set_path)
self.test_pack_path = options.test_pack_path if options.test_pack_path else None
self.tests_to_run = self.fetch_tests_list(options.tests_to_run)
self.content_root = options.content_root
self.pack_ids_to_install = self.fetch_pack_ids_to_install(options.pack_ids_to_install)
self.service_account = options.service_account
@property
def proxy(self) -> MITMProxy:
"""
        A property method that should create and return a single proxy instance throughout the build
Returns:
The single proxy instance that should be used in this build.
"""
if not self._proxy:
self._proxy = MITMProxy(self.servers[0].internal_ip,
logging_module=logging,
build_number=self.ci_build_number,
branch_name=self.branch_name)
return self._proxy
@staticmethod
def fetch_tests_list(tests_to_run_path: str):
"""
Fetches the test list from the filter.
:param tests_to_run_path: Path to location of test filter.
:return: List of tests if there are any, otherwise empty list.
"""
tests_to_run = []
with open(tests_to_run_path, "r") as filter_file:
tests_from_file = filter_file.readlines()
for test_from_file in tests_from_file:
test_clean = test_from_file.rstrip()
tests_to_run.append(test_clean)
return tests_to_run
@staticmethod
def fetch_pack_ids_to_install(packs_to_install_path: str):
"""
        Fetches the pack IDs to install from the packs file.
:param packs_to_install_path: Path to location of pack IDs to install file.
:return: List of Pack IDs if there are any, otherwise empty list.
"""
tests_to_run = []
with open(packs_to_install_path, "r") as filter_file:
tests_from_file = filter_file.readlines()
for test_from_file in tests_from_file:
test_clean = test_from_file.rstrip()
tests_to_run.append(test_clean)
return tests_to_run
@staticmethod
def get_servers(ami_env):
env_conf = get_env_conf()
server_to_port_mapping = map_server_to_port(env_conf, ami_env)
if Build.run_environment == Running.CI_RUN:
server_numeric_version = get_server_numeric_version(ami_env)
else:
server_numeric_version = Build.DEFAULT_SERVER_VERSION
return server_to_port_mapping, server_numeric_version
def options_handler():
parser = argparse.ArgumentParser(description='Utility for instantiating and testing integration instances')
parser.add_argument('-u', '--user', help='The username for the login', required=True)
parser.add_argument('-p', '--password', help='The password for the login', required=True)
parser.add_argument('--ami_env', help='The AMI environment for the current run. Options are '
'"Server Master", "Server 6.0". '
'The server url is determined by the AMI environment.')
parser.add_argument('-g', '--git_sha1', help='commit sha1 to compare changes with')
parser.add_argument('-c', '--conf', help='Path to conf file', required=True)
parser.add_argument('-s', '--secret', help='Path to secret conf file')
parser.add_argument('-n', '--is-nightly', type=str2bool, help='Is nightly build')
parser.add_argument('-pr', '--is_private', type=str2bool, help='Is private build')
parser.add_argument('--branch', help='GitHub branch name', required=True)
parser.add_argument('--build-number', help='CI job number where the instances were created', required=True)
parser.add_argument('--test_pack_path', help='Path to where the test pack will be saved.',
default='/home/runner/work/content-private/content-private/content/artifacts/packs')
parser.add_argument('--content_root', help='Path to the content root.',
default='/home/runner/work/content-private/content-private/content')
parser.add_argument('--id_set_path', help='Path to the ID set.')
parser.add_argument('-l', '--tests_to_run', help='Path to the Test Filter.',
default='./artifacts/filter_file.txt')
parser.add_argument('-pl', '--pack_ids_to_install', help='Path to the packs to install file.',
default='./artifacts/content_packs_to_install.txt')
# disable-secrets-detection-start
parser.add_argument('-sa', '--service_account',
help=("Path to gcloud service account, is for circleCI usage. "
"For local development use your personal account and "
"authenticate using Google Cloud SDK by running: "
"`gcloud auth application-default login` and leave this parameter blank. "
"For more information go to: "
"https://googleapis.dev/python/google-api-core/latest/auth.html"),
required=False)
# disable-secrets-detection-end
options = parser.parse_args()
return options
def check_test_version_compatible_with_server(test, server_version):
"""
    Checks if a given test is compatible with the given server version.
Arguments:
test: (dict)
Test playbook object from content conf.json. May contain the following fields: "playbookID",
"integrations", "instance_names", "timeout", "nightly", "fromversion", "toversion.
server_version: (int)
The server numerical version.
Returns:
(bool) True if test is compatible with server version or False otherwise.
"""
test_from_version = format_version(test.get('fromversion', '0.0.0'))
test_to_version = format_version(test.get('toversion', '99.99.99'))
server_version = format_version(server_version)
if not (LooseVersion(test_from_version) <= LooseVersion(server_version) <= LooseVersion(test_to_version)):
playbook_id = test.get('playbookID')
logging.debug(
f'Test Playbook: {playbook_id} was ignored in the content installation test due to version mismatch '
f'(test versions: {test_from_version}-{test_to_version}, server version: {server_version})')
return False
return True
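# Worked example (illustrative): a test with fromversion "5.5.0" and toversion "5.9.9"
# is compatible with server version "5.5.0" but not with "6.0.0", because the check is
# fromversion <= server_version <= toversion under LooseVersion comparison.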
def filter_tests_with_incompatible_version(tests, server_version):
"""
Filter all tests with incompatible version to the given server.
Arguments:
tests: (list)
List of test objects.
server_version: (int)
The server numerical version.
Returns:
(lst): List of filtered tests (compatible version)
"""
filtered_tests = [test for test in tests if
check_test_version_compatible_with_server(test, server_version)]
return filtered_tests
def configure_integration_instance(integration, client, placeholders_map):
"""
Configure an instance for an integration
Arguments:
integration: (dict)
Integration object whose params key-values are set
client: (demisto_client)
The client to connect to
placeholders_map: (dict)
Dict that holds the real values to be replaced for each placeholder.
Returns:
(dict): Configured integration instance
"""
integration_name = integration.get('name')
logging.info(f'Configuring instance for integration "{integration_name}"')
integration_instance_name = integration.get('instance_name', '')
integration_params = change_placeholders_to_values(placeholders_map, integration.get('params'))
is_byoi = integration.get('byoi', True)
validate_test = integration.get('validate_test', True)
integration_configuration = __get_integration_config(client, integration_name)
if not integration_configuration:
return None
# In the integration configuration in content-test-conf conf.json, the test_validate flag was set to false
if not validate_test:
logging.debug(f'Skipping configuration for integration: {integration_name} (it has test_validate set to false)')
return None
module_instance = set_integration_instance_parameters(integration_configuration, integration_params,
integration_instance_name, is_byoi, client)
return module_instance
def filepath_to_integration_name(integration_file_path):
"""Load an integration file and return the integration name.
Args:
integration_file_path (str): The path to an integration yml file.
Returns:
(str): The name of the integration.
"""
integration_yaml = get_yaml(integration_file_path)
integration_name = integration_yaml.get('name')
return integration_name
def get_integration_names_from_files(integration_files_list):
integration_names_list = [filepath_to_integration_name(path) for path in integration_files_list]
return [name for name in integration_names_list if name] # remove empty values
def get_new_and_modified_integration_files(branch_name):
"""Return 2 lists - list of new integrations and list of modified integrations since the first commit of the branch.
Args:
branch_name: The branch name against which we will run the 'git diff' command.
Returns:
(tuple): Returns a tuple of two lists, the file paths of the new integrations and modified integrations.
"""
# get changed yaml files (filter only added and modified files)
file_validator = ValidateManager(skip_dependencies=True)
file_validator.branch_name = branch_name
modified_files, added_files, _, _ = file_validator.get_changed_files_from_git()
new_integration_files = [
file_path for file_path in added_files if
find_type(file_path) in [FileType.INTEGRATION, FileType.BETA_INTEGRATION]
]
modified_integration_files = [
file_path for file_path in modified_files if
isinstance(file_path, str) and find_type(file_path) in [FileType.INTEGRATION, FileType.BETA_INTEGRATION]
]
return new_integration_files, modified_integration_files
def is_content_update_in_progress(client):
"""Make request to check if content is updating.
Args:
client (demisto_client): The configured client to use.
Returns:
(str): Returns the request response data which is 'true' if updating and 'false' if not.
"""
host = client.api_client.configuration.host
logging.debug(f'Making "Get" request to server - "{host}" to check if content is installing.')
# make request to check if content is updating
response_data, status_code, _ = demisto_client.generic_request_func(self=client, path='/content/updating',
method='GET', accept='application/json')
if status_code >= 300 or status_code < 200:
result_object = ast.literal_eval(response_data)
message = result_object.get('message', '')
logging.error(f"Failed to check if content is installing - with status code {status_code}\n{message}")
return 'request unsuccessful'
return response_data
def get_content_version_details(client, ami_name):
"""Make request for details about the content installed on the demisto instance.
Args:
client (demisto_client): The configured client to use.
ami_name (string): the role name of the machine
Returns:
(tuple): The release version and asset ID of the content installed on the demisto instance.
"""
host = client.api_client.configuration.host
logging.info(f'Making "POST" request to server - "{host}" to check installed content.')
# make request to installed content details
uri = '/content/installedlegacy' if ami_name in MARKET_PLACE_MACHINES else '/content/installed'
response_data, status_code, _ = demisto_client.generic_request_func(self=client, path=uri,
method='POST')
try:
result_object = ast.literal_eval(response_data)
logging.debug(f'Response was {response_data}')
except ValueError:
logging.exception('failed to parse response from demisto.')
return '', 0
if status_code >= 300 or status_code < 200:
message = result_object.get('message', '')
logging.error(f'Failed to check if installed content details - with status code {status_code}\n{message}')
return result_object.get('release', ''), result_object.get('assetId', 0)
def change_placeholders_to_values(placeholders_map, config_item):
"""Replaces placeholders in the object to their real values
Args:
placeholders_map: (dict)
Dict that holds the real values to be replaced for each placeholder.
config_item: (json object)
Integration configuration object.
Returns:
dict. json object with the real configuration.
"""
item_as_string = json.dumps(config_item)
for key, value in placeholders_map.items():
item_as_string = item_as_string.replace(key, str(value))
return json.loads(item_as_string)
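# Worked example (illustrative): with placeholders_map == {'%%SERVER_HOST%%': '10.0.0.1'},
# a config item {'url': 'https://%%SERVER_HOST%%'} becomes {'url': 'https://10.0.0.1'},
# since the item is serialized to JSON, string-replaced, and parsed back.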
def set_integration_params(build,
integrations,
secret_params,
instance_names,
placeholders_map,
logging_module=logging):
"""
For each integration object, fill in the parameter values needed to configure an instance from
the secret_params taken from our secret configuration file. Because there may be a number of
configurations for a single integration (if there are values provided in our secret conf for
multiple different instances of the same integration) then selects the parameter values for the
configuration of the instance whose instance is in 'instance_names' (will take the last one listed
in 'secret_params'). Note that this function does not explicitly return the modified 'integrations'
object but rather it modifies the 'integrations' object since it is passed by reference and not by
value, so the 'integrations' object that was passed to this function will have been changed once
this function has completed execution and gone out of scope.
Arguments:
build: Build object
integrations: (list of dicts)
List of integration objects whose 'params' attribute will be populated in this function.
secret_params: (list of dicts)
List of secret configuration values for all of our integrations (as well as specific
instances of said integrations).
instance_names: (list)
The names of particular instances of an integration to use the secret_params of as the
configuration values.
placeholders_map: (dict)
Dict that holds the real values to be replaced for each placeholder.
logging_module (Union[ParallelLoggingManager,logging]): The logging module to use
Returns:
(bool): True if integrations params were filled with secret configuration values, otherwise false
"""
for integration in integrations:
integration_params = [change_placeholders_to_values(placeholders_map, item) for item
in secret_params if item['name'] == integration['name']]
if integration_params:
matched_integration_params = integration_params[0]
# if there are more than one integration params, it means that there are configuration
# values in our secret conf for multiple instances of the given integration and now we
# need to match the configuration values to the proper instance as specified in the
# 'instance_names' list argument
if len(integration_params) != 1:
found_matching_instance = False
for item in integration_params:
if item.get('instance_name', 'Not Found') in instance_names:
matched_integration_params = item
found_matching_instance = True
if not found_matching_instance:
optional_instance_names = [optional_integration.get('instance_name', 'None')
for optional_integration in integration_params]
failed_match_instance_msg = 'There are {} instances of {}, please select one of them by using' \
' the instance_name argument in conf.json. The options are:\n{}'
logging_module.error(failed_match_instance_msg.format(len(integration_params),
integration['name'],
'\n'.join(optional_instance_names)))
return False
integration['params'] = matched_integration_params.get('params', {})
integration['byoi'] = matched_integration_params.get('byoi', True)
integration['instance_name'] = matched_integration_params.get('instance_name', integration['name'])
integration['validate_test'] = matched_integration_params.get('validate_test', True)
if integration['name'] not in build.unmockable_integrations:
integration['params'].update({'proxy': True})
logging.debug(
f'Configuring integration "{integration["name"]}" with proxy=True')
else:
integration['params'].update({'proxy': False})
logging.debug(
f'Configuring integration "{integration["name"]}" with proxy=False')
return True
def set_module_params(param_conf, integration_params):
"""Configure a parameter object for use in a module instance.
Each integration parameter is actually an object with many fields that together describe it. E.g. a given
parameter will have all of the following fields - "name", "display", "value", "hasvalue", "defaultValue",
etc. This function fills the "value" field for a parameter configuration object and returns it for use in
a module instance.
Args:
param_conf (dict): The parameter configuration object.
integration_params (dict): The values to use for an integration's parameters to configure an instance.
Returns:
(dict): The configured parameter object
"""
if param_conf['display'] in integration_params or param_conf['name'] in integration_params:
# param defined in conf
key = param_conf['display'] if param_conf['display'] in integration_params else param_conf['name']
if key == 'credentials':
credentials = integration_params[key]
param_value = {
'credential': '',
'identifier': credentials['identifier'],
'password': credentials['password'],
'passwordChanged': False
}
else:
param_value = integration_params[key]
param_conf['value'] = param_value
param_conf['hasvalue'] = True
elif param_conf['defaultValue']:
# if the parameter doesn't have a value provided in the integration's configuration values
# but does have a default value then assign it to the parameter for the module instance
param_conf['value'] = param_conf['defaultValue']
return param_conf
def __set_server_keys(client, integration_params, integration_name):
"""Adds server configuration keys using the demisto_client.
Args:
client (demisto_client): The configured client to use.
integration_params (dict): The values to use for an integration's parameters to configure an instance.
integration_name (str): The name of the integration which the server configurations keys are related to.
"""
if 'server_keys' not in integration_params:
return
logging.info(f'Setting server keys for integration: {integration_name}')
data = {
'data': {},
'version': -1
}
for key, value in integration_params.get('server_keys').items():
data['data'][key] = value
update_server_configuration(
client=client,
server_configuration=data,
error_msg='Failed to set server keys'
)
def set_integration_instance_parameters(integration_configuration,
integration_params,
integration_instance_name,
is_byoi,
client):
"""Set integration module values for integration instance creation
The integration_configuration and integration_params should match, in that
they are for the same integration
Arguments:
integration_configuration: (dict)
dictionary of the integration configuration parameters/keys that need
filling to instantiate an instance of a given integration
integration_params: (dict)
values for a given integration taken from the configuration file in
which the secret values are stored to configure instances of various
integrations
integration_instance_name: (str)
The name of the integration instance being configured if there is one
provided in the conf.json
is_byoi: (bool)
If the integration is byoi or not
client: (demisto_client)
The client to connect to
Returns:
(dict): The configured module instance to send to the Demisto server for
instantiation.
"""
module_configuration = integration_configuration.get('configuration', {})
if not module_configuration:
module_configuration = []
if 'integrationInstanceName' in integration_params:
instance_name = integration_params['integrationInstanceName']
else:
instance_name = '{}_test_{}'.format(integration_instance_name.replace(' ', '_'), str(uuid.uuid4()))
# define module instance
module_instance = {
'brand': integration_configuration['name'],
'category': integration_configuration['category'],
'configuration': integration_configuration,
'data': [],
'enabled': "true",
'engine': '',
'id': '',
'isIntegrationScript': is_byoi,
'name': instance_name,
'passwordProtected': False,
'version': 0
}
# set server keys
__set_server_keys(client, integration_params, integration_configuration['name'])
# set module params
for param_conf in module_configuration:
configured_param = set_module_params(param_conf, integration_params)
module_instance['data'].append(configured_param)
return module_instance
def group_integrations(integrations, skipped_integrations_conf, new_integrations_names, modified_integrations_names):
"""
    Filter integrations into their respective lists - new, modified or unchanged. Integrations on the skip list are
    skipped. If random tests were chosen, we may be configuring integrations that are neither new nor modified.
Args:
integrations (list): The integrations to categorize.
skipped_integrations_conf (dict): Integrations that are on the skip list.
new_integrations_names (list): The names of new integrations.
modified_integrations_names (list): The names of modified integrations.
Returns:
(tuple): Lists of integrations objects as well as an Integration-to-Status dictionary useful for logs.
"""
new_integrations = []
modified_integrations = []
unchanged_integrations = []
integration_to_status = {}
for integration in integrations:
integration_name = integration.get('name', '')
if integration_name in skipped_integrations_conf.keys():
continue
if integration_name in new_integrations_names:
new_integrations.append(integration)
elif integration_name in modified_integrations_names:
modified_integrations.append(integration)
integration_to_status[integration_name] = 'Modified Integration'
else:
unchanged_integrations.append(integration)
integration_to_status[integration_name] = 'Unchanged Integration'
return new_integrations, modified_integrations, unchanged_integrations, integration_to_status
def get_integrations_for_test(test, skipped_integrations_conf):
"""Return a list of integration objects that are necessary for a test (excluding integrations on the skip list).
Args:
test (dict): Test dictionary from the conf.json file containing the playbookID, integrations and
instance names.
skipped_integrations_conf (dict): Skipped integrations dictionary with integration names as keys and
the skip reason as values.
Returns:
(list): List of integration objects to configure.
"""
integrations_conf = test.get('integrations', [])
if not isinstance(integrations_conf, list):
integrations_conf = [integrations_conf]
integrations = [
{'name': integration, 'params': {}} for
integration in integrations_conf if integration not in skipped_integrations_conf
]
return integrations
def update_content_on_demisto_instance(client, server, ami_name):
"""Try to update the content
Args:
client (demisto_client): The configured client to use.
        server (str): The server url to pass to Tests/update_content_data.py
        ami_name (str): The role name of the machine whose installed content version is verified after the update.
    """
content_zip_path = 'artifacts/all_content.zip'
update_content(content_zip_path, server=server, client=client)
# Check if content update has finished installing
sleep_interval = 20
updating_content = is_content_update_in_progress(client)
while updating_content.lower() == 'true':
sleep(sleep_interval)
updating_content = is_content_update_in_progress(client)
if updating_content.lower() == 'request unsuccessful':
# since the request to check if content update installation finished didn't work, can't use that mechanism
# to check and just try sleeping for 30 seconds instead to allow for content update installation to complete
logging.debug('Request to install content was unsuccessful, sleeping for 30 seconds and retrying')
sleep(30)
else:
# check that the content installation updated
# verify the asset id matches the circleci build number / asset_id in the content-descriptor.json
release, asset_id = get_content_version_details(client, ami_name)
logging.info(f'Content Release Version: {release}')
with open('./artifacts/content-descriptor.json', 'r') as cd_file:
cd_json = json.loads(cd_file.read())
cd_release = cd_json.get('release')
cd_asset_id = cd_json.get('assetId')
if release == cd_release and asset_id == cd_asset_id:
logging.success(f'Content Update Successfully Installed on server {server}.')
else:
logging.error(
f'Content Update to version: {release} was Unsuccessful:\nAttempted to install content with release '
f'"{cd_release}" and assetId "{cd_asset_id}" but release "{release}" and assetId "{asset_id}" '
f'were retrieved from the instance post installation.')
if ami_name not in MARKET_PLACE_MACHINES:
os._exit(1)
def report_tests_status(preupdate_fails, postupdate_fails, preupdate_success, postupdate_success,
new_integrations_names, build=None):
"""Prints errors and/or warnings if there are any and returns whether whether testing was successful or not.
Args:
preupdate_fails (set): List of tuples of integrations that failed the "Test" button prior to content
being updated on the demisto instance where each tuple is comprised of the integration name and the
name of the instance that was configured for that integration which failed.
postupdate_fails (set): List of tuples of integrations that failed the "Test" button after content was
updated on the demisto instance where each tuple is comprised of the integration name and the name
of the instance that was configured for that integration which failed.
        preupdate_success (set): List of tuples of integrations that succeeded the "Test" button prior to content
            being updated on the demisto instance where each tuple is comprised of the integration name and the
            name of the instance that was configured for that integration which succeeded.
        postupdate_success (set): List of tuples of integrations that succeeded the "Test" button after content was
            updated on the demisto instance where each tuple is comprised of the integration name and the name
            of the instance that was configured for that integration which succeeded.
new_integrations_names (list): List of the names of integrations that are new since the last official
content release and that will only be present on the demisto instance after the content update is
performed.
build: Build object
Returns:
(bool): False if there were integration instances that succeeded prior to the content update and then
failed after content was updated, otherwise True.
"""
testing_status = True
# a "Test" can be either successful both before and after content update(succeeded_pre_and_post variable),
# fail on one of them(mismatched_statuses variable), or on both(failed_pre_and_post variable)
succeeded_pre_and_post = preupdate_success.intersection(postupdate_success)
if succeeded_pre_and_post:
succeeded_pre_and_post_string = "\n".join(
[f'Integration: "{integration_of_instance}", Instance: "{instance_name}"' for
instance_name, integration_of_instance in succeeded_pre_and_post])
logging.success(
'Integration instances that had ("Test" Button) succeeded both before and after the content update:\n'
f'{succeeded_pre_and_post_string}')
failed_pre_and_post = preupdate_fails.intersection(postupdate_fails)
mismatched_statuses = postupdate_fails - preupdate_fails
failed_only_after_update = []
failed_but_is_new = []
for instance_name, integration_of_instance in mismatched_statuses:
if integration_of_instance in new_integrations_names:
failed_but_is_new.append((instance_name, integration_of_instance))
else:
failed_only_after_update.append((instance_name, integration_of_instance))
# warnings but won't fail the build step
if failed_but_is_new:
failed_but_is_new_string = "\n".join(
[f'Integration: "{integration_of_instance}", Instance: "{instance_name}"'
for instance_name, integration_of_instance in failed_but_is_new])
logging.warning(f'New Integrations ("Test" Button) Failures:\n{failed_but_is_new_string}')
if failed_pre_and_post:
failed_pre_and_post_string = "\n".join(
[f'Integration: "{integration_of_instance}", Instance: "{instance_name}"'
for instance_name, integration_of_instance in failed_pre_and_post])
logging.warning(f'Integration instances that had ("Test" Button) failures '
f'both before and after the content update:\n{pformat(failed_pre_and_post_string)}')
# fail the step if there are instances that only failed after content was updated
if failed_only_after_update:
failed_only_after_update_string = "\n".join(
[f'Integration: "{integration_of_instance}", Instance: "{instance_name}"' for
instance_name, integration_of_instance in failed_only_after_update])
testing_status = False
logging.critical('Integration instances that had ("Test" Button) failures only after content was updated:\n'
f'{pformat(failed_only_after_update_string)}.\n'
f'This indicates that your updates introduced breaking changes to the integration.')
else:
# creating this file to indicates that this instance passed post update tests
if build:
with open("./Tests/is_post_update_passed_{}.txt".format(build.ami_env.replace(' ', '')), 'a'):
pass
return testing_status
def get_env_conf():
if Build.run_environment == Running.CI_RUN:
return get_json_file(Build.env_results_path)
elif Build.run_environment == Running.WITH_LOCAL_SERVER:
# START CHANGE ON LOCAL RUN #
return [{
"InstanceDNS": "http://localhost:8080",
"Role": "Server Master" # e.g. 'Server Master'
}]
elif Build.run_environment == Running.WITH_OTHER_SERVER:
return [{
"InstanceDNS": "DNS NANE", # without http prefix
"Role": "DEMISTO EVN" # e.g. 'Server Master'
}]
# END CHANGE ON LOCAL RUN #
def map_server_to_port(env_results, instance_role):
"""
Arguments:
env_results: (dict)
env_results.json in server
instance_role: (str)
The amazon machine image environment whose IP we should connect to.
Returns:
        (dict): A mapping of each matching server's DNS to its tunnel port
"""
ip_to_port_map = {env.get('InstanceDNS'): env.get('TunnelPort') for env in env_results if
instance_role in env.get('Role', '')}
return ip_to_port_map
def get_json_file(path):
with open(path, 'r') as json_file:
return json.loads(json_file.read())
def configure_servers_and_restart(build):
manual_restart = Build.run_environment == Running.WITH_LOCAL_SERVER
for server in build.servers:
configurations = dict()
configure_types = []
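        # RedHat machines run containers with podman, so they get the podman-specific docker
        # hardening configuration and the no-proxy settings instead of the regular hardening.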
if is_redhat_instance(server.internal_ip):
configurations.update(DOCKER_HARDENING_CONFIGURATION_FOR_PODMAN)
configurations.update(NO_PROXY_CONFIG)
configurations['python.pass.extra.keys'] += "##--network=slirp4netns:cidr=192.168.0.0/16"
else:
configurations.update(DOCKER_HARDENING_CONFIGURATION)
configure_types.append('docker hardening')
configure_types.append('marketplace')
configurations.update(MARKET_PLACE_CONFIGURATION)
error_msg = 'failed to set {} configurations'.format(' and '.join(configure_types))
server.add_server_configuration(configurations, error_msg=error_msg, restart=not manual_restart)
if manual_restart:
input('restart your server and then press enter.')
else:
logging.info('Done restarting servers. Sleeping for 1 minute')
sleep(60)
def get_tests(build: Build) -> List[str]:
"""
    Selects the tests that should be run in this execution and filters out those that cannot run on this server version
Args:
build: Build object
Returns:
Test configurations from conf.json that should be run in this execution
"""
server_numeric_version: str = build.server_numeric_version
tests: dict = build.tests
if Build.run_environment == Running.CI_RUN:
filtered_tests = extract_filtered_tests()
if build.is_nightly:
# skip test button testing
logging.debug('Not running instance tests in nightly flow')
tests_for_iteration = []
elif filtered_tests:
tests_for_iteration = [test for test in tests if test.get('playbookID', '') in filtered_tests]
else:
tests_for_iteration = tests
tests_for_iteration = filter_tests_with_incompatible_version(tests_for_iteration, server_numeric_version)
return tests_for_iteration
else:
# START CHANGE ON LOCAL RUN #
return [
{
"playbookID": "Docker Hardening Test",
"fromversion": "5.0.0"
},
{
"integrations": "SplunkPy",
"playbookID": "SplunkPy-Test-V2",
"memory_threshold": 500,
"instance_names": "use_default_handler"
}
]
# END CHANGE ON LOCAL RUN #
def get_changed_integrations(build: Build) -> tuple:
"""
Return 2 lists - list of new integrations and list of modified integrations since the commit of the git_sha1.
Args:
build: the build object
Returns:
list of new integrations and list of modified integrations
"""
new_integrations_files, modified_integrations_files = get_new_and_modified_integration_files(
build.branch_name) if not build.is_private else ([], [])
new_integrations_names, modified_integrations_names = [], []
if new_integrations_files:
new_integrations_names = get_integration_names_from_files(new_integrations_files)
logging.debug(f'New Integrations Since Last Release:\n{new_integrations_names}')
if modified_integrations_files:
modified_integrations_names = get_integration_names_from_files(modified_integrations_files)
logging.debug(f'Updated Integrations Since Last Release:\n{modified_integrations_names}')
return new_integrations_names, modified_integrations_names
def get_pack_ids_to_install():
if Build.run_environment == Running.CI_RUN:
with open('./artifacts/content_packs_to_install.txt', 'r') as packs_stream:
pack_ids = packs_stream.readlines()
return [pack_id.rstrip('\n') for pack_id in pack_ids]
else:
# START CHANGE ON LOCAL RUN #
return [
'SplunkPy'
]
# END CHANGE ON LOCAL RUN #
def nightly_install_packs(build, install_method=None, pack_path=None, service_account=None):
threads_list = []
if not install_method:
raise Exception('Install method was not provided.')
# For each server url we install pack/ packs
for thread_index, server in enumerate(build.servers):
kwargs = {'client': server.client, 'host': server.internal_ip}
if service_account:
kwargs['service_account'] = service_account
if pack_path:
kwargs['pack_path'] = pack_path
threads_list.append(Thread(target=install_method, kwargs=kwargs))
run_threads_list(threads_list)
def install_nightly_pack(build):
nightly_install_packs(build, install_method=install_all_content_packs_for_nightly,
service_account=build.service_account)
create_nightly_test_pack()
nightly_install_packs(build, install_method=upload_zipped_packs,
pack_path=f'{Build.test_pack_target}/test_pack.zip')
logging.info('Sleeping for 45 seconds while installing nightly packs')
sleep(45)
def install_packs(build, pack_ids=None):
pack_ids = get_pack_ids_to_install() if pack_ids is None else pack_ids
installed_content_packs_successfully = True
for server in build.servers:
try:
_, flag = search_and_install_packs_and_their_dependencies(pack_ids, server.client)
if not flag:
raise Exception('Failed to search and install packs.')
except Exception:
logging.exception('Failed to search and install packs')
installed_content_packs_successfully = False
return installed_content_packs_successfully
def configure_server_instances(build: Build, tests_for_iteration, all_new_integrations, modified_integrations):
modified_module_instances = []
new_module_instances = []
testing_client = build.servers[0].client
for test in tests_for_iteration:
integrations = get_integrations_for_test(test, build.skipped_integrations_conf)
playbook_id = test.get('playbookID')
new_integrations, modified_integrations, unchanged_integrations, integration_to_status = group_integrations(
integrations, build.skipped_integrations_conf, all_new_integrations, modified_integrations
)
integration_to_status_string = '\n\t\t\t\t\t\t'.join(
[f'"{key}" - {val}' for key, val in integration_to_status.items()])
if integration_to_status_string:
logging.info(f'All Integrations for test "{playbook_id}":\n\t\t\t\t\t\t{integration_to_status_string}')
else:
logging.info(f'No Integrations for test "{playbook_id}"')
instance_names_conf = test.get('instance_names', [])
if not isinstance(instance_names_conf, list):
instance_names_conf = [instance_names_conf]
integrations_to_configure = modified_integrations[:]
integrations_to_configure.extend(unchanged_integrations)
placeholders_map = {'%%SERVER_HOST%%': build.servers[0]}
new_ints_params_set = set_integration_params(build,
new_integrations,
build.secret_conf['integrations'],
instance_names_conf,
placeholders_map)
ints_to_configure_params_set = set_integration_params(build,
integrations_to_configure,
build.secret_conf['integrations'],
instance_names_conf, placeholders_map)
if not new_ints_params_set:
logging.error(f'failed setting parameters for integrations: {new_integrations}')
if not ints_to_configure_params_set:
logging.error(f'failed setting parameters for integrations: {integrations_to_configure}')
if not (new_ints_params_set and ints_to_configure_params_set):
continue
modified_module_instances_for_test, new_module_instances_for_test = configure_modified_and_new_integrations(
build,
integrations_to_configure,
new_integrations,
testing_client)
modified_module_instances.extend(modified_module_instances_for_test)
new_module_instances.extend(new_module_instances_for_test)
return modified_module_instances, new_module_instances
def configure_modified_and_new_integrations(build: Build,
modified_integrations_to_configure: list,
new_integrations_to_configure: list,
demisto_client: demisto_client) -> tuple:
"""
Configures old and new integrations in the server configured in the demisto_client.
Args:
build: The build object
        modified_integrations_to_configure: Integrations to configure that already exist
new_integrations_to_configure: Integrations to configure that were created in this build
demisto_client: A demisto client
Returns:
A tuple with two lists:
1. List of configured instances of modified integrations
2. List of configured instances of new integrations
"""
modified_modules_instances = []
new_modules_instances = []
for integration in modified_integrations_to_configure:
placeholders_map = {'%%SERVER_HOST%%': build.servers[0]}
module_instance = configure_integration_instance(integration, demisto_client, placeholders_map)
if module_instance:
modified_modules_instances.append(module_instance)
for integration in new_integrations_to_configure:
placeholders_map = {'%%SERVER_HOST%%': build.servers[0]}
module_instance = configure_integration_instance(integration, demisto_client, placeholders_map)
if module_instance:
new_modules_instances.append(module_instance)
return modified_modules_instances, new_modules_instances
def instance_testing(build: Build,
all_module_instances: list,
pre_update: bool,
use_mock: bool = True,
first_call: bool = True) -> Tuple[set, set]:
"""
Runs 'test-module' command for the instances detailed in `all_module_instances`
Args:
build: An object containing the current build info.
all_module_instances: The integration instances that should be tested
pre_update: Whether this instance testing is before or after the content update on the server.
        use_mock: Whether to use mock while testing mockable integrations. Should be used mainly with
            private content builds, which don't use the mocks.
        first_call: Indicates whether this is the first time the function is called from the same place
Returns:
A set of the successful tests containing the instance name and the integration name
A set of the failed tests containing the instance name and the integration name
"""
update_status = 'Pre' if pre_update else 'Post'
failed_tests = set()
successful_tests = set()
# Test all module instances (of modified + unchanged integrations) pre-updating content
if all_module_instances:
# only print start message if there are instances to configure
logging.info(f'Start of Instance Testing ("Test" button) ({update_status}-update)')
else:
logging.info(f'No integrations to configure for the chosen tests. ({update_status}-update)')
failed_instances = []
for instance in all_module_instances:
integration_of_instance = instance.get('brand', '')
instance_name = instance.get('name', '')
# If there is a failure, __test_integration_instance will print it
if integration_of_instance not in build.unmockable_integrations and use_mock:
success = test_integration_with_mock(build, instance, pre_update)
else:
testing_client = build.servers[0].reconnect_client()
success, _ = __test_integration_instance(testing_client, instance)
if not success:
failed_tests.add((instance_name, integration_of_instance))
failed_instances.append(instance)
else:
successful_tests.add((instance_name, integration_of_instance))
    # in case some tests failed post update, wait 15 seconds and run the failed tests again
if failed_instances and not pre_update and first_call:
logging.info("some post-update tests failed, sleeping for 15 seconds, then running the failed tests again")
sleep(15)
succeeded, failed_tests = instance_testing(build, failed_instances, pre_update=False, first_call=False)
return successful_tests, failed_tests
def test_integration_with_mock(build: Build, instance: dict, pre_update: bool):
"""
Runs 'test-module' for given integration with mitmproxy
    In case the playback mode fails and this is a post-update run - a record attempt will be executed.
Args:
build: An object containing the current build info.
instance: A dict containing the instance details
pre_update: Whether this instance testing is before or after the content update on the server.
Returns:
The result of running the 'test-module' command for the given integration.
        If a record was executed - will return the result of the 'test-module' with the record mode only.
"""
testing_client = build.servers[0].reconnect_client()
integration_of_instance = instance.get('brand', '')
logging.debug(f'Integration "{integration_of_instance}" is mockable, running test-module with mitmproxy')
has_mock_file = build.proxy.has_mock_file(integration_of_instance)
success = False
if has_mock_file:
with run_with_mock(build.proxy, integration_of_instance) as result_holder:
success, _ = __test_integration_instance(testing_client, instance)
result_holder[RESULT] = success
if not success:
logging.warning(f'Running test-module for "{integration_of_instance}" has failed in playback mode')
if not success and not pre_update:
logging.debug(f'Recording a mock file for integration "{integration_of_instance}".')
with run_with_mock(build.proxy, integration_of_instance, record=True) as result_holder:
success, _ = __test_integration_instance(testing_client, instance)
result_holder[RESULT] = success
if not success:
logging.debug(f'Record mode for integration "{integration_of_instance}" has failed.')
return success
def update_content_till_v6(build: Build):
threads_list = []
# For each server url we install content
for thread_index, server in enumerate(build.servers):
t = Thread(target=update_content_on_demisto_instance,
kwargs={'client': server.client, 'server': server.internal_ip, 'ami_name': build.ami_env})
threads_list.append(t)
run_threads_list(threads_list)
def disable_instances(build: Build):
for server in build.servers:
disable_all_integrations(server.client)
def create_nightly_test_pack():
test_pack_zip(Build.content_path, Build.test_pack_target)
def test_files(content_path):
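    # Yield every test playbook file under each pack's TestPlaybooks folder (including the
    # NonCircleTests subfolder), skipping packs listed in SKIPPED_PACKS.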
packs_root = f'{content_path}/Packs'
packs = filter(lambda x: x.is_dir(), os.scandir(packs_root))
for pack_dir in packs:
        if pack_dir.name in SKIPPED_PACKS:
continue
playbooks_root = f'{pack_dir.path}/TestPlaybooks'
if os.path.isdir(playbooks_root):
for playbook_path, playbook in get_test_playbooks_in_dir(playbooks_root):
yield playbook_path, playbook
if os.path.isdir(f'{playbooks_root}/NonCircleTests'):
for playbook_path, playbook in get_test_playbooks_in_dir(f'{playbooks_root}/NonCircleTests'):
yield playbook_path, playbook
def get_test_playbooks_in_dir(path):
playbooks = filter(lambda x: x.is_file(), os.scandir(path))
for playbook in playbooks:
yield playbook.path, playbook
def test_pack_metadata():
now = datetime.now().isoformat().split('.')[0]
now = f'{now}Z'
metadata = {
"name": "nightly test",
"id": str(uuid.uuid4()),
"description": "nightly test pack (all test playbooks and scripts).",
"created": now,
"updated": now,
"legacy": True,
"support": "Cortex XSOAR",
"supportDetails": {},
"author": "Cortex XSOAR",
"authorImage": "",
"certification": "certified",
"price": 0,
"serverMinVersion": "6.0.0",
"serverLicense": "",
"currentVersion": "1.0.0",
"general": [],
"tags": [],
"categories": [
"Forensics & Malware Analysis"
],
"contentItems": {},
"integrations": [],
"useCases": [],
"keywords": [],
"dependencies": {}
}
return json.dumps(metadata, indent=4)
def test_pack_zip(content_path, target):
with zipfile.ZipFile(f'{target}/test_pack.zip', 'w', zipfile.ZIP_DEFLATED) as zip_file:
zip_file.writestr('test_pack/metadata.json', test_pack_metadata())
for test_path, test in test_files(content_path):
if not test_path.endswith('.yml'):
continue
test = test.name
with open(test_path, 'r') as test_file:
if not (test.startswith('playbook-') or test.startswith('script-')):
test_type = find_type(_dict=yaml.safe_load(test_file), file_type='yml').value
test_file.seek(0)
test_target = f'test_pack/TestPlaybooks/{test_type}-{test}'
else:
test_target = f'test_pack/TestPlaybooks/{test}'
zip_file.writestr(test_target, test_file.read())
def get_non_added_packs_ids(build: Build):
"""
:param build: the build object
:return: all non added packs i.e. unchanged packs (dependencies) and modified packs
"""
compare_against = 'origin/master{}'.format('' if not build.branch_name == 'master' else '~1')
added_files = run_command(f'git diff --name-only --diff-filter=A '
f'{compare_against}..refs/heads/{build.branch_name} -- Packs/*/pack_metadata.json')
if os.getenv('CONTRIB_BRANCH'):
added_contrib_files = run_command(
'git status -uall --porcelain -- Packs/*/pack_metadata.json | grep "?? "').replace('?? ', '')
added_files = added_files if not added_contrib_files else '\n'.join([added_files, added_contrib_files])
added_files = filter(lambda x: x, added_files.split('\n'))
added_pack_ids = map(lambda x: x.split('/')[1], added_files)
return set(get_pack_ids_to_install()) - set(added_pack_ids)
def set_marketplace_url(servers, branch_name, ci_build_number):
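    # Point the marketplace at the GCS bucket that holds the content packs built for this
    # branch and build number.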
url_suffix = quote_plus(f'{branch_name}/{ci_build_number}')
config_path = 'marketplace.bootstrap.bypass.url'
config = {config_path: f'https://storage.googleapis.com/marketplace-ci-build/content/builds/{url_suffix}'}
for server in servers:
server.add_server_configuration(config, 'failed to configure marketplace custom url ', True)
logging.success('Updated marketplace url and restarted servers')
logging.info('sleeping for 60 seconds')
sleep(60)
@run_with_proxy_configured
def test_integrations_post_update(build: Build, new_module_instances: list, modified_module_instances: list) -> tuple:
"""
Runs 'test-module on all integrations for post-update check
Args:
build: A build object
new_module_instances: A list containing new integrations instances to run test-module on
modified_module_instances: A list containing old (existing) integrations instances to run test-module on
Returns:
* A list of integration names that have failed the 'test-module' execution post update
* A list of integration names that have succeeded the 'test-module' execution post update
"""
modified_module_instances.extend(new_module_instances)
successful_tests_post, failed_tests_post = instance_testing(build, modified_module_instances, pre_update=False)
return successful_tests_post, failed_tests_post
def update_content_on_servers(build: Build) -> bool:
"""
Updates content on the build's server according to the server version
Args:
build: Build object
Returns:
A boolean that indicates whether the content installation was successful.
    If the server version is lower than 6.0.0 - will return the 'installed_content_packs_successfully' parameter as is
If the server version is higher or equal to 6.0 - will return True if the packs installation was successful
both before that update and after the update.
"""
installed_content_packs_successfully = True
if LooseVersion(build.server_numeric_version) < LooseVersion('6.0.0'):
update_content_till_v6(build)
elif not build.is_nightly:
set_marketplace_url(build.servers, build.branch_name, build.ci_build_number)
installed_content_packs_successfully = install_packs(build)
return installed_content_packs_successfully
@run_with_proxy_configured
def configure_and_test_integrations_pre_update(build: Build, new_integrations, modified_integrations) -> tuple:
"""
Configures integration instances that exist in the current version and for each integration runs 'test-module'.
Args:
build: Build object
new_integrations: A list containing new integrations names
modified_integrations: A list containing modified integrations names
Returns:
A tuple consists of:
* A list of modified module instances configured
* A list of new module instances configured
* A list of integrations that have failed the 'test-module' command execution
* A list of integrations that have succeeded the 'test-module' command execution
* A list of new integrations names
"""
tests_for_iteration = get_tests(build)
modified_module_instances, new_module_instances = configure_server_instances(build,
tests_for_iteration,
new_integrations,
modified_integrations)
successful_tests_pre, failed_tests_pre = instance_testing(build, modified_module_instances, pre_update=True)
return modified_module_instances, new_module_instances, failed_tests_pre, successful_tests_pre
def install_packs_pre_update(build: Build) -> bool:
"""
Install packs on server according to server version
Args:
build: A build object
Returns:
A boolean that indicates whether the installation was successful or not
"""
installed_content_packs_successfully = False
if LooseVersion(build.server_numeric_version) >= LooseVersion('6.0.0'):
if build.is_nightly:
install_nightly_pack(build)
installed_content_packs_successfully = True
else:
if not build.is_private:
pack_ids = get_non_added_packs_ids(build)
installed_content_packs_successfully = install_packs(build, pack_ids=pack_ids)
else:
installed_content_packs_successfully = True
return installed_content_packs_successfully
def main():
install_logging('Install_Content_And_Configure_Integrations_On_Server.log')
build = Build(options_handler())
logging.info(f"Build Number: {build.ci_build_number}")
configure_servers_and_restart(build)
disable_instances(build)
install_packs_pre_update(build)
new_integrations, modified_integrations = get_changed_integrations(build)
pre_update_configuration_results = configure_and_test_integrations_pre_update(build,
new_integrations,
modified_integrations)
modified_module_instances, new_module_instances, failed_tests_pre, successful_tests_pre = pre_update_configuration_results
installed_content_packs_successfully = update_content_on_servers(build)
successful_tests_post, failed_tests_post = test_integrations_post_update(build,
new_module_instances,
modified_module_instances)
success = report_tests_status(failed_tests_pre, failed_tests_post, successful_tests_pre, successful_tests_post,
new_integrations, build)
if not success or not installed_content_packs_successfully:
sys.exit(2)
if __name__ == '__main__':
main()
|
[] |
[] |
[
"CIRCLECI",
"CONTRIB_BRANCH",
"ARTIFACTS_FOLDER",
"CI_PROJECT_DIR",
"HOME"
] |
[]
|
["CIRCLECI", "CONTRIB_BRANCH", "ARTIFACTS_FOLDER", "CI_PROJECT_DIR", "HOME"]
|
python
| 5 | 0 | |
chemvae/VAE.py
|
"""
ahihi do ngoc
This version of autoencoder is able to save weights and load weights for the
encoder and decoder portions of the network
"""
# from gpu_utils import pick_gpu_lowest_memory
# gpu_free_number = str(pick_gpu_lowest_memory())
#
# import os
# os.environ['CUDA_VISIBLE_DEVICES'] = '{}'.format(gpu_free_number)
from sklearn.preprocessing import OneHotEncoder
import argparse
import numpy as np
import tensorflow as tf
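# GPU options: cap per-process GPU memory at 50% and let the allocation grow on demand.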
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.5
config.gpu_options.allow_growth = True
import yaml
import time
import os
from keras import backend as K
from keras.models import Model
from keras.optimizers import SGD, Adam, RMSprop
import hyperparameters
import pandas as pd
import mol_callbacks as mol_cb
from keras.callbacks import CSVLogger
from models import encoder_model, load_encoder
from models import decoder_model, load_decoder
from models import property_predictor_model, load_property_predictor
from models import variational_layers
from functools import partial
from keras.layers import Lambda
from keras.utils import to_categorical
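# Character-to-index vocabulary used to one-hot encode SMILES strings; ' ' (index 34) is the padding symbol.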
DICT = {'5': 29, '=': 22, 'N': 31, 'l': 16, 'H': 18, ']': 3, '@': 21, '6': 1, 'O': 17, 'c': 19, '2': 27, '8': 25, '3': 4, '7': 0, 'I': 15, 'C': 26, 'F': 28, '-': 7, 'P': 24, '/': 9, ')': 13, ' ': 34, '#': 14, 'r': 30, '\\': 33, '1': 20, 'n': 23, '+': 32, '[': 12, 'o': 2, 's': 5, '4': 11, 'S': 8, '(': 6, 'B': 10}
str = "CC(C)(C)c1ccc2occ(CC(=O)Nc3ccccc3F)c2c1"
def one_hot(str, LEN_MAX = 120):
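    # Pad the SMILES string with spaces up to LEN_MAX characters, map each character to its
    # DICT index and return a one-hot matrix with one row per character.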
str = list(str)
if len(str) < LEN_MAX:
for i in range(LEN_MAX - len(str)):
str.append(" ")
hot = []
for char in list(str):
hot.append(DICT[char])
return to_categorical(hot)
def load_data():
link1 = '250k_rndm_zinc_drugs_clean_3.csv'
df1 = pd.read_csv(link1, delimiter=',', names = ['smiles','1','2','3'])
smiles = list(df1.smiles)[1:]
X = []
for smile in smiles:
try:
X.append(one_hot(smile[:-1]))
        except Exception:
            print("skipping SMILES that could not be one-hot encoded:", smile)
X = np.array(X)
print(X.shape)
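    # Keep only a multiple of 126 samples (presumably the batch size used downstream) and
    # split them 80/20 into training and validation sets.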
id = int (X.shape[0] / 126)
idx = int (id * 0.8)
X_train = X[: idx*126,:,:]
X_val = X[idx*126 : id*126,:,:]
return X_train, X_val
def load_models(params):
def identity(x):
return K.identity(x)
# def K_params with kl_loss_var
kl_loss_var = K.variable(params['kl_loss_weight'])
if params['reload_model'] == True:
encoder = load_encoder(params)
decoder = load_decoder(params)
else:
encoder = encoder_model(params)
decoder = decoder_model(params)
x_in = encoder.inputs[0]
z_mean, enc_output = encoder(x_in)
z_samp, z_mean_log_var_output = variational_layers(z_mean, enc_output, kl_loss_var, params)
# Decoder
if params['do_tgru']:
x_out = decoder([z_samp, x_in])
else:
x_out = decoder(z_samp)
x_out = Lambda(identity, name='x_pred')(x_out)
model_outputs = [x_out, z_mean_log_var_output]
AE_only_model = Model(x_in, model_outputs)
print(encoder.summary())
print("---------------------------")
print(decoder.summary())
print("--------------------------")
print(AE_only_model.summary())
if params['do_prop_pred']:
if params['reload_model'] == True:
property_predictor = load_property_predictor(params)
else:
property_predictor = property_predictor_model(params)
if (('reg_prop_tasks' in params) and (len(params['reg_prop_tasks']) > 0 ) and
('logit_prop_tasks' in params) and (len(params['logit_prop_tasks']) > 0 )):
reg_prop_pred, logit_prop_pred = property_predictor(z_mean)
reg_prop_pred = Lambda(identity, name='reg_prop_pred')(reg_prop_pred)
logit_prop_pred = Lambda(identity, name='logit_prop_pred')(logit_prop_pred)
model_outputs.extend([reg_prop_pred, logit_prop_pred])
# regression only scenario
elif ('reg_prop_tasks' in params) and (len(params['reg_prop_tasks']) > 0 ):
reg_prop_pred = property_predictor(z_mean)
reg_prop_pred = Lambda(identity, name='reg_prop_pred')(reg_prop_pred)
model_outputs.append(reg_prop_pred)
# logit only scenario
elif ('logit_prop_tasks' in params) and (len(params['logit_prop_tasks']) > 0 ):
logit_prop_pred = property_predictor(z_mean)
logit_prop_pred = Lambda(identity, name='logit_prop_pred')(logit_prop_pred)
model_outputs.append(logit_prop_pred)
else:
raise ValueError('no logit tasks or regression tasks specified for property prediction')
# making the models:
AE_PP_model = Model(x_in, model_outputs)
return AE_only_model, AE_PP_model, encoder, decoder, property_predictor, kl_loss_var
else:
return AE_only_model, encoder, decoder, kl_loss_var
def kl_loss(truth_dummy, x_mean_log_var_output):
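    # Closed-form KL divergence between the approximate posterior N(mean, exp(log_var)) and
    # the standard normal prior, averaged over the latent dimensions.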
x_mean, x_log_var = tf.split(x_mean_log_var_output, 2, axis=1)
print('x_mean shape in kl_loss: ', x_mean.get_shape())
kl_loss = - 0.5 * \
K.mean(1 + x_log_var - K.square(x_mean) -
K.exp(x_log_var), axis=-1)
return kl_loss
def main_no_prop(params):
start_time = time.time()
X_train, X_test = load_data()
print("---------------------------")
print(X_train.shape)
print(X_test.shape)
print("---------------------------")
AE_only_model, encoder, decoder, kl_loss_var = load_models(params)
# compile models
if params['optim'] == 'adam':
optim = Adam(lr=params['lr'], beta_1=params['momentum'])
elif params['optim'] == 'rmsprop':
optim = RMSprop(lr=params['lr'], rho=params['momentum'])
elif params['optim'] == 'sgd':
optim = SGD(lr=params['lr'], momentum=params['momentum'])
else:
        raise NotImplementedError("Please define a valid optimizer")
model_losses = {'x_pred': params['loss'],
'z_mean_log_var': kl_loss}
# vae metrics, callbacks
vae_sig_schedule = partial(mol_cb.sigmoid_schedule, slope=params['anneal_sigmod_slope'],
start=params['vae_annealer_start'])
vae_anneal_callback = mol_cb.WeightAnnealer_epoch(
vae_sig_schedule, kl_loss_var, params['kl_loss_weight'], 'vae' )
csv_clb = CSVLogger(params["history_file"], append=False)
callbacks = [ vae_anneal_callback, csv_clb]
def vae_anneal_metric(y_true, y_pred):
return kl_loss_var
xent_loss_weight = K.variable(params['xent_loss_weight'])
model_train_targets = {'x_pred':X_train,
'z_mean_log_var':np.ones((np.shape(X_train)[0], params['hidden_dim'] * 2))}
model_test_targets = {'x_pred':X_test,
'z_mean_log_var':np.ones((np.shape(X_test)[0], params['hidden_dim'] * 2))}
AE_only_model.compile(loss=model_losses,
loss_weights=[xent_loss_weight,
kl_loss_var],
optimizer=optim,
metrics={'x_pred': ['categorical_accuracy',vae_anneal_metric]}
)
keras_verbose = params['verbose_print']
AE_only_model.fit(X_train, model_train_targets,
batch_size=params['batch_size'],
epochs=params['epochs'],
initial_epoch=params['prev_epochs'],
callbacks=callbacks,
verbose=keras_verbose,
validation_data=[ X_test, model_test_targets]
)
encoder.save(params['encoder_weights_file'])
decoder.save(params['decoder_weights_file'])
print('time of run : ', time.time() - start_time)
print('**FINISHED**')
return
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--exp_file',
help='experiment file', default='./exp.json')
parser.add_argument('-d', '--directory',
help='exp directory', default=None)
args = vars(parser.parse_args())
if args['directory'] is not None:
args['exp_file'] = os.path.join(args['directory'], args['exp_file'])
params = hyperparameters.load_params(args['exp_file'])
print("All params:--------------------------------\n", params)
print("----------------------------------------")
main_no_prop(params)
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
SDN/pox_controller.py
|
import os
from pox.core import core
from pox.lib.util import dpid_to_str
from pox_common import *
def on_packet(event):
dpid = dpid_to_str(event.connection.dpid)
switch = Switch.by_dpid[dpid]
print(f"Received unhandled packet from {switch}, sending program back...")
messages = []
if switch.number == 1:
messages += allow_all_ip("192.168.60.1", Action.fwd(1))
messages += allow_all_ip("192.168.61.0/24", Action.fwd(2))
messages += allow_all_ip("192.168.62.0/24", Action.fwd(3))
elif switch.number == 2:
messages += allow_all_ip("192.168.60.0/24", Action.fwd(1))
messages += allow_all_ip("192.168.61.1", Action.fwd(2))
messages += allow_all_ip("192.168.62.0/24", Action.fwd(3))
elif switch.number == 3:
if os.getenv("LIMITED_FLOWS") == "1":
for peer, action in [
["192.168.60.1", Action.fwd(1)],
["192.168.61.1", Action.fwd(2)],
]:
for protocol in [Protocol.arp]:
messages += add_ip_flow(
"192.168.62.2", None, peer, None, [action], protocol, 3
)
for peer_port in [22, 80]:
messages += add_ip_flow(
"192.168.62.2", None, peer, peer_port, [action], Protocol.tcp, 3
)
for protocol in [Protocol.arp, Protocol.ip]:
messages += add_ip_flow(
"192.168.62.2", None, None, None, [], protocol, 2
)
messages += allow_all_ip("192.168.60.0/24", Action.fwd(1))
messages += allow_all_ip("192.168.61.0/24", Action.fwd(2))
messages += allow_all_ip("192.168.62.1", Action.fwd(4))
messages += allow_all_ip("192.168.62.2", Action.fwd(3))
for message in messages:
event.connection.send(message)
def launch():
print("Launching POX Controller...")
core.openflow.addListenerByName(
"ConnectionUp", lambda evt: Switch.on_connection_up(evt)
)
core.openflow.addListenerByName("PacketIn", on_packet)
print("Added listeners.")
|
[] |
[] |
[
"LIMITED_FLOWS"
] |
[]
|
["LIMITED_FLOWS"]
|
python
| 1 | 0 | |
src/initTest/initTest.go
|
package main
import (
"fmt"
"go_awesomeProject/src/initTest/utils"
"os"
"os/exec"
"time"
)
func main() {
path := utils.GetPath()
fmt.Println(path)
fmt.Println("subType=", utils.GetSubType())
time.Sleep(1 * time.Second)
go func() {
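		// Read the environment from a child shell script and compare it with what this process
		// sees directly via os.Getenv.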
fmt.Println("子进程获取环境变量")
cmd := exec.Command("/bin/bash", "-c", "sh /Users/xieym/MyWorkspace/mycode/GolandProjects/go_awesomeProject/src/initTest/getEnv.sh")
output, err := cmd.Output()
if err != nil {
fmt.Println("获取出现异常")
fmt.Println(err)
}
fmt.Println("output=", string(output))
getenv := os.Getenv("WCloud_Mesh_SubType")
fmt.Println("子进程获取:", getenv)
}()
time.Sleep(5 * time.Second)
}
|
[
"\"WCloud_Mesh_SubType\""
] |
[] |
[
"WCloud_Mesh_SubType"
] |
[]
|
["WCloud_Mesh_SubType"]
|
go
| 1 | 0 | |
hackowasp/wsgi.py
|
"""
WSGI config for hackowasp project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hackowasp.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
transformers/file_utils.py
|
"""
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
from __future__ import (absolute_import, division, print_function, unicode_literals)
import sys
import json
import logging
import os
import six
import shutil
import tempfile
import fnmatch
from functools import wraps
from hashlib import sha256
from io import open
import boto3
from botocore.config import Config
from botocore.exceptions import ClientError
import requests
from tqdm.auto import tqdm
from contextlib import contextmanager
from . import __version__
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
try:
os.environ.setdefault('USE_TORCH', 'YES')
if os.environ['USE_TORCH'].upper() in ('1', 'ON', 'YES'):
import torch
_torch_available = True # pylint: disable=invalid-name
logger.info("PyTorch version {} available.".format(torch.__version__))
else:
logger.info("USE_TORCH override through env variable, disabling PyTorch")
_torch_available = False
except ImportError:
_torch_available = False # pylint: disable=invalid-name
try:
os.environ.setdefault('USE_TF', 'YES')
if os.environ['USE_TF'].upper() in ('1', 'ON', 'YES'):
import tensorflow as tf
assert hasattr(tf, '__version__') and int(tf.__version__[0]) >= 2
_tf_available = True # pylint: disable=invalid-name
logger.info("TensorFlow version {} available.".format(tf.__version__))
else:
logger.info("USE_TF override through env variable, disabling Tensorflow")
_tf_available = False
except (ImportError, AssertionError):
_tf_available = False # pylint: disable=invalid-name
try:
from torch.hub import _get_torch_home
torch_cache_home = _get_torch_home()
except ImportError:
torch_cache_home = os.path.expanduser(
os.getenv('TORCH_HOME', os.path.join(
os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch')))
default_cache_path = os.path.join(torch_cache_home, 'transformers')
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
from pathlib import Path
PYTORCH_PRETRAINED_BERT_CACHE = Path(
os.getenv('PYTORCH_TRANSFORMERS_CACHE', os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)))
except (AttributeError, ImportError):
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('PYTORCH_TRANSFORMERS_CACHE',
os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
default_cache_path))
PYTORCH_TRANSFORMERS_CACHE = PYTORCH_PRETRAINED_BERT_CACHE # Kept for backward compatibility
TRANSFORMERS_CACHE = PYTORCH_PRETRAINED_BERT_CACHE # Kept for backward compatibility
WEIGHTS_NAME = "pytorch_model.bin"
TF2_WEIGHTS_NAME = 'tf_model.h5'
TF_WEIGHTS_NAME = 'model.ckpt'
CONFIG_NAME = "config.json"
MODEL_CARD_NAME = "modelcard.json"
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
CLOUDFRONT_DISTRIB_PREFIX = "https://d2ws9o8vfrpkyk.cloudfront.net"
def is_torch_available():
return _torch_available
def is_tf_available():
return _tf_available
if not six.PY2:
def add_start_docstrings(*docstr):
def docstring_decorator(fn):
fn.__doc__ = ''.join(docstr) + fn.__doc__
return fn
return docstring_decorator
def add_end_docstrings(*docstr):
def docstring_decorator(fn):
fn.__doc__ = fn.__doc__ + ''.join(docstr)
return fn
return docstring_decorator
else:
# Not possible to update class docstrings on python2
def add_start_docstrings(*docstr):
def docstring_decorator(fn):
return fn
return docstring_decorator
def add_end_docstrings(*docstr):
def docstring_decorator(fn):
return fn
return docstring_decorator
def is_remote_url(url_or_filename):
parsed = urlparse(url_or_filename)
return parsed.scheme in ('http', 'https', 's3')
def hf_bucket_url(identifier, postfix=None, cdn=False):
endpoint = CLOUDFRONT_DISTRIB_PREFIX if cdn else S3_BUCKET_PREFIX
if postfix is None:
return "/".join((endpoint, identifier))
else:
return "/".join((endpoint, identifier, postfix))
def url_to_filename(url, etag=None):
"""
Convert `url` into a hashed filename in a repeatable way.
If `etag` is specified, append its hash to the url's, delimited
by a period.
If the url ends with .h5 (Keras HDF5 weights) adds '.h5' to the name
so that TF 2.0 can identify it as a HDF5 file
(see https://github.com/tensorflow/tensorflow/blob/00fad90125b18b80fe054de1055770cfb8fe4ba3/tensorflow/python/keras/engine/network.py#L1380)
"""
url_bytes = url.encode('utf-8')
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode('utf-8')
etag_hash = sha256(etag_bytes)
filename += '.' + etag_hash.hexdigest()
if url.endswith('.h5'):
filename += '.h5'
return filename
def filename_to_url(filename, cache_dir=None):
"""
Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
"""
if cache_dir is None:
cache_dir = TRANSFORMERS_CACHE
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise EnvironmentError("file {} not found".format(cache_path))
meta_path = cache_path + '.json'
if not os.path.exists(meta_path):
raise EnvironmentError("file {} not found".format(meta_path))
with open(meta_path, encoding="utf-8") as meta_file:
metadata = json.load(meta_file)
url = metadata['url']
etag = metadata['etag']
return url, etag
def cached_path(url_or_filename, cache_dir=None, force_download=False, proxies=None, resume_download=False, user_agent=None):
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
Args:
cache_dir: specify a cache directory to save the file to (overwrite the default cache dir).
        force_download: if True, re-download the file even if it's already cached in the cache dir.
        resume_download: if True, resume the download if an incompletely received file is found.
user_agent: Optional string or dict that will be appended to the user-agent on remote requests.
"""
if cache_dir is None:
cache_dir = TRANSFORMERS_CACHE
if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
if is_remote_url(url_or_filename):
# URL, so get it from the cache (downloading if necessary)
return get_from_cache(url_or_filename, cache_dir=cache_dir,
force_download=force_download, proxies=proxies,
resume_download=resume_download, user_agent=user_agent)
elif os.path.exists(url_or_filename):
# File, and it exists.
return url_or_filename
elif urlparse(url_or_filename).scheme == '':
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
def split_s3_path(url):
"""Split a full s3 path into the bucket name and path."""
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError("bad s3 path {}".format(url))
bucket_name = parsed.netloc
s3_path = parsed.path
# Remove '/' at beginning of path.
if s3_path.startswith("/"):
s3_path = s3_path[1:]
return bucket_name, s3_path
def s3_request(func):
"""
Wrapper function for s3 requests in order to create more helpful error
messages.
"""
@wraps(func)
def wrapper(url, *args, **kwargs):
try:
return func(url, *args, **kwargs)
except ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise EnvironmentError("file {} not found".format(url))
else:
raise
return wrapper
@s3_request
def s3_etag(url, proxies=None):
"""Check ETag on S3 object."""
s3_resource = boto3.resource("s3", config=Config(proxies=proxies))
bucket_name, s3_path = split_s3_path(url)
s3_object = s3_resource.Object(bucket_name, s3_path)
return s3_object.e_tag
@s3_request
def s3_get(url, temp_file, proxies=None):
"""Pull a file directly from S3."""
s3_resource = boto3.resource("s3", config=Config(proxies=proxies))
bucket_name, s3_path = split_s3_path(url)
s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
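    # Stream the response in 1KB chunks, sending a Range header when resuming a partial
    # download and reporting progress with tqdm.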
ua = "transformers/{}; python/{}".format(__version__, sys.version.split()[0])
if isinstance(user_agent, dict):
ua += "; " + "; ".join(
"{}/{}".format(k, v) for k, v in user_agent.items()
)
elif isinstance(user_agent, six.string_types):
ua += "; "+ user_agent
headers = {
"user-agent": ua
}
if resume_size > 0:
headers['Range'] = 'bytes=%d-' % (resume_size,)
response = requests.get(url, stream=True, proxies=proxies, headers=headers)
if response.status_code == 416: # Range not satisfiable
return
content_length = response.headers.get('Content-Length')
total = resume_size + int(content_length) if content_length is not None else None
progress = tqdm(unit="B", unit_scale=True, total=total, initial=resume_size,
desc="Downloading", disable=bool(logger.level<=logging.INFO))
for chunk in response.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
def get_from_cache(url, cache_dir=None, force_download=False, proxies=None, etag_timeout=10, resume_download=False, user_agent=None):
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""
if cache_dir is None:
cache_dir = TRANSFORMERS_CACHE
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
if sys.version_info[0] == 2 and not isinstance(cache_dir, str):
cache_dir = str(cache_dir)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
# Get eTag to add to filename, if it exists.
if url.startswith("s3://"):
etag = s3_etag(url, proxies=proxies)
else:
try:
response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
if response.status_code != 200:
etag = None
else:
etag = response.headers.get("ETag")
except (EnvironmentError, requests.exceptions.Timeout):
etag = None
if sys.version_info[0] == 2 and etag is not None:
etag = etag.decode('utf-8')
filename = url_to_filename(url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
# If we don't have a connection (etag is None) and can't identify the file
# try to get the last downloaded one
if not os.path.exists(cache_path) and etag is None:
matching_files = fnmatch.filter(os.listdir(cache_dir), filename + '.*')
matching_files = list(filter(lambda s: not s.endswith('.json'), matching_files))
if matching_files:
cache_path = os.path.join(cache_dir, matching_files[-1])
if resume_download:
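        # Append to a '<cache_path>.incomplete' file so an interrupted download can be
        # resumed from its current size on a later call.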
incomplete_path = cache_path + '.incomplete'
@contextmanager
def _resumable_file_manager():
with open(incomplete_path,'a+b') as f:
yield f
os.remove(incomplete_path)
temp_file_manager = _resumable_file_manager
if os.path.exists(incomplete_path):
resume_size = os.stat(incomplete_path).st_size
else:
resume_size = 0
else:
temp_file_manager = tempfile.NamedTemporaryFile
resume_size = 0
if etag is not None and (not os.path.exists(cache_path) or force_download):
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
logger.info("%s not found in cache or force_download set to True, downloading to %s", url, temp_file.name)
# GET file object
if url.startswith("s3://"):
if resume_download:
logger.warn('Warning: resumable downloads are not implemented for "s3://" urls')
s3_get(url, temp_file, proxies=proxies)
else:
http_get(url, temp_file, proxies=proxies, resume_size=resume_size, user_agent=user_agent)
# we are copying the file before closing it, so flush to avoid truncation
temp_file.flush()
# shutil.copyfileobj() starts at the current position, so go to the start
temp_file.seek(0)
logger.info("copying %s to cache at %s", temp_file.name, cache_path)
with open(cache_path, 'wb') as cache_file:
shutil.copyfileobj(temp_file, cache_file)
logger.info("creating metadata file for %s", cache_path)
meta = {'url': url, 'etag': etag}
meta_path = cache_path + '.json'
with open(meta_path, 'w') as meta_file:
output_string = json.dumps(meta)
if sys.version_info[0] == 2 and isinstance(output_string, str):
output_string = unicode(output_string, 'utf-8') # The beauty of python 2
meta_file.write(output_string)
logger.info("removing temp file %s", temp_file.name)
return cache_path
|
[] |
[] |
[
"TORCH_HOME",
"PYTORCH_TRANSFORMERS_CACHE",
"XDG_CACHE_HOME",
"USE_TF",
"PYTORCH_PRETRAINED_BERT_CACHE",
"USE_TORCH"
] |
[]
|
["TORCH_HOME", "PYTORCH_TRANSFORMERS_CACHE", "XDG_CACHE_HOME", "USE_TF", "PYTORCH_PRETRAINED_BERT_CACHE", "USE_TORCH"]
|
python
| 6 | 0 | |
fschecker/checker_test.go
|
//go:build go1.12
// +build go1.12
package fschecker_test
import (
"errors"
"flag"
"os"
"os/exec"
"path/filepath"
"strings"
"testing"
"github.com/gqlgo/gqlanalysis"
"github.com/gqlgo/gqlanalysis/fschecker"
)
func main() {
var (
report bool
hasError bool
)
flag.BoolVar(&report, "report", false, "report diagnositc")
flag.BoolVar(&hasError, "error", false, "return an error")
a := &gqlanalysis.Analyzer{
Name: "lint",
Doc: "test linter",
Run: func(pass *gqlanalysis.Pass) (interface{}, error) {
if report {
pos := pass.Queries[0].Position
pass.Reportf(pos, "NG")
}
if hasError {
return nil, errors.New("error")
}
return nil, nil
},
}
fschecker.Main(os.DirFS("."), a)
}
func TestExitCode(t *testing.T) {
if os.Getenv("FSCHECKER_CHILD") == "1" {
// child process
// replace [progname -test.run=TestExitCode -- ...]
// by [progname ...]
os.Args = os.Args[2:]
os.Args[0] = "lint"
main()
panic("unreachable")
}
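	// Each case re-runs this test binary as the analyzer (see the FSCHECKER_CHILD branch above)
	// and checks the exit code: 0 for a clean run, 1 when the analyzer returns an error,
	// 2 when a diagnostic is reported.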
cases := []struct {
args string
want int
}{
{"", 0},
{"-error", 1},
{"-report", 2},
}
for _, tt := range cases {
schema := filepath.Join("testdata", "schema", "**", "**.graphql")
query := filepath.Join("testdata", "query", "**", "**.graphql")
args := []string{"-test.run=TestExitCode", "--", "-schema", schema, "-query", query}
args = append(args, strings.Split(tt.args, " ")...)
cmd := exec.Command(os.Args[0], args...)
cmd.Env = append(os.Environ(), "FSCHECKER_CHILD=1")
out, err := cmd.CombinedOutput()
if len(out) > 0 {
t.Logf("%s: out=<<%s>>", tt.args, out)
}
var exitcode int
if err, ok := err.(*exec.ExitError); ok {
exitcode = err.ExitCode() // requires go1.12
}
if exitcode != tt.want {
t.Errorf("%s: exited %d, want %d", tt.args, exitcode, tt.want)
}
}
}
|
[
"\"FSCHECKER_CHILD\""
] |
[] |
[
"FSCHECKER_CHILD"
] |
[]
|
["FSCHECKER_CHILD"]
|
go
| 1 | 0 | |
vendor/github.com/gophercloud/gophercloud/openstack/client.go
|
package openstack
import (
"fmt"
"reflect"
"github.com/gophercloud/gophercloud"
tokens2 "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens"
tokens3 "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens"
"github.com/gophercloud/gophercloud/openstack/utils"
)
const (
// v2 represents Keystone v2.
// It should never increase beyond 2.0.
v2 = "v2.0"
// v3 represents Keystone v3.
// The version can be anything from v3 to v3.x.
v3 = "v3"
)
/*
NewClient prepares an unauthenticated ProviderClient instance.
Most users will probably prefer using the AuthenticatedClient function
instead.
This is useful if you wish to explicitly control the version of the identity
service that's used for authentication, for example.
A basic example of using this would be:
ao, err := openstack.AuthOptionsFromEnv()
provider, err := openstack.NewClient(ao.IdentityEndpoint)
client, err := openstack.NewIdentityV3(provider, gophercloud.EndpointOpts{})
*/
func NewClient(endpoint string) (*gophercloud.ProviderClient, error) {
base, err := utils.BaseEndpoint(endpoint)
if err != nil {
return nil, err
}
endpoint = gophercloud.NormalizeURL(endpoint)
base = gophercloud.NormalizeURL(base)
p := new(gophercloud.ProviderClient)
p.IdentityBase = base
p.IdentityEndpoint = endpoint
p.UseTokenLock()
return p, nil
}
/*
AuthenticatedClient logs in to an OpenStack cloud found at the identity endpoint
specified by the options, acquires a token, and returns a Provider Client
instance that's ready to operate.
If the full path to a versioned identity endpoint was specified (example:
http://example.com:5000/v3), that path will be used as the endpoint to query.
If a versionless endpoint was specified (example: http://example.com:5000/),
the endpoint will be queried to determine which versions of the identity service
are available, then chooses the most recent or most supported version.
Example:
ao, err := openstack.AuthOptionsFromEnv()
provider, err := openstack.AuthenticatedClient(ao)
client, err := openstack.NewNetworkV2(client, gophercloud.EndpointOpts{
Region: os.Getenv("OS_REGION_NAME"),
})
*/
func AuthenticatedClient(options gophercloud.AuthOptions) (*gophercloud.ProviderClient, error) {
client, err := NewClient(options.IdentityEndpoint)
if err != nil {
return nil, err
}
err = Authenticate(client, options)
if err != nil {
return nil, err
}
return client, nil
}
// Authenticate or re-authenticate against the most recent identity service
// supported at the provided endpoint.
func Authenticate(client *gophercloud.ProviderClient, options gophercloud.AuthOptions) error {
versions := []*utils.Version{
{ID: v2, Priority: 20, Suffix: "/v2.0/"},
{ID: v3, Priority: 30, Suffix: "/v3/"},
}
chosen, endpoint, err := utils.ChooseVersion(client, versions)
if err != nil {
return err
}
switch chosen.ID {
case v2:
return v2auth(client, endpoint, options, gophercloud.EndpointOpts{})
case v3:
return v3auth(client, endpoint, &options, gophercloud.EndpointOpts{})
default:
// The switch statement must be out of date from the versions list.
return fmt.Errorf("Unrecognized identity version: %s", chosen.ID)
}
}
// AuthenticateV2 explicitly authenticates against the identity v2 endpoint.
func AuthenticateV2(client *gophercloud.ProviderClient, options gophercloud.AuthOptions, eo gophercloud.EndpointOpts) error {
return v2auth(client, "", options, eo)
}
func v2auth(client *gophercloud.ProviderClient, endpoint string, options gophercloud.AuthOptions, eo gophercloud.EndpointOpts) error {
v2Client, err := NewIdentityV2(client, eo)
if err != nil {
return err
}
if endpoint != "" {
v2Client.Endpoint = endpoint
}
v2Opts := tokens2.AuthOptions{
IdentityEndpoint: options.IdentityEndpoint,
Username: options.Username,
Password: options.Password,
TenantID: options.TenantID,
TenantName: options.TenantName,
AllowReauth: options.AllowReauth,
TokenID: options.TokenID,
}
result := tokens2.Create(v2Client, v2Opts)
token, err := result.ExtractToken()
if err != nil {
return err
}
catalog, err := result.ExtractServiceCatalog()
if err != nil {
return err
}
if options.AllowReauth {
// here we're creating a throw-away client (tac). it's a copy of the user's provider client, but
// with the token and reauth func zeroed out. combined with setting `AllowReauth` to `false`,
// this should retry authentication only once
tac := *client
tac.ReauthFunc = nil
tac.TokenID = ""
tao := options
tao.AllowReauth = false
client.ReauthFunc = func() error {
err := v2auth(&tac, endpoint, tao, eo)
if err != nil {
return err
}
client.TokenID = tac.TokenID
return nil
}
}
client.TokenID = token.ID
client.EndpointLocator = func(opts gophercloud.EndpointOpts) (string, error) {
return V2EndpointURL(catalog, opts)
}
return nil
}
// AuthenticateV3 explicitly authenticates against the identity v3 service.
func AuthenticateV3(client *gophercloud.ProviderClient, options tokens3.AuthOptionsBuilder, eo gophercloud.EndpointOpts) error {
return v3auth(client, "", options, eo)
}
func v3auth(client *gophercloud.ProviderClient, endpoint string, opts tokens3.AuthOptionsBuilder, eo gophercloud.EndpointOpts) error {
// Override the generated service endpoint with the one returned by the version endpoint.
v3Client, err := NewIdentityV3(client, eo)
if err != nil {
return err
}
if endpoint != "" {
v3Client.Endpoint = endpoint
}
result := tokens3.Create(v3Client, opts)
token, err := result.ExtractToken()
if err != nil {
return err
}
catalog, err := result.ExtractServiceCatalog()
if err != nil {
return err
}
client.TokenID = token.ID
if opts.CanReauth() {
// here we're creating a throw-away client (tac). it's a copy of the user's provider client, but
// with the token and reauth func zeroed out. combined with setting `AllowReauth` to `false`,
// this should retry authentication only once
tac := *client
tac.ReauthFunc = nil
tac.TokenID = ""
var tao tokens3.AuthOptionsBuilder
switch ot := opts.(type) {
case *gophercloud.AuthOptions:
o := *ot
o.AllowReauth = false
tao = &o
case *tokens3.AuthOptions:
o := *ot
o.AllowReauth = false
tao = &o
default:
tao = opts
}
client.ReauthFunc = func() error {
err := v3auth(&tac, endpoint, tao, eo)
if err != nil {
return err
}
client.TokenID = tac.TokenID
return nil
}
}
client.EndpointLocator = func(opts gophercloud.EndpointOpts) (string, error) {
return V3EndpointURL(catalog, opts)
}
return nil
}
// NewIdentityV2 creates a ServiceClient that may be used to interact with the
// v2 identity service.
func NewIdentityV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
endpoint := client.IdentityBase + "v2.0/"
clientType := "identity"
var err error
if !reflect.DeepEqual(eo, gophercloud.EndpointOpts{}) {
eo.ApplyDefaults(clientType)
endpoint, err = client.EndpointLocator(eo)
if err != nil {
return nil, err
}
}
return &gophercloud.ServiceClient{
ProviderClient: client,
Endpoint: endpoint,
Type: clientType,
}, nil
}
// NewIdentityV3 creates a ServiceClient that may be used to access the v3
// identity service.
func NewIdentityV3(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
endpoint := client.IdentityBase + "v3/"
clientType := "identity"
var err error
if !reflect.DeepEqual(eo, gophercloud.EndpointOpts{}) {
eo.ApplyDefaults(clientType)
endpoint, err = client.EndpointLocator(eo)
if err != nil {
return nil, err
}
}
// Ensure endpoint still has a suffix of v3.
// This is because EndpointLocator might have found a versionless
// endpoint or the published endpoint is still /v2.0. In both
// cases, we need to fix the endpoint to point to /v3.
base, err := utils.BaseEndpoint(endpoint)
if err != nil {
return nil, err
}
base = gophercloud.NormalizeURL(base)
endpoint = base + "v3/"
return &gophercloud.ServiceClient{
ProviderClient: client,
Endpoint: endpoint,
Type: clientType,
}, nil
}
func initClientOpts(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts, clientType string) (*gophercloud.ServiceClient, error) {
sc := new(gophercloud.ServiceClient)
eo.ApplyDefaults(clientType)
url, err := client.EndpointLocator(eo)
if err != nil {
return sc, err
}
sc.ProviderClient = client
sc.Endpoint = url
sc.Type = clientType
return sc, nil
}
// NewObjectStorageV1 creates a ServiceClient that may be used with the v1
// object storage package.
func NewObjectStorageV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
return initClientOpts(client, eo, "object-store")
}
// NewComputeV2 creates a ServiceClient that may be used with the v2 compute
// package.
func NewComputeV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
return initClientOpts(client, eo, "compute")
}
// NewNetworkV2 creates a ServiceClient that may be used with the v2 network
// package.
func NewNetworkV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
sc, err := initClientOpts(client, eo, "network")
sc.ResourceBase = sc.Endpoint + "v2.0/"
return sc, err
}
// NewBlockStorageV1 creates a ServiceClient that may be used to access the v1
// block storage service.
func NewBlockStorageV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
return initClientOpts(client, eo, "volume")
}
// NewBlockStorageV2 creates a ServiceClient that may be used to access the v2
// block storage service.
func NewBlockStorageV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
return initClientOpts(client, eo, "volumev2")
}
// NewBlockStorageV3 creates a ServiceClient that may be used to access the v3 block storage service.
func NewBlockStorageV3(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
return initClientOpts(client, eo, "volumev3")
}
// NewSharedFileSystemV2 creates a ServiceClient that may be used to access the v2 shared file system service.
func NewSharedFileSystemV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
return initClientOpts(client, eo, "sharev2")
}
// NewCDNV1 creates a ServiceClient that may be used to access the OpenStack v1
// CDN service.
func NewCDNV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
return initClientOpts(client, eo, "cdn")
}
// NewOrchestrationV1 creates a ServiceClient that may be used to access the v1
// orchestration service.
func NewOrchestrationV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
return initClientOpts(client, eo, "orchestration")
}
// NewDBV1 creates a ServiceClient that may be used to access the v1 DB service.
func NewDBV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
return initClientOpts(client, eo, "database")
}
// NewDNSV2 creates a ServiceClient that may be used to access the v2 DNS
// service.
func NewDNSV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
sc, err := initClientOpts(client, eo, "dns")
sc.ResourceBase = sc.Endpoint + "v2/"
return sc, err
}
// NewImageServiceV2 creates a ServiceClient that may be used to access the v2
// image service.
func NewImageServiceV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
sc, err := initClientOpts(client, eo, "image")
sc.ResourceBase = sc.Endpoint + "v2/"
return sc, err
}
// NewLoadBalancerV2 creates a ServiceClient that may be used to access the v2
// load balancer service.
func NewLoadBalancerV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
sc, err := initClientOpts(client, eo, "load-balancer")
sc.ResourceBase = sc.Endpoint + "v2.0/"
return sc, err
}
// NewClusteringV1 creates a ServiceClient that may be used with the v1 clustering
// package.
func NewClusteringV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
return initClientOpts(client, eo, "clustering")
}
// NewMessagingV2 creates a ServiceClient that may be used with the v2 messaging
// service.
func NewMessagingV2(client *gophercloud.ProviderClient, clientID string, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
sc, err := initClientOpts(client, eo, "messaging")
sc.MoreHeaders = map[string]string{"Client-ID": clientID}
return sc, err
}
// NewContainerV1 creates a ServiceClient that may be used with v1 container package
func NewContainerV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
return initClientOpts(client, eo, "container")
}
// NewKeyManagerV1 creates a ServiceClient that may be used with the v1 key
// manager service.
func NewKeyManagerV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
sc, err := initClientOpts(client, eo, "key-manager")
sc.ResourceBase = sc.Endpoint + "v1/"
return sc, err
}
// NewContainerInfraV1 creates a ServiceClient that may be used with the v1 container infra management
// package.
func NewContainerInfraV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
return initClientOpts(client, eo, "container-infra")
}
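// exampleComputeClient is an illustrative sketch, not part of the original
// file: it shows how the helpers above are typically composed. It assumes the
// caller has already constructed a ProviderClient (e.g. via this package's
// NewClient/AuthenticatedClient helpers, not shown here) and filled in
// AuthOptions; the region value is a placeholder.
func exampleComputeClient(provider *gophercloud.ProviderClient, opts gophercloud.AuthOptions, region string) (*gophercloud.ServiceClient, error) {
	eo := gophercloud.EndpointOpts{Region: region}
	// AuthenticateV3 stores the token and an EndpointLocator on the provider.
	if err := AuthenticateV3(provider, &opts, eo); err != nil {
		return nil, err
	}
	// NewComputeV2 then resolves the "compute" endpoint from the service catalog.
	return NewComputeV2(provider, eo)
}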
|
[
"\"OS_REGION_NAME\""
] |
[] |
[
"OS_REGION_NAME"
] |
[]
|
["OS_REGION_NAME"]
|
go
| 1 | 0 | |
test/functional/test_framework/test_node.py
|
#!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Class for bitcoind node under test"""
import contextlib
import decimal
import errno
from enum import Enum
import http.client
import json
import logging
import os
import re
import subprocess
import tempfile
import time
import urllib.parse
import collections
from .authproxy import JSONRPCException
from .util import (
append_config,
delete_cookie_file,
get_rpc_proxy,
rpc_url,
wait_until,
p2p_port,
)
# For Python 3.4 compatibility
JSONDecodeError = getattr(json, "JSONDecodeError", ValueError)
BITCOIND_PROC_WAIT_TIMEOUT = 60
class FailedToStartError(Exception):
"""Raised when a node fails to start correctly."""
class ErrorMatch(Enum):
FULL_TEXT = 1
FULL_REGEX = 2
PARTIAL_REGEX = 3
class TestNode():
"""A class for representing a bitcoind node under test.
This class contains:
- state about the node (whether it's running, etc)
- a Python subprocess.Popen object representing the running process
- an RPC connection to the node
- one or more P2P connections to the node
To make things easier for the test writer, any unrecognised messages will
be dispatched to the RPC connection."""
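# Illustrative note (not part of the original docstring): e.g. node.getblockcount()
# is not defined on TestNode, so __getattr__ below forwards it to self.rpc
# (or to the bitcoin-cli wrapper when use_cli=True).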
def __init__(self, i, datadir, *, rpchost, timewait, bitcoind, bitcoin_cli, mocktime, coverage_dir, extra_conf=None, extra_args=None, use_cli=False):
self.index = i
self.datadir = datadir
self.stdout_dir = os.path.join(self.datadir, "stdout")
self.stderr_dir = os.path.join(self.datadir, "stderr")
self.rpchost = rpchost
self.rpc_timeout = timewait
self.binary = bitcoind
self.coverage_dir = coverage_dir
if extra_conf is not None:
append_config(datadir, extra_conf)
# Most callers will just need to add extra args to the standard list below.
# For those callers that need more flexibility, they can just set the args property directly.
# Note that common args are set in the config file (see initialize_datadir)
self.extra_args = extra_args
self.args = [
self.binary,
"-datadir=" + self.datadir,
"-logtimemicros",
"-debug",
"-debugexclude=libevent",
"-debugexclude=leveldb",
"-mocktime=" + str(mocktime),
"-uacomment=testnode%d" % i
]
self.cli = TestNodeCLI(bitcoin_cli, self.datadir)
self.use_cli = use_cli
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.url = None
self.log = logging.getLogger('TestFramework.node%d' % i)
self.cleanup_on_exit = True # Whether to kill the node when this object goes away
self.p2ps = []
def get_deterministic_priv_key(self):
"""Return a deterministic priv key in base58, that only depends on the node's index"""
AddressKeyPair = collections.namedtuple('AddressKeyPair', ['address', 'key'])
PRIV_KEYS = [
# address , privkey
AddressKeyPair('mjTkW3DjgyZck4KbiRusZsqTgaYTxdSz6z', 'cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW'),
AddressKeyPair('msX6jQXvxiNhx3Q62PKeLPrhrqZQdSimTg', 'cUxsWyKyZ9MAQTaAhUQWJmBbSvHMwSmuv59KgxQV7oZQU3PXN3KE'),
AddressKeyPair('mnonCMyH9TmAsSj3M59DsbH8H63U3RKoFP', 'cTrh7dkEAeJd6b3MRX9bZK8eRmNqVCMH3LSUkE3dSFDyzjU38QxK'),
AddressKeyPair('mqJupas8Dt2uestQDvV2NH3RU8uZh2dqQR', 'cVuKKa7gbehEQvVq717hYcbE9Dqmq7KEBKqWgWrYBa2CKKrhtRim'),
AddressKeyPair('msYac7Rvd5ywm6pEmkjyxhbCDKqWsVeYws', 'cQDCBuKcjanpXDpCqacNSjYfxeQj8G6CAtH1Dsk3cXyqLNC4RPuh'),
AddressKeyPair('n2rnuUnwLgXqf9kk2kjvVm8R5BZK1yxQBi', 'cQakmfPSLSqKHyMFGwAqKHgWUiofJCagVGhiB4KCainaeCSxeyYq'),
AddressKeyPair('myzuPxRwsf3vvGzEuzPfK9Nf2RfwauwYe6', 'cQMpDLJwA8DBe9NcQbdoSb1BhmFxVjWD5gRyrLZCtpuF9Zi3a9RK'),
AddressKeyPair('mumwTaMtbxEPUswmLBBN3vM9oGRtGBrys8', 'cSXmRKXVcoouhNNVpcNKFfxsTsToY5pvB9DVsFksF1ENunTzRKsy'),
AddressKeyPair('mpV7aGShMkJCZgbW7F6iZgrvuPHjZjH9qg', 'cSoXt6tm3pqy43UMabY6eUTmR3eSUYFtB2iNQDGgb3VUnRsQys2k'),
]
return PRIV_KEYS[self.index]
def get_mem_rss_kilobytes(self):
"""Get the memory usage (RSS) per `ps`.
Returns None if `ps` is unavailable.
"""
assert self.running
try:
return int(subprocess.check_output(
["ps", "h", "-o", "rss", "{}".format(self.process.pid)],
stderr=subprocess.DEVNULL).split()[-1])
# Avoid failing on platforms where ps isn't installed.
#
# We could later use something like `psutils` to work across platforms.
except (FileNotFoundError, subprocess.SubprocessError):
self.log.exception("Unable to get memory usage")
return None
def _node_msg(self, msg: str) -> str:
"""Return a modified msg that identifies this node by its index as a debugging aid."""
return "[node %d] %s" % (self.index, msg)
def _raise_assertion_error(self, msg: str):
"""Raise an AssertionError with msg modified to identify this node."""
raise AssertionError(self._node_msg(msg))
def __del__(self):
# Ensure that we don't leave any bitcoind processes lying around after
# the test ends
if self.process and self.cleanup_on_exit:
# Should only happen on test failure
# Avoid using logger, as that may have already been shutdown when
# this destructor is called.
print(self._node_msg("Cleaning up leftover process"))
self.process.kill()
def __getattr__(self, name):
"""Dispatches any unrecognised messages to the RPC connection or a CLI instance."""
if self.use_cli:
return getattr(self.cli, name)
else:
assert self.rpc_connected and self.rpc is not None, self._node_msg("Error: no RPC connection")
return getattr(self.rpc, name)
def start(self, extra_args=None, *, stdout=None, stderr=None, **kwargs):
"""Start the node."""
if extra_args is None:
extra_args = self.extra_args
# Add a new stdout and stderr file each time bitcoind is started
if stderr is None:
stderr = tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False)
if stdout is None:
stdout = tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False)
self.stderr = stderr
self.stdout = stdout
# Delete any existing cookie file -- if such a file exists (eg due to
# unclean shutdown), it will get overwritten anyway by bitcoind, and
# potentially interfere with our attempt to authenticate
delete_cookie_file(self.datadir)
# add environment variable LIBC_FATAL_STDERR_=1 so that libc errors are written to stderr and not the terminal
subp_env = dict(os.environ, LIBC_FATAL_STDERR_="1")
self.process = subprocess.Popen(self.args + extra_args, env=subp_env, stdout=stdout, stderr=stderr, **kwargs)
self.running = True
self.log.debug("bitcoind started, waiting for RPC to come up")
def wait_for_rpc_connection(self):
"""Sets up an RPC connection to the bitcoind process. Raises an AssertionError if unable to connect within the RPC timeout."""
# Poll at a rate of four times per second
poll_per_s = 4
for _ in range(poll_per_s * self.rpc_timeout):
if self.process.poll() is not None:
raise FailedToStartError(self._node_msg(
'bitcoind exited with status {} during initialization'.format(self.process.returncode)))
try:
self.rpc = get_rpc_proxy(rpc_url(self.datadir, self.index, self.rpchost), self.index, timeout=self.rpc_timeout, coveragedir=self.coverage_dir)
self.rpc.getblockcount()
# If the call to getblockcount() succeeds then the RPC connection is up
self.rpc_connected = True
self.url = self.rpc.url
self.log.debug("RPC successfully started")
return
except IOError as e:
if e.errno != errno.ECONNREFUSED: # Port not yet open?
raise # unknown IO error
except JSONRPCException as e: # Initialization phase
# -28 RPC in warmup
# -342 Service unavailable, RPC server started but is shutting down due to error
if e.error['code'] != -28 and e.error['code'] != -342:
raise # unknown JSON RPC exception
except ValueError as e: # cookie file not found and no rpcuser or rpcpassword. bitcoind is still starting
if "No RPC credentials" not in str(e):
raise
time.sleep(1.0 / poll_per_s)
self._raise_assertion_error("Unable to connect to bitcoind")
def generate(self, nblocks, maxtries=1000000):
self.log.debug("TestNode.generate() dispatches `generate` call to `generatetoaddress`")
return self.generatetoaddress(nblocks=nblocks, address=self.get_deterministic_priv_key().address, maxtries=maxtries)
def get_wallet_rpc(self, wallet_name):
if self.use_cli:
return self.cli("-rpcwallet={}".format(wallet_name))
else:
assert self.rpc_connected and self.rpc, self._node_msg("RPC not connected")
wallet_path = "wallet/{}".format(urllib.parse.quote(wallet_name))
return self.rpc / wallet_path
def stop_node(self, expected_stderr='', wait=0):
"""Stop the node."""
if not self.running:
return
self.log.debug("Stopping node")
try:
self.stop(wait=wait)
except http.client.CannotSendRequest:
self.log.exception("Unable to stop node.")
# Check that stderr is as expected
self.stderr.seek(0)
stderr = self.stderr.read().decode('utf-8').strip()
if stderr != expected_stderr:
raise AssertionError("Unexpected stderr {} != {}".format(stderr, expected_stderr))
self.stdout.close()
self.stderr.close()
del self.p2ps[:]
def is_node_stopped(self):
"""Checks whether the node has stopped.
Returns True if the node has stopped. False otherwise.
This method is responsible for freeing resources (self.process)."""
if not self.running:
return True
return_code = self.process.poll()
if return_code is None:
return False
# process has stopped. Assert that it didn't return an error code.
assert return_code == 0, self._node_msg(
"Node returned non-zero exit code (%d) when stopping" % return_code)
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.log.debug("Node stopped")
return True
def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT):
wait_until(self.is_node_stopped, timeout=timeout)
@contextlib.contextmanager
def assert_debug_log(self, expected_msgs):
debug_log = os.path.join(self.datadir, 'regtest', 'debug.log')
with open(debug_log, encoding='utf-8') as dl:
dl.seek(0, 2)
prev_size = dl.tell()
try:
yield
finally:
with open(debug_log, encoding='utf-8') as dl:
dl.seek(prev_size)
log = dl.read()
print_log = " - " + "\n - ".join(log.splitlines())
for expected_msg in expected_msgs:
if re.search(re.escape(expected_msg), log, flags=re.MULTILINE) is None:
self._raise_assertion_error('Expected message "{}" does not partially match log:\n\n{}\n\n'.format(expected_msg, print_log))
@contextlib.contextmanager
def assert_memory_usage_stable(self, *, increase_allowed=0.03):
"""Context manager that allows the user to assert that a node's memory usage (RSS)
hasn't increased beyond some threshold percentage.
Args:
increase_allowed (float): the fractional increase in memory allowed until failure;
e.g. `0.12` for up to 12% increase allowed.
"""
before_memory_usage = self.get_mem_rss_kilobytes()
yield
after_memory_usage = self.get_mem_rss_kilobytes()
if not (before_memory_usage and after_memory_usage):
self.log.warning("Unable to detect memory usage (RSS) - skipping memory check.")
return
perc_increase_memory_usage = (after_memory_usage / before_memory_usage) - 1
if perc_increase_memory_usage > increase_allowed:
self._raise_assertion_error(
"Memory usage increased over threshold of {:.3f}% from {} to {} ({:.3f}%)".format(
increase_allowed * 100, before_memory_usage, after_memory_usage,
perc_increase_memory_usage * 100))
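# Illustrative usage sketch (not part of the original class), e.g. from a test:
#
#   with node.assert_memory_usage_stable(increase_allowed=0.03):
#       do_something_that_should_not_leak(node)
#
# `do_something_that_should_not_leak` is a hypothetical helper, not framework code.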
def assert_start_raises_init_error(self, extra_args=None, expected_msg=None, match=ErrorMatch.FULL_TEXT, *args, **kwargs):
"""Attempt to start the node and expect it to raise an error.
extra_args: extra arguments to pass through to bitcoind
expected_msg: regex that stderr should match when bitcoind fails
Will throw if bitcoind starts without an error.
Will throw if an expected_msg is provided and it does not match bitcoind's stderr."""
with tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False) as log_stderr, \
tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) as log_stdout:
try:
self.start(extra_args, stdout=log_stdout, stderr=log_stderr, *args, **kwargs)
self.wait_for_rpc_connection()
self.stop_node()
self.wait_until_stopped()
except FailedToStartError as e:
self.log.debug('bitcoind failed to start: %s', e)
self.running = False
self.process = None
# Check stderr for expected message
if expected_msg is not None:
log_stderr.seek(0)
stderr = log_stderr.read().decode('utf-8').strip()
if match == ErrorMatch.PARTIAL_REGEX:
if re.search(expected_msg, stderr, flags=re.MULTILINE) is None:
self._raise_assertion_error(
'Expected message "{}" does not partially match stderr:\n"{}"'.format(expected_msg, stderr))
elif match == ErrorMatch.FULL_REGEX:
if re.fullmatch(expected_msg, stderr) is None:
self._raise_assertion_error(
'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
elif match == ErrorMatch.FULL_TEXT:
if expected_msg != stderr:
self._raise_assertion_error(
'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
else:
if expected_msg is None:
assert_msg = "bitcoind should have exited with an error"
else:
assert_msg = "bitcoind should have exited with expected error " + expected_msg
self._raise_assertion_error(assert_msg)
def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, **kwargs):
"""Add a p2p connection to the node.
This method adds the p2p connection to the self.p2ps list and also
returns the connection to the caller."""
if 'dstport' not in kwargs:
kwargs['dstport'] = p2p_port(self.index)
if 'dstaddr' not in kwargs:
kwargs['dstaddr'] = '127.0.0.1'
p2p_conn.peer_connect(**kwargs)()
self.p2ps.append(p2p_conn)
if wait_for_verack:
p2p_conn.wait_for_verack()
return p2p_conn
@property
def p2p(self):
"""Return the first p2p connection
Convenience property - most tests only use a single p2p connection to each
node, so this saves having to write node.p2ps[0] many times."""
assert self.p2ps, self._node_msg("No p2p connection")
return self.p2ps[0]
def disconnect_p2ps(self):
"""Close all p2p connections to the node."""
for p in self.p2ps:
p.peer_disconnect()
del self.p2ps[:]
class TestNodeCLIAttr:
def __init__(self, cli, command):
self.cli = cli
self.command = command
def __call__(self, *args, **kwargs):
return self.cli.send_cli(self.command, *args, **kwargs)
def get_request(self, *args, **kwargs):
return lambda: self(*args, **kwargs)
class TestNodeCLI():
"""Interface to bitcoin-cli for an individual node"""
def __init__(self, binary, datadir):
self.options = []
self.binary = binary
self.datadir = datadir
self.input = None
self.log = logging.getLogger('TestFramework.bitcoincli')
def __call__(self, *options, input=None):
# TestNodeCLI is callable with bitcoin-cli command-line options
cli = TestNodeCLI(self.binary, self.datadir)
cli.options = [str(o) for o in options]
cli.input = input
return cli
def __getattr__(self, command):
return TestNodeCLIAttr(self, command)
def batch(self, requests):
results = []
for request in requests:
try:
results.append(dict(result=request()))
except JSONRPCException as e:
results.append(dict(error=e))
return results
def send_cli(self, command=None, *args, **kwargs):
"""Run bitcoin-cli command. Deserializes returned string as python object."""
pos_args = [str(arg).lower() if type(arg) is bool else str(arg) for arg in args]
named_args = [str(key) + "=" + str(value) for (key, value) in kwargs.items()]
assert not (pos_args and named_args), "Cannot use positional arguments and named arguments in the same bitcoin-cli call"
p_args = [self.binary, "-datadir=" + self.datadir] + self.options
if named_args:
p_args += ["-named"]
if command is not None:
p_args += [command]
p_args += pos_args + named_args
self.log.debug("Running bitcoin-cli command: %s" % command)
process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
cli_stdout, cli_stderr = process.communicate(input=self.input)
returncode = process.poll()
if returncode:
match = re.match(r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr)
if match:
code, message = match.groups()
raise JSONRPCException(dict(code=int(code), message=message))
# Ignore cli_stdout, raise with cli_stderr
raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr)
try:
return json.loads(cli_stdout, parse_float=decimal.Decimal)
except JSONDecodeError:
return cli_stdout.rstrip("\n")
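# Illustrative sketch (not part of the original framework): how TestNodeCLI is
# typically driven, assuming `node` is a started TestNode and bitcoin-cli is available:
#
#   node.cli.getblockchaininfo()                    # one-off call via bitcoin-cli
#   node.cli("-rpcwallet=w1").getbalance()          # extra CLI options per call
#   node.cli.batch([node.cli.getblockcount.get_request(),
#                   node.cli.getbestblockhash.get_request()])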
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
sublime/Packages/Minify/Minify.py
|
import sublime, sublime_plugin, re, os, subprocess, platform, ntpath, shlex
PLUGIN_DIR = os.getcwd() if int(sublime.version()) < 3000 else os.path.dirname(__file__)
SUBL_ASYNC = callable(getattr(sublime, 'set_timeout_async', None))
USE_SHELL = sublime.platform() == 'windows'
POPEN_ENV = ({'PATH': ':'.join(['/usr/local/bin', os.environ['PATH']])}) if sublime.platform() == 'osx' and os.path.isdir('/usr/local/bin') else None
if sublime.load_settings('Minify.sublime-settings').get('debug_mode'):
print('Minify: Sublime Platform:' + str(sublime.platform()))
print('Minify: Sublime Version:' + str(sublime.version()))
print('Minify: Python Version:' + str(platform.python_version()))
print('Minify: PLUGIN_DIR:' + str(PLUGIN_DIR))
print('Minify: SUBL_ASYNC:' + str(SUBL_ASYNC))
print('Minify: USE_SHELL:' + str(USE_SHELL))
print('Minify: POPEN_ENV:' + str(POPEN_ENV))
class MinifyUtils():
def fixStr(self, s):
return s.encode('utf8') if (type(s).__name__ == 'unicode') else s
def quoteChrs(self, s):
return s.replace("(", "^^(").replace(")", "^^)") if USE_SHELL else s
def runProgram(self, cmd, cwd = False):
if '>' in cmd:
p = subprocess.Popen(cmd, stderr=subprocess.PIPE, shell=USE_SHELL, env=POPEN_ENV)
output = p.communicate()[1]
else:
if cwd:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=USE_SHELL, env=POPEN_ENV, cwd=cwd)
else:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=USE_SHELL, env=POPEN_ENV)
output = p.communicate()[0]
return p.returncode, output
def get_setting(self, key):
settings = self.view.settings().get('Minify')
if settings is None or settings.get(key) is None:
settings = sublime.load_settings('Minify.sublime-settings')
return settings.get(key)
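# Illustrative note (not part of the original plugin): get_setting() first looks at a
# per-view/project "Minify" settings dict and falls back to Minify.sublime-settings,
# so a .sublime-project file can override defaults, e.g. (hypothetical values):
#
#   "settings": {"Minify": {"auto_minify_on_save": true, "cssminifier": "uglifycss"}}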
if not SUBL_ASYNC:
import threading
class RunCmdInOtherThread(MinifyUtils, threading.Thread):
def __init__(self, cmd, cwd = False):
self.cmd = cmd
self.retCode = 1
self.output = ''
self.cwd = cwd
threading.Thread.__init__(self)
def run(self):
if not SUBL_ASYNC and self.cwd:
old_cwd = os.getcwd()
os.chdir(self.cwd)
self.retCode, self.output = self.runProgram(self.cmd)
if not SUBL_ASYNC and self.cwd:
os.chdir(old_cwd)
class ThreadHandling(MinifyUtils):
def handle_result(self, cmd, outfile, retCode, output):
if retCode:
if output:
sublime.error_message(' '.join(cmd) + '\r\n\r\n' + output.decode('utf-8'))
else:
if self.get_setting('open_file'):
sublime.active_window().open_file(outfile)
def handle_thread(self, thread, outfile):
if thread.is_alive():
sublime.set_timeout(lambda: self.handle_thread(thread, outfile), 100)
else:
self.handle_result(thread.cmd, outfile, thread.retCode, thread.output)
def run_cmd(self, cmd, outfile, cwd=False):
if self.get_setting('debug_mode'):
print('Minify: Output file:' + str(outfile))
print('Minify: Command:' + str(cmd))
if SUBL_ASYNC:
retCode, output = self.runProgram(cmd, cwd)
self.handle_result(cmd, outfile, retCode, output)
else:
thread = RunCmdInOtherThread(cmd, cwd)
thread.start()
sublime.set_timeout(lambda: self.handle_thread(thread, outfile), 100)
class PluginBase(ThreadHandling):
def is_enabled(self):
filename = self.view.file_name()
return bool(type(filename).__name__ in ('str', 'unicode') and ((re.search(r'\.(?:css|js|json|html?|svg)$', filename)) or (re.search(r'(\.[^\.]+)$', filename) and re.search(r'/(?:CSS|JavaScript|JSON|HTML)\.tmLanguage$', self.view.settings().get('syntax')))))
def run(self, edit):
if SUBL_ASYNC:
sublime.set_timeout_async(lambda: self.do_action(), 0)
else:
self.do_action()
class MinifyClass(MinifyUtils):
def minify(self):
inpfile = self.view.file_name()
cwd = False
if type(inpfile).__name__ in ('str', 'unicode') and re.search(r'\.[^\.]+$', inpfile):
if self.view.is_dirty() and self.get_setting('save_first'):
self.view.run_command('save')
if self.get_setting('auto_minify_on_save'):
return
outfile = re.sub(r'(\.[^\.]+)$', r'.min\1', inpfile, 1)
syntax = self.view.settings().get('syntax')
if self.get_setting('debug_mode'):
print('Minify: Syntax: ' + str(syntax))
if re.search(r'\.js$', inpfile) or re.search(r'/JavaScript\.tmLanguage$', syntax):
cmd = self.fixStr(self.get_setting('uglifyjs_command') or 'uglifyjs').split()
cmd.extend([self.quoteChrs(inpfile), '-o', self.quoteChrs(outfile), '-m', '-c'])
eo = self.get_setting('uglifyjs_options')
if type(eo).__name__ in ('str', 'unicode'):
cmd.extend(self.fixStr(eo).split())
if self.get_setting('source_map'):
directory, rfile = ntpath.split(outfile)
mapfile = rfile or ntpath.basename(directory)
content = ''
if self.get_setting('js_map_content'):
content = ',content="' + (self.quoteChrs(inpfile + '.map') if os.path.isfile(inpfile + '.map') else 'inline') + '"'
cmd.extend(['--source-map', "url='" + self.quoteChrs(mapfile) + ".map'" + content + ",root='',base='" + self.quoteChrs(directory) + "'"])
if self.get_setting('keep_comments'):
cmd.extend(['--comments'])
eo = self.get_setting('comments_to_keep')
if type(eo).__name__ in ('str', 'unicode'):
cmd.extend([eo])
elif re.search(r'\.json$', inpfile) or re.search(r'/JSON\.tmLanguage$', syntax):
cmd = self.fixStr(self.get_setting('minjson_command') or 'minjson').split()
cmd.extend([self.quoteChrs(inpfile), '-o', self.quoteChrs(outfile)])
elif re.search(r'\.css$', inpfile) or re.search(r'/CSS\.tmLanguage$', syntax):
minifier = self.get_setting('cssminifier') or 'clean-css'
if minifier == 'uglifycss':
cmd = self.fixStr(self.get_setting('uglifycss_command') or 'uglifycss').split()
eo = self.get_setting('uglifycss_options')
if type(eo).__name__ in ('str', 'unicode'):
cmd.extend(self.fixStr(eo).split())
cmd.extend([self.quoteChrs(inpfile), '>', self.quoteChrs(outfile)])
elif minifier == 'yui':
cmd = self.fixStr(self.get_setting('java_command') or 'java').split()
yui_compressor = self.get_setting('yui_compressor') or 'yuicompressor-2.4.7.jar'
cmd.extend(['-jar', PLUGIN_DIR + '/bin/' + str(yui_compressor), self.quoteChrs(inpfile), '-o', self.quoteChrs(outfile)])
eo = self.get_setting('yui_charset')
if type(eo).__name__ in ('str', 'unicode'):
cmd.extend(['--charset', eo])
eo = self.get_setting('yui_line_break')
if type(eo).__name__ in ('int', 'str', 'unicode'):
cmd.extend(['--line-break', str(eo)])
else:
cmd = self.fixStr(self.get_setting('cleancss_command') or 'cleancss').split()
eo = self.get_setting('cleancss_options') or '-O2 --skip-rebase'
if type(eo).__name__ in ('str', 'unicode'):
cmd.extend(self.fixStr(eo).split())
if self.get_setting('css_source_map'):
cmd.extend(['--source-map'])
cwd = os.path.dirname(outfile)
cmd.extend(['-o', self.quoteChrs(outfile), self.quoteChrs(inpfile)])
elif re.search(r'\.html?$', inpfile) or re.search(r'/HTML\.tmLanguage$', syntax):
cmd = self.fixStr(self.get_setting('html-minifier_command') or 'html-minifier').split()
eo = self.get_setting('html-minifier_options') or '--collapse-boolean-attributes --collapse-whitespace --html5 --minify-css --minify-js --preserve-line-breaks --process-conditional-comments --remove-comments --remove-empty-attributes --remove-redundant-attributes --remove-script-type-attributes --remove-style-link-type-attributes'
if type(eo).__name__ in ('str', 'unicode'):
cmd.extend(self.fixStr(eo).split())
cmd.extend(['-o', self.quoteChrs(outfile), self.quoteChrs(inpfile)])
elif re.search(r'\.svg$', inpfile):
cmd = self.fixStr(self.get_setting('svgo_command') or 'svgo').split()
eo = self.get_setting('svgo_min_options')
if type(eo).__name__ in ('str', 'unicode'):
cmd.extend(self.fixStr(eo).split())
cmd.extend(['-i', self.quoteChrs(inpfile), '-o', self.quoteChrs(outfile)])
else:
cmd = False
if cmd:
print('Minify: Minifying file:' + str(inpfile))
self.run_cmd(cmd, outfile, cwd)
class BeautifyClass(MinifyUtils):
def beautify(self):
inpfile = self.view.file_name()
if type(inpfile).__name__ in ('str', 'unicode') and re.search(r'\.[^\.]+$', inpfile):
if self.view.is_dirty() and self.get_setting('save_first'):
self.view.run_command('save')
outfile = re.sub(r'(?:\.min)?(\.[^\.]+)$', r'.beautified\1', inpfile, 1)
syntax = self.view.settings().get('syntax')
if re.search(r'\.js$', inpfile) or re.search(r'/JavaScript\.tmLanguage$', syntax):
cmd = self.fixStr(self.get_setting('uglifyjs_command') or 'uglifyjs').split()
cmd.extend([self.quoteChrs(inpfile), '-o', self.quoteChrs(outfile), '--comments', 'all', '-b'])
eo = self.get_setting('uglifyjs_pretty_options')
if type(eo).__name__ in ('str', 'unicode'):
cmd.extend(self.fixStr(eo).split())
elif re.search(r'\.json$', inpfile) or re.search(r'/JSON\.tmLanguage$', syntax):
cmd = self.fixStr(self.get_setting('minjson_command') or 'minjson').split()
cmd.extend([self.quoteChrs(inpfile), '-o', self.quoteChrs(outfile), '-b'])
elif re.search(r'\.css$', inpfile) or re.search(r'/CSS\.tmLanguage$', syntax):
cmd = self.fixStr(self.get_setting('js-beautify_command') or 'js-beautify').split()
eo = self.get_setting('js-beautify_options')
if type(eo).__name__ in ('str', 'unicode'):
cmd.extend(self.fixStr(eo).split())
cmd.extend(['--css', '-o', self.quoteChrs(outfile), self.quoteChrs(inpfile)])
elif re.search(r'\.html?$', inpfile) or re.search(r'/HTML\.tmLanguage$', syntax):
outfile = re.sub(r'(?:\.min)?(\.[^\.]+)$', r'.pretty\1', inpfile, 1)
cmd = self.fixStr(self.get_setting('js-beautify_command') or 'js-beautify').split()
eo = self.get_setting('js-beautify_html_options')
if type(eo).__name__ in ('str', 'unicode'):
cmd.extend(self.fixStr(eo).split())
cmd.extend(['--html', '-o', self.quoteChrs(outfile), self.quoteChrs(inpfile)])
elif re.search(r'\.svg$', inpfile):
outfile = re.sub(r'(?:\.min)?(\.[^\.]+)$', r'.pretty\1', inpfile, 1)
cmd = self.fixStr(self.get_setting('svgo_command') or 'svgo').split()
eo = self.get_setting('svgo_pretty_options')
if type(eo).__name__ in ('str', 'unicode'):
cmd.extend(self.fixStr(eo).split())
cmd.extend(['--pretty', '-i', self.quoteChrs(inpfile), '-o', self.quoteChrs(outfile)])
if cmd:
print('Minify: Beautifying file:' + str(inpfile))
self.run_cmd(cmd, outfile)
class MinifyCommand(PluginBase, MinifyClass, sublime_plugin.TextCommand):
def do_action(self):
self.minify()
class BeautifyCommand(PluginBase, BeautifyClass, sublime_plugin.TextCommand):
def do_action(self):
self.beautify()
class RunAfterSave(ThreadHandling, MinifyClass, sublime_plugin.EventListener):
def on_post_save(self, view):
self.view = view
if self.get_setting('auto_minify_on_save'):
filename = self.view.file_name()
syntax = self.view.settings().get('syntax')
if type(filename).__name__ in ('str', 'unicode') and ((re.search(r'\.(?:css|js|json|html?|svg)$', filename)) or (re.search(r'(\.[^\.]+)$', filename) and re.search(r'/(?:CSS|JavaScript|JSON|HTML)\.tmLanguage$', syntax))):
searchFName = ''
searchSyntax = ''
if 'css' in self.get_setting('allowed_file_types'):
searchFName += 'css|'
searchSyntax += 'CSS|'
if 'js' in self.get_setting('allowed_file_types'):
searchFName += 'js|'
searchSyntax += 'JavaScript|'
if 'json' in self.get_setting('allowed_file_types'):
searchFName += 'json|'
searchSyntax += 'JSON|'
if 'html' in self.get_setting('allowed_file_types'):
searchFName += 'html?|'
searchSyntax += 'HTML|'
if 'svg' in self.get_setting('allowed_file_types'):
searchFName += 'svg|'
searchFNameRegEx = r'\.(?:' + searchFName.rstrip('|') + ')$'
searchSyntaxRegEx = r'/(?:' + searchSyntax.rstrip('|') + ')\.tmLanguage$'
if re.search(searchFNameRegEx, filename) or (re.search(r'(\.[^\.]+)$', filename) and re.search(searchSyntaxRegEx, syntax)):
if re.search(r'\.min\.[^\.]+$', filename):
if self.get_setting('debug_mode'):
print('Minify: Skipping file ' + filename + ' - already minified')
else:
if SUBL_ASYNC:
sublime.set_timeout_async(lambda: self.minify(), 0)
else:
self.minify()
else:
if self.get_setting('debug_mode'):
print('Minify: Skipping file ' + filename + ' - not in allowed_file_types')
|
[] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
python
| 1 | 0 | |
user-service/database.go
|
package main
import (
"fmt"
"os"
"github.com/jinzhu/gorm"
_ "github.com/jinzhu/gorm/dialects/postgres"
)
func CreateConnection() (*gorm.DB, error) {
// Get database details from environment variables
host := os.Getenv("DB_HOST")
user := os.Getenv("DB_USER")
DBName := os.Getenv("DB_NAME")
password := os.Getenv("DB_PASSWORD")
return gorm.Open(
"postgres",
fmt.Sprintf(
"postgres://%s:%s@%s/%s?sslmode=disable",
user, password, host, DBName,
),
)
}
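// Illustrative usage sketch (not part of the original file), assuming the DB_*
// environment variables above are exported before the service starts:
//
//	db, err := CreateConnection()
//	if err != nil {
//		log.Fatalf("could not connect to postgres: %v", err)
//	}
//	defer db.Close()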
|
[
"\"DB_HOST\"",
"\"DB_USER\"",
"\"DB_NAME\"",
"\"DB_PASSWORD\""
] |
[] |
[
"DB_PASSWORD",
"DB_USER",
"DB_NAME",
"DB_HOST"
] |
[]
|
["DB_PASSWORD", "DB_USER", "DB_NAME", "DB_HOST"]
|
go
| 4 | 0 | |
appengine/experimental/crbadge/app.py
|
# Copyright (c) 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import os
import sys
import webapp2
from webapp2_extras import jinja2
from google.appengine.ext import ndb
from model import Badge, UserData, Settings
class BadgeView(object):
def __init__(self, user_data, badge_def_dict):
self.user_data = user_data
self.badge_def = badge_def_dict.get(user_data.badge_name)
if not self.badge_def:
self.show = False
return
if self.badge_def.show_number:
self.show = user_data.value > 0
self.level = user_data.value
else:
self.show = user_data.value >= self.badge_def.level_1
if user_data.value >= self.badge_def.level_3:
self.level = 3
elif user_data.value >= self.badge_def.level_2:
self.level = 2
else:
self.level = None # Don't show level for 1
class BaseHandler(webapp2.RequestHandler):
"""Provide a cached Jinja environment to each request."""
def __init__(self, *args, **kwargs):
webapp2.RequestHandler.__init__(self, *args, **kwargs)
@staticmethod
def jinja2_factory(app):
template_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), 'templates'))
config = {'template_path': template_dir}
jinja = jinja2.Jinja2(app, config=config)
return jinja
@webapp2.cached_property
def jinja2(self):
# Returns a Jinja2 renderer cached in the app registry.
return jinja2.get_jinja2(app=self.app, factory=BaseHandler.jinja2_factory)
def render_response(self, _template, **context):
# Renders a template and writes the result to the response.
context.update({
'app_version': os.environ.get('CURRENT_VERSION_ID'),
})
rv = self.jinja2.render_template(_template, **context)
self.response.write(rv)
class UserPage(BaseHandler):
"""Show all (non-hidden) chromium badges for the viewed user."""
def get(self, viewed_user_email, *args):
if '@' not in viewed_user_email:
viewed_user_email += '@chromium.org'
user_data_list = UserData.query(UserData.email == viewed_user_email).fetch()
badge_def_list = Badge.query().fetch()
badge_def_dict = {b.badge_name: b for b in badge_def_list}
badge_views = [
BadgeView(ud, badge_def_dict)
for ud in user_data_list]
context = {
'viewed_user_email': viewed_user_email,
'badges': badge_views,
}
self.render_response('user.html', **context)
class MainPage(BaseHandler):
"""The default page shows the signed in user their own badges."""
def get(self, *args):
# TODO: redirect to signed in user... what if not signed in?
self.redirect('/[email protected]')
class BadgePage(BaseHandler):
"""Display page description, level thresholds, and times awarded."""
def get(self, badge_name, *args):
logging.info('badge_name = %r', badge_name)
badge_def = Badge.get_by_id(badge_name)
if not badge_def:
logging.info('badge def was %r', badge_def)
self.abort(404)
# Parenthesize the "or" fallbacks: comparisons bind tighter than "or", so the
# default must apply to the threshold value, not to the query filter.
awarded_count = UserData.query(
UserData.badge_name == badge_name,
UserData.value >= (badge_def.level_1 or 0)).count()
l1_count = UserData.query(
UserData.badge_name == badge_name,
UserData.value >= (badge_def.level_1 or 0),
UserData.value < (badge_def.level_2 or sys.maxint)).count()
l2_count = UserData.query(
UserData.badge_name == badge_name,
UserData.value >= (badge_def.level_2 or 0),
UserData.value < (badge_def.level_3 or sys.maxint)).count()
l3_count = UserData.query(
UserData.badge_name == badge_name,
UserData.value >= (badge_def.level_3 or 0)).count()
context = {
'badge_def': badge_def,
'awarded_count': awarded_count,
'l1_count': l1_count,
'l2_count': l2_count,
'l3_count': l3_count
}
self.render_response('badge.html', **context)
class Update(BaseHandler):
"""Update badge data.
The expected format is:
[
{
"badge_name": <str>,
"level_1": <int>, # Optional
"level_2": <int>, # Optional
"level_3": <int>, # Optional
"show_number": <bool>, # Optional
"title": <str>, # Optional
"description": <str>, # Optional
"icon": <str>, # URI, Optional
"data": [
{
"email": <str>,
"value": <int>,
}
]
}
]
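Illustrative example payload (added here for clarity; the badge name and
numbers are hypothetical, not real data):
[{"badge_name": "code-review", "level_1": 10, "level_2": 100, "level_3": 1000,
"show_number": false, "title": "Code Reviewer",
"data": [{"email": "[email protected]", "value": 42}]}]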
"""
def post(self):
password = self.request.POST.getone('password')
settings = Settings.get_by_id('1')
if password != settings.password:
self.response.set_status(403)
self.response.write('invalid password')
return
data = self.request.POST.getone('data')
if not data:
self.response.set_status(400)
self.response.write('no data given')
return
o = json.loads(data)
for badge in o:
logging.info('Updating %s' % badge)
b = self.update_badge_entity(badge)
self.update_user_data(badge, b)
@staticmethod
def update_badge_entity(badge):
name = badge['badge_name']
level_1 = badge.get('level_1', None)
level_2 = badge.get('level_2', None)
level_3 = badge.get('level_3', None)
show_number = badge.get('show_number', None)
title = badge.get('title', None)
description = badge.get('description', None)
icon = badge.get('icon', None)
b = Badge.get_by_id(id=name)
if not b:
b = Badge(id=name, badge_name=name)
if level_1 is not None:
b.level_1 = level_1
if level_2 is not None:
b.level_2 = level_2
if level_3 is not None:
b.level_3 = level_3
if show_number is not None:
b.show_number = show_number
if title is not None:
b.title = title
if description is not None:
b.description = description
if icon is not None:
b.icon = icon
b.put()
return b
@staticmethod
def update_user_data(badge, b):
data = badge.get('data', [])
# There might be a max batch size? We'll find out.
to_put = []
for item in data:
email = item['email']
value = int(item['value']) # JSON might turn it into a float.
uid = '%s:%s' % (b.badge_name, email)
d = UserData.get_by_id(id=uid)
if d and not d.visible:
continue
d = UserData(
badge_name=b.badge_name, email=email, value=value,
visible=True, id=uid)
to_put.append(d)
ndb.put_multi(to_put)
app = webapp2.WSGIApplication([
(r'/system/update', Update),
(r'/b/([-_.A-Za-z0-9]+)', BadgePage),
(r'/([-_+A-Za-z0-9]+(@[.A-Za-z]+)?)', UserPage),
('/', MainPage),
])
|
[] |
[] |
[
"CURRENT_VERSION_ID"
] |
[]
|
["CURRENT_VERSION_ID"]
|
python
| 1 | 0 | |
rap/app.go
|
//The RAP site is composed of the following parts:
//-static pages - for the public
//-API - for RESTful CRUD ops on Datastore
//-logged-in pages - a web-based way to access the CRUD ops - not in the initial release
//-an import page for the bulk updates
//Google App Engine handles auth fairly well on its own
/* Some references on performance, JSON, and geocoding:
talks.golang.or/2015/json.slide#1
https://github.com/nf/geocode
talks.golang.org/2013/highperf.slide#1
github.com/mjibson/appstats
*/
package rap
import (
"appengine"
"net/http"
"os"
"time"
)
const basePath = "rap"
var recaptchaServerKey, recaptchaBrowserKey, mapsBrowserKey, geocodingServerKey string
func init() {
//load the api keys
recaptchaBrowserKey = os.Getenv("RECAPTCHA_SITE_KEY")
recaptchaServerKey = os.Getenv("RECAPTCHA_SECRET_KEY")
mapsBrowserKey = os.Getenv("MAPS_BROWSER_KEY")
geocodingServerKey = os.Getenv("GECODING_SERVER_KEY")
//basePath = "rapdemo"
fs := http.FileServer(http.Dir(basePath + "/static"))
http.Handle("/static/", http.StripPrefix("/static", fs))
http.Handle("/auth", appHandler(authdemo))
//bulk import from csv
http.Handle("/csvimport", appHandler(csvimport))
//api
http.Handle("/resources", appHandler(resources))
//feedback page submit
http.Handle("/feedback", appHandler(feedback))
//handles the templated but otherwise mostly static html pages
http.Handle("/", appHandler(serveTemplate))
}
//The resource type is what most of the application will focus on.
type resource struct {
ID int64 //db id
//display fields
Category string //Medical, Food, et cetera
OrganizationName string
Address string
ZipCode string
Days string
TimeOpen string
TimeClose string
PeopleServed string
Description string
PhoneNumber string
Location appengine.GeoPoint //lng lat
//audit fields
LastUpdatedTime time.Time `datastore:",noindex"`
LastUpdatedBy string `datastore:",noindex"`
IsActive bool
}
//following the error pattern suggested in the Go Blog
//http://blog.golang.org/error-handling-and-go
type appError struct {
Error error
Message string
Code int
}
type appHandler func(http.ResponseWriter, *http.Request) *appError
func (fn appHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if e := fn(w, r); e != nil { // e is *appError, not os.Error.
c := appengine.NewContext(r)
c.Errorf("%v", e.Error)
http.Error(w, e.Message, e.Code)
}
}
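// Illustrative sketch (not part of the original file): a handler written against the
// appHandler/appError pattern above; `ping` is a hypothetical example, not a real route.
//
//	func ping(w http.ResponseWriter, r *http.Request) *appError {
//		if _, err := w.Write([]byte("ok")); err != nil {
//			return &appError{Error: err, Message: "unable to write response", Code: http.StatusInternalServerError}
//		}
//		return nil
//	}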
|
[
"\"RECAPTCHA_SITE_KEY\"",
"\"RECAPTCHA_SECRET_KEY\"",
"\"MAPS_BROWSER_KEY\"",
"\"GECODING_SERVER_KEY\""
] |
[] |
[
"RECAPTCHA_SITE_KEY",
"GECODING_SERVER_KEY",
"RECAPTCHA_SECRET_KEY",
"MAPS_BROWSER_KEY"
] |
[]
|
["RECAPTCHA_SITE_KEY", "GECODING_SERVER_KEY", "RECAPTCHA_SECRET_KEY", "MAPS_BROWSER_KEY"]
|
go
| 4 | 0 | |
convertmask/utils/backup/getArea.py
|
'''
language: python
Description:
version: beta
Author: xiaoshuyui
Date: 2020-08-17 13:48:46
LastEditors: xiaoshuyui
LastEditTime: 2020-10-10 15:41:28
'''
def getAreaOfPolyGonbyVector(points):
# Compute the polygon area from cross products of consecutive point vectors (shoelace formula)
area = 0
if (len(points) < 3):
raise Exception("not enough points to calculate area!")
for i in range(0, len(points) - 1):
p1 = points[i]
# print(p1)
p2 = points[i + 1]
triArea = (p1[0][0] * p2[0][1] - p2[0][0] * p1[0][1]) / 2
area += triArea
return abs(area)
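# Illustrative check (not part of the original file): for a closed unit-square contour
# in OpenCV point format (first point repeated at the end),
#   getAreaOfPolyGonbyVector([[[0, 0]], [[1, 0]], [[1, 1]], [[0, 1]], [[0, 0]]])
# returns 1.0, since the loop sums the signed triangle areas of consecutive points.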
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
modules/pymol/parser.py
|
#A* -------------------------------------------------------------------
#B* This file contains source code for the PyMOL computer program
#C* Copyright (c) Schrodinger, LLC.
#D* -------------------------------------------------------------------
#E* It is unlawful to modify or remove this copyright notice.
#F* -------------------------------------------------------------------
#G* Please see the accompanying LICENSE file for further information.
#H* -------------------------------------------------------------------
#I* Additional authors of this source file include:
#-*
#-*
#-*
#Z* -------------------------------------------------------------------
# parser.py
# Python parser module for PyMol
#
from __future__ import absolute_import
# Don't import __future__.print_function
class SecurityException(Exception):
pass
SCRIPT_TOPLEVEL = 'toplevel'
if True:
import pymol
import traceback
import collections
import re
import glob
import sys
import os
from . import parsing
from . import colorprinting
from .cmd import _feedback,fb_module,fb_mask,exp_path
QuietException = parsing.QuietException
CmdException = pymol.CmdException
py_delims = { '=' : 1, '+=' : 1, '-=' : 1, '*=' : 1,
'/=' :1, '//=' : 1, '%=' : 1, '&=' : 1,
'|=' :1, '^=' : 1, '>>=' : 1,'<<=' : 1,
'**=':1 }
remove_lists_re = re.compile(r"\[[^\]]*\]")
def complete_sc(st,sc,type_name,postfix, mode=0):
result = None
try:
sc=sc() # invoke lambda functions (if any)
except:
traceback.print_exc()
amb = sc.interpret(st, mode)
if amb==None:
colorprinting.warning(" parser: no matching %s."%type_name)
elif isinstance(amb, str):
result = amb+postfix
else:
amb.sort()
colorprinting.suggest(" parser: matching %s:"%type_name)
flist = [x for x in amb if x[0]!='_']
lst = parsing.list_to_str_list(flist)
for a in lst:
colorprinting.suggest(a)
# now append up to point of ambiguity
if not len(flist):
css = []
else:
css = list(flist[0]) # common sub-string (css)
for a in flist:
ac = list(a)
tmp = css
css = []
for c in range(len(tmp)):
if tmp[c]!=ac[c]:
break
css.append(tmp[c])
css = [_f for _f in css if _f]
css = ''.join(css)
if len(css)>len(st):
result = css
return result
class NestLayer:
def __init__(self):
self.cont = ""
self.com0 = ""
self.sc_path = SCRIPT_TOPLEVEL
self.lineno = 0
self.literal_python_fallback = False
self.embed_sentinel = None
self.embed_dict = {}
self.next = []
class Parser:
def __init__(self,cmd):
cmd = cmd._weakrefproxy
self.cmd = cmd
self.nest = 0
self.layer = collections.defaultdict(NestLayer)
self.pymol_names = self.cmd._pymol.__dict__
# parsing state implemented with dictionaries to enable safe recursion
# to arbitrary depths
#com0 = {} # verbose line, as read in
#com1 = {} # line w/o trailing whitespace
#com2 = {} # non-compound command
#cont = {} # continued characters from previous lines (i.e., before \ )
#script = {} # file handles
#sc_path = {} # file paths
#kw = {} # row in the keyword table for the current command
#input = {} # list of length two - command and unparsed arguments string
#next = {} # characters for next command (i.e., after ; )
#args = {} # parsed non-keyword argument string
#kw_args = {} # parser keyword argument string
#embed_dict = {}
#embed_list = {}
#embed_sentinel = {}
#embed_type = {}
#embed_line = {}
# The resulting value from a pymol command (if any) is stored in the
# parser.result global variable. However, script developers will
# generally want to switch to the Python API for any of this kind of
# stuff.
self.result = None
# initialize parser
self.cmd._pymol.__script__ = SCRIPT_TOPLEVEL
def exec_python(self, s, secure=False, fallback=False):
if secure:
raise SecurityException('Python expressions disallowed in this file')
layer = self.layer[self.nest]
layer.literal_python_fallback = fallback
# for meaningful line number in error messages
blanklines = layer.lineno - 1 - s.count('\n')
s = '\n' * blanklines + s + '\n'
s = compile(s, layer.sc_path, 'exec')
exec(s, self.pymol_names, self.pymol_names)
# main parser routine
def parse(self,s,secure=0):
try:
self.nest += 1
return self._parse(s, secure)
finally:
self.nest -= 1
def _parse(self, s, secure):
layer = self.layer[self.nest]
self.result = None
# report any uncaught errors...
# WLD: this is problematic if parse is called inside an exception...removed.
# if sys.exc_info()!=(None,None,None):
# traceback.print_exc()
# sys.exc_clear()
def parse_embed():
if s.strip() == layer.embed_sentinel:
etn = layer.embed_type
if etn == 0: # embedded data
colorprinting.parrot(" Embed: read %d lines."%(len(layer.embed_list)))
layer.embed_sentinel=None
elif etn == 1: # python block
colorprinting.parrot("PyMOL>"+s.rstrip())
py_block = ''.join(layer.embed_list)
del layer.embed_list
layer.embed_sentinel=None
self.exec_python(py_block)
elif etn == 2: # skip block
colorprinting.parrot(" Skip: skipped %d lines."%(layer.embed_line))
layer.embed_sentinel=None
else:
etn = layer.embed_type
if etn == 0: # normal embedded data
layer.embed_list.append(s.rstrip()+"\n")
elif etn == 1: # python block
el = layer.embed_line + 1
colorprinting.parrot("%5d:%s"%(el,s.rstrip()))
layer.embed_line = el
layer.embed_list.append(s.rstrip()+"\n")
elif etn == 2:
layer.embed_line = layer.embed_line + 1
p_result = 1
layer.com0 = s
try:
if layer.embed_sentinel is not None:
parse_embed()
return 1
layer.com1 = layer.com0.rstrip() # strips trailing whitespace
if len(layer.com1) > 0:
if str(layer.com1[-1]) == "\\":
# prepend leftovers
if layer.cont != '':
layer.cont = layer.cont + "\n" + layer.com1[:-1]
else:
layer.cont = layer.com1[:-1]
else:
# prepend leftovers
if layer.cont != '':
layer.com1 = layer.cont + "\n" + layer.com1
layer.cont = ''
# this routine splits up the line first based on semicolon
layer.next = parsing.split(layer.com1,';',1) + layer.next[1:]
# layer.com2 now a full non-compound command
layer.com2 = layer.next[0]
layer.input = layer.com2.split(' ',1)
lin = len(layer.input)
if lin:
layer.input[0] = layer.input[0].strip()
com = layer.input[0]
if (com[0:1]=='/'):
# explicit literal python
layer.com2 = layer.com2[1:].strip()
if len(layer.com2)>0:
self.exec_python(layer.com2, secure)
elif lin>1 and layer.input[-1:][0].split(' ',1)[0] in py_delims:
self.exec_python(layer.com2, secure)
else:
# try to find a keyword which matches
if com in self.cmd.kwhash:
amb = self.cmd.kwhash.interpret(com)
if amb == None:
com = self.cmd.kwhash[com]
elif not isinstance(amb, str):
colorprinting.warning('Error: ambiguous command: ')
amb.sort()
amb = parsing.list_to_str_list(amb)
for a in amb:
colorprinting.warning(a)
raise QuietException
com = amb
if com in self.cmd.keyword:
# here is the command and argument handling section
layer.kw = self.cmd.keyword[com]
if layer.kw[4]>=parsing.NO_CHECK:
# stricter, Python-based argument parsing
# remove line breaks (only important for Python expressions)
layer.com2=layer.com2.replace('\n','')
if layer.kw[4]>=parsing.LITERAL: # treat literally
layer.next = []
if not secure:
layer.com2=layer.com1
else:
raise SecurityException('Python expressions disallowed in this file')
if secure and (layer.kw[4]==parsing.SECURE):
layer.next = []
raise SecurityException('Command disallowed in this file')
else:
(layer.args, layer.kw_args) = \
parsing.prepare_call(
layer.kw[0],
parsing.parse_arg(layer.com2,mode=layer.kw[4],_self=self.cmd),
layer.kw[4], _self=self.cmd) # will raise exception on failure
self.result=layer.kw[0](*layer.args, **layer.kw_args)
elif layer.kw[4]==parsing.PYTHON:
# handle python keyword
layer.com2 = layer.com2.strip()
if len(layer.com2)>0:
self.exec_python(layer.com2, secure)
else:
# remove line breaks (only important for Python expressions)
layer.com2=layer.com2.replace('\n','')
# old parsing style, being phased out
if layer.kw[4]==parsing.ABORT:
return None # SCRIPT ABORT EXIT POINT
if layer.kw[4]==parsing.MOVIE: # copy literal single line, no breaks
layer.next = []
if not secure:
layer.input = layer.com1.split(' ',1)
else:
raise SecurityException('Movie commands disallowed in this file')
if len(layer.input)>1:
layer.args = parsing.split(layer.input[1],layer.kw[3])
while 1:
nArg = len(layer.args) - 1
c = 0
while c < nArg:
if ( layer.args[c].count('(')!=
layer.args[c].count(')')):
tmp=layer.args[c+1]
layer.args.remove(tmp)
layer.args[c]=layer.args[c].strip()+\
','+tmp.strip()
nArg = nArg-1
break;
c = c + 1
if c == nArg:
break;
if len(layer.args)==1 and len(layer.args[0])==0:
layer.args = []
else:
layer.args = []
if layer.kw[1]<= len(layer.args) <= layer.kw[2]:
layer.args = [a.strip() for a in layer.args]
if layer.kw[4]<parsing.RUN:
#
# this is where old-style commands are invoked
#
self.result=layer.kw[0](*layer.args)
#
elif (layer.kw[4]==parsing.EMBED):
layer.next = []
if secure or self.nest==0: # only legal on top level and p1m files
l = len(layer.args)
if l>0:
key = layer.args[0]
else:
key = self.get_default_key()
if l>1:
format = layer.args[1]
else:
format = 'pdb'
if l>2:
layer.embed_sentinel = layer.args[2]
else:
layer.embed_sentinel = "embed end"
list = []
layer.embed_dict[key] = ( format, list )
layer.embed_list = list
layer.embed_type = 0 # not a python block
else:
print('Error: embed only legal in special files (e.g. p1m)')
raise None
elif (layer.kw[4]==parsing.SKIP):
layer.next = []
arg = parsing.apply_arg(
parsing.parse_arg(layer.com2,_self=self.cmd),
('sentinel',),
{'sentinel':'skip end'})
print(arg) # ???
if len(layer.args):
if layer.args[0]=='end': # probable 'skip end' to ignore
arg = []
if len(arg):
layer.embed_sentinel = arg[0]
layer.embed_type = 2 # skip block
layer.embed_line = 0
elif (layer.kw[4]==parsing.PYTHON_BLOCK):
layer.next = []
if not secure:
arg = parsing.apply_arg(
parsing.parse_arg(layer.com2,_self=self.cmd),
('sentinel','skip'),
{'sentinel':'python end','skip':0})
layer.embed_sentinel = arg[0]
list = []
layer.embed_list = list
if arg[1]:
layer.embed_type = 2 # skip block
else:
layer.embed_type = 1 # python block
layer.embed_line = 0
else:
print('Error: Python blocks disallowed in this file.')
raise None
else:
print('Error: unknown keyword mode: '+str(layer.kw[4]))
raise QuietException
else:
print('Error: invalid arguments for %s command.' % com)
#
# non-keyword command handling
#
elif len(layer.input[0]):
if layer.input[0][0]=='@':
path = exp_path(layer.com2[1:].strip())
if path[-3:].lower()=='p1m':
nest_securely = 1
else:
nest_securely = secure
if re.search(r"\.py$|\.pym$",path) != None:
if self.cmd._feedback(fb_module.parser,fb_mask.warnings):
print("Warning: use 'run' instead of '@' with Python files?")
layer.script = open(path,'rU')
self.cmd._pymol.__script__ = path
self.nest=self.nest+1
self.layer[self.nest] = NestLayer()
layer = self.layer[self.nest]
layer.cont=''
layer.sc_path=path
layer.embed_sentinel=None
while 1:
layer.com0 = self.layer[self.nest-1].script.readline()
self.layer[self.nest].lineno += 1
if not layer.com0: break
inp_cmd = layer.com0
tmp_cmd = inp_cmd.strip()
if len(tmp_cmd):
if tmp_cmd[0] not in ['#','_','/']: # suppress comments, internals, python
if layer.embed_sentinel==None:
colorprinting.parrot("PyMOL>"+tmp_cmd)
elif tmp_cmd[0]=='_' and \
tmp_cmd[1:2] in [' ','']: # "_ " remove echo suppression signal
inp_cmd=inp_cmd[2:]
pp_result = self.parse(inp_cmd,nest_securely)
if pp_result==None: # RECURSION
break # abort command gets us out
elif pp_result==0: # QuietException
if self.cmd.get_setting_boolean("stop_on_exceptions"):
p_result = 0 # signal an error occurred
colorprinting.error("PyMOL: stopped on exception.")
break;
self.nest=self.nest-1
layer=self.layer[self.nest]
layer.script.close()
self.cmd._pymol.__script__ = layer.sc_path
else: # nothing found, try literal python
layer.com2 = layer.com2.strip()
if len(layer.com2)>0:
if not secure:
self.exec_python(layer.com2, fallback=True)
elif layer.input[0][0:1]!='#':
colorprinting.error('Error: unrecognized keyword: '+layer.input[0])
if (len(layer.next)>1) and p_result:
# continue parsing if no error or break has occurred
self.nest=self.nest+1
self.layer[self.nest] = NestLayer()
layer=self.layer[self.nest]
layer.com0 = self.layer[self.nest-1].next[1]
self.layer[self.nest-1].next=[]
layer.cont=''
layer.embed_sentinel=None
p_result = self.parse(layer.com0,secure) # RECURSION
self.nest=self.nest-1
layer=self.layer[self.nest]
except (QuietException, CmdException) as e:
if e.args:
colorprinting.error(e)
if self.cmd._feedback(fb_module.parser,fb_mask.blather):
print("Parser: caught " + type(e).__name__)
p_result = 0
except SecurityException as e:
colorprinting.error('Error: %s' % (e,))
p_result = None
except:
exc_type, exc_value, tb = colorprinting.print_exc(
[__file__, SCRIPT_TOPLEVEL])
p_result = 0 # notify caller that an error was encountered
if not p_result and self.cmd._pymol.invocation.options.exit_on_error:
self.cmd.quit(1)
return p_result # 0 = Exception, None = abort, 1 = ok
def get_embedded(self,key=None):
layer = self.layer[self.nest]
dict = layer.embed_dict
if key==None:
key = self.get_default_key()
return dict.get(key,None)
def get_default_key(self):
layer = self.layer[self.nest]
return os.path.splitext(os.path.basename(layer.sc_path))[0]
def stdin_reader(self): # dedicated thread for reading standard input
import sys
while 1:
try:
l = sys.stdin.readline()
except IOError:
continue
if l!="":
if self.nest==0:
# if we're reading embedded input on stdin
# then bypass PyMOL C code altogether
if self.layer[0].embed_sentinel!=None:
self.parse(l)
else:
self.cmd.do(l, flush=True)
else:
self.cmd.do(l, flush=True)
elif not self.cmd._pymol.invocation.options.keep_thread_alive:
self.cmd.quit()
else:
import time
time.sleep(.1)
self.cmd._pymol._stdin_reader_thread = None
def complete(self,st):
with self.cmd.lockcm:
return self._complete(st)
def _complete(self,st):
result = None
pre = ''
flag = 0
if not (' ' in st or '@' in st):
try:
result = complete_sc(st, self.cmd.kwhash, 'commands',' ', 1)
except:
traceback.print_exc()
else:
full = self.cmd.kwhash.interpret(re.sub(r" .*","",st))
st_no_lists = remove_lists_re.sub("",st)
count = st_no_lists.count(',') # which argument are we on
if self.cmd.is_string(full):
try:
if count<len(self.cmd.auto_arg):
if full in self.cmd.auto_arg[count]: # autocomplete arguments
flag = 1
pre = re.sub(r"^[^ ]* ",' ',st,count=1) # trim command
if re.search(r",",pre)!=None:
pre = re.sub(r"[^\, ]*$","",pre,count=1)
pre = re.sub(r",\s*[^\, ]*$",", ",pre,count=1) # trim 1 arg
else:
pre = re.sub("[^ ]*$","",pre,count=1) # trim 1 arg
pre = re.sub(r"^ *",'',pre)
pre = full+' '+pre
pat = re.sub(r".*[\, ]",'',st)
# print ":"+pre+":"+pat+":"
# print tuple([pat] + self.cmd.auto_arg[count][full])
result = complete_sc(pat, *self.cmd.auto_arg[count][full])
except:
traceback.print_exc()
if not flag: # otherwise fallback onto filename completion
st = self.cmd.as_pathstr(st)
loc = 1 + max(map(st.rfind, ',@'))
if not loc:
loc = 1 + st.find(' ')
pre = st[:loc]
st3 = st[loc:].lstrip()
flist = glob.glob(exp_path(st3)+"*")
# environment variable completion
if not flist and st3.startswith('$'):
flist = ['$' + var for var in os.environ
if var.startswith(st3[1:])]
lf = len(flist)
if lf == 0:
print(" parser: no matching files.")
elif lf==1:
result = flist[0]
if os.path.isdir(flist[0]):
result += '/' # do not use os.path.sep here
else:
flist.sort()
print(" parser: matching files:")
lst = parsing.list_to_str_list(flist)
for a in lst:
print(a)
# now append as much up to point of ambiguity
css = os.path.commonprefix(flist)
if len(css)>len(st3):
result = css
if result!=None:
result = pre+result
return result
def new_parse_closure(self_cmd): # create parser and return an instance-specific parse function closure
try:
p = Parser(self_cmd)
except:
traceback.print_exc()
self_cmd._parser = p
return lambda s,secure,p=p:p.parse(s,secure)
def new_complete_closure(self_cmd): # return an instance-specific complete function closure
return lambda st,p=self_cmd._parser:p.complete(st)
# unused code?
#
# def _same_(a,b):
# if a==b:
# return a
# else:
# return None
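# Illustrative, hedged sketch (not part of the original module): the closure
# factories above are meant to give each cmd instance its own Parser. Based
# only on the lambdas defined above, the returned callables are used roughly
# like this (everything other than the two factory names is an assumption):
#
#     parse = new_parse_closure(cmd)      # also stores the Parser on cmd._parser
#     complete = new_complete_closure(cmd)
#     parse("fetch 1abc", 0)              # second positional argument is `secure`
#     complete("fet")                     # tab-completion for a partial command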
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
lib/spack/spack/test/llnl/util/lock.py
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""These tests ensure that our lock works correctly.
This can be run in two ways.
First, it can be run as a node-local test, with a typical invocation like
this::
spack test lock
You can *also* run it as an MPI program, which allows you to test locks
across nodes. So, e.g., you can run the test like this::
mpirun -n 7 spack test lock
And it will test locking correctness among MPI processes. Ideally, you
want the MPI processes to span across multiple nodes, so, e.g., for SLURM
you might do this::
srun -N 7 -n 7 -m cyclic spack test lock
You can use this to test whether your shared filesystem properly supports
POSIX reader-writer locking with byte ranges through fcntl.
If you want to test on multiple filesystems, you can modify the
``locations`` list below. By default it looks like this::
locations = [
tempfile.gettempdir(),  # standard tmp directory (potentially local)
os.path.join('/nfs/tmp2/', getpass.getuser()),     # NFS tmp mount
os.path.join('/p/lscratch*/', getpass.getuser()),  # Lustre scratch mount
]
Add names and paths for your preferred filesystem mounts to test on them;
the tests are parametrized to run on every filesystem in the ``locations``
list. Note that 'tmp' will be skipped for MPI testing, as it is often a
node-local filesystem, and multi-node tests will fail if the locks aren't
actually on a shared filesystem.
"""
import collections
import errno
import fcntl
import os
import socket
import shutil
import tempfile
import traceback
import glob
import getpass
from contextlib import contextmanager
from multiprocessing import Process, Queue
import pytest
import llnl.util.lock as lk
import llnl.util.multiproc as mp
from llnl.util.filesystem import touch
#
# This test can be run with MPI. MPI is "enabled" if we can import
# mpi4py and the number of total MPI processes is greater than 1.
# Otherwise it just runs as a node-local test.
#
# NOTE: MPI mode is different from node-local mode in that node-local
# mode will spawn its own test processes, while MPI mode assumes you've
# run this script as a SPMD application. In MPI mode, no additional
# processes are spawned, and you need to ensure that you mpirun the
# script with enough processes for all the multiproc_test cases below.
#
# If you don't run with enough processes, tests that require more
# processes than you currently have will be skipped.
#
mpi = False
comm = None
try:
from mpi4py import MPI
comm = MPI.COMM_WORLD
if comm.size > 1:
mpi = True
except ImportError:
pass
"""This is a list of filesystem locations to test locks in. Paths are
expanded so that %u is replaced with the current username. '~' is also
legal and will be expanded to the user's home directory.
Tests are skipped for directories that don't exist, so you'll need to
update this with the locations of NFS, Lustre, and other mounts on your
system.
"""
locations = [
tempfile.gettempdir(),
os.path.join('/nfs/tmp2/', getpass.getuser()),
os.path.join('/p/lscratch*/', getpass.getuser()),
]
"""This is the longest a failed multiproc test will take.
Barriers will time out and raise an exception after this interval.
In MPI mode, barriers don't time out (they hang). See mpi_multiproc_test.
"""
barrier_timeout = 5
"""This is the lock timeout for expected failures.
This may need to be higher for some filesystems."""
lock_fail_timeout = 0.1
def make_readable(*paths):
for path in paths:
mode = 0o555 if os.path.isdir(path) else 0o444
os.chmod(path, mode)
def make_writable(*paths):
for path in paths:
mode = 0o755 if os.path.isdir(path) else 0o744
os.chmod(path, mode)
@contextmanager
def read_only(*paths):
modes = [os.stat(p).st_mode for p in paths]
make_readable(*paths)
yield
for path, mode in zip(paths, modes):
os.chmod(path, mode)
@pytest.fixture(scope='session', params=locations)
def lock_test_directory(request):
"""This fixture causes tests to be executed for many different mounts.
See the ``locations`` dict above for details.
"""
return request.param
@pytest.fixture(scope='session')
def lock_dir(lock_test_directory):
parent = next((p for p in glob.glob(lock_test_directory)
if os.path.exists(p) and os.access(p, os.W_OK)), None)
if not parent:
# Skip filesystems that don't exist or aren't writable
pytest.skip("requires filesystem: '%s'" % lock_test_directory)
elif mpi and parent == tempfile.gettempdir():
# Skip local tmp test for MPI runs
pytest.skip("skipping local tmp directory for MPI test.")
tempdir = None
if not mpi or comm.rank == 0:
tempdir = tempfile.mkdtemp(dir=parent)
if mpi:
tempdir = comm.bcast(tempdir)
yield tempdir
if mpi:
# rank 0 may get here before others, in which case it'll try to
# remove the directory while other processes try to re-create the
# lock. This will give errno 39: directory not empty. Use a
# barrier to ensure everyone is done first.
comm.barrier()
if not mpi or comm.rank == 0:
make_writable(tempdir)
shutil.rmtree(tempdir)
@pytest.fixture
def private_lock_path(lock_dir):
"""In MPI mode, this is a private lock for each rank in a multiproc test.
For other modes, it is the same as a shared lock.
"""
lock_file = os.path.join(lock_dir, 'lockfile')
if mpi:
lock_file += '.%s' % comm.rank
yield lock_file
if os.path.exists(lock_file):
make_writable(lock_dir, lock_file)
os.unlink(lock_file)
@pytest.fixture
def lock_path(lock_dir):
"""This lock is shared among all processes in a multiproc test."""
lock_file = os.path.join(lock_dir, 'lockfile')
yield lock_file
if os.path.exists(lock_file):
make_writable(lock_dir, lock_file)
os.unlink(lock_file)
def test_poll_interval_generator():
interval_iter = iter(
lk.Lock._poll_interval_generator(_wait_times=[1, 2, 3]))
intervals = list(next(interval_iter) for i in range(100))
assert intervals == [1] * 20 + [2] * 40 + [3] * 40
def local_multiproc_test(*functions, **kwargs):
"""Order some processes using simple barrier synchronization."""
b = mp.Barrier(len(functions), timeout=barrier_timeout)
args = (b,) + tuple(kwargs.get('extra_args', ()))
procs = [Process(target=f, args=args, name=f.__name__)
for f in functions]
for p in procs:
p.start()
for p in procs:
p.join()
assert all(p.exitcode == 0 for p in procs)
def mpi_multiproc_test(*functions):
"""SPMD version of multiproc test.
This needs to be run like so:
srun spack test lock
Each process executes its corresponding function. This is different
from ``multiproc_test`` above, which spawns the processes. This will
skip tests if there are too few processes to run them.
"""
procs = len(functions)
if procs > comm.size:
pytest.skip("requires at least %d MPI processes" % procs)
comm.Barrier() # barrier before each MPI test
include = comm.rank < len(functions)
subcomm = comm.Split(include)
class subcomm_barrier(object):
"""Stand-in for multiproc barrier for MPI-parallel jobs."""
def wait(self):
subcomm.Barrier()
if include:
try:
functions[subcomm.rank](subcomm_barrier())
except BaseException:
# aborting is the best we can do for MPI tests without
# hanging, since we're using MPI barriers. This will fail
# early and it loses the nice pytest output, but at least it
# gets us a stacktrace on the processes that failed.
traceback.print_exc()
comm.Abort()
subcomm.Free()
comm.Barrier() # barrier after each MPI test.
"""``multiproc_test()`` should be called by tests below.
``multiproc_test()`` will work for either MPI runs or for local runs.
"""
multiproc_test = mpi_multiproc_test if mpi else local_multiproc_test
#
# Process snippets below can be composed into tests.
#
class AcquireWrite(object):
def __init__(self, lock_path, start=0, length=0):
self.lock_path = lock_path
self.start = start
self.length = length
@property
def __name__(self):
return self.__class__.__name__
def __call__(self, barrier):
lock = lk.Lock(self.lock_path, self.start, self.length)
lock.acquire_write() # grab exclusive lock
barrier.wait()
barrier.wait() # hold the lock until timeout in other procs.
class AcquireRead(object):
def __init__(self, lock_path, start=0, length=0):
self.lock_path = lock_path
self.start = start
self.length = length
@property
def __name__(self):
return self.__class__.__name__
def __call__(self, barrier):
lock = lk.Lock(self.lock_path, self.start, self.length)
lock.acquire_read() # grab shared lock
barrier.wait()
barrier.wait() # hold the lock until timeout in other procs.
class TimeoutWrite(object):
def __init__(self, lock_path, start=0, length=0):
self.lock_path = lock_path
self.start = start
self.length = length
@property
def __name__(self):
return self.__class__.__name__
def __call__(self, barrier):
lock = lk.Lock(self.lock_path, self.start, self.length)
barrier.wait() # wait for lock acquire in first process
with pytest.raises(lk.LockTimeoutError):
lock.acquire_write(lock_fail_timeout)
barrier.wait()
class TimeoutRead(object):
def __init__(self, lock_path, start=0, length=0):
self.lock_path = lock_path
self.start = start
self.length = length
@property
def __name__(self):
return self.__class__.__name__
def __call__(self, barrier):
lock = lk.Lock(self.lock_path, self.start, self.length)
barrier.wait() # wait for lock acquire in first process
with pytest.raises(lk.LockTimeoutError):
lock.acquire_read(lock_fail_timeout)
barrier.wait()
#
# Test that exclusive locks on other processes time out when an
# exclusive lock is held.
#
def test_write_lock_timeout_on_write(lock_path):
multiproc_test(
AcquireWrite(lock_path),
TimeoutWrite(lock_path))
def test_write_lock_timeout_on_write_2(lock_path):
multiproc_test(
AcquireWrite(lock_path),
TimeoutWrite(lock_path),
TimeoutWrite(lock_path))
def test_write_lock_timeout_on_write_3(lock_path):
multiproc_test(
AcquireWrite(lock_path),
TimeoutWrite(lock_path),
TimeoutWrite(lock_path),
TimeoutWrite(lock_path))
def test_write_lock_timeout_on_write_ranges(lock_path):
multiproc_test(
AcquireWrite(lock_path, 0, 1),
TimeoutWrite(lock_path, 0, 1))
def test_write_lock_timeout_on_write_ranges_2(lock_path):
multiproc_test(
AcquireWrite(lock_path, 0, 64),
AcquireWrite(lock_path, 65, 1),
TimeoutWrite(lock_path, 0, 1),
TimeoutWrite(lock_path, 63, 1))
def test_write_lock_timeout_on_write_ranges_3(lock_path):
multiproc_test(
AcquireWrite(lock_path, 0, 1),
AcquireWrite(lock_path, 1, 1),
TimeoutWrite(lock_path),
TimeoutWrite(lock_path),
TimeoutWrite(lock_path))
def test_write_lock_timeout_on_write_ranges_4(lock_path):
multiproc_test(
AcquireWrite(lock_path, 0, 1),
AcquireWrite(lock_path, 1, 1),
AcquireWrite(lock_path, 2, 456),
AcquireWrite(lock_path, 500, 64),
TimeoutWrite(lock_path),
TimeoutWrite(lock_path),
TimeoutWrite(lock_path))
#
# Test that shared locks on other processes time out when an
# exclusive lock is held.
#
def test_read_lock_timeout_on_write(lock_path):
multiproc_test(
AcquireWrite(lock_path),
TimeoutRead(lock_path))
def test_read_lock_timeout_on_write_2(lock_path):
multiproc_test(
AcquireWrite(lock_path),
TimeoutRead(lock_path),
TimeoutRead(lock_path))
def test_read_lock_timeout_on_write_3(lock_path):
multiproc_test(
AcquireWrite(lock_path),
TimeoutRead(lock_path),
TimeoutRead(lock_path),
TimeoutRead(lock_path))
def test_read_lock_timeout_on_write_ranges(lock_path):
"""small write lock, read whole file."""
multiproc_test(
AcquireWrite(lock_path, 0, 1),
TimeoutRead(lock_path))
def test_read_lock_timeout_on_write_ranges_2(lock_path):
"""small write lock, small read lock"""
multiproc_test(
AcquireWrite(lock_path, 0, 1),
TimeoutRead(lock_path, 0, 1))
def test_read_lock_timeout_on_write_ranges_3(lock_path):
"""two write locks, overlapping read locks"""
multiproc_test(
AcquireWrite(lock_path, 0, 1),
AcquireWrite(lock_path, 64, 128),
TimeoutRead(lock_path, 0, 1),
TimeoutRead(lock_path, 128, 256))
#
# Test that exclusive locks time out when shared locks are held.
#
def test_write_lock_timeout_on_read(lock_path):
multiproc_test(
AcquireRead(lock_path),
TimeoutWrite(lock_path))
def test_write_lock_timeout_on_read_2(lock_path):
multiproc_test(
AcquireRead(lock_path),
TimeoutWrite(lock_path),
TimeoutWrite(lock_path))
def test_write_lock_timeout_on_read_3(lock_path):
multiproc_test(
AcquireRead(lock_path),
TimeoutWrite(lock_path),
TimeoutWrite(lock_path),
TimeoutWrite(lock_path))
def test_write_lock_timeout_on_read_ranges(lock_path):
multiproc_test(
AcquireRead(lock_path, 0, 1),
TimeoutWrite(lock_path))
def test_write_lock_timeout_on_read_ranges_2(lock_path):
multiproc_test(
AcquireRead(lock_path, 0, 1),
TimeoutWrite(lock_path, 0, 1))
def test_write_lock_timeout_on_read_ranges_3(lock_path):
multiproc_test(
AcquireRead(lock_path, 0, 1),
AcquireRead(lock_path, 10, 1),
TimeoutWrite(lock_path, 0, 1),
TimeoutWrite(lock_path, 10, 1))
def test_write_lock_timeout_on_read_ranges_4(lock_path):
multiproc_test(
AcquireRead(lock_path, 0, 64),
TimeoutWrite(lock_path, 10, 1),
TimeoutWrite(lock_path, 32, 1))
def test_write_lock_timeout_on_read_ranges_5(lock_path):
multiproc_test(
AcquireRead(lock_path, 64, 128),
TimeoutWrite(lock_path, 65, 1),
TimeoutWrite(lock_path, 127, 1),
TimeoutWrite(lock_path, 90, 10))
#
# Test that exclusive locks time out while lots of shared locks are held.
#
def test_write_lock_timeout_with_multiple_readers_2_1(lock_path):
multiproc_test(
AcquireRead(lock_path),
AcquireRead(lock_path),
TimeoutWrite(lock_path))
def test_write_lock_timeout_with_multiple_readers_2_2(lock_path):
multiproc_test(
AcquireRead(lock_path),
AcquireRead(lock_path),
TimeoutWrite(lock_path),
TimeoutWrite(lock_path))
def test_write_lock_timeout_with_multiple_readers_3_1(lock_path):
multiproc_test(
AcquireRead(lock_path),
AcquireRead(lock_path),
AcquireRead(lock_path),
TimeoutWrite(lock_path))
def test_write_lock_timeout_with_multiple_readers_3_2(lock_path):
multiproc_test(
AcquireRead(lock_path),
AcquireRead(lock_path),
AcquireRead(lock_path),
TimeoutWrite(lock_path),
TimeoutWrite(lock_path))
def test_write_lock_timeout_with_multiple_readers_2_1_ranges(lock_path):
multiproc_test(
AcquireRead(lock_path, 0, 10),
AcquireRead(lock_path, 0.5, 10),
TimeoutWrite(lock_path, 5, 5))
def test_write_lock_timeout_with_multiple_readers_2_3_ranges(lock_path):
multiproc_test(
AcquireRead(lock_path, 0, 10),
AcquireRead(lock_path, 5, 15),
TimeoutWrite(lock_path, 0, 1),
TimeoutWrite(lock_path, 11, 3),
TimeoutWrite(lock_path, 7, 1))
def test_write_lock_timeout_with_multiple_readers_3_1_ranges(lock_path):
multiproc_test(
AcquireRead(lock_path, 0, 5),
AcquireRead(lock_path, 5, 5),
AcquireRead(lock_path, 10, 5),
TimeoutWrite(lock_path, 0, 15))
def test_write_lock_timeout_with_multiple_readers_3_2_ranges(lock_path):
multiproc_test(
AcquireRead(lock_path, 0, 5),
AcquireRead(lock_path, 5, 5),
AcquireRead(lock_path, 10, 5),
TimeoutWrite(lock_path, 3, 10),
TimeoutWrite(lock_path, 5, 1))
@pytest.mark.skipif(os.getuid() == 0, reason='user is root')
def test_read_lock_on_read_only_lockfile(lock_dir, lock_path):
"""read-only directory, read-only lockfile."""
touch(lock_path)
with read_only(lock_path, lock_dir):
lock = lk.Lock(lock_path)
with lk.ReadTransaction(lock):
pass
with pytest.raises(lk.LockROFileError):
with lk.WriteTransaction(lock):
pass
def test_read_lock_read_only_dir_writable_lockfile(lock_dir, lock_path):
"""read-only directory, writable lockfile."""
touch(lock_path)
with read_only(lock_dir):
lock = lk.Lock(lock_path)
with lk.ReadTransaction(lock):
pass
with lk.WriteTransaction(lock):
pass
@pytest.mark.skipif(os.getuid() == 0, reason='user is root')
def test_read_lock_no_lockfile(lock_dir, lock_path):
"""read-only directory, no lockfile (so can't create)."""
with read_only(lock_dir):
lock = lk.Lock(lock_path)
with pytest.raises(lk.CantCreateLockError):
with lk.ReadTransaction(lock):
pass
with pytest.raises(lk.CantCreateLockError):
with lk.WriteTransaction(lock):
pass
def test_upgrade_read_to_write(private_lock_path):
"""Test that a read lock can be upgraded to a write lock.
Note that to upgrade a read lock to a write lock, you have the be the
only holder of a read lock. Client code needs to coordinate that for
shared locks. For this test, we use a private lock just to test that an
upgrade is possible.
"""
# ensure lock file exists the first time, so we open it read-only
# to begin with.
touch(private_lock_path)
lock = lk.Lock(private_lock_path)
assert lock._reads == 0
assert lock._writes == 0
lock.acquire_read()
assert lock._reads == 1
assert lock._writes == 0
assert lock._file.mode == 'r+'
lock.acquire_write()
assert lock._reads == 1
assert lock._writes == 1
assert lock._file.mode == 'r+'
lock.release_write()
assert lock._reads == 1
assert lock._writes == 0
assert lock._file.mode == 'r+'
lock.release_read()
assert lock._reads == 0
assert lock._writes == 0
assert lock._file is None
@pytest.mark.skipif(
os.environ.get('SPACK_TEST_SOLVER') == 'clingo',
reason='Test for Clingo are run in a container with root permissions'
)
def test_upgrade_read_to_write_fails_with_readonly_file(private_lock_path):
"""Test that read-only file can be read-locked but not write-locked."""
# ensure lock file exists the first time
touch(private_lock_path)
# open it read-only to begin with.
with read_only(private_lock_path):
lock = lk.Lock(private_lock_path)
assert lock._reads == 0
assert lock._writes == 0
lock.acquire_read()
assert lock._reads == 1
assert lock._writes == 0
assert lock._file.mode == 'r'
# upgrading to write should fail on the read-only file
with pytest.raises(lk.LockROFileError):
lock.acquire_write()
class ComplexAcquireAndRelease(object):
def __init__(self, lock_path):
self.lock_path = lock_path
def p1(self, barrier):
lock = lk.Lock(self.lock_path)
lock.acquire_write()
barrier.wait() # ---------------------------------------- 1
# others test timeout
barrier.wait() # ---------------------------------------- 2
lock.release_write() # release and others acquire read
barrier.wait() # ---------------------------------------- 3
with pytest.raises(lk.LockTimeoutError):
lock.acquire_write(lock_fail_timeout)
lock.acquire_read()
barrier.wait() # ---------------------------------------- 4
lock.release_read()
barrier.wait() # ---------------------------------------- 5
# p2 upgrades read to write
barrier.wait() # ---------------------------------------- 6
with pytest.raises(lk.LockTimeoutError):
lock.acquire_write(lock_fail_timeout)
with pytest.raises(lk.LockTimeoutError):
lock.acquire_read(lock_fail_timeout)
barrier.wait() # ---------------------------------------- 7
# p2 releases write and read
barrier.wait() # ---------------------------------------- 8
# p3 acquires read
barrier.wait() # ---------------------------------------- 9
# p3 upgrades read to write
barrier.wait() # ---------------------------------------- 10
with pytest.raises(lk.LockTimeoutError):
lock.acquire_write(lock_fail_timeout)
with pytest.raises(lk.LockTimeoutError):
lock.acquire_read(lock_fail_timeout)
barrier.wait() # ---------------------------------------- 11
# p3 releases locks
barrier.wait() # ---------------------------------------- 12
lock.acquire_read()
barrier.wait() # ---------------------------------------- 13
lock.release_read()
def p2(self, barrier):
lock = lk.Lock(self.lock_path)
# p1 acquires write
barrier.wait() # ---------------------------------------- 1
with pytest.raises(lk.LockTimeoutError):
lock.acquire_write(lock_fail_timeout)
with pytest.raises(lk.LockTimeoutError):
lock.acquire_read(lock_fail_timeout)
barrier.wait() # ---------------------------------------- 2
lock.acquire_read()
barrier.wait() # ---------------------------------------- 3
# p1 tests shared read
barrier.wait() # ---------------------------------------- 4
# others release reads
barrier.wait() # ---------------------------------------- 5
lock.acquire_write() # upgrade read to write
barrier.wait() # ---------------------------------------- 6
# others test timeout
barrier.wait() # ---------------------------------------- 7
lock.release_write() # release read AND write (need both)
lock.release_read()
barrier.wait() # ---------------------------------------- 8
# p3 acquires read
barrier.wait() # ---------------------------------------- 9
# p3 upgrades read to write
barrier.wait() # ---------------------------------------- 10
with pytest.raises(lk.LockTimeoutError):
lock.acquire_write(lock_fail_timeout)
with pytest.raises(lk.LockTimeoutError):
lock.acquire_read(lock_fail_timeout)
barrier.wait() # ---------------------------------------- 11
# p3 releases locks
barrier.wait() # ---------------------------------------- 12
lock.acquire_read()
barrier.wait() # ---------------------------------------- 13
lock.release_read()
def p3(self, barrier):
lock = lk.Lock(self.lock_path)
# p1 acquires write
barrier.wait() # ---------------------------------------- 1
with pytest.raises(lk.LockTimeoutError):
lock.acquire_write(lock_fail_timeout)
with pytest.raises(lk.LockTimeoutError):
lock.acquire_read(lock_fail_timeout)
barrier.wait() # ---------------------------------------- 2
lock.acquire_read()
barrier.wait() # ---------------------------------------- 3
# p1 tests shared read
barrier.wait() # ---------------------------------------- 4
lock.release_read()
barrier.wait() # ---------------------------------------- 5
# p2 upgrades read to write
barrier.wait() # ---------------------------------------- 6
with pytest.raises(lk.LockTimeoutError):
lock.acquire_write(lock_fail_timeout)
with pytest.raises(lk.LockTimeoutError):
lock.acquire_read(lock_fail_timeout)
barrier.wait() # ---------------------------------------- 7
# p2 releases write & read
barrier.wait() # ---------------------------------------- 8
lock.acquire_read()
barrier.wait() # ---------------------------------------- 9
lock.acquire_write()
barrier.wait() # ---------------------------------------- 10
# others test timeout
barrier.wait() # ---------------------------------------- 11
lock.release_read() # release read AND write in opposite
lock.release_write() # order from before on p2
barrier.wait() # ---------------------------------------- 12
lock.acquire_read()
barrier.wait() # ---------------------------------------- 13
lock.release_read()
#
# Longer test case that ensures locks are reusable. Ordering is
# enforced by barriers throughout -- steps are shown with numbers.
#
def test_complex_acquire_and_release_chain(lock_path):
test_chain = ComplexAcquireAndRelease(lock_path)
multiproc_test(test_chain.p1,
test_chain.p2,
test_chain.p3)
class AssertLock(lk.Lock):
"""Test lock class that marks acquire/release events."""
def __init__(self, lock_path, vals):
super(AssertLock, self).__init__(lock_path)
self.vals = vals
# assert hooks for subclasses
assert_acquire_read = lambda self: None
assert_acquire_write = lambda self: None
assert_release_read = lambda self: None
assert_release_write = lambda self: None
def acquire_read(self, timeout=None):
self.assert_acquire_read()
result = super(AssertLock, self).acquire_read(timeout)
self.vals['acquired_read'] = True
return result
def acquire_write(self, timeout=None):
self.assert_acquire_write()
result = super(AssertLock, self).acquire_write(timeout)
self.vals['acquired_write'] = True
return result
def release_read(self, release_fn=None):
self.assert_release_read()
result = super(AssertLock, self).release_read(release_fn)
self.vals['released_read'] = True
return result
def release_write(self, release_fn=None):
self.assert_release_write()
result = super(AssertLock, self).release_write(release_fn)
self.vals['released_write'] = True
return result
@pytest.mark.parametrize(
"transaction,type",
[(lk.ReadTransaction, "read"), (lk.WriteTransaction, "write")]
)
def test_transaction(lock_path, transaction, type):
class MockLock(AssertLock):
def assert_acquire_read(self):
assert not vals['entered_fn']
assert not vals['exited_fn']
def assert_release_read(self):
assert vals['entered_fn']
assert not vals['exited_fn']
def assert_acquire_write(self):
assert not vals['entered_fn']
assert not vals['exited_fn']
def assert_release_write(self):
assert vals['entered_fn']
assert not vals['exited_fn']
def enter_fn():
# assert enter_fn is called while lock is held
assert vals['acquired_%s' % type]
vals['entered_fn'] = True
def exit_fn(t, v, tb):
# assert exit_fn is called while lock is held
assert not vals['released_%s' % type]
vals['exited_fn'] = True
vals['exception'] = (t or v or tb)
vals = collections.defaultdict(lambda: False)
lock = MockLock(lock_path, vals)
with transaction(lock, acquire=enter_fn, release=exit_fn):
assert vals['acquired_%s' % type]
assert not vals['released_%s' % type]
assert vals['entered_fn']
assert vals['exited_fn']
assert vals['acquired_%s' % type]
assert vals['released_%s' % type]
assert not vals['exception']
@pytest.mark.parametrize(
"transaction,type",
[(lk.ReadTransaction, "read"), (lk.WriteTransaction, "write")]
)
def test_transaction_with_exception(lock_path, transaction, type):
class MockLock(AssertLock):
def assert_acquire_read(self):
assert not vals['entered_fn']
assert not vals['exited_fn']
def assert_release_read(self):
assert vals['entered_fn']
assert not vals['exited_fn']
def assert_acquire_write(self):
assert not vals['entered_fn']
assert not vals['exited_fn']
def assert_release_write(self):
assert vals['entered_fn']
assert not vals['exited_fn']
def enter_fn():
assert vals['acquired_%s' % type]
vals['entered_fn'] = True
def exit_fn(t, v, tb):
assert not vals['released_%s' % type]
vals['exited_fn'] = True
vals['exception'] = (t or v or tb)
return exit_result
exit_result = False
vals = collections.defaultdict(lambda: False)
lock = MockLock(lock_path, vals)
with pytest.raises(Exception):
with transaction(lock, acquire=enter_fn, release=exit_fn):
raise Exception()
assert vals['entered_fn']
assert vals['exited_fn']
assert vals['exception']
# test suppression of exceptions from exit_fn
exit_result = True
vals.clear()
# should not raise now.
with transaction(lock, acquire=enter_fn, release=exit_fn):
raise Exception()
assert vals['entered_fn']
assert vals['exited_fn']
assert vals['exception']
@pytest.mark.parametrize(
"transaction,type",
[(lk.ReadTransaction, "read"), (lk.WriteTransaction, "write")]
)
def test_transaction_with_context_manager(lock_path, transaction, type):
class MockLock(AssertLock):
def assert_acquire_read(self):
assert not vals['entered_ctx']
assert not vals['exited_ctx']
def assert_release_read(self):
assert vals['entered_ctx']
assert vals['exited_ctx']
def assert_acquire_write(self):
assert not vals['entered_ctx']
assert not vals['exited_ctx']
def assert_release_write(self):
assert vals['entered_ctx']
assert vals['exited_ctx']
class TestContextManager(object):
def __enter__(self):
vals['entered_ctx'] = True
def __exit__(self, t, v, tb):
assert not vals['released_%s' % type]
vals['exited_ctx'] = True
vals['exception_ctx'] = (t or v or tb)
return exit_ctx_result
def exit_fn(t, v, tb):
assert not vals['released_%s' % type]
vals['exited_fn'] = True
vals['exception_fn'] = (t or v or tb)
return exit_fn_result
exit_fn_result, exit_ctx_result = False, False
vals = collections.defaultdict(lambda: False)
lock = MockLock(lock_path, vals)
with transaction(lock, acquire=TestContextManager, release=exit_fn):
pass
assert vals['entered_ctx']
assert vals['exited_ctx']
assert vals['exited_fn']
assert not vals['exception_ctx']
assert not vals['exception_fn']
vals.clear()
with transaction(lock, acquire=TestContextManager):
pass
assert vals['entered_ctx']
assert vals['exited_ctx']
assert not vals['exited_fn']
assert not vals['exception_ctx']
assert not vals['exception_fn']
# below are tests for exceptions with and without suppression
def assert_ctx_and_fn_exception(raises=True):
vals.clear()
if raises:
with pytest.raises(Exception):
with transaction(
lock, acquire=TestContextManager, release=exit_fn):
raise Exception()
else:
with transaction(
lock, acquire=TestContextManager, release=exit_fn):
raise Exception()
assert vals['entered_ctx']
assert vals['exited_ctx']
assert vals['exited_fn']
assert vals['exception_ctx']
assert vals['exception_fn']
def assert_only_ctx_exception(raises=True):
vals.clear()
if raises:
with pytest.raises(Exception):
with transaction(lock, acquire=TestContextManager):
raise Exception()
else:
with transaction(lock, acquire=TestContextManager):
raise Exception()
assert vals['entered_ctx']
assert vals['exited_ctx']
assert not vals['exited_fn']
assert vals['exception_ctx']
assert not vals['exception_fn']
# no suppression
assert_ctx_and_fn_exception(raises=True)
assert_only_ctx_exception(raises=True)
# suppress exception only in function
exit_fn_result, exit_ctx_result = True, False
assert_ctx_and_fn_exception(raises=False)
assert_only_ctx_exception(raises=True)
# suppress exception only in context
exit_fn_result, exit_ctx_result = False, True
assert_ctx_and_fn_exception(raises=False)
assert_only_ctx_exception(raises=False)
# suppress exception in function and context
exit_fn_result, exit_ctx_result = True, True
assert_ctx_and_fn_exception(raises=False)
assert_only_ctx_exception(raises=False)
def test_nested_write_transaction(lock_path):
"""Ensure that the outermost write transaction writes."""
def write(t, v, tb):
vals['wrote'] = True
vals = collections.defaultdict(lambda: False)
lock = AssertLock(lock_path, vals)
# write/write
with lk.WriteTransaction(lock, release=write):
assert not vals['wrote']
with lk.WriteTransaction(lock, release=write):
assert not vals['wrote']
assert not vals['wrote']
assert vals['wrote']
# read/write
vals.clear()
with lk.ReadTransaction(lock):
assert not vals['wrote']
with lk.WriteTransaction(lock, release=write):
assert not vals['wrote']
assert vals['wrote']
# write/read/write
vals.clear()
with lk.WriteTransaction(lock, release=write):
assert not vals['wrote']
with lk.ReadTransaction(lock):
assert not vals['wrote']
with lk.WriteTransaction(lock, release=write):
assert not vals['wrote']
assert not vals['wrote']
assert not vals['wrote']
assert vals['wrote']
# read/write/read/write
vals.clear()
with lk.ReadTransaction(lock):
with lk.WriteTransaction(lock, release=write):
assert not vals['wrote']
with lk.ReadTransaction(lock):
assert not vals['wrote']
with lk.WriteTransaction(lock, release=write):
assert not vals['wrote']
assert not vals['wrote']
assert not vals['wrote']
assert vals['wrote']
def test_nested_reads(lock_path):
"""Ensure that write transactions won't re-read data."""
def read():
vals['read'] += 1
vals = collections.defaultdict(lambda: 0)
lock = AssertLock(lock_path, vals)
# read/read
vals.clear()
assert vals['read'] == 0
with lk.ReadTransaction(lock, acquire=read):
assert vals['read'] == 1
with lk.ReadTransaction(lock, acquire=read):
assert vals['read'] == 1
# write/write
vals.clear()
assert vals['read'] == 0
with lk.WriteTransaction(lock, acquire=read):
assert vals['read'] == 1
with lk.WriteTransaction(lock, acquire=read):
assert vals['read'] == 1
# read/write
vals.clear()
assert vals['read'] == 0
with lk.ReadTransaction(lock, acquire=read):
assert vals['read'] == 1
with lk.WriteTransaction(lock, acquire=read):
assert vals['read'] == 1
# write/read/write
vals.clear()
assert vals['read'] == 0
with lk.WriteTransaction(lock, acquire=read):
assert vals['read'] == 1
with lk.ReadTransaction(lock, acquire=read):
assert vals['read'] == 1
with lk.WriteTransaction(lock, acquire=read):
assert vals['read'] == 1
# read/write/read/write
vals.clear()
assert vals['read'] == 0
with lk.ReadTransaction(lock, acquire=read):
assert vals['read'] == 1
with lk.WriteTransaction(lock, acquire=read):
assert vals['read'] == 1
with lk.ReadTransaction(lock, acquire=read):
assert vals['read'] == 1
with lk.WriteTransaction(lock, acquire=read):
assert vals['read'] == 1
class LockDebugOutput(object):
def __init__(self, lock_path):
self.lock_path = lock_path
self.host = socket.getfqdn()
def p1(self, barrier, q1, q2):
# exchange pids
p1_pid = os.getpid()
q1.put(p1_pid)
p2_pid = q2.get()
# set up lock
lock = lk.Lock(self.lock_path, debug=True)
with lk.WriteTransaction(lock):
# p1 takes write lock and writes pid/host to file
barrier.wait() # ------------------------------------ 1
assert lock.pid == p1_pid
assert lock.host == self.host
# wait for p2 to verify contents of file
barrier.wait() # ---------------------------------------- 2
# wait for p2 to take a write lock
barrier.wait() # ---------------------------------------- 3
# verify pid/host info again
with lk.ReadTransaction(lock):
assert lock.old_pid == p1_pid
assert lock.old_host == self.host
assert lock.pid == p2_pid
assert lock.host == self.host
barrier.wait() # ---------------------------------------- 4
def p2(self, barrier, q1, q2):
# exchange pids
p2_pid = os.getpid()
p1_pid = q1.get()
q2.put(p2_pid)
# set up lock
lock = lk.Lock(self.lock_path, debug=True)
# p1 takes write lock and writes pid/host to file
barrier.wait() # ---------------------------------------- 1
# verify that p1 wrote information to lock file
with lk.ReadTransaction(lock):
assert lock.pid == p1_pid
assert lock.host == self.host
barrier.wait() # ---------------------------------------- 2
# take a write lock on the file and verify pid/host info
with lk.WriteTransaction(lock):
assert lock.old_pid == p1_pid
assert lock.old_host == self.host
assert lock.pid == p2_pid
assert lock.host == self.host
barrier.wait() # ------------------------------------ 3
# wait for p1 to verify pid/host info
barrier.wait() # ---------------------------------------- 4
def test_lock_debug_output(lock_path):
test_debug = LockDebugOutput(lock_path)
q1, q2 = Queue(), Queue()
local_multiproc_test(test_debug.p2, test_debug.p1, extra_args=(q1, q2))
def test_lock_with_no_parent_directory(tmpdir):
"""Make sure locks work even when their parent directory does not exist."""
with tmpdir.as_cwd():
lock = lk.Lock('foo/bar/baz/lockfile')
with lk.WriteTransaction(lock):
pass
def test_lock_in_current_directory(tmpdir):
"""Make sure locks work even when their parent directory does not exist."""
with tmpdir.as_cwd():
# test we can create a lock in the current directory
lock = lk.Lock('lockfile')
for i in range(10):
with lk.ReadTransaction(lock):
pass
with lk.WriteTransaction(lock):
pass
# and that we can do the same thing after it's already there
lock = lk.Lock('lockfile')
for i in range(10):
with lk.ReadTransaction(lock):
pass
with lk.WriteTransaction(lock):
pass
def test_attempts_str():
assert lk._attempts_str(0, 0) == ''
assert lk._attempts_str(0.12, 1) == ''
assert lk._attempts_str(12.345, 2) == ' after 12.35s and 2 attempts'
def test_lock_str():
lock = lk.Lock('lockfile')
lockstr = str(lock)
assert 'lockfile[0:0]' in lockstr
assert 'timeout=None' in lockstr
assert '#reads=0, #writes=0' in lockstr
def test_downgrade_write_okay(tmpdir):
"""Test the lock write-to-read downgrade operation."""
with tmpdir.as_cwd():
lock = lk.Lock('lockfile')
lock.acquire_write()
lock.downgrade_write_to_read()
assert lock._reads == 1
assert lock._writes == 0
def test_downgrade_write_fails(tmpdir):
"""Test failing the lock write-to-read downgrade operation."""
with tmpdir.as_cwd():
lock = lk.Lock('lockfile')
lock.acquire_read()
msg = 'Cannot downgrade lock from write to read on file: lockfile'
with pytest.raises(lk.LockDowngradeError, match=msg):
lock.downgrade_write_to_read()
@pytest.mark.parametrize("err_num,err_msg",
[(errno.EACCES, "Fake EACCES error"),
(errno.EAGAIN, "Fake EAGAIN error"),
(errno.ENOENT, "Fake ENOENT error")])
def test_poll_lock_exception(tmpdir, monkeypatch, err_num, err_msg):
"""Test poll lock exception handling."""
def _lockf(fd, cmd, len, start, whence):
raise IOError(err_num, err_msg)
with tmpdir.as_cwd():
lockfile = 'lockfile'
lock = lk.Lock(lockfile)
touch(lockfile)
monkeypatch.setattr(fcntl, 'lockf', _lockf)
if err_num in [errno.EAGAIN, errno.EACCES]:
assert not lock._poll_lock(fcntl.LOCK_EX)
else:
with pytest.raises(IOError, match=err_msg):
lock._poll_lock(fcntl.LOCK_EX)
def test_upgrade_read_okay(tmpdir):
"""Test the lock read-to-write upgrade operation."""
with tmpdir.as_cwd():
lock = lk.Lock('lockfile')
lock.acquire_read()
lock.upgrade_read_to_write()
assert lock._reads == 0
assert lock._writes == 1
def test_upgrade_read_fails(tmpdir):
"""Test failing the lock read-to-write upgrade operation."""
with tmpdir.as_cwd():
lock = lk.Lock('lockfile')
lock.acquire_write()
msg = 'Cannot upgrade lock from read to write on file: lockfile'
with pytest.raises(lk.LockUpgradeError, match=msg):
lock.upgrade_read_to_write()
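if __name__ == '__main__':
    # Hedged, illustrative sketch (not part of the original Spack test suite):
    # shows how the process snippets defined above compose into a scenario.
    # The first snippet grabs an exclusive lock and the second asserts that
    # its own acquire attempt times out. Assumes the llnl.util packages
    # imported at the top of this file are importable outside of pytest.
    demo_dir = tempfile.mkdtemp()
    try:
        demo_lock = os.path.join(demo_dir, 'lockfile')
        local_multiproc_test(AcquireWrite(demo_lock), TimeoutWrite(demo_lock))
        print('demo: write lock correctly blocked the second process')
    finally:
        shutil.rmtree(demo_dir)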
|
[] |
[] |
[
"SPACK_TEST_SOLVER"
] |
[]
|
["SPACK_TEST_SOLVER"]
|
python
| 1 | 0 | |
parsers/http/actions/sc-inc-gpc0.go
|
/*
Copyright 2019 HAProxy Technologies
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//nolint:dupl
package actions
import (
"fmt"
"strings"
"github.com/deyunluo/config-parser/v4/common"
)
type ScIncGpc0 struct {
ID string
Cond string
CondTest string
Comment string
}
func (f *ScIncGpc0) Parse(parts []string, comment string) error {
if comment != "" {
f.Comment = comment
}
f.ID = strings.TrimPrefix(parts[1], "sc-inc-gpc0(")
f.ID = strings.TrimRight(f.ID, ")")
if len(parts) >= 4 {
_, condition := common.SplitRequest(parts[2:])
if len(condition) > 1 {
f.Cond = condition[0]
f.CondTest = strings.Join(condition[1:], " ")
}
return nil
} else if len(parts) == 2 {
return nil
}
return fmt.Errorf("not enough params")
}
func (f *ScIncGpc0) String() string {
var result strings.Builder
result.WriteString("sc-inc-gpc0(")
result.WriteString(f.ID)
result.WriteString(")")
if f.Cond != "" {
result.WriteString(" ")
result.WriteString(f.Cond)
result.WriteString(" ")
result.WriteString(f.CondTest)
}
return result.String()
}
func (f *ScIncGpc0) GetComment() string {
return f.Comment
}
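// exampleScIncGpc0RoundTrip is an illustrative, hedged sketch (not part of the
// original parser): it shows the parse/serialize round trip this action is
// expected to support for a configuration line such as
//
//	http-request sc-inc-gpc0(1) if FALSE
//
// It assumes common.SplitRequest splits the trailing tokens at the
// "if"/"unless" keyword, which is what the Parse method above relies on.
func exampleScIncGpc0RoundTrip() (string, error) {
	action := &ScIncGpc0{}
	if err := action.Parse([]string{"http-request", "sc-inc-gpc0(1)", "if", "FALSE"}, ""); err != nil {
		return "", err
	}
	// Expected to serialize back to "sc-inc-gpc0(1) if FALSE".
	return action.String(), nil
}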
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
forgerock/forgerock_test.go
|
package forgerock
import (
"fmt"
"os"
"testing"
)
func TestLogin(t *testing.T) {
appUrl := os.Getenv("APP_URL")
forgerockUsername := os.Getenv("FORGEROCK_USERNAME")
forgerockPassword := os.Getenv("FORGEROCK_PASSWORD")
type args struct {
appUrl string
credentials Credentials
}
tests := []struct {
name string
args args
wantRestyClient bool
wantErr bool
}{
{
name: "Successful login",
args: args{
appUrl: appUrl,
credentials: Credentials{
Username: forgerockUsername,
Password: forgerockPassword,
},
},
wantRestyClient: true,
wantErr: false,
},
{
name: "Wrong credentials",
args: args{
appUrl: appUrl,
credentials: Credentials{
Username: "forgerock-go-test-username",
Password: "forgerock-go-test-password",
},
},
wantRestyClient: false,
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
forgerock, err := New(os.Getenv("FORGEROCK_BASE_URL"))
if err != nil {
t.Errorf("New() error = %v", err)
return
}
restyClient, err := forgerock.Login(tt.args.appUrl, tt.args.credentials)
if (err != nil) != tt.wantErr {
t.Errorf("Login() error = %v, wantErr %v", err, tt.wantErr)
return
}
if (restyClient != nil) != tt.wantRestyClient {
t.Errorf("Login() restyClient = %v, wantRestyClient %v", restyClient, tt.wantRestyClient)
}
})
}
}
func TestZuulTenants(t *testing.T) {
forgerockBaseUrl := os.Getenv("FORGEROCK_BASE_URL")
appUrl := os.Getenv("APP_URL")
credentials := Credentials{
Username: os.Getenv("FORGEROCK_USERNAME"),
Password: os.Getenv("FORGEROCK_PASSWORD"),
}
forgerock, err := New(forgerockBaseUrl)
if err != nil {
t.Errorf("New() error = %v", err)
return
}
client, err := forgerock.Login(appUrl, credentials)
if err != nil {
t.Errorf("Login() error = %v", err)
return
}
resp, err := client.R().
SetHeader("Accept", "application/json").
Get(appUrl + "/api/tenants")
if err != nil {
t.Errorf("Tenants error = %v", err)
return
}
fmt.Println(resp.String())
}
|
[
"\"APP_URL\"",
"\"FORGEROCK_USERNAME\"",
"\"FORGEROCK_PASSWORD\"",
"\"FORGEROCK_BASE_URL\"",
"\"FORGEROCK_BASE_URL\"",
"\"APP_URL\"",
"\"FORGEROCK_USERNAME\"",
"\"FORGEROCK_PASSWORD\""
] |
[] |
[
"FORGEROCK_BASE_URL",
"APP_URL",
"FORGEROCK_USERNAME",
"FORGEROCK_PASSWORD"
] |
[]
|
["FORGEROCK_BASE_URL", "APP_URL", "FORGEROCK_USERNAME", "FORGEROCK_PASSWORD"]
|
go
| 4 | 0 | |
lib/partial_mock.py
|
#!/usr/bin/python
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Contains functionality used to implement a partial mock."""
import collections
import logging
import mock
import os
import re
from chromite.lib import cros_build_lib
from chromite.lib import osutils
class Comparator(object):
"""Base class for all comparators."""
def Match(self, arg):
"""Match the comparator against an argument."""
raise NotImplementedError('method must be implemented by a subclass.')
def Equals(self, rhs):
"""Returns whether rhs compares the same thing."""
return type(self) == type(rhs) and self.__dict__ == rhs.__dict__
def __eq__(self, rhs):
return self.Equals(rhs)
def __ne__(self, rhs):
return not self.Equals(rhs)
class In(Comparator):
"""Checks whether an item (or key) is in a list (or dict) parameter."""
def __init__(self, key):
"""Initialize.
Arguments:
key: Any thing that could be in a list or a key in a dict
"""
Comparator.__init__(self)
self._key = key
def Match(self, arg):
try:
return self._key in arg
except TypeError:
return False
def __repr__(self):
return '<sequence or map containing %r>' % str(self._key)
class Regex(Comparator):
"""Checks if a string matches a regular expression."""
def __init__(self, pattern, flags=0):
"""Initialize.
Arguments:
pattern: is the regular expression to search for
flags: passed to re.compile function as the second argument
"""
Comparator.__init__(self)
self.pattern = pattern
self.flags = flags
self.regex = re.compile(pattern, flags=flags)
def Match(self, arg):
try:
return self.regex.search(arg) is not None
except TypeError:
return False
def __repr__(self):
s = '<regular expression %r' % self.regex.pattern
if self.regex.flags:
s += ', flags=%d' % self.regex.flags
s += '>'
return s
class ListRegex(Regex):
"""Checks if an iterable of strings matches a regular expression."""
@staticmethod
def _ProcessArg(arg):
if not isinstance(arg, basestring):
return ' '.join(arg)
return arg
def Match(self, arg):
try:
return self.regex.search(self._ProcessArg(arg)) is not None
except TypeError:
return False
class Ignore(Comparator):
"""Used when we don't care about an argument of a method call."""
def Match(self, _arg):
return True
def __repr__(self):
return '<IgnoreArg>'
def _RecursiveCompare(lhs, rhs):
"""Compare parameter specs recursively.
Arguments:
lhs, rhs: Function parameter specs to compare. Comparator instances in
lhs are matched against the corresponding rhs values via Match().
"""
if isinstance(lhs, Comparator):
return lhs.Match(rhs)
elif type(lhs) != type(rhs):
return False
elif isinstance(lhs, (tuple, list)):
return (len(lhs) == len(rhs) and
all(_RecursiveCompare(i, j) for i, j in zip(lhs, rhs)))
elif isinstance(lhs, dict):
return _RecursiveCompare(sorted(lhs.iteritems()), sorted(rhs.iteritems()))
else:
return lhs == rhs
def ListContains(small, big, strict=False):
"""Looks for a sublist within a bigger list.
Arguments:
small: The sublist to search for.
big: The list to search in.
strict: If True, all items in list must be adjacent.
"""
if strict:
for i in xrange(len(big) - len(small) + 1):
if _RecursiveCompare(small, big[i:i + len(small)]):
return True
return False
else:
j = 0
for i in xrange(len(small)):
for j in xrange(j, len(big)):
if _RecursiveCompare(small[i], big[j]):
j += 1
break
else:
return False
return True
def DictContains(small, big):
"""Looks for a subset within a dictionary.
Arguments:
small: The sub-dict to search for.
big: The dict to search in.
"""
for k, v in small.iteritems():
if k not in big or not _RecursiveCompare(v, big[k]):
return False
return True
class MockedCallResults(object):
"""Implements internal result specification for partial mocks.
Used with the PartialMock class.
Internal results are different from external results (return values,
side effects, exceptions, etc.) for functions. Internal results are
*used* by the partial mock to generate external results. Often internal
results represent the external results of the dependencies of the function
being partially mocked. Of course, the partial mock can just pass through
the internal results to become external results.
"""
Params = collections.namedtuple('Params', ['args', 'kwargs'])
MockedCall = collections.namedtuple(
'MockedCall', ['params', 'strict', 'result', 'side_effect'])
def __init__(self, name):
"""Initialize.
Arguments:
name: The name given to the mock. Will be used in debug output.
"""
self.name = name
self.mocked_calls = []
self.default_result, self.default_side_effect = None, None
@staticmethod
def AssertArgs(args, kwargs):
"""Verify arguments are of expected type."""
assert isinstance(args, tuple)
if kwargs:
assert isinstance(kwargs, dict)
def AddResultForParams(self, args, result, kwargs=None, side_effect=None,
strict=True):
"""Record the internal results of a given partial mock call.
Arguments:
args: A tuple containing the positional args an invocation must have for
it to match the internal result. The list can contain instances of
meta-args (such as IgnoreArg, Regex, In, etc.). Positional argument
matching is always *strict*, meaning extra positional arguments in
the invocation are not allowed.
result: The internal result that will be matched for the command
invocation specified.
kwargs: A dictionary containing the keyword args an invocation must have
for it to match the internal result. The dictionary can contain
instances of meta-args (such as IgnoreArg, Regex, In, etc.). Keyword
argument matching is by default *strict*, but can be modified by the
|strict| argument.
side_effect: A functor that gets called every time a partially mocked
function is invoked. The arguments the partial mock is invoked with are
passed to the functor. This is similar to how side effects work for
mocks.
strict: Specifies whether keyword args are matched strictly. With strict
matching turned on, any keyword args a partial mock is invoked with that
are not specified in |kwargs| will cause the match to fail.
"""
self.AssertArgs(args, kwargs)
if kwargs is None:
kwargs = {}
params = self.Params(args=args, kwargs=kwargs)
dup, filtered = cros_build_lib.PredicateSplit(
lambda mc: mc.params == params, self.mocked_calls)
new = self.MockedCall(params=params, strict=strict, result=result,
side_effect=side_effect)
filtered.append(new)
self.mocked_calls = filtered
if dup:
logging.debug('%s: replacing mock for arguments %r:\n%r -> %r',
self.name, params, dup, new)
def SetDefaultResult(self, result, side_effect=None):
"""Set the default result for an unmatched partial mock call.
Arguments:
result, side_effect: See AddResultsForParams.
"""
self.default_result, self.default_side_effect = result, side_effect
def LookupResult(self, args, kwargs=None, hook_args=None, hook_kwargs=None):
"""For a given mocked function call lookup the recorded internal results.
Arguments:
args: A tuple containing positional args the function was called with.
kwargs: A dict containing keyword args the function was called with.
hook_args: A list of positional args to call the hook with.
hook_kwargs: A dict of key/value args to call the hook with.
Returns:
The recorded result for the invocation.
Raises:
AssertionError when the call is not mocked, or when there is more
than one mock that matches.
"""
def filter_fn(mc):
if mc.strict:
return _RecursiveCompare(mc.params, params)
return (DictContains(mc.params.kwargs, kwargs) and
_RecursiveCompare(mc.params.args, args))
self.AssertArgs(args, kwargs)
if kwargs is None:
kwargs = {}
params = self.Params(args, kwargs)
matched, _ = cros_build_lib.PredicateSplit(filter_fn, self.mocked_calls)
if len(matched) > 1:
raise AssertionError(
"%s: args %r matches more than one mock:\n%s"
% (self.name, params, '\n'.join([repr(c) for c in matched])))
elif matched:
side_effect, result = matched[0].side_effect, matched[0].result
elif (self.default_result, self.default_side_effect) != (None, None):
side_effect, result = self.default_side_effect, self.default_result
else:
raise AssertionError("%s: %r not mocked!" % (self.name, params))
if side_effect:
assert hook_args is not None
assert hook_kwargs is not None
hook_result = side_effect(*hook_args, **hook_kwargs)
if hook_result is not None:
return hook_result
return result
class PartialMock(object):
"""Provides functionality for partially mocking out a function or method.
Partial mocking is useful in cases where the side effects of a function or
method are complex, and so re-using the logic of the function with
*dependencies* mocked out is preferred over mocking out the entire function
and re-implementing the side effect (return value, state modification) logic
in the test. It is also useful for creating re-usable mocks.
"""
TARGET = None
ATTRS = None
def __init__(self, create_tempdir=False):
"""Initialize.
Arguments:
create_tempdir: If set to True, the partial mock will create its own
temporary directory when start() is called, and will set self.tempdir to
the path of the directory. The directory is deleted when stop() is
called.
"""
self.backup = {}
self.patchers = {}
self.patched = {}
self.create_tempdir = create_tempdir
# Set when start() is called.
self.tempdir = None
self.__saved_env__ = None
self.started = False
self._results = {}
for attr in self.ATTRS:
self._results[attr] = MockedCallResults(attr)
def __enter__(self):
return self.start()
def __exit__(self, exc_type, exc_value, traceback):
self.stop()
def PreStart(self):
"""Called at the beginning of start(). Child classes can override this.
If __init__ was called with |create_tempdir| set, then self.tempdir will
point to an existing temporary directory when this function is called.
"""
def PreStop(self):
"""Called at the beginning of stop(). Child classes can override this.
If __init__ was called with |create_tempdir| set, then self.tempdir will
not be deleted until after this function returns.
"""
def _start(self):
chunks = self.TARGET.rsplit('.', 1)
module = cros_build_lib.load_module(chunks[0])
cls = getattr(module, chunks[1])
for attr in self.ATTRS:
self.backup[attr] = getattr(cls, attr)
src_attr = '_target%s' % attr if attr.startswith('__') else attr
if hasattr(self.backup[attr], 'reset_mock'):
raise AssertionError(
'You are trying to nest mock contexts - this is currently '
'unsupported by PartialMock.')
if callable(self.backup[attr]):
patcher = mock.patch.object(cls, attr, autospec=True,
side_effect=getattr(self, src_attr))
else:
patcher = mock.patch.object(cls, attr, getattr(self, src_attr))
self.patched[attr] = patcher.start()
self.patchers[attr] = patcher
return self
def start(self):
"""Activates the mock context."""
# pylint: disable=W0212
try:
if self.create_tempdir:
osutils._TempDirSetup(self)
self.__saved_env__ = os.environ.copy()
self.started = True
self.PreStart()
return self._start()
except:
self.stop()
raise
def _stop(self):
cros_build_lib.SafeRun([p.stop for p in self.patchers.itervalues()])
def stop(self):
"""Restores namespace to the unmocked state."""
# pylint: disable=W0212
try:
if self.__saved_env__ is not None:
osutils.SetEnvironment(self.__saved_env__)
if self.started:
cros_build_lib.SafeRun([self.PreStop, self._stop])
finally:
self.started = False
if getattr(self, 'tempdir', None):
osutils._TempDirTearDown(self, False)
def UnMockAttr(self, attr):
"""Unsetting the mock of an attribute/function."""
self.patchers.pop(attr).stop()
def CheckAttr(f):
"""Automatically set mock_attr based on class default.
This function decorator automatically sets the mock_attr keyword argument
based on the class default. The mock_attr specifies which mocked attribute
a given function is referring to.
Raises an AssertionError if mock_attr is left unspecified.
"""
def new_f(self, *args, **kwargs):
mock_attr = kwargs.pop('mock_attr', None)
if mock_attr is None:
mock_attr = self.DEFAULT_ATTR
if self.DEFAULT_ATTR is None:
raise AssertionError(
'mock_attr not specified, and no default configured.')
return f(self, *args, mock_attr=mock_attr, **kwargs)
return new_f
class PartialCmdMock(PartialMock):
"""Base class for mocking functions that wrap command line functionality.
Implements mocking for functions that shell out. The internal results are
'returncode', 'output', 'error'.
"""
CmdResult = collections.namedtuple(
'MockResult', ['returncode', 'output', 'error'])
DEFAULT_ATTR = None
@CheckAttr
def SetDefaultCmdResult(self, returncode=0, output='', error='',
side_effect=None, mock_attr=None):
"""Specify the default command result if no command is matched.
Arguments:
returncode, output, error: See AddCmdResult.
side_effect: See MockedCallResults.AddResultForParams
"""
result = self.CmdResult(returncode, output, error)
self._results[mock_attr].SetDefaultResult(result, side_effect)
@CheckAttr
def AddCmdResult(self, cmd, returncode=0, output='', error='',
kwargs=None, strict=False, side_effect=None, mock_attr=None):
"""Specify the result to simulate for a given command.
Arguments:
cmd: The command string or list to record a result for.
returncode: The returncode of the command (on the command line).
output: The stdout output of the command.
error: The stderr output of the command.
kwargs: Keyword arguments that the function needs to be invoked with.
strict: Defaults to False. See MockedCallResults.AddResultForParams.
side_effect: See MockedCallResults.AddResultForParams
"""
result = self.CmdResult(returncode, output, error)
self._results[mock_attr].AddResultForParams(
(cmd,), result, kwargs=kwargs, side_effect=side_effect, strict=strict)
@CheckAttr
def CommandContains(self, args, cmd_arg_index=-1, mock_attr=None, **kwargs):
"""Verify that at least one command contains the specified args.
Arguments:
args: Set of expected command-line arguments.
cmd_arg_index: The index of the command list in the positional call_args.
Defaults to the last positional argument.
kwargs: Set of expected keyword arguments.
"""
for call_args, call_kwargs in self.patched[mock_attr].call_args_list:
if (ListContains(args, call_args[cmd_arg_index]) and
DictContains(kwargs, call_kwargs)):
return True
return False
@CheckAttr
def assertCommandContains(self, args=(), expected=True, mock_attr=None,
**kwargs):
"""Assert that RunCommand was called with the specified args.
This verifies that at least one of the RunCommand calls contains the
specified arguments on the command line.
Arguments:
args: Set of expected command-line arguments.
expected: If False, instead verify that none of the RunCommand calls
contained the specified arguments.
**kwargs: Set of expected keyword arguments.
"""
if expected != self.CommandContains(args, **kwargs):
if expected:
msg = 'Expected to find %r in any of:\n%s'
else:
msg = 'Expected to not find %r in any of:\n%s'
patched = self.patched[mock_attr]
cmds = '\n'.join(repr(x) for x in patched.call_args_list)
raise AssertionError(msg % (mock.call(args, **kwargs), cmds))
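# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of how a PartialCmdMock subclass is typically wired up and
# used in a test. The subclass, its TARGET, and the MockedCallResults
# LookupResult() call below are assumptions made for illustration only, not a
# definitive chromite API.
def _example_partial_cmd_mock_usage():
  """Hypothetical sketch: canned results plus assertCommandContains."""
  class RunCommandMock(PartialCmdMock):
    TARGET = 'chromite.lib.cros_build_lib'
    ATTRS = ('RunCommand',)
    DEFAULT_ATTR = 'RunCommand'
    def RunCommand(self, cmd, *args, **kwargs):
      # Return whatever was registered via AddCmdResult/SetDefaultCmdResult.
      return self._results['RunCommand'].LookupResult((cmd,), kwargs=kwargs)
  cmd_mock = RunCommandMock()
  cmd_mock.SetDefaultCmdResult(returncode=0, output='')
  cmd_mock.AddCmdResult(['git', 'status'], returncode=0, output='clean')
  cmd_mock.start()
  try:
    # Code under test would shell out through cros_build_lib.RunCommand here.
    cros_build_lib.RunCommand(['git', 'status'])
    cmd_mock.assertCommandContains(['git', 'status'])
  finally:
    cmd_mock.stop()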
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
test/unit/test_runner_config.py
|
import os
import re
from functools import partial
from six import string_types, b
from pexpect import TIMEOUT, EOF
from pytest import raises
from mock import patch
from mock import Mock
from ansible_runner.runner_config import RunnerConfig
from ansible_runner.loader import ArtifactLoader
from ansible_runner.exceptions import ConfigurationError
try:
Pattern = re._pattern_type
except AttributeError:
# Python 3.7
Pattern = re.Pattern
def load_file_side_effect(path, value=None, *args, **kwargs):
if args[0] == path:
if value:
return value
raise ConfigurationError
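# Usage pattern (see the tests below): bind the expected artifact path and a
# canned value with partial(), then patch ArtifactLoader.load_file with that
# side effect. Only the bound path returns the value; every other path raises
# ConfigurationError, which simulates the file being absent from
# private_data_dir.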
def test_runner_config_init_defaults():
rc = RunnerConfig('/')
assert rc.private_data_dir == '/'
assert rc.ident is not None
assert rc.playbook is None
assert rc.inventory is None
assert rc.limit is None
assert rc.module is None
assert rc.module_args is None
assert rc.artifact_dir == os.path.join('/artifacts/%s' % rc.ident)
assert isinstance(rc.loader, ArtifactLoader)
def test_runner_config_with_artifact_dir():
rc = RunnerConfig('/', artifact_dir='/this-is-some-dir')
assert rc.artifact_dir == os.path.join('/this-is-some-dir', 'artifacts/%s' % rc.ident)
def test_runner_config_init_with_ident():
rc = RunnerConfig('/', ident='test')
assert rc.private_data_dir == '/'
assert rc.ident == 'test'
assert rc.playbook is None
assert rc.inventory is None
assert rc.limit is None
assert rc.module is None
assert rc.module_args is None
assert rc.artifact_dir == os.path.join('/artifacts/test')
assert isinstance(rc.loader, ArtifactLoader)
def test_prepare_environment_vars_only_strings():
rc = RunnerConfig(private_data_dir="/")
value = dict(A=1, B=True, C="foo")
envvar_side_effect = partial(load_file_side_effect, 'env/envvars', value)
with patch.object(rc.loader, 'load_file', side_effect=envvar_side_effect):
rc.prepare_env()
assert 'A' in rc.env
assert isinstance(rc.env['A'], string_types)
assert 'B' in rc.env
assert isinstance(rc.env['B'], string_types)
assert 'C' in rc.env
assert isinstance(rc.env['C'], string_types)
def test_prepare_env_ad_hoc_command():
rc = RunnerConfig(private_data_dir="/")
value = {'AD_HOC_COMMAND_ID': 'teststring'}
envvar_side_effect = partial(load_file_side_effect, 'env/envvars', value)
with patch.object(rc.loader, 'load_file', side_effect=envvar_side_effect):
rc.prepare_env()
assert rc.cwd == '/'
def test_prepare_environment_pexpect_defaults():
rc = RunnerConfig(private_data_dir="/")
rc.prepare_env()
assert len(rc.expect_passwords) == 2
assert TIMEOUT in rc.expect_passwords
assert rc.expect_passwords[TIMEOUT] is None
assert EOF in rc.expect_passwords
assert rc.expect_passwords[EOF] is None
def test_prepare_env_passwords():
rc = RunnerConfig(private_data_dir='/')
value = {'^SSH [pP]assword.*$': 'secret'}
password_side_effect = partial(load_file_side_effect, 'env/passwords', value)
with patch.object(rc.loader, 'load_file', side_effect=password_side_effect):
rc.prepare_env()
rc.expect_passwords.pop(TIMEOUT)
rc.expect_passwords.pop(EOF)
assert len(rc.expect_passwords) == 1
assert isinstance(list(rc.expect_passwords.keys())[0], Pattern)
assert 'secret' in rc.expect_passwords.values()
def test_prepare_env_extra_vars_defaults():
rc = RunnerConfig('/')
rc.prepare_env()
assert rc.extra_vars is None
def test_prepare_env_extra_vars():
rc = RunnerConfig('/')
with patch.object(rc.loader, 'isfile', side_effect=lambda x: True):
rc.prepare_env()
assert rc.extra_vars == '/env/extravars'
def test_prepare_env_settings_defaults():
rc = RunnerConfig('/')
rc.prepare_env()
assert rc.settings == {}
def test_prepare_env_settings():
rc = RunnerConfig('/')
value = {'test': 'string'}
settings_side_effect = partial(load_file_side_effect, 'env/settings', value)
with patch.object(rc.loader, 'load_file', side_effect=settings_side_effect):
rc.prepare_env()
assert rc.settings == value
def test_prepare_env_sshkey_defaults():
rc = RunnerConfig('/')
rc.prepare_env()
assert rc.ssh_key_data is None
def test_prepare_env_sshkey():
rc = RunnerConfig('/')
value = '01234567890'
sshkey_side_effect = partial(load_file_side_effect, 'env/ssh_key', value)
with patch.object(rc.loader, 'load_file', side_effect=sshkey_side_effect):
rc.prepare_env()
assert rc.ssh_key_data == value
def test_prepare_env_defaults():
with patch('os.path.exists') as path_exists:
path_exists.return_value=True
rc = RunnerConfig('/')
rc.prepare_env()
assert rc.idle_timeout is None
assert rc.job_timeout is None
assert rc.pexpect_timeout == 5
assert rc.cwd == '/project'
def test_prepare_inventory():
rc = RunnerConfig(private_data_dir='/')
rc.prepare_inventory()
assert rc.inventory == '/inventory'
rc.inventory = '/tmp/inventory'
rc.prepare_inventory()
assert rc.inventory == '/tmp/inventory'
def test_generate_ansible_command():
rc = RunnerConfig(private_data_dir='/', playbook='main.yaml')
rc.prepare_inventory()
rc.extra_vars = None
cmd = rc.generate_ansible_command()
assert cmd == ['ansible-playbook', '-i', '/inventory', 'main.yaml']
rc.extra_vars = '/env/extravars'
cmd = rc.generate_ansible_command()
assert cmd == ['ansible-playbook', '-i', '/inventory', '-e', '@/env/extravars', 'main.yaml']
rc.extra_vars = None
rc.verbosity = 3
cmd = rc.generate_ansible_command()
assert cmd == ['ansible-playbook', '-i', '/inventory', '-vvv', 'main.yaml']
rc.verbosity = None
rc.limit = 'hosts'
cmd = rc.generate_ansible_command()
assert cmd == ['ansible-playbook', '-i', '/inventory', '--limit', 'hosts', 'main.yaml']
rc.limit = None
rc.module = 'setup'
cmd = rc.generate_ansible_command()
assert cmd == ['ansible', '-i', '/inventory', '-m', 'setup']
rc.module = None
rc.module = 'setup'
rc.module_args = 'test=string'
cmd = rc.generate_ansible_command()
assert cmd == ['ansible', '-i', '/inventory', '-m', 'setup', '-a', 'test=string']
rc.module_args = None
def test_generate_ansible_command_with_api_extravars():
rc = RunnerConfig(private_data_dir='/', playbook='main.yaml', extravars={"foo":"bar"})
rc.prepare_inventory()
cmd = rc.generate_ansible_command()
assert cmd == ['ansible-playbook', '-i', '/inventory', '-e', '\'foo="bar"\'', 'main.yaml']
def test_generate_ansible_command_with_cmdline_args():
rc = RunnerConfig(private_data_dir='/', playbook='main.yaml')
rc.prepare_inventory()
rc.extra_vars = {}
cmdline_side_effect = partial(load_file_side_effect, 'env/cmdline', b('--tags foo --skip-tags'))
with patch.object(rc.loader, 'load_file', side_effect=cmdline_side_effect):
cmd = rc.generate_ansible_command()
assert cmd == ['ansible-playbook', '--tags', 'foo', '--skip-tags', '-i', '/inventory', 'main.yaml']
def test_prepare_command_defaults():
rc = RunnerConfig('/')
cmd_side_effect = partial(load_file_side_effect, 'args')
def generate_side_effect():
return 'test string'
with patch.object(rc.loader, 'load_file', side_effect=cmd_side_effect):
with patch.object(rc, 'generate_ansible_command', side_effect=generate_side_effect):
rc.prepare_command()
assert rc.command == 'test string'
def test_prepare_command_with_args():
rc = RunnerConfig('/')
value = 'test string'
args_side_effect = partial(load_file_side_effect, 'args', value)
with patch.object(rc.loader, 'load_file', side_effect=args_side_effect):
rc.prepare_command()
assert rc.command == value
def test_prepare_with_defaults():
rc = RunnerConfig('/')
rc.prepare_inventory = Mock()
rc.prepare_env = Mock()
rc.prepare_command = Mock()
rc.ssh_key_data = None
rc.artifact_dir = '/'
rc.env = {}
with raises(ConfigurationError) as exc:
rc.prepare()
assert str(exc) == 'Runner playbook is not defined'
def test_prepare():
rc = RunnerConfig('/')
rc.prepare_inventory = Mock()
rc.prepare_env = Mock()
rc.prepare_command = Mock()
rc.ssh_key_data = None
rc.artifact_dir = '/'
rc.env = {}
rc.playbook = 'main.yaml'
os.environ['AWX_LIB_DIRECTORY'] = '/'
rc.prepare()
assert rc.prepare_inventory.called
assert rc.prepare_env.called
assert rc.prepare_command.called
assert not hasattr(rc, 'ssh_key_path')
assert not hasattr(rc, 'command')
assert rc.env['ANSIBLE_STDOUT_CALLBACK'] == 'awx_display'
assert rc.env['ANSIBLE_RETRY_FILES_ENABLED'] == 'False'
assert rc.env['ANSIBLE_HOST_KEY_CHECKING'] == 'False'
assert rc.env['AWX_ISOLATED_DATA_DIR'] == '/'
assert rc.env['PYTHONPATH'] == '/:'
os.environ['PYTHONPATH'] = "/foo/bar"
rc.prepare()
assert rc.env['PYTHONPATH'] == "/foo/bar:/:"
def test_prepare_with_ssh_key():
rc = RunnerConfig('/')
rc.prepare_inventory = Mock()
rc.prepare_env = Mock()
rc.prepare_command = Mock()
rc.wrap_args_with_ssh_agent = Mock()
rc.open_fifo_write = Mock()
rc.ssh_key_data = None
rc.artifact_dir = '/'
rc.env = {}
rc.playbook = 'main.yaml'
rc.ssh_key_data = '01234567890'
rc.command = 'ansible-playbook'
os.environ['AWX_LIB_DIRECTORY'] = '/'
rc.prepare()
assert rc.ssh_key_path == '/ssh_key_data'
assert rc.wrap_args_with_ssh_agent.called
assert rc.open_fifo_write.called
def test_wrap_args_with_ssh_agent_defaults():
rc = RunnerConfig('/')
res = rc.wrap_args_with_ssh_agent(['ansible-playbook', 'main.yaml'], '/tmp/sshkey')
assert res == ['ssh-agent', 'sh', '-c', 'ssh-add /tmp/sshkey && rm -f /tmp/sshkey && ansible-playbook main.yaml']
def test_wrap_args_with_ssh_agent_with_auth():
rc = RunnerConfig('/')
res = rc.wrap_args_with_ssh_agent(['ansible-playbook', 'main.yaml'], '/tmp/sshkey', '/tmp/sshauth')
assert res == ['ssh-agent', '-a', '/tmp/sshauth', 'sh', '-c', 'ssh-add /tmp/sshkey && rm -f /tmp/sshkey && ansible-playbook main.yaml']
def test_wrap_args_with_ssh_agent_silent():
rc = RunnerConfig('/')
res = rc.wrap_args_with_ssh_agent(['ansible-playbook', 'main.yaml'], '/tmp/sshkey', silence_ssh_add=True)
assert res == ['ssh-agent', 'sh', '-c', 'ssh-add /tmp/sshkey 2>/dev/null && rm -f /tmp/sshkey && ansible-playbook main.yaml']
def test_fifo_write():
pass
def test_args2cmdline():
rc = RunnerConfig('/')
res = rc.args2cmdline('ansible', '-m', 'setup', 'localhost')
assert res == 'ansible -m setup localhost'
|
[] |
[] |
[
"AWX_LIB_DIRECTORY",
"PYTHONPATH"
] |
[]
|
["AWX_LIB_DIRECTORY", "PYTHONPATH"]
|
python
| 2 | 0 | |
saminda/cipres-airavata/sdk/scripts/remote_resource/trestles/test_gordon_lib.py
|
import os
import string
import math
import re
import subprocess
# I didn't implement getProperties; I found it somewhere. It just reads a Java-style
# properties file into a dictionary.
def getProperties(filename):
propFile= file( filename, "rU" )
propDict= dict()
for propLine in propFile:
propDef= propLine.strip()
if len(propDef) == 0:
continue
if propDef[0] in ( '!', '#' ):
continue
punctuation= [ propDef.find(c) for c in ':= ' ] + [ len(propDef) ]
found= min( [ pos for pos in punctuation if pos != -1 ] )
name= propDef[:found].rstrip()
value= propDef[found:].lstrip(":= ").rstrip()
propDict[name]= value
propFile.close()
# print propDict
return propDict
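# Illustrative example (assumed file contents, not shipped with this script):
# given a scheduler.conf containing
#     # comment line
#     jobtype=mpi
#     mpi_processes = 32
#     runhours: 2.5
# getProperties("scheduler.conf") returns
#     {'jobtype': 'mpi', 'mpi_processes': '32', 'runhours': '2.5'}
# Every value comes back as a string; callers such as schedulerInfo() do the
# numeric conversions.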
def getToolType(commandlineString):
if re.search(r'garli', "".join(commandlineString).lower()):
return "garli"
elif re.search(r'raxml', "".join(commandlineString).lower()):
return "raxml"
elif re.search('mb.+3\.2\.1.*', "".join(commandlineString).lower()):
return "mrbayes_3.2.1"
elif re.search(r'mb', "".join(commandlineString).lower()):
return "mrbayes"
elif re.search(r'beast', "".join(commandlineString).lower()):
return "beast"
return None
# Max runtime should be 2 weeks (which is 336 hrs or 20160 minutes) for user cipres. Not
# sure what it is for other users. CHANGE: also using "shared" queue now.
shared_queue = "shared"
shared_queue_limit = 20160.0
short_queue = "normal"
# max run time for "cipres" is 7 days = 10080 minutes
queues = (("normal", 10080.0), )
cores_per_node = 16
# Effectively get rid of max_nodes by setting it to 5000
max_nodes = 5000
max_cores = max_nodes * cores_per_node
default_cores = cores_per_node
account = "TG-DEB090011"
# account = "sds121"
# account = "ddp116"
scheduler_file = "scheduler.conf"
email = "[email protected]"
jobname = ""
runfile = "./batch_command.run"
statusfile = "./batch_command.status"
cmdfile = "./batch_command.cmdline"
jobdir = os.getcwd()
local_jobdir = "/scratch/cipres/$PBS_JOBID"
jobname = os.environ.get("WB_JOBID", "cipres")
def schedulerInfo(properties, tooltype):
""" properties is a dictionary containing keys:
jobtype, mpi_processes, threads_per_process, nodes, runhours.
Based on properties and hardcoded info about the resource this returns a dictionary
containing:
is_direct, is_mpi, queue, runtime, mpi_processes, nodes, ppn, qos, threads_per_process"""
# get runhours from properties and convert it to minutes, default to zero if not specified.
try:
runtime = properties.get("runhours", 0.0)
runtime = math.ceil(float(runtime) * 60 )
except:
runtime = 0.0
qname = 0
qlimit = 1
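# qname/qlimit are tuple indexes into the (queue-name, limit-in-minutes)
# entries of `queues` above.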
# if runtime is 0 (which isn't really valid), change it to limit for the shortest queue
# so we have something reasonable to work with.
if runtime == 0.0:
runtime = queues[qname][qlimit]
# based on runtime, figure out which queue we should be using.
queue = None
for entry in queues:
if runtime <= entry[qlimit]:
queue = entry[qname]
break
if queue == None:
queue = queues[-1][qname]
runtime = queues[-1][qlimit]
# Create retval and set values we just determined for runtime and queue. Set defaults for some
# of the other retvals which may be overriden below.
#
# With the recent OS upgrade on gordon, ppn should always be 8 if running in shared Q and cores_per_node (ie. 16)
# in the regular Q.
retval = {"runtime":runtime, "queue":queue, "threads_per_process":int(properties.get("threads_per_process", 0)),
"nodes": int(properties.get("nodes", 1)), "ppn": int(cores_per_node), "qos": int(5),
"mpi_processes": int(properties.get("mpi_processes", 1)) }
if properties.get("jobtype") == "direct":
retval["is_direct"] = True
return retval
else:
retval["is_direct"] = False
if properties.get("jobtype", "") == "mpi":
retval["is_mpi"] = True
else:
retval["is_mpi"] = False
if (retval["is_mpi"] == True):
# Some of our pise xml interfaces just specify the number of mpi processes they want.
# We round it down to a multiple of the number of cores per node and request enough nodes
# so that each mpi process has its own core.
#
# Not sure if we still have any interfaces like I just described but it's definitely not
# how we want to run garli here, so explicitly exclude it. Garli just specifies
# the number of mpi processes but we always want to use a single node for it.
if (properties.get("nodes", "") == "") and (properties.get("thread_per_process", "") == "") and tooltype != "garli":
processes = retval["mpi_processes"]
processes = int(processes / cores_per_node) * cores_per_node
processes = min(max(processes, default_cores), max_cores)
retval["nodes"] = processes / cores_per_node
retval["mpi_processes"] = processes
# Pise interfaces that have more knowledge of the specific machine explicitly specify
# the number of nodes as well as the number of mpi processes; we don't 2nd guess them.
else:
retval["nodes"] = int(properties.get("nodes", 1));
# Special case for garli. Run small jobs in shared queue.
if (tooltype == "garli") and (retval["mpi_processes"] < cores_per_node):
useSharedQueue(runtime, retval)
# Run these mrbayes jobs in the shared queue.
if (tooltype == "mrbayes_3.2.1") and (retval["mpi_processes"] <= 8):
useSharedQueue(runtime, retval)
else: # Non mpi jobs, makes no sense to request more than one node.
retval["nodes"] = 1
# Special case for small, non-mpi raxml jobs, run in the shared queue. Also for beast
if ((retval["threads_per_process"] == 8) and (tooltype == "raxml")) or (tooltype == "beast"):
queue = shared_queue
useSharedQueue(runtime, retval)
return retval
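# Illustrative example (assumed inputs, for reference only): with
#     properties = {"jobtype": "mpi", "mpi_processes": "32", "nodes": "2", "runhours": "4"}
#     tooltype = "raxml"
# schedulerInfo() returns
#     {"runtime": 240.0, "queue": "normal", "nodes": 2, "ppn": 16, "qos": 5,
#      "mpi_processes": 32, "threads_per_process": 0,
#      "is_direct": False, "is_mpi": True}
# i.e. 4 hours becomes 240 minutes, the job stays in the normal queue, and the
# explicit node count from the interface is honored.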
def useSharedQueue(runtime, properties):
properties["queue"] = shared_queue
properties["qos"] = int(10)
if runtime > shared_queue_limit:
runtime = shared_queue_limit
retval["runtime"] = runtime
properties["ppn"] = 8
def log(filename, message):
f = open(filename, "a")
f.write(message)
f.close()
def deleteJob(jobid, workingdir):
if os.path.isfile(workingdir + "/cancelJobs"):
os.chdir(workingdir)
cmd = "./cancelJobs %d" % jobid
else:
cmd = "qdel %d" % jobid
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
outerr = p.communicate()
output = outerr[0]
err = outerr[1]
if (p.returncode != 0):
raise SystemError("Error running '%s', return code is %d. stdout is '%s', stderr is '%s'" % (cmd,
p.returncode, output, err))
def jobInQueue():
cmd = "qstat"
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
outerr = p.communicate()
output = outerr[0]
err = outerr[1]
if (p.returncode != 0):
raise SystemError("Error running qstat, return code is %d. stderr is %s" % (p.returncode, err))
if (len(err) != 0):
raise SystemError("Error running qstat, stderr is %s" % (err))
if (len(output) < 5):
raise SystemError("Error running qstat, output looks wrong: %s" % (output))
# cmd = 'echo "%s" | grep `whoami`' % output
cmd = 'grep `whoami`'
p = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
outerr = p.communicate(output)
output = outerr[0]
err = outerr[1]
if (len(err) != 0):
raise SystemError("Error piping qstat thru grep: %s" % (err))
output_rows = output.split("\n")
jobs = []
for row in output_rows:
r = row.split()
if len(r) > 4 and r[4] != "C":
r[0] = r[0].split(".", 1)[0]
jobs.append(r[0])
return jobs
# To do: modify RAxML-Light.sh to accept --url argument and pass it here, like --account. Decide whether
# to use --email too, maybe just on the last job? Or ask Mark if he wants all the emails?
def submitDirectJob(account, url, email, jobname, commandline):
# Not exactly a general purpose solution but for raxml-light we can just add account, email and url
# arguments to the command line.
rfile = open(cmdfile, "w")
rfile.write("#!/bin/sh\n")
rfile.write(" ".join(commandline))
rfile.write(" --account %s" % account)
rfile.write(" --url %s" % url)
rfile.write(" --email %s" % email)
rfile.write("\n")
rfile.close()
os.chmod(cmdfile, 0744);
cmd = cmdfile
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output = p.communicate()[0]
retval = p.returncode
if retval != 0:
print "Error submitting job:\n"
print output
log(statusfile, "submitDirectJob is returning %d.\nStdout/stderr is:%s\n" % (retval, output))
# When there's a bash syntax error in a script it exits with 2, but if we return 2, we've
# defined that to mean "too many jobs queued" and cipres will print a special message.
if (retval == 2):
retval = 1
return retval
log(statusfile, "Job submission stdout/stderr is: %s\n" % output)
# output should be just the full job id, <id>.gordon-fe1.sdsc.edu:
firstline = output.splitlines()
if len(firstline) == 1:
firstline = firstline[0]
p = re.compile(r"^(\d+).gordon.\S+", re.M)
m = p.search(output)
if m != None:
jobid = m.group(0)
short_jobid = m.group(1)
print "jobid=%d" % int(short_jobid)
log(statusfile, "JOBID is %s\n" % jobid)
log("./_JOBINFO.TXT", "\nJOBID=%s\n" % jobid)
return 0
print "Error, job submission says: %s" % output
log(statusfile, "can't find jobid, submitDirectJob is returning 1\n")
return 1
# Returns 0 on success, 2 means too many jobs queued.
def submitJob():
cmd = "qsub %s 2>> %s" % (runfile, statusfile)
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
output = p.communicate()[0]
retval = p.returncode
if retval != 0:
# read whatever qsub wrote to the statusfile and print it to stdout
print "Error submitting job:\n"
f = open(statusfile, "r"); print f.read(), "\n\n"; f.close()
print output
# When we return 2 it means too many jobs are queued. qstat returns -226 on abe
# in this situation ... not sure if that's true here, on gordon as well.
if retval == -226:
retval = 2
log(statusfile, "submit_job is returning %d\n" % retval)
return retval
log(statusfile, "qsub output is: " + output + "\n" +
"======================================================================" + "\n")
# output from qsub on gordon should be just the full job id, <id>.gordon-fe1.sdsc.edu:
p = re.compile(r"^(\d+).gordon.\S+", re.M)
m = p.search(output)
if m != None:
jobid = m.group(0)
short_jobid = m.group(1)
print "jobid=%d" % int(short_jobid)
log(statusfile, "JOBID is %s\n" % jobid)
log("./_JOBINFO.TXT", "\nJOBID=%s\n" % jobid)
return 0
else:
print "Error, qsub says: %s" % output
log(statusfile, "can't get jobid, submit_job is returning 1\n")
return 1
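# Illustrative note (sample job id assumed): if qsub prints
#     1234567.gordon-fe2.local
# the pattern r"^(\d+).gordon.\S+" above matches, m.group(1) is "1234567",
# the script prints "jobid=1234567", and the full id is appended to
# _JOBINFO.TXT via log().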
|
[] |
[] |
[
"WB_JOBID"
] |
[]
|
["WB_JOBID"]
|
python
| 1 | 0 | |
agent/taskresource/cgroup/control/init_linux_test.go
|
// +build linux,unit
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package control
import (
"errors"
"testing"
mock_cgroups "github.com/aws/amazon-ecs-agent/agent/taskresource/cgroup/control/factory/mock"
"github.com/aws/amazon-ecs-agent/agent/taskresource/cgroup/control/factory/mock_factory"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
)
func TestInitHappyCase(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
mockCgroup := mock_cgroups.NewMockCgroup(ctrl)
mockCgroupFactory := mock_factory.NewMockCgroupFactory(ctrl)
mockCgroupFactory.EXPECT().New(gomock.Any(), gomock.Any(), gomock.Any()).Return(mockCgroup, nil)
control := newControl(mockCgroupFactory)
assert.NoError(t, control.Init())
}
func TestInitErrorCase(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
mockCgroupFactory := mock_factory.NewMockCgroupFactory(ctrl)
mockCgroupFactory.EXPECT().New(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, errors.New("cgroup error"))
control := newControl(mockCgroupFactory)
assert.Error(t, control.Init())
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
test/e2e/autoscaling/cluster_size_autoscaling.go
|
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package autoscaling
import (
"fmt"
"io/ioutil"
"math"
"net/http"
"os"
"os/exec"
"regexp"
"strconv"
"strings"
"time"
"k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1beta1"
schedulerapi "k8s.io/api/scheduling/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/scheduling"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/klog"
)
const (
defaultTimeout = 3 * time.Minute
resizeTimeout = 5 * time.Minute
manualResizeTimeout = 6 * time.Minute
scaleUpTimeout = 5 * time.Minute
scaleUpTriggerTimeout = 2 * time.Minute
scaleDownTimeout = 20 * time.Minute
podTimeout = 2 * time.Minute
nodesRecoverTimeout = 5 * time.Minute
rcCreationRetryTimeout = 4 * time.Minute
rcCreationRetryDelay = 20 * time.Second
makeSchedulableTimeout = 10 * time.Minute
makeSchedulableDelay = 20 * time.Second
freshStatusLimit = 20 * time.Second
gkeUpdateTimeout = 15 * time.Minute
gkeNodepoolNameKey = "cloud.google.com/gke-nodepool"
disabledTaint = "DisabledForAutoscalingTest"
criticalAddonsOnlyTaint = "CriticalAddonsOnly"
newNodesForScaledownTests = 2
unhealthyClusterThreshold = 4
caNoScaleUpStatus = "NoActivity"
caOngoingScaleUpStatus = "InProgress"
timestampFormat = "2006-01-02 15:04:05 -0700 MST"
expendablePriorityClassName = "expendable-priority"
highPriorityClassName = "high-priority"
gpuLabel = "cloud.google.com/gke-accelerator"
)
var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
f := framework.NewDefaultFramework("autoscaling")
var c clientset.Interface
var nodeCount int
var coreCount int64
var memAllocatableMb int
var originalSizes map[string]int
BeforeEach(func() {
c = f.ClientSet
framework.SkipUnlessProviderIs("gce", "gke")
originalSizes = make(map[string]int)
sum := 0
for _, mig := range strings.Split(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") {
size, err := framework.GroupSize(mig)
framework.ExpectNoError(err)
By(fmt.Sprintf("Initial size of %s: %d", mig, size))
originalSizes[mig] = size
sum += size
}
// Give instances time to spin up
framework.ExpectNoError(framework.WaitForReadyNodes(c, sum, scaleUpTimeout))
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
nodeCount = len(nodes.Items)
coreCount = 0
for _, node := range nodes.Items {
quantity := node.Status.Allocatable[v1.ResourceCPU]
coreCount += quantity.Value()
}
By(fmt.Sprintf("Initial number of schedulable nodes: %v", nodeCount))
Expect(nodeCount).NotTo(BeZero())
mem := nodes.Items[0].Status.Allocatable[v1.ResourceMemory]
memAllocatableMb = int((&mem).Value() / 1024 / 1024)
Expect(nodeCount).Should(Equal(sum))
if framework.ProviderIs("gke") {
val, err := isAutoscalerEnabled(5)
framework.ExpectNoError(err)
if !val {
err = enableAutoscaler("default-pool", 3, 5)
framework.ExpectNoError(err)
}
}
})
AfterEach(func() {
framework.SkipUnlessProviderIs("gce", "gke")
By(fmt.Sprintf("Restoring initial size of the cluster"))
setMigSizes(originalSizes)
expectedNodes := 0
for _, size := range originalSizes {
expectedNodes += size
}
framework.ExpectNoError(framework.WaitForReadyNodes(c, expectedNodes, scaleDownTimeout))
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
framework.ExpectNoError(err)
s := time.Now()
makeSchedulableLoop:
for start := time.Now(); time.Since(start) < makeSchedulableTimeout; time.Sleep(makeSchedulableDelay) {
for _, n := range nodes.Items {
err = makeNodeSchedulable(c, &n, true)
switch err.(type) {
case CriticalAddonsOnlyError:
continue makeSchedulableLoop
default:
framework.ExpectNoError(err)
}
}
break
}
klog.Infof("Made nodes schedulable again in %v", time.Since(s).String())
})
It("shouldn't increase cluster size if pending pod is too large [Feature:ClusterSizeAutoscalingScaleUp]", func() {
By("Creating unschedulable pod")
ReserveMemory(f, "memory-reservation", 1, int(1.1*float64(memAllocatableMb)), false, defaultTimeout)
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
By("Waiting for scale up hoping it won't happen")
// Verify that the appropriate event was generated
eventFound := false
EventsLoop:
for start := time.Now(); time.Since(start) < scaleUpTimeout; time.Sleep(20 * time.Second) {
By("Waiting for NotTriggerScaleUp event")
events, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(metav1.ListOptions{})
framework.ExpectNoError(err)
for _, e := range events.Items {
if e.InvolvedObject.Kind == "Pod" && e.Reason == "NotTriggerScaleUp" && strings.Contains(e.Message, "it wouldn't fit if a new node is added") {
By("NotTriggerScaleUp event found")
eventFound = true
break EventsLoop
}
}
}
Expect(eventFound).Should(Equal(true))
// Verify that cluster size is not changed
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size <= nodeCount }, time.Second))
})
simpleScaleUpTest := func(unready int) {
ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second)
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
// Verify that cluster size is increased
framework.ExpectNoError(WaitForClusterSizeFuncWithUnready(f.ClientSet,
func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout, unready))
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
}
It("should increase cluster size if pending pods are small [Feature:ClusterSizeAutoscalingScaleUp]",
func() { simpleScaleUpTest(0) })
gpuType := os.Getenv("TESTED_GPU_TYPE")
It(fmt.Sprintf("Should scale up GPU pool from 0 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
if gpuType == "" {
framework.Failf("TEST_GPU_TYPE not defined")
return
}
const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 0)
defer deleteNodePool(gpuPoolName)
installNvidiaDriversDaemonSet()
By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
defer disableAutoscaler(gpuPoolName, 0, 1)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
By("Schedule a pod which requires GPU")
framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc"))
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount+1 }, scaleUpTimeout))
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1))
})
It(fmt.Sprintf("Should scale up GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
if gpuType == "" {
framework.Failf("TEST_GPU_TYPE not defined")
return
}
const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 1)
defer deleteNodePool(gpuPoolName)
installNvidiaDriversDaemonSet()
By("Schedule a single pod which requires GPU")
framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc"))
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 2))
defer disableAutoscaler(gpuPoolName, 0, 2)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1))
By("Scale GPU deployment")
framework.ScaleRC(f.ClientSet, f.ScalesGetter, f.Namespace.Name, "gpu-pod-rc", 2, true)
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount+2 }, scaleUpTimeout))
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(2))
})
It(fmt.Sprintf("Should not scale GPU pool up if pod does not require GPUs [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
if gpuType == "" {
framework.Failf("TEST_GPU_TYPE not defined")
return
}
const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 0)
defer deleteNodePool(gpuPoolName)
installNvidiaDriversDaemonSet()
By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
defer disableAutoscaler(gpuPoolName, 0, 1)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
By("Schedule bunch of pods beyond point of filling default pool but do not request any GPUs")
ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second)
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
// Verify that cluster size is increased
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))
// Expect gpu pool to stay intact
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
})
It(fmt.Sprintf("Should scale down GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
if gpuType == "" {
framework.Failf("TEST_GPU_TYPE not defined")
return
}
const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 1)
defer deleteNodePool(gpuPoolName)
installNvidiaDriversDaemonSet()
By("Schedule a single pod which requires GPU")
framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc"))
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
defer disableAutoscaler(gpuPoolName, 0, 1)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1))
By("Remove the only POD requiring GPU")
framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount }, scaleDownTimeout))
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
})
It("should increase cluster size if pending pods are small and one node is broken [Feature:ClusterSizeAutoscalingScaleUp]",
func() {
framework.TestUnderTemporaryNetworkFailure(c, "default", getAnyNode(c), func() { simpleScaleUpTest(1) })
})
It("shouldn't trigger additional scale-ups during processing scale-up [Feature:ClusterSizeAutoscalingScaleUp]", func() {
// Wait for the situation to stabilize - CA should be running and have up-to-date node readiness info.
status, err := waitForScaleUpStatus(c, func(s *scaleUpStatus) bool {
return s.ready == s.target && s.ready <= nodeCount
}, scaleUpTriggerTimeout)
framework.ExpectNoError(err)
unmanagedNodes := nodeCount - status.ready
By("Schedule more pods than can fit and wait for cluster to scale-up")
ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second)
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
status, err = waitForScaleUpStatus(c, func(s *scaleUpStatus) bool {
return s.status == caOngoingScaleUpStatus
}, scaleUpTriggerTimeout)
framework.ExpectNoError(err)
target := status.target
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
By("Expect no more scale-up to be happening after all pods are scheduled")
// wait for a while until scale-up finishes; we cannot read CA status immediately
// after pods are scheduled as status config map is updated by CA once every loop iteration
status, err = waitForScaleUpStatus(c, func(s *scaleUpStatus) bool {
return s.status == caNoScaleUpStatus
}, 2*freshStatusLimit)
framework.ExpectNoError(err)
if status.target != target {
klog.Warningf("Final number of nodes (%v) does not match initial scale-up target (%v).", status.target, target)
}
Expect(status.timestamp.Add(freshStatusLimit).Before(time.Now())).Should(Equal(false))
Expect(status.status).Should(Equal(caNoScaleUpStatus))
Expect(status.ready).Should(Equal(status.target))
Expect(len(framework.GetReadySchedulableNodesOrDie(f.ClientSet).Items)).Should(Equal(status.target + unmanagedNodes))
})
It("should increase cluster size if pending pods are small and there is another node pool that is not autoscaled [Feature:ClusterSizeAutoscalingScaleUp]", func() {
framework.SkipUnlessProviderIs("gke")
By("Creating new node-pool with n1-standard-4 machines")
const extraPoolName = "extra-pool"
addNodePool(extraPoolName, "n1-standard-4", 1)
defer deleteNodePool(extraPoolName)
extraNodes := getPoolInitialSize(extraPoolName)
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
// We wait for nodes to become schedulable to make sure the new nodes
// will be returned by getPoolNodes below.
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, resizeTimeout))
klog.Infof("Not enabling cluster autoscaler for the node pool (on purpose).")
By("Getting memory available on new nodes, so we can account for it when creating RC")
nodes := getPoolNodes(f, extraPoolName)
Expect(len(nodes)).Should(Equal(extraNodes))
extraMemMb := 0
for _, node := range nodes {
mem := node.Status.Allocatable[v1.ResourceMemory]
extraMemMb += int((&mem).Value() / 1024 / 1024)
}
By("Reserving 0.1x more memory than the cluster holds to trigger scale up")
totalMemoryReservation := int(1.1 * float64(nodeCount*memAllocatableMb+extraMemMb))
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
ReserveMemory(f, "memory-reservation", 100, totalMemoryReservation, false, defaultTimeout)
// Verify that cluster size is increased
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= nodeCount+extraNodes+1 }, scaleUpTimeout))
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
})
It("should disable node pool autoscaling [Feature:ClusterSizeAutoscalingScaleUp]", func() {
framework.SkipUnlessProviderIs("gke")
By("Creating new node-pool with n1-standard-4 machines")
const extraPoolName = "extra-pool"
addNodePool(extraPoolName, "n1-standard-4", 1)
defer deleteNodePool(extraPoolName)
extraNodes := getPoolInitialSize(extraPoolName)
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
framework.ExpectNoError(enableAutoscaler(extraPoolName, 1, 2))
framework.ExpectNoError(disableAutoscaler(extraPoolName, 1, 2))
})
It("should increase cluster size if pods are pending due to host port conflict [Feature:ClusterSizeAutoscalingScaleUp]", func() {
scheduling.CreateHostPortPods(f, "host-port", nodeCount+2, false)
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "host-port")
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= nodeCount+2 }, scaleUpTimeout))
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
})
It("should increase cluster size if pods are pending due to pod anti-affinity [Feature:ClusterSizeAutoscalingScaleUp]", func() {
pods := nodeCount
newPods := 2
labels := map[string]string{
"anti-affinity": "yes",
}
By("starting a pod with anti-affinity on each node")
framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, pods, "some-pod", labels, labels))
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "some-pod")
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
By("scheduling extra pods with anti-affinity to existing ones")
framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, newPods, "extra-pod", labels, labels))
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "extra-pod")
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
})
It("should increase cluster size if pod requesting EmptyDir volume is pending [Feature:ClusterSizeAutoscalingScaleUp]", func() {
By("creating pods")
pods := nodeCount
newPods := 1
labels := map[string]string{
"anti-affinity": "yes",
}
framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, pods, "some-pod", labels, labels))
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "some-pod")
By("waiting for all pods before triggering scale up")
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
By("creating a pod requesting EmptyDir")
framework.ExpectNoError(runVolumeAntiAffinityPods(f, f.Namespace.Name, newPods, "extra-pod", labels, labels, emptyDirVolumes))
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "extra-pod")
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
})
It("should increase cluster size if pod requesting volume is pending [Feature:ClusterSizeAutoscalingScaleUp]", func() {
framework.SkipUnlessProviderIs("gce", "gke")
volumeLabels := labels.Set{
framework.VolumeSelectorKey: f.Namespace.Name,
}
selector := metav1.SetAsLabelSelector(volumeLabels)
By("creating volume & pvc")
diskName, err := framework.CreatePDWithRetry()
framework.ExpectNoError(err)
pvConfig := framework.PersistentVolumeConfig{
NamePrefix: "gce-",
Labels: volumeLabels,
PVSource: v1.PersistentVolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: diskName,
FSType: "ext3",
ReadOnly: false,
},
},
Prebind: nil,
}
emptyStorageClass := ""
pvcConfig := framework.PersistentVolumeClaimConfig{
Selector: selector,
StorageClassName: &emptyStorageClass,
}
pv, pvc, err := framework.CreatePVPVC(c, pvConfig, pvcConfig, f.Namespace.Name, false)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitOnPVandPVC(c, f.Namespace.Name, pv, pvc))
defer func() {
errs := framework.PVPVCCleanup(c, f.Namespace.Name, pv, pvc)
if len(errs) > 0 {
framework.Failf("failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
}
pv, pvc = nil, nil
if diskName != "" {
framework.ExpectNoError(framework.DeletePDWithRetry(diskName))
}
}()
By("creating pods")
pods := nodeCount
labels := map[string]string{
"anti-affinity": "yes",
}
framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, pods, "some-pod", labels, labels))
defer func() {
framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "some-pod")
klog.Infof("RC and pods not using volume deleted")
}()
By("waiting for all pods before triggering scale up")
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
By("creating a pod requesting PVC")
pvcPodName := "pvc-pod"
newPods := 1
volumes := buildVolumes(pv, pvc)
framework.ExpectNoError(runVolumeAntiAffinityPods(f, f.Namespace.Name, newPods, pvcPodName, labels, labels, volumes))
defer func() {
framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, pvcPodName)
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
}()
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
})
It("should add node to the particular mig [Feature:ClusterSizeAutoscalingScaleUp]", func() {
labelKey := "cluster-autoscaling-test.special-node"
labelValue := "true"
By("Finding the smallest MIG")
minMig := ""
minSize := nodeCount
for mig, size := range originalSizes {
if size <= minSize {
minMig = mig
minSize = size
}
}
if minSize == 0 {
newSizes := make(map[string]int)
for mig, size := range originalSizes {
newSizes[mig] = size
}
newSizes[minMig] = 1
setMigSizes(newSizes)
}
removeLabels := func(nodesToClean sets.String) {
By("Removing labels from nodes")
for node := range nodesToClean {
framework.RemoveLabelOffNode(c, node, labelKey)
}
}
nodes, err := framework.GetGroupNodes(minMig)
framework.ExpectNoError(err)
nodesSet := sets.NewString(nodes...)
defer removeLabels(nodesSet)
By(fmt.Sprintf("Annotating nodes of the smallest MIG(%s): %v", minMig, nodes))
for node := range nodesSet {
framework.AddOrUpdateLabelOnNode(c, node, labelKey, labelValue)
}
scheduling.CreateNodeSelectorPods(f, "node-selector", minSize+1, map[string]string{labelKey: labelValue}, false)
By("Waiting for new node to appear and annotating it")
framework.WaitForGroupSize(minMig, int32(minSize+1))
// Verify that cluster size is increased
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))
newNodes, err := framework.GetGroupNodes(minMig)
framework.ExpectNoError(err)
newNodesSet := sets.NewString(newNodes...)
newNodesSet.Delete(nodes...)
if len(newNodesSet) > 1 {
By(fmt.Sprintf("Spotted following new nodes in %s: %v", minMig, newNodesSet))
klog.Infof("Usually only 1 new node is expected, investigating")
klog.Infof("Kubectl:%s\n", framework.RunKubectlOrDie("get", "nodes", "-o", "json"))
if output, err := exec.Command("gcloud", "compute", "instances", "list",
"--project="+framework.TestContext.CloudConfig.ProjectID,
"--zone="+framework.TestContext.CloudConfig.Zone).Output(); err == nil {
klog.Infof("Gcloud compute instances list: %s", output)
} else {
klog.Errorf("Failed to get instances list: %v", err)
}
for newNode := range newNodesSet {
if output, err := execCmd("gcloud", "compute", "instances", "describe",
newNode,
"--project="+framework.TestContext.CloudConfig.ProjectID,
"--zone="+framework.TestContext.CloudConfig.Zone).Output(); err == nil {
klog.Infof("Gcloud compute instances describe: %s", output)
} else {
klog.Errorf("Failed to get instances describe: %v", err)
}
}
// TODO: possibly remove broken node from newNodesSet to prevent removeLabel from crashing.
// However at this moment we DO WANT it to crash so that we don't check all test runs for the
// rare behavior, but only the broken ones.
}
By(fmt.Sprintf("New nodes: %v\n", newNodesSet))
registeredNodes := sets.NewString()
for nodeName := range newNodesSet {
node, err := f.ClientSet.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
if err == nil && node != nil {
registeredNodes.Insert(nodeName)
} else {
klog.Errorf("Failed to get node %v: %v", nodeName, err)
}
}
By(fmt.Sprintf("Setting labels for registered new nodes: %v", registeredNodes.List()))
for node := range registeredNodes {
framework.AddOrUpdateLabelOnNode(c, node, labelKey, labelValue)
}
defer removeLabels(registeredNodes)
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
framework.ExpectNoError(framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "node-selector"))
})
It("should scale up correct target pool [Feature:ClusterSizeAutoscalingScaleUp]", func() {
framework.SkipUnlessProviderIs("gke")
By("Creating new node-pool with n1-standard-4 machines")
const extraPoolName = "extra-pool"
addNodePool(extraPoolName, "n1-standard-4", 1)
defer deleteNodePool(extraPoolName)
extraNodes := getPoolInitialSize(extraPoolName)
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
framework.ExpectNoError(enableAutoscaler(extraPoolName, 1, 2))
defer disableAutoscaler(extraPoolName, 1, 2)
extraPods := extraNodes + 1
totalMemoryReservation := int(float64(extraPods) * 1.5 * float64(memAllocatableMb))
By(fmt.Sprintf("Creating rc with %v pods too big to fit default-pool but fitting extra-pool", extraPods))
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
ReserveMemory(f, "memory-reservation", extraPods, totalMemoryReservation, false, defaultTimeout)
// Apparently the GKE master is restarted a couple of minutes after the node pool is added,
// resetting all the timers in scale-down code. Adding 5 extra minutes to work around
// this issue.
// TODO: Remove the extra time when GKE restart is fixed.
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+extraNodes+1, scaleUpTimeout+5*time.Minute))
})
simpleScaleDownTest := func(unready int) {
cleanup, err := addKubeSystemPdbs(f)
defer cleanup()
framework.ExpectNoError(err)
By("Manually increase cluster size")
increasedSize := 0
newSizes := make(map[string]int)
for key, val := range originalSizes {
newSizes[key] = val + 2 + unready
increasedSize += val + 2 + unready
}
setMigSizes(newSizes)
framework.ExpectNoError(WaitForClusterSizeFuncWithUnready(f.ClientSet,
func(size int) bool { return size >= increasedSize }, manualResizeTimeout, unready))
By("Some node should be removed")
framework.ExpectNoError(WaitForClusterSizeFuncWithUnready(f.ClientSet,
func(size int) bool { return size < increasedSize }, scaleDownTimeout, unready))
}
It("should correctly scale down after a node is not needed [Feature:ClusterSizeAutoscalingScaleDown]",
func() { simpleScaleDownTest(0) })
It("should correctly scale down after a node is not needed and one node is broken [Feature:ClusterSizeAutoscalingScaleDown]",
func() {
framework.TestUnderTemporaryNetworkFailure(c, "default", getAnyNode(c), func() { simpleScaleDownTest(1) })
})
It("should correctly scale down after a node is not needed when there is non autoscaled pool[Feature:ClusterSizeAutoscalingScaleDown]", func() {
framework.SkipUnlessProviderIs("gke")
increasedSize := manuallyIncreaseClusterSize(f, originalSizes)
const extraPoolName = "extra-pool"
addNodePool(extraPoolName, "n1-standard-1", 3)
defer deleteNodePool(extraPoolName)
extraNodes := getPoolInitialSize(extraPoolName)
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= increasedSize+extraNodes }, scaleUpTimeout))
By("Some node should be removed")
// Apparently the GKE master is restarted a couple of minutes after the node pool is added,
// resetting all the timers in scale-down code. Adding 10 extra minutes to work around
// this issue.
// TODO: Remove the extra time when GKE restart is fixed.
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size < increasedSize+extraNodes }, scaleDownTimeout+10*time.Minute))
})
It("should be able to scale down when rescheduling a pod is required and pdb allows for it[Feature:ClusterSizeAutoscalingScaleDown]", func() {
runDrainTest(f, originalSizes, f.Namespace.Name, 1, 1, func(increasedSize int) {
By("Some node should be removed")
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size < increasedSize }, scaleDownTimeout))
})
})
It("shouldn't be able to scale down when rescheduling a pod is required, but pdb doesn't allow drain[Feature:ClusterSizeAutoscalingScaleDown]", func() {
runDrainTest(f, originalSizes, f.Namespace.Name, 1, 0, func(increasedSize int) {
By("No nodes should be removed")
time.Sleep(scaleDownTimeout)
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(len(nodes.Items)).Should(Equal(increasedSize))
})
})
It("should be able to scale down by draining multiple pods one by one as dictated by pdb[Feature:ClusterSizeAutoscalingScaleDown]", func() {
runDrainTest(f, originalSizes, f.Namespace.Name, 2, 1, func(increasedSize int) {
By("Some node should be removed")
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size < increasedSize }, scaleDownTimeout))
})
})
It("should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown]", func() {
runDrainTest(f, originalSizes, "kube-system", 2, 1, func(increasedSize int) {
By("Some node should be removed")
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size < increasedSize }, scaleDownTimeout))
})
})
It("Should be able to scale a node group up from 0[Feature:ClusterSizeAutoscalingScaleUp]", func() {
// Provider-specific setup
if framework.ProviderIs("gke") {
// GKE-specific setup
By("Add a new node pool with 0 nodes and min size 0")
const extraPoolName = "extra-pool"
addNodePool(extraPoolName, "n1-standard-4", 0)
defer deleteNodePool(extraPoolName)
framework.ExpectNoError(enableAutoscaler(extraPoolName, 0, 1))
defer disableAutoscaler(extraPoolName, 0, 1)
} else {
// on GCE, run only if there are already at least 2 node groups
framework.SkipUnlessAtLeast(len(originalSizes), 2, "At least 2 node groups are needed for scale-to-0 tests")
By("Manually scale smallest node group to 0")
minMig := ""
minSize := nodeCount
for mig, size := range originalSizes {
if size <= minSize {
minMig = mig
minSize = size
}
}
framework.ExpectNoError(framework.ResizeGroup(minMig, int32(0)))
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount-minSize, resizeTimeout))
}
By("Make remaining nodes unschedulable")
nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
framework.ExpectNoError(err)
for _, node := range nodes.Items {
err = makeNodeUnschedulable(f.ClientSet, &node)
defer func(n v1.Node) {
makeNodeSchedulable(f.ClientSet, &n, false)
}(node)
framework.ExpectNoError(err)
}
By("Run a scale-up test")
ReserveMemory(f, "memory-reservation", 1, 100, false, 1*time.Second)
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
// Verify that cluster size is increased
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= len(nodes.Items)+1 }, scaleUpTimeout))
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
})
// Scale-to-0 test is split into two functions (for GKE & GCE).
// The reason is that the scenario is exactly the same,
// but setup & verification use different APIs.
//
// Scenario:
// (GKE only) add an extra node pool with size 1 & enable autoscaling for it
// (GCE only) find the smallest MIG & resize it to 1
// manually drain the single node from this node pool/MIG
// wait for cluster size to decrease
// verify the targeted node pool/MIG is of size 0
gkeScaleToZero := func() {
// GKE-specific setup
By("Add a new node pool with size 1 and min size 0")
const extraPoolName = "extra-pool"
addNodePool(extraPoolName, "n1-standard-4", 1)
defer deleteNodePool(extraPoolName)
extraNodes := getPoolInitialSize(extraPoolName)
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
framework.ExpectNoError(enableAutoscaler(extraPoolName, 0, 1))
defer disableAutoscaler(extraPoolName, 0, 1)
ngNodes := getPoolNodes(f, extraPoolName)
Expect(len(ngNodes)).To(Equal(extraNodes))
for _, node := range ngNodes {
By(fmt.Sprintf("Target node for scale-down: %s", node.Name))
}
for _, node := range ngNodes {
drainNode(f, node)
}
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size <= nodeCount }, scaleDownTimeout))
// GKE-specific check
newSize := getPoolSize(f, extraPoolName)
Expect(newSize).Should(Equal(0))
}
gceScaleToZero := func() {
// non-GKE only
By("Find smallest node group and manually scale it to a single node")
minMig := ""
minSize := nodeCount
for mig, size := range originalSizes {
if size <= minSize {
minMig = mig
minSize = size
}
}
framework.ExpectNoError(framework.ResizeGroup(minMig, int32(1)))
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount-minSize+1, resizeTimeout))
ngNodes, err := framework.GetGroupNodes(minMig)
framework.ExpectNoError(err)
Expect(len(ngNodes) == 1).To(BeTrue())
node, err := f.ClientSet.CoreV1().Nodes().Get(ngNodes[0], metav1.GetOptions{})
By(fmt.Sprintf("Target node for scale-down: %s", node.Name))
framework.ExpectNoError(err)
// this part is identical
drainNode(f, node)
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size < nodeCount-minSize+1 }, scaleDownTimeout))
// non-GKE only
newSize, err := framework.GroupSize(minMig)
framework.ExpectNoError(err)
Expect(newSize).Should(Equal(0))
}
It("Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown]", func() {
if framework.ProviderIs("gke") { // In GKE, we can just add a node pool
gkeScaleToZero()
} else if len(originalSizes) >= 2 {
gceScaleToZero()
} else {
framework.Skipf("At least 2 node groups are needed for scale-to-0 tests")
}
})
It("Shouldn't perform scale up operation and should list unhealthy status if most of the cluster is broken[Feature:ClusterSizeAutoscalingScaleUp]", func() {
clusterSize := nodeCount
for clusterSize < unhealthyClusterThreshold+1 {
clusterSize = manuallyIncreaseClusterSize(f, originalSizes)
}
// If new nodes are disconnected too soon, they'll be considered not started
// instead of unready, and cluster won't be considered unhealthy.
//
// More precisely, Cluster Autoscaler compares last transition time of
// several readiness conditions to node create time. If it's within
// 2 minutes, it'll assume node is just starting and not unhealthy.
//
// Nodes become ready in less than 1 minute after being created,
// so waiting extra 2 minutes before breaking them (which triggers
// readiness condition transition) should be sufficient, while
// making no assumptions about minimal node startup time.
time.Sleep(2 * time.Minute)
By("Block network connectivity to some nodes to simulate unhealthy cluster")
nodesToBreakCount := int(math.Ceil(math.Max(float64(unhealthyClusterThreshold), 0.5*float64(clusterSize))))
nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
framework.ExpectNoError(err)
Expect(nodesToBreakCount <= len(nodes.Items)).To(BeTrue())
nodesToBreak := nodes.Items[:nodesToBreakCount]
// TestUnderTemporaryNetworkFailure only removes connectivity to a single node,
// and accepts a func() callback. The loop is expressed as a recursive call
// to avoid duplicating TestUnderTemporaryNetworkFailure.
var testFunction func()
testFunction = func() {
if len(nodesToBreak) > 0 {
ntb := &nodesToBreak[0]
nodesToBreak = nodesToBreak[1:]
framework.TestUnderTemporaryNetworkFailure(c, "default", ntb, testFunction)
} else {
ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, defaultTimeout)
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
time.Sleep(scaleUpTimeout)
currentNodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
e2elog.Logf("Currently available nodes: %v, nodes available at the start of test: %v, disabled nodes: %v", len(currentNodes.Items), len(nodes.Items), nodesToBreakCount)
Expect(len(currentNodes.Items)).Should(Equal(len(nodes.Items) - nodesToBreakCount))
status, err := getClusterwideStatus(c)
e2elog.Logf("Clusterwide status: %v", status)
framework.ExpectNoError(err)
Expect(status).Should(Equal("Unhealthy"))
}
}
testFunction()
// Give nodes time to recover from network failure
framework.ExpectNoError(framework.WaitForReadyNodes(c, len(nodes.Items), nodesRecoverTimeout))
})
It("shouldn't scale up when expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]", func() {
defer createPriorityClasses(f)()
// Create nodeCount+1 pods allocating 0.7 allocatable on present nodes. One more node will have to be created.
cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", nodeCount+1, int(float64(nodeCount+1)*float64(0.7)*float64(memAllocatableMb)), false, time.Second, expendablePriorityClassName)
defer cleanupFunc()
By(fmt.Sprintf("Waiting for scale up hoping it won't happen, sleep for %s", scaleUpTimeout.String()))
time.Sleep(scaleUpTimeout)
// Verify that cluster size is not changed
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount }, time.Second))
})
It("should scale up when non expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]", func() {
defer createPriorityClasses(f)()
// Create nodeCount+1 pods allocating 0.7 allocatable on present nodes. One more node will have to be created.
cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", nodeCount+1, int(float64(nodeCount+1)*float64(0.7)*float64(memAllocatableMb)), true, scaleUpTimeout, highPriorityClassName)
defer cleanupFunc()
// Verify that cluster size is increased
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size > nodeCount }, time.Second))
})
It("shouldn't scale up when expendable pod is preempted [Feature:ClusterSizeAutoscalingScaleUp]", func() {
defer createPriorityClasses(f)()
// Create nodeCount pods allocating 0.7 allocatable on present nodes - one pod per node.
cleanupFunc1 := ReserveMemoryWithPriority(f, "memory-reservation1", nodeCount, int(float64(nodeCount)*float64(0.7)*float64(memAllocatableMb)), true, defaultTimeout, expendablePriorityClassName)
defer cleanupFunc1()
// Create nodeCount pods allocating 0.7 allocatable on present nodes - one pod per node. Pods created here should preempt pods created above.
cleanupFunc2 := ReserveMemoryWithPriority(f, "memory-reservation2", nodeCount, int(float64(nodeCount)*float64(0.7)*float64(memAllocatableMb)), true, defaultTimeout, highPriorityClassName)
defer cleanupFunc2()
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount }, time.Second))
})
It("should scale down when expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]", func() {
defer createPriorityClasses(f)()
increasedSize := manuallyIncreaseClusterSize(f, originalSizes)
// Create increasedSize pods allocating 0.7 allocatable on present nodes - one pod per node.
cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", increasedSize, int(float64(increasedSize)*float64(0.7)*float64(memAllocatableMb)), true, scaleUpTimeout, expendablePriorityClassName)
defer cleanupFunc()
By("Waiting for scale down")
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount }, scaleDownTimeout))
})
It("shouldn't scale down when non expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]", func() {
defer createPriorityClasses(f)()
increasedSize := manuallyIncreaseClusterSize(f, originalSizes)
// Create increasedSize pods allocating 0.7 allocatable on present nodes - one pod per node.
cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", increasedSize, int(float64(increasedSize)*float64(0.7)*float64(memAllocatableMb)), true, scaleUpTimeout, highPriorityClassName)
defer cleanupFunc()
By(fmt.Sprintf("Waiting for scale down hoping it won't happen, sleep for %s", scaleDownTimeout.String()))
time.Sleep(scaleDownTimeout)
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == increasedSize }, time.Second))
})
})
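// installNvidiaDriversDaemonSet applies the DaemonSet that installs NVIDIA GPU
// drivers, so that GPU nodes added during the test become usable.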
func installNvidiaDriversDaemonSet() {
By("Add daemonset which installs nvidia drivers")
// the link differs from the one in the GKE documentation; as discussed with @mindprince, this one should be used
framework.RunKubectlOrDie("apply", "-f", "https://raw.githubusercontent.com/GoogleCloudPlatform/container-engine-accelerators/master/daemonset.yaml")
}
func execCmd(args ...string) *exec.Cmd {
klog.Infof("Executing: %s", strings.Join(args, " "))
return exec.Command(args[0], args[1:]...)
}
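// runDrainTest scales the cluster up, runs podsPerNode reschedulable pods on every
// node, protects them with a PodDisruptionBudget that allows pdbSize disruptions,
// and then calls verifyFunction with the increased cluster size.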
func runDrainTest(f *framework.Framework, migSizes map[string]int, namespace string, podsPerNode, pdbSize int, verifyFunction func(int)) {
increasedSize := manuallyIncreaseClusterSize(f, migSizes)
nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
framework.ExpectNoError(err)
numPods := len(nodes.Items) * podsPerNode
testID := string(uuid.NewUUID()) // So that we can label and find pods
labelMap := map[string]string{"test_id": testID}
framework.ExpectNoError(runReplicatedPodOnEachNode(f, nodes.Items, namespace, podsPerNode, "reschedulable-pods", labelMap, 0))
defer framework.DeleteRCAndWaitForGC(f.ClientSet, namespace, "reschedulable-pods")
By("Create a PodDisruptionBudget")
minAvailable := intstr.FromInt(numPods - pdbSize)
pdb := &policy.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{
Name: "test_pdb",
Namespace: namespace,
},
Spec: policy.PodDisruptionBudgetSpec{
Selector: &metav1.LabelSelector{MatchLabels: labelMap},
MinAvailable: &minAvailable,
},
}
_, err = f.ClientSet.PolicyV1beta1().PodDisruptionBudgets(namespace).Create(pdb)
defer func() {
f.ClientSet.PolicyV1beta1().PodDisruptionBudgets(namespace).Delete(pdb.Name, &metav1.DeleteOptions{})
}()
framework.ExpectNoError(err)
verifyFunction(increasedSize)
}
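// getGkeApiEndpoint returns the GKE API endpoint, honoring the
// CLOUDSDK_API_ENDPOINT_OVERRIDES_CONTAINER environment variable and stripping
// any trailing slash; it falls back to the test sandbox endpoint.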
func getGkeApiEndpoint() string {
gkeApiEndpoint := os.Getenv("CLOUDSDK_API_ENDPOINT_OVERRIDES_CONTAINER")
if gkeApiEndpoint == "" {
gkeApiEndpoint = "https://test-container.sandbox.googleapis.com"
}
gkeApiEndpoint = strings.TrimSuffix(gkeApiEndpoint, "/")
return gkeApiEndpoint
}
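// getGKEURL builds a GKE REST URL for the given API version and resource suffix,
// appending a freshly obtained gcloud access token.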
func getGKEURL(apiVersion string, suffix string) string {
out, err := execCmd("gcloud", "auth", "print-access-token").Output()
framework.ExpectNoError(err)
token := strings.Replace(string(out), "\n", "", -1)
return fmt.Sprintf("%s/%s/%s?access_token=%s",
getGkeApiEndpoint(),
apiVersion,
suffix,
token)
}
func getGKEClusterURL(apiVersion string) string {
if isRegionalCluster() {
// TODO(bskiba): Use locations API for all clusters once it's graduated to v1.
return getGKEURL(apiVersion, fmt.Sprintf("projects/%s/locations/%s/clusters/%s",
framework.TestContext.CloudConfig.ProjectID,
framework.TestContext.CloudConfig.Region,
framework.TestContext.CloudConfig.Cluster))
} else {
return getGKEURL(apiVersion, fmt.Sprintf("projects/%s/zones/%s/clusters/%s",
framework.TestContext.CloudConfig.ProjectID,
framework.TestContext.CloudConfig.Zone,
framework.TestContext.CloudConfig.Cluster))
}
}
func getCluster(apiVersion string) (string, error) {
resp, err := http.Get(getGKEClusterURL(apiVersion))
if err != nil {
return "", err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "", err
}
if resp.StatusCode != http.StatusOK {
return "", fmt.Errorf("error: %s %s", resp.Status, body)
}
return string(body), nil
}
func isAutoscalerEnabled(expectedMaxNodeCountInTargetPool int) (bool, error) {
apiVersion := "v1"
if isRegionalCluster() {
apiVersion = "v1beta1"
}
strBody, err := getCluster(apiVersion)
if err != nil {
return false, err
}
if strings.Contains(strBody, "\"maxNodeCount\": "+strconv.Itoa(expectedMaxNodeCountInTargetPool)) {
return true, nil
}
return false, nil
}
func getClusterLocation() string {
if isRegionalCluster() {
return "--region=" + framework.TestContext.CloudConfig.Region
} else {
return "--zone=" + framework.TestContext.CloudConfig.Zone
}
}
func getGcloudCommandFromTrack(commandTrack string, args []string) []string {
command := []string{"gcloud"}
if commandTrack == "beta" || commandTrack == "alpha" {
command = append(command, commandTrack)
}
command = append(command, args...)
command = append(command, getClusterLocation())
command = append(command, "--project="+framework.TestContext.CloudConfig.ProjectID)
return command
}
func getGcloudCommand(args []string) []string {
track := ""
if isRegionalCluster() {
track = "beta"
}
return getGcloudCommandFromTrack(track, args)
}
func isRegionalCluster() bool {
// TODO(bskiba): Use an appropriate indicator that the cluster is regional.
return framework.TestContext.CloudConfig.MultiZone
}
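// enableAutoscaler enables cluster autoscaling for the given node pool via gcloud
// and polls until the new max node count is visible in the cluster description.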
func enableAutoscaler(nodePool string, minCount, maxCount int) error {
klog.Infof("Using gcloud to enable autoscaling for pool %s", nodePool)
args := []string{"container", "clusters", "update", framework.TestContext.CloudConfig.Cluster,
"--enable-autoscaling",
"--min-nodes=" + strconv.Itoa(minCount),
"--max-nodes=" + strconv.Itoa(maxCount),
"--node-pool=" + nodePool}
output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
if err != nil {
klog.Errorf("Failed config update result: %s", output)
return fmt.Errorf("Failed to enable autoscaling: %v", err)
}
klog.Infof("Config update result: %s", output)
var finalErr error
for startTime := time.Now(); startTime.Add(gkeUpdateTimeout).After(time.Now()); time.Sleep(30 * time.Second) {
val, err := isAutoscalerEnabled(maxCount)
if err == nil && val {
return nil
}
finalErr = err
}
return fmt.Errorf("autoscaler not enabled, last error: %v", finalErr)
}
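// disableAutoscaler disables cluster autoscaling for the given node pool via gcloud
// and polls until the change is reflected in the cluster description.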
func disableAutoscaler(nodePool string, minCount, maxCount int) error {
klog.Infof("Using gcloud to disable autoscaling for pool %s", nodePool)
args := []string{"container", "clusters", "update", framework.TestContext.CloudConfig.Cluster,
"--no-enable-autoscaling",
"--node-pool=" + nodePool}
output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
if err != nil {
klog.Errorf("Failed config update result: %s", output)
return fmt.Errorf("Failed to disable autoscaling: %v", err)
}
klog.Infof("Config update result: %s", output)
var finalErr error
for startTime := time.Now(); startTime.Add(gkeUpdateTimeout).After(time.Now()); time.Sleep(30 * time.Second) {
val, err := isAutoscalerEnabled(maxCount)
if err == nil && !val {
return nil
}
finalErr = err
}
return fmt.Errorf("autoscaler still enabled, last error: %v", finalErr)
}
func addNodePool(name string, machineType string, numNodes int) {
args := []string{"container", "node-pools", "create", name, "--quiet",
"--machine-type=" + machineType,
"--num-nodes=" + strconv.Itoa(numNodes),
"--cluster=" + framework.TestContext.CloudConfig.Cluster}
output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
klog.Infof("Creating node-pool %s: %s", name, output)
framework.ExpectNoError(err, string(output))
}
func addGpuNodePool(name string, gpuType string, gpuCount int, numNodes int) {
args := []string{"beta", "container", "node-pools", "create", name, "--quiet",
"--accelerator", "type=" + gpuType + ",count=" + strconv.Itoa(gpuCount),
"--num-nodes=" + strconv.Itoa(numNodes),
"--cluster=" + framework.TestContext.CloudConfig.Cluster}
output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
klog.Infof("Creating node-pool %s: %s", name, output)
framework.ExpectNoError(err, string(output))
}
func deleteNodePool(name string) {
klog.Infof("Deleting node pool %s", name)
args := []string{"container", "node-pools", "delete", name, "--quiet",
"--cluster=" + framework.TestContext.CloudConfig.Cluster}
err := wait.ExponentialBackoff(
wait.Backoff{Duration: 1 * time.Minute, Factor: float64(3), Steps: 3},
func() (bool, error) {
output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
if err != nil {
klog.Warningf("Error deleting nodegroup - error:%v, output: %s", err, output)
return false, nil
}
klog.Infof("Node-pool deletion output: %s", output)
return true, nil
})
framework.ExpectNoError(err)
}
func getPoolNodes(f *framework.Framework, poolName string) []*v1.Node {
nodes := make([]*v1.Node, 0, 1)
nodeList := framework.GetReadyNodesIncludingTaintedOrDie(f.ClientSet)
for i := range nodeList.Items {
// Take the address of the slice element rather than of the loop variable,
// so that each appended pointer refers to a distinct node.
node := &nodeList.Items[i]
if node.Labels[gkeNodepoolNameKey] == poolName {
nodes = append(nodes, node)
}
}
return nodes
}
// getPoolInitialSize returns the initial size of the node pool taking into
// account that it may span multiple zones. In that case, the node pool consists
// of multiple MIGs, each containing initialNodeCount nodes.
func getPoolInitialSize(poolName string) int {
// get initial node count
args := []string{"container", "node-pools", "describe", poolName, "--quiet",
"--cluster=" + framework.TestContext.CloudConfig.Cluster,
"--format=value(initialNodeCount)"}
output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
klog.Infof("Node-pool initial size: %s", output)
framework.ExpectNoError(err, string(output))
fields := strings.Fields(string(output))
Expect(len(fields)).Should(Equal(1))
size, err := strconv.ParseInt(fields[0], 10, 64)
framework.ExpectNoError(err)
// get number of node pools
args = []string{"container", "node-pools", "describe", poolName, "--quiet",
"--cluster=" + framework.TestContext.CloudConfig.Cluster,
"--format=value(instanceGroupUrls)"}
output, err = execCmd(getGcloudCommand(args)...).CombinedOutput()
framework.ExpectNoError(err, string(output))
nodeGroupCount := len(strings.Split(string(output), ";"))
return int(size) * nodeGroupCount
}
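// getPoolSize returns the number of ready schedulable nodes that belong to the
// given GKE node pool.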
func getPoolSize(f *framework.Framework, poolName string) int {
size := 0
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
for _, node := range nodeList.Items {
if node.Labels[gkeNodepoolNameKey] == poolName {
size++
}
}
return size
}
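// reserveMemory creates a replication controller whose replicas together request
// the given amount of memory, optionally constrained by a node selector,
// tolerations and a priority class; it retries RC creation failures and returns a
// cleanup function that deletes the RC.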
func reserveMemory(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration, selector map[string]string, tolerations []v1.Toleration, priorityClassName string) func() error {
By(fmt.Sprintf("Running RC which reserves %v MB of memory", megabytes))
request := int64(1024 * 1024 * megabytes / replicas)
config := &testutils.RCConfig{
Client: f.ClientSet,
Name: id,
Namespace: f.Namespace.Name,
Timeout: timeout,
Image: imageutils.GetPauseImageName(),
Replicas: replicas,
MemRequest: request,
NodeSelector: selector,
Tolerations: tolerations,
PriorityClassName: priorityClassName,
}
for start := time.Now(); time.Since(start) < rcCreationRetryTimeout; time.Sleep(rcCreationRetryDelay) {
err := framework.RunRC(*config)
if err != nil && strings.Contains(err.Error(), "Error creating replication controller") {
klog.Warningf("Failed to create memory reservation: %v", err)
continue
}
if expectRunning {
framework.ExpectNoError(err)
}
return func() error {
return framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, id)
}
}
framework.Failf("Failed to reserve memory within timeout")
return nil
}
// ReserveMemoryWithPriority creates a replication controller with pods with priority that, in summation,
// request the specified amount of memory.
func ReserveMemoryWithPriority(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration, priorityClassName string) func() error {
return reserveMemory(f, id, replicas, megabytes, expectRunning, timeout, nil, nil, priorityClassName)
}
// ReserveMemoryWithSelectorAndTolerations creates a replication controller with pods with node selector
// and tolerations that, in summation, request the specified amount of memory.
func ReserveMemoryWithSelectorAndTolerations(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration, selector map[string]string, tolerations []v1.Toleration) func() error {
return reserveMemory(f, id, replicas, megabytes, expectRunning, timeout, selector, tolerations, "")
}
// ReserveMemory creates a replication controller with pods that, in summation,
// request the specified amount of memory.
func ReserveMemory(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration) func() error {
return reserveMemory(f, id, replicas, megabytes, expectRunning, timeout, nil, nil, "")
}
// WaitForClusterSizeFunc waits until the cluster size matches the given function.
func WaitForClusterSizeFunc(c clientset.Interface, sizeFunc func(int) bool, timeout time.Duration) error {
return WaitForClusterSizeFuncWithUnready(c, sizeFunc, timeout, 0)
}
// WaitForClusterSizeFuncWithUnready waits until the cluster size matches the given function and assumes some unready nodes.
func WaitForClusterSizeFuncWithUnready(c clientset.Interface, sizeFunc func(int) bool, timeout time.Duration, expectedUnready int) error {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
if err != nil {
klog.Warningf("Failed to list nodes: %v", err)
continue
}
numNodes := len(nodes.Items)
// Filter out not-ready nodes.
framework.FilterNodes(nodes, func(node v1.Node) bool {
return framework.IsNodeConditionSetAsExpected(&node, v1.NodeReady, true)
})
numReady := len(nodes.Items)
if numNodes == numReady+expectedUnready && sizeFunc(numNodes) {
klog.Infof("Cluster has reached the desired size")
return nil
}
klog.Infof("Waiting for cluster with func, current size %d, not ready nodes %d", numNodes, numNodes-numReady)
}
return fmt.Errorf("timeout waiting %v for appropriate cluster size", timeout)
}
func waitForCaPodsReadyInNamespace(f *framework.Framework, c clientset.Interface, tolerateUnreadyCount int) error {
var notready []string
for start := time.Now(); time.Now().Before(start.Add(scaleUpTimeout)); time.Sleep(20 * time.Second) {
pods, err := c.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return fmt.Errorf("failed to get pods: %v", err)
}
notready = make([]string, 0)
for _, pod := range pods.Items {
ready := false
for _, c := range pod.Status.Conditions {
if c.Type == v1.PodReady && c.Status == v1.ConditionTrue {
ready = true
}
}
// Failed pods in this context generally mean that they have been
// double scheduled onto a node, but then failed a constraint check.
if pod.Status.Phase == v1.PodFailed {
klog.Warningf("Pod has failed: %v", pod)
}
if !ready && pod.Status.Phase != v1.PodFailed {
notready = append(notready, pod.Name)
}
}
if len(notready) <= tolerateUnreadyCount {
klog.Infof("sufficient number of pods ready. Tolerating %d unready", tolerateUnreadyCount)
return nil
}
klog.Infof("Too many pods are not ready yet: %v", notready)
}
klog.Info("Timeout on waiting for pods being ready")
klog.Info(framework.RunKubectlOrDie("get", "pods", "-o", "json", "--all-namespaces"))
klog.Info(framework.RunKubectlOrDie("get", "nodes", "-o", "json"))
// Some pods are still not running.
return fmt.Errorf("Too many pods are still not running: %v", notready)
}
func waitForAllCaPodsReadyInNamespace(f *framework.Framework, c clientset.Interface) error {
return waitForCaPodsReadyInNamespace(f, c, 0)
}
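// getAnyNode returns an arbitrary schedulable node, or nil if listing fails or no
// nodes exist.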
func getAnyNode(c clientset.Interface) *v1.Node {
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
if err != nil {
klog.Errorf("Failed to get node list: %v", err)
return nil
}
if len(nodes.Items) == 0 {
klog.Errorf("No nodes")
return nil
}
return &nodes.Items[0]
}
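// setMigSizes resizes each MIG to its desired size and reports whether any resize
// call was actually made.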
func setMigSizes(sizes map[string]int) bool {
madeChanges := false
for mig, desiredSize := range sizes {
currentSize, err := framework.GroupSize(mig)
framework.ExpectNoError(err)
if desiredSize != currentSize {
By(fmt.Sprintf("Setting size of %s to %d", mig, desiredSize))
err = framework.ResizeGroup(mig, int32(desiredSize))
framework.ExpectNoError(err)
madeChanges = true
}
}
return madeChanges
}
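// drainNode marks the given node unschedulable and immediately deletes every pod
// running on it.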
func drainNode(f *framework.Framework, node *v1.Node) {
By("Make the single node unschedulable")
makeNodeUnschedulable(f.ClientSet, node)
By("Manually drain the single node")
podOpts := metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()}
pods, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceAll).List(podOpts)
framework.ExpectNoError(err)
for _, pod := range pods.Items {
err = f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(pod.Name, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
}
}
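// makeNodeUnschedulable adds the test-only NoSchedule taint to the node (if not
// already present), retrying on update conflicts.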
func makeNodeUnschedulable(c clientset.Interface, node *v1.Node) error {
By(fmt.Sprintf("Taint node %s", node.Name))
for j := 0; j < 3; j++ {
freshNode, err := c.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{})
if err != nil {
return err
}
for _, taint := range freshNode.Spec.Taints {
if taint.Key == disabledTaint {
return nil
}
}
freshNode.Spec.Taints = append(freshNode.Spec.Taints, v1.Taint{
Key: disabledTaint,
Value: "DisabledForTest",
Effect: v1.TaintEffectNoSchedule,
})
_, err = c.CoreV1().Nodes().Update(freshNode)
if err == nil {
return nil
}
if !errors.IsConflict(err) {
return err
}
klog.Warningf("Got 409 conflict when trying to taint node, retries left: %v", 3-j)
}
return fmt.Errorf("Failed to taint node in allowed number of retries")
}
// CriticalAddonsOnlyError implements the `error` interface, and signifies the
// presence of the `CriticalAddonsOnly` taint on the node.
type CriticalAddonsOnlyError struct{}
func (CriticalAddonsOnlyError) Error() string {
return fmt.Sprintf("CriticalAddonsOnly taint found on node")
}
func makeNodeSchedulable(c clientset.Interface, node *v1.Node, failOnCriticalAddonsOnly bool) error {
By(fmt.Sprintf("Remove taint from node %s", node.Name))
for j := 0; j < 3; j++ {
freshNode, err := c.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{})
if err != nil {
return err
}
var newTaints []v1.Taint
for _, taint := range freshNode.Spec.Taints {
if failOnCriticalAddonsOnly && taint.Key == criticalAddonsOnlyTaint {
return CriticalAddonsOnlyError{}
}
if taint.Key != disabledTaint {
newTaints = append(newTaints, taint)
}
}
if len(newTaints) == len(freshNode.Spec.Taints) {
return nil
}
freshNode.Spec.Taints = newTaints
_, err = c.CoreV1().Nodes().Update(freshNode)
if err == nil {
return nil
}
if !errors.IsConflict(err) {
return err
}
klog.Warningf("Got 409 conflict when trying to taint node, retries left: %v", 3-j)
}
return fmt.Errorf("Failed to remove taint from node in allowed number of retries")
}
// ScheduleAnySingleGpuPod schedules a pod which requires single GPU of any type
func ScheduleAnySingleGpuPod(f *framework.Framework, id string) error {
return ScheduleGpuPod(f, id, "", 1)
}
// ScheduleGpuPod schedules a pod which requires a given number of gpus of given type
func ScheduleGpuPod(f *framework.Framework, id string, gpuType string, gpuLimit int64) error {
config := &testutils.RCConfig{
Client: f.ClientSet,
Name: id,
Namespace: f.Namespace.Name,
Timeout: 3 * scaleUpTimeout, // spinning up GPU node is slow
Image: imageutils.GetPauseImageName(),
Replicas: 1,
GpuLimit: gpuLimit,
Labels: map[string]string{"requires-gpu": "yes"},
}
if gpuType != "" {
config.NodeSelector = map[string]string{gpuLabel: gpuType}
}
err := framework.RunRC(*config)
if err != nil {
return err
}
return nil
}
// Create an RC running a given number of pods with anti-affinity
func runAntiAffinityPods(f *framework.Framework, namespace string, pods int, id string, podLabels, antiAffinityLabels map[string]string) error {
config := &testutils.RCConfig{
Affinity: buildAntiAffinity(antiAffinityLabels),
Client: f.ClientSet,
Name: id,
Namespace: namespace,
Timeout: scaleUpTimeout,
Image: imageutils.GetPauseImageName(),
Replicas: pods,
Labels: podLabels,
}
err := framework.RunRC(*config)
if err != nil {
return err
}
_, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(id, metav1.GetOptions{})
if err != nil {
return err
}
return nil
}
func runVolumeAntiAffinityPods(f *framework.Framework, namespace string, pods int, id string, podLabels, antiAffinityLabels map[string]string, volumes []v1.Volume) error {
config := &testutils.RCConfig{
Affinity: buildAntiAffinity(antiAffinityLabels),
Volumes: volumes,
Client: f.ClientSet,
Name: id,
Namespace: namespace,
Timeout: scaleUpTimeout,
Image: imageutils.GetPauseImageName(),
Replicas: pods,
Labels: podLabels,
}
err := framework.RunRC(*config)
if err != nil {
return err
}
_, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(id, metav1.GetOptions{})
if err != nil {
return err
}
return nil
}
var emptyDirVolumes = []v1.Volume{
{
Name: "empty-volume",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
}
func buildVolumes(pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) []v1.Volume {
return []v1.Volume{
{
Name: pv.Name,
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: pvc.Name,
ReadOnly: false,
},
},
},
}
}
func buildAntiAffinity(labels map[string]string) *v1.Affinity {
return &v1.Affinity{
PodAntiAffinity: &v1.PodAntiAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
{
LabelSelector: &metav1.LabelSelector{
MatchLabels: labels,
},
TopologyKey: "kubernetes.io/hostname",
},
},
},
}
}
// Create an RC running a given number of pods on each node without adding any constraint forcing
// such pod distribution. This is meant to create a bunch of underutilized (but not unused) nodes
// with pods that can be rescheduled on different nodes.
// This is achieved using the following method:
// 1. disable scheduling on each node
// 2. create an empty RC
// 3. for each node:
// 3a. enable scheduling on that node
// 3b. increase number of replicas in RC by podsPerNode
func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, namespace string, podsPerNode int, id string, labels map[string]string, memRequest int64) error {
By("Run a pod on each node")
for _, node := range nodes {
err := makeNodeUnschedulable(f.ClientSet, &node)
defer func(n v1.Node) {
makeNodeSchedulable(f.ClientSet, &n, false)
}(node)
if err != nil {
return err
}
}
config := &testutils.RCConfig{
Client: f.ClientSet,
Name: id,
Namespace: namespace,
Timeout: defaultTimeout,
Image: imageutils.GetPauseImageName(),
Replicas: 0,
Labels: labels,
MemRequest: memRequest,
}
err := framework.RunRC(*config)
if err != nil {
return err
}
rc, err := f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(id, metav1.GetOptions{})
if err != nil {
return err
}
for i, node := range nodes {
err = makeNodeSchedulable(f.ClientSet, &node, false)
if err != nil {
return err
}
// Update replicas count, to create new pods that will be allocated on node
// (we retry 409 errors in case rc reference got out of sync)
for j := 0; j < 3; j++ {
*rc.Spec.Replicas = int32((i + 1) * podsPerNode)
rc, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Update(rc)
if err == nil {
break
}
if !errors.IsConflict(err) {
return err
}
klog.Warningf("Got 409 conflict when trying to scale RC, retries left: %v", 3-j)
rc, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(id, metav1.GetOptions{})
if err != nil {
return err
}
}
err = wait.PollImmediate(5*time.Second, podTimeout, func() (bool, error) {
rc, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(id, metav1.GetOptions{})
if err != nil || rc.Status.ReadyReplicas < int32((i+1)*podsPerNode) {
return false, nil
}
return true, nil
})
if err != nil {
return fmt.Errorf("failed to coerce RC into spawning a pod on node %s within timeout", node.Name)
}
err = makeNodeUnschedulable(f.ClientSet, &node)
if err != nil {
return err
}
}
return nil
}
// Increase cluster size by newNodesForScaledownTests to create some unused nodes
// that can be later removed by cluster autoscaler.
func manuallyIncreaseClusterSize(f *framework.Framework, originalSizes map[string]int) int {
By("Manually increase cluster size")
increasedSize := 0
newSizes := make(map[string]int)
for key, val := range originalSizes {
newSizes[key] = val + newNodesForScaledownTests
increasedSize += val + newNodesForScaledownTests
}
setMigSizes(newSizes)
checkClusterSize := func(size int) bool {
if size >= increasedSize {
return true
}
resized := setMigSizes(newSizes)
if resized {
klog.Warning("Unexpected node group size while waiting for cluster resize. Setting size to target again.")
}
return false
}
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, checkClusterSize, manualResizeTimeout))
return increasedSize
}
// Try to get clusterwide health from CA status configmap.
// Status configmap is not parsing-friendly, so evil regexpery follows.
func getClusterwideStatus(c clientset.Interface) (string, error) {
configMap, err := c.CoreV1().ConfigMaps("kube-system").Get("cluster-autoscaler-status", metav1.GetOptions{})
if err != nil {
return "", err
}
status, ok := configMap.Data["status"]
if !ok {
return "", fmt.Errorf("Status information not found in configmap")
}
matcher, err := regexp.Compile("Cluster-wide:\\s*\n\\s*Health:\\s*([A-Za-z]+)")
if err != nil {
return "", err
}
result := matcher.FindStringSubmatch(status)
if len(result) < 2 {
return "", fmt.Errorf("Failed to parse CA status configmap, raw status: %v", status)
}
return result[1], nil
}
type scaleUpStatus struct {
status string
ready int
target int
timestamp time.Time
}
// Try to get timestamp from status.
// Status configmap is not parsing-friendly, so evil regexpery follows.
func getStatusTimestamp(status string) (time.Time, error) {
timestampMatcher, err := regexp.Compile("Cluster-autoscaler status at \\s*([0-9\\-]+ [0-9]+:[0-9]+:[0-9]+\\.[0-9]+ \\+[0-9]+ [A-Za-z]+)")
if err != nil {
return time.Time{}, err
}
timestampMatch := timestampMatcher.FindStringSubmatch(status)
if len(timestampMatch) < 2 {
return time.Time{}, fmt.Errorf("Failed to parse CA status timestamp, raw status: %v", status)
}
timestamp, err := time.Parse(timestampFormat, timestampMatch[1])
if err != nil {
return time.Time{}, err
}
return timestamp, nil
}
// Try to get scaleup statuses of all node groups.
// Status configmap is not parsing-friendly, so evil regexpery follows.
func getScaleUpStatus(c clientset.Interface) (*scaleUpStatus, error) {
configMap, err := c.CoreV1().ConfigMaps("kube-system").Get("cluster-autoscaler-status", metav1.GetOptions{})
if err != nil {
return nil, err
}
status, ok := configMap.Data["status"]
if !ok {
return nil, fmt.Errorf("Status information not found in configmap")
}
timestamp, err := getStatusTimestamp(status)
if err != nil {
return nil, err
}
matcher, err := regexp.Compile("s*ScaleUp:\\s*([A-Za-z]+)\\s*\\(ready=([0-9]+)\\s*cloudProviderTarget=([0-9]+)\\s*\\)")
if err != nil {
return nil, err
}
matches := matcher.FindAllStringSubmatch(status, -1)
if len(matches) < 1 {
return nil, fmt.Errorf("Failed to parse CA status configmap, raw status: %v", status)
}
result := scaleUpStatus{
status: caNoScaleUpStatus,
ready: 0,
target: 0,
timestamp: timestamp,
}
for _, match := range matches {
if match[1] == caOngoingScaleUpStatus {
result.status = caOngoingScaleUpStatus
}
newReady, err := strconv.Atoi(match[2])
if err != nil {
return nil, err
}
result.ready += newReady
newTarget, err := strconv.Atoi(match[3])
if err != nil {
return nil, err
}
result.target += newTarget
}
klog.Infof("Cluster-Autoscaler scale-up status: %v (%v, %v)", result.status, result.ready, result.target)
return &result, nil
}
func waitForScaleUpStatus(c clientset.Interface, cond func(s *scaleUpStatus) bool, timeout time.Duration) (*scaleUpStatus, error) {
var finalErr error
var status *scaleUpStatus
err := wait.PollImmediate(5*time.Second, timeout, func() (bool, error) {
status, finalErr = getScaleUpStatus(c)
if finalErr != nil {
return false, nil
}
if status.timestamp.Add(freshStatusLimit).Before(time.Now()) {
// stale status
finalErr = fmt.Errorf("Status too old")
return false, nil
}
return cond(status), nil
})
if err != nil {
err = fmt.Errorf("Failed to find expected scale up status: %v, last status: %v, final err: %v", err, status, finalErr)
}
return status, err
}
// This is a temporary fix to allow CA to migrate some kube-system pods
// TODO: Remove this when the PDB is added for some of those components
func addKubeSystemPdbs(f *framework.Framework) (func(), error) {
By("Create PodDisruptionBudgets for kube-system components, so they can be migrated if required")
var newPdbs []string
cleanup := func() {
var finalErr error
for _, newPdbName := range newPdbs {
By(fmt.Sprintf("Delete PodDisruptionBudget %v", newPdbName))
err := f.ClientSet.PolicyV1beta1().PodDisruptionBudgets("kube-system").Delete(newPdbName, &metav1.DeleteOptions{})
if err != nil {
// log error, but attempt to remove other pdbs
klog.Errorf("Failed to delete PodDisruptionBudget %v, err: %v", newPdbName, err)
finalErr = err
}
}
if finalErr != nil {
framework.Failf("Error during PodDisruptionBudget cleanup: %v", finalErr)
}
}
type pdbInfo struct {
label string
minAvailable int
}
pdbsToAdd := []pdbInfo{
{label: "kube-dns", minAvailable: 1},
{label: "kube-dns-autoscaler", minAvailable: 0},
{label: "metrics-server", minAvailable: 0},
{label: "kubernetes-dashboard", minAvailable: 0},
{label: "glbc", minAvailable: 0},
}
for _, pdbData := range pdbsToAdd {
By(fmt.Sprintf("Create PodDisruptionBudget for %v", pdbData.label))
labelMap := map[string]string{"k8s-app": pdbData.label}
pdbName := fmt.Sprintf("test-pdb-for-%v", pdbData.label)
minAvailable := intstr.FromInt(pdbData.minAvailable)
pdb := &policy.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{
Name: pdbName,
Namespace: "kube-system",
},
Spec: policy.PodDisruptionBudgetSpec{
Selector: &metav1.LabelSelector{MatchLabels: labelMap},
MinAvailable: &minAvailable,
},
}
_, err := f.ClientSet.PolicyV1beta1().PodDisruptionBudgets("kube-system").Create(pdb)
newPdbs = append(newPdbs, pdbName)
if err != nil {
return cleanup, err
}
}
return cleanup, nil
}
func createPriorityClasses(f *framework.Framework) func() {
priorityClasses := map[string]int32{
expendablePriorityClassName: -15,
highPriorityClassName: 1000,
}
for className, priority := range priorityClasses {
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: className}, Value: priority})
if err != nil {
klog.Errorf("Error creating priority class: %v", err)
}
Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true))
}
return func() {
for className := range priorityClasses {
err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(className, nil)
if err != nil {
klog.Errorf("Error deleting priority class: %v", err)
}
}
}
}
|
[
"\"TESTED_GPU_TYPE\"",
"\"CLOUDSDK_API_ENDPOINT_OVERRIDES_CONTAINER\""
] |
[] |
[
"CLOUDSDK_API_ENDPOINT_OVERRIDES_CONTAINER",
"TESTED_GPU_TYPE"
] |
[]
|
["CLOUDSDK_API_ENDPOINT_OVERRIDES_CONTAINER", "TESTED_GPU_TYPE"]
|
go
| 2 | 0 | |
libfaceid/tacotron/train.py
|
import argparse
from datetime import datetime
import math
import os
import subprocess
import time
import tensorflow as tf
import traceback
from datasets.datafeeder import DataFeeder
from hparams import hparams, hparams_debug_string
from models import create_model
from text import sequence_to_text
from util import audio, infolog, plot, ValueWindow
log = infolog.log
def get_git_commit():
subprocess.check_output(['git', 'diff-index', '--quiet', 'HEAD']) # Verify client is clean
commit = subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode().strip()[:10]
log('Git commit: %s' % commit)
return commit
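# add_stats builds TensorBoard summaries for the model outputs, losses, learning
# rate and gradient norms, and returns the merged summary op.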
def add_stats(model):
with tf.variable_scope('stats') as scope:
tf.summary.histogram('linear_outputs', model.linear_outputs)
tf.summary.histogram('linear_targets', model.linear_targets)
tf.summary.histogram('mel_outputs', model.mel_outputs)
tf.summary.histogram('mel_targets', model.mel_targets)
tf.summary.scalar('loss_mel', model.mel_loss)
tf.summary.scalar('loss_linear', model.linear_loss)
tf.summary.scalar('learning_rate', model.learning_rate)
tf.summary.scalar('loss', model.loss)
gradient_norms = [tf.norm(grad) for grad in model.gradients]
tf.summary.histogram('gradient_norm', gradient_norms)
tf.summary.scalar('max_gradient_norm', tf.reduce_max(gradient_norms))
return tf.summary.merge_all()
def time_string():
return datetime.now().strftime('%Y-%m-%d %H:%M')
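# train builds the DataFeeder and model graph, then runs the training loop:
# logging step time and loss, writing summaries, saving checkpoints, and dumping
# sample audio plus alignment plots at checkpoint intervals.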
def train(log_dir, args):
commit = get_git_commit() if args.git else 'None'
checkpoint_path = os.path.join(log_dir, 'model.ckpt')
input_path = os.path.join(args.base_dir, args.input)
log('Checkpoint path: %s' % checkpoint_path)
log('Loading training data from: %s' % input_path)
log('Using model: %s' % args.model)
log(hparams_debug_string())
# Set up DataFeeder:
coord = tf.train.Coordinator()
with tf.variable_scope('datafeeder') as scope:
feeder = DataFeeder(coord, input_path, hparams)
# Set up model:
global_step = tf.Variable(0, name='global_step', trainable=False)
with tf.variable_scope('model') as scope:
model = create_model(args.model, hparams)
model.initialize(feeder.inputs, feeder.input_lengths, feeder.mel_targets, feeder.linear_targets)
model.add_loss()
model.add_optimizer(global_step)
stats = add_stats(model)
# Bookkeeping:
step = 0
time_window = ValueWindow(100)
loss_window = ValueWindow(100)
saver = tf.train.Saver(max_to_keep=5, keep_checkpoint_every_n_hours=2)
# Train!
with tf.Session() as sess:
try:
summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
sess.run(tf.global_variables_initializer())
if args.restore_step:
# Restore from a checkpoint if the user requested it.
restore_path = '%s-%d' % (checkpoint_path, args.restore_step)
saver.restore(sess, restore_path)
log('Resuming from checkpoint: %s at commit: %s' % (restore_path, commit), slack=True)
else:
log('Starting new training run at commit: %s' % commit, slack=True)
feeder.start_in_session(sess)
while not coord.should_stop():
start_time = time.time()
step, loss, opt = sess.run([global_step, model.loss, model.optimize])
time_window.append(time.time() - start_time)
loss_window.append(loss)
message = 'Step %-7d [%.03f sec/step, loss=%.05f, avg_loss=%.05f]' % (
step, time_window.average, loss, loss_window.average)
log(message, slack=(step % args.checkpoint_interval == 0))
if loss > 100 or math.isnan(loss):
log('Loss exploded to %.05f at step %d!' % (loss, step), slack=True)
raise Exception('Loss Exploded')
if step % args.summary_interval == 0:
log('Writing summary at step: %d' % step)
summary_writer.add_summary(sess.run(stats), step)
if step % args.checkpoint_interval == 0:
log('Saving checkpoint to: %s-%d' % (checkpoint_path, step))
saver.save(sess, checkpoint_path, global_step=step)
log('Saving audio and alignment...')
input_seq, spectrogram, alignment = sess.run([
model.inputs[0], model.linear_outputs[0], model.alignments[0]])
waveform = audio.inv_spectrogram(spectrogram.T)
audio.save_wav(waveform, os.path.join(log_dir, 'step-%d-audio.wav' % step))
plot.plot_alignment(alignment, os.path.join(log_dir, 'step-%d-align.png' % step),
info='%s, %s, %s, step=%d, loss=%.5f' % (args.model, commit, time_string(), step, loss))
log('Input: %s' % sequence_to_text(input_seq))
except Exception as e:
log('Exiting due to exception: %s' % e, slack=True)
traceback.print_exc()
coord.request_stop(e)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--base_dir', default=os.path.expanduser('~/tacotron'))
parser.add_argument('--input', default='training/train.txt')
parser.add_argument('--model', default='tacotron')
parser.add_argument('--name', help='Name of the run. Used for logging. Defaults to model name.')
parser.add_argument('--hparams', default='',
help='Hyperparameter overrides as a comma-separated list of name=value pairs')
parser.add_argument('--restore_step', type=int, help='Global step to restore from checkpoint.')
parser.add_argument('--summary_interval', type=int, default=100,
help='Steps between running summary ops.')
parser.add_argument('--checkpoint_interval', type=int, default=1000,
help='Steps between writing checkpoints.')
parser.add_argument('--slack_url', help='Slack webhook URL to get periodic reports.')
parser.add_argument('--tf_log_level', type=int, default=1, help='Tensorflow C++ log level.')
parser.add_argument('--git', action='store_true', help='If set, verify that the client is clean.')
args = parser.parse_args()
os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args.tf_log_level)
run_name = args.name or args.model
log_dir = os.path.join(args.base_dir, 'logs-%s' % run_name)
os.makedirs(log_dir, exist_ok=True)
infolog.init(os.path.join(log_dir, 'train.log'), run_name, args.slack_url)
hparams.parse(args.hparams)
train(log_dir, args)
if __name__ == '__main__':
main()
|
[] |
[] |
[
"TF_CPP_MIN_LOG_LEVEL"
] |
[]
|
["TF_CPP_MIN_LOG_LEVEL"]
|
python
| 1 | 0 | |
backend/course/api/v1/serializers.py
|
from rest_framework import serializers
from course.models import (
Recording,
Event,
Subscription,
Course,
Group,
Module,
PaymentMethod,
SubscriptionType,
Enrollment,
Lesson,
Category,
)
class GroupSerializer(serializers.ModelSerializer):
class Meta:
model = Group
fields = "__all__"
class LessonSerializer(serializers.ModelSerializer):
class Meta:
model = Lesson
fields = "__all__"
class EnrollmentSerializer(serializers.ModelSerializer):
class Meta:
model = Enrollment
fields = "__all__"
class PaymentMethodSerializer(serializers.ModelSerializer):
class Meta:
model = PaymentMethod
fields = "__all__"
class EventSerializer(serializers.ModelSerializer):
class Meta:
model = Event
fields = "__all__"
class CategorySerializer(serializers.ModelSerializer):
class Meta:
model = Category
fields = "__all__"
class ModuleSerializer(serializers.ModelSerializer):
class Meta:
model = Module
fields = "__all__"
class SubscriptionTypeSerializer(serializers.ModelSerializer):
class Meta:
model = SubscriptionType
fields = "__all__"
class SubscriptionSerializer(serializers.ModelSerializer):
class Meta:
model = Subscription
fields = "__all__"
class RecordingSerializer(serializers.ModelSerializer):
class Meta:
model = Recording
fields = "__all__"
class CourseSerializer(serializers.ModelSerializer):
class Meta:
model = Course
fields = "__all__"
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
server/index.py
|
import base64
import datetime
import json
import os
import time
from flask import Flask, render_template, request, jsonify, send_file
import plaid
# Get Plaid API keys from https://dashboard.plaid.com/account/keys
PLAID_CLIENT_ID = os.getenv("PLAID_CLIENT_ID")
PLAID_SECRET = os.getenv("PLAID_SECRET")
# Use 'sandbox' to test with Plaid's Sandbox environment (username: user_good,
# password: pass_good)
# Use `development` to test with live users and credentials and `production`
# to go live
PLAID_ENV = os.getenv("PLAID_ENV", "sandbox")
# PLAID_PRODUCTS is a comma-separated list of products to use when initializing
# Link. Note that this list must contain 'assets' in order for the app to be
# able to create and retrieve asset reports.
PLAID_PRODUCTS = [
pp.strip() for pp in os.getenv("PLAID_PRODUCTS", "transactions").split(",")
]
# PLAID_COUNTRY_CODES is a comma-separated list of countries for which users
# will be able to select institutions from.
PLAID_COUNTRY_CODES = [
cc.strip() for cc in os.getenv("PLAID_COUNTRY_CODES", "US").split(",")
]
PLAID_CLIENT_NAME = "Bean Collector"
PLAID_LANGUAGE = "en"
client = plaid.Client(
client_id=PLAID_CLIENT_ID,
secret=PLAID_SECRET,
environment=PLAID_ENV,
api_version="2020-09-14",
)
app = Flask(__name__)
@app.route("/")
def index():
return send_file("static/index.html")
@app.route("/index.js")
def index_scripts():
return render_template(
"index.js",
plaid_products=",".join(PLAID_PRODUCTS),
)
# Exchange token flow - exchange a Link public_token for
# an API access_token
# https://plaid.com/docs/#exchange-token-flow
@app.route("/get_access_token", methods=["POST"])
def get_access_token():
public_token = request.form["public_token"]
try:
exchange_response = client.Item.public_token.exchange(public_token)
except plaid.errors.PlaidError as e:
return jsonify(format_error(e))
pretty_print_response(exchange_response)
return jsonify(exchange_response)
# Retrieve ACH or ETF account numbers for an Item
# https://plaid.com/docs/#auth
@app.route("/auth", methods=["GET"])
def get_auth():
try:
auth_response = client.Auth.get(request.args["access_token"])
except plaid.errors.PlaidError as e:
return jsonify(
{
"error": {
"display_message": e.display_message,
"error_code": e.code,
"error_type": e.type,
}
}
)
pretty_print_response(auth_response)
return jsonify({"error": None, "auth": auth_response})
# Retrieve Transactions for an Item
# https://plaid.com/docs/#transactions
@app.route("/transactions", methods=["GET"])
def get_transactions():
# Pull transactions for the last 30 days
start_date = "{:%Y-%m-%d}".format(datetime.datetime.now() + datetime.timedelta(-30))
end_date = "{:%Y-%m-%d}".format(datetime.datetime.now())
try:
transactions_response = client.Transactions.get(
request.args["access_token"], start_date, end_date
)
except plaid.errors.PlaidError as e:
return jsonify(format_error(e))
pretty_print_response(transactions_response)
return jsonify({"error": None, "transactions": transactions_response})
# Retrieve Identity data for an Item
# https://plaid.com/docs/#identity
@app.route("/identity", methods=["GET"])
def get_identity():
try:
identity_response = client.Identity.get(request.args["access_token"])
except plaid.errors.PlaidError as e:
return jsonify(
{
"error": {
"display_message": e.display_message,
"error_code": e.code,
"error_type": e.type,
}
}
)
pretty_print_response(identity_response)
return jsonify({"error": None, "identity": identity_response})
# Retrieve real-time balance data for each of an Item's accounts
# https://plaid.com/docs/#balance
@app.route("/balance", methods=["GET"])
def get_balance():
try:
balance_response = client.Accounts.balance.get(request.args["access_token"])
except plaid.errors.PlaidError as e:
return jsonify(
{
"error": {
"display_message": e.display_message,
"error_code": e.code,
"error_type": e.type,
}
}
)
pretty_print_response(balance_response)
return jsonify({"error": None, "balance": balance_response})
# Retrieve an Item's accounts
# https://plaid.com/docs/#accounts
@app.route("/accounts", methods=["GET"])
def get_accounts():
try:
accounts_response = client.Accounts.get(request.args["access_token"])
except plaid.errors.PlaidError as e:
return jsonify(
{
"error": {
"display_message": e.display_message,
"error_code": e.code,
"error_type": e.type,
}
}
)
pretty_print_response(accounts_response)
return jsonify({"error": None, "accounts": accounts_response})
# Create and then retrieve an Asset Report for one or more Items. Note that an
# Asset Report can contain up to 100 items, but for simplicity we're only
# including one Item here.
# https://plaid.com/docs/#assets
@app.route("/assets", methods=["GET"])
def get_assets():
try:
asset_report_create_response = client.AssetReport.create(
[request.args["access_token"]], 10
)
except plaid.errors.PlaidError as e:
return jsonify(
{
"error": {
"display_message": e.display_message,
"error_code": e.code,
"error_type": e.type,
}
}
)
pretty_print_response(asset_report_create_response)
asset_report_token = asset_report_create_response["asset_report_token"]
# Poll for the completion of the Asset Report.
num_retries_remaining = 20
asset_report_json = None
while num_retries_remaining > 0:
try:
asset_report_get_response = client.AssetReport.get(asset_report_token)
asset_report_json = asset_report_get_response["report"]
break
except plaid.errors.PlaidError as e:
if e.code == "PRODUCT_NOT_READY":
num_retries_remaining -= 1
time.sleep(1)
continue
return jsonify(
{
"error": {
"display_message": e.display_message,
"error_code": e.code,
"error_type": e.type,
}
}
)
if asset_report_json is None:
# The retry loop above ran out of attempts, so there is no Plaid
# exception in scope here to take an error code or type from.
return jsonify(
{
"error": {
"display_message": "Timed out when polling for Asset Report",
"error_code": None,
"error_type": None,
}
}
)
asset_report_pdf = None
try:
asset_report_pdf = client.AssetReport.get_pdf(asset_report_token)
except plaid.errors.PlaidError as e:
return jsonify(
{
"error": {
"display_message": e.display_message,
"error_code": e.code,
"error_type": e.type,
}
}
)
return jsonify(
{
"error": None,
"json": asset_report_json,
"pdf": base64.b64encode(asset_report_pdf),
}
)
# Retrieve investment holdings data for an Item
# https://plaid.com/docs/#investments
@app.route("/holdings", methods=["GET"])
def get_holdings():
try:
holdings_response = client.Holdings.get(request.args["access_token"])
except plaid.errors.PlaidError as e:
return jsonify(
{
"error": {
"display_message": e.display_message,
"error_code": e.code,
"error_type": e.type,
}
}
)
pretty_print_response(holdings_response)
return jsonify({"error": None, "holdings": holdings_response})
# Retrieve Investment Transactions for an Item
# https://plaid.com/docs/#investments
@app.route("/investment_transactions", methods=["GET"])
def get_investment_transactions():
# Pull transactions for the last 30 days
start_date = "{:%Y-%m-%d}".format(datetime.datetime.now() + datetime.timedelta(-30))
end_date = "{:%Y-%m-%d}".format(datetime.datetime.now())
try:
investment_transactions_response = client.InvestmentTransactions.get(
request.args["access_token"], start_date, end_date
)
except plaid.errors.PlaidError as e:
return jsonify(format_error(e))
pretty_print_response(investment_transactions_response)
return jsonify(
{"error": None, "investment_transactions": investment_transactions_response}
)
# Retrieve high-level information about an Item
# https://plaid.com/docs/#retrieve-item
@app.route("/item", methods=["GET"])
def item():
item_response = client.Item.get(request.args["access_token"])
institution_response = client.Institutions.get_by_id(
item_response["item"]["institution_id"], country_codes=PLAID_COUNTRY_CODES
)
pretty_print_response(item_response)
pretty_print_response(institution_response)
return jsonify(
{
"error": None,
"item": item_response["item"],
"institution": institution_response["institution"],
}
)
# Create link_token flow - make a temporary link_token
# that the client Link will use to talk to Plaid
# https://plaid.com/docs/api/tokens/#linktokencreate
@app.route("/create_link_token", methods=["POST"])
def create_link_token():
configs = {
"user": {"client_user_id": "1"},
"products": PLAID_PRODUCTS,
"client_name": PLAID_CLIENT_NAME,
"country_codes": PLAID_COUNTRY_CODES,
"language": PLAID_LANGUAGE,
}
if "access_token" in request.args:
configs["access_token"] = request.args["access_token"]
try:
create_response = client.LinkToken.create(configs)
except plaid.errors.PlaidError as e:
return jsonify(format_error(e))
pretty_print_response(create_response)
return jsonify(create_response)
def pretty_print_response(response):
print(json.dumps(response, indent=2, sort_keys=True))
def format_error(e):
return {
"error": {
"display_message": e.display_message,
"error_code": e.code,
"error_type": e.type,
"error_message": e.message,
}
}
if __name__ == "__main__":
app.run(port=int(os.getenv("PORT", 5000)))
|
[] |
[] |
[
"PORT",
"PLAID_PRODUCTS",
"PLAID_COUNTRY_CODES",
"PLAID_CLIENT_ID",
"PLAID_ENV",
"PLAID_SECRET"
] |
[]
|
["PORT", "PLAID_PRODUCTS", "PLAID_COUNTRY_CODES", "PLAID_CLIENT_ID", "PLAID_ENV", "PLAID_SECRET"]
|
python
| 6 | 0 | |
edb/testbase/server.py
|
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2016-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
import asyncio
import atexit
import collections
import contextlib
import decimal
import functools
import inspect
import json
import math
import os
import pprint
import re
import unittest
import uuid
from datetime import timedelta
import click.testing
import edgedb
from edb import cli
from edb.server import cluster as edgedb_cluster
from edb.server import defines as edgedb_defines
from edb.common import taskgroup
from edb.testbase import serutils
def get_test_cases(tests):
result = collections.OrderedDict()
for test in tests:
if isinstance(test, unittest.TestSuite):
result.update(get_test_cases(test._tests))
else:
cls = type(test)
try:
methods = result[cls]
except KeyError:
methods = result[cls] = []
methods.append(test)
return result
class TestCaseMeta(type(unittest.TestCase)):
_database_names = set()
@staticmethod
def _iter_methods(bases, ns):
for base in bases:
for methname in dir(base):
if not methname.startswith('test_'):
continue
meth = getattr(base, methname)
if not inspect.iscoroutinefunction(meth):
continue
yield methname, meth
for methname, meth in ns.items():
if not methname.startswith('test_'):
continue
if not inspect.iscoroutinefunction(meth):
continue
yield methname, meth
@classmethod
def wrap(mcls, meth):
@functools.wraps(meth)
def wrapper(self, *args, __meth__=meth, **kwargs):
try_no = 1
while True:
try:
# There might be non-obvious serializability
# anomalies across the test suite, so, rather
# than hunting them down every time, simply
# retry the test.
self.loop.run_until_complete(
__meth__(self, *args, **kwargs))
except edgedb.TransactionSerializationError:
if try_no == 3:
raise
else:
self.loop.run_until_complete(self.con.execute(
'ROLLBACK;'
))
try_no += 1
else:
break
return wrapper
@classmethod
def add_method(mcls, methname, ns, meth):
ns[methname] = mcls.wrap(meth)
def __new__(mcls, name, bases, ns):
for methname, meth in mcls._iter_methods(bases, ns.copy()):
if methname in ns:
del ns[methname]
mcls.add_method(methname, ns, meth)
cls = super().__new__(mcls, name, bases, ns)
if not ns.get('BASE_TEST_CLASS') and hasattr(cls, 'get_database_name'):
dbname = cls.get_database_name()
if name in mcls._database_names:
raise TypeError(
f'{name} wants duplicate database name: {dbname}')
mcls._database_names.add(name)
return cls
class TestCase(unittest.TestCase, metaclass=TestCaseMeta):
@classmethod
def setUpClass(cls):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
cls.loop = loop
@classmethod
def tearDownClass(cls):
cls.loop.close()
asyncio.set_event_loop(None)
def add_fail_notes(self, **kwargs):
if not hasattr(self, 'fail_notes'):
self.fail_notes = {}
self.fail_notes.update(kwargs)
@contextlib.contextmanager
def annotate(self, **kwargs):
# Annotate the test in case the nested block of code fails.
try:
yield
except Exception:
self.add_fail_notes(**kwargs)
raise
@contextlib.contextmanager
def assertRaisesRegex(self, exception, regex, msg=None,
**kwargs):
with super().assertRaisesRegex(exception, regex, msg=msg):
try:
yield
except BaseException as e:
if isinstance(e, exception):
for attr_name, expected_val in kwargs.items():
val = getattr(e, attr_name)
if val != expected_val:
raise self.failureException(
f'{exception.__name__} context attribute '
f'{attr_name!r} is {val} (expected '
f'{expected_val!r})') from e
raise
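# A single EdgeDB cluster is shared by the whole test run: _start_cluster()
# either attaches to the cluster described by EDGEDB_TEST_CLUSTER_ADDR or
# bootstraps one (persistent if EDGEDB_TEST_DATA_DIR is set, temporary
# otherwise), and _shutdown_cluster() is registered via atexit (when
# cleanup_atexit is set) to stop -- and, for temporary clusters, destroy -- it.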
_default_cluster = None
def _init_cluster(data_dir=None, *, cleanup_atexit=True, init_settings=None):
if init_settings is None:
init_settings = {}
if (not os.environ.get('EDGEDB_DEBUG_SERVER') and
not os.environ.get('EDGEDB_LOG_LEVEL')):
_env = {'EDGEDB_LOG_LEVEL': 'silent'}
else:
_env = {}
if data_dir is None:
cluster = edgedb_cluster.TempCluster(env=_env, testmode=True)
destroy = True
else:
cluster = edgedb_cluster.Cluster(data_dir=data_dir, env=_env)
destroy = False
if cluster.get_status() == 'not-initialized':
cluster.init(server_settings=init_settings)
cluster.start(port='dynamic')
cluster.set_superuser_password('test')
if cleanup_atexit:
atexit.register(_shutdown_cluster, cluster, destroy=destroy)
return cluster
def _start_cluster(*, cleanup_atexit=True):
global _default_cluster
if _default_cluster is None:
cluster_addr = os.environ.get('EDGEDB_TEST_CLUSTER_ADDR')
if cluster_addr:
conn_spec = json.loads(cluster_addr)
_default_cluster = edgedb_cluster.RunningCluster(**conn_spec)
else:
data_dir = os.environ.get('EDGEDB_TEST_DATA_DIR')
_default_cluster = _init_cluster(
data_dir=data_dir, cleanup_atexit=cleanup_atexit)
return _default_cluster
def _shutdown_cluster(cluster, *, destroy=True):
global _default_cluster
_default_cluster = None
if cluster is not None:
cluster.stop()
if destroy:
cluster.destroy()
class ClusterTestCase(TestCase):
BASE_TEST_CLASS = True
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.cluster = _start_cluster(cleanup_atexit=True)
class RollbackChanges:
def __init__(self, test):
self._conn = test.con
async def __aenter__(self):
self._tx = self._conn.transaction()
await self._tx.start()
async def __aexit__(self, exc_type, exc, tb):
await self._tx.rollback()
class ConnectedTestCaseMixin:
@classmethod
async def connect(cls, *,
cluster=None,
database=edgedb_defines.EDGEDB_SUPERUSER_DB,
user=edgedb_defines.EDGEDB_SUPERUSER,
password='test'):
conargs = cls.get_connect_args(
cluster=cluster, database=database, user=user, password=password)
return await edgedb.async_connect(**conargs)
@classmethod
def get_connect_args(cls, *,
cluster=None,
database=edgedb_defines.EDGEDB_SUPERUSER_DB,
user=edgedb_defines.EDGEDB_SUPERUSER,
password='test'):
if cluster is None:
cluster = cls.cluster
conargs = cluster.get_connect_args().copy()
conargs.update(dict(user=user,
password=password,
database=database))
return conargs
def _run_and_rollback(self):
return RollbackChanges(self)
async def assert_query_result(self, query,
exp_result_json,
exp_result_binary=...,
*,
msg=None, sort=None, variables=None):
fetch_args = variables if isinstance(variables, tuple) else ()
fetch_kw = variables if isinstance(variables, dict) else {}
try:
tx = self.con.transaction()
await tx.start()
try:
res = await self.con.fetchall_json(query,
*fetch_args, **fetch_kw)
finally:
await tx.rollback()
res = json.loads(res)
if sort is not None:
self._sort_results(res, sort)
self._assert_data_shape(res, exp_result_json, message=msg)
except Exception:
self.add_fail_notes(serialization='json')
raise
if exp_result_binary is ...:
# The expected result is the same
exp_result_binary = exp_result_json
try:
res = await self.con.fetchall(query, *fetch_args, **fetch_kw)
res = serutils.serialize(res)
if sort is not None:
self._sort_results(res, sort)
self._assert_data_shape(res, exp_result_binary, message=msg)
except Exception:
self.add_fail_notes(serialization='binary')
raise
def _sort_results(self, results, sort):
if sort is True:
sort = lambda x: x
# don't bother sorting empty things
if results:
# sort can be either a key function or a dict
if isinstance(sort, dict):
# the keys in the dict indicate the fields that
# actually must be sorted
for key, val in sort.items():
# '.' is a special key referring to the base object
if key == '.':
self._sort_results(results, val)
else:
if isinstance(results, list):
for r in results:
self._sort_results(r[key], val)
else:
self._sort_results(results[key], val)
else:
results.sort(key=sort)
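    # Recursively compare ``data`` against a "shape" spec: lists, sets and
    # dicts are matched element by element (a trailing Ellipsis repeats the
    # last element shape), floats are compared with math.isclose, nullable()
    # additionally accepts None, and scalar values must match exactly.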
def _assert_data_shape(self, data, shape, message=None):
_void = object()
def _format_path(path):
if path:
return 'PATH: ' + ''.join(str(p) for p in path)
else:
return 'PATH: <top-level>'
def _assert_type_shape(path, data, shape):
if shape in (int, float):
if not isinstance(data, shape):
self.fail(
f'{message}: expected {shape}, got {data!r} '
f'{_format_path(path)}')
else:
try:
shape(data)
except (ValueError, TypeError):
self.fail(
f'{message}: expected {shape}, got {data!r} '
f'{_format_path(path)}')
def _assert_dict_shape(path, data, shape):
for sk, sv in shape.items():
if not data or sk not in data:
self.fail(
f'{message}: key {sk!r} '
f'is missing\n{pprint.pformat(data)} '
f'{_format_path(path)}')
_assert_generic_shape(path + (f'["{sk}"]',), data[sk], sv)
def _list_shape_iter(shape):
last_shape = _void
for item in shape:
if item is Ellipsis:
if last_shape is _void:
raise ValueError(
                            'invalid shape spec: Ellipsis cannot be the '
                            'first element')
while True:
yield last_shape
last_shape = item
yield item
def _assert_list_shape(path, data, shape):
if not isinstance(data, list):
self.fail(
f'{message}: expected list '
f'{_format_path(path)}')
if not data and shape:
self.fail(
f'{message}: expected non-empty list '
f'{_format_path(path)}')
shape_iter = _list_shape_iter(shape)
_data_count = 0
for _data_count, el in enumerate(data):
try:
el_shape = next(shape_iter)
except StopIteration:
self.fail(
f'{message}: unexpected trailing elements in list '
f'{_format_path(path)}')
_assert_generic_shape(
path + (f'[{_data_count}]',),
el,
el_shape)
if len(shape) > _data_count + 1:
if shape[_data_count + 1] is not Ellipsis:
self.fail(
f'{message}: expecting more elements in list '
f'{_format_path(path)}')
def _assert_set_shape(path, data, shape):
if not isinstance(data, (list, set)):
self.fail(
f'{message}: expected list or set '
f'{_format_path(path)}')
if not data and shape:
self.fail(
f'{message}: expected non-empty set '
f'{_format_path(path)}')
shape_iter = _list_shape_iter(sorted(shape))
_data_count = 0
for _data_count, el in enumerate(sorted(data)):
try:
el_shape = next(shape_iter)
except StopIteration:
self.fail(
f'{message}: unexpected trailing elements in set '
f'[path {_format_path(path)}]')
_assert_generic_shape(
path + (f'{{{_data_count}}}',), el, el_shape)
if len(shape) > _data_count + 1:
if Ellipsis not in shape:
self.fail(
f'{message}: expecting more elements in set '
f'{_format_path(path)}')
def _assert_generic_shape(path, data, shape):
if isinstance(shape, nullable):
if data is None:
return
else:
shape = shape.value
if isinstance(shape, list):
return _assert_list_shape(path, data, shape)
elif isinstance(shape, set):
return _assert_set_shape(path, data, shape)
elif isinstance(shape, dict):
return _assert_dict_shape(path, data, shape)
elif isinstance(shape, type):
return _assert_type_shape(path, data, shape)
elif isinstance(shape, float):
if not math.isclose(data, shape, rel_tol=1e-04):
self.fail(
f'{message}: not isclose({data}, {shape}) '
f'{_format_path(path)}')
elif isinstance(shape, uuid.UUID):
# since the data comes from JSON, it will only have a str
if data != str(shape):
self.fail(
f'{message}: {data!r} != {shape!r} '
f'{_format_path(path)}')
elif isinstance(shape, (str, int, timedelta, decimal.Decimal)):
if data != shape:
self.fail(
f'{message}: {data!r} != {shape!r} '
f'{_format_path(path)}')
elif shape is None:
if data is not None:
self.fail(
f'{message}: {data!r} is expected to be None '
f'{_format_path(path)}')
else:
raise ValueError(f'unsupported shape type {shape}')
message = message or 'data shape differs'
return _assert_generic_shape((), data, shape)
class CLITestCaseMixin:
def run_cli(self, *args, input=None):
conn_args = self.get_connect_args()
cmd_args = (
'--host', conn_args['host'],
'--port', conn_args['port'],
'--user', conn_args['user'],
) + args
if conn_args['password']:
cmd_args = ('--password-from-stdin',) + cmd_args
if input is not None:
input = f"{conn_args['password']}\n{input}"
else:
input = f"{conn_args['password']}\n"
runner = click.testing.CliRunner()
return runner.invoke(
cli.cli, args=cmd_args, input=input,
catch_exceptions=False)
class ConnectedTestCase(ClusterTestCase, ConnectedTestCaseMixin):
BASE_TEST_CLASS = True
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.con = cls.loop.run_until_complete(cls.connect())
@classmethod
def tearDownClass(cls):
try:
cls.loop.run_until_complete(cls.con.aclose())
# Give event loop another iteration so that connection
# transport has a chance to properly close.
cls.loop.run_until_complete(asyncio.sleep(0))
cls.con = None
finally:
super().tearDownClass()
class DatabaseTestCase(ClusterTestCase, ConnectedTestCaseMixin):
SETUP = None
TEARDOWN = None
SCHEMA = None
SETUP_METHOD = None
TEARDOWN_METHOD = None
# Some tests may want to manage transactions manually,
# in which case ISOLATED_METHODS will be False.
ISOLATED_METHODS = True
# Turns on "EdgeDB developer" mode which allows using restricted
# syntax like USING SQL and similar. It allows modifying standard
# library (e.g. declaring casts).
INTERNAL_TESTMODE = True
BASE_TEST_CLASS = True
def setUp(self):
if self.INTERNAL_TESTMODE:
self.loop.run_until_complete(
self.con.execute(
'CONFIGURE SESSION SET __internal_testmode := true;'))
if self.ISOLATED_METHODS:
self.xact = self.con.transaction()
self.loop.run_until_complete(self.xact.start())
if self.SETUP_METHOD:
self.loop.run_until_complete(
self.con.execute(self.SETUP_METHOD))
super().setUp()
def tearDown(self):
try:
if self.TEARDOWN_METHOD:
self.loop.run_until_complete(
self.con.execute(self.TEARDOWN_METHOD))
finally:
try:
if self.ISOLATED_METHODS:
self.loop.run_until_complete(self.xact.rollback())
del self.xact
if self.con.is_in_transaction():
self.loop.run_until_complete(
self.con.execute('ROLLBACK'))
raise AssertionError(
'test connection is still in transaction '
'*after* the test')
if not self.ISOLATED_METHODS:
self.loop.run_until_complete(
self.con.execute('RESET ALIAS *;'))
finally:
super().tearDown()
@classmethod
def setUpClass(cls):
super().setUpClass()
dbname = cls.get_database_name()
cls.admin_conn = None
cls.con = None
class_set_up = os.environ.get('EDGEDB_TEST_CASES_SET_UP')
# Only open an extra admin connection if necessary.
if not class_set_up:
script = f'CREATE DATABASE {dbname};'
cls.admin_conn = cls.loop.run_until_complete(cls.connect())
cls.loop.run_until_complete(cls.admin_conn.execute(script))
cls.con = cls.loop.run_until_complete(cls.connect(database=dbname))
if not class_set_up:
script = cls.get_setup_script()
if script:
cls.loop.run_until_complete(cls.con.execute(script))
@classmethod
def get_database_name(cls):
if cls.__name__.startswith('TestEdgeQL'):
dbname = cls.__name__[len('TestEdgeQL'):]
elif cls.__name__.startswith('Test'):
dbname = cls.__name__[len('Test'):]
else:
dbname = cls.__name__
return dbname.lower()
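    # The per-class setup script is assembled from SCHEMA / SCHEMA_<MODULE>
    # attributes (paths to schema files, wrapped into a single test
    # migration) followed by the statements or script files listed in SETUP.
    # Purely illustrative (hypothetical) usage:
    #
    #     class TestFooQueries(QueryTestCase):
    #         SCHEMA = os.path.join(os.path.dirname(__file__),
    #                               'schemas', 'foo.esdl')
    #         SETUP = 'INSERT test::Foo;'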
@classmethod
def get_setup_script(cls):
script = ''
# allow the setup script to also run in test mode
if cls.INTERNAL_TESTMODE:
script += '\nCONFIGURE SESSION SET __internal_testmode := true;'
# Look at all SCHEMA entries and potentially create multiple
# modules, but always create the 'test' module.
schema = ['\nmodule test {}']
for name, val in cls.__dict__.items():
m = re.match(r'^SCHEMA(?:_(\w+))?', name)
if m:
module_name = (m.group(1) or 'test').lower().replace(
'__', '.')
with open(val, 'r') as sf:
module = sf.read()
schema.append(f'\nmodule {module_name} {{ {module} }}')
script += f'\nCREATE MIGRATION test_migration'
script += f' TO {{ {"".join(schema)} }};'
script += f'\nCOMMIT MIGRATION test_migration;'
if cls.SETUP:
if not isinstance(cls.SETUP, (list, tuple)):
scripts = [cls.SETUP]
else:
scripts = cls.SETUP
for scr in scripts:
if '\n' not in scr and os.path.exists(scr):
with open(scr, 'rt') as f:
setup = f.read()
else:
setup = scr
script += '\n' + setup
        # turn the internal test mode back off once the setup script has run
if cls.INTERNAL_TESTMODE:
script += '\nCONFIGURE SESSION SET __internal_testmode := false;'
return script.strip(' \n')
@classmethod
def tearDownClass(cls):
script = ''
class_set_up = os.environ.get('EDGEDB_TEST_CASES_SET_UP')
if cls.TEARDOWN and not class_set_up:
script = cls.TEARDOWN.strip()
try:
if script:
cls.loop.run_until_complete(
cls.con.execute(script))
finally:
try:
cls.loop.run_until_complete(cls.con.aclose())
if not class_set_up:
dbname = cls.get_database_name()
script = f'DROP DATABASE {dbname};'
cls.loop.run_until_complete(
cls.admin_conn.execute(script))
finally:
try:
if cls.admin_conn is not None:
cls.loop.run_until_complete(
cls.admin_conn.aclose())
finally:
super().tearDownClass()
@contextlib.asynccontextmanager
async def assertRaisesRegexTx(self, exception, regex, msg=None, **kwargs):
"""A version of assertRaisesRegex with automatic transaction recovery
"""
with super().assertRaisesRegex(exception, regex, msg=msg):
try:
tx = self.con.transaction()
await tx.start()
yield
except BaseException as e:
if isinstance(e, exception):
for attr_name, expected_val in kwargs.items():
val = getattr(e, attr_name)
if val != expected_val:
raise self.failureException(
f'{exception.__name__} context attribute '
f'{attr_name!r} is {val} (expected '
f'{expected_val!r})') from e
raise
finally:
await tx.rollback()
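# nullable() wraps a shape to signal that None is also acceptable in
# assert_query_result(); Error is a small container bundling an expected
# error class, message and shape.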
class nullable:
def __init__(self, value):
self.value = value
class Error:
def __init__(self, cls, message, shape):
self._message = message
self._class = cls
self._shape = shape
@property
def message(self):
return self._message
@property
def cls(self):
return self._class
@property
def shape(self):
return self._shape
class BaseQueryTestCase(DatabaseTestCase):
BASE_TEST_CLASS = True
class DDLTestCase(BaseQueryTestCase):
# DDL test cases generally need to be serialized
# to avoid deadlocks in parallel execution.
SERIALIZED = True
class NonIsolatedDDLTestCase(DDLTestCase):
ISOLATED_METHODS = False
BASE_TEST_CLASS = True
class QueryTestCase(BaseQueryTestCase):
BASE_TEST_CLASS = True
def get_test_cases_setup(cases):
result = []
for case in cases:
if not hasattr(case, 'get_setup_script'):
continue
setup_script = case.get_setup_script()
if not setup_script:
continue
dbname = case.get_database_name()
result.append((case, dbname, setup_script))
return result
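# setup_test_cases() creates every case's database and runs its setup script
# ahead of the test run, bounding bootstrap concurrency with a semaphore when
# more than one job is requested.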
def setup_test_cases(cases, conn, num_jobs):
setup = get_test_cases_setup(cases)
async def _run():
if num_jobs == 1:
# Special case for --jobs=1
for _case, dbname, setup_script in setup:
await _setup_database(dbname, setup_script, conn)
else:
async with taskgroup.TaskGroup(name='setup test cases') as g:
# Use a semaphore to limit the concurrency of bootstrap
# tasks to the number of jobs (bootstrap is heavy, having
# more tasks than `--jobs` won't necessarily make
# things faster.)
sem = asyncio.BoundedSemaphore(num_jobs)
async def controller(coro, *args):
async with sem:
await coro(*args)
for _case, dbname, setup_script in setup:
g.create_task(controller(
_setup_database, dbname, setup_script, conn))
return asyncio.run(_run())
async def _setup_database(dbname, setup_script, conn_args):
default_args = {
'user': edgedb_defines.EDGEDB_SUPERUSER,
'password': 'test',
}
default_args.update(conn_args)
admin_conn = await edgedb.async_connect(
database=edgedb_defines.EDGEDB_SUPERUSER_DB,
**default_args)
try:
await admin_conn.execute(f'CREATE DATABASE {dbname};')
finally:
await admin_conn.aclose()
dbconn = await edgedb.async_connect(database=dbname, **default_args)
try:
async with dbconn.transaction():
await dbconn.execute(setup_script)
finally:
await dbconn.aclose()
return dbname
_lock_cnt = 0
def gen_lock_key():
global _lock_cnt
_lock_cnt += 1
return os.getpid() * 1000 + _lock_cnt
|
[] |
[] |
[
"EDGEDB_TEST_DATA_DIR",
"EDGEDB_TEST_CLUSTER_ADDR",
"EDGEDB_DEBUG_SERVER",
"EDGEDB_TEST_CASES_SET_UP",
"EDGEDB_LOG_LEVEL"
] |
[]
|
["EDGEDB_TEST_DATA_DIR", "EDGEDB_TEST_CLUSTER_ADDR", "EDGEDB_DEBUG_SERVER", "EDGEDB_TEST_CASES_SET_UP", "EDGEDB_LOG_LEVEL"]
|
python
| 5 | 0 | |
cmd/deploy_masters.go
|
// Copyright © 2019 Red Hat <[email protected]>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"fmt"
"log"
"os"
"gerrit.akraino.org/kni/installer/pkg/site"
"github.com/spf13/cobra"
)
// deployMastersCmd represents the deploy_masters command
var deployMastersCmd = &cobra.Command{
Use: "deploy_masters siteName [--build_path=<local_build_path>]",
Short: "Command to automate the deployment of the master nodes of a previously-prepared site",
Long: ``,
TraverseChildren: true,
Run: func(cmd *cobra.Command, args []string) {
// retrieve config values and start fetching
var siteName string
if len(args) == 0 {
log.Fatalln("Please specify site name as first argument")
} else {
siteName = args[0]
}
buildPath, _ := cmd.Flags().GetString("build_path")
if len(buildPath) == 0 {
			// no build_path given: default to the .kni directory in the user's home
buildPath = fmt.Sprintf("%s/.kni", os.Getenv("HOME"))
}
// This command is used after fetch_requirements and prepare_manifests,
// so the site directory should be available on disk already (if not,
// s.AutomateMastersDeployment will error-out appropriately)
s := site.NewWithName(siteName, buildPath)
s.AutomateMastersDeployment()
},
}
func init() {
rootCmd.AddCommand(deployMastersCmd)
deployMastersCmd.Flags().StringP("build_path", "", "", "Directory to use as build path. If that doesn't exist, the installer will generate a default directory")
}
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
ThirdParty/Twisted/twisted/conch/test/test_cftp.py
|
# -*- test-case-name: twisted.conch.test.test_cftp -*-
# Copyright (c) 2001-2009 Twisted Matrix Laboratories.
# See LICENSE file for details.
"""
Tests for L{twisted.conch.scripts.cftp}.
"""
import locale
import time, sys, os, operator, getpass, struct
from StringIO import StringIO
from twisted.conch.test.test_ssh import Crypto, pyasn1
_reason = None
if Crypto and pyasn1:
try:
from twisted.conch import unix
from twisted.conch.scripts import cftp
from twisted.conch.test.test_filetransfer import FileTransferForTestAvatar
except ImportError, e:
# Python 2.3 compatibility fix
sys.modules.pop("twisted.conch.unix", None)
unix = None
_reason = str(e)
del e
else:
unix = None
from twisted.python.fakepwd import UserDatabase
from twisted.trial.unittest import TestCase
from twisted.cred import portal
from twisted.internet import reactor, protocol, interfaces, defer, error
from twisted.internet.utils import getProcessOutputAndValue
from twisted.python import log
from twisted.conch import ls
from twisted.test.proto_helpers import StringTransport
from twisted.internet.task import Clock
from twisted.conch.test import test_ssh, test_conch
from twisted.conch.test.test_filetransfer import SFTPTestBase
from twisted.conch.test.test_filetransfer import FileTransferTestAvatar
class ListingTests(TestCase):
"""
Tests for L{lsLine}, the function which generates an entry for a file or
directory in an SFTP I{ls} command's output.
"""
if getattr(time, 'tzset', None) is None:
skip = "Cannot test timestamp formatting code without time.tzset"
def setUp(self):
"""
Patch the L{ls} module's time function so the results of L{lsLine} are
deterministic.
"""
self.now = 123456789
def fakeTime():
return self.now
self.patch(ls, 'time', fakeTime)
# Make sure that the timezone ends up the same after these tests as
# it was before.
if 'TZ' in os.environ:
self.addCleanup(operator.setitem, os.environ, 'TZ', os.environ['TZ'])
self.addCleanup(time.tzset)
else:
def cleanup():
# os.environ.pop is broken! Don't use it! Ever! Or die!
try:
del os.environ['TZ']
except KeyError:
pass
time.tzset()
self.addCleanup(cleanup)
def _lsInTimezone(self, timezone, stat):
"""
Call L{ls.lsLine} after setting the timezone to C{timezone} and return
the result.
"""
# Set the timezone to a well-known value so the timestamps are
# predictable.
os.environ['TZ'] = timezone
time.tzset()
return ls.lsLine('foo', stat)
def test_oldFile(self):
"""
A file with an mtime six months (approximately) or more in the past has
a listing including a low-resolution timestamp.
"""
# Go with 7 months. That's more than 6 months.
then = self.now - (60 * 60 * 24 * 31 * 7)
stat = os.stat_result((0, 0, 0, 0, 0, 0, 0, 0, then, 0))
self.assertEqual(
self._lsInTimezone('America/New_York', stat),
'!--------- 0 0 0 0 Apr 26 1973 foo')
self.assertEqual(
self._lsInTimezone('Pacific/Auckland', stat),
'!--------- 0 0 0 0 Apr 27 1973 foo')
def test_oldSingleDigitDayOfMonth(self):
"""
A file with a high-resolution timestamp which falls on a day of the
month which can be represented by one decimal digit is formatted with
one padding 0 to preserve the columns which come after it.
"""
# A point about 7 months in the past, tweaked to fall on the first of a
# month so we test the case we want to test.
then = self.now - (60 * 60 * 24 * 31 * 7) + (60 * 60 * 24 * 5)
stat = os.stat_result((0, 0, 0, 0, 0, 0, 0, 0, then, 0))
self.assertEqual(
self._lsInTimezone('America/New_York', stat),
'!--------- 0 0 0 0 May 01 1973 foo')
self.assertEqual(
self._lsInTimezone('Pacific/Auckland', stat),
'!--------- 0 0 0 0 May 02 1973 foo')
def test_newFile(self):
"""
A file with an mtime fewer than six months (approximately) in the past
has a listing including a high-resolution timestamp excluding the year.
"""
# A point about three months in the past.
then = self.now - (60 * 60 * 24 * 31 * 3)
stat = os.stat_result((0, 0, 0, 0, 0, 0, 0, 0, then, 0))
self.assertEqual(
self._lsInTimezone('America/New_York', stat),
'!--------- 0 0 0 0 Aug 28 17:33 foo')
self.assertEqual(
self._lsInTimezone('Pacific/Auckland', stat),
'!--------- 0 0 0 0 Aug 29 09:33 foo')
def test_localeIndependent(self):
"""
The month name in the date is locale independent.
"""
# A point about three months in the past.
then = self.now - (60 * 60 * 24 * 31 * 3)
stat = os.stat_result((0, 0, 0, 0, 0, 0, 0, 0, then, 0))
# Fake that we're in a language where August is not Aug (e.g.: Spanish)
currentLocale = locale.getlocale()
locale.setlocale(locale.LC_ALL, "es_AR.UTF8")
self.addCleanup(locale.setlocale, locale.LC_ALL, currentLocale)
self.assertEqual(
self._lsInTimezone('America/New_York', stat),
'!--------- 0 0 0 0 Aug 28 17:33 foo')
self.assertEqual(
self._lsInTimezone('Pacific/Auckland', stat),
'!--------- 0 0 0 0 Aug 29 09:33 foo')
# if alternate locale is not available, the previous test will be
# skipped, please install this locale for it to run
currentLocale = locale.getlocale()
try:
try:
locale.setlocale(locale.LC_ALL, "es_AR.UTF8")
except locale.Error:
test_localeIndependent.skip = "The es_AR.UTF8 locale is not installed."
finally:
locale.setlocale(locale.LC_ALL, currentLocale)
def test_newSingleDigitDayOfMonth(self):
"""
A file with a high-resolution timestamp which falls on a day of the
month which can be represented by one decimal digit is formatted with
one padding 0 to preserve the columns which come after it.
"""
# A point about three months in the past, tweaked to fall on the first
# of a month so we test the case we want to test.
then = self.now - (60 * 60 * 24 * 31 * 3) + (60 * 60 * 24 * 4)
stat = os.stat_result((0, 0, 0, 0, 0, 0, 0, 0, then, 0))
self.assertEqual(
self._lsInTimezone('America/New_York', stat),
'!--------- 0 0 0 0 Sep 01 17:33 foo')
self.assertEqual(
self._lsInTimezone('Pacific/Auckland', stat),
'!--------- 0 0 0 0 Sep 02 09:33 foo')
class StdioClientTests(TestCase):
"""
Tests for L{cftp.StdioClient}.
"""
def setUp(self):
"""
Create a L{cftp.StdioClient} hooked up to dummy transport and a fake
user database.
"""
class Connection:
pass
conn = Connection()
conn.transport = StringTransport()
conn.transport.localClosed = False
self.client = cftp.StdioClient(conn)
self.database = self.client._pwd = UserDatabase()
# Intentionally bypassing makeConnection - that triggers some code
# which uses features not provided by our dumb Connection fake.
self.client.transport = StringTransport()
def test_exec(self):
"""
The I{exec} command runs its arguments locally in a child process
using the user's shell.
"""
self.database.addUser(
getpass.getuser(), 'secret', os.getuid(), 1234, 'foo', 'bar',
sys.executable)
d = self.client._dispatchCommand("exec print 1 + 2")
d.addCallback(self.assertEqual, "3\n")
return d
def test_execWithoutShell(self):
"""
If the local user has no shell, the I{exec} command runs its arguments
using I{/bin/sh}.
"""
self.database.addUser(
getpass.getuser(), 'secret', os.getuid(), 1234, 'foo', 'bar', '')
d = self.client._dispatchCommand("exec echo hello")
d.addCallback(self.assertEqual, "hello\n")
return d
def test_bang(self):
"""
The I{exec} command is run for lines which start with C{"!"}.
"""
self.database.addUser(
getpass.getuser(), 'secret', os.getuid(), 1234, 'foo', 'bar',
'/bin/sh')
d = self.client._dispatchCommand("!echo hello")
d.addCallback(self.assertEqual, "hello\n")
return d
def setKnownConsoleSize(self, width, height):
"""
For the duration of this test, patch C{cftp}'s C{fcntl} module to return
a fixed width and height.
@param width: the width in characters
@type width: C{int}
@param height: the height in characters
@type height: C{int}
"""
import tty # local import to avoid win32 issues
class FakeFcntl(object):
def ioctl(self, fd, opt, mutate):
if opt != tty.TIOCGWINSZ:
self.fail("Only window-size queries supported.")
return struct.pack("4H", height, width, 0, 0)
self.patch(cftp, "fcntl", FakeFcntl())
def test_progressReporting(self):
"""
L{StdioClient._printProgressBar} prints a progress description,
including percent done, amount transferred, transfer rate, and time
        remaining, all based on the given start time, the given L{FileWrapper}'s
progress information and the reactor's current time.
"""
# Use a short, known console width because this simple test doesn't need
# to test the console padding.
self.setKnownConsoleSize(10, 34)
clock = self.client.reactor = Clock()
wrapped = StringIO("x")
wrapped.name = "sample"
wrapper = cftp.FileWrapper(wrapped)
wrapper.size = 1024 * 10
startTime = clock.seconds()
clock.advance(2.0)
wrapper.total += 4096
self.client._printProgressBar(wrapper, startTime)
self.assertEqual(self.client.transport.value(),
"\rsample 40% 4.0kB 2.0kBps 00:03 ")
def test_reportNoProgress(self):
"""
L{StdioClient._printProgressBar} prints a progress description that
indicates 0 bytes transferred if no bytes have been transferred and no
time has passed.
"""
self.setKnownConsoleSize(10, 34)
clock = self.client.reactor = Clock()
wrapped = StringIO("x")
wrapped.name = "sample"
wrapper = cftp.FileWrapper(wrapped)
startTime = clock.seconds()
self.client._printProgressBar(wrapper, startTime)
self.assertEqual(self.client.transport.value(),
"\rsample 0% 0.0B 0.0Bps 00:00 ")
class FileTransferTestRealm:
def __init__(self, testDir):
self.testDir = testDir
def requestAvatar(self, avatarID, mind, *interfaces):
a = FileTransferTestAvatar(self.testDir)
return interfaces[0], a, lambda: None
class SFTPTestProcess(protocol.ProcessProtocol):
"""
Protocol for testing cftp. Provides an interface between Python (where all
the tests are) and the cftp client process (which does the work that is
being tested).
"""
def __init__(self, onOutReceived):
"""
@param onOutReceived: A L{Deferred} to be fired as soon as data is
received from stdout.
"""
self.clearBuffer()
self.onOutReceived = onOutReceived
self.onProcessEnd = None
self._expectingCommand = None
self._processEnded = False
def clearBuffer(self):
"""
Clear any buffered data received from stdout. Should be private.
"""
self.buffer = ''
self._linesReceived = []
self._lineBuffer = ''
def outReceived(self, data):
"""
Called by Twisted when the cftp client prints data to stdout.
"""
log.msg('got %s' % data)
lines = (self._lineBuffer + data).split('\n')
self._lineBuffer = lines.pop(-1)
self._linesReceived.extend(lines)
# XXX - not strictly correct.
# We really want onOutReceived to fire after the first 'cftp>' prompt
# has been received. (See use in TestOurServerCmdLineClient.setUp)
if self.onOutReceived is not None:
d, self.onOutReceived = self.onOutReceived, None
d.callback(data)
self.buffer += data
self._checkForCommand()
def _checkForCommand(self):
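        # A command is considered finished once the line buffer holds a fresh
        # 'cftp> ' prompt; everything buffered before it is handed to the
        # Deferred that runCommand() is waiting on.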
prompt = 'cftp> '
if self._expectingCommand and self._lineBuffer == prompt:
buf = '\n'.join(self._linesReceived)
if buf.startswith(prompt):
buf = buf[len(prompt):]
self.clearBuffer()
d, self._expectingCommand = self._expectingCommand, None
d.callback(buf)
def errReceived(self, data):
"""
Called by Twisted when the cftp client prints data to stderr.
"""
log.msg('err: %s' % data)
def getBuffer(self):
"""
Return the contents of the buffer of data received from stdout.
"""
return self.buffer
def runCommand(self, command):
"""
Issue the given command via the cftp client. Return a C{Deferred} that
fires when the server returns a result. Note that the C{Deferred} will
callback even if the server returns some kind of error.
@param command: A string containing an sftp command.
@return: A C{Deferred} that fires when the sftp server returns a
result. The payload is the server's response string.
"""
self._expectingCommand = defer.Deferred()
self.clearBuffer()
self.transport.write(command + '\n')
return self._expectingCommand
def runScript(self, commands):
"""
Run each command in sequence and return a Deferred that fires when all
commands are completed.
@param commands: A list of strings containing sftp commands.
@return: A C{Deferred} that fires when all commands are completed. The
payload is a list of response strings from the server, in the same
order as the commands.
"""
sem = defer.DeferredSemaphore(1)
dl = [sem.run(self.runCommand, command) for command in commands]
return defer.gatherResults(dl)
def killProcess(self):
"""
Kill the process if it is still running.
If the process is still running, sends a KILL signal to the transport
and returns a C{Deferred} which fires when L{processEnded} is called.
@return: a C{Deferred}.
"""
if self._processEnded:
return defer.succeed(None)
self.onProcessEnd = defer.Deferred()
self.transport.signalProcess('KILL')
return self.onProcessEnd
def processEnded(self, reason):
"""
Called by Twisted when the cftp client process ends.
"""
self._processEnded = True
if self.onProcessEnd:
d, self.onProcessEnd = self.onProcessEnd, None
d.callback(None)
class CFTPClientTestBase(SFTPTestBase):
def setUp(self):
f = open('dsa_test.pub','w')
f.write(test_ssh.publicDSA_openssh)
f.close()
f = open('dsa_test','w')
f.write(test_ssh.privateDSA_openssh)
f.close()
os.chmod('dsa_test', 33152)
f = open('kh_test','w')
f.write('127.0.0.1 ' + test_ssh.publicRSA_openssh)
f.close()
return SFTPTestBase.setUp(self)
def startServer(self):
realm = FileTransferTestRealm(self.testDir)
p = portal.Portal(realm)
p.registerChecker(test_ssh.ConchTestPublicKeyChecker())
fac = test_ssh.ConchTestServerFactory()
fac.portal = p
self.server = reactor.listenTCP(0, fac, interface="127.0.0.1")
def stopServer(self):
if not hasattr(self.server.factory, 'proto'):
return self._cbStopServer(None)
self.server.factory.proto.expectedLoseConnection = 1
d = defer.maybeDeferred(
self.server.factory.proto.transport.loseConnection)
d.addCallback(self._cbStopServer)
return d
def _cbStopServer(self, ignored):
return defer.maybeDeferred(self.server.stopListening)
def tearDown(self):
for f in ['dsa_test.pub', 'dsa_test', 'kh_test']:
try:
os.remove(f)
except:
pass
return SFTPTestBase.tearDown(self)
class TestOurServerCmdLineClient(CFTPClientTestBase):
def setUp(self):
CFTPClientTestBase.setUp(self)
self.startServer()
cmds = ('-p %i -l testuser '
'--known-hosts kh_test '
'--user-authentications publickey '
'--host-key-algorithms ssh-rsa '
'-i dsa_test '
'-a '
'-v '
'127.0.0.1')
port = self.server.getHost().port
cmds = test_conch._makeArgs((cmds % port).split(), mod='cftp')
log.msg('running %s %s' % (sys.executable, cmds))
d = defer.Deferred()
self.processProtocol = SFTPTestProcess(d)
d.addCallback(lambda _: self.processProtocol.clearBuffer())
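        # Spawn the cftp client as a child Python process, propagating
        # sys.path via PYTHONPATH so it imports this development tree.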
env = os.environ.copy()
env['PYTHONPATH'] = os.pathsep.join(sys.path)
reactor.spawnProcess(self.processProtocol, sys.executable, cmds,
env=env)
return d
def tearDown(self):
d = self.stopServer()
d.addCallback(lambda _: self.processProtocol.killProcess())
return d
def _killProcess(self, ignored):
try:
self.processProtocol.transport.signalProcess('KILL')
except error.ProcessExitedAlready:
pass
def runCommand(self, command):
"""
Run the given command with the cftp client. Return a C{Deferred} that
fires when the command is complete. Payload is the server's output for
that command.
"""
return self.processProtocol.runCommand(command)
def runScript(self, *commands):
"""
Run the given commands with the cftp client. Returns a C{Deferred}
that fires when the commands are all complete. The C{Deferred}'s
payload is a list of output for each command.
"""
return self.processProtocol.runScript(commands)
def testCdPwd(self):
"""
Test that 'pwd' reports the current remote directory, that 'lpwd'
reports the current local directory, and that changing to a
subdirectory then changing to its parent leaves you in the original
remote directory.
"""
# XXX - not actually a unit test, see docstring.
homeDir = os.path.join(os.getcwd(), self.testDir)
d = self.runScript('pwd', 'lpwd', 'cd testDirectory', 'cd ..', 'pwd')
d.addCallback(lambda xs: xs[:3] + xs[4:])
d.addCallback(self.assertEqual,
[homeDir, os.getcwd(), '', homeDir])
return d
def testChAttrs(self):
"""
Check that 'ls -l' output includes the access permissions and that
this output changes appropriately with 'chmod'.
"""
def _check(results):
self.flushLoggedErrors()
self.assertTrue(results[0].startswith('-rw-r--r--'))
self.assertEqual(results[1], '')
self.assertTrue(results[2].startswith('----------'), results[2])
self.assertEqual(results[3], '')
d = self.runScript('ls -l testfile1', 'chmod 0 testfile1',
'ls -l testfile1', 'chmod 644 testfile1')
return d.addCallback(_check)
# XXX test chgrp/own
def testList(self):
"""
Check 'ls' works as expected. Checks for wildcards, hidden files,
listing directories and listing empty directories.
"""
def _check(results):
self.assertEqual(results[0], ['testDirectory', 'testRemoveFile',
'testRenameFile', 'testfile1'])
self.assertEqual(results[1], ['testDirectory', 'testRemoveFile',
'testRenameFile', 'testfile1'])
self.assertEqual(results[2], ['testRemoveFile', 'testRenameFile'])
self.assertEqual(results[3], ['.testHiddenFile', 'testRemoveFile',
'testRenameFile'])
self.assertEqual(results[4], [''])
d = self.runScript('ls', 'ls ../' + os.path.basename(self.testDir),
'ls *File', 'ls -a *File', 'ls -l testDirectory')
d.addCallback(lambda xs: [x.split('\n') for x in xs])
return d.addCallback(_check)
def testHelp(self):
"""
Check that running the '?' command returns help.
"""
d = self.runCommand('?')
d.addCallback(self.assertEqual,
cftp.StdioClient(None).cmd_HELP('').strip())
return d
def assertFilesEqual(self, name1, name2, msg=None):
"""
Assert that the files at C{name1} and C{name2} contain exactly the
same data.
"""
f1 = file(name1).read()
f2 = file(name2).read()
self.assertEqual(f1, f2, msg)
def testGet(self):
"""
Test that 'get' saves the remote file to the correct local location,
that the output of 'get' is correct and that 'rm' actually removes
the file.
"""
# XXX - not actually a unit test
expectedOutput = ("Transferred %s/%s/testfile1 to %s/test file2"
% (os.getcwd(), self.testDir, self.testDir))
def _checkGet(result):
self.assertTrue(result.endswith(expectedOutput))
self.assertFilesEqual(self.testDir + '/testfile1',
self.testDir + '/test file2',
"get failed")
return self.runCommand('rm "test file2"')
d = self.runCommand('get testfile1 "%s/test file2"' % (self.testDir,))
d.addCallback(_checkGet)
d.addCallback(lambda _: self.failIf(
os.path.exists(self.testDir + '/test file2')))
return d
def testWildcardGet(self):
"""
Test that 'get' works correctly when given wildcard parameters.
"""
def _check(ignored):
self.assertFilesEqual(self.testDir + '/testRemoveFile',
'testRemoveFile',
'testRemoveFile get failed')
self.assertFilesEqual(self.testDir + '/testRenameFile',
'testRenameFile',
'testRenameFile get failed')
d = self.runCommand('get testR*')
return d.addCallback(_check)
def testPut(self):
"""
Check that 'put' uploads files correctly and that they can be
successfully removed. Also check the output of the put command.
"""
# XXX - not actually a unit test
expectedOutput = ('Transferred %s/testfile1 to %s/%s/test"file2'
% (self.testDir, os.getcwd(), self.testDir))
def _checkPut(result):
self.assertFilesEqual(self.testDir + '/testfile1',
self.testDir + '/test"file2')
self.failUnless(result.endswith(expectedOutput))
return self.runCommand('rm "test\\"file2"')
d = self.runCommand('put %s/testfile1 "test\\"file2"'
% (self.testDir,))
d.addCallback(_checkPut)
d.addCallback(lambda _: self.failIf(
os.path.exists(self.testDir + '/test"file2')))
return d
def test_putOverLongerFile(self):
"""
Check that 'put' uploads files correctly when overwriting a longer
file.
"""
# XXX - not actually a unit test
f = file(os.path.join(self.testDir, 'shorterFile'), 'w')
f.write("a")
f.close()
f = file(os.path.join(self.testDir, 'longerFile'), 'w')
f.write("bb")
f.close()
def _checkPut(result):
self.assertFilesEqual(self.testDir + '/shorterFile',
self.testDir + '/longerFile')
d = self.runCommand('put %s/shorterFile longerFile'
% (self.testDir,))
d.addCallback(_checkPut)
return d
def test_putMultipleOverLongerFile(self):
"""
Check that 'put' uploads files correctly when overwriting a longer
file and you use a wildcard to specify the files to upload.
"""
# XXX - not actually a unit test
os.mkdir(os.path.join(self.testDir, 'dir'))
f = file(os.path.join(self.testDir, 'dir', 'file'), 'w')
f.write("a")
f.close()
f = file(os.path.join(self.testDir, 'file'), 'w')
f.write("bb")
f.close()
def _checkPut(result):
self.assertFilesEqual(self.testDir + '/dir/file',
self.testDir + '/file')
d = self.runCommand('put %s/dir/*'
% (self.testDir,))
d.addCallback(_checkPut)
return d
def testWildcardPut(self):
"""
What happens if you issue a 'put' command and include a wildcard (i.e.
        '*') in the parameter? Check that all files matching the wildcard are
uploaded to the correct directory.
"""
def check(results):
self.assertEqual(results[0], '')
self.assertEqual(results[2], '')
self.assertFilesEqual(self.testDir + '/testRemoveFile',
self.testDir + '/../testRemoveFile',
'testRemoveFile get failed')
self.assertFilesEqual(self.testDir + '/testRenameFile',
self.testDir + '/../testRenameFile',
'testRenameFile get failed')
d = self.runScript('cd ..',
'put %s/testR*' % (self.testDir,),
'cd %s' % os.path.basename(self.testDir))
d.addCallback(check)
return d
def testLink(self):
"""
Test that 'ln' creates a file which appears as a link in the output of
'ls'. Check that removing the new file succeeds without output.
"""
def _check(results):
self.flushLoggedErrors()
self.assertEqual(results[0], '')
self.assertTrue(results[1].startswith('l'), 'link failed')
return self.runCommand('rm testLink')
d = self.runScript('ln testLink testfile1', 'ls -l testLink')
d.addCallback(_check)
d.addCallback(self.assertEqual, '')
return d
def testRemoteDirectory(self):
"""
Test that we can create and remove directories with the cftp client.
"""
def _check(results):
self.assertEqual(results[0], '')
self.assertTrue(results[1].startswith('d'))
return self.runCommand('rmdir testMakeDirectory')
d = self.runScript('mkdir testMakeDirectory',
'ls -l testMakeDirector?')
d.addCallback(_check)
d.addCallback(self.assertEqual, '')
return d
def test_existingRemoteDirectory(self):
"""
Test that a C{mkdir} on an existing directory fails with the
        appropriate error, and doesn't log a useless error server side.
"""
def _check(results):
self.assertEqual(results[0], '')
self.assertEqual(results[1],
'remote error 11: mkdir failed')
d = self.runScript('mkdir testMakeDirectory',
'mkdir testMakeDirectory')
d.addCallback(_check)
return d
def testLocalDirectory(self):
"""
Test that we can create a directory locally and remove it with the
cftp client. This test works because the 'remote' server is running
out of a local directory.
"""
d = self.runCommand('lmkdir %s/testLocalDirectory' % (self.testDir,))
d.addCallback(self.assertEqual, '')
d.addCallback(lambda _: self.runCommand('rmdir testLocalDirectory'))
d.addCallback(self.assertEqual, '')
return d
def testRename(self):
"""
Test that we can rename a file.
"""
def _check(results):
self.assertEqual(results[0], '')
self.assertEqual(results[1], 'testfile2')
return self.runCommand('rename testfile2 testfile1')
d = self.runScript('rename testfile1 testfile2', 'ls testfile?')
d.addCallback(_check)
d.addCallback(self.assertEqual, '')
return d
class TestOurServerBatchFile(CFTPClientTestBase):
def setUp(self):
CFTPClientTestBase.setUp(self)
self.startServer()
def tearDown(self):
CFTPClientTestBase.tearDown(self)
return self.stopServer()
def _getBatchOutput(self, f):
fn = self.mktemp()
open(fn, 'w').write(f)
port = self.server.getHost().port
cmds = ('-p %i -l testuser '
'--known-hosts kh_test '
'--user-authentications publickey '
'--host-key-algorithms ssh-rsa '
'-i dsa_test '
'-a '
'-v -b %s 127.0.0.1') % (port, fn)
cmds = test_conch._makeArgs(cmds.split(), mod='cftp')[1:]
log.msg('running %s %s' % (sys.executable, cmds))
env = os.environ.copy()
env['PYTHONPATH'] = os.pathsep.join(sys.path)
self.server.factory.expectedLoseConnection = 1
d = getProcessOutputAndValue(sys.executable, cmds, env=env)
def _cleanup(res):
os.remove(fn)
return res
d.addCallback(lambda res: res[0])
d.addBoth(_cleanup)
return d
    def testBatchFile(self):
        """Test the batch file function of cftp ('cftp -b batchfile').
This works by treating the file as a list of commands to be run.
"""
cmds = """pwd
ls
exit
"""
def _cbCheckResult(res):
res = res.split('\n')
log.msg('RES %s' % str(res))
self.failUnless(res[1].find(self.testDir) != -1, repr(res))
self.assertEqual(res[3:-2], ['testDirectory', 'testRemoveFile',
'testRenameFile', 'testfile1'])
d = self._getBatchOutput(cmds)
d.addCallback(_cbCheckResult)
return d
def testError(self):
"""Test that an error in the batch file stops running the batch.
"""
cmds = """chown 0 missingFile
pwd
exit
"""
def _cbCheckResult(res):
self.failIf(res.find(self.testDir) != -1)
d = self._getBatchOutput(cmds)
d.addCallback(_cbCheckResult)
return d
def testIgnoredError(self):
"""Test that a minus sign '-' at the front of a line ignores
any errors.
"""
cmds = """-chown 0 missingFile
pwd
exit
"""
def _cbCheckResult(res):
self.failIf(res.find(self.testDir) == -1)
d = self._getBatchOutput(cmds)
d.addCallback(_cbCheckResult)
return d
class TestOurServerSftpClient(CFTPClientTestBase):
"""
Test the sftp server against sftp command line client.
"""
def setUp(self):
CFTPClientTestBase.setUp(self)
return self.startServer()
def tearDown(self):
return self.stopServer()
def test_extendedAttributes(self):
"""
Test the return of extended attributes by the server: the sftp client
should ignore them, but still be able to parse the response correctly.
This test is mainly here to check that
L{filetransfer.FILEXFER_ATTR_EXTENDED} has the correct value.
"""
fn = self.mktemp()
open(fn, 'w').write("ls .\nexit")
port = self.server.getHost().port
oldGetAttr = FileTransferForTestAvatar._getAttrs
def _getAttrs(self, s):
attrs = oldGetAttr(self, s)
attrs["ext_foo"] = "bar"
return attrs
self.patch(FileTransferForTestAvatar, "_getAttrs", _getAttrs)
self.server.factory.expectedLoseConnection = True
cmds = ('-o', 'IdentityFile=dsa_test',
'-o', 'UserKnownHostsFile=kh_test',
'-o', 'HostKeyAlgorithms=ssh-rsa',
                '-o', 'Port=%i' % (port,), '-b', fn, 'testuser@127.0.0.1')
d = getProcessOutputAndValue("sftp", cmds)
def check(result):
self.assertEqual(result[2], 0)
for i in ['testDirectory', 'testRemoveFile',
'testRenameFile', 'testfile1']:
self.assertIn(i, result[0])
return d.addCallback(check)
if unix is None or Crypto is None or pyasn1 is None or interfaces.IReactorProcess(reactor, None) is None:
if _reason is None:
_reason = "don't run w/o spawnProcess or PyCrypto or pyasn1"
TestOurServerCmdLineClient.skip = _reason
TestOurServerBatchFile.skip = _reason
TestOurServerSftpClient.skip = _reason
StdioClientTests.skip = _reason
else:
from twisted.python.procutils import which
if not which('sftp'):
TestOurServerSftpClient.skip = "no sftp command-line client available"
|
[] |
[] |
[
"TZ"
] |
[]
|
["TZ"]
|
python
| 1 | 0 | |
cti/rabbitmq/client.go
|
package rabbitmq
import (
"bytes"
"fmt"
"log"
"os"
"time"
"github.com/streadway/amqp"
)
var (
conn *amqp.Connection
//TODO implement pool
)
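// init assembles the broker URL from the RABBITMQ_* environment variables
// (falling back to guest/guest on rabbitmq:5672) and retries the dial every
// two seconds, giving up after a handful of failed attempts.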
func init() {
user := os.Getenv("RABBITMQ_USER")
if user == "" {
user = "guest"
}
password := os.Getenv("RABBITMQ_PASS")
if password == "" {
password = "guest"
}
address := os.Getenv("RABBITMQ_PORT_5672_TCP_ADDR")
if address == "" {
address = "rabbitmq"
}
tcp := os.Getenv("RABBITMQ_PORT_5672_TCP_PORT")
if tcp == "" {
tcp = "5672"
}
url := fmt.Sprintf("amqp://%s:%s@%s:%s/", user, password, address, tcp)
log.Printf("Connecting to RabbitMQ: %s...\n", url)
max := 5
var err error
connectTicker := time.Tick(time.Second * 2)
LOOP:
for {
select {
case <-connectTicker:
conn, err = amqp.Dial(url)
if err == nil {
break LOOP
}
if max == 0 {
log.Fatalln("Failed to connect to RabbitMQ")
}
max--
}
}
failOnError(err, "Failed to connect to RabbitMQ")
}
func failOnError(err error, msg string) {
if err != nil {
log.Fatalf("%s: %s", msg, err)
}
}
// Send messages to the queue
func Send(queue string, message *bytes.Buffer) {
ch, err := conn.Channel()
failOnError(err, "Failed to open a channel")
defer ch.Close()
err = ch.ExchangeDeclare(
queue, // name
"fanout", // type
true, // durable
false, // auto-deleted
false, // internal
false, // no-wait
nil, // arguments
)
failOnError(err, "Failed to declare an exchange")
err = ch.Publish(
queue, // exchange
"", // routing key
false, // mandatory
false, // immediate
amqp.Publishing{
ContentType: "application/json",
Body: message.Bytes(),
})
failOnError(err, "Failed to publish a message")
}
// DeleteQueue - delete queue
func DeleteQueue(queue string) {
ch, err := conn.Channel()
failOnError(err, "Failed to open a channel")
defer ch.Close()
	err = ch.ExchangeDelete(queue, false, false)
failOnError(err, "Failed to delete a queue")
}
//Close the connection
func Close() {
conn.Close()
}
|
[
"\"RABBITMQ_USER\"",
"\"RABBITMQ_PASS\"",
"\"RABBITMQ_PORT_5672_TCP_ADDR\"",
"\"RABBITMQ_PORT_5672_TCP_PORT\""
] |
[] |
[
"RABBITMQ_USER",
"RABBITMQ_PORT_5672_TCP_PORT",
"RABBITMQ_PASS",
"RABBITMQ_PORT_5672_TCP_ADDR"
] |
[]
|
["RABBITMQ_USER", "RABBITMQ_PORT_5672_TCP_PORT", "RABBITMQ_PASS", "RABBITMQ_PORT_5672_TCP_ADDR"]
|
go
| 4 | 0 | |
WebDemo/flask_app/main.py
|
from flask import render_template, request
from flask_script import Manager, Server
from app import app
from model import Content, Summary, Article
import app.static.summ as summarizationModel
import os, json, logging
@app.route('/', endpoint='ACCESS')
@app.route('/index.html', endpoint='ACCESSFILE')
def index():
try:
all_pairs = Article.objects.all()
return render_template('index.html', history=all_pairs)
except Exception as e:
logging.error(e)
raise e
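# POST /run_decode: summarize the submitted text with the pretrained model,
# persist the article/summary pair, and return the result as JSON.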
@app.route('/run_decode', methods=['POST'])
def run_decode():
logging.debug('decode your input by our pretrained model')
try:
        source = request.get_json()['source']  # source text posted by the frontend
        logging.debug('input: {}'.format(source))
try:
logging.debug('using the pretrained model.')
sentNums, summary = summarizationModel.decode.run_(source)
except Exception as e:
logging.error(e)
else:
logging.debug('The number of sentences is {}'.format(sentNums))
logging.debug('The abstract is that {}'.format(summary))
results = {'sent_no': sentNums, 'final': summary}
try:
article = Content(text=source)
abstract = Summary(text=summary)
pair = Article(article=article.id, abstract=abstract.id)
article.save()
abstract.save()
pair.save()
except Exception as e:
logging.error(e)
return json.dumps(results)
    except Exception:
        message = {'message': 'Failed to fetch the data from the client.'}
return json.dumps(message)
manager = Manager(app)
manager.add_command('runserver', Server(
use_debugger = True,
use_reloader = True,
host = os.getenv('IP', '0.0.0.0'),
port = int(os.getenv('PORT', 5001))
))
if __name__ == "__main__":
manager.run()
|
[] |
[] |
[
"PORT",
"IP"
] |
[]
|
["PORT", "IP"]
|
python
| 2 | 0 | |
src/autoscaler/db/sqldb/appmetric_sqldb_test.go
|
package sqldb_test
import (
"autoscaler/db"
. "autoscaler/db/sqldb"
"autoscaler/models"
"code.cloudfoundry.org/lager"
"github.com/lib/pq"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"os"
"time"
)
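// These specs exercise AppMetricSQLDB against a real database; the
// connection string is taken from the DBURL environment variable.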
var _ = Describe("AppMetricSQLDB", func() {
var (
adb *AppMetricSQLDB
dbConfig db.DatabaseConfig
logger lager.Logger
err error
appMetrics []*models.AppMetric
start, end int64
before int64
appId, metricName string
testMetricName string = "Test-Metric-Name"
testMetricUnit string = "Test-Metric-Unit"
testAppId string = "Test-App-ID"
orderType db.OrderType
)
BeforeEach(func() {
logger = lager.NewLogger("appmetric-sqldb-test")
dbConfig = db.DatabaseConfig{
URL: os.Getenv("DBURL"),
MaxOpenConnections: 10,
MaxIdleConnections: 5,
ConnectionMaxLifetime: 10 * time.Second,
}
})
Context("NewAppMetricSQLDB", func() {
JustBeforeEach(func() {
adb, err = NewAppMetricSQLDB(dbConfig, logger)
})
AfterEach(func() {
if adb != nil {
err = adb.Close()
Expect(err).NotTo(HaveOccurred())
}
})
Context("when db url is not correct", func() {
BeforeEach(func() {
dbConfig.URL = "postgres://not-exist-user:not-exist-password@localhost/autoscaler?sslmode=disable"
})
It("should error", func() {
Expect(err).To(BeAssignableToTypeOf(&pq.Error{}))
})
})
Context("when db url is correct", func() {
It("should not error", func() {
Expect(err).NotTo(HaveOccurred())
Expect(adb).NotTo(BeNil())
})
})
})
Context("SaveAppMetric", func() {
BeforeEach(func() {
adb, err = NewAppMetricSQLDB(dbConfig, logger)
Expect(err).NotTo(HaveOccurred())
cleanAppMetricTable()
})
AfterEach(func() {
err = adb.Close()
Expect(err).NotTo(HaveOccurred())
})
Context("When inserting an app_metric", func() {
BeforeEach(func() {
appMetric := &models.AppMetric{
AppId: testAppId,
MetricType: testMetricName,
Unit: testMetricUnit,
Timestamp: 11111111,
Value: "300",
}
err = adb.SaveAppMetric(appMetric)
Expect(err).NotTo(HaveOccurred())
})
It("has the appMetric in database", func() {
Expect(hasAppMetric(testAppId, testMetricName, 11111111, "300")).To(BeTrue())
})
})
})
Context("SaveAppMetricsInBulk", func() {
BeforeEach(func() {
adb, err = NewAppMetricSQLDB(dbConfig, logger)
Expect(err).NotTo(HaveOccurred())
cleanAppMetricTable()
})
AfterEach(func() {
err = adb.Close()
Expect(err).NotTo(HaveOccurred())
})
Context("When inserting an array of app_metric", func() {
BeforeEach(func() {
appMetrics := []*models.AppMetric{
&models.AppMetric{
AppId: testAppId,
MetricType: testMetricName,
Unit: testMetricUnit,
Timestamp: 11111111,
Value: "300",
},
&models.AppMetric{
AppId: testAppId,
MetricType: testMetricName,
Unit: testMetricUnit,
Timestamp: 22222222,
Value: "400",
},
}
err = adb.SaveAppMetricsInBulk(appMetrics)
Expect(err).NotTo(HaveOccurred())
})
It("has the array of app_metric in database", func() {
Expect(hasAppMetric(testAppId, testMetricName, 11111111, "300")).To(BeTrue())
Expect(hasAppMetric(testAppId, testMetricName, 22222222, "400")).To(BeTrue())
})
})
})
Context("RetrieveAppMetrics", func() {
BeforeEach(func() {
adb, err = NewAppMetricSQLDB(dbConfig, logger)
Expect(err).NotTo(HaveOccurred())
cleanAppMetricTable()
orderType = db.ASC
appMetric := &models.AppMetric{
AppId: testAppId,
MetricType: testMetricName,
Unit: testMetricUnit,
Timestamp: 11111111,
Value: "100",
}
err = adb.SaveAppMetric(appMetric)
Expect(err).NotTo(HaveOccurred())
appMetric.Timestamp = 33333333
appMetric.Value = "200"
err = adb.SaveAppMetric(appMetric)
Expect(err).NotTo(HaveOccurred())
appMetric.Timestamp = 55555555
appMetric.Value = "300"
err = adb.SaveAppMetric(appMetric)
Expect(err).NotTo(HaveOccurred())
appId = testAppId
metricName = testMetricName
start = 0
end = -1
})
AfterEach(func() {
err = adb.Close()
Expect(err).NotTo(HaveOccurred())
})
JustBeforeEach(func() {
appMetrics, err = adb.RetrieveAppMetrics(appId, metricName, start, end, orderType)
})
Context("The app has no metrics", func() {
BeforeEach(func() {
appId = "app-id-no-metrics"
})
It("returns empty metrics", func() {
Expect(err).NotTo(HaveOccurred())
Expect(appMetrics).To(BeEmpty())
})
})
Context("when the app has no metrics with the given metric name", func() {
BeforeEach(func() {
metricName = "metric-name-no-metrics"
})
It("returns empty metrics", func() {
Expect(err).NotTo(HaveOccurred())
Expect(appMetrics).To(BeEmpty())
})
})
Context("when end time is before all the metrics timestamps", func() {
BeforeEach(func() {
end = 11111110
})
It("returns empty metrics", func() {
Expect(err).NotTo(HaveOccurred())
Expect(appMetrics).To(BeEmpty())
})
})
Context("when start time is after all the metrics timestamps", func() {
BeforeEach(func() {
start = 55555556
})
It("returns empty metrics", func() {
Expect(err).NotTo(HaveOccurred())
Expect(appMetrics).To(BeEmpty())
})
})
Context("when start time > end time", func() {
BeforeEach(func() {
start = 33333333
end = 22222222
})
It("returns empty metrics", func() {
Expect(err).NotTo(HaveOccurred())
Expect(appMetrics).To(BeEmpty())
})
})
		Context("when retrieving all the appMetrics", func() {
It("returns all the appMetrics ordered by timestamp", func() {
Expect(err).NotTo(HaveOccurred())
Expect(appMetrics).To(Equal([]*models.AppMetric{
&models.AppMetric{
AppId: testAppId,
MetricType: testMetricName,
Unit: testMetricUnit,
Timestamp: 11111111,
Value: "100",
},
&models.AppMetric{
AppId: testAppId,
MetricType: testMetricName,
Unit: testMetricUnit,
Timestamp: 33333333,
Value: "200",
},
&models.AppMetric{
AppId: testAppId,
MetricType: testMetricName,
Unit: testMetricUnit,
Timestamp: 55555555,
Value: "300",
}}))
})
})
		Context("when retrieving part of the appMetrics", func() {
BeforeEach(func() {
start = 22222222
end = 66666666
})
It("returns correct appMetrics ordered by timestamp", func() {
Expect(err).NotTo(HaveOccurred())
Expect(appMetrics).To(Equal([]*models.AppMetric{
&models.AppMetric{
AppId: testAppId,
MetricType: testMetricName,
Unit: testMetricUnit,
Timestamp: 33333333,
Value: "200",
},
&models.AppMetric{
AppId: testAppId,
MetricType: testMetricName,
Unit: testMetricUnit,
Timestamp: 55555555,
Value: "300",
}}))
})
})
		Context("when retrieving the appMetrics with descending order", func() {
BeforeEach(func() {
orderType = db.DESC
})
It("returns all the appMetrics ordered by timestamp with descending order", func() {
Expect(err).NotTo(HaveOccurred())
Expect(appMetrics).To(Equal([]*models.AppMetric{
&models.AppMetric{
AppId: testAppId,
MetricType: testMetricName,
Unit: testMetricUnit,
Timestamp: 55555555,
Value: "300",
},
&models.AppMetric{
AppId: testAppId,
MetricType: testMetricName,
Unit: testMetricUnit,
Timestamp: 33333333,
Value: "200",
},
&models.AppMetric{
AppId: testAppId,
MetricType: testMetricName,
Unit: testMetricUnit,
Timestamp: 11111111,
Value: "100",
},
}))
})
})
})
Context("PruneAppMetrics", func() {
BeforeEach(func() {
adb, err = NewAppMetricSQLDB(dbConfig, logger)
Expect(err).NotTo(HaveOccurred())
cleanAppMetricTable()
appMetric := &models.AppMetric{
AppId: testAppId,
MetricType: testMetricName,
Unit: testMetricUnit,
Timestamp: 11111111,
Value: "100",
}
err = adb.SaveAppMetric(appMetric)
Expect(err).NotTo(HaveOccurred())
appMetric.Timestamp = 55555555
appMetric.Value = "200"
err = adb.SaveAppMetric(appMetric)
Expect(err).NotTo(HaveOccurred())
appMetric.Timestamp = 33333333
appMetric.Value = "300"
err = adb.SaveAppMetric(appMetric)
Expect(err).NotTo(HaveOccurred())
})
AfterEach(func() {
err = adb.Close()
Expect(err).NotTo(HaveOccurred())
})
JustBeforeEach(func() {
err = adb.PruneAppMetrics(before)
})
Context("when pruning app metrics before all the timestamps of metrics", func() {
BeforeEach(func() {
before = 0
})
It("does not remove any metrics", func() {
Expect(err).NotTo(HaveOccurred())
Expect(getNumberOfAppMetrics()).To(Equal(3))
})
})
Context("when pruning all the metrics", func() {
BeforeEach(func() {
before = time.Now().UnixNano()
})
It("empties the app metrics table", func() {
Expect(err).NotTo(HaveOccurred())
Expect(getNumberOfAppMetrics()).To(Equal(0))
})
})
Context("when pruning part of the metrics", func() {
BeforeEach(func() {
before = 33333333
})
It("removes metrics before the time specified", func() {
Expect(err).NotTo(HaveOccurred())
Expect(getNumberOfAppMetrics()).To(Equal(1))
Expect(hasAppMetric(testAppId, testMetricName, 55555555, "200")).To(BeTrue())
})
})
Context("When not connected to the database", func() {
BeforeEach(func() {
before = 0
err = adb.Close()
Expect(err).NotTo(HaveOccurred())
})
It("should error", func() {
Expect(err).To(MatchError(MatchRegexp("sql: database is closed")))
})
})
})
})
|
[
"\"DBURL\""
] |
[] |
[
"DBURL"
] |
[]
|
["DBURL"]
|
go
| 1 | 0 | |
cmd/mavenBuild_generated.go
|
// Code generated by piper's step-generator. DO NOT EDIT.
package cmd
import (
"fmt"
"os"
"path/filepath"
"time"
"github.com/SAP/jenkins-library/pkg/config"
"github.com/SAP/jenkins-library/pkg/log"
"github.com/SAP/jenkins-library/pkg/piperenv"
"github.com/SAP/jenkins-library/pkg/splunk"
"github.com/SAP/jenkins-library/pkg/telemetry"
"github.com/SAP/jenkins-library/pkg/validation"
"github.com/spf13/cobra"
)
type mavenBuildOptions struct {
PomPath string `json:"pomPath,omitempty"`
Profiles []string `json:"profiles,omitempty"`
Flatten bool `json:"flatten,omitempty"`
Verify bool `json:"verify,omitempty"`
ProjectSettingsFile string `json:"projectSettingsFile,omitempty"`
GlobalSettingsFile string `json:"globalSettingsFile,omitempty"`
M2Path string `json:"m2Path,omitempty"`
LogSuccessfulMavenTransfers bool `json:"logSuccessfulMavenTransfers,omitempty"`
CreateBOM bool `json:"createBOM,omitempty"`
AltDeploymentRepositoryPassword string `json:"altDeploymentRepositoryPassword,omitempty"`
AltDeploymentRepositoryUser string `json:"altDeploymentRepositoryUser,omitempty"`
AltDeploymentRepositoryURL string `json:"altDeploymentRepositoryUrl,omitempty"`
AltDeploymentRepositoryID string `json:"altDeploymentRepositoryID,omitempty"`
CustomTLSCertificateLinks []string `json:"customTlsCertificateLinks,omitempty"`
Publish bool `json:"publish,omitempty"`
JavaCaCertFilePath string `json:"javaCaCertFilePath,omitempty"`
BuildSettingsInfo string `json:"buildSettingsInfo,omitempty"`
}
type mavenBuildCommonPipelineEnvironment struct {
custom struct {
buildSettingsInfo string
}
}
func (p *mavenBuildCommonPipelineEnvironment) persist(path, resourceName string) {
content := []struct {
category string
name string
value interface{}
}{
{category: "custom", name: "buildSettingsInfo", value: p.custom.buildSettingsInfo},
}
errCount := 0
for _, param := range content {
err := piperenv.SetResourceParameter(path, resourceName, filepath.Join(param.category, param.name), param.value)
if err != nil {
log.Entry().WithError(err).Error("Error persisting piper environment.")
errCount++
}
}
if errCount > 0 {
log.Entry().Fatal("failed to persist Piper environment")
}
}
// MavenBuildCommand This step will install the maven project into the local maven repository.
func MavenBuildCommand() *cobra.Command {
const STEP_NAME = "mavenBuild"
metadata := mavenBuildMetadata()
var stepConfig mavenBuildOptions
var startTime time.Time
var commonPipelineEnvironment mavenBuildCommonPipelineEnvironment
var logCollector *log.CollectorHook
var splunkClient *splunk.Splunk
telemetryClient := &telemetry.Telemetry{}
var createMavenBuildCmd = &cobra.Command{
Use: STEP_NAME,
Short: "This step will install the maven project into the local maven repository.",
Long: `This step will install the maven project into the local maven repository.
It will also prepare jacoco to record the code coverage and
supports ci friendly versioning by flattening the pom before installing.`,
PreRunE: func(cmd *cobra.Command, _ []string) error {
startTime = time.Now()
log.SetStepName(STEP_NAME)
log.SetVerbose(GeneralConfig.Verbose)
GeneralConfig.GitHubAccessTokens = ResolveAccessTokens(GeneralConfig.GitHubTokens)
path, _ := os.Getwd()
fatalHook := &log.FatalHook{CorrelationID: GeneralConfig.CorrelationID, Path: path}
log.RegisterHook(fatalHook)
err := PrepareConfig(cmd, &metadata, STEP_NAME, &stepConfig, config.OpenPiperFile)
if err != nil {
log.SetErrorCategory(log.ErrorConfiguration)
return err
}
log.RegisterSecret(stepConfig.AltDeploymentRepositoryPassword)
if len(GeneralConfig.HookConfig.SentryConfig.Dsn) > 0 {
sentryHook := log.NewSentryHook(GeneralConfig.HookConfig.SentryConfig.Dsn, GeneralConfig.CorrelationID)
log.RegisterHook(&sentryHook)
}
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunkClient = &splunk.Splunk{}
logCollector = &log.CollectorHook{CorrelationID: GeneralConfig.CorrelationID}
log.RegisterHook(logCollector)
}
validation, err := validation.New(validation.WithJSONNamesForStructFields(), validation.WithPredefinedErrorMessages())
if err != nil {
return err
}
if err = validation.ValidateStruct(stepConfig); err != nil {
log.SetErrorCategory(log.ErrorConfiguration)
return err
}
return nil
},
Run: func(_ *cobra.Command, _ []string) {
stepTelemetryData := telemetry.CustomData{}
stepTelemetryData.ErrorCode = "1"
handler := func() {
config.RemoveVaultSecretFiles()
commonPipelineEnvironment.persist(GeneralConfig.EnvRootPath, "commonPipelineEnvironment")
stepTelemetryData.Duration = fmt.Sprintf("%v", time.Since(startTime).Milliseconds())
stepTelemetryData.ErrorCategory = log.GetErrorCategory().String()
stepTelemetryData.PiperCommitHash = GitCommit
telemetryClient.SetData(&stepTelemetryData)
telemetryClient.Send()
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunkClient.Send(telemetryClient.GetData(), logCollector)
}
}
log.DeferExitHandler(handler)
defer handler()
telemetryClient.Initialize(GeneralConfig.NoTelemetry, STEP_NAME)
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunkClient.Initialize(GeneralConfig.CorrelationID,
GeneralConfig.HookConfig.SplunkConfig.Dsn,
GeneralConfig.HookConfig.SplunkConfig.Token,
GeneralConfig.HookConfig.SplunkConfig.Index,
GeneralConfig.HookConfig.SplunkConfig.SendLogs)
}
mavenBuild(stepConfig, &stepTelemetryData, &commonPipelineEnvironment)
stepTelemetryData.ErrorCode = "0"
log.Entry().Info("SUCCESS")
},
}
addMavenBuildFlags(createMavenBuildCmd, &stepConfig)
return createMavenBuildCmd
}
func addMavenBuildFlags(cmd *cobra.Command, stepConfig *mavenBuildOptions) {
cmd.Flags().StringVar(&stepConfig.PomPath, "pomPath", `pom.xml`, "Path to the pom file which should be installed including all children.")
cmd.Flags().StringSliceVar(&stepConfig.Profiles, "profiles", []string{}, "Defines list of maven build profiles to be used.")
cmd.Flags().BoolVar(&stepConfig.Flatten, "flatten", true, "Defines if the pom files should be flattened to support ci friendly maven versioning.")
cmd.Flags().BoolVar(&stepConfig.Verify, "verify", false, "Instead of installing the artifact only the verify lifecycle phase is executed.")
cmd.Flags().StringVar(&stepConfig.ProjectSettingsFile, "projectSettingsFile", os.Getenv("PIPER_projectSettingsFile"), "Path to the mvn settings file that should be used as project settings file.")
cmd.Flags().StringVar(&stepConfig.GlobalSettingsFile, "globalSettingsFile", os.Getenv("PIPER_globalSettingsFile"), "Path to the mvn settings file that should be used as global settings file.")
cmd.Flags().StringVar(&stepConfig.M2Path, "m2Path", os.Getenv("PIPER_m2Path"), "Path to the location of the local repository that should be used.")
cmd.Flags().BoolVar(&stepConfig.LogSuccessfulMavenTransfers, "logSuccessfulMavenTransfers", false, "Configures maven to log successful downloads. This is set to `false` by default to reduce the noise in build logs.")
cmd.Flags().BoolVar(&stepConfig.CreateBOM, "createBOM", false, "Creates the bill of materials (BOM) using CycloneDX Maven plugin.")
cmd.Flags().StringVar(&stepConfig.AltDeploymentRepositoryPassword, "altDeploymentRepositoryPassword", os.Getenv("PIPER_altDeploymentRepositoryPassword"), "Password for the alternative deployment repository to which the project artifacts should be deployed ( other than those specified in <distributionManagement> ). This password will be updated in settings.xml . When no settings.xml is provided a new one is created corresponding with <servers> tag")
cmd.Flags().StringVar(&stepConfig.AltDeploymentRepositoryUser, "altDeploymentRepositoryUser", os.Getenv("PIPER_altDeploymentRepositoryUser"), "User for the alternative deployment repository to which the project artifacts should be deployed ( other than those specified in <distributionManagement> ). This user will be updated in settings.xml . When no settings.xml is provided a new one is created corresponding with <servers> tag")
cmd.Flags().StringVar(&stepConfig.AltDeploymentRepositoryURL, "altDeploymentRepositoryUrl", os.Getenv("PIPER_altDeploymentRepositoryUrl"), "Url for the alternative deployment repository to which the project artifacts should be deployed ( other than those specified in <distributionManagement> ). This Url will be updated in settings.xml . When no settings.xml is provided a new one is created corresponding with <servers> tag")
cmd.Flags().StringVar(&stepConfig.AltDeploymentRepositoryID, "altDeploymentRepositoryID", os.Getenv("PIPER_altDeploymentRepositoryID"), "Id for the alternative deployment repository to which the project artifacts should be deployed ( other than those specified in <distributionManagement> ). This id will be updated in settings.xml and will be used as a flag with DaltDeploymentRepository along with mavenAltDeploymentRepositoryUrl during maven deploy . When no settings.xml is provided a new one is created corresponding with <servers> tag")
cmd.Flags().StringSliceVar(&stepConfig.CustomTLSCertificateLinks, "customTlsCertificateLinks", []string{}, "List of download links to custom TLS certificates. This is required to ensure trusted connections to instances with repositories (like nexus) when publish flag is set to true.")
cmd.Flags().BoolVar(&stepConfig.Publish, "publish", false, "Configures maven to run the deploy plugin to publish artifacts to a repository.")
cmd.Flags().StringVar(&stepConfig.JavaCaCertFilePath, "javaCaCertFilePath", os.Getenv("PIPER_javaCaCertFilePath"), "path to the cacerts file used by Java. When maven publish is set to True and customTlsCertificateLinks (to deploy the artifact to a repository with a self signed cert) are provided to trust the self signed certs, Piper will extend the existing Java cacerts to include the new self signed certs. if not provided Piper will search for the cacerts in $JAVA_HOME/jre/lib/security/cacerts")
cmd.Flags().StringVar(&stepConfig.BuildSettingsInfo, "buildSettingsInfo", os.Getenv("PIPER_buildSettingsInfo"), "build settings info is typically filled by the step automatically to create information about the build settings that were used during the maven build . This information is typically used for compliance related processes.")
}
// retrieve step metadata
func mavenBuildMetadata() config.StepData {
var theMetaData = config.StepData{
Metadata: config.StepMetadata{
Name: "mavenBuild",
Aliases: []config.Alias{{Name: "mavenExecute", Deprecated: false}},
Description: "This step will install the maven project into the local maven repository.",
},
Spec: config.StepSpec{
Inputs: config.StepInputs{
Secrets: []config.StepSecrets{
{Name: "altDeploymentRepositoryPasswordId", Description: "Jenkins credentials ID containing the artifact deployment repository password.", Type: "jenkins"},
},
Parameters: []config.StepParameters{
{
Name: "pomPath",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: `pom.xml`,
},
{
Name: "profiles",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "GENERAL", "STAGES", "STEPS"},
Type: "[]string",
Mandatory: false,
Aliases: []config.Alias{},
Default: []string{},
},
{
Name: "flatten",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS"},
Type: "bool",
Mandatory: false,
Aliases: []config.Alias{},
Default: true,
},
{
Name: "verify",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS"},
Type: "bool",
Mandatory: false,
Aliases: []config.Alias{},
Default: false,
},
{
Name: "projectSettingsFile",
ResourceRef: []config.ResourceReference{},
Scope: []string{"GENERAL", "STEPS", "STAGES", "PARAMETERS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "maven/projectSettingsFile"}},
Default: os.Getenv("PIPER_projectSettingsFile"),
},
{
Name: "globalSettingsFile",
ResourceRef: []config.ResourceReference{
{
Name: "commonPipelineEnvironment",
Param: "custom/mavenGlobalSettingsFile",
},
},
Scope: []string{"GENERAL", "STEPS", "STAGES", "PARAMETERS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "maven/globalSettingsFile"}},
Default: os.Getenv("PIPER_globalSettingsFile"),
},
{
Name: "m2Path",
ResourceRef: []config.ResourceReference{},
Scope: []string{"GENERAL", "STEPS", "STAGES", "PARAMETERS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "maven/m2Path"}},
Default: os.Getenv("PIPER_m2Path"),
},
{
Name: "logSuccessfulMavenTransfers",
ResourceRef: []config.ResourceReference{},
Scope: []string{"GENERAL", "STEPS", "STAGES", "PARAMETERS"},
Type: "bool",
Mandatory: false,
Aliases: []config.Alias{{Name: "maven/logSuccessfulMavenTransfers"}},
Default: false,
},
{
Name: "createBOM",
ResourceRef: []config.ResourceReference{},
Scope: []string{"GENERAL", "STEPS", "STAGES", "PARAMETERS"},
Type: "bool",
Mandatory: false,
Aliases: []config.Alias{{Name: "maven/createBOM"}},
Default: false,
},
{
Name: "altDeploymentRepositoryPassword",
ResourceRef: []config.ResourceReference{
{
Name: "commonPipelineEnvironment",
Param: "custom/repositoryPassword",
},
{
Name: "altDeploymentRepositoryPasswordId",
Type: "secret",
},
{
Name: "altDeploymentRepositoryPasswordFileVaultSecretName",
Type: "vaultSecretFile",
Default: "alt-deployment-repository-passowrd",
},
},
Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_altDeploymentRepositoryPassword"),
},
{
Name: "altDeploymentRepositoryUser",
ResourceRef: []config.ResourceReference{
{
Name: "commonPipelineEnvironment",
Param: "custom/repositoryUsername",
},
},
Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_altDeploymentRepositoryUser"),
},
{
Name: "altDeploymentRepositoryUrl",
ResourceRef: []config.ResourceReference{
{
Name: "commonPipelineEnvironment",
Param: "custom/repositoryUrl",
},
},
Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_altDeploymentRepositoryUrl"),
},
{
Name: "altDeploymentRepositoryID",
ResourceRef: []config.ResourceReference{
{
Name: "commonPipelineEnvironment",
Param: "custom/repositoryId",
},
},
Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_altDeploymentRepositoryID"),
},
{
Name: "customTlsCertificateLinks",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "[]string",
Mandatory: false,
Aliases: []config.Alias{},
Default: []string{},
},
{
Name: "publish",
ResourceRef: []config.ResourceReference{},
Scope: []string{"STEPS", "STAGES", "PARAMETERS"},
Type: "bool",
Mandatory: false,
Aliases: []config.Alias{{Name: "maven/publish"}},
Default: false,
},
{
Name: "javaCaCertFilePath",
ResourceRef: []config.ResourceReference{},
Scope: []string{"STEPS", "STAGES", "PARAMETERS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "maven/javaCaCertFilePath"}},
Default: os.Getenv("PIPER_javaCaCertFilePath"),
},
{
Name: "buildSettingsInfo",
ResourceRef: []config.ResourceReference{
{
Name: "commonPipelineEnvironment",
Param: "custom/buildSettingsInfo",
},
},
Scope: []string{"STEPS", "STAGES", "PARAMETERS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_buildSettingsInfo"),
},
},
},
Containers: []config.Container{
{Name: "mvn", Image: "maven:3.6-jdk-8"},
},
Outputs: config.StepOutputs{
Resources: []config.StepResources{
{
Name: "commonPipelineEnvironment",
Type: "piperEnvironment",
Parameters: []map[string]interface{}{
{"Name": "custom/buildSettingsInfo"},
},
},
},
},
},
}
return theMetaData
}
|
[
"\"PIPER_projectSettingsFile\"",
"\"PIPER_globalSettingsFile\"",
"\"PIPER_m2Path\"",
"\"PIPER_altDeploymentRepositoryPassword\"",
"\"PIPER_altDeploymentRepositoryUser\"",
"\"PIPER_altDeploymentRepositoryUrl\"",
"\"PIPER_altDeploymentRepositoryID\"",
"\"PIPER_javaCaCertFilePath\"",
"\"PIPER_buildSettingsInfo\"",
"\"PIPER_projectSettingsFile\"",
"\"PIPER_globalSettingsFile\"",
"\"PIPER_m2Path\"",
"\"PIPER_altDeploymentRepositoryPassword\"",
"\"PIPER_altDeploymentRepositoryUser\"",
"\"PIPER_altDeploymentRepositoryUrl\"",
"\"PIPER_altDeploymentRepositoryID\"",
"\"PIPER_javaCaCertFilePath\"",
"\"PIPER_buildSettingsInfo\""
] |
[] |
[
"PIPER_altDeploymentRepositoryID",
"PIPER_globalSettingsFile",
"PIPER_altDeploymentRepositoryUser",
"PIPER_altDeploymentRepositoryPassword",
"PIPER_javaCaCertFilePath",
"PIPER_m2Path",
"PIPER_altDeploymentRepositoryUrl",
"PIPER_buildSettingsInfo",
"PIPER_projectSettingsFile"
] |
[]
|
["PIPER_altDeploymentRepositoryID", "PIPER_globalSettingsFile", "PIPER_altDeploymentRepositoryUser", "PIPER_altDeploymentRepositoryPassword", "PIPER_javaCaCertFilePath", "PIPER_m2Path", "PIPER_altDeploymentRepositoryUrl", "PIPER_buildSettingsInfo", "PIPER_projectSettingsFile"]
|
go
| 9 | 0 | |
wal/reader_test.go
|
// Copyright 2019 The Prometheus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package wal
import (
"bytes"
"encoding/binary"
"fmt"
"hash/crc32"
"io"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"testing"
"github.com/rs/zerolog"
"github.com/stretchr/testify/assert"
tsdb_errors "github.com/m4ksio/wal/errors"
)
type reader interface {
Next() bool
Err() error
Record() []byte
Offset() int64
}
type rec struct {
t recType
b []byte
}
var readerConstructors = map[string]func(io.Reader) reader{
"Reader": func(r io.Reader) reader {
return NewReader(r)
},
}
var data = make([]byte, 100000)
var testReaderCases = []struct {
t []rec
exp [][]byte
fail bool
}{
// Sequence of valid records.
{
t: []rec{
{recFull, data[0:200]},
{recFirst, data[200:300]},
{recLast, data[300:400]},
{recFirst, data[400:800]},
{recMiddle, data[800:900]},
{recPageTerm, make([]byte, pageSize-900-recordHeaderSize*5-1)}, // exactly lines up with page boundary.
{recLast, data[900:900]},
{recFirst, data[900:1000]},
{recMiddle, data[1000:1200]},
{recMiddle, data[1200:30000]},
{recMiddle, data[30000:30001]},
{recMiddle, data[30001:30001]},
{recLast, data[30001:32000]},
},
exp: [][]byte{
data[0:200],
data[200:400],
data[400:900],
data[900:32000],
},
},
// Exactly at the limit of one page minus the header size
{
t: []rec{
{recFull, data[0 : pageSize-recordHeaderSize]},
},
exp: [][]byte{
data[:pageSize-recordHeaderSize],
},
},
// More than a full page, this exceeds our buffer and can never happen
// when written by the WAL.
{
t: []rec{
{recFull, data[0 : pageSize+1]},
},
fail: true,
},
	// Two records that together are too big for a page.
	// NB: currently the non-live reader succeeds on this. I think this is a bug,
	// but we've seen it in production.
{
t: []rec{
{recFull, data[:pageSize/2]},
{recFull, data[:pageSize/2]},
},
exp: [][]byte{
data[:pageSize/2],
data[:pageSize/2],
},
},
// Invalid orders of record types.
{
t: []rec{{recMiddle, data[:200]}},
fail: true,
},
{
t: []rec{{recLast, data[:200]}},
fail: true,
},
{
t: []rec{
{recFirst, data[:200]},
{recFull, data[200:400]},
},
fail: true,
},
{
t: []rec{
{recFirst, data[:100]},
{recMiddle, data[100:200]},
{recFull, data[200:400]},
},
fail: true,
},
// Non-zero data after page termination.
{
t: []rec{
{recFull, data[:100]},
{recPageTerm, append(make([]byte, pageSize-recordHeaderSize-102), 1)},
},
exp: [][]byte{data[:100]},
fail: true,
},
}
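// encodedRecord frames b the way the WAL writer does: a one-byte record type,
// a big-endian uint16 payload length, a CRC32 (Castagnoli) checksum of the
// payload, and then the payload itself. recPageTerm is special-cased as a zero
// type byte followed by the padding bytes.
// Illustrative layout (offsets assume recordHeaderSize is 7 bytes, i.e.
// 1 type + 2 length + 4 CRC, matching the writes below):
//   [0]   record type
//   [1:3] payload length, big endian
//   [3:7] CRC32-C of the payload
//   [7:]  payload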
func encodedRecord(t recType, b []byte) []byte {
if t == recPageTerm {
return append([]byte{0}, b...)
}
r := make([]byte, recordHeaderSize)
r[0] = byte(t)
binary.BigEndian.PutUint16(r[1:], uint16(len(b)))
binary.BigEndian.PutUint32(r[3:], crc32.Checksum(b, castagnoliTable))
return append(r, b...)
}
// TestReader feeds the reader a stream of encoded records with different types.
func TestReader(t *testing.T) {
for name, fn := range readerConstructors {
for i, c := range testReaderCases {
t.Run(fmt.Sprintf("%s/%d", name, i), func(t *testing.T) {
var buf []byte
for _, r := range c.t {
buf = append(buf, encodedRecord(r.t, r.b)...)
}
r := fn(bytes.NewReader(buf))
for j := 0; r.Next(); j++ {
t.Logf("record %d", j)
rec := r.Record()
if j >= len(c.exp) {
t.Fatal("received more records than expected")
}
assert.Equal(t, c.exp[j], rec, "Bytes within record did not match expected Bytes")
}
if !c.fail && r.Err() != nil {
t.Fatalf("unexpected error: %s", r.Err())
}
if c.fail && r.Err() == nil {
t.Fatalf("expected error but got none")
}
})
}
}
}
const fuzzLen = 500
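// generateRandomEntries logs fuzzLen randomly sized records (up to ~50 bytes,
// one page, or eight pages, depending on the iteration) to the WAL, batching a
// few records per Log call at random, and sends every record on the channel so
// the reading side can compare what it decodes against what was written.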
func generateRandomEntries(w *WAL, records chan []byte) error {
var recs [][]byte
for i := 0; i < fuzzLen; i++ {
var sz int64
switch i % 5 {
case 0, 1:
sz = 50
case 2, 3:
sz = pageSize
default:
sz = pageSize * 8
}
rec := make([]byte, rand.Int63n(sz))
if _, err := rand.Read(rec); err != nil {
return err
}
records <- rec
// Randomly batch up records.
recs = append(recs, rec)
if rand.Intn(4) < 3 {
if _, err := w.Log(recs...); err != nil {
return err
}
recs = recs[:0]
}
}
_, err := w.Log(recs...)
return err
}
type multiReadCloser struct {
reader io.Reader
closers []io.Closer
}
func (m *multiReadCloser) Read(p []byte) (n int, err error) {
return m.reader.Read(p)
}
func (m *multiReadCloser) Close() error {
var merr tsdb_errors.MultiError
for _, closer := range m.closers {
merr.Add(closer.Close())
}
return merr.Err()
}
func allSegments(dir string) (io.ReadCloser, error) {
seg, err := listSegments(dir)
if err != nil {
return nil, err
}
var readers []io.Reader
var closers []io.Closer
for _, r := range seg {
f, err := os.Open(filepath.Join(dir, r.name))
if err != nil {
return nil, err
}
readers = append(readers, f)
closers = append(closers, f)
}
return &multiReadCloser{
reader: io.MultiReader(readers...),
closers: closers,
}, nil
}
func TestReaderFuzz(t *testing.T) {
for name, fn := range readerConstructors {
for _, compress := range []bool{false, true} {
t.Run(fmt.Sprintf("%s,compress=%t", name, compress), func(t *testing.T) {
dir, err := ioutil.TempDir("", "wal_fuzz_live")
assert.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(dir))
}()
w, err := NewSize(zerolog.Nop(), nil, dir, 128*pageSize, compress)
assert.NoError(t, err)
// Buffering required as we're not reading concurrently.
input := make(chan []byte, fuzzLen)
err = generateRandomEntries(w, input)
assert.NoError(t, err)
close(input)
err = w.Close()
assert.NoError(t, err)
sr, err := allSegments(w.Dir())
assert.NoError(t, err)
defer sr.Close()
reader := fn(sr)
for expected := range input {
assert.True(t, reader.Next(), "expected record: %v", reader.Err())
assert.Equal(t, expected, reader.Record(), "read wrong record")
}
assert.True(t, !reader.Next(), "unexpected record")
})
}
}
}
func TestReaderData(t *testing.T) {
dir := os.Getenv("WALDIR")
if dir == "" {
return
}
for name, fn := range readerConstructors {
t.Run(name, func(t *testing.T) {
w, err := New(zerolog.Nop(), nil, dir, true)
assert.NoError(t, err)
sr, err := allSegments(dir)
assert.NoError(t, err)
reader := fn(sr)
for reader.Next() {
}
assert.NoError(t, reader.Err())
err = w.Repair(reader.Err())
assert.NoError(t, err)
})
}
}
|
[
"\"WALDIR\""
] |
[] |
[
"WALDIR"
] |
[]
|
["WALDIR"]
|
go
| 1 | 0 | |
tests/test_env.py
|
# coding: utf-8
import os
import sys
from mock import patch
import pytest
from decouple import Config, RepositoryEnv, UndefinedValueError
# Useful for very coarse version differentiation.
PY3 = sys.version_info[0] == 3
if PY3:
from io import StringIO
else:
from io import BytesIO as StringIO
ENVFILE = '''
KeyTrue=True
KeyOne=1
KeyYes=yes
KeyOn=on
KeyY=y
KeyFalse=False
KeyZero=0
KeyNo=no
KeyN=n
KeyOff=off
KeyEmpty=
#CommentedKey=None
PercentNotEscaped=%%
NoInterpolation=%(KeyOff)s
IgnoreSpace = text
RespectSingleQuoteSpace = ' text'
RespectDoubleQuoteSpace = " text"
KeyOverrideByEnv=NotThis
KeyWithSingleQuoteEnd=text'
KeyWithSingleQuoteMid=te'xt
KeyWithSingleQuoteBegin='text
KeyWithDoubleQuoteEnd=text"
KeyWithDoubleQuoteMid=te"xt
KeyWithDoubleQuoteBegin="text
KeyIsSingleQuote='
KeyIsDoubleQuote="
'''
@pytest.fixture(scope='module')
def config():
with patch('decouple.open', return_value=StringIO(ENVFILE), create=True):
return Config(RepositoryEnv('.env'))
def test_env_comment(config):
with pytest.raises(UndefinedValueError):
config('CommentedKey')
def test_env_percent_not_escaped(config):
assert '%%' == config('PercentNotEscaped')
def test_env_no_interpolation(config):
assert '%(KeyOff)s' == config('NoInterpolation')
def test_env_bool_true(config):
assert True is config('KeyTrue', cast=bool)
assert True is config('KeyOne', cast=bool)
assert True is config('KeyYes', cast=bool)
assert True is config('KeyOn', cast=bool)
assert True is config('KeyY', cast=bool)
assert True is config('Key1int', default=1, cast=bool)
def test_env_bool_false(config):
assert False is config('KeyFalse', cast=bool)
assert False is config('KeyZero', cast=bool)
assert False is config('KeyNo', cast=bool)
assert False is config('KeyOff', cast=bool)
assert False is config('KeyN', cast=bool)
assert False is config('KeyEmpty', cast=bool)
assert False is config('Key0int', default=0, cast=bool)
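# The following two tests demonstrate lookup precedence: values present in
# os.environ win over (or supplement) the RepositoryEnv file, so setting an
# environment variable overrides the KeyOverrideByEnv entry defined in ENVFILE.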
def test_env_os_environ(config):
os.environ['KeyOverrideByEnv'] = 'This'
assert 'This' == config('KeyOverrideByEnv')
del os.environ['KeyOverrideByEnv']
def test_env_undefined_but_present_in_os_environ(config):
os.environ['KeyOnlyEnviron'] = ''
assert '' == config('KeyOnlyEnviron')
del os.environ['KeyOnlyEnviron']
def test_env_undefined(config):
with pytest.raises(UndefinedValueError):
config('UndefinedKey')
def test_env_default_none(config):
assert None is config('UndefinedKey', default=None)
def test_env_empty(config):
assert '' == config('KeyEmpty', default=None)
assert '' == config('KeyEmpty')
def test_env_support_space(config):
assert 'text' == config('IgnoreSpace')
assert ' text' == config('RespectSingleQuoteSpace')
assert ' text' == config('RespectDoubleQuoteSpace')
def test_env_empty_string_means_false(config):
assert False is config('KeyEmpty', cast=bool)
def test_env_with_quote(config):
assert "text'" == config('KeyWithSingleQuoteEnd')
assert 'text"' == config('KeyWithDoubleQuoteEnd')
assert "te'xt" == config('KeyWithSingleQuoteMid')
assert "'text" == config('KeyWithSingleQuoteBegin')
assert 'te"xt' == config('KeyWithDoubleQuoteMid')
assert '"text' == config('KeyWithDoubleQuoteBegin')
assert '"' == config('KeyIsDoubleQuote')
assert "'" == config('KeyIsSingleQuote')
|
[] |
[] |
[
"KeyOverrideByEnv",
"KeyOnlyEnviron"
] |
[]
|
["KeyOverrideByEnv", "KeyOnlyEnviron"]
|
python
| 2 | 0 | |
providers/aws/aws_service.go
|
// Copyright 2018 The Terraformer Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package aws
import (
"context"
"os"
"regexp"
"github.com/aws/aws-sdk-go-v2/service/sts"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
"github.com/GoogleCloudPlatform/terraformer/terraformutils"
)
type AWSService struct { //nolint
terraformutils.Service
}
var awsVariable = regexp.MustCompile(`(\${[0-9A-Za-z:]+})`)
var configCache *aws.Config
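// generateConfig resolves the AWS SDK configuration once and caches it for the
// remaining services. After retrieving credentials it exports them back into
// the process environment (unless AWS_SECRET_ACCESS_KEY is already set) so the
// generated Terraform provider can reuse the same, possibly MFA-derived, STS
// session without prompting for a token again.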
func (s *AWSService) generateConfig() (aws.Config, error) {
if configCache != nil {
return *configCache, nil
}
baseConfig, e := s.buildBaseConfig()
if e != nil {
return baseConfig, e
}
if s.Verbose {
		// combine both flags so request and response bodies are logged
		baseConfig.ClientLogMode = aws.LogRequestWithBody | aws.LogResponseWithBody
}
creds, e := baseConfig.Credentials.Retrieve(context.TODO())
if e != nil {
return baseConfig, e
}
// terraform cannot ask for MFA token, so we need to pass STS session token, which might contain credentials with MFA requirement
	secretKey := os.Getenv("AWS_SECRET_ACCESS_KEY")
	if secretKey == "" {
os.Setenv("AWS_ACCESS_KEY_ID", creds.AccessKeyID)
os.Setenv("AWS_SECRET_ACCESS_KEY", creds.SecretAccessKey)
if creds.SessionToken != "" {
os.Setenv("AWS_SESSION_TOKEN", creds.SessionToken)
}
}
configCache = &baseConfig
return baseConfig, nil
}
func (s *AWSService) buildBaseConfig() (aws.Config, error) {
var loadOptions []func(*config.LoadOptions) error
if s.GetArgs()["profile"].(string) != "" {
loadOptions = append(loadOptions, config.WithSharedConfigProfile(s.GetArgs()["profile"].(string)))
}
if s.GetArgs()["region"].(string) != "" {
os.Setenv("AWS_REGION", s.GetArgs()["region"].(string))
}
loadOptions = append(loadOptions, config.WithAssumeRoleCredentialOptions(func(options *stscreds.AssumeRoleOptions) {
options.TokenProvider = stscreds.StdinTokenProvider
}))
return config.LoadDefaultConfig(context.TODO(), loadOptions...)
}
// for CF interpolation and IAM Policy variables
func (*AWSService) escapeAwsInterpolation(str string) string {
return awsVariable.ReplaceAllString(str, "$$$1")
}
func (s *AWSService) getAccountNumber(config aws.Config) (*string, error) {
stsSvc := sts.NewFromConfig(config)
identity, err := stsSvc.GetCallerIdentity(context.TODO(), &sts.GetCallerIdentityInput{})
if err != nil {
return nil, err
}
return identity.Account, nil
}
|
[
"\"AWS_SECRET_ACCESS_KEY\""
] |
[] |
[
"AWS_SECRET_ACCESS_KEY"
] |
[]
|
["AWS_SECRET_ACCESS_KEY"]
|
go
| 1 | 0 | |
cmd/xsum-pcm/main.go
|
package main
import (
"fmt"
"io"
"log"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/sclevine/xsum/cli"
)
var Version = "0.0.0"
func main() {
alg := "sha256"
if strings.HasPrefix(os.Args[0], "xsum-pcm-") {
alg = strings.TrimPrefix(os.Args[0], "xsum-pcm-")
}
switch os.Getenv("XSUM_PLUGIN_TYPE") {
case "metadata":
hash, err := cli.ParseHash(alg)
if err != nil {
log.Fatalf("Error: %s", err)
}
r, err := input()
if err != nil {
log.Fatalf("Error: %s", err)
}
defer r.Close()
out, err := hash.Data(r)
if err != nil {
r.Close()
log.Fatalf("Error: %s", err)
}
fmt.Printf("%x", out)
default: // "data"
if len(os.Args) < 2 {
log.Fatal("Error: xsum PCM plugin does not support audio input via stdin")
} else if len(os.Args) > 2 {
log.Fatalf("Error: extra arguments: %s", strings.Join(os.Args[2:], ", "))
}
out, err := pcmSHA(os.Args[1], alg)
if err != nil {
log.Fatalf("Error: %s", err)
}
fmt.Print(out)
}
}
func input() (io.ReadCloser, error) {
switch len(os.Args) {
case 0, 1:
return io.NopCloser(os.Stdin), nil
case 2:
f, err := os.Open(os.Args[1])
if err != nil {
return nil, err
}
return f, nil
default:
return nil, fmt.Errorf("extra arguments: %s", strings.Join(os.Args[2:], ", "))
}
}
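// pcmSHA hashes the decoded PCM stream of the audio file at path. It asks
// ffprobe for the bit depth of the first audio stream, falls back to the
// sample format (and to 16 bit for known lossy containers) when the depth is
// reported as N/A, cross-checks flac files against the MD5 stored in their
// header, and finally hashes the raw samples with the requested algorithm.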
func pcmSHA(path, alg string) (string, error) {
ext := strings.ToLower(filepath.Ext(path))
cmd := exec.Command("ffprobe",
"-v", "error",
"-select_streams", "a:0",
"-show_entries", "stream=bits_per_raw_sample",
"-of", "default=noprint_wrappers=1:nokey=1",
path,
)
out, err := cmd.Output()
if err != nil {
if ee, ok := err.(*exec.ExitError); ok {
log.Printf("%s\n", ee.Stderr)
}
return "", err
}
bits := strings.TrimSpace(string(out))
if bits == "N/A" {
cmd := exec.Command("ffprobe",
"-v", "error",
"-select_streams", "a:0",
"-show_entries", "stream=sample_fmt",
"-of", "default=noprint_wrappers=1:nokey=1",
path,
)
out, err := cmd.Output()
if err != nil {
if ee, ok := err.(*exec.ExitError); ok {
log.Printf("%s\n", ee.Stderr)
}
return "", err
}
switch strings.TrimSpace(string(out)) {
case "s16", "s16p":
bits = "16"
default:
switch ext {
case ".m4a":
log.Printf("Warning: assuming '%s' is lossy m4a", path)
fallthrough
case ".mp3", ".ogg", ".opus":
return pcmSHAOpt(path, "16", alg)
default:
return "", fmt.Errorf("invalid bit depth for '%s'", path)
}
}
}
if ext == ".flac" {
real, err := pcmSHAOpt(path, bits, "md5")
if err != nil {
return "", err
}
claim, err := flacMD5(path)
if err != nil {
return "", err
}
if claim == "00000000000000000000000000000000" {
log.Printf("Warning: flac '%s' missing PCM md5 checksum", path)
} else if real != claim {
return "", fmt.Errorf("corrupted flac '%s' (%s != %s)", path, claim, real)
}
}
return pcmSHAOpt(path, bits, alg)
}
func pcmSHAOpt(path, bits, hash string) (string, error) {
hashL := strings.ToLower(hash)
hashU := strings.ToUpper(hash)
cmd := exec.Command("ffmpeg",
"-i", path,
"-vn",
"-c", "pcm_s"+bits+"le",
"-f", "hash",
"-hash", hashL,
"-loglevel", "error",
"-nostats",
"-",
)
out, err := cmd.Output()
if err != nil {
if ee, ok := err.(*exec.ExitError); ok {
log.Printf("%s\n", ee.Stderr)
}
return "", err
}
	// check the output length first so the slice below cannot index past the end
	if len(out) <= len(hashU)+2 ||
		string(out[:len(hashU)+1]) != hashU+"=" ||
		out[len(out)-1] != '\n' {
return "", fmt.Errorf("invalid checksum '%s'", strings.TrimSpace(string(out)))
}
return string(out[len(hashU)+1 : len(out)-1]), nil
}
func flacMD5(path string) (string, error) {
cmd := exec.Command("metaflac", "--show-md5sum", path)
out, err := cmd.Output()
if err != nil {
if ee, ok := err.(*exec.ExitError); ok {
log.Printf("%s\n", ee.Stderr)
}
return "", err
}
if len(out) != 33 {
return "", fmt.Errorf("invalid checksum '%s'", strings.TrimSpace(string(out)))
}
return string(out[:32]), nil
}
|
[
"\"XSUM_PLUGIN_TYPE\""
] |
[] |
[
"XSUM_PLUGIN_TYPE"
] |
[]
|
["XSUM_PLUGIN_TYPE"]
|
go
| 1 | 0 | |
daemon/daemon_unix.go
|
// +build linux freebsd
package daemon // import "github.com/demonoid81/moby/daemon"
import (
"bufio"
"context"
"fmt"
"io/ioutil"
"net"
"os"
"path/filepath"
"runtime"
"runtime/debug"
"strconv"
"strings"
"time"
statsV1 "github.com/containerd/cgroups/stats/v1"
statsV2 "github.com/containerd/cgroups/v2/stats"
"github.com/demonoid81/moby/api/types"
"github.com/demonoid81/moby/api/types/blkiodev"
pblkiodev "github.com/demonoid81/moby/api/types/blkiodev"
containertypes "github.com/demonoid81/moby/api/types/container"
"github.com/demonoid81/moby/container"
"github.com/demonoid81/moby/daemon/config"
"github.com/demonoid81/moby/daemon/initlayer"
"github.com/demonoid81/moby/errdefs"
"github.com/demonoid81/moby/opts"
"github.com/demonoid81/moby/pkg/containerfs"
"github.com/demonoid81/moby/pkg/idtools"
"github.com/demonoid81/moby/pkg/ioutils"
"github.com/demonoid81/moby/pkg/parsers"
"github.com/demonoid81/moby/pkg/parsers/kernel"
"github.com/demonoid81/moby/pkg/sysinfo"
"github.com/demonoid81/moby/runconfig"
volumemounts "github.com/demonoid81/moby/volume/mounts"
"github.com/demonoid81/libnetwork"
nwconfig "github.com/demonoid81/libnetwork/config"
"github.com/demonoid81/libnetwork/drivers/bridge"
"github.com/demonoid81/libnetwork/netlabel"
"github.com/demonoid81/libnetwork/netutils"
"github.com/demonoid81/libnetwork/options"
lntypes "github.com/demonoid81/libnetwork/types"
"github.com/moby/sys/mount"
"github.com/opencontainers/runc/libcontainer/cgroups"
rsystem "github.com/opencontainers/runc/libcontainer/system"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/vishvananda/netlink"
"golang.org/x/sys/unix"
)
const (
isWindows = false
// DefaultShimBinary is the default shim to be used by containerd if none
// is specified
DefaultShimBinary = "containerd-shim"
// DefaultRuntimeBinary is the default runtime to be used by
// containerd if none is specified
DefaultRuntimeBinary = "runc"
// See https://git.kernel.org/cgit/linux/kernel/git/tip/tip.git/tree/kernel/sched/sched.h?id=8cd9234c64c584432f6992fe944ca9e46ca8ea76#n269
linuxMinCPUShares = 2
linuxMaxCPUShares = 262144
platformSupported = true
// It's not kernel limit, we want this 4M limit to supply a reasonable functional container
linuxMinMemory = 4194304
// constants for remapped root settings
defaultIDSpecifier = "default"
defaultRemappedID = "dockremap"
// constant for cgroup drivers
cgroupFsDriver = "cgroupfs"
cgroupSystemdDriver = "systemd"
cgroupNoneDriver = "none"
// DefaultRuntimeName is the default runtime to be used by
// containerd if none is specified
DefaultRuntimeName = "runc"
)
type containerGetter interface {
GetContainer(string) (*container.Container, error)
}
func getMemoryResources(config containertypes.Resources) *specs.LinuxMemory {
memory := specs.LinuxMemory{}
if config.Memory > 0 {
memory.Limit = &config.Memory
}
if config.MemoryReservation > 0 {
memory.Reservation = &config.MemoryReservation
}
if config.MemorySwap > 0 {
memory.Swap = &config.MemorySwap
}
if config.MemorySwappiness != nil {
swappiness := uint64(*config.MemorySwappiness)
memory.Swappiness = &swappiness
}
if config.OomKillDisable != nil {
memory.DisableOOMKiller = config.OomKillDisable
}
if config.KernelMemory != 0 {
memory.Kernel = &config.KernelMemory
}
if config.KernelMemoryTCP != 0 {
memory.KernelTCP = &config.KernelMemoryTCP
}
return &memory
}
func getPidsLimit(config containertypes.Resources) *specs.LinuxPids {
if config.PidsLimit == nil {
return nil
}
if *config.PidsLimit <= 0 {
// docker API allows 0 and negative values to unset this to be consistent
// with default values. When updating values, runc requires -1 to unset
// the previous limit.
return &specs.LinuxPids{Limit: -1}
}
return &specs.LinuxPids{Limit: *config.PidsLimit}
}
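// getCPUResources translates the container CPU settings into an OCI LinuxCPU
// block. NanoCPUs are converted into a CFS period/quota pair; as an
// illustrative example, NanoCPUs=1500000000 (1.5 CPUs) yields a 100000us
// period with a 150000us quota. Explicit CPU period, quota and realtime
// settings are copied over as-is.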
func getCPUResources(config containertypes.Resources) (*specs.LinuxCPU, error) {
cpu := specs.LinuxCPU{}
if config.CPUShares < 0 {
return nil, fmt.Errorf("shares: invalid argument")
}
if config.CPUShares >= 0 {
shares := uint64(config.CPUShares)
cpu.Shares = &shares
}
if config.CpusetCpus != "" {
cpu.Cpus = config.CpusetCpus
}
if config.CpusetMems != "" {
cpu.Mems = config.CpusetMems
}
if config.NanoCPUs > 0 {
// https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt
period := uint64(100 * time.Millisecond / time.Microsecond)
quota := config.NanoCPUs * int64(period) / 1e9
cpu.Period = &period
cpu.Quota = "a
}
if config.CPUPeriod != 0 {
period := uint64(config.CPUPeriod)
cpu.Period = &period
}
if config.CPUQuota != 0 {
q := config.CPUQuota
cpu.Quota = &q
}
if config.CPURealtimePeriod != 0 {
period := uint64(config.CPURealtimePeriod)
cpu.RealtimePeriod = &period
}
if config.CPURealtimeRuntime != 0 {
c := config.CPURealtimeRuntime
cpu.RealtimeRuntime = &c
}
return &cpu, nil
}
func getBlkioWeightDevices(config containertypes.Resources) ([]specs.LinuxWeightDevice, error) {
var stat unix.Stat_t
var blkioWeightDevices []specs.LinuxWeightDevice
for _, weightDevice := range config.BlkioWeightDevice {
if err := unix.Stat(weightDevice.Path, &stat); err != nil {
return nil, err
}
weight := weightDevice.Weight
d := specs.LinuxWeightDevice{Weight: &weight}
// The type is 32bit on mips.
d.Major = int64(unix.Major(uint64(stat.Rdev))) // nolint: unconvert
d.Minor = int64(unix.Minor(uint64(stat.Rdev))) // nolint: unconvert
blkioWeightDevices = append(blkioWeightDevices, d)
}
return blkioWeightDevices, nil
}
func (daemon *Daemon) parseSecurityOpt(container *container.Container, hostConfig *containertypes.HostConfig) error {
container.NoNewPrivileges = daemon.configStore.NoNewPrivileges
return parseSecurityOpt(container, hostConfig)
}
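// parseSecurityOpt applies --security-opt values to the container. Options are
// "key=value" pairs (the legacy "key:value" form is still accepted but
// deprecated) with label, apparmor, seccomp and no-new-privileges keys;
// illustrative examples would be "seccomp=unconfined", "label=disable" or
// "no-new-privileges=true".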
func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error {
var (
labelOpts []string
err error
)
for _, opt := range config.SecurityOpt {
if opt == "no-new-privileges" {
container.NoNewPrivileges = true
continue
}
if opt == "disable" {
labelOpts = append(labelOpts, "disable")
continue
}
var con []string
if strings.Contains(opt, "=") {
con = strings.SplitN(opt, "=", 2)
} else if strings.Contains(opt, ":") {
con = strings.SplitN(opt, ":", 2)
logrus.Warn("Security options with `:` as a separator are deprecated and will be completely unsupported in 17.04, use `=` instead.")
}
if len(con) != 2 {
return fmt.Errorf("invalid --security-opt 1: %q", opt)
}
switch con[0] {
case "label":
labelOpts = append(labelOpts, con[1])
case "apparmor":
container.AppArmorProfile = con[1]
case "seccomp":
container.SeccompProfile = con[1]
case "no-new-privileges":
noNewPrivileges, err := strconv.ParseBool(con[1])
if err != nil {
return fmt.Errorf("invalid --security-opt 2: %q", opt)
}
container.NoNewPrivileges = noNewPrivileges
default:
return fmt.Errorf("invalid --security-opt 2: %q", opt)
}
}
container.ProcessLabel, container.MountLabel, err = label.InitLabels(labelOpts)
return err
}
func getBlkioThrottleDevices(devs []*blkiodev.ThrottleDevice) ([]specs.LinuxThrottleDevice, error) {
var throttleDevices []specs.LinuxThrottleDevice
var stat unix.Stat_t
for _, d := range devs {
if err := unix.Stat(d.Path, &stat); err != nil {
return nil, err
}
d := specs.LinuxThrottleDevice{Rate: d.Rate}
// the type is 32bit on mips
d.Major = int64(unix.Major(uint64(stat.Rdev))) // nolint: unconvert
d.Minor = int64(unix.Minor(uint64(stat.Rdev))) // nolint: unconvert
throttleDevices = append(throttleDevices, d)
}
return throttleDevices, nil
}
// adjustParallelLimit takes a number of objects and a proposed limit and
// figures out if it's reasonable (and adjusts it accordingly). This is only
// used for daemon startup, which does a lot of parallel loading of containers
// (and if we exceed RLIMIT_NOFILE then we're in trouble).
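// For example (illustrative numbers): with 10,000 containers to restore, a
// proposed limit of 3,000 and a soft RLIMIT_NOFILE of 4,096, neither early
// return applies (4,096 exceeds neither 2*10,000 nor 2*3,000), so the limit is
// reduced to 4,096/2 = 2,048.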
func adjustParallelLimit(n int, limit int) int {
// Rule-of-thumb overhead factor (how many files will each goroutine open
// simultaneously). Yes, this is ugly but to be frank this whole thing is
// ugly.
const overhead = 2
// On Linux, we need to ensure that parallelStartupJobs doesn't cause us to
// exceed RLIMIT_NOFILE. If parallelStartupJobs is too large, we reduce it
// and give a warning (since in theory the user should increase their
// ulimits to the largest possible value for dockerd).
var rlim unix.Rlimit
if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rlim); err != nil {
logrus.Warnf("Couldn't find dockerd's RLIMIT_NOFILE to double-check startup parallelism factor: %v", err)
return limit
}
softRlimit := int(rlim.Cur)
// Much fewer containers than RLIMIT_NOFILE. No need to adjust anything.
if softRlimit > overhead*n {
return limit
}
// RLIMIT_NOFILE big enough, no need to adjust anything.
if softRlimit > overhead*limit {
return limit
}
logrus.Warnf("Found dockerd's open file ulimit (%v) is far too small -- consider increasing it significantly (at least %v)", softRlimit, overhead*limit)
return softRlimit / overhead
}
func checkKernel() error {
// Check for unsupported kernel versions
// FIXME: it would be cleaner to not test for specific versions, but rather
// test for specific functionalities.
// Unfortunately we can't test for the feature "does not cause a kernel panic"
// without actually causing a kernel panic, so we need this workaround until
// the circumstances of pre-3.10 crashes are clearer.
// For details see https://github.com/demonoid81/moby/issues/407
// Docker 1.11 and above doesn't actually run on kernels older than 3.4,
// due to containerd-shim usage of PR_SET_CHILD_SUBREAPER (introduced in 3.4).
if !kernel.CheckKernelVersion(3, 10, 0) {
v, _ := kernel.GetKernelVersion()
if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" {
logrus.Fatalf("Your Linux kernel version %s is not supported for running docker. Please upgrade your kernel to 3.10.0 or newer.", v.String())
}
}
return nil
}
// adaptContainerSettings is called during container creation to modify any
// settings necessary in the HostConfig structure.
func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error {
if adjustCPUShares && hostConfig.CPUShares > 0 {
// Handle unsupported CPUShares
if hostConfig.CPUShares < linuxMinCPUShares {
logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, linuxMinCPUShares)
hostConfig.CPUShares = linuxMinCPUShares
} else if hostConfig.CPUShares > linuxMaxCPUShares {
logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, linuxMaxCPUShares)
hostConfig.CPUShares = linuxMaxCPUShares
}
}
if hostConfig.Memory > 0 && hostConfig.MemorySwap == 0 {
// By default, MemorySwap is set to twice the size of Memory.
hostConfig.MemorySwap = hostConfig.Memory * 2
}
if hostConfig.ShmSize == 0 {
hostConfig.ShmSize = config.DefaultShmSize
if daemon.configStore != nil {
hostConfig.ShmSize = int64(daemon.configStore.ShmSize)
}
}
// Set default IPC mode, if unset for container
if hostConfig.IpcMode.IsEmpty() {
m := config.DefaultIpcMode
if daemon.configStore != nil {
m = daemon.configStore.IpcMode
}
hostConfig.IpcMode = containertypes.IpcMode(m)
}
// Set default cgroup namespace mode, if unset for container
if hostConfig.CgroupnsMode.IsEmpty() {
// for cgroup v2: unshare cgroupns even for privileged containers
// https://github.com/containers/libpod/pull/4374#issuecomment-549776387
if hostConfig.Privileged && !cgroups.IsCgroup2UnifiedMode() {
hostConfig.CgroupnsMode = containertypes.CgroupnsMode("host")
} else {
m := "host"
if cgroups.IsCgroup2UnifiedMode() {
m = "private"
}
if daemon.configStore != nil {
m = daemon.configStore.CgroupNamespaceMode
}
hostConfig.CgroupnsMode = containertypes.CgroupnsMode(m)
}
}
adaptSharedNamespaceContainer(daemon, hostConfig)
var err error
secOpts, err := daemon.generateSecurityOpt(hostConfig)
if err != nil {
return err
}
hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, secOpts...)
if hostConfig.OomKillDisable == nil {
defaultOomKillDisable := false
hostConfig.OomKillDisable = &defaultOomKillDisable
}
return nil
}
// adaptSharedNamespaceContainer replaces a container name with its ID in hostConfig.
// More precisely, it rewrites `container:name` to `container:ID` for PidMode, IpcMode
// and NetworkMode.
//
// When a container shares its namespace with another container, using the ID keeps the
// namespace-sharing connection between the two containers intact even if the other
// container is renamed.
func adaptSharedNamespaceContainer(daemon containerGetter, hostConfig *containertypes.HostConfig) {
containerPrefix := "container:"
if hostConfig.PidMode.IsContainer() {
pidContainer := hostConfig.PidMode.Container()
// if there is any error returned here, we just ignore it and leave it to be
// handled in the following logic
if c, err := daemon.GetContainer(pidContainer); err == nil {
hostConfig.PidMode = containertypes.PidMode(containerPrefix + c.ID)
}
}
if hostConfig.IpcMode.IsContainer() {
ipcContainer := hostConfig.IpcMode.Container()
if c, err := daemon.GetContainer(ipcContainer); err == nil {
hostConfig.IpcMode = containertypes.IpcMode(containerPrefix + c.ID)
}
}
if hostConfig.NetworkMode.IsContainer() {
netContainer := hostConfig.NetworkMode.ConnectedContainer()
if c, err := daemon.GetContainer(netContainer); err == nil {
hostConfig.NetworkMode = containertypes.NetworkMode(containerPrefix + c.ID)
}
}
}
// verifyPlatformContainerResources performs platform-specific validation of the container's resource-configuration
func verifyPlatformContainerResources(resources *containertypes.Resources, sysInfo *sysinfo.SysInfo, update bool) (warnings []string, err error) {
fixMemorySwappiness(resources)
// memory subsystem checks and adjustments
if resources.Memory != 0 && resources.Memory < linuxMinMemory {
return warnings, fmt.Errorf("Minimum memory limit allowed is 4MB")
}
if resources.Memory > 0 && !sysInfo.MemoryLimit {
warnings = append(warnings, "Your kernel does not support memory limit capabilities or the cgroup is not mounted. Limitation discarded.")
resources.Memory = 0
resources.MemorySwap = -1
}
if resources.Memory > 0 && resources.MemorySwap != -1 && !sysInfo.SwapLimit {
warnings = append(warnings, "Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap.")
resources.MemorySwap = -1
}
if resources.Memory > 0 && resources.MemorySwap > 0 && resources.MemorySwap < resources.Memory {
return warnings, fmt.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage")
}
if resources.Memory == 0 && resources.MemorySwap > 0 && !update {
return warnings, fmt.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage")
}
if resources.MemorySwappiness != nil && !sysInfo.MemorySwappiness {
warnings = append(warnings, "Your kernel does not support memory swappiness capabilities or the cgroup is not mounted. Memory swappiness discarded.")
resources.MemorySwappiness = nil
}
if resources.MemorySwappiness != nil {
swappiness := *resources.MemorySwappiness
if swappiness < 0 || swappiness > 100 {
return warnings, fmt.Errorf("Invalid value: %v, valid memory swappiness range is 0-100", swappiness)
}
}
if resources.MemoryReservation > 0 && !sysInfo.MemoryReservation {
warnings = append(warnings, "Your kernel does not support memory soft limit capabilities or the cgroup is not mounted. Limitation discarded.")
resources.MemoryReservation = 0
}
if resources.MemoryReservation > 0 && resources.MemoryReservation < linuxMinMemory {
return warnings, fmt.Errorf("Minimum memory reservation allowed is 4MB")
}
if resources.Memory > 0 && resources.MemoryReservation > 0 && resources.Memory < resources.MemoryReservation {
return warnings, fmt.Errorf("Minimum memory limit can not be less than memory reservation limit, see usage")
}
if resources.KernelMemory > 0 && !sysInfo.KernelMemory {
warnings = append(warnings, "Your kernel does not support kernel memory limit capabilities or the cgroup is not mounted. Limitation discarded.")
resources.KernelMemory = 0
}
if resources.KernelMemory > 0 && resources.KernelMemory < linuxMinMemory {
return warnings, fmt.Errorf("Minimum kernel memory limit allowed is 4MB")
}
if resources.KernelMemory > 0 && !kernel.CheckKernelVersion(4, 0, 0) {
warnings = append(warnings, "You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.")
}
if resources.OomKillDisable != nil && !sysInfo.OomKillDisable {
// only produce warnings if the setting wasn't to *disable* the OOM Kill; no point
// warning the caller if they already wanted the feature to be off
if *resources.OomKillDisable {
warnings = append(warnings, "Your kernel does not support OomKillDisable. OomKillDisable discarded.")
}
resources.OomKillDisable = nil
}
if resources.OomKillDisable != nil && *resources.OomKillDisable && resources.Memory == 0 {
warnings = append(warnings, "OOM killer is disabled for the container, but no memory limit is set, this can result in the system running out of resources.")
}
if resources.PidsLimit != nil && !sysInfo.PidsLimit {
if *resources.PidsLimit > 0 {
warnings = append(warnings, "Your kernel does not support PIDs limit capabilities or the cgroup is not mounted. PIDs limit discarded.")
}
resources.PidsLimit = nil
}
// cpu subsystem checks and adjustments
if resources.NanoCPUs > 0 && resources.CPUPeriod > 0 {
return warnings, fmt.Errorf("Conflicting options: Nano CPUs and CPU Period cannot both be set")
}
if resources.NanoCPUs > 0 && resources.CPUQuota > 0 {
return warnings, fmt.Errorf("Conflicting options: Nano CPUs and CPU Quota cannot both be set")
}
if resources.NanoCPUs > 0 && (!sysInfo.CPUCfsPeriod || !sysInfo.CPUCfsQuota) {
return warnings, fmt.Errorf("NanoCPUs can not be set, as your kernel does not support CPU cfs period/quota or the cgroup is not mounted")
}
// The highest precision we could get on Linux is 0.001, by setting
// cpu.cfs_period_us=1000ms
// cpu.cfs_quota=1ms
// See the following link for details:
// https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt
// Here we don't set the lower limit and it is up to the underlying platform (e.g., Linux) to return an error.
// The error message is 0.01 so that this is consistent with Windows
if resources.NanoCPUs < 0 || resources.NanoCPUs > int64(sysinfo.NumCPU())*1e9 {
return warnings, fmt.Errorf("Range of CPUs is from 0.01 to %d.00, as there are only %d CPUs available", sysinfo.NumCPU(), sysinfo.NumCPU())
}
if resources.CPUShares > 0 && !sysInfo.CPUShares {
warnings = append(warnings, "Your kernel does not support CPU shares or the cgroup is not mounted. Shares discarded.")
resources.CPUShares = 0
}
if resources.CPUPeriod > 0 && !sysInfo.CPUCfsPeriod {
warnings = append(warnings, "Your kernel does not support CPU cfs period or the cgroup is not mounted. Period discarded.")
resources.CPUPeriod = 0
}
if resources.CPUPeriod != 0 && (resources.CPUPeriod < 1000 || resources.CPUPeriod > 1000000) {
return warnings, fmt.Errorf("CPU cfs period can not be less than 1ms (i.e. 1000) or larger than 1s (i.e. 1000000)")
}
if resources.CPUQuota > 0 && !sysInfo.CPUCfsQuota {
warnings = append(warnings, "Your kernel does not support CPU cfs quota or the cgroup is not mounted. Quota discarded.")
resources.CPUQuota = 0
}
if resources.CPUQuota > 0 && resources.CPUQuota < 1000 {
return warnings, fmt.Errorf("CPU cfs quota can not be less than 1ms (i.e. 1000)")
}
if resources.CPUPercent > 0 {
warnings = append(warnings, fmt.Sprintf("%s does not support CPU percent. Percent discarded.", runtime.GOOS))
resources.CPUPercent = 0
}
// cpuset subsystem checks and adjustments
if (resources.CpusetCpus != "" || resources.CpusetMems != "") && !sysInfo.Cpuset {
warnings = append(warnings, "Your kernel does not support cpuset or the cgroup is not mounted. Cpuset discarded.")
resources.CpusetCpus = ""
resources.CpusetMems = ""
}
cpusAvailable, err := sysInfo.IsCpusetCpusAvailable(resources.CpusetCpus)
if err != nil {
return warnings, errors.Wrapf(err, "Invalid value %s for cpuset cpus", resources.CpusetCpus)
}
if !cpusAvailable {
return warnings, fmt.Errorf("Requested CPUs are not available - requested %s, available: %s", resources.CpusetCpus, sysInfo.Cpus)
}
memsAvailable, err := sysInfo.IsCpusetMemsAvailable(resources.CpusetMems)
if err != nil {
return warnings, errors.Wrapf(err, "Invalid value %s for cpuset mems", resources.CpusetMems)
}
if !memsAvailable {
return warnings, fmt.Errorf("Requested memory nodes are not available - requested %s, available: %s", resources.CpusetMems, sysInfo.Mems)
}
// blkio subsystem checks and adjustments
if resources.BlkioWeight > 0 && !sysInfo.BlkioWeight {
warnings = append(warnings, "Your kernel does not support Block I/O weight or the cgroup is not mounted. Weight discarded.")
resources.BlkioWeight = 0
}
if resources.BlkioWeight > 0 && (resources.BlkioWeight < 10 || resources.BlkioWeight > 1000) {
return warnings, fmt.Errorf("Range of blkio weight is from 10 to 1000")
}
if resources.IOMaximumBandwidth != 0 || resources.IOMaximumIOps != 0 {
return warnings, fmt.Errorf("Invalid QoS settings: %s does not support Maximum IO Bandwidth or Maximum IO IOps", runtime.GOOS)
}
if len(resources.BlkioWeightDevice) > 0 && !sysInfo.BlkioWeightDevice {
warnings = append(warnings, "Your kernel does not support Block I/O weight_device or the cgroup is not mounted. Weight-device discarded.")
resources.BlkioWeightDevice = []*pblkiodev.WeightDevice{}
}
if len(resources.BlkioDeviceReadBps) > 0 && !sysInfo.BlkioReadBpsDevice {
warnings = append(warnings, "Your kernel does not support BPS Block I/O read limit or the cgroup is not mounted. Block I/O BPS read limit discarded.")
resources.BlkioDeviceReadBps = []*pblkiodev.ThrottleDevice{}
}
if len(resources.BlkioDeviceWriteBps) > 0 && !sysInfo.BlkioWriteBpsDevice {
warnings = append(warnings, "Your kernel does not support BPS Block I/O write limit or the cgroup is not mounted. Block I/O BPS write limit discarded.")
resources.BlkioDeviceWriteBps = []*pblkiodev.ThrottleDevice{}
}
if len(resources.BlkioDeviceReadIOps) > 0 && !sysInfo.BlkioReadIOpsDevice {
warnings = append(warnings, "Your kernel does not support IOPS Block read limit or the cgroup is not mounted. Block I/O IOPS read limit discarded.")
resources.BlkioDeviceReadIOps = []*pblkiodev.ThrottleDevice{}
}
if len(resources.BlkioDeviceWriteIOps) > 0 && !sysInfo.BlkioWriteIOpsDevice {
warnings = append(warnings, "Your kernel does not support IOPS Block write limit or the cgroup is not mounted. Block I/O IOPS write limit discarded.")
resources.BlkioDeviceWriteIOps = []*pblkiodev.ThrottleDevice{}
}
return warnings, nil
}
func (daemon *Daemon) getCgroupDriver() string {
if UsingSystemd(daemon.configStore) {
return cgroupSystemdDriver
}
if daemon.Rootless() {
return cgroupNoneDriver
}
return cgroupFsDriver
}
// getCD gets the raw value of the native.cgroupdriver option, if set.
func getCD(config *config.Config) string {
for _, option := range config.ExecOptions {
key, val, err := parsers.ParseKeyValueOpt(option)
if err != nil || !strings.EqualFold(key, "native.cgroupdriver") {
continue
}
return val
}
return ""
}
// VerifyCgroupDriver validates native.cgroupdriver
func VerifyCgroupDriver(config *config.Config) error {
cd := getCD(config)
if cd == "" || cd == cgroupFsDriver || cd == cgroupSystemdDriver {
return nil
}
if cd == cgroupNoneDriver {
return fmt.Errorf("native.cgroupdriver option %s is internally used and cannot be specified manually", cd)
}
return fmt.Errorf("native.cgroupdriver option %s not supported", cd)
}
// UsingSystemd returns true if cli option includes native.cgroupdriver=systemd
func UsingSystemd(config *config.Config) bool {
return getCD(config) == cgroupSystemdDriver
}
// verifyPlatformContainerSettings performs platform-specific validation of the
// hostconfig and config structures.
func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, update bool) (warnings []string, err error) {
if hostConfig == nil {
return nil, nil
}
sysInfo := daemon.RawSysInfo(true)
w, err := verifyPlatformContainerResources(&hostConfig.Resources, sysInfo, update)
// regardless of whether err is nil, w may already contain warnings.
warnings = append(warnings, w...)
if err != nil {
return warnings, err
}
if hostConfig.ShmSize < 0 {
return warnings, fmt.Errorf("SHM size can not be less than 0")
}
if hostConfig.OomScoreAdj < -1000 || hostConfig.OomScoreAdj > 1000 {
return warnings, fmt.Errorf("Invalid value %d, range for oom score adj is [-1000, 1000]", hostConfig.OomScoreAdj)
}
// ip-forwarding does not affect container with '--net=host' (or '--net=none')
if sysInfo.IPv4ForwardingDisabled && !(hostConfig.NetworkMode.IsHost() || hostConfig.NetworkMode.IsNone()) {
warnings = append(warnings, "IPv4 forwarding is disabled. Networking will not work.")
}
if hostConfig.NetworkMode.IsHost() && len(hostConfig.PortBindings) > 0 {
warnings = append(warnings, "Published ports are discarded when using host network mode")
}
// check for various conflicting options with user namespaces
if daemon.configStore.RemappedRoot != "" && hostConfig.UsernsMode.IsPrivate() {
if hostConfig.Privileged {
return warnings, fmt.Errorf("privileged mode is incompatible with user namespaces. You must run the container in the host namespace when running privileged mode")
}
if hostConfig.NetworkMode.IsHost() && !hostConfig.UsernsMode.IsHost() {
return warnings, fmt.Errorf("cannot share the host's network namespace when user namespaces are enabled")
}
if hostConfig.PidMode.IsHost() && !hostConfig.UsernsMode.IsHost() {
return warnings, fmt.Errorf("cannot share the host PID namespace when user namespaces are enabled")
}
}
if hostConfig.CgroupParent != "" && UsingSystemd(daemon.configStore) {
// CgroupParent for systemd cgroup should be named as "xxx.slice"
if len(hostConfig.CgroupParent) <= 6 || !strings.HasSuffix(hostConfig.CgroupParent, ".slice") {
return warnings, fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"")
}
}
if hostConfig.Runtime == "" {
hostConfig.Runtime = daemon.configStore.GetDefaultRuntimeName()
}
if rt := daemon.configStore.GetRuntime(hostConfig.Runtime); rt == nil {
return warnings, fmt.Errorf("Unknown runtime specified %s", hostConfig.Runtime)
}
parser := volumemounts.NewParser(runtime.GOOS)
for dest := range hostConfig.Tmpfs {
if err := parser.ValidateTmpfsMountDestination(dest); err != nil {
return warnings, err
}
}
if !hostConfig.CgroupnsMode.Valid() {
return warnings, fmt.Errorf("invalid cgroup namespace mode: %v", hostConfig.CgroupnsMode)
}
if hostConfig.CgroupnsMode.IsPrivate() {
if !sysInfo.CgroupNamespaces {
warnings = append(warnings, "Your kernel does not support cgroup namespaces. Cgroup namespace setting discarded.")
}
}
return warnings, nil
}
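// loadRuntimes regenerates the wrapper scripts for the runtimes configured on the daemon.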
func (daemon *Daemon) loadRuntimes() error {
return daemon.initRuntimes(daemon.configStore.Runtimes)
}
func (daemon *Daemon) initRuntimes(runtimes map[string]types.Runtime) (err error) {
runtimeDir := filepath.Join(daemon.configStore.Root, "runtimes")
// Remove old temp directory if any
os.RemoveAll(runtimeDir + "-old")
tmpDir, err := ioutils.TempDir(daemon.configStore.Root, "gen-runtimes")
if err != nil {
return errors.Wrap(err, "failed to get temp dir to generate runtime scripts")
}
defer func() {
if err != nil {
if err1 := os.RemoveAll(tmpDir); err1 != nil {
logrus.WithError(err1).WithField("dir", tmpDir).
Warn("failed to remove tmp dir")
}
return
}
if err = os.Rename(runtimeDir, runtimeDir+"-old"); err != nil {
return
}
if err = os.Rename(tmpDir, runtimeDir); err != nil {
err = errors.Wrap(err, "failed to setup runtimes dir, new containers may not start")
return
}
if err = os.RemoveAll(runtimeDir + "-old"); err != nil {
logrus.WithError(err).WithField("dir", tmpDir).
Warn("failed to remove old runtimes dir")
}
}()
for name, rt := range runtimes {
if len(rt.Args) == 0 {
continue
}
script := filepath.Join(tmpDir, name)
content := fmt.Sprintf("#!/bin/sh\n%s %s $@\n", rt.Path, strings.Join(rt.Args, " "))
if err := ioutil.WriteFile(script, []byte(content), 0700); err != nil {
return err
}
}
return nil
}
// verifyDaemonSettings performs validation of daemon config struct
func verifyDaemonSettings(conf *config.Config) error {
if conf.ContainerdNamespace == conf.ContainerdPluginNamespace {
return errors.New("containers namespace and plugins namespace cannot be the same")
}
// Check for mutually incompatible config options
if conf.BridgeConfig.Iface != "" && conf.BridgeConfig.IP != "" {
return fmt.Errorf("You specified -b & --bip, mutually exclusive options. Please specify only one")
}
if !conf.BridgeConfig.EnableIPTables && !conf.BridgeConfig.InterContainerCommunication {
return fmt.Errorf("You specified --iptables=false with --icc=false. ICC=false uses iptables to function. Please set --icc or --iptables to true")
}
if !conf.BridgeConfig.EnableIPTables && conf.BridgeConfig.EnableIPMasq {
conf.BridgeConfig.EnableIPMasq = false
}
if err := VerifyCgroupDriver(conf); err != nil {
return err
}
if conf.CgroupParent != "" && UsingSystemd(conf) {
if len(conf.CgroupParent) <= 6 || !strings.HasSuffix(conf.CgroupParent, ".slice") {
return fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"")
}
}
if conf.Rootless && UsingSystemd(conf) && !cgroups.IsCgroup2UnifiedMode() {
return fmt.Errorf("exec-opt native.cgroupdriver=systemd requires cgroup v2 for rootless mode")
}
if conf.DefaultRuntime == "" {
conf.DefaultRuntime = config.StockRuntimeName
}
if conf.Runtimes == nil {
conf.Runtimes = make(map[string]types.Runtime)
}
conf.Runtimes[config.StockRuntimeName] = types.Runtime{Path: DefaultRuntimeName}
return nil
}
// checkSystem validates platform-specific requirements
func checkSystem() error {
return checkKernel()
}
// configureMaxThreads sets the Go runtime max threads threshold
// which is 90% of the kernel setting from /proc/sys/kernel/threads-max
func configureMaxThreads(config *config.Config) error {
mt, err := ioutil.ReadFile("/proc/sys/kernel/threads-max")
if err != nil {
return err
}
mtint, err := strconv.Atoi(strings.TrimSpace(string(mt)))
if err != nil {
return err
}
maxThreads := (mtint / 100) * 90
debug.SetMaxThreads(maxThreads)
logrus.Debugf("Golang's threads limit set to %d", maxThreads)
return nil
}
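// overlaySupportsSelinux scans /proc/kallsyms for the security_inode_copy_up hook,
// which indicates that the kernel supports SELinux labels on overlay filesystems.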
func overlaySupportsSelinux() (bool, error) {
f, err := os.Open("/proc/kallsyms")
if err != nil {
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
defer f.Close()
s := bufio.NewScanner(f)
for s.Scan() {
if strings.HasSuffix(s.Text(), " security_inode_copy_up") {
return true, nil
}
}
return false, s.Err()
}
// configureKernelSecuritySupport configures and validates security support for the kernel
func configureKernelSecuritySupport(config *config.Config, driverName string) error {
if config.EnableSelinuxSupport {
if !selinuxEnabled() {
logrus.Warn("Docker could not enable SELinux on the host system")
return nil
}
if driverName == "overlay" || driverName == "overlay2" {
// If driver is overlay or overlay2, make sure kernel
// supports selinux with overlay.
supported, err := overlaySupportsSelinux()
if err != nil {
return err
}
if !supported {
logrus.Warnf("SELinux is not supported with the %v graph driver on this kernel", driverName)
}
}
} else {
selinuxSetDisabled()
}
return nil
}
func (daemon *Daemon) initNetworkController(config *config.Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) {
netOptions, err := daemon.networkOptions(config, daemon.PluginStore, activeSandboxes)
if err != nil {
return nil, err
}
controller, err := libnetwork.New(netOptions...)
if err != nil {
return nil, fmt.Errorf("error obtaining controller instance: %v", err)
}
if len(activeSandboxes) > 0 {
logrus.Info("There are old running containers, the network config will not take affect")
return controller, nil
}
// Initialize default network on "null"
if n, _ := controller.NetworkByName("none"); n == nil {
if _, err := controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(true)); err != nil {
return nil, fmt.Errorf("Error creating default \"null\" network: %v", err)
}
}
// Initialize default network on "host"
if n, _ := controller.NetworkByName("host"); n == nil {
if _, err := controller.NewNetwork("host", "host", "", libnetwork.NetworkOptionPersist(true)); err != nil {
return nil, fmt.Errorf("Error creating default \"host\" network: %v", err)
}
}
// Clear stale bridge network
if n, err := controller.NetworkByName("bridge"); err == nil {
if err = n.Delete(); err != nil {
return nil, fmt.Errorf("could not delete the default bridge network: %v", err)
}
if len(config.NetworkConfig.DefaultAddressPools.Value()) > 0 && !daemon.configStore.LiveRestoreEnabled {
removeDefaultBridgeInterface()
}
}
if !config.DisableBridge {
// Initialize default driver "bridge"
if err := initBridgeDriver(controller, config); err != nil {
return nil, err
}
} else {
removeDefaultBridgeInterface()
}
// Set HostGatewayIP to the default bridge's IP if it is empty
if daemon.configStore.HostGatewayIP == nil && controller != nil {
if n, err := controller.NetworkByName("bridge"); err == nil {
v4Info, v6Info := n.Info().IpamInfo()
var gateway net.IP
if len(v4Info) > 0 {
gateway = v4Info[0].Gateway.IP
} else if len(v6Info) > 0 {
gateway = v6Info[0].Gateway.IP
}
daemon.configStore.HostGatewayIP = gateway
}
}
return controller, nil
}
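// driverOptions builds the libnetwork bridge driver options from the daemon's bridge configuration.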
func driverOptions(config *config.Config) []nwconfig.Option {
bridgeConfig := options.Generic{
"EnableIPForwarding": config.BridgeConfig.EnableIPForward,
"EnableIPTables": config.BridgeConfig.EnableIPTables,
"EnableUserlandProxy": config.BridgeConfig.EnableUserlandProxy,
"UserlandProxyPath": config.BridgeConfig.UserlandProxyPath}
bridgeOption := options.Generic{netlabel.GenericData: bridgeConfig}
dOptions := []nwconfig.Option{}
dOptions = append(dOptions, nwconfig.OptionDriverConfig("bridge", bridgeOption))
return dOptions
}
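// initBridgeDriver creates the default "bridge" network, deriving its IPAM configuration
// from the daemon's bridge settings.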
func initBridgeDriver(controller libnetwork.NetworkController, config *config.Config) error {
bridgeName := bridge.DefaultBridgeName
if config.BridgeConfig.Iface != "" {
bridgeName = config.BridgeConfig.Iface
}
netOption := map[string]string{
bridge.BridgeName: bridgeName,
bridge.DefaultBridge: strconv.FormatBool(true),
netlabel.DriverMTU: strconv.Itoa(config.Mtu),
bridge.EnableIPMasquerade: strconv.FormatBool(config.BridgeConfig.EnableIPMasq),
bridge.EnableICC: strconv.FormatBool(config.BridgeConfig.InterContainerCommunication),
}
// --ip processing
if config.BridgeConfig.DefaultIP != nil {
netOption[bridge.DefaultBindingIP] = config.BridgeConfig.DefaultIP.String()
}
ipamV4Conf := &libnetwork.IpamConf{AuxAddresses: make(map[string]string)}
nwList, nw6List, err := netutils.ElectInterfaceAddresses(bridgeName)
if err != nil {
return errors.Wrap(err, "list bridge addresses failed")
}
nw := nwList[0]
if len(nwList) > 1 && config.BridgeConfig.FixedCIDR != "" {
_, fCIDR, err := net.ParseCIDR(config.BridgeConfig.FixedCIDR)
if err != nil {
return errors.Wrap(err, "parse CIDR failed")
}
// Iterate through in case there are multiple addresses for the bridge
for _, entry := range nwList {
if fCIDR.Contains(entry.IP) {
nw = entry
break
}
}
}
ipamV4Conf.PreferredPool = lntypes.GetIPNetCanonical(nw).String()
hip, _ := lntypes.GetHostPartIP(nw.IP, nw.Mask)
if hip.IsGlobalUnicast() {
ipamV4Conf.Gateway = nw.IP.String()
}
if config.BridgeConfig.IP != "" {
ip, ipNet, err := net.ParseCIDR(config.BridgeConfig.IP)
if err != nil {
return err
}
ipamV4Conf.PreferredPool = ipNet.String()
ipamV4Conf.Gateway = ip.String()
} else if bridgeName == bridge.DefaultBridgeName && ipamV4Conf.PreferredPool != "" {
logrus.Infof("Default bridge (%s) is assigned with an IP address %s. Daemon option --bip can be used to set a preferred IP address", bridgeName, ipamV4Conf.PreferredPool)
}
if config.BridgeConfig.FixedCIDR != "" {
_, fCIDR, err := net.ParseCIDR(config.BridgeConfig.FixedCIDR)
if err != nil {
return err
}
ipamV4Conf.SubPool = fCIDR.String()
}
if config.BridgeConfig.DefaultGatewayIPv4 != nil {
ipamV4Conf.AuxAddresses["DefaultGatewayIPv4"] = config.BridgeConfig.DefaultGatewayIPv4.String()
}
var (
deferIPv6Alloc bool
ipamV6Conf *libnetwork.IpamConf
)
if config.BridgeConfig.EnableIPv6 && config.BridgeConfig.FixedCIDRv6 == "" {
return errdefs.InvalidParameter(errors.New("IPv6 is enabled for the default bridge, but no subnet is configured. Specify an IPv6 subnet using --fixed-cidr-v6"))
} else if config.BridgeConfig.FixedCIDRv6 != "" {
_, fCIDRv6, err := net.ParseCIDR(config.BridgeConfig.FixedCIDRv6)
if err != nil {
return err
}
// In case user has specified the daemon flag --fixed-cidr-v6 and the passed network has
// at least 48 host bits, we need to guarantee the current behavior where the containers'
// IPv6 addresses will be constructed based on the containers' interface MAC address.
// We do so by telling libnetwork to defer the IPv6 address allocation for the endpoints
// on this network until after the driver has created the endpoint and returned the
// constructed address. Libnetwork will then reserve this address with the ipam driver.
ones, _ := fCIDRv6.Mask.Size()
deferIPv6Alloc = ones <= 80
ipamV6Conf = &libnetwork.IpamConf{
AuxAddresses: make(map[string]string),
PreferredPool: fCIDRv6.String(),
}
// In case the --fixed-cidr-v6 is specified and the current docker0 bridge IPv6
// address belongs to the same network, we need to inform libnetwork about it, so
// that it can be reserved with IPAM and it will not be given away to somebody else
for _, nw6 := range nw6List {
if fCIDRv6.Contains(nw6.IP) {
ipamV6Conf.Gateway = nw6.IP.String()
break
}
}
}
if config.BridgeConfig.DefaultGatewayIPv6 != nil {
if ipamV6Conf == nil {
ipamV6Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)}
}
ipamV6Conf.AuxAddresses["DefaultGatewayIPv6"] = config.BridgeConfig.DefaultGatewayIPv6.String()
}
v4Conf := []*libnetwork.IpamConf{ipamV4Conf}
v6Conf := []*libnetwork.IpamConf{}
if ipamV6Conf != nil {
v6Conf = append(v6Conf, ipamV6Conf)
}
// Initialize default network on "bridge" with the same name
_, err = controller.NewNetwork("bridge", "bridge", "",
libnetwork.NetworkOptionEnableIPv6(config.BridgeConfig.EnableIPv6),
libnetwork.NetworkOptionDriverOpts(netOption),
libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil),
libnetwork.NetworkOptionDeferIPv6Alloc(deferIPv6Alloc))
if err != nil {
return fmt.Errorf("Error creating default \"bridge\" network: %v", err)
}
return nil
}
// Remove default bridge interface if present (--bridge=none use case)
func removeDefaultBridgeInterface() {
if lnk, err := netlink.LinkByName(bridge.DefaultBridgeName); err == nil {
if err := netlink.LinkDel(lnk); err != nil {
logrus.Warnf("Failed to remove bridge interface (%s): %v", bridge.DefaultBridgeName, err)
}
}
}
func setupInitLayer(idMapping *idtools.IdentityMapping) func(containerfs.ContainerFS) error {
return func(initPath containerfs.ContainerFS) error {
return initlayer.Setup(initPath, idMapping.RootPair())
}
}
// Parse the remapped root (user namespace) option, which can be one of:
// username - valid username from /etc/passwd
// username:groupname - valid username; valid groupname from /etc/group
// uid - 32-bit unsigned int valid Linux UID value
// uid:gid - uid value; 32-bit unsigned int Linux GID value
//
// If no groupname is specified, and a username is specified, an attempt
// will be made to lookup a gid for that username as a groupname
//
// If names are used, they are verified to exist in passwd/group
func parseRemappedRoot(usergrp string) (string, string, error) {
var (
userID, groupID int
username, groupname string
)
idparts := strings.Split(usergrp, ":")
if len(idparts) > 2 {
return "", "", fmt.Errorf("Invalid user/group specification in --userns-remap: %q", usergrp)
}
if uid, err := strconv.ParseInt(idparts[0], 10, 32); err == nil {
// must be a uid; take it as valid
userID = int(uid)
luser, err := idtools.LookupUID(userID)
if err != nil {
return "", "", fmt.Errorf("Uid %d has no entry in /etc/passwd: %v", userID, err)
}
username = luser.Name
if len(idparts) == 1 {
// if the uid was numeric and no gid was specified, take the uid as the gid
groupID = userID
lgrp, err := idtools.LookupGID(groupID)
if err != nil {
return "", "", fmt.Errorf("Gid %d has no entry in /etc/group: %v", groupID, err)
}
groupname = lgrp.Name
}
} else {
lookupName := idparts[0]
// special case: if the user specified "default", they want Docker to create or
// use (after creation) the "dockremap" user/group for root remapping
if lookupName == defaultIDSpecifier {
lookupName = defaultRemappedID
}
luser, err := idtools.LookupUser(lookupName)
if err != nil && idparts[0] != defaultIDSpecifier {
// error if the name requested isn't the special "dockremap" ID
return "", "", fmt.Errorf("Error during uid lookup for %q: %v", lookupName, err)
} else if err != nil {
// special case-- if the username == "default", then we have been asked
// to create a new entry pair in /etc/{passwd,group} for which the /etc/sub{uid,gid}
// ranges will be used for the user and group mappings in user namespaced containers
_, _, err := idtools.AddNamespaceRangesUser(defaultRemappedID)
if err == nil {
return defaultRemappedID, defaultRemappedID, nil
}
return "", "", fmt.Errorf("Error during %q user creation: %v", defaultRemappedID, err)
}
username = luser.Name
if len(idparts) == 1 {
// we only have a string username, and no group specified; look up gid from username as group
group, err := idtools.LookupGroup(lookupName)
if err != nil {
return "", "", fmt.Errorf("Error during gid lookup for %q: %v", lookupName, err)
}
groupname = group.Name
}
}
if len(idparts) == 2 {
// groupname or gid is separately specified and must be resolved
// to an unsigned 32-bit gid
if gid, err := strconv.ParseInt(idparts[1], 10, 32); err == nil {
// must be a gid, take it as valid
groupID = int(gid)
lgrp, err := idtools.LookupGID(groupID)
if err != nil {
return "", "", fmt.Errorf("Gid %d has no entry in /etc/passwd: %v", groupID, err)
}
groupname = lgrp.Name
} else {
// not a number; attempt a lookup
if _, err := idtools.LookupGroup(idparts[1]); err != nil {
return "", "", fmt.Errorf("Error during groupname lookup for %q: %v", idparts[1], err)
}
groupname = idparts[1]
}
}
return username, groupname, nil
}
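// setupRemappedRoot resolves the --userns-remap daemon option into the identity mapping
// used for user-namespaced containers.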
func setupRemappedRoot(config *config.Config) (*idtools.IdentityMapping, error) {
if runtime.GOOS != "linux" && config.RemappedRoot != "" {
return nil, fmt.Errorf("User namespaces are only supported on Linux")
}
// if the daemon was started with remapped root option, parse
// the config option to the int uid,gid values
if config.RemappedRoot != "" {
username, groupname, err := parseRemappedRoot(config.RemappedRoot)
if err != nil {
return nil, err
}
if username == "root" {
// Cannot setup user namespaces with a 1-to-1 mapping; "--root=0:0" is a no-op
// effectively
logrus.Warn("User namespaces: root cannot be remapped with itself; user namespaces are OFF")
return &idtools.IdentityMapping{}, nil
}
logrus.Infof("User namespaces: ID ranges will be mapped to subuid/subgid ranges of: %s:%s", username, groupname)
// update remapped root setting now that we have resolved them to actual names
config.RemappedRoot = fmt.Sprintf("%s:%s", username, groupname)
// try with username:groupname, uid:groupname, username:gid, uid:gid,
// but keep the original error message (err)
mappings, err := idtools.NewIdentityMapping(username, groupname)
if err == nil {
return mappings, nil
}
user, lookupErr := idtools.LookupUser(username)
if lookupErr != nil {
return nil, errors.Wrap(err, "Can't create ID mappings")
}
logrus.Infof("Can't create ID mappings with username:groupname %s:%s, try uid:groupname %d:%s", username, groupname, user.Uid, groupname)
mappings, lookupErr = idtools.NewIdentityMapping(fmt.Sprintf("%d", user.Uid), groupname)
if lookupErr == nil {
return mappings, nil
}
logrus.Infof("Can't create ID mappings with uid:groupname %d:%s, try username:gid %s:%d", user.Uid, groupname, username, user.Gid)
mappings, lookupErr = idtools.NewIdentityMapping(username, fmt.Sprintf("%d", user.Gid))
if lookupErr == nil {
return mappings, nil
}
logrus.Infof("Can't create ID mappings with username:gid %s:%d, try uid:gid %d:%d", username, user.Gid, user.Uid, user.Gid)
mappings, lookupErr = idtools.NewIdentityMapping(fmt.Sprintf("%d", user.Uid), fmt.Sprintf("%d", user.Gid))
if lookupErr == nil {
return mappings, nil
}
return nil, errors.Wrap(err, "Can't create ID mappings")
}
return &idtools.IdentityMapping{}, nil
}
func setupDaemonRoot(config *config.Config, rootDir string, rootIdentity idtools.Identity) error {
config.Root = rootDir
// the docker root metadata directory needs to have execute permissions for all users (g+x,o+x)
// so that syscalls executing as non-root, operating on subdirectories of the graph root
// (e.g. mounted layers of a container) can traverse this path.
// The user namespace support will create subdirectories for the remapped root host uid:gid
// pair owned by that same uid:gid pair for proper write access to those needed metadata and
// layer content subtrees.
if _, err := os.Stat(rootDir); err == nil {
// root currently exists; verify the access bits are correct by setting them
if err = os.Chmod(rootDir, 0711); err != nil {
return err
}
} else if os.IsNotExist(err) {
// no root exists yet, create it 0711 with root:root ownership
if err := os.MkdirAll(rootDir, 0711); err != nil {
return err
}
}
// if user namespaces are enabled we will create a subtree underneath the specified root
// with any/all specified remapped root uid/gid options on the daemon creating
// a new subdirectory with ownership set to the remapped uid/gid (so as to allow
// `chdir()` to work for containers namespaced to that uid/gid)
if config.RemappedRoot != "" {
config.Root = filepath.Join(rootDir, fmt.Sprintf("%d.%d", rootIdentity.UID, rootIdentity.GID))
logrus.Debugf("Creating user namespaced daemon root: %s", config.Root)
// Create the root directory if it doesn't exist
if err := idtools.MkdirAllAndChown(config.Root, 0700, rootIdentity); err != nil {
return fmt.Errorf("Cannot create daemon root: %s: %v", config.Root, err)
}
// we also need to verify that any pre-existing directories in the path to
// the graphroot won't block access to remapped root--if any pre-existing directory
// has strict permissions that don't allow "x", container start will fail, so
// better to warn and fail now
dirPath := config.Root
for {
dirPath = filepath.Dir(dirPath)
if dirPath == "/" {
break
}
if !idtools.CanAccess(dirPath, rootIdentity) {
return fmt.Errorf("a subdirectory in your graphroot path (%s) restricts access to the remapped root uid/gid; please fix by allowing 'o+x' permissions on existing directories", config.Root)
}
}
}
if err := setupDaemonRootPropagation(config); err != nil {
logrus.WithError(err).WithField("dir", config.Root).Warn("Error while setting daemon root propagation, this is not generally critical but may cause some functionality to not work or fallback to less desirable behavior")
}
return nil
}
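// setupDaemonRootPropagation ensures the daemon root lives on a mount with shared (or slave)
// propagation, remounting it as shared if needed, and drops a marker file when the resulting
// mount has to be unmounted again on shutdown.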
func setupDaemonRootPropagation(cfg *config.Config) error {
rootParentMount, mountOptions, err := getSourceMount(cfg.Root)
if err != nil {
return errors.Wrap(err, "error getting daemon root's parent mount")
}
var cleanupOldFile bool
cleanupFile := getUnmountOnShutdownPath(cfg)
defer func() {
if !cleanupOldFile {
return
}
if err := os.Remove(cleanupFile); err != nil && !os.IsNotExist(err) {
logrus.WithError(err).WithField("file", cleanupFile).Warn("could not clean up old root propagation unmount file")
}
}()
if hasMountInfoOption(mountOptions, sharedPropagationOption, slavePropagationOption) {
cleanupOldFile = true
return nil
}
if err := mount.MakeShared(cfg.Root); err != nil {
return errors.Wrap(err, "could not setup daemon root propagation to shared")
}
// check the case where this may have already been a mount to itself.
// If so then the daemon only performed a remount and should not try to unmount this later.
if rootParentMount == cfg.Root {
cleanupOldFile = true
return nil
}
if err := os.MkdirAll(filepath.Dir(cleanupFile), 0700); err != nil {
return errors.Wrap(err, "error creating dir to store mount cleanup file")
}
if err := ioutil.WriteFile(cleanupFile, nil, 0600); err != nil {
return errors.Wrap(err, "error writing file to signal mount cleanup on shutdown")
}
return nil
}
// getUnmountOnShutdownPath generates the path of the file used to signal to the daemon
// that the daemon root should be unmounted on shutdown.
func getUnmountOnShutdownPath(config *config.Config) string {
return filepath.Join(config.ExecRoot, "unmount-on-shutdown")
}
// registerLinks writes the links to a file.
func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error {
if hostConfig == nil || hostConfig.NetworkMode.IsUserDefined() {
return nil
}
for _, l := range hostConfig.Links {
name, alias, err := opts.ParseLink(l)
if err != nil {
return err
}
child, err := daemon.GetContainer(name)
if err != nil {
if errdefs.IsNotFound(err) {
// Trying to link to a non-existing container is not valid, and
// should return an "invalid parameter" error. Returning a "not
// found" error here would make the client report the container's
// image could not be found (see moby/moby#39823)
err = errdefs.InvalidParameter(err)
}
return errors.Wrapf(err, "could not get container for %s", name)
}
for child.HostConfig.NetworkMode.IsContainer() {
parts := strings.SplitN(string(child.HostConfig.NetworkMode), ":", 2)
child, err = daemon.GetContainer(parts[1])
if err != nil {
if errdefs.IsNotFound(err) {
// Trying to link to a non-existing container is not valid, and
// should return an "invalid parameter" error. Returning a "not
// found" error here would make the client report the container's
// image could not be found (see moby/moby#39823)
err = errdefs.InvalidParameter(err)
}
return errors.Wrapf(err, "Could not get container for %s", parts[1])
}
}
if child.HostConfig.NetworkMode.IsHost() {
return runconfig.ErrConflictHostNetworkAndLinks
}
if err := daemon.registerLink(container, child, alias); err != nil {
return err
}
}
// After we load all the links into the daemon
// set them to nil on the hostconfig
_, err := container.WriteHostConfig()
return err
}
// conditionalMountOnStart is a platform specific helper function during the
// container start to call mount.
func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error {
return daemon.Mount(container)
}
// conditionalUnmountOnCleanup is a platform specific helper function called
// during the cleanup of a container to unmount.
func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error {
return daemon.Unmount(container)
}
func copyBlkioEntry(entries []*statsV1.BlkIOEntry) []types.BlkioStatEntry {
out := make([]types.BlkioStatEntry, len(entries))
for i, re := range entries {
out[i] = types.BlkioStatEntry{
Major: re.Major,
Minor: re.Minor,
Op: re.Op,
Value: re.Value,
}
}
return out
}
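// stats collects raw container metrics from containerd and converts them into the API stats
// format, handling both cgroup v1 and v2 metric types.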
func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) {
if !c.IsRunning() {
return nil, errNotRunning(c.ID)
}
cs, err := daemon.containerd.Stats(context.Background(), c.ID)
if err != nil {
if strings.Contains(err.Error(), "container not found") {
return nil, containerNotFound(c.ID)
}
return nil, err
}
s := &types.StatsJSON{}
s.Read = cs.Read
stats := cs.Metrics
switch t := stats.(type) {
case *statsV1.Metrics:
return daemon.statsV1(s, t)
case *statsV2.Metrics:
return daemon.statsV2(s, t)
default:
return nil, errors.Errorf("unexpected type of metrics %+v", t)
}
}
func (daemon *Daemon) statsV1(s *types.StatsJSON, stats *statsV1.Metrics) (*types.StatsJSON, error) {
if stats.Blkio != nil {
s.BlkioStats = types.BlkioStats{
IoServiceBytesRecursive: copyBlkioEntry(stats.Blkio.IoServiceBytesRecursive),
IoServicedRecursive: copyBlkioEntry(stats.Blkio.IoServicedRecursive),
IoQueuedRecursive: copyBlkioEntry(stats.Blkio.IoQueuedRecursive),
IoServiceTimeRecursive: copyBlkioEntry(stats.Blkio.IoServiceTimeRecursive),
IoWaitTimeRecursive: copyBlkioEntry(stats.Blkio.IoWaitTimeRecursive),
IoMergedRecursive: copyBlkioEntry(stats.Blkio.IoMergedRecursive),
IoTimeRecursive: copyBlkioEntry(stats.Blkio.IoTimeRecursive),
SectorsRecursive: copyBlkioEntry(stats.Blkio.SectorsRecursive),
}
}
if stats.CPU != nil {
s.CPUStats = types.CPUStats{
CPUUsage: types.CPUUsage{
TotalUsage: stats.CPU.Usage.Total,
PercpuUsage: stats.CPU.Usage.PerCPU,
UsageInKernelmode: stats.CPU.Usage.Kernel,
UsageInUsermode: stats.CPU.Usage.User,
},
ThrottlingData: types.ThrottlingData{
Periods: stats.CPU.Throttling.Periods,
ThrottledPeriods: stats.CPU.Throttling.ThrottledPeriods,
ThrottledTime: stats.CPU.Throttling.ThrottledTime,
},
}
}
if stats.Memory != nil {
raw := make(map[string]uint64)
raw["cache"] = stats.Memory.Cache
raw["rss"] = stats.Memory.RSS
raw["rss_huge"] = stats.Memory.RSSHuge
raw["mapped_file"] = stats.Memory.MappedFile
raw["dirty"] = stats.Memory.Dirty
raw["writeback"] = stats.Memory.Writeback
raw["pgpgin"] = stats.Memory.PgPgIn
raw["pgpgout"] = stats.Memory.PgPgOut
raw["pgfault"] = stats.Memory.PgFault
raw["pgmajfault"] = stats.Memory.PgMajFault
raw["inactive_anon"] = stats.Memory.InactiveAnon
raw["active_anon"] = stats.Memory.ActiveAnon
raw["inactive_file"] = stats.Memory.InactiveFile
raw["active_file"] = stats.Memory.ActiveFile
raw["unevictable"] = stats.Memory.Unevictable
raw["hierarchical_memory_limit"] = stats.Memory.HierarchicalMemoryLimit
raw["hierarchical_memsw_limit"] = stats.Memory.HierarchicalSwapLimit
raw["total_cache"] = stats.Memory.TotalCache
raw["total_rss"] = stats.Memory.TotalRSS
raw["total_rss_huge"] = stats.Memory.TotalRSSHuge
raw["total_mapped_file"] = stats.Memory.TotalMappedFile
raw["total_dirty"] = stats.Memory.TotalDirty
raw["total_writeback"] = stats.Memory.TotalWriteback
raw["total_pgpgin"] = stats.Memory.TotalPgPgIn
raw["total_pgpgout"] = stats.Memory.TotalPgPgOut
raw["total_pgfault"] = stats.Memory.TotalPgFault
raw["total_pgmajfault"] = stats.Memory.TotalPgMajFault
raw["total_inactive_anon"] = stats.Memory.TotalInactiveAnon
raw["total_active_anon"] = stats.Memory.TotalActiveAnon
raw["total_inactive_file"] = stats.Memory.TotalInactiveFile
raw["total_active_file"] = stats.Memory.TotalActiveFile
raw["total_unevictable"] = stats.Memory.TotalUnevictable
if stats.Memory.Usage != nil {
s.MemoryStats = types.MemoryStats{
Stats: raw,
Usage: stats.Memory.Usage.Usage,
MaxUsage: stats.Memory.Usage.Max,
Limit: stats.Memory.Usage.Limit,
Failcnt: stats.Memory.Usage.Failcnt,
}
} else {
s.MemoryStats = types.MemoryStats{
Stats: raw,
}
}
// if the container does not set memory limit, use the machineMemory
if s.MemoryStats.Limit > daemon.machineMemory && daemon.machineMemory > 0 {
s.MemoryStats.Limit = daemon.machineMemory
}
}
if stats.Pids != nil {
s.PidsStats = types.PidsStats{
Current: stats.Pids.Current,
Limit: stats.Pids.Limit,
}
}
return s, nil
}
func (daemon *Daemon) statsV2(s *types.StatsJSON, stats *statsV2.Metrics) (*types.StatsJSON, error) {
if stats.Io != nil {
var isbr []types.BlkioStatEntry
for _, re := range stats.Io.Usage {
isbr = append(isbr,
types.BlkioStatEntry{
Major: re.Major,
Minor: re.Minor,
Op: "read",
Value: re.Rbytes,
},
types.BlkioStatEntry{
Major: re.Major,
Minor: re.Minor,
Op: "write",
Value: re.Wbytes,
},
)
}
s.BlkioStats = types.BlkioStats{
IoServiceBytesRecursive: isbr,
// Other fields are unsupported
}
}
if stats.CPU != nil {
s.CPUStats = types.CPUStats{
CPUUsage: types.CPUUsage{
TotalUsage: stats.CPU.UsageUsec * 1000,
// PercpuUsage is not supported
UsageInKernelmode: stats.CPU.SystemUsec * 1000,
UsageInUsermode: stats.CPU.UserUsec * 1000,
},
ThrottlingData: types.ThrottlingData{
Periods: stats.CPU.NrPeriods,
ThrottledPeriods: stats.CPU.NrThrottled,
ThrottledTime: stats.CPU.ThrottledUsec * 1000,
},
}
}
if stats.Memory != nil {
raw := make(map[string]uint64)
raw["anon"] = stats.Memory.Anon
raw["file"] = stats.Memory.File
raw["kernel_stack"] = stats.Memory.KernelStack
raw["slab"] = stats.Memory.Slab
raw["sock"] = stats.Memory.Sock
raw["shmem"] = stats.Memory.Shmem
raw["file_mapped"] = stats.Memory.FileMapped
raw["file_dirty"] = stats.Memory.FileDirty
raw["file_writeback"] = stats.Memory.FileWriteback
raw["anon_thp"] = stats.Memory.AnonThp
raw["inactive_anon"] = stats.Memory.InactiveAnon
raw["active_anon"] = stats.Memory.ActiveAnon
raw["inactive_file"] = stats.Memory.InactiveFile
raw["active_file"] = stats.Memory.ActiveFile
raw["unevictable"] = stats.Memory.Unevictable
raw["slab_reclaimable"] = stats.Memory.SlabReclaimable
raw["slab_unreclaimable"] = stats.Memory.SlabUnreclaimable
raw["pgfault"] = stats.Memory.Pgfault
raw["pgmajfault"] = stats.Memory.Pgmajfault
raw["workingset_refault"] = stats.Memory.WorkingsetRefault
raw["workingset_activate"] = stats.Memory.WorkingsetActivate
raw["workingset_nodereclaim"] = stats.Memory.WorkingsetNodereclaim
raw["pgrefill"] = stats.Memory.Pgrefill
raw["pgscan"] = stats.Memory.Pgscan
raw["pgsteal"] = stats.Memory.Pgsteal
raw["pgactivate"] = stats.Memory.Pgactivate
raw["pgdeactivate"] = stats.Memory.Pgdeactivate
raw["pglazyfree"] = stats.Memory.Pglazyfree
raw["pglazyfreed"] = stats.Memory.Pglazyfreed
raw["thp_fault_alloc"] = stats.Memory.ThpFaultAlloc
raw["thp_collapse_alloc"] = stats.Memory.ThpCollapseAlloc
s.MemoryStats = types.MemoryStats{
// Stats is not compatible with v1
Stats: raw,
Usage: stats.Memory.Usage,
// MaxUsage is not supported
Limit: stats.Memory.UsageLimit,
// TODO: Failcnt
}
// if the container does not set memory limit, use the machineMemory
if s.MemoryStats.Limit > daemon.machineMemory && daemon.machineMemory > 0 {
s.MemoryStats.Limit = daemon.machineMemory
}
}
if stats.Pids != nil {
s.PidsStats = types.PidsStats{
Current: stats.Pids.Current,
Limit: stats.Pids.Limit,
}
}
return s, nil
}
// setDefaultIsolation determines the default isolation mode for the
// daemon to run in. This is only applicable on Windows
func (daemon *Daemon) setDefaultIsolation() error {
return nil
}
// setupDaemonProcess sets various settings for the daemon's process
func setupDaemonProcess(config *config.Config) error {
// setup the daemons oom_score_adj
if err := setupOOMScoreAdj(config.OOMScoreAdjust); err != nil {
return err
}
if err := setMayDetachMounts(); err != nil {
logrus.WithError(err).Warn("Could not set may_detach_mounts kernel parameter")
}
return nil
}
// This is used to allow removal of mountpoints that may be mounted in other
// namespaces on RHEL based kernels starting from RHEL 7.4.
// Without this setting, removals on these RHEL based kernels may fail with
// "device or resource busy".
// This setting is not available in upstream kernels as it is not configurable,
// but has been in the upstream kernels since 3.15.
func setMayDetachMounts() error {
f, err := os.OpenFile("/proc/sys/fs/may_detach_mounts", os.O_WRONLY, 0)
if err != nil {
if os.IsNotExist(err) {
return nil
}
return errors.Wrap(err, "error opening may_detach_mounts kernel config file")
}
defer f.Close()
_, err = f.WriteString("1")
if os.IsPermission(err) {
// Setting may_detach_mounts does not work in an
// unprivileged container. Ignore the error, but log
// it if we appear not to be in that situation.
if !rsystem.RunningInUserNS() {
logrus.Debugf("Permission denied writing %q to /proc/sys/fs/may_detach_mounts", "1")
}
return nil
}
return err
}
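// setupOOMScoreAdj writes the configured score to /proc/self/oom_score_adj for the daemon process.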
func setupOOMScoreAdj(score int) error {
f, err := os.OpenFile("/proc/self/oom_score_adj", os.O_WRONLY, 0)
if err != nil {
return err
}
defer f.Close()
stringScore := strconv.Itoa(score)
_, err = f.WriteString(stringScore)
if os.IsPermission(err) {
// Setting oom_score_adj does not work in an
// unprivileged container. Ignore the error, but log
// it if we appear not to be in that situation.
if !rsystem.RunningInUserNS() {
logrus.Debugf("Permission denied writing %q to /proc/self/oom_score_adj", stringScore)
}
return nil
}
return err
}
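// initCgroupsPath recursively creates the given cgroup path and applies the daemon's
// CPU real-time period and runtime settings (cgroup v1 only).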
func (daemon *Daemon) initCgroupsPath(path string) error {
if path == "/" || path == "." {
return nil
}
if daemon.configStore.CPURealtimePeriod == 0 && daemon.configStore.CPURealtimeRuntime == 0 {
return nil
}
if cgroups.IsCgroup2UnifiedMode() {
return fmt.Errorf("daemon-scoped cpu-rt-period and cpu-rt-runtime are not implemented for cgroup v2")
}
// Recursively create cgroup to ensure that the system and all parent cgroups have values set
// for the period and runtime as this limits what the children can be set to.
daemon.initCgroupsPath(filepath.Dir(path))
mnt, root, err := cgroups.FindCgroupMountpointAndRoot("", "cpu")
if err != nil {
return err
}
// When docker is run inside docker, the root is based on the host cgroup.
// Should this be handled in runc/libcontainer/cgroups ?
if strings.HasPrefix(root, "/docker/") {
root = "/"
}
path = filepath.Join(mnt, root, path)
sysInfo := daemon.RawSysInfo(true)
if err := maybeCreateCPURealTimeFile(sysInfo.CPURealtimePeriod, daemon.configStore.CPURealtimePeriod, "cpu.rt_period_us", path); err != nil {
return err
}
return maybeCreateCPURealTimeFile(sysInfo.CPURealtimeRuntime, daemon.configStore.CPURealtimeRuntime, "cpu.rt_runtime_us", path)
}
func maybeCreateCPURealTimeFile(sysinfoPresent bool, configValue int64, file string, path string) error {
if sysinfoPresent && configValue != 0 {
if err := os.MkdirAll(path, 0755); err != nil {
return err
}
if err := ioutil.WriteFile(filepath.Join(path, file), []byte(strconv.FormatInt(configValue, 10)), 0700); err != nil {
return err
}
}
return nil
}
func (daemon *Daemon) setupSeccompProfile() error {
if daemon.configStore.SeccompProfile != "" {
daemon.seccompProfilePath = daemon.configStore.SeccompProfile
b, err := ioutil.ReadFile(daemon.configStore.SeccompProfile)
if err != nil {
return fmt.Errorf("opening seccomp profile (%s) failed: %v", daemon.configStore.SeccompProfile, err)
}
daemon.seccompProfile = b
}
return nil
}
func (daemon *Daemon) useShimV2() bool {
return cgroups.IsCgroup2UnifiedMode()
}
// RawSysInfo returns *sysinfo.SysInfo.
func (daemon *Daemon) RawSysInfo(quiet bool) *sysinfo.SysInfo {
var opts []sysinfo.Opt
if daemon.getCgroupDriver() == cgroupSystemdDriver {
rootlesskitParentEUID := os.Getenv("ROOTLESSKIT_PARENT_EUID")
if rootlesskitParentEUID != "" {
groupPath := fmt.Sprintf("/user.slice/user-%s.slice", rootlesskitParentEUID)
opts = append(opts, sysinfo.WithCgroup2GroupPath(groupPath))
}
}
return sysinfo.New(quiet, opts...)
}
|
[
"\"DOCKER_NOWARN_KERNEL_VERSION\"",
"\"ROOTLESSKIT_PARENT_EUID\""
] |
[] |
[
"DOCKER_NOWARN_KERNEL_VERSION",
"ROOTLESSKIT_PARENT_EUID"
] |
[]
|
["DOCKER_NOWARN_KERNEL_VERSION", "ROOTLESSKIT_PARENT_EUID"]
|
go
| 2 | 0 | |
ulez.py
|
import argparse
import json
import requests
parser = argparse.ArgumentParser(description='Check if vehicle is ULEZ compliant using TfL Unified API')
parser.add_argument('-p', '--plate', metavar='AB12CDE',
help='enter UK licence plate without spaces (e.g. AB12CDE)', type=str, required=True)
parser.add_argument('-d', '--detail', help="request additional vehicle details", action='store_true')
args = parser.parse_args()
def check(plate, detail=False):
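# Queries the compliance API for the given plate and prints either the compliance flag
# or the full vehicle details when detail is True.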
# test compliant plate = 'DU64HSO'
# test non-compliant plate = 'GU57YCP'
url = f'https://82vqql5ek8.execute-api.eu-west-2.amazonaws.com/live/plate?vrm={plate}'
try:
    req = requests.get(url)
    req_json = req.json()
except (requests.exceptions.RequestException, ValueError):
    return print('Connection Error: Check API credentials and/or internet connection')
try:
if detail is True:
del req_json['$type']
del req_json['type']
st_json = json.dumps(req_json, indent=2)
return print(f'{st_json}')
else:
return print(f'"compliance": "{req_json["compliance"]}"')
except TypeError:
return print('Invalid Licence Plate')
if __name__ == "__main__":
check(args.plate, args.detail)
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
active_learning/al_classes.py
|
"""Base model classes for Active Learning pipelines
"""
import os
import time
from typing import List
from copy import deepcopy
import wandb
import torch
from torch.utils.data import DataLoader, SubsetRandomSampler, Subset
from active_learning.data import Collate
from active_learning.query_strats import get_query_strat
from active_learning.model_classes import get_model
from active_learning import utils
class ActiveLearner():
"""General class for model used to acquire data.
TODO this could inherit from EvalModel.
"""
def __init__(self, config: dict, num_classes: int, labelled_idx: List[int], unlabelled_idx: List[int]):
"""Params:
- config (dict): full experiment config
- num_classes (int): number of classes for classification (currently derived from dataset)
- labelled_idx (List(int)): indices of labelled pool examples (seeded)
- unlabelled_idx (List(int)): indices of unlabelled pool examples
"""
self.device = torch.device("cuda") if torch.cuda.is_available() \
else torch.device("cpu")
print(f"The model will run on the {self.device} device")
self.config = config
self.num_classes = num_classes
self.experiment_type = config["experiment"]["type"]
self.optim_config = config["acq_model"]["model_hypers"]["optim"]
self.collate = Collate(config["acq_model"])
# strategy
self.query_strat = get_query_strat(
strat_config=config["query_strategy"],
collate=self.collate,
batch_size=self.optim_config["batch_size"],
num_workers=self.optim_config["num_workers"],
device=self.device)
# initialise model and save initial weights
self.acq_model = None
self.init_weights = None
self.acq_model = self._init_net()
self.optimizer, self.scheduler, self.criterion = utils.init_optim(self.optim_config, self.acq_model)
# data
self.val_prop = self.config['dataset']['val_prop']
self.labelled_idx = labelled_idx
self.unlabelled_idx = unlabelled_idx # could just get via labelled pool
def _init_net(self):
"""Randomly initialise the acquisition model
"""
if self.acq_model is None:
self.acq_model = get_model(self.config["acq_model"], self.experiment_type, self.num_classes)
self.init_weights = deepcopy(self.acq_model.state_dict())
else:
# use cached init weights
self.acq_model.load_state_dict(self.init_weights)
self.acq_model.to(self.device)
return self.acq_model
def al_step(self, train_data, dev_data):
"""Carry out one iteration of Active Learning training:
- Use AL strategy to add newly labelled data to labelled pool
- train acq_model with labelled pool
Params:
- train_data (Dataset): full training pool
- dev_data (Dataset): full dev set
Returns:
- labelled_idx (list): indices of pool in the labelled set
- unlabelled_idx (list): indices of pool not in labelled set
- train_losses (List[float]): train loss at each epoch
- val_losses (List[float]): val loss at each epoch
- best_loss (float): best val loss achieved
- best_acc (float): best val acc achieved
"""
# # put on gpu again
# self.acq_model.to(self.device)
# get new indices of labelled and unlabelled pools
print("Choosing data to label...")
start_time = time.time()
self.labelled_idx, self.unlabelled_idx = self.query_strat.get_query(
self.acq_model,
train_data,
self.labelled_idx,
self.unlabelled_idx
)
print(f"Chosen new labels. Time taken: {time.time()-start_time} seconds")
# train on labelled pool only
train_loader = DataLoader(
train_data,
batch_size=self.optim_config["batch_size"],
sampler=SubsetRandomSampler(self.labelled_idx),
num_workers=self.optim_config["num_workers"],
collate_fn=self.collate
)
# Change val set size as a function of training set size.
val_subset = Subset(dev_data, list(range(int(len(self.labelled_idx)*self.val_prop))))
val_loader = DataLoader(
val_subset,
batch_size=self.optim_config["batch_size"],
shuffle=False,
num_workers=self.optim_config["num_workers"],
collate_fn=self.collate,
)
train_losses, val_losses, best_loss, best_acc = self.fit(train_loader, val_loader)
return self.labelled_idx, self.unlabelled_idx, train_losses, val_losses, best_loss, best_acc
def fit(self, train_loader, val_loader):
"""Train on current labelled pool and log training curves.
Params:
- train_loader (DataLoader): dataloader for batches from labelled pool
- val_loader (DataLoader): dataloader for batches from val dataset
Returns:
- train_losses (List[float]): train loss at each epoch
- val_losses (List[float]): val loss at each epoch
- best_loss (float): best val loss achieved
- best_acc (float): best val acc achieved
"""
# set up directories and logs for this training run
scratch_dir = os.getenv('SCRATCH_DIR', wandb.run.dir) # if given a scratch dir save models here
checkpoint_file = os.path.join(scratch_dir, "acq_model_ckpt.pth.tar")
best_file = os.path.join(scratch_dir, "acq_model_best.pth.tar")
train_losses = []
train_accs = []
val_losses = []
val_accs = []
# reinitialise network and optimizer
self.acq_model = self._init_net()
self.optimizer, self.scheduler, self.criterion = utils.init_optim(self.optim_config, self.acq_model)
# TODO optionally train online (without reinitialising)
# initial validation loss
best_loss, best_acc = utils.evaluate(
self.acq_model,
val_loader,
self.optimizer,
self.scheduler,
self.criterion,
self.device,
task="val"
)
best_epoch = 0
# initially save first as best
checkpoint_dict = {"epoch": best_epoch,
"state_dict": self.acq_model.state_dict(),
"best_loss": best_loss,
"optimizer": self.optimizer.state_dict(),
"scheduler": self.scheduler.state_dict(),
"config": self.config}
torch.save(checkpoint_dict, best_file)
# try, except to stop training partway through
try:
# training loop
print("\nFitting acquisition model...")
for epoch in range(self.optim_config["epochs"]):
# train
train_loss, train_acc = utils.evaluate(
self.acq_model,
train_loader,
self.optimizer,
self.scheduler,
self.criterion,
self.device,
task="train"
)
# validation
with torch.no_grad():
val_loss, val_acc = utils.evaluate(
self.acq_model,
val_loader,
self.optimizer,
self.scheduler,
self.criterion,
self.device,
task="val"
)
print(f"\nEpoch {epoch+1}/{self.optim_config['epochs']}: \ntrain/loss: {train_loss}, train/acc: {train_acc}"
f"\nval/loss: {val_loss}, val/acc: {val_acc}\n")
# TODO log other metrics on train and val set while training?
train_losses.append(train_loss)
train_accs.append(train_acc)
val_losses.append(val_loss)
val_accs.append(val_acc)
# model checkpointing
is_best = val_loss < best_loss
if is_best:
best_loss = val_loss
best_acc = val_acc
best_epoch = epoch
# save checkpoint if best
checkpoint_dict = {"epoch": epoch,
"state_dict": self.acq_model.state_dict(),
"best_loss": best_loss,
"optimizer": self.optimizer.state_dict(),
"scheduler": self.scheduler.state_dict(),
"config": self.config}
torch.save(checkpoint_dict, best_file)
# early stopping
if epoch - best_epoch > self.optim_config["patience"]:
break
except KeyboardInterrupt:
pass
# save last checkpoint
checkpoint_dict = {"epoch": epoch,
"state_dict": self.acq_model.state_dict(),
"best_loss": best_loss,
"optimizer": self.optimizer.state_dict(),
"scheduler": self.scheduler.state_dict(),
"config": self.config}
torch.save(checkpoint_dict, checkpoint_file)
return train_losses, val_losses, best_loss, best_acc
class EvalModel():
"""Generic evaluation model to wrap over neural networks to ease training and eval.
TODO this could be a base class of ActiveLearner. fit and _init_net methods are the same.
"""
def __init__(self, model_config: dict, num_classes: int, val_prop: float, experiment_type: str, model_id: int):
self.device = torch.device("cuda") if torch.cuda.is_available() \
else torch.device("cpu")
self.config = model_config
self.num_classes = num_classes
self.experiment_type = experiment_type
self.model_id = model_id
self.collate = Collate(model_config)
self.optim_config = self.config["model_hypers"]["optim"]
self.val_prop = val_prop
self.eval_model = None
self.init_weights = None
self.eval_model = self._init_net()
self.optimizer, self.scheduler, self.criterion = utils.init_optim(self.optim_config, self.eval_model)
def _init_net(self):
"""Randomly initialise the evaluation model
"""
if self.eval_model is None:
self.eval_model = get_model(self.config, self.experiment_type, self.num_classes)
self.init_weights = deepcopy(self.eval_model.state_dict())
else:
# use cached init weights
self.eval_model.load_state_dict(self.init_weights)
self.eval_model.to(self.device)
return self.eval_model
def fit(self, train_loader, val_loader):
"""Train on current labelled pool.
Params:
- train_loader (DataLoader): dataloader for batches from labelled pool
- val_loader (DataLoader): dataloader for batches from val dataset
Returns:
- train_losses (List[float]): train loss at each epoch
- val_losses (List[float]): val loss at each epoch
- best_loss (float): best val loss achieved
- best_acc (float): best val acc achieved
"""
# set up directories and logs for this training run
scratch_dir = os.getenv('SCRATCH_DIR', wandb.run.dir) # if given a scratch dir save models here
checkpoint_file = os.path.join(scratch_dir, f"eval_model_{self.model_id}_ckpt.pth.tar")
best_file = os.path.join(scratch_dir, f"eval_model_{self.model_id}_best.pth.tar")
train_losses = []
train_accs = []
val_losses = []
val_accs = []
# reinitialise network and optimizer
self.eval_model = self._init_net()
self.optimizer, self.scheduler, self.criterion = utils.init_optim(self.optim_config, self.eval_model)
best_loss, best_acc = utils.evaluate(
self.eval_model,
val_loader,
self.optimizer,
self.scheduler,
self.criterion,
self.device,
task="val"
)
best_epoch = 0
checkpoint_dict = {"epoch": best_epoch,
"state_dict": self.eval_model.state_dict(),
"best_loss": best_loss,
"optimizer": self.optimizer.state_dict(),
"scheduler": self.scheduler.state_dict(),
"config": self.config}
torch.save(checkpoint_dict, best_file)
try:
# training loop
print(f"\nFitting eval model {self.model_id}...\n")
for epoch in range(self.optim_config["epochs"]):
# train
train_loss, train_acc = utils.evaluate(
self.eval_model,
train_loader,
self.optimizer,
self.scheduler,
self.criterion,
self.device,
task="train"
)
# validation
with torch.no_grad():
val_loss, val_acc = utils.evaluate(
self.eval_model,
val_loader,
self.optimizer,
self.scheduler,
self.criterion,
self.device,
task="val"
)
print(f"\nEpoch {epoch+1}/{self.optim_config['epochs']}: \ntrain/loss: {train_loss}, train/acc: {train_acc}"
f"\nval/loss: {val_loss}, val/acc: {val_acc}\n")
train_losses.append(train_loss)
train_accs.append(train_acc)
val_losses.append(val_loss)
val_accs.append(val_acc)
# model checkpointing
is_best = val_loss < best_loss
if is_best:
best_loss = val_loss
best_acc = val_acc
best_epoch = epoch
# save checkpoint if best
checkpoint_dict = {"epoch": epoch,
"state_dict": self.eval_model.state_dict(),
"best_loss": best_loss,
"optimizer": self.optimizer.state_dict(),
"scheduler": self.scheduler.state_dict(),
"config": self.config}
torch.save(checkpoint_dict, best_file)
# if is_best or epoch == 0:
# shutil.copyfile(checkpoint_file, best_file)
# early stopping
if epoch - best_epoch > self.optim_config["patience"]:
break
except KeyboardInterrupt:
pass
# save last checkpoint
checkpoint_dict = {"epoch": epoch,
"state_dict": self.eval_model.state_dict(),
"best_loss": best_loss,
"optimizer": self.optimizer.state_dict(),
"scheduler": self.scheduler.state_dict(),
"config": self.config}
torch.save(checkpoint_dict, checkpoint_file)
return train_losses, val_losses, best_loss, best_acc
|
[] |
[] |
[
"SCRATCH_DIR"
] |
[]
|
["SCRATCH_DIR"]
|
python
| 1 | 0 | |
app/utils/ssh/auth.go
|
package ssh
import (
"errors"
"fmt"
"io/ioutil"
"net"
"os"
"strings"
"syscall"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/agent"
"golang.org/x/crypto/ssh/terminal"
)
//HasAgent reports whether the SSH agent is available
func HasAgent() bool {
authsock, ok := os.LookupEnv("SSH_AUTH_SOCK")
if !ok {
return false
}
dirent, err := os.Stat(authsock)
if err != nil {
	return false
}
if dirent.Mode()&os.ModeSocket == 0 {
	return false
}
return true
}
//passwordKeyboardInteractive returns an ssh.KeyboardInteractiveChallenge that simply sends
//back the password for all questions. (The question logging below is left commented out.)
func passwordKeyboardInteractive(password string) ssh.KeyboardInteractiveChallenge {
return func(user, instruction string, questions []string, echos []bool) ([]string, error) {
//log.Printf("Keyboard interactive challenge: ")
//log.Printf("-- User: %s", user)
//log.Printf("-- Instructions: %s", instruction)
//for i, question := range questions {
//log.Printf("-- Question %d: %s", i+1, question)
//}
//Just send the password back for all questions
answers := make([]string, len(questions))
for i := range answers {
answers[i] = password
}
return answers, nil
}
}
//AuthWithKeyboardPassword generates a keyboard-interactive password ssh.AuthMethod
func AuthWithKeyboardPassword(password string) (ssh.AuthMethod, error) {
return ssh.KeyboardInteractive(passwordKeyboardInteractive(password)), nil
}
//AuthWithPassword generates a password-based ssh.AuthMethod
func AuthWithPassword(password string) (ssh.AuthMethod, error) {
return ssh.Password(password), nil
}
//AuthWithAgent authenticates with the keys already loaded in the local ssh-agent
func AuthWithAgent() (ssh.AuthMethod, error) {
sock := os.Getenv("SSH_AUTH_SOCK")
if sock == "" {
//fmt.Println(errors.New("Agent Disabled"))
return nil, errors.New("Agent Disabled")
}
socks, err := net.Dial("unix", sock)
if err != nil {
fmt.Println(err)
return nil, err
}
//1. Return the result of signers function
agentClient := agent.NewClient(socks)
signers, err := agentClient.Signers()
if err != nil {
	return nil, err
}
return ssh.PublicKeys(signers...), nil
//2. Return signers function
//getSigners := agent.NewClient(socks).Signers
//return ssh.PublicKeysCallback(getSigners), nil
//3. Abbreviation
//if sshAgent, err := net.Dial("unix", os.Getenv("SSH_ AUTH_ SOCK")); err == nil {
//return ssh.PublicKeysCallback(agent.NewClient(sshAgent).Signers)
//}
//return nil
}
//AuthWithPrivateKeys loads multiple private key files (e.g. ~/.ssh/id_rsa); encrypted keys are decrypted with the given passphrase
func AuthWithPrivateKeys(keyFiles []string, passphrase string) (ssh.AuthMethod, error) {
var signers []ssh.Signer
for _, key := range keyFiles {
pemBytes, err := ioutil.ReadFile(key)
if err != nil {
	println(err.Error())
	continue
}
signer, err := ssh.ParsePrivateKey(pemBytes)
if err != nil {
	if strings.Contains(err.Error(), "cannot decode encrypted private keys") {
		if signer, err = ssh.ParsePrivateKeyWithPassphrase(pemBytes, []byte(passphrase)); err != nil {
			continue
		}
	} else {
		// skip keys that cannot be parsed at all
		println(err.Error())
		continue
	}
}
signers = append(signers, signer)
}
if signers == nil {
return nil, errors.New("WithPrivateKeys: no keyfiles input")
}
return ssh.PublicKeys(signers...), nil
}
//AuthWithPrivateKey loads a single private key file and automatically detects whether a passphrase is needed
func AuthWithPrivateKey(keyfile string, passphrase string) (ssh.AuthMethod, error) {
pemBytes, err := ioutil.ReadFile(keyfile)
if err != nil {
println(err.Error())
return nil, err
}
var signer ssh.Signer
signer, err = ssh.ParsePrivateKey(pemBytes)
if err != nil {
if strings.Contains(err.Error(), "cannot decode encrypted private keys") {
signer, err = ssh.ParsePrivateKeyWithPassphrase(pemBytes, []byte(passphrase))
if err == nil {
return ssh.PublicKeys(signer), nil
}
}
return nil, err
}
return ssh.PublicKeys(signer), nil
}
//AuthWithPrivateKeyString parses a private key supplied directly as a string
func AuthWithPrivateKeyString(key string, password string) (ssh.AuthMethod, error) {
var signer ssh.Signer
var err error
if password == "" {
signer, err = ssh.ParsePrivateKey([]byte(key))
} else {
signer, err = ssh.ParsePrivateKeyWithPassphrase([]byte(key), []byte(password))
}
if err != nil {
println(err.Error())
return nil, err
}
return ssh.PublicKeys(signer), nil
}
//AuthWithPrivateKeyTerminal reads an encrypted private key and prompts for the passphrase on the terminal
func AuthWithPrivateKeyTerminal(keyfile string) (ssh.AuthMethod, error) {
pemBytes, err := ioutil.ReadFile(keyfile)
if err != nil {
println(err.Error())
return nil, err
}
var signer ssh.Signer
signer, err = ssh.ParsePrivateKey(pemBytes)
if err != nil {
if strings.Contains(err.Error(), "cannot decode encrypted private keys") {
fmt.Fprintf(os.Stderr, "This SSH key is encrypted. Please enter passphrase for key '%s':", keyfile)
passphrase, err := terminal.ReadPassword(int(syscall.Stdin))
if err != nil {
//println(err.Error())
return nil, err
}
fmt.Fprintln(os.Stderr)
if signer, err = ssh.ParsePrivateKeyWithPassphrase(pemBytes, []byte(passphrase)); err == nil {
return ssh.PublicKeys(signer), nil
}
}
return nil, err
}
return ssh.PublicKeys(signer), nil
}
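// Usage sketch (illustrative only, not part of the original file): the AuthMethod helpers
// above plug directly into an ssh.ClientConfig. The host, user and key path below are
// hypothetical placeholders.
//
//	func dialExample() (*ssh.Client, error) {
//		auth, err := AuthWithPrivateKey("/home/user/.ssh/id_rsa", "")
//		if err != nil {
//			if auth, err = AuthWithAgent(); err != nil {
//				return nil, err
//			}
//		}
//		cfg := &ssh.ClientConfig{
//			User:            "user",
//			Auth:            []ssh.AuthMethod{auth},
//			HostKeyCallback: ssh.InsecureIgnoreHostKey(), // do not use in production
//			Timeout:         10 * time.Second,
//		}
//		return ssh.Dial("tcp", "example.com:22", cfg)
//	}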
|
[
"\"SSH_AUTH_SOCK\"",
"\"SSH_ AUTH_ SOCK\""
] |
[] |
[
"SSH_ AUTH_ SOCK",
"SSH_AUTH_SOCK"
] |
[]
|
["SSH_ AUTH_ SOCK", "SSH_AUTH_SOCK"]
|
go
| 2 | 0 | |
go/test/endtoend/vtctldweb/vtctld_web_main_test.go
|
/*
Copyright 2020 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vtctldweb
import (
"flag"
"fmt"
"math/rand"
"os"
"os/exec"
"strings"
"syscall"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tebeka/selenium"
"github.com/tebeka/selenium/chrome"
"vitess.io/vitess/go/test/endtoend/cluster"
vttestpb "vitess.io/vitess/go/vt/proto/vttest"
"vitess.io/vitess/go/vt/vttest"
)
//nolint
var (
localCluster *vttest.LocalCluster
hostname = "localhost" //nolint
wd selenium.WebDriver
seleniumService *selenium.Service
vtctldAddr string
ks1 = "test_keyspace"
ks2 = "test_keyspace2"
sqlSchema = "CREATE TABLE test_table (\n" +
" `id` BIGINT(20) UNSIGNED NOT NULL,\n" +
" `msg` VARCHAR(64),\n" +
" `keyspace_id` BIGINT(20) UNSIGNED NOT NULL,\n" +
" PRIMARY KEY (id)\n" +
") ENGINE=InnoDB"
)
func TestMain(m *testing.M) {
defer cluster.PanicHandler(nil)
flag.Parse()
exitcode, err := func() (int, error) {
// runs Xvfb in background
tearDownXvfb, err := RunXvfb()
if err != nil {
return 1, err
}
defer tearDownXvfb()
// cluster setup using vtcombo
topology := new(vttestpb.VTTestTopology)
topology.Cells = []string{"test", "test2"}
topology.Keyspaces = []*vttestpb.Keyspace{
{
Name: ks1,
Shards: []*vttestpb.Shard{
{Name: "-80"},
{Name: "80-"},
},
RdonlyCount: 2,
ReplicaCount: 2,
},
{
Name: ks2,
Shards: []*vttestpb.Shard{
{Name: "0"},
},
RdonlyCount: 2,
ReplicaCount: 1,
},
}
// create driver here
err = CreateWebDriver(getPort())
if err != nil {
return 1, err
}
defer TeardownWebDriver()
var cfg vttest.Config
cfg.Topology = topology
cfg.SchemaDir = os.Getenv("VTROOT") + "/test/vttest_schema"
cfg.DefaultSchemaDir = os.Getenv("VTROOT") + "/test/vttest_schema/default"
localCluster = &vttest.LocalCluster{
Config: cfg,
}
err = localCluster.Setup()
defer localCluster.TearDown()
vtctldAddr = fmt.Sprintf("http://localhost:%d", localCluster.Env.PortForProtocol("vtcombo", "port"))
if err != nil {
return 1, err
}
return m.Run(), nil
}()
if err != nil {
fmt.Printf("%v\n", err)
os.Exit(1)
} else {
os.Exit(exitcode)
}
}
// RunXvfb runs Xvfb command in background and returns the teardown function.
func RunXvfb() (func() error, error) {
tmpProcess := exec.Command("Xvfb", ":15", "-ac")
err := tmpProcess.Start()
if err != nil {
return nil, err
}
exit := make(chan error)
go func() {
exit <- tmpProcess.Wait()
}()
teardownFunc := func() error {
tmpProcess.Process.Signal(syscall.SIGTERM)
select {
case <-exit:
return nil
case <-time.After(10 * time.Second):
tmpProcess.Process.Kill()
return <-exit
}
}
os.Setenv("DISPLAY", ":15")
return teardownFunc, nil
}
// CreateWebDriver creates a webdriver object (local, or remote when running on Travis).
func CreateWebDriver(port int) error {
// selenium.SetDebug(true)
// Set common Options
options := selenium.ChromeDriver(os.Getenv("VTROOT") + "/dist")
if os.Getenv("CI") == "true" && os.Getenv("TRAVIS") == "true" {
capabilities := selenium.Capabilities{}
capabilities["tunnel-identifier"] = os.Getenv("TRAVIS_JOB_NUMBER")
capabilities["build"] = os.Getenv("TRAVIS_BUILD_NUMBER")
capabilities["platform"] = "Linux"
capabilities["browserName"] = "chrome"
capabilities["chromeOptions"] = options
var err error
wd, err = selenium.NewRemote(capabilities, fmt.Sprintf("%s:%s@localhost:4445/wd/hub", os.Getenv("SAUCE_USERNAME"), os.Getenv("SAUCE_ACCESS_KEY")))
if err != nil {
return err
}
name, err := wd.CurrentWindowHandle() //nolint
return wd.ResizeWindow(name, 1280, 1024)
}
// Only testing against Chrome for now
cc := selenium.Capabilities{"browserName": "chrome"}
cc.AddChrome(chrome.Capabilities{
Args: []string{
"--disable-gpu",
"--no-sandbox",
"--headless",
},
})
os.Setenv("webdriver.chrome.driver", os.Getenv("VTROOT")+"/dist")
var err error
seleniumService, err = selenium.NewChromeDriverService(os.Getenv("VTROOT")+"/dist/chromedriver/chromedriver", port, options)
if err != nil {
return err
}
wd, err = selenium.NewRemote(cc, fmt.Sprintf("http://localhost:%d/wd/hub", port))
if err != nil {
return err
}
name, err := wd.CurrentWindowHandle() //nolint
return wd.ResizeWindow(name, 1280, 1024)
}
func TeardownWebDriver() {
wd.Quit()
if seleniumService != nil {
seleniumService.Stop()
}
}
func checkNewView(t *testing.T, keyspaces, cells, types, metrics []string, selectedKs, selectedCell, selectedType, selectedMetric string) {
checkDropdowns(t, keyspaces, cells, types, metrics, selectedKs, selectedCell, selectedType, selectedMetric)
checkHeatMaps(t, selectedKs)
}
func checkHeatMaps(t *testing.T, selectedKs string) {
elem, err := wd.FindElement(selenium.ByTagName, "vt-status")
require.Nil(t, err)
elems, err := elem.FindElements(selenium.ByTagName, "vt-heatmap")
require.Nil(t, err)
if selectedKs == "all" {
availableKs := getDropdownOptions(t, "keyspace")
assert.Equal(t, len(elems), len(availableKs)-1)
for _, elem := range elems {
heading, err := elem.FindElement(selenium.ByID, "keyspaceName")
require.Nil(t, err)
headingTxt := text(t, heading)
_, err = elem.FindElement(selenium.ByID, headingTxt)
require.Nil(t, err)
assert.Contains(t, availableKs, headingTxt)
}
return
}
assert.Equal(t, 1, len(elems))
heading, err := elems[0].FindElement(selenium.ByID, "keyspaceName")
require.Nil(t, err)
headingTxt := text(t, heading)
_, err = elem.FindElement(selenium.ByID, headingTxt)
require.Nil(t, err)
assert.Equal(t, selectedKs, headingTxt)
}
// changeDropdownOptions changes the selected value of dropdown.
func changeDropdownOptions(t *testing.T, dropdownID, dropdownValue string) {
statusContent, err := wd.FindElement(selenium.ByTagName, "vt-status")
require.Nil(t, err)
dropdown, err := statusContent.FindElement(selenium.ByID, dropdownID)
require.Nil(t, err)
click(t, dropdown)
options, err := dropdown.FindElements(selenium.ByTagName, "li")
require.Nil(t, err)
triedOption := []string{}
for _, op := range options {
opTxt := text(t, op)
if opTxt == dropdownValue {
click(t, op)
return
}
triedOption = append(triedOption, opTxt)
}
ss(t, "option_check")
t.Log("dropdown options change failed", strings.Join(triedOption, ","), dropdownValue)
}
// checkDropdowns validates the dropdown values and selected value.
func checkDropdowns(t *testing.T, keyspaces, cells, types, metrics []string, selectedKs, selectedCell, selectedType, selectedMetric string) {
Options := getDropdownOptions(t, "keyspace")
Selected := getDropdownSelection(t, "keyspace")
assert.Equal(t, keyspaces, Options)
assert.Equal(t, selectedKs, Selected)
Options = getDropdownOptions(t, "cell")
Selected = getDropdownSelection(t, "cell")
assert.Equal(t, cells, Options)
assert.Equal(t, selectedCell, Selected)
Options = getDropdownOptions(t, "type")
Selected = getDropdownSelection(t, "type")
assert.Equal(t, types, Options)
assert.Equal(t, selectedType, Selected)
Options = getDropdownOptions(t, "metric")
Selected = getDropdownSelection(t, "metric")
assert.Equal(t, metrics, Options)
assert.Equal(t, selectedMetric, Selected)
}
// get element functions
// getDropdownSelection fetches the selected value for the corresponding group.
func getDropdownSelection(t *testing.T, group string) string {
elem, err := wd.FindElement(selenium.ByTagName, "vt-status")
require.Nil(t, err)
elem, err = elem.FindElement(selenium.ByID, group)
require.Nil(t, err)
elem, err = elem.FindElement(selenium.ByTagName, "label")
require.Nil(t, err)
return text(t, elem)
}
// getDropdownOptions fetches the list of options available for the corresponding group.
func getDropdownOptions(t *testing.T, group string) []string {
elem, err := wd.FindElement(selenium.ByTagName, "vt-status")
require.Nil(t, err)
elem, err = elem.FindElement(selenium.ByID, group)
require.Nil(t, err)
elems, err := elem.FindElements(selenium.ByTagName, "option")
require.Nil(t, err)
var out []string
for _, elem = range elems {
out = append(out, text(t, elem))
}
return out
}
// getDashboardKeyspaces fetches keyspaces from the dashboard.
func getDashboardKeyspaces(t *testing.T) []string {
wait(t, selenium.ByTagName, "vt-dashboard")
dashboardContent, err := wd.FindElement(selenium.ByTagName, "vt-dashboard")
require.Nil(t, err)
ksCards, err := dashboardContent.FindElements(selenium.ByClassName, "vt-keyspace-card") //nolint
var out []string
for _, ks := range ksCards {
out = append(out, text(t, ks))
}
return out
}
// getDashboardShards fetches shards from the dashboard.
func getDashboardShards(t *testing.T) []string {
wait(t, selenium.ByTagName, "vt-dashboard")
dashboardContent, err := wd.FindElement(selenium.ByTagName, "vt-dashboard") //nolint
require.Nil(t, err)
ksCards, err := dashboardContent.FindElements(selenium.ByClassName, "vt-shard-stats") //nolint
var out []string
for _, ks := range ksCards {
out = append(out, text(t, ks))
}
return out
}
func getKeyspaceShard(t *testing.T) []string {
wait(t, selenium.ByTagName, "vt-keyspace-view")
ksContent, err := wd.FindElement(selenium.ByTagName, "vt-keyspace-view")
require.Nil(t, err)
shards, err := ksContent.FindElements(selenium.ByClassName, "vt-serving-shard")
require.Nil(t, err)
var out []string
for _, s := range shards {
out = append(out, text(t, s))
}
return out
}
// getShardTablets gives list of tablet type and uid.
func getShardTablets(t *testing.T) ([]string, []string) {
wait(t, selenium.ByTagName, "vt-shard-view")
shardContent, err := wd.FindElement(selenium.ByTagName, "vt-shard-view")
require.Nil(t, err)
tableRows, err := shardContent.FindElements(selenium.ByTagName, "tr") //nolint
tableRows = tableRows[1:]
var tabletTypes, tabletUIDs []string
for _, row := range tableRows {
columns, err := row.FindElements(selenium.ByTagName, "td")
require.Nil(t, err)
typ, err := columns[1].FindElement(selenium.ByClassName, "ui-cell-data")
require.Nil(t, err)
typTxt := text(t, typ)
tabletTypes = append(tabletTypes, typTxt)
uid, err := columns[3].FindElement(selenium.ByClassName, "ui-cell-data")
require.Nil(t, err)
uidTxt := text(t, uid)
tabletUIDs = append(tabletUIDs, uidTxt)
}
return tabletTypes, tabletUIDs
}
// navigation functions
// navigateToDashBoard navigates chrome screen to dashboard of vitess.
func navigateToDashBoard(t *testing.T) {
err := wd.Get(vtctldAddr + "/app2")
require.Nil(t, err)
wait(t, selenium.ByID, "test_keyspace")
}
// navigateToKeyspaceView navigates chrome screen to first keyspace.
func navigateToKeyspaceView(t *testing.T) {
navigateToDashBoard(t)
dashboardContent, err := wd.FindElement(selenium.ByTagName, "vt-dashboard")
require.Nil(t, err)
ksCard, err := dashboardContent.FindElements(selenium.ByClassName, "vt-card")
require.Nil(t, err)
require.Equal(t, 2, len(ksCard))
shardStarts, err := ksCard[0].FindElement(selenium.ByTagName, "md-list")
require.Nil(t, err)
click(t, shardStarts)
wait(t, selenium.ByClassName, "vt-card")
}
// navigateToShardView navigates chrome screen to the first shard of first keyspace.
func navigateToShardView(t *testing.T) {
navigateToKeyspaceView(t)
ksContent, err := wd.FindElement(selenium.ByTagName, "vt-keyspace-view")
require.Nil(t, err)
shardCards, err := ksContent.FindElements(selenium.ByClassName, "vt-serving-shard")
require.Nil(t, err)
require.Equal(t, 2, len(shardCards))
click(t, shardCards[0])
wait(t, selenium.ByID, "1")
}
// other utility
// wait waits for the given element to be discoverable.
func wait(t *testing.T, by, val string) {
err := wd.WaitWithTimeout(func(xwd selenium.WebDriver) (bool, error) {
_, err := xwd.FindElement(by, val)
return err == nil, nil
}, selenium.DefaultWaitTimeout)
require.Nil(t, err)
}
// assertDialogCommand validates the command in dialog.
func assertDialogCommand(t *testing.T, dialog selenium.WebElement, cmds []string) {
elms, err := dialog.FindElements(selenium.ByClassName, "vt-sheet")
require.Nil(t, err)
var tmpCmd []string
for _, elm := range elms {
tmpCmd = append(tmpCmd, text(t, elm))
}
assert.ElementsMatch(t, cmds, tmpCmd)
}
func text(t *testing.T, elem selenium.WebElement) string {
for i := 0; i < 5; i++ {
opTxt, err := elem.Text()
require.Nil(t, err)
if opTxt != "" {
return opTxt
}
}
return ""
}
func click(t *testing.T, elem selenium.WebElement) {
require.Nil(t, elem.Click())
}
// ss takes screenshot of chrome, for debugging only.
func ss(t *testing.T, name string) {
b, err := wd.Screenshot()
require.Nil(t, err)
f, err := os.Create("./" + name)
require.Nil(t, err)
_, err = f.Write(b)
require.Nil(t, err)
}
func getPort() int {
return 20000 + rand.Intn(10000)
}
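// Illustrative sketch (not part of the original test suite): a minimal test built from
// the navigation and scraping helpers above. The expected keyspace names come from the
// ks1/ks2 variables defined in this file.
//
//	func TestDashboardListsKeyspaces(t *testing.T) {
//		defer cluster.PanicHandler(t)
//		navigateToDashBoard(t)
//		keyspaces := getDashboardKeyspaces(t)
//		assert.Contains(t, strings.Join(keyspaces, " "), ks1)
//		assert.Contains(t, strings.Join(keyspaces, " "), ks2)
//	}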
|
[
"\"VTROOT\"",
"\"VTROOT\"",
"\"VTROOT\"",
"\"CI\"",
"\"TRAVIS\"",
"\"TRAVIS_JOB_NUMBER\"",
"\"TRAVIS_BUILD_NUMBER\"",
"\"SAUCE_USERNAME\"",
"\"SAUCE_ACCESS_KEY\"",
"\"VTROOT\"",
"\"VTROOT\""
] |
[] |
[
"TRAVIS_BUILD_NUMBER",
"CI",
"VTROOT",
"SAUCE_USERNAME",
"SAUCE_ACCESS_KEY",
"TRAVIS",
"TRAVIS_JOB_NUMBER"
] |
[]
|
["TRAVIS_BUILD_NUMBER", "CI", "VTROOT", "SAUCE_USERNAME", "SAUCE_ACCESS_KEY", "TRAVIS", "TRAVIS_JOB_NUMBER"]
|
go
| 7 | 0 | |
lib/promscrape/discovery/openstack/auth.go
|
package openstack
import (
"encoding/json"
"fmt"
"net/url"
"os"
"time"
)
// authResponse represents identity api response
//
// See https://docs.openstack.org/api-ref/identity/v3/#authentication-and-token-management
type authResponse struct {
Token struct {
ExpiresAt time.Time `json:"expires_at,omitempty"`
Catalog []catalogItem `json:"catalog,omitempty"`
}
}
type catalogItem struct {
Name string `json:"name"`
Type string `json:"type"`
Endpoints []endpoint `json:"endpoints"`
}
// openstack api endpoint
//
// See https://docs.openstack.org/api-ref/identity/v3/#list-endpoints
type endpoint struct {
RegionID string `json:"region_id"`
RegionName string `json:"region_name"`
URL string `json:"url"`
Name string `json:"name"`
Type string `json:"type"`
Interface string `json:"interface"`
}
// getComputeEndpointURL extracts compute endpoint url with given filters from keystone catalog
func getComputeEndpointURL(catalog []catalogItem, availability, region string) (*url.URL, error) {
for _, eps := range catalog {
if eps.Type != "compute" {
continue
}
for _, ep := range eps.Endpoints {
if ep.Interface == availability && (len(region) == 0 || region == ep.RegionID || region == ep.RegionName) {
return url.Parse(ep.URL)
}
}
}
return nil, fmt.Errorf("cannot find compute url for the given availability: %q, region: %q", availability, region)
}
// buildAuthRequestBody builds request for authentication
func buildAuthRequestBody(sdc *SDConfig) ([]byte, error) {
if len(sdc.Password) == 0 && len(sdc.ApplicationCredentialID) == 0 && len(sdc.ApplicationCredentialName) == 0 {
return nil, fmt.Errorf("password and application credentials are missing")
}
type domainReq struct {
ID *string `json:"id,omitempty"`
Name *string `json:"name,omitempty"`
}
type userReq struct {
ID *string `json:"id,omitempty"`
Name *string `json:"name,omitempty"`
Password *string `json:"password,omitempty"`
Passcode *string `json:"passcode,omitempty"`
Domain *domainReq `json:"domain,omitempty"`
}
type passwordReq struct {
User userReq `json:"user"`
}
type tokenReq struct {
ID string `json:"id"`
}
type applicationCredentialReq struct {
ID *string `json:"id,omitempty"`
Name *string `json:"name,omitempty"`
User *userReq `json:"user,omitempty"`
Secret *string `json:"secret,omitempty"`
}
type identityReq struct {
Methods []string `json:"methods"`
Password *passwordReq `json:"password,omitempty"`
Token *tokenReq `json:"token,omitempty"`
ApplicationCredential *applicationCredentialReq `json:"application_credential,omitempty"`
}
type authReq struct {
Identity identityReq `json:"identity"`
Scope map[string]interface{} `json:"scope,omitempty"`
}
type request struct {
Auth authReq `json:"auth"`
}
// Populate the request structure based on the provided arguments. Create and return an error
// if insufficient or incompatible information is present.
var req request
if len(sdc.Password) == 0 {
// There are three kinds of possible application_credential requests
// 1. application_credential id + secret
// 2. application_credential name + secret + user_id
// 3. application_credential name + secret + username + domain_id / domain_name
if len(sdc.ApplicationCredentialID) > 0 {
if len(sdc.ApplicationCredentialSecret) == 0 {
return nil, fmt.Errorf("ApplicationCredentialSecret is empty")
}
req.Auth.Identity.Methods = []string{"application_credential"}
req.Auth.Identity.ApplicationCredential = &applicationCredentialReq{
ID: &sdc.ApplicationCredentialID,
Secret: &sdc.ApplicationCredentialSecret,
}
return json.Marshal(req)
}
if len(sdc.ApplicationCredentialSecret) == 0 {
return nil, fmt.Errorf("missing application_credential_secret when application_credential_name is set")
}
var userRequest *userReq
if len(sdc.UserID) > 0 {
// UserID could be used without the domain information
userRequest = &userReq{
ID: &sdc.UserID,
}
}
if userRequest == nil && len(sdc.Username) == 0 {
return nil, fmt.Errorf("username and userid is empty")
}
if userRequest == nil && len(sdc.DomainID) > 0 {
userRequest = &userReq{
Name: &sdc.Username,
Domain: &domainReq{ID: &sdc.DomainID},
}
}
if userRequest == nil && len(sdc.DomainName) > 0 {
userRequest = &userReq{
Name: &sdc.Username,
Domain: &domainReq{Name: &sdc.DomainName},
}
}
if userRequest == nil {
return nil, fmt.Errorf("domain_id and domain_name cannot be empty for application_credential_name auth")
}
req.Auth.Identity.Methods = []string{"application_credential"}
req.Auth.Identity.ApplicationCredential = &applicationCredentialReq{
Name: &sdc.ApplicationCredentialName,
User: userRequest,
Secret: &sdc.ApplicationCredentialSecret,
}
return json.Marshal(req)
}
// Password authentication.
req.Auth.Identity.Methods = append(req.Auth.Identity.Methods, "password")
if len(sdc.Username) == 0 && len(sdc.UserID) == 0 {
return nil, fmt.Errorf("username and userid is empty for username/password auth")
}
if len(sdc.Username) > 0 {
if len(sdc.UserID) > 0 {
return nil, fmt.Errorf("both username and userid is present")
}
if len(sdc.DomainID) == 0 && len(sdc.DomainName) == 0 {
return nil, fmt.Errorf(" domain_id or domain_name is missing for username/password auth: %s", sdc.Username)
}
if len(sdc.DomainID) > 0 {
if sdc.DomainName != "" {
return nil, fmt.Errorf("both domain_id and domain_name is present")
}
// Configure the request for Username and Password authentication with a DomainID.
if len(sdc.Password) > 0 {
req.Auth.Identity.Password = &passwordReq{
User: userReq{
Name: &sdc.Username,
Password: &sdc.Password,
Domain: &domainReq{ID: &sdc.DomainID},
},
}
}
}
if len(sdc.DomainName) > 0 {
// Configure the request for Username and Password authentication with a DomainName.
if len(sdc.Password) > 0 {
req.Auth.Identity.Password = &passwordReq{
User: userReq{
Name: &sdc.Username,
Password: &sdc.Password,
Domain: &domainReq{Name: &sdc.DomainName},
},
}
}
}
}
if len(sdc.UserID) > 0 {
if len(sdc.DomainID) > 0 {
return nil, fmt.Errorf("both user_id and domain_id is present")
}
if len(sdc.DomainName) > 0 {
return nil, fmt.Errorf("both user_id and domain_name is present")
}
// Configure the request for UserID and Password authentication.
if len(sdc.Password) > 0 {
req.Auth.Identity.Password = &passwordReq{
User: userReq{
ID: &sdc.UserID,
Password: &sdc.Password,
},
}
}
}
// build scope for password auth
scope, err := buildScope(sdc)
if err != nil {
return nil, err
}
if len(scope) > 0 {
req.Auth.Scope = scope
}
return json.Marshal(req)
}
// buildScope adds scope information into auth request
//
// See https://docs.openstack.org/api-ref/identity/v3/#password-authentication-with-unscoped-authorization
func buildScope(sdc *SDConfig) (map[string]interface{}, error) {
if len(sdc.ProjectName) == 0 && len(sdc.ProjectID) == 0 && len(sdc.DomainID) == 0 && len(sdc.DomainName) == 0 {
return nil, nil
}
if len(sdc.ProjectName) > 0 {
// ProjectName provided: either DomainID or DomainName must also be supplied.
// ProjectID may not be supplied.
if len(sdc.DomainID) == 0 && len(sdc.DomainName) == 0 {
return nil, fmt.Errorf("both domain_id and domain_name present")
}
if len(sdc.ProjectID) > 0 {
return nil, fmt.Errorf("both domain_id and domain_name present")
}
if len(sdc.DomainID) > 0 {
return map[string]interface{}{
"project": map[string]interface{}{
"name": &sdc.ProjectName,
"domain": map[string]interface{}{"id": &sdc.DomainID},
},
}, nil
}
if len(sdc.DomainName) > 0 {
return map[string]interface{}{
"project": map[string]interface{}{
"name": &sdc.ProjectName,
"domain": map[string]interface{}{"name": &sdc.DomainName},
},
}, nil
}
} else if len(sdc.ProjectID) > 0 {
// ProjectID provided. ProjectName, DomainID, and DomainName may not be provided.
if len(sdc.DomainID) > 0 {
return nil, fmt.Errorf("both project_id and domain_id present")
}
if len(sdc.DomainName) > 0 {
return nil, fmt.Errorf("both project_id and domain_name present")
}
return map[string]interface{}{
"project": map[string]interface{}{
"id": &sdc.ProjectID,
},
}, nil
} else if len(sdc.DomainID) > 0 {
if len(sdc.DomainName) > 0 {
return nil, fmt.Errorf("both domain_id and domain_name present")
}
return map[string]interface{}{
"domain": map[string]interface{}{
"id": &sdc.DomainID,
},
}, nil
} else if len(sdc.DomainName) > 0 {
return map[string]interface{}{
"domain": map[string]interface{}{
"name": &sdc.DomainName,
},
}, nil
}
return nil, nil
}
// readCredentialsFromEnv obtains serviceDiscoveryConfig from env variables for openstack
func readCredentialsFromEnv() SDConfig {
authURL := os.Getenv("OS_AUTH_URL")
username := os.Getenv("OS_USERNAME")
userID := os.Getenv("OS_USERID")
password := os.Getenv("OS_PASSWORD")
tenantID := os.Getenv("OS_TENANT_ID")
tenantName := os.Getenv("OS_TENANT_NAME")
domainID := os.Getenv("OS_DOMAIN_ID")
domainName := os.Getenv("OS_DOMAIN_NAME")
applicationCredentialID := os.Getenv("OS_APPLICATION_CREDENTIAL_ID")
applicationCredentialName := os.Getenv("OS_APPLICATION_CREDENTIAL_NAME")
applicationCredentialSecret := os.Getenv("OS_APPLICATION_CREDENTIAL_SECRET")
// If OS_PROJECT_ID is set, overwrite tenantID with the value.
if v := os.Getenv("OS_PROJECT_ID"); v != "" {
tenantID = v
}
// If OS_PROJECT_NAME is set, overwrite tenantName with the value.
if v := os.Getenv("OS_PROJECT_NAME"); v != "" {
tenantName = v
}
return SDConfig{
IdentityEndpoint: authURL,
Username: username,
UserID: userID,
Password: password,
ProjectName: tenantName,
ProjectID: tenantID,
DomainName: domainName,
DomainID: domainID,
ApplicationCredentialName: applicationCredentialName,
ApplicationCredentialID: applicationCredentialID,
ApplicationCredentialSecret: applicationCredentialSecret,
}
}
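// Illustrative sketch (not part of the original file): building a scoped password-auth
// request body from an SDConfig populated via environment variables; the fallback field
// values are hypothetical.
//
//	func exampleAuthRequestBody() ([]byte, error) {
//		sdc := readCredentialsFromEnv() // reads the OS_* variables above
//		if len(sdc.Username) == 0 {
//			sdc = SDConfig{
//				Username:    "demo",
//				Password:    "secret",
//				DomainName:  "Default",
//				ProjectName: "demo-project",
//			}
//		}
//		return buildAuthRequestBody(&sdc)
//	}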
|
[
"\"OS_AUTH_URL\"",
"\"OS_USERNAME\"",
"\"OS_USERID\"",
"\"OS_PASSWORD\"",
"\"OS_TENANT_ID\"",
"\"OS_TENANT_NAME\"",
"\"OS_DOMAIN_ID\"",
"\"OS_DOMAIN_NAME\"",
"\"OS_APPLICATION_CREDENTIAL_ID\"",
"\"OS_APPLICATION_CREDENTIAL_NAME\"",
"\"OS_APPLICATION_CREDENTIAL_SECRET\"",
"\"OS_PROJECT_ID\"",
"\"OS_PROJECT_NAME\""
] |
[] |
[
"OS_PROJECT_NAME",
"OS_PROJECT_ID",
"OS_AUTH_URL",
"OS_PASSWORD",
"OS_TENANT_ID",
"OS_USERNAME",
"OS_TENANT_NAME",
"OS_DOMAIN_ID",
"OS_APPLICATION_CREDENTIAL_NAME",
"OS_DOMAIN_NAME",
"OS_USERID",
"OS_APPLICATION_CREDENTIAL_SECRET",
"OS_APPLICATION_CREDENTIAL_ID"
] |
[]
|
["OS_PROJECT_NAME", "OS_PROJECT_ID", "OS_AUTH_URL", "OS_PASSWORD", "OS_TENANT_ID", "OS_USERNAME", "OS_TENANT_NAME", "OS_DOMAIN_ID", "OS_APPLICATION_CREDENTIAL_NAME", "OS_DOMAIN_NAME", "OS_USERID", "OS_APPLICATION_CREDENTIAL_SECRET", "OS_APPLICATION_CREDENTIAL_ID"]
|
go
| 13 | 0 | |
tests/metadata-overlays-istio_test.go
|
package tests_test
import (
"testing"
"sigs.k8s.io/kustomize/v3/k8sdeps/kunstruct"
"sigs.k8s.io/kustomize/v3/k8sdeps/transformer"
"sigs.k8s.io/kustomize/v3/pkg/fs"
"sigs.k8s.io/kustomize/v3/pkg/loader"
"sigs.k8s.io/kustomize/v3/pkg/plugins"
"sigs.k8s.io/kustomize/v3/pkg/resmap"
"sigs.k8s.io/kustomize/v3/pkg/resource"
"sigs.k8s.io/kustomize/v3/pkg/target"
"sigs.k8s.io/kustomize/v3/pkg/validators"
)
func writeMetadataOverlaysIstio(th *KustTestHarness) {
th.writeF("/manifests/metadata/overlays/istio/virtual-service.yaml", `
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: metadata-ui
spec:
gateways:
- kubeflow-gateway
hosts:
- '*'
http:
- match:
- uri:
prefix: /metadata
rewrite:
uri: /metadata
route:
- destination:
host: $(service).$(ui-namespace).svc.$(ui-clusterDomain)
port:
number: 80
timeout: 300s
`)
th.writeF("/manifests/metadata/overlays/istio/virtual-service-metadata-grpc.yaml", `
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: metadata-grpc
spec:
gateways:
- kubeflow-gateway
hosts:
- '*'
http:
- match:
- uri:
prefix: /ml_metadata
rewrite:
uri: /ml_metadata
route:
- destination:
host: $(metadata-envoy-service).$(ui-namespace).svc.$(ui-clusterDomain)
port:
number: 9090
timeout: 300s
`)
th.writeF("/manifests/metadata/overlays/istio/params.yaml", `
varReference:
- path: spec/http/route/destination/host
kind: VirtualService
`)
th.writeK("/manifests/metadata/overlays/istio", `
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
bases:
- ../../base
resources:
- virtual-service.yaml
- virtual-service-metadata-grpc.yaml
configurations:
- params.yaml
`)
th.writeF("/manifests/metadata/base/metadata-db-pvc.yaml", `
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: mysql
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
`)
th.writeF("/manifests/metadata/base/metadata-db-configmap.yaml", `
apiVersion: v1
kind: ConfigMap
metadata:
name: db-configmap
data:
mysql_database: "metadb"
mysql_port: "3306"
`)
th.writeF("/manifests/metadata/base/metadata-db-secret.yaml", `
apiVersion: v1
kind: Secret
type: Opaque
metadata:
name: db-secrets
data:
username: cm9vdA== # "root"
password: dGVzdA== # "test"
`)
th.writeF("/manifests/metadata/base/metadata-db-deployment.yaml", `
apiVersion: apps/v1
kind: Deployment
metadata:
name: db
labels:
component: db
spec:
replicas: 1
template:
metadata:
name: db
labels:
component: db
spec:
containers:
- name: db-container
image: mysql:8.0.3
args:
- --datadir
- /var/lib/mysql/datadir
env:
- name: MYSQL_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: metadata-db-secrets
key: password
- name: MYSQL_ALLOW_EMPTY_PASSWORD
value: "true"
- name: MYSQL_DATABASE
valueFrom:
configMapKeyRef:
name: metadata-db-configmap
key: mysql_database
ports:
- name: dbapi
containerPort: 3306
readinessProbe:
exec:
command:
- "/bin/bash"
- "-c"
- "mysql -D $$MYSQL_DATABASE -p$$MYSQL_ROOT_PASSWORD -e 'SELECT 1'"
initialDelaySeconds: 5
periodSeconds: 2
timeoutSeconds: 1
volumeMounts:
- name: metadata-mysql
mountPath: /var/lib/mysql
volumes:
- name: metadata-mysql
persistentVolumeClaim:
claimName: metadata-mysql
`)
th.writeF("/manifests/metadata/base/metadata-db-service.yaml", `
apiVersion: v1
kind: Service
metadata:
name: db
labels:
component: db
spec:
type: ClusterIP
ports:
- port: 3306
protocol: TCP
name: dbapi
selector:
component: db
`)
th.writeF("/manifests/metadata/base/metadata-deployment.yaml", `
apiVersion: apps/v1
kind: Deployment
metadata:
name: deployment
labels:
component: server
spec:
replicas: 1
selector:
matchLabels:
component: server
template:
metadata:
labels:
component: server
spec:
containers:
- name: container
image: gcr.io/kubeflow-images-public/metadata:v0.1.11
env:
- name: MYSQL_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: metadata-db-secrets
key: password
- name: MYSQL_USER_NAME
valueFrom:
secretKeyRef:
name: metadata-db-secrets
key: username
- name: MYSQL_DATABASE
valueFrom:
configMapKeyRef:
name: metadata-db-configmap
key: mysql_database
- name: MYSQL_PORT
valueFrom:
configMapKeyRef:
name: metadata-db-configmap
key: mysql_port
command: ["./server/server",
"--http_port=8080",
"--mysql_service_host=metadata-db.kubeflow",
"--mysql_service_port=$(MYSQL_PORT)",
"--mysql_service_user=$(MYSQL_USER_NAME)",
"--mysql_service_password=$(MYSQL_ROOT_PASSWORD)",
"--mlmd_db_name=$(MYSQL_DATABASE)"]
ports:
- name: backendapi
containerPort: 8080
readinessProbe:
httpGet:
path: /api/v1alpha1/artifact_types
port: backendapi
httpHeaders:
- name: ContentType
value: application/json
initialDelaySeconds: 3
periodSeconds: 5
timeoutSeconds: 2
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: grpc-deployment
labels:
component: grpc-server
spec:
replicas: 1
selector:
matchLabels:
component: grpc-server
template:
metadata:
labels:
component: grpc-server
spec:
containers:
- name: container
image: gcr.io/tfx-oss-public/ml_metadata_store_server:0.15.1
env:
- name: MYSQL_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: metadata-db-secrets
key: password
- name: MYSQL_USER_NAME
valueFrom:
secretKeyRef:
name: metadata-db-secrets
key: username
- name: MYSQL_DATABASE
valueFrom:
configMapKeyRef:
name: metadata-db-configmap
key: mysql_database
- name: MYSQL_PORT
valueFrom:
configMapKeyRef:
name: metadata-db-configmap
key: mysql_port
command: ["/bin/metadata_store_server"]
args: ["--grpc_port=8080",
"--mysql_config_host=metadata-db.kubeflow",
"--mysql_config_database=$(MYSQL_DATABASE)",
"--mysql_config_port=$(MYSQL_PORT)",
"--mysql_config_user=$(MYSQL_USER_NAME)",
"--mysql_config_password=$(MYSQL_ROOT_PASSWORD)"
]
ports:
- name: grpc-backendapi
containerPort: 8080
`)
th.writeF("/manifests/metadata/base/metadata-service.yaml", `
kind: Service
apiVersion: v1
metadata:
labels:
app: metadata
name: service
spec:
selector:
component: server
type: ClusterIP
ports:
- port: 8080
protocol: TCP
name: backendapi
---
kind: Service
apiVersion: v1
metadata:
labels:
app: grpc-metadata
name: grpc-service
spec:
selector:
component: grpc-server
type: ClusterIP
ports:
- port: 8080
protocol: TCP
name: grpc-backendapi
`)
th.writeF("/manifests/metadata/base/metadata-ui-deployment.yaml", `
apiVersion: apps/v1
kind: Deployment
metadata:
name: ui
labels:
app: metadata-ui
spec:
selector:
matchLabels:
app: metadata-ui
template:
metadata:
name: ui
labels:
app: metadata-ui
spec:
containers:
- image: gcr.io/kubeflow-images-public/metadata-frontend:v0.1.8
imagePullPolicy: IfNotPresent
name: metadata-ui
ports:
- containerPort: 3000
serviceAccountName: ui
`)
th.writeF("/manifests/metadata/base/metadata-ui-role.yaml", `
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
labels:
app: metadata-ui
name: ui
rules:
- apiGroups:
- ""
resources:
- pods
- pods/log
verbs:
- create
- get
- list
- apiGroups:
- "kubeflow.org"
resources:
- viewers
verbs:
- create
- get
- list
- watch
- delete
`)
th.writeF("/manifests/metadata/base/metadata-ui-rolebinding.yaml", `
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
labels:
app: metadata-ui
name: ui
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ui
subjects:
- kind: ServiceAccount
name: ui
namespace: kubeflow
`)
th.writeF("/manifests/metadata/base/metadata-ui-sa.yaml", `
apiVersion: v1
kind: ServiceAccount
metadata:
name: ui
`)
th.writeF("/manifests/metadata/base/metadata-ui-service.yaml", `
apiVersion: v1
kind: Service
metadata:
name: ui
labels:
app: metadata-ui
spec:
ports:
- port: 80
targetPort: 3000
selector:
app: metadata-ui
`)
th.writeF("/manifests/metadata/base/metadata-envoy-deployment.yaml", `
apiVersion: apps/v1
kind: Deployment
metadata:
name: envoy-deployment
labels:
component: envoy
spec:
replicas: 1
selector:
matchLabels:
component: envoy
template:
metadata:
labels:
component: envoy
spec:
containers:
- name: container
image: gcr.io/ml-pipeline/envoy:metadata-grpc
ports:
- name: md-envoy
containerPort: 9090
- name: envoy-admin
containerPort: 9901
`)
th.writeF("/manifests/metadata/base/metadata-envoy-service.yaml", `
kind: Service
apiVersion: v1
metadata:
labels:
app: metadata
name: envoy-service
spec:
selector:
component: envoy
type: ClusterIP
ports:
- port: 9090
protocol: TCP
name: md-envoy
`)
th.writeF("/manifests/metadata/base/params.env", `
uiClusterDomain=cluster.local
`)
th.writeK("/manifests/metadata/base", `
namePrefix: metadata-
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
commonLabels:
kustomize.component: metadata
configMapGenerator:
- name: ui-parameters
env: params.env
resources:
- metadata-db-pvc.yaml
- metadata-db-configmap.yaml
- metadata-db-secret.yaml
- metadata-db-deployment.yaml
- metadata-db-service.yaml
- metadata-deployment.yaml
- metadata-service.yaml
- metadata-ui-deployment.yaml
- metadata-ui-role.yaml
- metadata-ui-rolebinding.yaml
- metadata-ui-sa.yaml
- metadata-ui-service.yaml
- metadata-envoy-deployment.yaml
- metadata-envoy-service.yaml
namespace: kubeflow
vars:
- name: ui-namespace
objref:
kind: Service
name: ui
apiVersion: v1
fieldref:
fieldpath: metadata.namespace
- name: ui-clusterDomain
objref:
kind: ConfigMap
name: ui-parameters
version: v1
fieldref:
fieldpath: data.uiClusterDomain
- name: service
objref:
kind: Service
name: ui
apiVersion: v1
fieldref:
fieldpath: metadata.name
- name: metadata-envoy-service
objref:
kind: Service
name: envoy-service
apiVersion: v1
fieldref:
fieldpath: metadata.name
images:
- name: gcr.io/kubeflow-images-public/metadata
newName: gcr.io/kubeflow-images-public/metadata
newTag: v0.1.11
- name: gcr.io/tfx-oss-public/ml_metadata_store_server
newName: gcr.io/tfx-oss-public/ml_metadata_store_server
newTag: 0.15.1
- name: gcr.io/ml-pipeline/envoy
newName: gcr.io/ml-pipeline/envoy
newTag: metadata-grpc
- name: mysql
newName: mysql
newTag: 8.0.3
- name: gcr.io/kubeflow-images-public/metadata-frontend
newName: gcr.io/kubeflow-images-public/metadata-frontend
newTag: v0.1.8
`)
}
func TestMetadataOverlaysIstio(t *testing.T) {
th := NewKustTestHarness(t, "/manifests/metadata/overlays/istio")
writeMetadataOverlaysIstio(th)
m, err := th.makeKustTarget().MakeCustomizedResMap()
if err != nil {
t.Fatalf("Err: %v", err)
}
expected, err := m.AsYaml()
if err != nil {
t.Fatalf("Err: %v", err)
}
targetPath := "../metadata/overlays/istio"
fsys := fs.MakeRealFS()
lrc := loader.RestrictionRootOnly
_loader, loaderErr := loader.NewLoader(lrc, validators.MakeFakeValidator(), targetPath, fsys)
if loaderErr != nil {
t.Fatalf("could not load kustomize loader: %v", loaderErr)
}
rf := resmap.NewFactory(resource.NewFactory(kunstruct.NewKunstructuredFactoryImpl()), transformer.NewFactoryImpl())
pc := plugins.DefaultPluginConfig()
kt, err := target.NewKustTarget(_loader, rf, transformer.NewFactoryImpl(), plugins.NewLoader(pc, rf))
if err != nil {
th.t.Fatalf("Unexpected construction error %v", err)
}
actual, err := kt.MakeCustomizedResMap()
if err != nil {
t.Fatalf("Err: %v", err)
}
th.assertActualEqualsExpected(actual, string(expected))
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
espnet/bin/asr_train.py
|
#!/usr/bin/env python3
# encoding: utf-8
# Copyright 2017 Tomoki Hayashi (Nagoya University)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Automatic speech recognition model training script."""
import logging
import os
import random
import subprocess
import sys
from distutils.version import LooseVersion
import configargparse
import numpy as np
import torch
from espnet.utils.cli_utils import strtobool
from espnet.utils.training.batchfy import BATCH_COUNT_CHOICES
is_torch_1_2_plus = LooseVersion(torch.__version__) >= LooseVersion("1.2")
# NOTE: you need this func to generate our sphinx doc
def get_parser(parser=None, required=True):
"""Get default arguments."""
if parser is None:
parser = configargparse.ArgumentParser(
description="Train an automatic speech recognition (ASR) model on one CPU, "
"one or multiple GPUs",
config_file_parser_class=configargparse.YAMLConfigFileParser,
formatter_class=configargparse.ArgumentDefaultsHelpFormatter,
)
# general configuration
parser.add("--config", is_config_file=True, help="config file path")
parser.add(
"--config2",
is_config_file=True,
help="second config file path that overwrites the settings in `--config`.",
)
parser.add(
"--config3",
is_config_file=True,
help="third config file path that overwrites the settings in "
"`--config` and `--config2`.",
)
parser.add_argument(
"--ngpu",
default=None,
type=int,
help="Number of GPUs. If not given, use all visible devices",
)
parser.add_argument(
"--train-dtype",
default="float32",
choices=["float16", "float32", "float64", "O0", "O1", "O2", "O3"],
help="Data type for training (only pytorch backend). "
"O0,O1,.. flags require apex. "
"See https://nvidia.github.io/apex/amp.html#opt-levels",
)
parser.add_argument(
"--backend",
default="chainer",
type=str,
choices=["chainer", "pytorch"],
help="Backend library",
)
parser.add_argument(
"--outdir", type=str, required=required, help="Output directory"
)
parser.add_argument("--debugmode", default=1, type=int, help="Debugmode")
parser.add_argument("--dict", required=required, help="Dictionary")
parser.add_argument("--seed", default=1, type=int, help="Random seed")
parser.add_argument("--debugdir", type=str, help="Output directory for debugging")
parser.add_argument(
"--resume",
"-r",
default="",
nargs="?",
help="Resume the training from snapshot",
)
parser.add_argument(
"--minibatches",
"-N",
type=int,
default="-1",
help="Process only N minibatches (for debug)",
)
parser.add_argument("--verbose", "-V", default=0, type=int, help="Verbose option")
parser.add_argument(
"--tensorboard-dir",
default=None,
type=str,
nargs="?",
help="Tensorboard log dir path",
)
parser.add_argument(
"--report-interval-iters",
default=100,
type=int,
help="Report interval iterations",
)
parser.add_argument(
"--save-interval-iters",
default=0,
type=int,
help="Save snapshot interval iterations",
)
# task related
parser.add_argument(
"--train-json",
type=str,
default=None,
help="Filename of train label data (json)",
)
parser.add_argument(
"--valid-json",
type=str,
default=None,
help="Filename of validation label data (json)",
)
# network architecture
parser.add_argument(
"--model-module",
type=str,
default=None,
help="model defined module (default: espnet.nets.xxx_backend.e2e_asr:E2E)",
)
# encoder
parser.add_argument(
"--num-encs", default=1, type=int, help="Number of encoders in the model."
)
# loss related
parser.add_argument(
"--ctc_type",
default="warpctc",
type=str,
choices=["builtin", "warpctc"],
help="Type of CTC implementation to calculate loss.",
)
parser.add_argument(
"--mtlalpha",
default=0.5,
type=float,
help="Multitask learning coefficient, "
"alpha: alpha*ctc_loss + (1-alpha)*att_loss ",
)
parser.add_argument(
"--lsm-weight", default=0.0, type=float, help="Label smoothing weight"
)
# recognition options to compute CER/WER
parser.add_argument(
"--report-cer",
default=False,
action="store_true",
help="Compute CER on development set",
)
parser.add_argument(
"--report-wer",
default=False,
action="store_true",
help="Compute WER on development set",
)
parser.add_argument("--nbest", type=int, default=1, help="Output N-best hypotheses")
parser.add_argument("--beam-size", type=int, default=4, help="Beam size")
parser.add_argument("--penalty", default=0.0, type=float, help="Incertion penalty")
parser.add_argument(
"--maxlenratio",
default=0.0,
type=float,
help="""Input length ratio to obtain max output length.
        If maxlenratio=0.0 (default), it uses an end-detect function
to automatically find maximum hypothesis lengths""",
)
parser.add_argument(
"--minlenratio",
default=0.0,
type=float,
help="Input length ratio to obtain min output length",
)
parser.add_argument(
"--ctc-weight", default=0.3, type=float, help="CTC weight in joint decoding"
)
parser.add_argument(
"--rnnlm", type=str, default=None, help="RNNLM model file to read"
)
parser.add_argument(
"--rnnlm-conf", type=str, default=None, help="RNNLM model config file to read"
)
parser.add_argument("--lm-weight", default=0.1, type=float, help="RNNLM weight.")
parser.add_argument("--sym-space", default="<space>", type=str, help="Space symbol")
parser.add_argument("--sym-blank", default="<blank>", type=str, help="Blank symbol")
# minibatch related
parser.add_argument(
"--sortagrad",
default=0,
type=int,
nargs="?",
help="How many epochs to use sortagrad for. 0 = deactivated, -1 = all epochs",
)
parser.add_argument(
"--batch-count",
default="auto",
choices=BATCH_COUNT_CHOICES,
help="How to count batch_size. "
"The default (auto) will find how to count by args.",
)
parser.add_argument(
"--batch-size",
"--batch-seqs",
"-b",
default=0,
type=int,
help="Maximum seqs in a minibatch (0 to disable)",
)
parser.add_argument(
"--batch-bins",
default=0,
type=int,
help="Maximum bins in a minibatch (0 to disable)",
)
parser.add_argument(
"--batch-frames-in",
default=0,
type=int,
help="Maximum input frames in a minibatch (0 to disable)",
)
parser.add_argument(
"--batch-frames-out",
default=0,
type=int,
help="Maximum output frames in a minibatch (0 to disable)",
)
parser.add_argument(
"--batch-frames-inout",
default=0,
type=int,
help="Maximum input+output frames in a minibatch (0 to disable)",
)
parser.add_argument(
"--maxlen-in",
"--batch-seq-maxlen-in",
default=800,
type=int,
metavar="ML",
help="When --batch-count=seq, "
"batch size is reduced if the input sequence length > ML.",
)
parser.add_argument(
"--maxlen-out",
"--batch-seq-maxlen-out",
default=150,
type=int,
metavar="ML",
help="When --batch-count=seq, "
"batch size is reduced if the output sequence length > ML",
)
parser.add_argument(
"--n-iter-processes",
default=0,
type=int,
help="Number of processes of iterator",
)
parser.add_argument(
"--preprocess-conf",
type=str,
default=None,
nargs="?",
help="The configuration file for the pre-processing",
)
# optimization related
parser.add_argument(
"--opt",
default="adadelta",
type=str,
choices=["adadelta", "adam", "noam"],
help="Optimizer",
)
parser.add_argument(
"--accum-grad", default=1, type=int, help="Number of gradient accumuration"
)
parser.add_argument(
"--eps", default=1e-8, type=float, help="Epsilon constant for optimizer"
)
parser.add_argument(
"--eps-decay", default=0.01, type=float, help="Decaying ratio of epsilon"
)
parser.add_argument(
"--weight-decay", default=0.0, type=float, help="Weight decay ratio"
)
parser.add_argument(
"--criterion",
default="acc",
type=str,
choices=["loss", "acc"],
help="Criterion to perform epsilon decay",
)
parser.add_argument(
"--threshold", default=1e-4, type=float, help="Threshold to stop iteration"
)
parser.add_argument(
"--epochs", "-e", default=30, type=int, help="Maximum number of epochs"
)
parser.add_argument(
"--early-stop-criterion",
default="validation/main/acc",
type=str,
nargs="?",
help="Value to monitor to trigger an early stopping of the training",
)
parser.add_argument(
"--patience",
default=3,
type=int,
nargs="?",
help="Number of epochs to wait without improvement "
"before stopping the training",
)
parser.add_argument(
"--grad-clip", default=5, type=float, help="Gradient norm threshold to clip"
)
parser.add_argument(
"--num-save-attention",
default=3,
type=int,
help="Number of samples of attention to be saved",
)
parser.add_argument(
"--num-save-ctc",
default=3,
type=int,
help="Number of samples of CTC probability to be saved",
)
parser.add_argument(
"--grad-noise",
type=strtobool,
default=False,
help="The flag to switch to use noise injection to gradients during training",
)
# asr_mix related
parser.add_argument(
"--num-spkrs",
default=1,
type=int,
choices=[1, 2],
help="Number of speakers in the speech.",
)
# decoder related
parser.add_argument(
"--context-residual",
default=False,
type=strtobool,
nargs="?",
help="The flag to switch to use context vector residual in the decoder network",
)
# finetuning related
parser.add_argument(
"--enc-init",
default=None,
type=str,
help="Pre-trained ASR model to initialize encoder.",
)
parser.add_argument(
"--enc-init-mods",
default="enc.enc.",
type=lambda s: [str(mod) for mod in s.split(",") if s != ""],
help="List of encoder modules to initialize, separated by a comma.",
)
parser.add_argument(
"--dec-init",
default=None,
type=str,
help="Pre-trained ASR, MT or LM model to initialize decoder.",
)
parser.add_argument(
"--dec-init-mods",
default="att., dec.",
type=lambda s: [str(mod) for mod in s.split(",") if s != ""],
help="List of decoder modules to initialize, separated by a comma.",
)
parser.add_argument(
"--freeze-mods",
default=None,
type=lambda s: [str(mod) for mod in s.split(",") if s != ""],
help="List of modules to freeze, separated by a comma.",
)
# front end related
parser.add_argument(
"--use-frontend",
type=strtobool,
default=False,
help="The flag to switch to use frontend system.",
)
# WPE related
parser.add_argument(
"--use-wpe",
type=strtobool,
default=False,
help="Apply Weighted Prediction Error",
)
parser.add_argument(
"--wtype",
default="blstmp",
type=str,
choices=[
"lstm",
"blstm",
"lstmp",
"blstmp",
"vgglstmp",
"vggblstmp",
"vgglstm",
"vggblstm",
"gru",
"bgru",
"grup",
"bgrup",
"vgggrup",
"vggbgrup",
"vgggru",
"vggbgru",
],
help="Type of encoder network architecture "
"of the mask estimator for WPE. "
"",
)
parser.add_argument("--wlayers", type=int, default=2, help="")
parser.add_argument("--wunits", type=int, default=300, help="")
parser.add_argument("--wprojs", type=int, default=300, help="")
parser.add_argument("--wdropout-rate", type=float, default=0.0, help="")
parser.add_argument("--wpe-taps", type=int, default=5, help="")
parser.add_argument("--wpe-delay", type=int, default=3, help="")
parser.add_argument(
"--use-dnn-mask-for-wpe",
type=strtobool,
default=False,
help="Use DNN to estimate the power spectrogram. "
"This option is experimental.",
)
# Beamformer related
parser.add_argument("--use-beamformer", type=strtobool, default=True, help="")
parser.add_argument(
"--btype",
default="blstmp",
type=str,
choices=[
"lstm",
"blstm",
"lstmp",
"blstmp",
"vgglstmp",
"vggblstmp",
"vgglstm",
"vggblstm",
"gru",
"bgru",
"grup",
"bgrup",
"vgggrup",
"vggbgrup",
"vgggru",
"vggbgru",
],
help="Type of encoder network architecture "
"of the mask estimator for Beamformer.",
)
parser.add_argument("--blayers", type=int, default=2, help="")
parser.add_argument("--bunits", type=int, default=300, help="")
parser.add_argument("--bprojs", type=int, default=300, help="")
parser.add_argument("--badim", type=int, default=320, help="")
parser.add_argument(
"--bnmask",
type=int,
default=2,
help="Number of beamforming masks, " "default is 2 for [speech, noise].",
)
parser.add_argument(
"--ref-channel",
type=int,
default=-1,
help="The reference channel used for beamformer. "
"By default, the channel is estimated by DNN.",
)
parser.add_argument("--bdropout-rate", type=float, default=0.0, help="")
# Feature transform: Normalization
parser.add_argument(
"--stats-file",
type=str,
default=None,
help="The stats file for the feature normalization",
)
parser.add_argument(
"--apply-uttmvn",
type=strtobool,
default=True,
help="Apply utterance level mean " "variance normalization.",
)
parser.add_argument("--uttmvn-norm-means", type=strtobool, default=True, help="")
parser.add_argument("--uttmvn-norm-vars", type=strtobool, default=False, help="")
# Feature transform: Fbank
parser.add_argument(
"--fbank-fs",
type=int,
default=16000,
help="The sample frequency used for " "the mel-fbank creation.",
)
parser.add_argument(
"--n-mels", type=int, default=80, help="The number of mel-frequency bins."
)
parser.add_argument("--fbank-fmin", type=float, default=0.0, help="")
parser.add_argument("--fbank-fmax", type=float, default=None, help="")
return parser
def main(cmd_args):
"""Run the main training function."""
parser = get_parser()
args, _ = parser.parse_known_args(cmd_args)
if args.backend == "chainer" and args.train_dtype != "float32":
raise NotImplementedError(
f"chainer backend does not support --train-dtype {args.train_dtype}."
"Use --dtype float32."
)
if args.ngpu == 0 and args.train_dtype in ("O0", "O1", "O2", "O3", "float16"):
raise ValueError(
f"--train-dtype {args.train_dtype} does not support the CPU backend."
)
from espnet.utils.dynamic_import import dynamic_import
if args.model_module is None:
model_module = "espnet.nets." + args.backend + "_backend.e2e_asr:E2E"
else:
model_module = args.model_module
model_class = dynamic_import(model_module)
model_class.add_arguments(parser)
args = parser.parse_args(cmd_args)
args.model_module = model_module
if "chainer_backend" in args.model_module:
args.backend = "chainer"
if "pytorch_backend" in args.model_module:
args.backend = "pytorch"
# logging info
if args.verbose > 0:
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
else:
logging.basicConfig(
level=logging.WARN,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
logging.warning("Skip DEBUG/INFO messages")
# If --ngpu is not given,
# 1. if CUDA_VISIBLE_DEVICES is set, all visible devices
# 2. if nvidia-smi exists, use all devices
# 3. else ngpu=0
if args.ngpu is None:
cvd = os.environ.get("CUDA_VISIBLE_DEVICES")
if cvd is not None:
ngpu = len(cvd.split(","))
else:
logging.warning("CUDA_VISIBLE_DEVICES is not set.")
try:
p = subprocess.run(
["nvidia-smi", "-L"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
except (subprocess.CalledProcessError, FileNotFoundError):
ngpu = 0
else:
ngpu = len(p.stderr.decode().split("\n")) - 1
else:
if is_torch_1_2_plus and args.ngpu != 1:
logging.debug(
"There are some bugs with multi-GPU processing in PyTorch 1.2+"
+ " (see https://github.com/pytorch/pytorch/issues/21108)"
)
ngpu = args.ngpu
logging.info(f"ngpu: {ngpu}")
# display PYTHONPATH
logging.info("python path = " + os.environ.get("PYTHONPATH", "(None)"))
# set random seed
logging.info("random seed = %d" % args.seed)
random.seed(args.seed)
np.random.seed(args.seed)
# load dictionary for debug log
if args.dict is not None:
with open(args.dict, "rb") as f:
dictionary = f.readlines()
char_list = [entry.decode("utf-8").split(" ")[0] for entry in dictionary]
char_list.insert(0, "<blank>")
char_list.append("<eos>")
# for non-autoregressive training using Transformer
if hasattr(args, "decoder_mode") and args.decoder_mode == "maskctc":
char_list.append("<mask>")
args.char_list = char_list
else:
args.char_list = None
# train
logging.info("backend = " + args.backend)
if args.num_spkrs == 1:
if args.backend == "chainer":
from espnet.asr.chainer_backend.asr import train
train(args)
elif args.backend == "pytorch":
from espnet.asr.pytorch_backend.asr import train
train(args)
else:
raise ValueError("Only chainer and pytorch are supported.")
else:
# FIXME(kamo): Support --model-module
if args.backend == "pytorch":
from espnet.asr.pytorch_backend.asr_mix import train
train(args)
else:
raise ValueError("Only pytorch is supported.")
if __name__ == "__main__":
main(sys.argv[1:])
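# Illustrative sketch (not part of the original script): the parser returned by
# get_parser() can also be driven programmatically, e.g. from a wrapper script.
# The paths below are hypothetical placeholders.
#
#     parser = get_parser(required=False)
#     args, _ = parser.parse_known_args(
#         [
#             "--backend", "pytorch",
#             "--outdir", "exp/demo",
#             "--dict", "data/lang_char/train_units.txt",
#             "--ngpu", "0",
#         ]
#     )
#     print(args.backend, args.outdir, args.ngpu)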
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES",
"PYTHONPATH"
] |
[]
|
["CUDA_VISIBLE_DEVICES", "PYTHONPATH"]
|
python
| 2 | 0 | |
locallibrary/settings.py
|
"""Django settings for locallibrary project.
Generated by 'django-admin startproject' using Django 2.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
#SECRET_KEY = 'cg#p$g+j9tax!#a3cup@1$8obt2_+&k3q+pmu)5%asj6yjpkag'
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', 'cg#p$g+j9tax!#a3cup@1$8obt2_+&k3q+pmu)5%asj6yjpkag')
# SECURITY WARNING: don't run with debug turned on in production!
#DEBUG = True
# bool() is True for any non-empty string (including 'False'), so compare explicitly.
DEBUG = os.environ.get('DJANGO_DEBUG', '') != 'False'
# Set hosts to allow any app on Heroku and the local testing URL
ALLOWED_HOSTS = ['.herokuapp.com','127.0.0.1','chainsaw2100.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Add our new application
'catalog.apps.CatalogConfig', #This object was created for us in /catalog/apps.py
'mathfilters'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'locallibrary.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['./templates',],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'locallibrary.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Redirect to home URL after login (Default redirects to /accounts/profile/)
LOGIN_REDIRECT_URL = '/'
# Add to test email:
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Heroku: Update database configuration from $DATABASE_URL.
import dj_database_url
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
# The absolute path to the directory where collectstatic will collect static files for deployment.
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
# The URL to use when referring to static files (where they will be served from)
STATIC_URL = '/static/'
# Static file serving.
# http://whitenoise.evans.io/en/stable/django.html#django-middleware
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
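# Illustrative note (not part of the original settings): in production the two
# environment variables read above are expected to be set before the server starts,
# for example:
#
#     export DJANGO_SECRET_KEY='<a long random string>'
#     export DJANGO_DEBUG=False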
|
[] |
[] |
[
"DJANGO_DEBUG",
"DJANGO_SECRET_KEY"
] |
[]
|
["DJANGO_DEBUG", "DJANGO_SECRET_KEY"]
|
python
| 2 | 0 | |
services/community/api/router/routes.go
|
/*
* Licensed under the Apache License, Version 2.0 (the “License”);
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an “AS IS” BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package router
import (
"fmt"
"net/http"
"os"
"github.com/gorilla/mux"
"crapi.proj/goservice/api/config"
"crapi.proj/goservice/api/controllers"
"crapi.proj/goservice/api/middlewares"
)
type Server config.Server
var controller = controllers.Server{}
//InitializeRoutes initializes the URL routes, with or without authentication middleware
func (server *Server) InitializeRoutes() *mux.Router {
controller.DB = server.DB
controller.Client = server.Client
server.Router.Use(middlewares.AccessControlMiddleware)
// Post Route
server.Router.HandleFunc("/community/api/v2/community/posts/recent", middlewares.SetMiddlewareJSON(middlewares.SetMiddlewareAuthentication(controller.GetPost, server.DB))).Methods("GET", "OPTIONS")
server.Router.HandleFunc("/community/api/v2/community/posts/{postID}", middlewares.SetMiddlewareJSON(middlewares.SetMiddlewareAuthentication(controller.GetPostByID, server.DB))).Methods("GET", "OPTIONS")
server.Router.HandleFunc("/community/api/v2/community/posts", middlewares.SetMiddlewareJSON(middlewares.SetMiddlewareAuthentication(controller.AddNewPost, server.DB))).Methods("POST", "OPTIONS")
server.Router.HandleFunc("/community/api/v2/community/posts/{postID}/comment", middlewares.SetMiddlewareJSON(middlewares.SetMiddlewareAuthentication(controller.Comment, server.DB))).Methods("POST", "OPTIONS")
//Coupon Route
server.Router.HandleFunc("/community/api/v2/coupon/new-coupon", middlewares.SetMiddlewareJSON(middlewares.SetMiddlewareAuthentication(controller.AddNewCoupon, server.DB))).Methods("POST", "OPTIONS")
server.Router.HandleFunc("/community/api/v2/coupon/validate-coupon", middlewares.SetMiddlewareJSON(middlewares.SetMiddlewareAuthentication(controller.ValidateCoupon, server.DB))).Methods("POST", "OPTIONS")
//Health
server.Router.HandleFunc("/community/home", middlewares.SetMiddlewareJSON(controller.Home)).Methods("GET")
return server.Router
}
//Run starts the HTTP server on the given address.
func (server *Server) Run(addr string) {
fmt.Println("Listening to port "+ os.Getenv("SERVER_PORT"))
fmt.Println(http.ListenAndServe(addr, server.Router))
}
|
[
"\"SERVER_PORT\""
] |
[] |
[
"SERVER_PORT"
] |
[]
|
["SERVER_PORT"]
|
go
| 1 | 0 | |
tensorflow/python/ops/nn_ops.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrappers for primitive Neural Net (NN) Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numbers
import os
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables as variables_lib
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_nn_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.platform import device_context
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.deprecation import deprecated_args
from tensorflow.python.util.deprecation import deprecated_argument_lookup
from tensorflow.python.util.tf_export import tf_export
# Aliases for some automatically-generated names.
local_response_normalization = gen_nn_ops.lrn
# pylint: disable=protected-access
# Acceptable channels last formats (robust to H, W, D order).
_CHANNELS_LAST_FORMATS = frozenset({
"NWC", "NHC", "NHWC", "NWHC", "NDHWC", "NDWHC", "NHDWC", "NHWDC", "NWDHC",
"NWHDC"
})
def _get_sequence(value, n, channel_index, name):
"""Formats a value input for gen_nn_ops."""
# Performance is fast-pathed for common cases:
# `None`, `list`, `tuple` and `int`.
if value is None:
return [1] * (n + 2)
# Always convert `value` to a `list`.
if isinstance(value, list):
pass
elif isinstance(value, tuple):
value = list(value)
elif isinstance(value, int):
value = [value]
elif not isinstance(value, collections_abc.Sized):
value = [value]
else:
value = list(value) # Try casting to a list.
len_value = len(value)
# Fully specified, including batch and channel dims.
if len_value == n + 2:
return value
# Apply value to spatial dims only.
if len_value == 1:
value = value * n # Broadcast to spatial dimensions.
elif len_value != n:
raise ValueError("{} should be of length 1, {} or {} but was {}".format(
name, n, n + 2, len_value))
# Add batch and channel dims (always 1).
if channel_index == 1:
return [1, 1] + value
else:
return [1] + value + [1]
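# --- Illustrative sketch (added for exposition; not part of the original
# module). The helper below is hypothetical and never called; it only shows how
# `_get_sequence` expands a scalar or per-spatial-dim value into a full
# batch/spatial/channel sequence.
def _example_get_sequence():
  # A scalar stride of 2 for a 2-D channels-last op becomes [1, 2, 2, 1].
  assert _get_sequence(2, 2, 3, "strides") == [1, 2, 2, 1]
  # For channels-first layouts (channel_index == 1) the batch/channel 1s lead.
  assert _get_sequence([2, 3], 2, 1, "strides") == [1, 1, 2, 3]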
def _non_atrous_convolution(
input, # pylint: disable=redefined-builtin
filter, # pylint: disable=redefined-builtin
padding,
data_format=None, # pylint: disable=redefined-builtin
strides=None,
name=None):
"""Computes sums of N-D convolutions (actually cross correlation).
It is required that 1 <= N <= 3.
This is used to implement the more generic `convolution` function, which
extends the interface of this function with a `dilation_rate` parameter.
Args:
input: Rank N+2 tensor of type T of shape
`[batch_size] + input_spatial_shape + [in_channels]` if `data_format`
does not start with `"NC"`, or
`[batch_size, in_channels] + input_spatial_shape` if `data_format` starts
with `"NC"`.
filter: Rank N+2 tensor of type T of shape
`filter_spatial_shape + [in_channels, out_channels]`. Rank of either
`input` or `filter` must be known.
padding: Padding method to use, must be either "VALID" or "SAME".
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
For N=3, the valid values are "NDHWC" (default) and "NCDHW".
strides: Sequence of N positive integers, defaults to `[1] * N`.
name: Name prefix to use.
Returns:
Rank N+2 tensor of type T of shape
`[batch_size] + output_spatial_shape + [out_channels]`, where
if padding == "SAME":
output_spatial_shape = input_spatial_shape
if padding == "VALID":
output_spatial_shape = input_spatial_shape - filter_spatial_shape + 1.
Raises:
ValueError: if ranks are incompatible.
"""
with ops.name_scope(name, "non_atrous_convolution", [input, filter]) as scope:
input = ops.convert_to_tensor(input, name="input") # pylint: disable=redefined-builtin
input_shape = input.shape
filter = ops.convert_to_tensor(filter, name="filter") # pylint: disable=redefined-builtin
filter_shape = filter.shape
op = _NonAtrousConvolution(
input_shape,
filter_shape=filter_shape,
padding=padding,
data_format=data_format,
strides=strides,
name=scope)
return op(input, filter)
class _NonAtrousConvolution(object):
"""Helper class for _non_atrous_convolution.
Note that this class assumes that shapes of input and filter passed to
`__call__` are compatible with `input_shape` and filter_shape passed to the
constructor.
Arguments:
input_shape: static input shape, i.e. input.shape.
filter_shape: static filter shape, i.e. filter.shape.
padding: see _non_atrous_convolution.
data_format: see _non_atrous_convolution.
strides: see _non_atrous_convolution.
name: see _non_atrous_convolution.
num_batch_dims: (Optional.) The number of batch dimensions in the input;
if not provided, the default of `1` is used.
"""
def __init__(
self,
input_shape,
filter_shape,
padding,
data_format=None,
strides=None,
name=None,
num_batch_dims=1):
# filter shape is always rank num_spatial_dims + 2
# and num_spatial_dims == input_shape.ndims - num_batch_dims - 1
if input_shape.ndims is not None:
filter_shape = filter_shape.with_rank(
input_shape.ndims - num_batch_dims + 1)
self.padding = padding
self.name = name
# input shape is == num_spatial_dims + num_batch_dims + 1
# and filter_shape is always rank num_spatial_dims + 2
if filter_shape.ndims is not None:
input_shape = input_shape.with_rank(
filter_shape.ndims + num_batch_dims - 1)
if input_shape.ndims is None:
raise ValueError(
"Rank of convolution must be known, but saw input_shape.ndims == {}"
.format(input_shape.ndims))
if input_shape.ndims < 3 or input_shape.ndims - num_batch_dims + 1 > 5:
raise ValueError(
"`input_shape.ndims - num_batch_dims + 1` must be at least 3 and at "
"most 5 but saw `input_shape.ndims == {}` and `num_batch_dims == {}`"
.format(input_shape.ndims, num_batch_dims))
conv_dims = input_shape.ndims - num_batch_dims - 1
if strides is None:
strides = [1] * conv_dims
elif len(strides) != conv_dims:
raise ValueError("len(strides)=%d, but should be %d" % (len(strides),
conv_dims))
if conv_dims == 1:
# conv1d uses the 2-d data format names
if data_format is None:
data_format = "NWC"
elif data_format not in {"NCW", "NWC", "NCHW", "NHWC"}:
raise ValueError("data_format must be \"NWC\" or \"NCW\".")
self.strides = strides[0]
self.data_format = data_format
self.conv_op = self._conv1d
elif conv_dims == 2:
if data_format is None or data_format == "NHWC":
data_format = "NHWC"
strides = [1] + list(strides) + [1]
elif data_format == "NCHW":
strides = [1, 1] + list(strides)
else:
raise ValueError("data_format must be \"NHWC\" or \"NCHW\".")
self.strides = strides
self.data_format = data_format
self.conv_op = conv2d
elif conv_dims == 3:
if data_format is None or data_format == "NDHWC":
strides = [1] + list(strides) + [1]
elif data_format == "NCDHW":
strides = [1, 1] + list(strides)
else:
raise ValueError("data_format must be \"NDHWC\" or \"NCDHW\". Have: %s"
% data_format)
self.strides = strides
self.data_format = data_format
self.conv_op = _conv3d_expanded_batch
# Note that we need this adapter since argument names for conv1d don't match
# those for gen_nn_ops.conv2d and gen_nn_ops.conv3d.
# pylint: disable=redefined-builtin
def _conv1d(self, input, filter, strides, padding, data_format, name):
return conv1d(
value=input,
filters=filter,
stride=strides,
padding=padding,
data_format=data_format,
name=name)
# pylint: enable=redefined-builtin
def __call__(self, inp, filter): # pylint: disable=redefined-builtin
return self.conv_op(
input=inp,
filter=filter,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
name=self.name)
def squeeze_batch_dims(inp, op, inner_rank, name=None):
"""Returns `unsqueeze_batch(op(squeeze_batch(inp)))`.
Where `squeeze_batch` reshapes `inp` to shape
`[prod(inp.shape[:-inner_rank])] + inp.shape[-inner_rank:]`
and `unsqueeze_batch` does the reverse reshape but on the output.
Args:
inp: A tensor with dims `batch_shape + inner_shape` where `inner_shape`
is length `inner_rank`.
op: A callable that takes a single input tensor and returns a single
output tensor.
inner_rank: A python integer.
name: A string.
Returns:
`unsqueeze_batch_op(squeeze_batch(inp))`.
"""
with ops.name_scope(name, "squeeze_batch_dims", [inp]):
inp = ops.convert_to_tensor(inp, name="input")
shape = inp.shape
inner_shape = shape[-inner_rank:]
if not inner_shape.is_fully_defined():
inner_shape = array_ops.shape(inp)[-inner_rank:]
batch_shape = shape[:-inner_rank]
if not batch_shape.is_fully_defined():
batch_shape = array_ops.shape(inp)[:-inner_rank]
if isinstance(inner_shape, tensor_shape.TensorShape):
inp_reshaped = array_ops.reshape(inp, [-1] + inner_shape.as_list())
else:
inp_reshaped = array_ops.reshape(
inp, array_ops.concat(([-1], inner_shape), axis=-1))
out_reshaped = op(inp_reshaped)
out_inner_shape = out_reshaped.shape[-inner_rank:]
if not out_inner_shape.is_fully_defined():
out_inner_shape = array_ops.shape(out_reshaped)[-inner_rank:]
out = array_ops.reshape(
out_reshaped, array_ops.concat((batch_shape, out_inner_shape), axis=-1))
out.set_shape(inp.shape[:-inner_rank] + out.shape[-inner_rank:])
return out
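# --- Illustrative sketch (added for exposition; not part of the original
# module). Hypothetical, never-called helper showing how `squeeze_batch_dims`
# lets an op written for one batch dimension run over arbitrary leading batch
# dimensions.
def _example_squeeze_batch_dims():
  x = array_ops.ones([4, 5, 6, 7])  # batch_shape=[4, 5], inner_shape=[6, 7]
  # The op sees a [20, 6, 7] tensor; the result is reshaped back to [4, 5, 6, 7].
  return squeeze_batch_dims(x, lambda t: t * 2.0, inner_rank=2)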
@tf_export("nn.dilation2d", v1=[])
@dispatch.add_dispatch_support
def dilation2d_v2(
input, # pylint: disable=redefined-builtin
filters, # pylint: disable=redefined-builtin
strides,
padding,
data_format,
dilations,
name=None):
"""Computes the grayscale dilation of 4-D `input` and 3-D `filters` tensors.
The `input` tensor has shape `[batch, in_height, in_width, depth]` and the
`filters` tensor has shape `[filter_height, filter_width, depth]`, i.e., each
input channel is processed independently of the others with its own
structuring function. The `output` tensor has shape
`[batch, out_height, out_width, depth]`. The spatial dimensions of the output
tensor depend on the `padding` algorithm. We currently only support the
default "NHWC" `data_format`.
In detail, the grayscale morphological 2-D dilation is the max-sum correlation
(for consistency with `conv2d`, we use unmirrored filters):
output[b, y, x, c] =
max_{dy, dx} input[b,
strides[1] * y + rates[1] * dy,
strides[2] * x + rates[2] * dx,
c] +
filters[dy, dx, c]
Max-pooling is a special case when the filter has size equal to the pooling
kernel size and contains all zeros.
Note on duality: The dilation of `input` by the `filters` is equal to the
negation of the erosion of `-input` by the reflected `filters`.
Args:
input: A `Tensor`. Must be one of the following types: `float32`, `float64`,
`int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`,
`uint32`, `uint64`.
4-D with shape `[batch, in_height, in_width, depth]`.
filters: A `Tensor`. Must have the same type as `input`.
3-D with shape `[filter_height, filter_width, depth]`.
strides: A list of `ints` that has length `>= 4`.
The stride of the sliding window for each dimension of the input
tensor. Must be: `[1, stride_height, stride_width, 1]`.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
data_format: A `string`, only `"NHWC"` is currently supported.
dilations: A list of `ints` that has length `>= 4`.
The input stride for atrous morphological dilation. Must be:
`[1, rate_height, rate_width, 1]`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
if data_format != "NHWC":
raise ValueError("Data formats other than NHWC are not yet supported")
return gen_nn_ops.dilation2d(input=input,
filter=filters,
strides=strides,
rates=dilations,
padding=padding,
name=name)
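# --- Illustrative sketch (added for exposition; not part of the original
# module). Hypothetical, never-called helper: with an all-zero structuring
# element, grayscale dilation reduces to a sliding-window maximum.
def _example_dilation2d():
  image = array_ops.ones([1, 5, 5, 1])
  structuring_element = array_ops.zeros([3, 3, 1])
  return dilation2d_v2(image, structuring_element, strides=[1, 1, 1, 1],
                       padding="SAME", data_format="NHWC",
                       dilations=[1, 1, 1, 1])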
@tf_export(v1=["nn.dilation2d"])
@dispatch.add_dispatch_support
def dilation2d_v1( # pylint: disable=missing-docstring
input, # pylint: disable=redefined-builtin
filter=None, # pylint: disable=redefined-builtin
strides=None,
rates=None,
padding=None,
name=None,
filters=None,
dilations=None):
filter = deprecated_argument_lookup("filters", filters, "filter", filter)
rates = deprecated_argument_lookup("dilations", dilations, "rates", rates)
return gen_nn_ops.dilation2d(input, filter, strides, rates, padding, name)
dilation2d_v1.__doc__ = gen_nn_ops.dilation2d.__doc__
@tf_export("nn.with_space_to_batch")
@dispatch.add_dispatch_support
def with_space_to_batch(
input, # pylint: disable=redefined-builtin
dilation_rate,
padding,
op,
filter_shape=None,
spatial_dims=None,
data_format=None):
"""Performs `op` on the space-to-batch representation of `input`.
This has the effect of transforming sliding window operations into the
corresponding "atrous" operation in which the input is sampled at the
specified `dilation_rate`.
In the special case that `dilation_rate` is uniformly 1, this simply returns:
op(input, num_spatial_dims, padding)
Otherwise, it returns:
batch_to_space_nd(
op(space_to_batch_nd(input, adjusted_dilation_rate, adjusted_paddings),
num_spatial_dims,
"VALID")
adjusted_dilation_rate,
adjusted_crops),
where:
adjusted_dilation_rate is an int64 tensor of shape [max(spatial_dims)],
adjusted_{paddings,crops} are int64 tensors of shape [max(spatial_dims), 2]
defined as follows:
We first define two int64 tensors `paddings` and `crops` of shape
`[num_spatial_dims, 2]` based on the value of `padding` and the spatial
dimensions of the `input`:
If `padding = "VALID"`, then:
paddings, crops = required_space_to_batch_paddings(
input_shape[spatial_dims],
dilation_rate)
If `padding = "SAME"`, then:
dilated_filter_shape =
filter_shape + (filter_shape - 1) * (dilation_rate - 1)
paddings, crops = required_space_to_batch_paddings(
input_shape[spatial_dims],
dilation_rate,
[(dilated_filter_shape - 1) // 2,
dilated_filter_shape - 1 - (dilated_filter_shape - 1) // 2])
Because `space_to_batch_nd` and `batch_to_space_nd` assume that the spatial
dimensions are contiguous starting at the second dimension, but the specified
`spatial_dims` may not be, we must adjust `dilation_rate`, `paddings` and
`crops` in order to be usable with these operations. For a given dimension,
if the block size is 1, and both the starting and ending padding and crop
amounts are 0, then space_to_batch_nd effectively leaves that dimension alone,
which is what is needed for dimensions not part of `spatial_dims`.
Furthermore, `space_to_batch_nd` and `batch_to_space_nd` handle this case
efficiently for any number of leading and trailing dimensions.
For 0 <= i < len(spatial_dims), we assign:
adjusted_dilation_rate[spatial_dims[i] - 1] = dilation_rate[i]
adjusted_paddings[spatial_dims[i] - 1, :] = paddings[i, :]
adjusted_crops[spatial_dims[i] - 1, :] = crops[i, :]
All unassigned values of `adjusted_dilation_rate` default to 1, while all
unassigned values of `adjusted_paddings` and `adjusted_crops` default to 0.
Note in the case that `dilation_rate` is not uniformly 1, specifying "VALID"
padding is equivalent to specifying `padding = "SAME"` with a filter_shape of
`[1]*N`.
Advanced usage. Note the following optimization: A sequence of
`with_space_to_batch` operations with identical (not uniformly 1)
`dilation_rate` parameters and "VALID" padding
net = with_space_to_batch(net, dilation_rate, "VALID", op_1)
...
net = with_space_to_batch(net, dilation_rate, "VALID", op_k)
can be combined into a single `with_space_to_batch` operation as follows:
def combined_op(converted_input, num_spatial_dims, _):
result = op_1(converted_input, num_spatial_dims, "VALID")
...
result = op_k(result, num_spatial_dims, "VALID")
net = with_space_to_batch(net, dilation_rate, "VALID", combined_op)
This eliminates the overhead of `k-1` calls to `space_to_batch_nd` and
`batch_to_space_nd`.
Similarly, a sequence of `with_space_to_batch` operations with identical (not
uniformly 1) `dilation_rate` parameters, "SAME" padding, and odd filter
dimensions
net = with_space_to_batch(net, dilation_rate, "SAME", op_1, filter_shape_1)
...
net = with_space_to_batch(net, dilation_rate, "SAME", op_k, filter_shape_k)
can be combined into a single `with_space_to_batch` operation as follows:
def combined_op(converted_input, num_spatial_dims, _):
result = op_1(converted_input, num_spatial_dims, "SAME")
...
result = op_k(result, num_spatial_dims, "SAME")
net = with_space_to_batch(net, dilation_rate, "VALID", combined_op)
Args:
input: Tensor of rank > max(spatial_dims).
dilation_rate: int32 Tensor of *known* shape [num_spatial_dims].
padding: str constant equal to "VALID" or "SAME"
op: Function that maps (input, num_spatial_dims, padding) -> output
filter_shape: If padding = "SAME", specifies the shape of the convolution
kernel/pooling window as an integer Tensor of shape [>=num_spatial_dims].
If padding = "VALID", filter_shape is ignored and need not be specified.
spatial_dims: Monotonically increasing sequence of `num_spatial_dims`
integers (which are >= 1) specifying the spatial dimensions of `input`
and output. Defaults to: `range(1, num_spatial_dims+1)`.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
For N=3, the valid values are "NDHWC" (default) and "NCDHW".
Returns:
The output Tensor as described above, dimensions will vary based on the op
provided.
Raises:
ValueError: if `padding` is invalid or the arguments are incompatible.
ValueError: if `spatial_dims` are invalid.
"""
input = ops.convert_to_tensor(input, name="input") # pylint: disable=redefined-builtin
input_shape = input.shape
def build_op(num_spatial_dims, padding):
return lambda inp, _: op(inp, num_spatial_dims, padding)
new_op = _WithSpaceToBatch(
input_shape,
dilation_rate,
padding,
build_op,
filter_shape=filter_shape,
spatial_dims=spatial_dims,
data_format=data_format)
return new_op(input, None)
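# --- Illustrative sketch (added for exposition; not part of the original
# module). Hypothetical, never-called helper: running a plain VALID max pool on
# the space-to-batch representation of the input turns it into a rate-2
# "atrous" pooling operation.
def _example_with_space_to_batch():
  def pool_op(converted_input, num_spatial_dims, padding):
    del num_spatial_dims  # Always 2 in this sketch.
    return gen_nn_ops.max_pool(converted_input, ksize=[1, 2, 2, 1],
                               strides=[1, 1, 1, 1], padding=padding)
  x = array_ops.ones([1, 8, 8, 1])
  return with_space_to_batch(x, dilation_rate=[2, 2], padding="VALID",
                             op=pool_op)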
class _WithSpaceToBatch(object):
"""Helper class for with_space_to_batch.
Note that this class assumes that shapes of input and filter passed to
`__call__` are compatible with `input_shape`, `filter_shape`, and
`spatial_dims` passed to the constructor.
Arguments:
input_shape: static shape of input. i.e. input.shape.
dilation_rate: see `with_space_to_batch`.
padding: see `with_space_to_batch`.
build_op: Function that maps (num_spatial_dims, paddings) -> (function that
maps (input, filter) -> output).
filter_shape: see `with_space_to_batch`.
spatial_dims: see `with_space_to_batch`.
data_format: see `with_space_to_batch`.
num_batch_dims: (Optional). Number of batch dims in `input_shape`.
"""
def __init__(self,
input_shape,
dilation_rate,
padding,
build_op,
filter_shape=None,
spatial_dims=None,
data_format=None,
num_batch_dims=1):
"""Helper class for _with_space_to_batch."""
dilation_rate = ops.convert_to_tensor(
dilation_rate, dtypes.int32, name="dilation_rate")
if dilation_rate.shape.ndims not in (None, 1):
raise ValueError(
"rate must be rank 1 but saw {}".format(dilation_rate.shape.ndims))
if not dilation_rate.shape.is_fully_defined():
raise ValueError("rate must have known shape, but saw {}"
.format(dilation_rate.shape))
num_spatial_dims = dilation_rate.shape.dims[0].value
if data_format is not None and data_format.startswith("NC"):
starting_spatial_dim = num_batch_dims + 1
else:
starting_spatial_dim = num_batch_dims
if spatial_dims is None:
spatial_dims = range(starting_spatial_dim,
num_spatial_dims + starting_spatial_dim)
orig_spatial_dims = list(spatial_dims)
spatial_dims = sorted(set(int(x) for x in orig_spatial_dims))
if spatial_dims != orig_spatial_dims or any(x < 1 for x in spatial_dims):
raise ValueError(
"spatial_dims must be a monotonically increasing sequence of "
"positive integers, but saw: {}".format(orig_spatial_dims))
if data_format is not None and data_format.startswith("NC"):
expected_input_rank = spatial_dims[-1]
else:
expected_input_rank = spatial_dims[-1] + 1
try:
input_shape.with_rank_at_least(expected_input_rank)
except ValueError:
raise ValueError(
"input tensor must have rank at least {}, but saw rank {}"
.format(expected_input_rank, input_shape.ndims))
const_rate = tensor_util.constant_value(dilation_rate)
rate_or_const_rate = dilation_rate
if const_rate is not None:
rate_or_const_rate = const_rate
if np.any(const_rate < 1):
raise ValueError("dilation_rate must be positive, but saw: {}"
.format(const_rate))
if np.all(const_rate == 1):
self.call = build_op(num_spatial_dims, padding)
return
padding, explicit_paddings = convert_padding(padding)
# We have two padding contributions. The first is used for converting "SAME"
# to "VALID". The second is required so that the height and width of the
# zero-padded value tensor are multiples of rate.
# Padding required to reduce to "VALID" convolution
if padding == "SAME":
if filter_shape is None:
raise ValueError("filter_shape must be specified for SAME padding")
filter_shape = ops.convert_to_tensor(filter_shape, name="filter_shape")
const_filter_shape = tensor_util.constant_value(filter_shape)
if const_filter_shape is not None:
filter_shape = const_filter_shape
self.base_paddings = _with_space_to_batch_base_paddings(
const_filter_shape, num_spatial_dims, rate_or_const_rate)
else:
self.num_spatial_dims = num_spatial_dims
self.rate_or_const_rate = rate_or_const_rate
self.base_paddings = None
elif padding == "VALID":
self.base_paddings = np.zeros([num_spatial_dims, 2], np.int32)
elif padding == "EXPLICIT":
base_paddings = (np.array(explicit_paddings)
.reshape([num_spatial_dims + 2, 2]))
# Remove batch and channel dimensions
if data_format is not None and data_format.startswith("NC"):
self.base_paddings = base_paddings[2:]
else:
self.base_paddings = base_paddings[1:-1]
else:
raise ValueError("Invalid padding method %r" % padding)
self.input_shape = input_shape
self.spatial_dims = spatial_dims
self.dilation_rate = dilation_rate
self.data_format = data_format
self.op = build_op(num_spatial_dims, "VALID")
self.call = self._with_space_to_batch_call
def _with_space_to_batch_call(self, inp, filter): # pylint: disable=redefined-builtin
"""Call functionality for with_space_to_batch."""
# Handle input whose shape is unknown during graph creation.
input_spatial_shape = None
input_shape = self.input_shape
spatial_dims = self.spatial_dims
if input_shape.ndims is not None:
input_shape_list = input_shape.as_list()
input_spatial_shape = [input_shape_list[i] for i in spatial_dims]
if input_spatial_shape is None or None in input_spatial_shape:
input_shape_tensor = array_ops.shape(inp)
input_spatial_shape = array_ops.stack(
[input_shape_tensor[i] for i in spatial_dims])
base_paddings = self.base_paddings
if base_paddings is None:
# base_paddings could not be computed at build time since static filter
# shape was not fully defined.
filter_shape = array_ops.shape(filter)
base_paddings = _with_space_to_batch_base_paddings(
filter_shape, self.num_spatial_dims, self.rate_or_const_rate)
paddings, crops = array_ops.required_space_to_batch_paddings(
input_shape=input_spatial_shape,
base_paddings=base_paddings,
block_shape=self.dilation_rate)
dilation_rate = _with_space_to_batch_adjust(self.dilation_rate, 1,
spatial_dims)
paddings = _with_space_to_batch_adjust(paddings, 0, spatial_dims)
crops = _with_space_to_batch_adjust(crops, 0, spatial_dims)
input_converted = array_ops.space_to_batch_nd(
input=inp, block_shape=dilation_rate, paddings=paddings)
result = self.op(input_converted, filter)
result_converted = array_ops.batch_to_space_nd(
input=result, block_shape=dilation_rate, crops=crops)
# Recover channel information for output shape if channels are not last.
if self.data_format is not None and self.data_format.startswith("NC"):
if not result_converted.shape.dims[1].value and filter is not None:
output_shape = result_converted.shape.as_list()
output_shape[1] = filter.shape[-1]
result_converted.set_shape(output_shape)
return result_converted
def __call__(self, inp, filter): # pylint: disable=redefined-builtin
return self.call(inp, filter)
def _with_space_to_batch_base_paddings(filter_shape, num_spatial_dims,
rate_or_const_rate):
"""Helper function to compute base_paddings."""
# Spatial dimensions of the filters and the upsampled filters in which we
# introduce (rate - 1) zeros between consecutive filter values.
filter_spatial_shape = filter_shape[:num_spatial_dims]
pad_extra_shape = (filter_spatial_shape - 1) * rate_or_const_rate
# When full_padding_shape is odd, we pad more at end, following the same
# convention as conv2d.
pad_extra_start = pad_extra_shape // 2
pad_extra_end = pad_extra_shape - pad_extra_start
base_paddings = array_ops.stack(
[[pad_extra_start[i], pad_extra_end[i]] for i in range(num_spatial_dims)])
return base_paddings
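# --- Illustrative sketch (added for exposition; not part of the original
# module). Hypothetical, never-called helper: a 3x3 filter at rate 2 has an
# effective extent of 5, so SAME padding needs 2 on each side of each spatial
# dimension.
def _example_base_paddings():
  return _with_space_to_batch_base_paddings(
      np.array([3, 3]), num_spatial_dims=2, rate_or_const_rate=2)
  # -> [[2, 2], [2, 2]]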
def _with_space_to_batch_adjust(orig, fill_value, spatial_dims):
"""Returns an `adjusted` version of `orig` based on `spatial_dims`.
Tensor of the same type as `orig` and with shape
`[max(spatial_dims), ...]` where:
adjusted[spatial_dims[i] - 1, ...] = orig[i, ...]
for 0 <= i < len(spatial_dims), and
adjusted[j, ...] = fill_value
for j != spatial_dims[i] - 1 for some i.
If `orig` is a constant value, then the result will be a constant value.
Args:
orig: Tensor of rank > max(spatial_dims).
fill_value: Numpy scalar (of same data type as `orig`) specifying the fill
value for non-spatial dimensions.
spatial_dims: See with_space_to_batch.
Returns:
`adjusted` tensor.
"""
fill_dims = orig.get_shape().as_list()[1:]
dtype = orig.dtype.as_numpy_dtype
parts = []
const_orig = tensor_util.constant_value(orig)
const_or_orig = const_orig if const_orig is not None else orig
prev_spatial_dim = 0
i = 0
while i < len(spatial_dims):
start_i = i
start_spatial_dim = spatial_dims[i]
if start_spatial_dim > 1:
# Fill in any gap from the previous spatial dimension (or dimension 1 if
# this is the first spatial dimension) with `fill_value`.
parts.append(
np.full(
[start_spatial_dim - 1 - prev_spatial_dim] + fill_dims,
fill_value,
dtype=dtype))
# Find the largest value of i such that:
# [spatial_dims[start_i], ..., spatial_dims[i]]
# == [start_spatial_dim, ..., start_spatial_dim + i - start_i],
# i.e. the end of a contiguous group of spatial dimensions.
while (i + 1 < len(spatial_dims) and
spatial_dims[i + 1] == spatial_dims[i] + 1):
i += 1
parts.append(const_or_orig[start_i:i + 1])
prev_spatial_dim = spatial_dims[i]
i += 1
if const_orig is not None:
return np.concatenate(parts)
else:
return array_ops.concat(parts, 0)
def _get_strides_and_dilation_rate(num_spatial_dims, strides, dilation_rate):
"""Helper function for verifying strides and dilation_rate arguments.
This is used by `convolution` and `pool`.
Args:
num_spatial_dims: int
strides: Optional. List of N ints >= 1. Defaults to [1]*N. If any value
of strides is > 1, then all values of dilation_rate must be 1.
dilation_rate: Optional. List of N ints >= 1. Defaults to [1]*N. If any
value of dilation_rate is > 1, then all values of strides must be 1.
Returns:
Normalized (strides, dilation_rate) as int32 numpy arrays of shape
[num_spatial_dims].
Raises:
ValueError: if the parameters are invalid.
"""
if dilation_rate is None:
dilation_rate = [1] * num_spatial_dims
elif len(dilation_rate) != num_spatial_dims:
raise ValueError("len(dilation_rate)=%d but should be %d" %
(len(dilation_rate), num_spatial_dims))
dilation_rate = np.array(dilation_rate, dtype=np.int32)
if np.any(dilation_rate < 1):
raise ValueError("all values of dilation_rate must be positive")
if strides is None:
strides = [1] * num_spatial_dims
elif len(strides) != num_spatial_dims:
raise ValueError("len(strides)=%d but should be %d" % (len(strides),
num_spatial_dims))
strides = np.array(strides, dtype=np.int32)
if np.any(strides < 1):
raise ValueError("all values of strides must be positive")
if np.any(strides > 1) and np.any(dilation_rate > 1):
raise ValueError(
"strides > 1 not supported in conjunction with dilation_rate > 1")
return strides, dilation_rate
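# --- Illustrative sketch (added for exposition; not part of the original
# module). Hypothetical, never-called helper: unspecified strides and
# dilation_rate default to ones, and mixing strides > 1 with dilation_rate > 1
# raises a ValueError.
def _example_strides_and_dilation_rate():
  strides, dilation_rate = _get_strides_and_dilation_rate(2, None, None)
  return strides.tolist(), dilation_rate.tolist()  # ([1, 1], [1, 1])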
@tf_export(v1=["nn.convolution"])
@dispatch.add_dispatch_support
def convolution(
input, # pylint: disable=redefined-builtin
filter, # pylint: disable=redefined-builtin
padding,
strides=None,
dilation_rate=None,
name=None,
data_format=None,
filters=None,
dilations=None): # pylint: disable=g-doc-args
"""Computes sums of N-D convolutions (actually cross-correlation).
This also supports either output striding via the optional `strides` parameter
or atrous convolution (also known as convolution with holes or dilated
convolution, based on the French word "trous" meaning holes in English) via
the optional `dilation_rate` parameter. Currently, however, output striding
is not supported for atrous convolutions.
Specifically, in the case that `data_format` does not start with "NC", given
a rank (N+2) `input` Tensor of shape
[num_batches,
input_spatial_shape[0],
...,
input_spatial_shape[N-1],
num_input_channels],
a rank (N+2) `filter` Tensor of shape
[spatial_filter_shape[0],
...,
spatial_filter_shape[N-1],
num_input_channels,
num_output_channels],
an optional `dilation_rate` tensor of shape [N] (defaulting to [1]*N)
specifying the filter upsampling/input downsampling rate, and an optional list
of N `strides` (defaulting [1]*N), this computes for each N-D spatial output
position (x[0], ..., x[N-1]):
```
output[b, x[0], ..., x[N-1], k] =
sum_{z[0], ..., z[N-1], q}
filter[z[0], ..., z[N-1], q, k] *
padded_input[b,
x[0]*strides[0] + dilation_rate[0]*z[0],
...,
x[N-1]*strides[N-1] + dilation_rate[N-1]*z[N-1],
q]
```
where b is the index into the batch, k is the output channel number, q is the
input channel number, and z is the N-D spatial offset within the filter. Here,
`padded_input` is obtained by zero padding the input using an effective
spatial filter shape of `(spatial_filter_shape-1) * dilation_rate + 1` and
output striding `strides` as described in the
[comment here](https://tensorflow.org/api_guides/python/nn#Convolution).
In the case that `data_format` does start with `"NC"`, the `input` and output
(but not the `filter`) are simply transposed as follows:
convolution(input, data_format, **kwargs) =
tf.transpose(convolution(tf.transpose(input, [0] + range(2,N+2) + [1]),
**kwargs),
[0, N+1] + range(1, N+1))
It is required that 1 <= N <= 3.
Args:
input: An (N+2)-D `Tensor` of type `T`, of shape
`[batch_size] + input_spatial_shape + [in_channels]` if data_format does
not start with "NC" (default), or
`[batch_size, in_channels] + input_spatial_shape` if data_format starts
with "NC".
filter: An (N+2)-D `Tensor` with the same type as `input` and shape
`spatial_filter_shape + [in_channels, out_channels]`.
padding: A string, either `"VALID"` or `"SAME"`. The padding algorithm.
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
strides: Optional. Sequence of N ints >= 1. Specifies the output stride.
Defaults to [1]*N. If any value of strides is > 1, then all values of
dilation_rate must be 1.
dilation_rate: Optional. Sequence of N ints >= 1. Specifies the filter
upsampling/input downsampling rate. In the literature, the same parameter
is sometimes called `input stride` or `dilation`. The effective filter
size used for the convolution will be `spatial_filter_shape +
(spatial_filter_shape - 1) * (rate - 1)`, obtained by inserting
(dilation_rate[i]-1) zeros between consecutive elements of the original
filter in each spatial dimension i. If any value of dilation_rate is > 1,
then all values of strides must be 1.
name: Optional name for the returned tensor.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
For N=3, the valid values are "NDHWC" (default) and "NCDHW".
Returns:
A `Tensor` with the same type as `input` of shape
`[batch_size] + output_spatial_shape + [out_channels]`
if data_format is None or does not start with "NC", or
`[batch_size, out_channels] + output_spatial_shape`
if data_format starts with "NC",
where `output_spatial_shape` depends on the value of `padding`.
If padding == "SAME":
output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])
If padding == "VALID":
output_spatial_shape[i] =
ceil((input_spatial_shape[i] -
(spatial_filter_shape[i]-1) * dilation_rate[i])
/ strides[i]).
Raises:
ValueError: If input/output depth does not match `filter` shape, if padding
is other than `"VALID"` or `"SAME"`, or if data_format is invalid.
"""
filter = deprecated_argument_lookup("filters", filters, "filter", filter)
dilation_rate = deprecated_argument_lookup(
"dilations", dilations, "dilation_rate", dilation_rate)
return convolution_internal(
input,
filter,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilation_rate,
name=name)
@tf_export("nn.convolution", v1=[])
@dispatch.add_dispatch_support
def convolution_v2( # pylint: disable=missing-docstring
input, # pylint: disable=redefined-builtin
filters,
strides=None,
padding="VALID",
data_format=None,
dilations=None,
name=None):
return convolution_internal(
input, # pylint: disable=redefined-builtin
filters,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilations,
name=name)
convolution_v2.__doc__ = deprecation.rewrite_argument_docstring(
deprecation.rewrite_argument_docstring(
convolution.__doc__, "dilation_rate", "dilations"),
"filter", "filters")
def convolution_internal(
input, # pylint: disable=redefined-builtin
filters,
strides=None,
padding="VALID",
data_format=None,
dilations=None,
name=None,
call_from_convolution=True,
num_spatial_dims=None):
"""Internal function which performs rank agnostic convolution.
Args:
input: See `convolution`.
filters: See `convolution`.
strides: See `convolution`.
padding: See `convolution`.
data_format: See `convolution`.
dilations: See `convolution`.
name: See `convolution`.
call_from_convolution: See `convolution`.
num_spatial_dims: (Optional.) It is an integer describing the
rank of the spatial dimensions. For `1-D`, `2-D` and `3-D` convolutions,
the value of `num_spatial_dims` is `1`, `2`, and `3`, respectively.
This argument is only required to disambiguate the rank of `batch_shape`
when `filter_shape.ndims is None` and `len(batch_shape) > 1`. For
backwards compatibility, if `num_spatial_dims is None` and
`filter_shape.ndims is None`, then `len(batch_shape)` is assumed to be
`1` (i.e., the input is expected to be
`[batch_size, num_channels] + input_spatial_shape`
or `[batch_size] + input_spatial_shape + [num_channels]`).
Returns:
A tensor of shape and dtype matching that of `input`.
Raises:
ValueError: If input and filter both have unknown shapes, or if
`num_spatial_dims` is provided and incompatible with the value
estimated from `filters.shape`.
"""
if (not isinstance(filters, variables_lib.Variable) and
not tensor_util.is_tensor(filters)):
with ops.name_scope("convolution_internal", None, [filters, input]):
filters = ops.convert_to_tensor(filters, name='filters')
if (not isinstance(input, ops.Tensor) and not tensor_util.is_tensor(input)):
with ops.name_scope("convolution_internal", None, [filters, input]):
input = ops.convert_to_tensor(input, name="input")
filters_rank = filters.shape.rank
inputs_rank = input.shape.rank
if num_spatial_dims is None:
if filters_rank:
num_spatial_dims = filters_rank - 2
elif inputs_rank:
num_spatial_dims = inputs_rank - 2
else:
raise ValueError("rank of input or filter must be known")
elif filters_rank and filters_rank - 2 != num_spatial_dims:
raise ValueError(
"inconsistent estimate of spatial dims ({}) vs. actual passed "
"num_spatial_dims ({}). n was estimated as len(filters.shape) - 2, "
"but filters shape is: {}".format(filters_rank, num_spatial_dims,
filters.shape))
if inputs_rank:
num_batch_dims = inputs_rank - num_spatial_dims - 1 # Channel dimension.
else:
num_batch_dims = 1 # By default, assume single batch dimension.
if num_spatial_dims not in {1, 2, 3}:
raise ValueError(
"num_spatial_dims (input.shape.ndims - num_batch_dims - 1) must be one "
"of 1, 2 or 3 but saw {}. num_batch_dims: {}.".format(
num_spatial_dims, num_batch_dims))
if data_format is None or data_format in _CHANNELS_LAST_FORMATS:
channel_index = num_batch_dims + num_spatial_dims
else:
channel_index = num_batch_dims
if dilations is None:
dilations = _get_sequence(dilations, num_spatial_dims, channel_index,
"dilations")
is_dilated_conv = False
else:
dilations = _get_sequence(dilations, num_spatial_dims, channel_index,
"dilations")
is_dilated_conv = any(i != 1 for i in dilations)
strides = _get_sequence(strides, num_spatial_dims, channel_index, "strides")
has_tpu_context = device_context.enclosing_tpu_context() is not None
if name:
default_name = None
elif not has_tpu_context or call_from_convolution:
default_name = "convolution"
elif num_spatial_dims == 2: # Most common case.
default_name = "Conv2D"
elif num_spatial_dims == 3:
default_name = "Conv3D"
else:
default_name = "conv1d"
with ops.name_scope(name, default_name, [input, filters]) as name:
# Fast path for TPU or if no dilation, as gradient only supported on TPU
# for dilations.
if not is_dilated_conv or has_tpu_context:
if num_spatial_dims == 2: # Most common case.
op = _conv2d_expanded_batch
elif num_spatial_dims == 3:
op = _conv3d_expanded_batch
else:
op = conv1d
return op(
input,
filters,
strides,
padding=padding,
data_format=data_format,
dilations=dilations,
name=name)
else:
if channel_index == 1:
strides = strides[2:]
dilations = dilations[2:]
else:
strides = strides[1:-1]
dilations = dilations[1:-1]
op = Convolution(
tensor_shape.as_shape(input.shape),
tensor_shape.as_shape(filters.shape),
padding,
strides=strides,
dilation_rate=dilations,
name=name,
data_format=data_format,
num_spatial_dims=num_spatial_dims)
return op(input, filters)
class Convolution(object):
"""Helper class for convolution.
Note that this class assumes that shapes of input and filter passed to
`__call__` are compatible with `input_shape`, `filter_shape`, and
`num_spatial_dims` passed to the constructor.
Arguments:
input_shape: static shape of input. i.e. input.shape. Its length is
`batch_shape + input_spatial_shape + [num_channels]` if `data_format`
does not start with `NC`, or
`batch_shape + [num_channels] + input_spatial_shape` if `data_format`
starts with `NC`.
filter_shape: static shape of the filter. i.e. filter.shape.
padding: The padding algorithm, must be "SAME" or "VALID".
strides: see convolution.
dilation_rate: see convolution.
name: see convolution.
data_format: A string or `None`. Specifies whether the channel dimension of
the `input` and output is the last dimension (if `data_format` is `None`
or does not start with `NC`), or the first post-batch dimension (i.e. if
`data_format` starts with `NC`).
num_spatial_dims: (Usually optional.) Python integer, the rank of the
spatial and channel dimensions. For `1-D`, `2-D` and `3-D` convolutions,
the value of `num_spatial_dims` is `1`, `2`, and `3`, respectively.
This argument is only required to disambiguate the rank of `batch_shape`
when `filter_shape.ndims is None` and `len(batch_shape) > 1`. For
backwards compatibility, if `num_spatial_dims is None` and
`filter_shape.ndims is None`, then `len(batch_shape)` is assumed to be
`1` (i.e., the input is expected to be
`[batch_size, num_channels] + input_spatial_shape`
or `[batch_size] + input_spatial_shape + [num_channels]`).
"""
def __init__(self,
input_shape,
filter_shape,
padding,
strides=None,
dilation_rate=None,
name=None,
data_format=None,
num_spatial_dims=None):
"""Helper function for convolution."""
num_batch_dims = None
filter_shape = tensor_shape.as_shape(filter_shape)
input_shape = tensor_shape.as_shape(input_shape)
if filter_shape.ndims is not None:
if (num_spatial_dims is not None and
filter_shape.ndims != num_spatial_dims + 2):
raise ValueError(
"Expected filter_shape.ndims == num_spatial_dims + 2, "
"but saw filter_shape.ndims == {} and num_spatial_dims == {}"
.format(filter_shape.ndims, num_spatial_dims))
else:
num_spatial_dims = filter_shape.ndims - 2
if input_shape.ndims is not None and num_spatial_dims is not None:
num_batch_dims = input_shape.ndims - num_spatial_dims - 1
if num_spatial_dims is None:
num_spatial_dims = input_shape.ndims - 2
else:
if input_shape.ndims is not None:
if input_shape.ndims < num_spatial_dims + 2:
raise ValueError(
"Expected input_shape.ndims >= num_spatial_dims + 2, but saw "
"input_shape.ndims == {} and num_spatial_dims == {}"
.format(input_shape.ndims, num_spatial_dims))
else:
if num_batch_dims is None:
num_batch_dims = input_shape.ndims - num_spatial_dims - 1
if num_spatial_dims is None:
raise ValueError(
"Cannot estimate num_spatial_dims since input_shape.ndims is None, "
"filter_shape.ndims is None, and argument num_spatial_dims is also "
"None.")
if num_batch_dims is None:
num_batch_dims = 1
if num_batch_dims < 1:
raise ValueError(
"num_batch_dims should be >= 1, but saw {}. num_batch_dims was "
"estimated as `input_shape.ndims - num_spatial_dims - 1` and "
"num_spatial_dims was either provided or estimated as "
"`filter_shape.ndims - 2`. input_shape.ndims: {}, "
"num_spatial_dims: {}, filter_shape.ndims: {}"
.format(num_batch_dims, input_shape.ndims, num_spatial_dims,
filter_shape.ndims))
if data_format is None or not data_format.startswith("NC"):
input_channels_dim = tensor_shape.dimension_at_index(
input_shape, num_spatial_dims + num_batch_dims)
spatial_dims = range(num_batch_dims, num_spatial_dims + num_batch_dims)
else:
input_channels_dim = tensor_shape.dimension_at_index(
input_shape, num_batch_dims)
spatial_dims = range(
num_batch_dims + 1, num_spatial_dims + num_batch_dims + 1)
filter_dim = tensor_shape.dimension_at_index(filter_shape, num_spatial_dims)
if not (input_channels_dim % filter_dim).is_compatible_with(0):
raise ValueError("The number of input channels is not divisible by the "
"corresponding number of output filters. Received: "
"input channels={}, output filters={}".format(
input_channels_dim, filter_dim))
strides, dilation_rate = _get_strides_and_dilation_rate(
num_spatial_dims, strides, dilation_rate)
self.input_shape = input_shape
self.filter_shape = filter_shape
self.data_format = data_format
self.strides = strides
self.padding = padding
self.name = name
self.dilation_rate = dilation_rate
self.num_batch_dims = num_batch_dims
self.num_spatial_dims = num_spatial_dims
self.conv_op = _WithSpaceToBatch(
input_shape,
dilation_rate=dilation_rate,
padding=padding,
build_op=self._build_op,
filter_shape=filter_shape,
spatial_dims=spatial_dims,
data_format=data_format,
num_batch_dims=num_batch_dims)
def _build_op(self, _, padding):
return _NonAtrousConvolution(
self.input_shape,
filter_shape=self.filter_shape,
padding=padding,
data_format=self.data_format,
strides=self.strides,
name=self.name,
num_batch_dims=self.num_batch_dims)
def __call__(self, inp, filter): # pylint: disable=redefined-builtin
# TPU convolution supports dilations greater than 1.
if device_context.enclosing_tpu_context() is not None:
return convolution_internal(
inp,
filter,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
dilations=self.dilation_rate,
name=self.name,
call_from_convolution=False,
num_spatial_dims=self.num_spatial_dims)
else:
return self.conv_op(inp, filter)
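# --- Illustrative sketch (added for exposition; not part of the original
# module). Hypothetical, never-called helper: build the shape-specialized
# `Convolution` helper once, then apply it to tensors with matching shapes.
def _example_convolution_helper():
  conv = Convolution(tensor_shape.TensorShape([None, 8, 8, 3]),
                     tensor_shape.TensorShape([3, 3, 3, 16]),
                     padding="SAME")
  image = array_ops.ones([1, 8, 8, 3])
  kernel = array_ops.ones([3, 3, 3, 16])
  return conv(image, kernel)  # -> shape [1, 8, 8, 16]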
@tf_export(v1=["nn.pool"])
@dispatch.add_dispatch_support
def pool(
input, # pylint: disable=redefined-builtin
window_shape,
pooling_type,
padding,
dilation_rate=None,
strides=None,
name=None,
data_format=None,
dilations=None):
"""Performs an N-D pooling operation.
In the case that `data_format` does not start with "NC", computes for
0 <= b < batch_size,
0 <= x[i] < output_spatial_shape[i],
0 <= c < num_channels:
```
output[b, x[0], ..., x[N-1], c] =
REDUCE_{z[0], ..., z[N-1]}
input[b,
x[0] * strides[0] - pad_before[0] + dilation_rate[0]*z[0],
...
x[N-1]*strides[N-1] - pad_before[N-1] + dilation_rate[N-1]*z[N-1],
c],
```
where the reduction function REDUCE depends on the value of `pooling_type`,
and pad_before is defined based on the value of `padding` as described in
the "returns" section of `tf.nn.convolution` for details.
The reduction never includes out-of-bounds positions.
In the case that `data_format` starts with `"NC"`, the `input` and output are
simply transposed as follows:
```
pool(input, data_format, **kwargs) =
tf.transpose(pool(tf.transpose(input, [0] + range(2,N+2) + [1]),
**kwargs),
[0, N+1] + range(1, N+1))
```
Args:
input: Tensor of rank N+2, of shape
`[batch_size] + input_spatial_shape + [num_channels]` if data_format does
not start with "NC" (default), or
`[batch_size, num_channels] + input_spatial_shape` if data_format starts
with "NC". Pooling happens over the spatial dimensions only.
window_shape: Sequence of N ints >= 1.
pooling_type: Specifies pooling operation, must be "AVG" or "MAX".
padding: The padding algorithm, must be "SAME" or "VALID".
See the "returns" section of `tf.nn.convolution` for details.
dilation_rate: Optional. Dilation rate. List of N ints >= 1.
Defaults to [1]*N. If any value of dilation_rate is > 1, then all values
of strides must be 1.
strides: Optional. Sequence of N ints >= 1. Defaults to [1]*N.
If any value of strides is > 1, then all values of dilation_rate must be
1.
name: Optional. Name of the op.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
For N=3, the valid values are "NDHWC" (default) and "NCDHW".
dilations: Alias for dilation_rate
Returns:
Tensor of rank N+2, of shape
[batch_size] + output_spatial_shape + [num_channels]
if data_format is None or does not start with "NC", or
[batch_size, num_channels] + output_spatial_shape
if data_format starts with "NC",
where `output_spatial_shape` depends on the value of padding:
If padding = "SAME":
output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])
If padding = "VALID":
output_spatial_shape[i] =
ceil((input_spatial_shape[i] - (window_shape[i] - 1) * dilation_rate[i])
/ strides[i]).
Raises:
ValueError: if arguments are invalid.
"""
dilation_rate = deprecated_argument_lookup(
"dilations", dilations, "dilation_rate", dilation_rate)
# pylint: enable=line-too-long
with ops.name_scope(name, "%s_pool" % (pooling_type.lower()),
[input]) as scope:
input = ops.convert_to_tensor(input, name="input") # pylint: disable=redefined-builtin
num_spatial_dims = len(window_shape)
if num_spatial_dims < 1 or num_spatial_dims > 3:
raise ValueError("It is required that 1 <= num_spatial_dims <= 3.")
input.get_shape().with_rank(num_spatial_dims + 2)
strides, dilation_rate = _get_strides_and_dilation_rate(
num_spatial_dims, strides, dilation_rate)
if padding == "SAME" and np.any(dilation_rate > 1):
raise ValueError(
"pooling with SAME padding is not implemented for dilation_rate > 1")
if np.any(strides > window_shape):
raise ValueError(
"strides > window_shape not supported due to inconsistency between "
"CPU and GPU implementations")
pooling_ops = {
("MAX", 1): max_pool,
("MAX", 2): max_pool,
("MAX", 3): max_pool3d, # pylint: disable=undefined-variable
("AVG", 1): avg_pool,
("AVG", 2): avg_pool,
("AVG", 3): avg_pool3d, # pylint: disable=undefined-variable
}
op_key = (pooling_type, num_spatial_dims)
if op_key not in pooling_ops:
raise ValueError("%d-D %s pooling is not supported." % (op_key[1],
op_key[0]))
if data_format is None or not data_format.startswith("NC"):
adjusted_window_shape = [1] + list(window_shape) + [1]
adjusted_strides = [1] + list(strides) + [1]
spatial_dims = range(1, num_spatial_dims + 1)
else:
adjusted_window_shape = [1, 1] + list(window_shape)
adjusted_strides = [1, 1] + list(strides)
spatial_dims = range(2, num_spatial_dims + 2)
if num_spatial_dims == 1:
if data_format is None or data_format == "NWC":
data_format_kwargs = dict(data_format="NHWC")
elif data_format == "NCW":
data_format_kwargs = dict(data_format="NCHW")
else:
raise ValueError("data_format must be either \"NWC\" or \"NCW\".")
adjusted_window_shape = [1] + adjusted_window_shape
adjusted_strides = [1] + adjusted_strides
else:
data_format_kwargs = dict(data_format=data_format)
def op(converted_input, _, converted_padding): # pylint: disable=missing-docstring
if num_spatial_dims == 1:
converted_input = array_ops.expand_dims(converted_input,
spatial_dims[0])
result = pooling_ops[op_key](
converted_input,
adjusted_window_shape,
adjusted_strides,
converted_padding,
name=scope,
**data_format_kwargs)
if num_spatial_dims == 1:
result = array_ops.squeeze(result, [spatial_dims[0]])
return result
return with_space_to_batch(
input=input,
dilation_rate=dilation_rate,
padding=padding,
op=op,
spatial_dims=spatial_dims,
filter_shape=window_shape)
@tf_export("nn.pool", v1=[])
@dispatch.add_dispatch_support
def pool_v2(
input, # pylint: disable=redefined-builtin
window_shape,
pooling_type,
strides=None,
padding="VALID",
data_format=None,
dilations=None,
name=None):
# pylint: disable=line-too-long
"""Performs an N-D pooling operation.
In the case that `data_format` does not start with "NC", computes for
0 <= b < batch_size,
0 <= x[i] < output_spatial_shape[i],
0 <= c < num_channels:
```
output[b, x[0], ..., x[N-1], c] =
REDUCE_{z[0], ..., z[N-1]}
input[b,
x[0] * strides[0] - pad_before[0] + dilation_rate[0]*z[0],
...
x[N-1]*strides[N-1] - pad_before[N-1] + dilation_rate[N-1]*z[N-1],
c],
```
where the reduction function REDUCE depends on the value of `pooling_type`,
and pad_before is defined based on the value of `padding` as described in
the "returns" section of `tf.nn.convolution` for details.
The reduction never includes out-of-bounds positions.
In the case that `data_format` starts with `"NC"`, the `input` and output are
simply transposed as follows:
```
pool(input, data_format, **kwargs) =
tf.transpose(pool(tf.transpose(input, [0] + range(2,N+2) + [1]),
**kwargs),
[0, N+1] + range(1, N+1))
```
Args:
input: Tensor of rank N+2, of shape `[batch_size] + input_spatial_shape +
[num_channels]` if data_format does not start with "NC" (default), or
`[batch_size, num_channels] + input_spatial_shape` if data_format starts
with "NC". Pooling happens over the spatial dimensions only.
window_shape: Sequence of N ints >= 1.
pooling_type: Specifies pooling operation, must be "AVG" or "MAX".
strides: Optional. Sequence of N ints >= 1. Defaults to [1]*N. If any value of
strides is > 1, then all values of dilation_rate must be 1.
padding: The padding algorithm, must be "SAME" or "VALID". Defaults to "VALID".
See the "returns" section of `tf.nn.convolution` for details.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW". For
N=3, the valid values are "NDHWC" (default) and "NCDHW".
dilations: Optional. Dilation rate. List of N ints >= 1. Defaults to
[1]*N. If any value of dilation_rate is > 1, then all values of strides
must be 1.
name: Optional. Name of the op.
Returns:
Tensor of rank N+2, of shape
[batch_size] + output_spatial_shape + [num_channels]
if data_format is None or does not start with "NC", or
[batch_size, num_channels] + output_spatial_shape
if data_format starts with "NC",
where `output_spatial_shape` depends on the value of padding:
If padding = "SAME":
output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])
If padding = "VALID":
output_spatial_shape[i] =
ceil((input_spatial_shape[i] - (window_shape[i] - 1) * dilation_rate[i])
/ strides[i]).
Raises:
ValueError: if arguments are invalid.
"""
return pool(
input=input,
window_shape=window_shape,
pooling_type=pooling_type,
padding=padding,
dilation_rate=dilations,
strides=strides,
name=name,
data_format=data_format)
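# --- Illustrative sketch (added for exposition; not part of the original
# module). Hypothetical, never-called helper: 2x2 max pooling with stride 2 and
# VALID padding halves each spatial dimension.
def _example_pool_v2():
  x = array_ops.ones([1, 6, 6, 1])
  return pool_v2(x, window_shape=[2, 2], pooling_type="MAX",
                 strides=[2, 2], padding="VALID")
  # -> shape [1, 3, 3, 1]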
@tf_export("nn.atrous_conv2d")
@dispatch.add_dispatch_support
def atrous_conv2d(value, filters, rate, padding, name=None):
"""Atrous convolution (a.k.a. convolution with holes or dilated convolution).
This function is a simpler wrapper around the more general
`tf.nn.convolution`, and exists only for backwards compatibility. You can
use `tf.nn.convolution` to perform 1-D, 2-D, or 3-D atrous convolution.
Computes a 2-D atrous convolution, also known as convolution with holes or
dilated convolution, given 4-D `value` and `filters` tensors. If the `rate`
parameter is equal to one, it performs regular 2-D convolution. If the `rate`
parameter is greater than one, it performs convolution with holes, sampling
the input values every `rate` pixels in the `height` and `width` dimensions.
This is equivalent to convolving the input with a set of upsampled filters,
produced by inserting `rate - 1` zeros between two consecutive values of the
filters along the `height` and `width` dimensions, hence the name atrous
convolution or convolution with holes (the French word trous means holes in
English).
More specifically:
```
output[batch, height, width, out_channel] =
sum_{dheight, dwidth, in_channel} (
filters[dheight, dwidth, in_channel, out_channel] *
value[batch, height + rate*dheight, width + rate*dwidth, in_channel]
)
```
Atrous convolution allows us to explicitly control how densely to compute
feature responses in fully convolutional networks. Used in conjunction with
bilinear interpolation, it offers an alternative to `conv2d_transpose` in
dense prediction tasks such as semantic image segmentation, optical flow
computation, or depth estimation. It also allows us to effectively enlarge
the field of view of filters without increasing the number of parameters or
the amount of computation.
For a description of atrous convolution and how it can be used for dense
feature extraction, please see: (Chen et al., 2015). The same operation is
investigated further in (Yu et al., 2016). Previous works that effectively
use atrous convolution in different ways are, among others,
(Sermanet et al., 2014) and (Giusti et al., 2013).
Atrous convolution is also closely related to the so-called noble identities
in multi-rate signal processing.
There are many different ways to implement atrous convolution (see the refs
above). The implementation here reduces
```python
atrous_conv2d(value, filters, rate, padding=padding)
```
to the following three operations:
```python
paddings = ...
net = space_to_batch(value, paddings, block_size=rate)
net = conv2d(net, filters, strides=[1, 1, 1, 1], padding="VALID")
crops = ...
net = batch_to_space(net, crops, block_size=rate)
```
Advanced usage. Note the following optimization: A sequence of `atrous_conv2d`
operations with identical `rate` parameters, 'SAME' `padding`, and filters
with odd heights/widths:
```python
net = atrous_conv2d(net, filters1, rate, padding="SAME")
net = atrous_conv2d(net, filters2, rate, padding="SAME")
...
net = atrous_conv2d(net, filtersK, rate, padding="SAME")
```
can be equivalently performed cheaper in terms of computation and memory as:
```python
pad = ... # padding so that the input dims are multiples of rate
net = space_to_batch(net, paddings=pad, block_size=rate)
net = conv2d(net, filters1, strides=[1, 1, 1, 1], padding="SAME")
net = conv2d(net, filters2, strides=[1, 1, 1, 1], padding="SAME")
...
net = conv2d(net, filtersK, strides=[1, 1, 1, 1], padding="SAME")
net = batch_to_space(net, crops=pad, block_size=rate)
```
because a pair of consecutive `space_to_batch` and `batch_to_space` ops with
the same `block_size` cancel out when their respective `paddings` and `crops`
inputs are identical.
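A minimal usage sketch (toy shapes chosen for illustration; assumes eager
execution and `import tensorflow as tf`):
```python
value = tf.random.normal([1, 32, 32, 3])     # NHWC input
filters = tf.random.normal([3, 3, 3, 16])    # 3x3 kernel mapping 3 -> 16 channels
out = tf.nn.atrous_conv2d(value, filters, rate=2, padding="SAME")
# "SAME" padding preserves the spatial shape: out.shape == (1, 32, 32, 16)
```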
Args:
value: A 4-D `Tensor` of type `float`. It needs to be in the default "NHWC"
format. Its shape is `[batch, in_height, in_width, in_channels]`.
filters: A 4-D `Tensor` with the same type as `value` and shape
`[filter_height, filter_width, in_channels, out_channels]`. `filters`'
`in_channels` dimension must match that of `value`. Atrous convolution is
equivalent to standard convolution with upsampled filters with effective
height `filter_height + (filter_height - 1) * (rate - 1)` and effective
width `filter_width + (filter_width - 1) * (rate - 1)`, produced by
inserting `rate - 1` zeros along consecutive elements across the
`filters`' spatial dimensions.
rate: A positive int32. The stride with which we sample input values across
the `height` and `width` dimensions. Equivalently, the rate by which we
upsample the filter values by inserting zeros across the `height` and
`width` dimensions. In the literature, the same parameter is sometimes
called `input stride` or `dilation`.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
name: Optional name for the returned tensor.
Returns:
A `Tensor` with the same type as `value`.
Output shape with `'VALID'` padding is:
[batch, height - (filter_height - 1) * rate,
width - (filter_width - 1) * rate, out_channels].
Output shape with `'SAME'` padding is:
[batch, height, width, out_channels].
Raises:
ValueError: If input/output depth does not match `filters`' shape, or if
padding is other than `'VALID'` or `'SAME'`.
References:
Multi-Scale Context Aggregation by Dilated Convolutions:
[Yu et al., 2016](https://arxiv.org/abs/1511.07122)
([pdf](https://arxiv.org/pdf/1511.07122.pdf))
Semantic Image Segmentation with Deep Convolutional Nets and Fully
Connected CRFs:
[Chen et al., 2015](http://arxiv.org/abs/1412.7062)
([pdf](https://arxiv.org/pdf/1412.7062))
OverFeat - Integrated Recognition, Localization and Detection using
Convolutional Networks:
[Sermanet et al., 2014](https://arxiv.org/abs/1312.6229)
([pdf](https://arxiv.org/pdf/1312.6229.pdf))
Fast Image Scanning with Deep Max-Pooling Convolutional Neural Networks:
[Giusti et al., 2013]
(https://ieeexplore.ieee.org/abstract/document/6738831)
([pdf](https://arxiv.org/pdf/1302.1700.pdf))
"""
return convolution(
input=value,
filter=filters,
padding=padding,
dilation_rate=np.broadcast_to(rate, (2,)),
name=name)
def convert_padding(padding):
"""Converts Python padding to C++ padding for ops which take EXPLICIT padding.
Args:
padding: the `padding` argument for a Python op which supports EXPLICIT
padding.
Returns:
(padding, explicit_paddings) pair, which should be passed as attributes to a
C++ op.
Raises:
ValueError: If padding is invalid.
"""
explicit_paddings = []
if padding == "EXPLICIT":
# Give a better error message if EXPLICIT is passed.
raise ValueError('"EXPLICIT" is not a valid value for the padding '
"parameter. To use explicit padding, the padding "
"parameter must be a list.")
if isinstance(padding, (list, tuple)):
for i, dim_paddings in enumerate(padding):
if not isinstance(dim_paddings, (list, tuple)):
raise ValueError("When padding is a list, each element of padding must "
"be a list/tuple of size 2. Element with index %d of "
"padding is not a list/tuple" % i)
if len(dim_paddings) != 2:
raise ValueError("When padding is a list, each element of padding must "
"be a list/tuple of size 2. Element with index %d of "
"padding has size %d" % (i, len(dim_paddings)))
explicit_paddings.extend(dim_paddings)
if len(padding) != 4:
raise ValueError("When padding is a list, it must be of size 4. Got "
"padding of size: %d" % len(padding))
padding = "EXPLICIT"
return padding, explicit_paddings
@tf_export(v1=["nn.conv1d"])
@dispatch.add_dispatch_support
@deprecation.deprecated_arg_values(
None,
"`NCHW` for data_format is deprecated, use `NCW` instead",
warn_once=True,
data_format="NCHW")
@deprecation.deprecated_arg_values(
None,
"`NHWC` for data_format is deprecated, use `NWC` instead",
warn_once=True,
data_format="NHWC")
def conv1d(
value=None,
filters=None,
stride=None,
padding=None,
use_cudnn_on_gpu=None,
data_format=None,
name=None,
input=None, # pylint: disable=redefined-builtin
dilations=None):
r"""Computes a 1-D convolution of input with rank `>=3` and a `3-D` filter.
Given an input tensor of shape
`batch_shape + [in_width, in_channels]`
if `data_format` is `"NWC"`, or
`batch_shape + [in_channels, in_width]`
if `data_format` is `"NCW"`,
and a filter / kernel tensor of shape
`[filter_width, in_channels, out_channels]`, this op reshapes
the arguments to pass them to `conv2d` to perform the equivalent
convolution operation.
Internally, this op reshapes the input tensors and invokes `tf.nn.conv2d`.
For example, if `data_format` does not start with "NC", a tensor of shape
`batch_shape + [in_width, in_channels]`
is reshaped to
`batch_shape + [1, in_width, in_channels]`,
and the filter is reshaped to
`[1, filter_width, in_channels, out_channels]`.
The result is then reshaped back to
`batch_shape + [out_width, out_channels]`
\(where out_width is a function of the stride and padding as in conv2d\) and
returned to the caller.
Args:
value: A Tensor of rank at least 3. Must be of type `float16`, `float32`, or
`float64`.
filters: A Tensor of rank at least 3. Must have the same type as `value`.
stride: An int or list of `ints` that has length `1` or `3`. The number of
entries by which the filter is moved right at each step.
padding: 'SAME' or 'VALID'
use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
data_format: An optional `string` from `"NWC", "NCW"`. Defaults to `"NWC"`,
the data is stored in the order of `batch_shape + [in_width,
in_channels]`. The `"NCW"` format stores data as `batch_shape +
[in_channels, in_width]`.
name: A name for the operation (optional).
input: Alias for value.
dilations: An int or list of `ints` that has length `1` or `3` which
defaults to 1. The dilation factor for each dimension of input. If set to
k > 1, there will be k-1 skipped cells between each filter element on that
dimension. Dilations in the batch and depth dimensions must be 1.
Returns:
A `Tensor`. Has the same type as input.
Raises:
ValueError: if `data_format` is invalid.
"""
value = deprecation.deprecated_argument_lookup("input", input, "value", value)
with ops.name_scope(name, "conv1d", [value, filters]) as name:
# Reshape the input tensor to batch_shape + [1, in_width, in_channels]
if data_format is None or data_format == "NHWC" or data_format == "NWC":
data_format = "NHWC"
spatial_start_dim = -3
channel_index = 2
elif data_format == "NCHW" or data_format == "NCW":
data_format = "NCHW"
spatial_start_dim = -2
channel_index = 1
else:
raise ValueError("data_format must be \"NWC\" or \"NCW\".")
strides = [1] + _get_sequence(stride, 1, channel_index, "stride")
dilations = [1] + _get_sequence(dilations, 1, channel_index, "dilations")
value = array_ops.expand_dims(value, spatial_start_dim)
filters = array_ops.expand_dims(filters, 0)
if value.shape.ndims in (4, 3, 2, 1, 0, None):
result = gen_nn_ops.conv2d(
value,
filters,
strides,
padding,
use_cudnn_on_gpu=use_cudnn_on_gpu,
data_format=data_format,
dilations=dilations,
name=name)
else:
result = squeeze_batch_dims(
value,
functools.partial(
gen_nn_ops.conv2d,
filter=filters,
strides=strides,
padding=padding,
use_cudnn_on_gpu=use_cudnn_on_gpu,
data_format=data_format,
dilations=dilations,
),
inner_rank=3,
name=name)
return array_ops.squeeze(result, [spatial_start_dim])
@tf_export("nn.conv1d", v1=[])
@dispatch.add_dispatch_support
def conv1d_v2(
input, # pylint: disable=redefined-builtin
filters,
stride,
padding,
data_format="NWC",
dilations=None,
name=None):
r"""Computes a 1-D convolution given 3-D input and filter tensors.
Given an input tensor of shape
`batch_shape + [in_width, in_channels]`
if `data_format` is `"NWC"`, or
`batch_shape + [in_channels, in_width]`
if `data_format` is `"NCW"`,
and a filter / kernel tensor of shape
`[filter_width, in_channels, out_channels]`, this op reshapes
the arguments to pass them to `conv2d` to perform the equivalent
convolution operation.
Internally, this op reshapes the input tensors and invokes `tf.nn.conv2d`.
For example, if `data_format` does not start with `"NC"`, a tensor of shape
`batch_shape + [in_width, in_channels]`
is reshaped to
`batch_shape + [1, in_width, in_channels]`,
and the filter is reshaped to
`[1, filter_width, in_channels, out_channels]`.
The result is then reshaped back to
`batch_shape + [out_width, out_channels]`
\(where out_width is a function of the stride and padding as in conv2d\) and
returned to the caller.
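A minimal usage sketch (toy shapes chosen for illustration; assumes eager
execution and `import tensorflow as tf`):
```python
x = tf.random.normal([2, 10, 4])        # batch_shape + [in_width, in_channels]
kernel = tf.random.normal([3, 4, 8])    # [filter_width, in_channels, out_channels]
y = tf.nn.conv1d(x, kernel, stride=1, padding="SAME")
# y.shape == (2, 10, 8)
```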
Args:
input: A Tensor of rank at least 3. Must be of type `float16`, `float32`, or
`float64`.
filters: A Tensor of rank at least 3. Must have the same type as `input`.
stride: An int or list of `ints` that has length `1` or `3`. The number of
entries by which the filter is moved right at each step.
padding: 'SAME' or 'VALID'
data_format: An optional `string` from `"NWC", "NCW"`. Defaults to `"NWC"`,
the data is stored in the order of
`batch_shape + [in_width, in_channels]`. The `"NCW"` format stores data
as `batch_shape + [in_channels, in_width]`.
dilations: An int or list of `ints` that has length `1` or `3` which
defaults to 1. The dilation factor for each dimension of input. If set to
k > 1, there will be k-1 skipped cells between each filter element on that
dimension. Dilations in the batch and depth dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as input.
Raises:
ValueError: if `data_format` is invalid.
"""
return conv1d(
input, # pylint: disable=redefined-builtin
filters,
stride,
padding,
use_cudnn_on_gpu=True,
data_format=data_format,
name=name,
dilations=dilations)
@tf_export("nn.conv1d_transpose")
@dispatch.add_dispatch_support
def conv1d_transpose(
input, # pylint: disable=redefined-builtin
filters,
output_shape,
strides,
padding="SAME",
data_format="NWC",
dilations=None,
name=None):
"""The transpose of `conv1d`.
This operation is sometimes called "deconvolution" after
(Zeiler et al., 2010), but is actually the transpose (gradient) of `conv1d`
rather than an actual deconvolution.
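A minimal usage sketch (toy shapes chosen for illustration; assumes eager
execution and `import tensorflow as tf`):
```python
y = tf.random.normal([2, 5, 8])          # [batch, in_width, in_channels]
kernel = tf.random.normal([3, 4, 8])     # [filter_width, output_channels, in_channels]
x = tf.nn.conv1d_transpose(y, kernel, output_shape=[2, 10, 4], strides=2)
# x.shape == (2, 10, 4)
```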
Args:
input: A 3-D `Tensor` of type `float` and shape
`[batch, in_width, in_channels]` for `NWC` data format or
`[batch, in_channels, in_width]` for `NCW` data format.
filters: A 3-D `Tensor` with the same type as `input` and shape
`[filter_width, output_channels, in_channels]`. `filter`'s
`in_channels` dimension must match that of `input`.
output_shape: A 1-D `Tensor`, containing three elements, representing the
output shape of the deconvolution op.
strides: An int or list of `ints` that has length `1` or `3`. The number of
entries by which the filter is moved right at each step.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
See the "returns" section of `tf.nn.convolution` for details.
data_format: A string. `'NWC'` and `'NCW'` are supported.
dilations: An int or list of `ints` that has length `1` or `3` which
defaults to 1. The dilation factor for each dimension of input. If set to
k > 1, there will be k-1 skipped cells between each filter element on that
dimension. Dilations in the batch and depth dimensions must be 1.
name: Optional name for the returned tensor.
Returns:
A `Tensor` with the same type as `input`.
Raises:
ValueError: If input/output depth does not match `filter`'s shape, if
`output_shape` is not a 3-element vector, if `padding` is other than
`'VALID'` or `'SAME'`, or if `data_format` is invalid.
References:
Deconvolutional Networks:
[Zeiler et al., 2010]
(https://ieeexplore.ieee.org/abstract/document/5539957)
([pdf]
(http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))
"""
with ops.name_scope(name, "conv1d_transpose",
[input, filters, output_shape]) as name:
# The format could be either NWC or NCW, map to NHWC or NCHW
if data_format is None or data_format == "NWC":
data_format = "NHWC"
spatial_start_dim = 1
channel_index = 2
elif data_format == "NCW":
data_format = "NCHW"
spatial_start_dim = 2
channel_index = 1
else:
raise ValueError("data_format must be \"NWC\" or \"NCW\".")
# Reshape the input tensor to [batch, 1, in_width, in_channels]
strides = [1] + _get_sequence(strides, 1, channel_index, "stride")
dilations = [1] + _get_sequence(dilations, 1, channel_index, "dilations")
input = array_ops.expand_dims(input, spatial_start_dim)
filters = array_ops.expand_dims(filters, 0)
output_shape = list(output_shape) if not isinstance(
output_shape, ops.Tensor) else output_shape
output_shape = array_ops.concat([output_shape[: spatial_start_dim], [1],
output_shape[spatial_start_dim:]], 0)
result = gen_nn_ops.conv2d_backprop_input(
input_sizes=output_shape,
filter=filters,
out_backprop=input,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilations,
name=name)
return array_ops.squeeze(result, spatial_start_dim)
@tf_export("nn.conv2d", v1=[])
@dispatch.add_dispatch_support
def conv2d_v2(input, # pylint: disable=redefined-builtin
filters,
strides,
padding,
data_format="NHWC",
dilations=None,
name=None):
# pylint: disable=line-too-long
r"""Computes a 2-D convolution given `input` and 4-D `filters` tensors.
The `input` tensor may have rank `4` or higher, where shape dimensions `[:-3]`
are considered batch dimensions (`batch_shape`).
Given an input tensor of shape
`batch_shape + [in_height, in_width, in_channels]` and a filter / kernel
tensor of shape `[filter_height, filter_width, in_channels, out_channels]`,
this op performs the following:
1. Flattens the filter to a 2-D matrix with shape
`[filter_height * filter_width * in_channels, output_channels]`.
2. Extracts image patches from the input tensor to form a *virtual*
tensor of shape `[batch, out_height, out_width,
filter_height * filter_width * in_channels]`.
3. For each patch, right-multiplies the filter matrix and the image patch
vector.
In detail, with the default NHWC format,
output[b, i, j, k] =
sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] *
filter[di, dj, q, k]
Must have `strides[0] = strides[3] = 1`. For the most common case of the same
horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
Usage Example:
>>> x_in = np.array([[
... [[2], [1], [2], [0], [1]],
... [[1], [3], [2], [2], [3]],
... [[1], [1], [3], [3], [0]],
... [[2], [2], [0], [1], [1]],
... [[0], [0], [3], [1], [2]], ]])
>>> kernel_in = np.array([
... [ [[2, 0.1]], [[3, 0.2]] ],
... [ [[0, 0.3]],[[1, 0.4]] ], ])
>>> x = tf.constant(x_in, dtype=tf.float32)
>>> kernel = tf.constant(kernel_in, dtype=tf.float32)
>>> tf.nn.conv2d(x, kernel, strides=[1, 1, 1, 1], padding='VALID')
<tf.Tensor: shape=(1, 4, 4, 2), dtype=float32, numpy=..., dtype=float32)>
Args:
input: A `Tensor`. Must be one of the following types:
`half`, `bfloat16`, `float32`, `float64`.
A Tensor of rank at least 4. The dimension order is interpreted according
to the value of `data_format`; with the all-but-inner-3 dimensions acting
as batch dimensions. See below for details.
filters: A `Tensor`. Must have the same type as `input`.
A 4-D tensor of shape
`[filter_height, filter_width, in_channels, out_channels]`
strides: An int or list of `ints` that has length `1`, `2` or `4`. The
stride of the sliding window for each dimension of `input`. If a single
value is given it is replicated in the `H` and `W` dimension. By default
the `N` and `C` dimensions are set to 1. The dimension order is determined
by the value of `data_format`, see below for details.
padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding is used
and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
data_format: An optional `string` from: `"NHWC", "NCHW"`.
Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
`batch_shape + [height, width, channels]`.
Alternatively, the format could be "NCHW", the data storage order of:
`batch_shape + [channels, height, width]`.
dilations: An int or list of `ints` that has length `1`, `2` or `4`,
defaults to 1. The dilation factor for each dimension of `input`. If a
single value is given it is replicated in the `H` and `W` dimension. By
default the `N` and `C` dimensions are set to 1. If set to k > 1, there
will be k-1 skipped cells between each filter element on that dimension.
The dimension order is determined by the value of `data_format`, see above
for details. Dilations in the batch and depth dimensions if a 4-d tensor
must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input` and the same outer batch shape.
"""
# pylint: enable=line-too-long
return conv2d(input, # pylint: disable=redefined-builtin
filters,
strides,
padding,
use_cudnn_on_gpu=True,
data_format=data_format,
dilations=dilations,
name=name)
@tf_export(v1=["nn.conv2d"])
@dispatch.add_dispatch_support
def conv2d( # pylint: disable=redefined-builtin,dangerous-default-value
input,
filter=None,
strides=None,
padding=None,
use_cudnn_on_gpu=True,
data_format="NHWC",
dilations=[1, 1, 1, 1],
name=None,
filters=None):
r"""Computes a 2-D convolution given 4-D `input` and `filter` tensors.
Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
and a filter / kernel tensor of shape
`[filter_height, filter_width, in_channels, out_channels]`, this op
performs the following:
1. Flattens the filter to a 2-D matrix with shape
`[filter_height * filter_width * in_channels, output_channels]`.
2. Extracts image patches from the input tensor to form a *virtual*
tensor of shape `[batch, out_height, out_width,
filter_height * filter_width * in_channels]`.
3. For each patch, right-multiplies the filter matrix and the image patch
vector.
In detail, with the default NHWC format,
output[b, i, j, k] =
sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q]
* filter[di, dj, q, k]
Must have `strides[0] = strides[3] = 1`. For the most common case of the same
horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
Args:
input: A `Tensor`. Must be one of the following types:
`half`, `bfloat16`, `float32`, `float64`.
A 4-D tensor. The dimension order is interpreted according to the value
of `data_format`, see below for details.
filter: A `Tensor`. Must have the same type as `input`.
A 4-D tensor of shape
`[filter_height, filter_width, in_channels, out_channels]`
strides: An int or list of `ints` that has length `1`, `2` or `4`. The
stride of the sliding window for each dimension of `input`. If a single
value is given it is replicated in the `H` and `W` dimension. By default
the `N` and `C` dimensions are set to 1. The dimension order is determined
by the value of `data_format`, see below for details.
padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding is used
and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
data_format: An optional `string` from: `"NHWC", "NCHW"`.
Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
[batch, height, width, channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, channels, height, width].
dilations: An int or list of `ints` that has length `1`, `2` or `4`,
defaults to 1. The dilation factor for each dimension of `input`. If a
single value is given it is replicated in the `H` and `W` dimension. By
default the `N` and `C` dimensions are set to 1. If set to k > 1, there
will be k-1 skipped cells between each filter element on that dimension.
The dimension order is determined by the value of `data_format`, see above
for details. Dilations in the batch and depth dimensions if a 4-d tensor
must be 1.
name: A name for the operation (optional).
filters: Alias for filter.
Returns:
A `Tensor`. Has the same type as `input`.
"""
filter = deprecation.deprecated_argument_lookup(
"filters", filters, "filter", filter)
padding, explicit_paddings = convert_padding(padding)
if data_format is None:
data_format = "NHWC"
channel_index = 1 if data_format.startswith("NC") else 3
strides = _get_sequence(strides, 2, channel_index, "strides")
dilations = _get_sequence(dilations, 2, channel_index, "dilations")
shape = input.shape
# shape object may lack ndims, e.g., if input is an np.ndarray. In that case,
# we fall back to len(shape).
ndims = getattr(shape, "ndims", -1)
if ndims == -1:
ndims = len(shape)
if ndims in (4, 3, 2, 1, 0, None):
# We avoid calling squeeze_batch_dims to reduce extra python function
# call slowdown in eager mode. This branch doesn't require reshapes.
return gen_nn_ops.conv2d(
input,
filter=filter,
strides=strides,
padding=padding,
use_cudnn_on_gpu=use_cudnn_on_gpu,
explicit_paddings=explicit_paddings,
data_format=data_format,
dilations=dilations,
name=name)
return squeeze_batch_dims(
input,
functools.partial(
gen_nn_ops.conv2d,
filter=filter,
strides=strides,
padding=padding,
use_cudnn_on_gpu=use_cudnn_on_gpu,
explicit_paddings=explicit_paddings,
data_format=data_format,
dilations=dilations),
inner_rank=3,
name=name)
@tf_export(v1=["nn.conv2d_backprop_filter"])
@dispatch.add_dispatch_support
def conv2d_backprop_filter( # pylint: disable=redefined-builtin,dangerous-default-value
input,
filter_sizes,
out_backprop,
strides,
padding,
use_cudnn_on_gpu=True,
data_format="NHWC",
dilations=[1, 1, 1, 1],
name=None):
r"""Computes the gradients of convolution with respect to the filter.
Args:
input: A `Tensor`. Must be one of the following types:
`half`, `bfloat16`, `float32`, `float64`.
4-D with shape `[batch, in_height, in_width, in_channels]`.
filter_sizes: A `Tensor` of type `int32`.
An integer vector representing the tensor shape of `filter`,
where `filter` is a 4-D
`[filter_height, filter_width, in_channels, out_channels]` tensor.
out_backprop: A `Tensor`. Must have the same type as `input`.
4-D with shape `[batch, out_height, out_width, out_channels]`.
Gradients w.r.t. the output of the convolution.
strides: A list of `ints`.
The stride of the sliding window for each dimension of the input
of the convolution. Must be in the same order as the dimension specified
with format.
padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding is used
and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
data_format: An optional `string` from: `"NHWC", "NCHW"`.
Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
[batch, in_height, in_width, in_channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, in_channels, in_height, in_width].
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
1-D tensor of length 4. The dilation factor for each dimension of
`input`. If set to k > 1, there will be k-1 skipped cells between each
filter element on that dimension. The dimension order is determined by
the value of `data_format`, see above for details. Dilations in the batch
and depth dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
padding, explicit_paddings = convert_padding(padding)
return gen_nn_ops.conv2d_backprop_filter(
input, filter_sizes, out_backprop, strides, padding, use_cudnn_on_gpu,
explicit_paddings, data_format, dilations, name)
@tf_export(v1=["nn.conv2d_backprop_input"])
@dispatch.add_dispatch_support
def conv2d_backprop_input( # pylint: disable=redefined-builtin,dangerous-default-value
input_sizes,
filter=None,
out_backprop=None,
strides=None,
padding=None,
use_cudnn_on_gpu=True,
data_format="NHWC",
dilations=[1, 1, 1, 1],
name=None,
filters=None):
r"""Computes the gradients of convolution with respect to the input.
Args:
input_sizes: A `Tensor` of type `int32`.
An integer vector representing the shape of `input`,
where `input` is a 4-D `[batch, height, width, channels]` tensor.
filter: A `Tensor`. Must be one of the following types:
`half`, `bfloat16`, `float32`, `float64`.
4-D with shape
`[filter_height, filter_width, in_channels, out_channels]`.
out_backprop: A `Tensor`. Must have the same type as `filter`.
4-D with shape `[batch, out_height, out_width, out_channels]`.
Gradients w.r.t. the output of the convolution.
strides: A list of `ints`.
The stride of the sliding window for each dimension of the input
of the convolution. Must be in the same order as the dimension specified
with format.
padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding is used
and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
data_format: An optional `string` from: `"NHWC", "NCHW"`.
Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
[batch, in_height, in_width, in_channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, in_channels, in_height, in_width].
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
1-D tensor of length 4. The dilation factor for each dimension of
`input`. If set to k > 1, there will be k-1 skipped cells between each
filter element on that dimension. The dimension order is determined by
the value of `data_format`, see above for details. Dilations in the batch
and depth dimensions must be 1.
name: A name for the operation (optional).
filters: Alias for filter.
Returns:
A `Tensor`. Has the same type as `filter`.
"""
filter = deprecation.deprecated_argument_lookup(
"filters", filters, "filter", filter)
padding, explicit_paddings = convert_padding(padding)
return gen_nn_ops.conv2d_backprop_input(
input_sizes, filter, out_backprop, strides, padding, use_cudnn_on_gpu,
explicit_paddings, data_format, dilations, name)
@tf_export(v1=["nn.conv2d_transpose"])
@dispatch.add_dispatch_support
def conv2d_transpose(
value=None,
filter=None, # pylint: disable=redefined-builtin
output_shape=None,
strides=None,
padding="SAME",
data_format="NHWC",
name=None,
input=None, # pylint: disable=redefined-builtin
filters=None,
dilations=None):
"""The transpose of `conv2d`.
This operation is sometimes called "deconvolution" after
(Zeiler et al., 2010), but is really the transpose (gradient) of `conv2d`
rather than an actual deconvolution.
Args:
value: A 4-D `Tensor` of type `float` and shape
`[batch, height, width, in_channels]` for `NHWC` data format or
`[batch, in_channels, height, width]` for `NCHW` data format.
filter: A 4-D `Tensor` with the same type as `value` and shape
`[height, width, output_channels, in_channels]`. `filter`'s
`in_channels` dimension must match that of `value`.
output_shape: A 1-D `Tensor` representing the output shape of the
deconvolution op.
strides: An int or list of `ints` that has length `1`, `2` or `4`. The
stride of the sliding window for each dimension of `input`. If a single
value is given it is replicated in the `H` and `W` dimension. By default
the `N` and `C` dimensions are set to 1. The dimension order is determined
by the value of `data_format`, see below for details.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
See the "returns" section of `tf.nn.convolution` for details.
data_format: A string. 'NHWC' and 'NCHW' are supported.
name: Optional name for the returned tensor.
input: Alias for value.
filters: Alias for filter.
dilations: An int or list of `ints` that has length `1`, `2` or `4`,
defaults to 1. The dilation factor for each dimension of `input`. If a
single value is given it is replicated in the `H` and `W` dimension. By
default the `N` and `C` dimensions are set to 1. If set to k > 1, there
will be k-1 skipped cells between each filter element on that dimension.
The dimension order is determined by the value of `data_format`, see above
for details. Dilations in the batch and depth dimensions if a 4-d tensor
must be 1.
Returns:
A `Tensor` with the same type as `value`.
Raises:
ValueError: If input/output depth does not match `filter`'s shape, or if
padding is other than `'VALID'` or `'SAME'`.
References:
Deconvolutional Networks:
[Zeiler et al., 2010]
(https://ieeexplore.ieee.org/abstract/document/5539957)
([pdf]
(http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))
"""
value = deprecated_argument_lookup("input", input, "value", value)
filter = deprecated_argument_lookup("filters", filters, "filter", filter)
with ops.name_scope(name, "conv2d_transpose",
[value, filter, output_shape]) as name:
return conv2d_transpose_v2(
value,
filter,
output_shape,
strides,
padding=padding,
data_format=data_format,
dilations=dilations,
name=name)
@tf_export("nn.conv2d_transpose", v1=[])
@dispatch.add_dispatch_support
def conv2d_transpose_v2(
input, # pylint: disable=redefined-builtin
filters, # pylint: disable=redefined-builtin
output_shape,
strides,
padding="SAME",
data_format="NHWC",
dilations=None,
name=None):
"""The transpose of `conv2d`.
This operation is sometimes called "deconvolution" after
(Zeiler et al., 2010), but is really the transpose (gradient) of
`conv2d` rather than an actual deconvolution.
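A minimal usage sketch (toy shapes chosen for illustration; assumes eager
execution and `import tensorflow as tf`):
```python
x = tf.random.normal([1, 8, 8, 16])          # NHWC input
kernel = tf.random.normal([3, 3, 32, 16])    # [height, width, output_channels, in_channels]
y = tf.nn.conv2d_transpose(x, kernel, output_shape=[1, 16, 16, 32], strides=2)
# y.shape == (1, 16, 16, 32)
```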
Args:
input: A 4-D `Tensor` of type `float` and shape `[batch, height, width,
in_channels]` for `NHWC` data format or `[batch, in_channels, height,
width]` for `NCHW` data format.
filters: A 4-D `Tensor` with the same type as `input` and shape `[height,
width, output_channels, in_channels]`. `filter`'s `in_channels` dimension
must match that of `input`.
output_shape: A 1-D `Tensor` representing the output shape of the
deconvolution op.
strides: An int or list of `ints` that has length `1`, `2` or `4`. The
stride of the sliding window for each dimension of `input`. If a single
value is given it is replicated in the `H` and `W` dimension. By default
the `N` and `C` dimensions are set to 1. The dimension order is determined
by the value of `data_format`, see below for details.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See
the "returns" section of `tf.nn.convolution` for details.
data_format: A string. 'NHWC' and 'NCHW' are supported.
dilations: An int or list of `ints` that has length `1`, `2` or `4`,
defaults to 1. The dilation factor for each dimension of `input`. If a
single value is given it is replicated in the `H` and `W` dimension. By
default the `N` and `C` dimensions are set to 1. If set to k > 1, there
will be k-1 skipped cells between each filter element on that dimension.
The dimension order is determined by the value of `data_format`, see above
for details. Dilations in the batch and depth dimensions if a 4-d tensor
must be 1.
name: Optional name for the returned tensor.
Returns:
A `Tensor` with the same type as `input`.
Raises:
ValueError: If input/output depth does not match `filter`'s shape, or if
padding is other than `'VALID'` or `'SAME'`.
References:
Deconvolutional Networks:
[Zeiler et al., 2010]
(https://ieeexplore.ieee.org/abstract/document/5539957)
([pdf]
(http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))
"""
with ops.name_scope(name, "conv2d_transpose",
[input, filters, output_shape]) as name:
if data_format is None:
data_format = "NHWC"
channel_index = 1 if data_format.startswith("NC") else 3
strides = _get_sequence(strides, 2, channel_index, "strides")
dilations = _get_sequence(dilations, 2, channel_index, "dilations")
return gen_nn_ops.conv2d_backprop_input(
input_sizes=output_shape,
filter=filters,
out_backprop=input,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilations,
name=name)
def _conv2d_expanded_batch(
input, # pylint: disable=redefined-builtin
filters,
strides,
padding,
data_format,
dilations,
name):
"""Helper function for `convolution_internal`; handles expanded batches."""
# Try really hard to avoid modifying the legacy name scopes - return early.
input_rank = input.shape.rank
if input_rank is None or input_rank < 5:
# We avoid calling squeeze_batch_dims to reduce extra python function
# call slowdown in eager mode. This branch doesn't require reshapes.
return gen_nn_ops.conv2d(
input,
filter=filters,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilations,
name=name)
return squeeze_batch_dims(
input,
functools.partial(
gen_nn_ops.conv2d,
filter=filters,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilations),
inner_rank=3,
name=name)
@tf_export("nn.atrous_conv2d_transpose")
@dispatch.add_dispatch_support
def atrous_conv2d_transpose(value,
filters,
output_shape,
rate,
padding,
name=None):
"""The transpose of `atrous_conv2d`.
This operation is sometimes called "deconvolution" after
(Zeiler et al., 2010), but is really the transpose (gradient) of
`atrous_conv2d` rather than an actual deconvolution.
Args:
value: A 4-D `Tensor` of type `float`. It needs to be in the default `NHWC`
format. Its shape is `[batch, in_height, in_width, in_channels]`.
filters: A 4-D `Tensor` with the same type as `value` and shape
`[filter_height, filter_width, out_channels, in_channels]`. `filters`'
`in_channels` dimension must match that of `value`. Atrous convolution is
equivalent to standard convolution with upsampled filters with effective
height `filter_height + (filter_height - 1) * (rate - 1)` and effective
width `filter_width + (filter_width - 1) * (rate - 1)`, produced by
inserting `rate - 1` zeros along consecutive elements across the
`filters`' spatial dimensions.
output_shape: A 1-D `Tensor` representing the output shape of the
deconvolution op.
rate: A positive int32. The stride with which we sample input values across
the `height` and `width` dimensions. Equivalently, the rate by which we
upsample the filter values by inserting zeros across the `height` and
`width` dimensions. In the literature, the same parameter is sometimes
called `input stride` or `dilation`.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
name: Optional name for the returned tensor.
Returns:
A `Tensor` with the same type as `value`.
Raises:
ValueError: If input/output depth does not match `filters`' shape, or if
padding is other than `'VALID'` or `'SAME'`, or if the `rate` is less
than one, or if the output_shape is not a tensor with 4 elements.
References:
Deconvolutional Networks:
[Zeiler et al., 2010]
(https://ieeexplore.ieee.org/abstract/document/5539957)
([pdf]
(http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))
"""
with ops.name_scope(name, "atrous_conv2d_transpose",
[value, filters, output_shape]) as name:
value = ops.convert_to_tensor(value, name="value")
filters = ops.convert_to_tensor(filters, name="filters")
if not value.get_shape().dims[3].is_compatible_with(filters.get_shape()[3]):
raise ValueError(
"value's input channels does not match filters' input channels, "
"{} != {}".format(value.get_shape()[3],
filters.get_shape()[3]))
if rate < 1:
raise ValueError("rate {} cannot be less than one".format(rate))
if rate == 1:
return conv2d_transpose(
value,
filters,
output_shape,
strides=[1, 1, 1, 1],
padding=padding,
data_format="NHWC")
output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape")
if not output_shape_.get_shape().is_compatible_with(
tensor_shape.TensorShape([4])):
raise ValueError("output_shape must have shape (4,), got {}".format(
output_shape_.get_shape()))
if isinstance(output_shape, tuple):
output_shape = list(output_shape)
if isinstance(output_shape, (list, np.ndarray)):
# output_shape's shape should be == [4] if reached this point.
if not filters.get_shape().dims[2].is_compatible_with(output_shape[3]):
raise ValueError(
"output_shape does not match filter's output channels, "
"{} != {}".format(output_shape[3],
filters.get_shape()[2]))
# We have two padding contributions. The first is used for converting "SAME"
# to "VALID". The second is required so that the height and width of the
# zero-padded value tensor are multiples of rate.
# Padding required to reduce to "VALID" convolution
if padding == "SAME":
# Handle filters whose shape is unknown during graph creation.
if filters.get_shape().is_fully_defined():
filter_shape = filters.get_shape().as_list()
else:
filter_shape = array_ops.shape(filters)
filter_height, filter_width = filter_shape[0], filter_shape[1]
# Spatial dimensions of the filters and the upsampled filters in which we
# introduce (rate - 1) zeros between consecutive filter values.
filter_height_up = filter_height + (filter_height - 1) * (rate - 1)
filter_width_up = filter_width + (filter_width - 1) * (rate - 1)
pad_height = filter_height_up - 1
pad_width = filter_width_up - 1
# When pad_height (pad_width) is odd, we pad more to bottom (right),
# following the same convention as conv2d().
pad_top = pad_height // 2
pad_bottom = pad_height - pad_top
pad_left = pad_width // 2
pad_right = pad_width - pad_left
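# Worked example (illustrative): with rate = 2 and a 3x3 filter,
# filter_height_up = 3 + 2 * 1 = 5, so pad_height = 4, pad_top = 2 and
# pad_bottom = 2.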
elif padding == "VALID":
pad_top = 0
pad_bottom = 0
pad_left = 0
pad_right = 0
else:
raise ValueError("padding must be either VALID or SAME:"
" {}".format(padding))
in_height = output_shape[1] + pad_top + pad_bottom
in_width = output_shape[2] + pad_left + pad_right
# More padding so that rate divides the height and width of the input.
pad_bottom_extra = (rate - in_height % rate) % rate
pad_right_extra = (rate - in_width % rate) % rate
# The paddings argument to space_to_batch is just the extra padding
# component.
space_to_batch_pad = [[0, pad_bottom_extra], [0, pad_right_extra]]
value = array_ops.space_to_batch(
input=value, paddings=space_to_batch_pad, block_size=rate)
input_sizes = [
rate * rate * output_shape[0], (in_height + pad_bottom_extra) // rate,
(in_width + pad_right_extra) // rate, output_shape[3]
]
value = gen_nn_ops.conv2d_backprop_input(
input_sizes=input_sizes,
filter=filters,
out_backprop=value,
strides=[1, 1, 1, 1],
padding="VALID",
data_format="NHWC")
# The crops argument to batch_to_space includes both padding components.
batch_to_space_crop = [[pad_top, pad_bottom + pad_bottom_extra],
[pad_left, pad_right + pad_right_extra]]
return array_ops.batch_to_space(
input=value, crops=batch_to_space_crop, block_size=rate)
@tf_export(v1=["nn.depthwise_conv2d_native"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("nn.depthwise_conv2d_native")
def depthwise_conv2d_native( # pylint: disable=redefined-builtin,dangerous-default-value
input,
filter,
strides,
padding,
data_format="NHWC",
dilations=[1, 1, 1, 1],
name=None):
r"""Computes a 2-D depthwise convolution.
Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
and a filter / kernel tensor of shape
`[filter_height, filter_width, in_channels, channel_multiplier]`, containing
`in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies
a different filter to each input channel (expanding from 1 channel to
`channel_multiplier` channels for each), then concatenates the results
together. Thus, the output has `in_channels * channel_multiplier` channels.
```
for k in 0..in_channels-1
for q in 0..channel_multiplier-1
output[b, i, j, k * channel_multiplier + q] =
sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] *
filter[di, dj, k, q]
```
Must have `strides[0] = strides[3] = 1`. For the most common case of the same
horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
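A minimal usage sketch (toy shapes chosen for illustration; assumes eager
execution and `import tensorflow as tf`):
```python
x = tf.random.normal([1, 8, 8, 3])           # NHWC input, 3 channels
kernel = tf.random.normal([3, 3, 3, 2])      # channel_multiplier = 2
y = tf.compat.v1.nn.depthwise_conv2d_native(
    x, kernel, strides=[1, 1, 1, 1], padding="SAME")
# y.shape == (1, 8, 8, 6), i.e. in_channels * channel_multiplier output channels
```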
Args:
input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`,
`float32`, `float64`.
filter: A `Tensor`. Must have the same type as `input`.
strides: A list of `ints`. 1-D of length 4. The stride of the sliding
window for each dimension of `input`.
padding: Controls how to pad the image before applying the convolution. Can
be the string `"SAME"` or `"VALID"` indicating the type of padding
algorithm to use, or a list indicating the explicit paddings at the start
and end of each dimension. When explicit padding is used and data_format
is `"NHWC"`, this should be in the form `[[0, 0], [pad_top, pad_bottom],
[pad_left, pad_right], [0, 0]]`. When explicit padding is used and
data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to
`"NHWC"`. Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of: [batch, height,
width, channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, channels, height, width].
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 1-D
tensor of length 4. The dilation factor for each dimension of `input`. If
set to k > 1, there will be k-1 skipped cells between each filter element
on that dimension. The dimension order is determined by the value of
`data_format`, see above for details. Dilations in the batch and depth
dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
padding, explicit_paddings = convert_padding(padding)
return gen_nn_ops.depthwise_conv2d_native(
input,
filter,
strides,
padding,
explicit_paddings=explicit_paddings,
data_format=data_format,
dilations=dilations,
name=name)
@tf_export(
"nn.depthwise_conv2d_backprop_input",
v1=[
"nn.depthwise_conv2d_native_backprop_input",
"nn.depthwise_conv2d_backprop_input"
])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("nn.depthwise_conv2d_native_backprop_input")
def depthwise_conv2d_native_backprop_input( # pylint: disable=redefined-builtin,dangerous-default-value
input_sizes,
filter,
out_backprop,
strides,
padding,
data_format="NHWC",
dilations=[1, 1, 1, 1],
name=None):
r"""Computes the gradients of depthwise convolution with respect to the input.
Args:
input_sizes: A `Tensor` of type `int32`. An integer vector representing the
shape of `input`, based on `data_format`. For example, if `data_format`
is 'NHWC' then `input` is a 4-D `[batch, height, width, channels]` tensor.
filter: A `Tensor`. Must be one of the following types: `half`, `bfloat16`,
`float32`, `float64`. 4-D with shape `[filter_height, filter_width,
in_channels, depthwise_multiplier]`.
out_backprop: A `Tensor`. Must have the same type as `filter`. 4-D with
shape based on `data_format`. For example, if `data_format` is 'NHWC'
then out_backprop shape is `[batch, out_height, out_width, out_channels]`.
Gradients w.r.t. the output of the convolution.
strides: A list of `ints`. The stride of the sliding window for each
dimension of the input of the convolution.
padding: Controls how to pad the image before applying the convolution. Can
be the string `"SAME"` or `"VALID"` indicating the type of padding
algorithm to use, or a list indicating the explicit paddings at the start
and end of each dimension. When explicit padding is used and data_format
is `"NHWC"`, this should be in the form `[[0, 0], [pad_top, pad_bottom],
[pad_left, pad_right], [0, 0]]`. When explicit padding is used and
data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to
`"NHWC"`. Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of: [batch, height,
width, channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, channels, height, width].
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 1-D
tensor of length 4. The dilation factor for each dimension of `input`. If
set to k > 1, there will be k-1 skipped cells between each filter element
on that dimension. The dimension order is determined by the value of
`data_format`, see above for details. Dilations in the batch and depth
dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `filter`.
"""
padding, explicit_paddings = convert_padding(padding)
return gen_nn_ops.depthwise_conv2d_native_backprop_input(
input_sizes,
filter,
out_backprop,
strides,
padding,
explicit_paddings=explicit_paddings,
data_format=data_format,
dilations=dilations,
name=name)
@tf_export(
"nn.depthwise_conv2d_backprop_filter",
v1=[
"nn.depthwise_conv2d_native_backprop_filter",
"nn.depthwise_conv2d_backprop_filter"
])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("nn.depthwise_conv2d_native_backprop_filter")
def depthwise_conv2d_native_backprop_filter( # pylint: disable=redefined-builtin,dangerous-default-value
input,
filter_sizes,
out_backprop,
strides,
padding,
data_format="NHWC",
dilations=[1, 1, 1, 1],
name=None):
r"""Computes the gradients of depthwise convolution with respect to the filter.
Args:
input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`,
`float32`, `float64`. 4-D with shape based on `data_format`. For example,
if `data_format` is 'NHWC' then `input` is a 4-D `[batch, in_height,
in_width, in_channels]` tensor.
filter_sizes: A `Tensor` of type `int32`. An integer vector representing the
tensor shape of `filter`, where `filter` is a 4-D `[filter_height,
filter_width, in_channels, depthwise_multiplier]` tensor.
out_backprop: A `Tensor`. Must have the same type as `input`. 4-D with shape
based on `data_format`. For example, if `data_format` is 'NHWC' then
out_backprop shape is `[batch, out_height, out_width, out_channels]`.
Gradients w.r.t. the output of the convolution.
strides: A list of `ints`. The stride of the sliding window for each
dimension of the input of the convolution.
padding: Controls how to pad the image before applying the convolution. Can
be the string `"SAME"` or `"VALID"` indicating the type of padding
algorithm to use, or a list indicating the explicit paddings at the start
and end of each dimension. When explicit padding is used and data_format
is `"NHWC"`, this should be in the form `[[0, 0], [pad_top, pad_bottom],
[pad_left, pad_right], [0, 0]]`. When explicit padding is used and
data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to
`"NHWC"`. Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of: [batch, height,
width, channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, channels, height, width].
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 1-D
tensor of length 4. The dilation factor for each dimension of `input`. If
set to k > 1, there will be k-1 skipped cells between each filter element
on that dimension. The dimension order is determined by the value of
`data_format`, see above for details. Dilations in the batch and depth
dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
padding, explicit_paddings = convert_padding(padding)
return gen_nn_ops.depthwise_conv2d_native_backprop_filter(
input,
filter_sizes,
out_backprop,
strides,
padding,
explicit_paddings=explicit_paddings,
data_format=data_format,
dilations=dilations,
name=name)
def _conv3d_expanded_batch(
input, # pylint: disable=redefined-builtin
filter, # pylint: disable=redefined-builtin
strides,
padding,
data_format,
dilations=None,
name=None):
"""Helper function for `conv3d`; handles expanded batches."""
shape = input.shape
# shape object may lack ndims, e.g., if input is an np.ndarray. In that case,
# we fall back to len(shape).
ndims = getattr(shape, "ndims", -1)
if ndims == -1:
ndims = len(shape)
if ndims in (5, 4, 3, 2, 1, 0, None):
# We avoid calling squeeze_batch_dims to reduce extra python function
# call slowdown in eager mode. This branch doesn't require reshapes.
return gen_nn_ops.conv3d(
input,
filter,
strides,
padding,
data_format=data_format,
dilations=dilations,
name=name)
else:
return squeeze_batch_dims(
input,
functools.partial(
gen_nn_ops.conv3d,
filter=filter,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilations),
inner_rank=4,
name=name)
@tf_export("nn.conv3d", v1=[])
@dispatch.add_dispatch_support
def conv3d_v2(input, # pylint: disable=redefined-builtin,missing-docstring
filters,
strides,
padding,
data_format="NDHWC",
dilations=None,
name=None):
if dilations is None:
dilations = [1, 1, 1, 1, 1]
return _conv3d_expanded_batch(input, filters, strides, padding, data_format,
dilations, name)
@tf_export(v1=["nn.conv3d"])
@dispatch.add_dispatch_support
def conv3d_v1( # pylint: disable=missing-docstring,dangerous-default-value
input, # pylint: disable=redefined-builtin
filter=None, # pylint: disable=redefined-builtin
strides=None,
padding=None,
data_format="NDHWC",
dilations=[1, 1, 1, 1, 1],
name=None,
filters=None):
filter = deprecated_argument_lookup("filters", filters, "filter", filter)
return gen_nn_ops.conv3d(
input, filter, strides, padding, data_format, dilations, name)
conv3d_v2.__doc__ = deprecation.rewrite_argument_docstring(
gen_nn_ops.conv3d.__doc__, "filter", "filters")
conv3d_v1.__doc__ = gen_nn_ops.conv3d.__doc__
@tf_export(v1=["nn.conv3d_transpose"])
@dispatch.add_dispatch_support
def conv3d_transpose(
value,
filter=None, # pylint: disable=redefined-builtin
output_shape=None,
strides=None,
padding="SAME",
data_format="NDHWC",
name=None,
input=None, # pylint: disable=redefined-builtin
filters=None,
dilations=None):
"""The transpose of `conv3d`.
This operation is sometimes called "deconvolution" after
(Zeiler et al., 2010), but is really the transpose (gradient) of `conv3d`
rather than an actual deconvolution.
Args:
value: A 5-D `Tensor` of type `float` and shape
`[batch, depth, height, width, in_channels]`.
filter: A 5-D `Tensor` with the same type as `value` and shape
`[depth, height, width, output_channels, in_channels]`. `filter`'s
`in_channels` dimension must match that of `value`.
output_shape: A 1-D `Tensor` representing the output shape of the
deconvolution op.
strides: A list of ints. The stride of the sliding window for each
dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
See the "returns" section of `tf.nn.convolution` for details.
data_format: A string, either `'NDHWC'` or `'NCDHW'` specifying the layout
of the input and output tensors. Defaults to `'NDHWC'`.
name: Optional name for the returned tensor.
input: Alias of value.
filters: Alias of filter.
dilations: An int or list of `ints` that has length `1`, `3` or `5`,
defaults to 1. The dilation factor for each dimension of `input`. If a
single value is given it is replicated in the `D`, `H` and `W` dimension.
By default the `N` and `C` dimensions are set to 1. If set to k > 1, there
will be k-1 skipped cells between each filter element on that dimension.
The dimension order is determined by the value of `data_format`, see above
for details. Dilations in the batch and depth dimensions if a 5-d tensor
must be 1.
Returns:
A `Tensor` with the same type as `value`.
Raises:
ValueError: If input/output depth does not match `filter`'s shape, or if
padding is other than `'VALID'` or `'SAME'`.
References:
Deconvolutional Networks:
[Zeiler et al., 2010]
(https://ieeexplore.ieee.org/abstract/document/5539957)
([pdf]
(http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))
"""
filter = deprecated_argument_lookup("filters", filters, "filter", filter)
value = deprecated_argument_lookup("input", input, "value", value)
return conv3d_transpose_v2(
value,
filter,
output_shape,
strides,
padding=padding,
data_format=data_format,
dilations=dilations,
name=name)
@tf_export("nn.conv3d_transpose", v1=[])
@dispatch.add_dispatch_support
def conv3d_transpose_v2(input, # pylint: disable=redefined-builtin
filters,
output_shape,
strides,
padding="SAME",
data_format="NDHWC",
dilations=None,
name=None):
"""The transpose of `conv3d`.
This operation is sometimes called "deconvolution" after
(Zeiler et al., 2010), but is really the transpose (gradient) of `conv3d`
rather than an actual deconvolution.
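A minimal usage sketch (toy shapes chosen for illustration; assumes eager
execution and `import tensorflow as tf`):
```python
x = tf.random.normal([1, 4, 4, 4, 8])          # NDHWC input
kernel = tf.random.normal([3, 3, 3, 16, 8])    # [depth, height, width, output_channels, in_channels]
y = tf.nn.conv3d_transpose(x, kernel, output_shape=[1, 8, 8, 8, 16], strides=2)
# y.shape == (1, 8, 8, 8, 16)
```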
Args:
input: A 5-D `Tensor` of type `float` and shape `[batch, depth, height,
width, in_channels]` for `NDHWC` data format or `[batch, in_channels,
depth, height, width]` for `NCDHW` data format.
filters: A 5-D `Tensor` with the same type as `input` and shape `[depth,
height, width, output_channels, in_channels]`. `filter`'s `in_channels`
dimension must match that of `input`.
output_shape: A 1-D `Tensor` representing the output shape of the
deconvolution op.
strides: An int or list of `ints` that has length `1`, `3` or `5`. The
stride of the sliding window for each dimension of `input`. If a single
value is given it is replicated in the `D`, `H` and `W` dimension. By
default the `N` and `C` dimensions are set to 1. The dimension order is
determined by the value of `data_format`, see below for details.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See
the "returns" section of `tf.nn.convolution` for details.
data_format: A string. 'NDHWC' and 'NCDHW' are supported.
dilations: An int or list of `ints` that has length `1`, `3` or `5`,
defaults to 1. The dilation factor for each dimension of `input`. If a
single value is given it is replicated in the `D`, `H` and `W` dimension.
By default the `N` and `C` dimensions are set to 1. If set to k > 1, there
will be k-1 skipped cells between each filter element on that dimension.
The dimension order is determined by the value of `data_format`, see above
for details. Dilations in the batch and depth dimensions if a 5-d tensor
must be 1.
name: Optional name for the returned tensor.
Returns:
A `Tensor` with the same type as `input`.
References:
Deconvolutional Networks:
[Zeiler et al., 2010]
(https://ieeexplore.ieee.org/abstract/document/5539957)
([pdf]
(http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))
"""
with ops.name_scope(name, "conv3d_transpose",
[input, filters, output_shape]) as name:
if data_format is None:
data_format = "NDHWC"
channel_index = 1 if data_format.startswith("NC") else 4
strides = _get_sequence(strides, 3, channel_index, "strides")
dilations = _get_sequence(dilations, 3, channel_index, "dilations")
return gen_nn_ops.conv3d_backprop_input_v2(
input_sizes=output_shape,
filter=filters,
out_backprop=input,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilations,
name=name)
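# Illustrative usage sketch (not part of the original module): upsampling a
# 5-D feature map through the public `tf.nn.conv3d_transpose` wrapper. The
# tensor shapes below are assumptions chosen only for this example.
#
#   x = tf.random.normal([2, 4, 4, 4, 8])             # NDHWC input
#   k = tf.random.normal([3, 3, 3, 16, 8])            # [d, h, w, out_ch, in_ch]
#   y = tf.nn.conv3d_transpose(x, k, output_shape=[2, 8, 8, 8, 16],
#                              strides=2, padding="SAME")
#   # With stride 2 and "SAME" padding each spatial dimension doubles:
#   # y.shape == (2, 8, 8, 8, 16)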
CONV_TRANSPOSE_OPS = (
conv1d_transpose,
conv2d_transpose_v2,
conv3d_transpose_v2,
)
@tf_export("nn.conv_transpose")
@dispatch.add_dispatch_support
def conv_transpose(input, # pylint: disable=redefined-builtin
filters,
output_shape,
strides,
padding="SAME",
data_format=None,
dilations=None,
name=None):
"""The transpose of `convolution`.
This operation is sometimes called "deconvolution" after
(Zeiler et al., 2010), but is really the transpose (gradient) of `convolution`
rather than an actual deconvolution.
Args:
input: An N+2 dimensional `Tensor` of shape
`[batch_size] + input_spatial_shape + [in_channels]` if data_format does
not start with "NC" (default), or
`[batch_size, in_channels] + input_spatial_shape` if data_format starts
with "NC". It must be one of the following types:
`half`, `bfloat16`, `float32`, `float64`.
filters: An N+2 dimensional `Tensor` with the same type as `input` and
shape `spatial_filter_shape + [in_channels, out_channels]`.
output_shape: A 1-D `Tensor` representing the output shape of the
deconvolution op.
strides: An int or list of `ints` that has length `1`, `N` or `N+2`. The
stride of the sliding window for each dimension of `input`. If a single
value is given it is replicated in the spatial dimensions. By default
the `N` and `C` dimensions are set to 1. The dimension order is determined
by the value of `data_format`, see below for details.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See
the "returns" section of `tf.nn.convolution` for details.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
For N=3, the valid values are "NDHWC" (default) and "NCDHW".
dilations: An int or list of `ints` that has length `1`, `N` or `N+2`,
defaults to 1. The dilation factor for each dimension of `input`. If a
single value is given it is replicated in the spatial dimensions. By
default the `N` and `C` dimensions are set to 1. If set to k > 1, there
will be k-1 skipped cells between each filter element on that dimension.
The dimension order is determined by the value of `data_format`, see above
for details.
name: A name for the operation (optional). If not specified "conv_transpose"
is used.
Returns:
A `Tensor` with the same type as `input`.
References:
Deconvolutional Networks:
[Zeiler et al., 2010]
(https://ieeexplore.ieee.org/abstract/document/5539957)
([pdf]
(http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))
"""
with ops.name_scope(name, "conv_transpose",
[input, filters, output_shape]) as name:
if tensor_util.is_tensor(output_shape):
n = output_shape.shape[0] - 2
elif isinstance(output_shape, collections_abc.Sized):
n = len(output_shape) - 2
else:
raise ValueError("output_shape must be a tensor or sized collection.")
if not 1 <= n <= 3:
raise ValueError(
"output_shape must be of length 3, 4 or 5 but was {}.".format(n + 2))
op = CONV_TRANSPOSE_OPS[n-1]
return op(
input,
filters,
output_shape,
strides,
padding=padding,
data_format=data_format,
dilations=dilations,
name=name)
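# Illustrative sketch (assumed shapes, not from the original source): the rank
# of `output_shape` selects the underlying 1-D/2-D/3-D transpose op from
# CONV_TRANSPOSE_OPS above.
#
#   x = tf.random.normal([2, 8, 8, 3])                # NHWC input
#   k = tf.random.normal([3, 3, 3, 3])                # in_channels == out_channels == 3
#   y = tf.nn.conv_transpose(x, k, output_shape=[2, 16, 16, 3], strides=2)
#   # len(output_shape) - 2 == 2, so conv2d_transpose_v2 handles the call and
#   # y.shape == (2, 16, 16, 3).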
def _tf_deterministic_ops():
if _tf_deterministic_ops.value is None:
tf_deterministic_ops = os.environ.get("TF_DETERMINISTIC_OPS")
if tf_deterministic_ops is not None:
tf_deterministic_ops = tf_deterministic_ops.lower()
_tf_deterministic_ops.value = (
tf_deterministic_ops == "true" or tf_deterministic_ops == "1")
return _tf_deterministic_ops.value
_tf_deterministic_ops.value = None
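# Editorial note (sketch): because the parsed result is cached on the function
# object above, TF_DETERMINISTIC_OPS must be set before the first op that
# consults it (for example, the first `tf.nn.bias_add` call in the process).
#
#   import os
#   os.environ["TF_DETERMINISTIC_OPS"] = "1"   # "1" or "true", case-insensitive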
@tf_export("nn.bias_add")
@dispatch.add_dispatch_support
def bias_add(value, bias, data_format=None, name=None):
"""Adds `bias` to `value`.
This is (mostly) a special case of `tf.add` where `bias` is restricted to 1-D.
Broadcasting is supported, so `value` may have any number of dimensions.
Unlike `tf.add`, the type of `bias` is allowed to differ from `value` in the
case where both types are quantized.
Args:
value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`,
`int16`, `int8`, `complex64`, or `complex128`.
bias: A 1-D `Tensor` with size matching the channel dimension of `value`.
Must be the same type as `value` unless `value` is a quantized type,
in which case a different quantized type may be used.
data_format: A string. 'N...C' and 'NC...' are supported. If `None` (the
default) is specified then 'N...C' is assumed.
name: A name for the operation (optional).
Returns:
A `Tensor` with the same type as `value`.
Raises:
ValueError: if data format is unrecognized, if `value` has less than two
dimensions when `data_format` is 'N...C'/`None`, if `value` has less
than three dimensions when `data_format` is 'NC...', if `bias` does not
have exactly one dimension (i.e., is not a vector), or if the size of `bias`
does not match the size of the channel dimension of `value`.
"""
with ops.name_scope(name, "BiasAdd", [value, bias]) as name:
if data_format is not None:
if data_format.startswith("NC"):
data_format = "NCHW"
elif data_format.startswith("N") and data_format.endswith("C"):
data_format = "NHWC"
else:
raise ValueError("data_format must be of the form `N...C` or `NC...`")
if not context.executing_eagerly():
value = ops.convert_to_tensor(value, name="input")
bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias")
# TODO(duncanriach): Implement deterministic functionality at CUDA kernel
# level.
if _tf_deterministic_ops():
# Note that this code does not implement the same error checks as the
# pre-existing C++ ops.
if data_format == "NCHW":
broadcast_shape_head = [1, array_ops.size(bias)]
broadcast_shape_tail = array_ops.ones(
array_ops.rank(value) - 2, dtype=dtypes.int32)
broadcast_shape = array_ops.concat(
[broadcast_shape_head, broadcast_shape_tail], 0)
return math_ops.add(
value, array_ops.reshape(bias, broadcast_shape), name=name)
else: # data_format == 'NHWC' or data_format == None
return math_ops.add(value, bias, name=name)
else:
return gen_nn_ops.bias_add(
value, bias, data_format=data_format, name=name)
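# Illustrative usage sketch (assumed values): adding a per-channel bias with
# the default channels-last layout.
#
#   x = tf.reshape(tf.range(6.0), [1, 2, 3])      # shape [1, 2, 3]
#   b = tf.constant([10.0, 20.0, 30.0])           # one bias per channel
#   y = tf.nn.bias_add(x, b)
#   # y[0, 0] == [10., 21., 32.]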
def bias_add_v1(value, bias, name=None):
"""Adds `bias` to `value`.
This is a deprecated version of bias_add and will soon be removed.
This is (mostly) a special case of `tf.add` where `bias` is restricted to 1-D.
Broadcasting is supported, so `value` may have any number of dimensions.
Unlike `tf.add`, the type of `bias` is allowed to differ from `value` in the
case where both types are quantized.
Args:
value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`,
`int16`, `int8`, `complex64`, or `complex128`.
bias: A 1-D `Tensor` with size matching the last dimension of `value`.
Must be the same type as `value` unless `value` is a quantized type,
in which case a different quantized type may be used.
name: A name for the operation (optional).
Returns:
A `Tensor` with the same type as `value`.
"""
with ops.name_scope(name, "BiasAddV1", [value, bias]) as name:
value = ops.convert_to_tensor(value, name="input")
bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias")
return gen_nn_ops.bias_add_v1(value, bias, name=name)
@tf_export(v1=["nn.crelu"])
@dispatch.add_dispatch_support
def crelu(features, name=None, axis=-1):
"""Computes Concatenated ReLU.
Concatenates a ReLU which selects only the positive part of the activation
with a ReLU which selects only the *negative* part of the activation.
Note that as a result this non-linearity doubles the depth of the activations.
Source: [Understanding and Improving Convolutional Neural Networks via
Concatenated Rectified Linear Units. W. Shang, et
al.](https://arxiv.org/abs/1603.05201)
Args:
features: A `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`,
`int16`, or `int8`.
name: A name for the operation (optional).
axis: The axis that the output values are concatenated along. Default is -1.
Returns:
A `Tensor` with the same type as `features`.
References:
Understanding and Improving Convolutional Neural Networks via Concatenated
Rectified Linear Units:
[Shang et al., 2016](http://proceedings.mlr.press/v48/shang16)
([pdf](http://proceedings.mlr.press/v48/shang16.pdf))
"""
with ops.name_scope(name, "CRelu", [features]) as name:
features = ops.convert_to_tensor(features, name="features")
c = array_ops.concat([features, -features], axis, name=name)
return gen_nn_ops.relu(c)
@tf_export("nn.crelu", v1=[])
@dispatch.add_dispatch_support
def crelu_v2(features, axis=-1, name=None):
return crelu(features, name=name, axis=axis)
crelu_v2.__doc__ = crelu.__doc__
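# Illustrative sketch (assumed values): CReLU doubles the channel count along
# `axis` by concatenating relu(x) with relu(-x).
#
#   x = tf.constant([[-1.0, 2.0]])
#   tf.nn.crelu(x)    # -> [[0., 2., 1., 0.]], shape (1, 4)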
@tf_export("nn.relu6")
@dispatch.add_dispatch_support
def relu6(features, name=None):
"""Computes Rectified Linear 6: `min(max(features, 0), 6)`.
Args:
features: A `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`,
`int16`, or `int8`.
name: A name for the operation (optional).
Returns:
A `Tensor` with the same type as `features`.
References:
Convolutional Deep Belief Networks on CIFAR-10:
Krizhevsky et al., 2010
([pdf](http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf))
"""
with ops.name_scope(name, "Relu6", [features]) as name:
features = ops.convert_to_tensor(features, name="features")
return gen_nn_ops.relu6(features, name=name)
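# Illustrative sketch (assumed values): values are clipped to the [0, 6] range.
#
#   tf.nn.relu6([-3.0, 2.0, 8.0])   # -> [0., 2., 6.]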
@tf_export("nn.leaky_relu")
@dispatch.add_dispatch_support
def leaky_relu(features, alpha=0.2, name=None):
"""Compute the Leaky ReLU activation function.
Source: [Rectifier Nonlinearities Improve Neural Network Acoustic Models.
AL Maas, AY Hannun, AY Ng - Proc. ICML, 2013]
(https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf).
Args:
features: A `Tensor` representing preactivation values. Must be one of
the following types: `float16`, `float32`, `float64`, `int32`, `int64`.
alpha: Slope of the activation function at x < 0.
name: A name for the operation (optional).
Returns:
The activation value.
References:
Rectifier Nonlinearities Improve Neural Network Acoustic Models:
[Maas et al., 2013]
(http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.693.1422)
([pdf]
(http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.693.1422&rep=rep1&type=pdf))
"""
with ops.name_scope(name, "LeakyRelu", [features, alpha]) as name:
features = ops.convert_to_tensor(features, name="features")
if features.dtype.is_integer:
features = math_ops.cast(features, dtypes.float32)
if isinstance(alpha, np.ndarray):
alpha = alpha.item()
return gen_nn_ops.leaky_relu(features, alpha=alpha, name=name)
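# Illustrative sketch (assumed values): negative inputs are scaled by `alpha`
# instead of being zeroed.
#
#   tf.nn.leaky_relu([-2.0, 0.0, 3.0], alpha=0.2)   # -> [-0.4, 0., 3.]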
@tf_export("nn.gelu", v1=[])
@dispatch.add_dispatch_support
def gelu(features, approximate=False, name=None):
"""Compute the Gaussian Error Linear Unit (GELU) activation function.
Gaussian error linear unit (GELU) computes
`x * P(X <= x)`, where `X ~ N(0, 1)`.
The (GELU) nonlinearity weights inputs by their value, rather than gates
inputs by their sign as in ReLU.
For example:
>>> x = tf.constant([-3.0, -1.0, 0.0, 1.0, 3.0], dtype=tf.float32)
>>> y = tf.nn.gelu(x)
>>> y.numpy()
array([-0.00404951, -0.15865529, 0. , 0.8413447 , 2.9959507 ],
dtype=float32)
>>> y = tf.nn.gelu(x, approximate=True)
>>> y.numpy()
array([-0.00363752, -0.15880796, 0. , 0.841192 , 2.9963627 ],
dtype=float32)
Args:
features: A `Tensor` representing preactivation values.
approximate: An optional `bool`. Defaults to `False`. Whether to enable
approximation.
name: A name for the operation (optional).
Returns:
A `Tensor` with the same type as `features`.
References:
[Gaussian Error Linear Units (GELUs)](https://arxiv.org/abs/1606.08415).
"""
with ops.name_scope(name, "Gelu", [features]):
features = ops.convert_to_tensor(features, name="features")
if approximate:
coeff = math_ops.cast(0.044715, features.dtype)
return 0.5 * features * (
1.0 + math_ops.tanh(0.7978845608028654 *
(features + coeff * math_ops.pow(features, 3))))
else:
return 0.5 * features * (1.0 + math_ops.erf(
features / math_ops.cast(1.4142135623730951, features.dtype)))
def _flatten_outer_dims(logits):
"""Flattens logits' outer dimensions and keep its last dimension."""
rank = array_ops.rank(logits)
last_dim_size = array_ops.slice(
array_ops.shape(logits), [math_ops.subtract(rank, 1)], [1])
output = array_ops.reshape(logits, array_ops.concat([[-1], last_dim_size], 0))
# Set output shape if known.
if not context.executing_eagerly():
shape = logits.get_shape()
if shape is not None and shape.dims is not None:
shape = shape.as_list()
product = 1
product_valid = True
for d in shape[:-1]:
if d is None:
product_valid = False
break
else:
product *= d
if product_valid:
output_shape = [product, shape[-1]]
output.set_shape(output_shape)
return output
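# Editorial example of the reshape performed above (assumed shape): a logits
# tensor of shape [2, 3, 4] is flattened to [6, 4]; only the last (class)
# dimension is preserved, so downstream softmax ops can treat it as a matrix.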
def _softmax(logits, compute_op, dim=-1, name=None):
"""Helper function for softmax and log_softmax.
It reshapes and transposes the input logits into a 2-D Tensor and then invokes
the tf.nn._softmax or tf.nn._log_softmax function. The output would be
transposed and reshaped back.
Args:
logits: A non-empty `Tensor`. Must be one of the following types: `half`,
`float32`, `float64`.
compute_op: Either gen_nn_ops.softmax or gen_nn_ops.log_softmax
dim: The dimension softmax would be performed on. The default is -1 which
indicates the last dimension.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `logits`. Same shape as `logits`.
Raises:
InvalidArgumentError: if `logits` is empty or `dim` is beyond the last
dimension of `logits`.
"""
def _swap_axis(logits, dim_index, last_index, name=None):
"""Swaps logits's dim_index and last_index."""
return array_ops.transpose(
logits,
array_ops.concat([
math_ops.range(dim_index), [last_index],
math_ops.range(dim_index + 1, last_index), [dim_index]
], 0),
name=name)
logits = ops.convert_to_tensor(logits)
# We need its original shape for shape inference.
shape = logits.get_shape()
is_last_dim = (dim == -1) or (dim == shape.ndims - 1)
if is_last_dim:
return compute_op(logits, name=name)
dim_val = dim
if isinstance(dim, ops.Tensor):
dim_val = tensor_util.constant_value(dim)
if dim_val is not None and not -shape.ndims <= dim_val < shape.ndims:
raise errors_impl.InvalidArgumentError(
None, None,
"Dimension (%d) must be in the range [%d, %d) where %d is the number of"
" dimensions in the input." % (dim_val, -shape.ndims, shape.ndims,
shape.ndims))
# If dim is not the last dimension, we have to do a transpose so that we can
# still perform softmax on its last dimension.
# In case dim is negative (and is not last dimension -1), add shape.ndims
ndims = array_ops.rank(logits)
if not isinstance(dim, ops.Tensor):
if dim < 0:
dim += ndims
else:
dim = array_ops.where(math_ops.less(dim, 0), dim + ndims, dim)
# Swap logits' dimension of dim and its last dimension.
input_rank = array_ops.rank(logits)
dim_axis = dim % shape.ndims
logits = _swap_axis(logits, dim_axis, math_ops.subtract(input_rank, 1))
# Do the actual softmax on its last dimension.
output = compute_op(logits)
output = _swap_axis(
output, dim_axis, math_ops.subtract(input_rank, 1), name=name)
# Make shape inference work since transpose may erase its static shape.
output.set_shape(shape)
return output
@tf_export(v1=["nn.softmax", "math.softmax"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def softmax(logits, axis=None, name=None, dim=None):
"""Computes softmax activations.
This function performs the equivalent of
softmax = tf.exp(logits) / tf.reduce_sum(tf.exp(logits), axis)
See: https://en.wikipedia.org/wiki/Softmax_function
Example usage:
>>> tf.nn.softmax([-1, 0., 1.])
<tf.Tensor: shape=(3,), dtype=float32,
numpy=array([0.09003057, 0.24472848, 0.66524094], dtype=float32)>
Args:
logits: A non-empty `Tensor`, or an object whose type has a registered
`Tensor` conversion function. Must be one of the following types:
`half`, `float32`, `float64`. See also `convert_to_tensor`.
axis: The dimension softmax would be performed on. The default is -1 which
indicates the last dimension.
name: A name for the operation (optional).
dim: Deprecated alias for `axis`.
Returns:
A `Tensor`. Has the same type and shape as `logits`.
Raises:
InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
dimension of `logits`.
TypeError: If no conversion function is registered for `logits` to
Tensor.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
axis = deprecation.deprecated_argument_lookup("axis", axis, "dim", dim)
if axis is None:
axis = -1
return _softmax(logits, gen_nn_ops.softmax, axis, name)
@tf_export("nn.softmax", "math.softmax", v1=[])
@dispatch.add_dispatch_support
def softmax_v2(logits, axis=None, name=None):
"""Computes softmax activations.
This function performs the equivalent of
softmax = tf.exp(logits) / tf.reduce_sum(tf.exp(logits), axis)
Args:
logits: A non-empty `Tensor`. Must be one of the following types: `half`,
`float32`, `float64`.
axis: The dimension softmax would be performed on. The default is -1 which
indicates the last dimension.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type and shape as `logits`.
Raises:
InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
dimension of `logits`.
"""
if axis is None:
axis = -1
return _softmax(logits, gen_nn_ops.softmax, axis, name)
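# Illustrative sketch (assumed values): `axis` selects the reduction dimension.
#
#   x = tf.constant([[2.0, 1.0], [0.0, 1.0]])
#   tf.nn.softmax(x, axis=0)
#   # column 0 is softmax([2., 0.]) ~= [0.8808, 0.1192]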
@tf_export(v1=["nn.log_softmax", "math.log_softmax"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def log_softmax(logits, axis=None, name=None, dim=None):
"""Computes log softmax activations.
For each batch `i` and class `j` we have
logsoftmax = logits - log(reduce_sum(exp(logits), axis))
Args:
logits: A non-empty `Tensor`. Must be one of the following types: `half`,
`float32`, `float64`.
axis: The dimension softmax would be performed on. The default is -1 which
indicates the last dimension.
name: A name for the operation (optional).
dim: Deprecated alias for `axis`.
Returns:
A `Tensor`. Has the same type as `logits`. Same shape as `logits`.
Raises:
InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
dimension of `logits`.
"""
axis = deprecation.deprecated_argument_lookup("axis", axis, "dim", dim)
if axis is None:
axis = -1
return _softmax(logits, gen_nn_ops.log_softmax, axis, name)
@tf_export("nn.log_softmax", "math.log_softmax", v1=[])
@dispatch.add_dispatch_support
def log_softmax_v2(logits, axis=None, name=None):
"""Computes log softmax activations.
For each batch `i` and class `j` we have
logsoftmax = logits - log(reduce_sum(exp(logits), axis))
Args:
logits: A non-empty `Tensor`. Must be one of the following types: `half`,
`float32`, `float64`.
axis: The dimension softmax would be performed on. The default is -1 which
indicates the last dimension.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `logits`. Same shape as `logits`.
Raises:
InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
dimension of `logits`.
"""
if axis is None:
axis = -1
return _softmax(logits, gen_nn_ops.log_softmax, axis, name)
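# Illustrative sketch (assumed values): log_softmax equals the logits shifted
# by their log-sum-exp along `axis`.
#
#   x = tf.constant([1.0, 2.0, 3.0])
#   tf.nn.log_softmax(x)   # == x - tf.reduce_logsumexp(x)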
def _ensure_xent_args(name, sentinel, labels, logits):
# Make sure that all arguments were passed as named arguments.
if sentinel is not None:
raise ValueError("Only call `%s` with "
"named arguments (labels=..., logits=..., ...)" % name)
if labels is None or logits is None:
raise ValueError("Both labels and logits must be provided.")
@tf_export("nn.softmax_cross_entropy_with_logits", v1=[])
@dispatch.add_dispatch_support
def softmax_cross_entropy_with_logits_v2(labels, logits, axis=-1, name=None):
"""Computes softmax cross entropy between `logits` and `labels`.
Measures the probability error in discrete classification tasks in which the
classes are mutually exclusive (each entry is in exactly one class). For
example, each CIFAR-10 image is labeled with one and only one label: an image
can be a dog or a truck, but not both.
**NOTE:** While the classes are mutually exclusive, their probabilities
need not be. All that is required is that each row of `labels` is
a valid probability distribution. If they are not, the computation of the
gradient will be incorrect.
If using exclusive `labels` (wherein one and only
one class is true at a time), see `sparse_softmax_cross_entropy_with_logits`.
Usage:
>>> logits = [[4.0, 2.0, 1.0], [0.0, 5.0, 1.0]]
>>> labels = [[1.0, 0.0, 0.0], [0.0, 0.8, 0.2]]
>>> tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)
<tf.Tensor: shape=(2,), dtype=float32,
numpy=array([0.16984604, 0.82474494], dtype=float32)>
**WARNING:** This op expects unscaled logits, since it performs a `softmax`
on `logits` internally for efficiency. Do not call this op with the
output of `softmax`, as it will produce incorrect results.
A common use case is to have logits and labels of shape
`[batch_size, num_classes]`, but higher dimensions are supported, with
the `axis` argument specifying the class dimension.
`logits` and `labels` must have the same dtype (either `float16`, `float32`,
or `float64`).
Backpropagation will happen into both `logits` and `labels`. To disallow
backpropagation into `labels`, pass label tensors through `tf.stop_gradient`
before feeding it to this function.
**Note that to avoid confusion, it is required to pass only named arguments to
this function.**
Args:
labels: Each vector along the class dimension should hold a valid
probability distribution e.g. for the case in which labels are of shape
`[batch_size, num_classes]`, each row of `labels[i]` must be a valid
probability distribution.
logits: Per-label activations, typically a linear output. These activation
energies are interpreted as unnormalized log probabilities.
axis: The class dimension. Defaulted to -1 which is the last dimension.
name: A name for the operation (optional).
Returns:
A `Tensor` that contains the softmax cross entropy loss. Its type is the
same as `logits` and its shape is the same as `labels` except that it does
not have the last dimension of `labels`.
"""
return softmax_cross_entropy_with_logits_v2_helper(
labels=labels, logits=logits, axis=axis, name=name)
@tf_export(v1=["nn.softmax_cross_entropy_with_logits_v2"])
@dispatch.add_dispatch_support
@deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def softmax_cross_entropy_with_logits_v2_helper(
labels, logits, axis=None, name=None, dim=None):
"""Computes softmax cross entropy between `logits` and `labels`.
Measures the probability error in discrete classification tasks in which the
classes are mutually exclusive (each entry is in exactly one class). For
example, each CIFAR-10 image is labeled with one and only one label: an image
can be a dog or a truck, but not both.
**NOTE:** While the classes are mutually exclusive, their probabilities
need not be. All that is required is that each row of `labels` is
a valid probability distribution. If they are not, the computation of the
gradient will be incorrect.
If using exclusive `labels` (wherein one and only
one class is true at a time), see `sparse_softmax_cross_entropy_with_logits`.
**WARNING:** This op expects unscaled logits, since it performs a `softmax`
on `logits` internally for efficiency. Do not call this op with the
output of `softmax`, as it will produce incorrect results.
A common use case is to have logits and labels of shape
`[batch_size, num_classes]`, but higher dimensions are supported, with
the `axis` argument specifying the class dimension.
`logits` and `labels` must have the same dtype (either `float16`, `float32`,
or `float64`).
Backpropagation will happen into both `logits` and `labels`. To disallow
backpropagation into `labels`, pass label tensors through `tf.stop_gradient`
before feeding it to this function.
**Note that to avoid confusion, it is required to pass only named arguments to
this function.**
Args:
labels: Each vector along the class dimension should hold a valid
probability distribution e.g. for the case in which labels are of shape
`[batch_size, num_classes]`, each row of `labels[i]` must be a valid
probability distribution.
logits: Unscaled log probabilities.
axis: The class dimension. Defaulted to -1 which is the last dimension.
name: A name for the operation (optional).
dim: Deprecated alias for axis.
Returns:
A `Tensor` that contains the softmax cross entropy loss. Its type is the
same as `logits` and its shape is the same as `labels` except that it does
not have the last dimension of `labels`.
"""
# TODO(pcmurray) Raise an error when the labels do not sum to 1. Note: This
# could break users who call this with bad labels, but disregard the bad
# results.
axis = deprecated_argument_lookup("axis", axis, "dim", dim)
del dim
if axis is None:
axis = -1
with ops.name_scope(name, "softmax_cross_entropy_with_logits",
[logits, labels]) as name:
logits = ops.convert_to_tensor(logits, name="logits")
labels = ops.convert_to_tensor(labels, name="labels")
convert_to_float32 = (
logits.dtype == dtypes.float16 or logits.dtype == dtypes.bfloat16)
precise_logits = math_ops.cast(
logits, dtypes.float32) if convert_to_float32 else logits
# labels and logits must be of the same type
labels = math_ops.cast(labels, precise_logits.dtype)
input_rank = array_ops.rank(precise_logits)
# For shape inference.
shape = logits.get_shape()
# Move the dim to the end if dim is not the last dimension.
if axis != -1:
def _move_dim_to_end(tensor, dim_index, rank):
return array_ops.transpose(
tensor,
array_ops.concat([
math_ops.range(dim_index),
math_ops.range(dim_index + 1, rank), [dim_index]
], 0))
precise_logits = _move_dim_to_end(precise_logits, axis, input_rank)
labels = _move_dim_to_end(labels, axis, input_rank)
input_shape = array_ops.shape(precise_logits)
# Make precise_logits and labels into matrices.
precise_logits = _flatten_outer_dims(precise_logits)
labels = _flatten_outer_dims(labels)
# Do the actual op computation.
# The second output tensor contains the gradients. We use it in
# CrossEntropyGrad() in nn_grad but not here.
cost, unused_backprop = gen_nn_ops.softmax_cross_entropy_with_logits(
precise_logits, labels, name=name)
# The output cost shape should be the input minus axis.
output_shape = array_ops.slice(input_shape, [0],
[math_ops.subtract(input_rank, 1)])
cost = array_ops.reshape(cost, output_shape)
# Make shape inference work since reshape and transpose may erase its static
# shape.
if not context.executing_eagerly(
) and shape is not None and shape.dims is not None:
shape = shape.as_list()
del shape[axis]
cost.set_shape(shape)
if convert_to_float32:
return math_ops.cast(cost, logits.dtype)
else:
return cost
_XENT_DEPRECATION = """
Future major versions of TensorFlow will allow gradients to flow
into the labels input on backprop by default.
See `tf.nn.softmax_cross_entropy_with_logits_v2`.
"""
@tf_export(v1=["nn.softmax_cross_entropy_with_logits"])
@dispatch.add_dispatch_support
@deprecation.deprecated(date=None, instructions=_XENT_DEPRECATION)
def softmax_cross_entropy_with_logits(
_sentinel=None, # pylint: disable=invalid-name
labels=None,
logits=None,
dim=-1,
name=None,
axis=None):
"""Computes softmax cross entropy between `logits` and `labels`.
Measures the probability error in discrete classification tasks in which the
classes are mutually exclusive (each entry is in exactly one class). For
example, each CIFAR-10 image is labeled with one and only one label: an image
can be a dog or a truck, but not both.
**NOTE:** While the classes are mutually exclusive, their probabilities
need not be. All that is required is that each row of `labels` is
a valid probability distribution. If they are not, the computation of the
gradient will be incorrect.
If using exclusive `labels` (wherein one and only
one class is true at a time), see `sparse_softmax_cross_entropy_with_logits`.
**WARNING:** This op expects unscaled logits, since it performs a `softmax`
on `logits` internally for efficiency. Do not call this op with the
output of `softmax`, as it will produce incorrect results.
A common use case is to have logits and labels of shape
`[batch_size, num_classes]`, but higher dimensions are supported, with
the `dim` argument specifying the class dimension.
Backpropagation will happen only into `logits`. To calculate a cross entropy
loss that allows backpropagation into both `logits` and `labels`, see
`tf.nn.softmax_cross_entropy_with_logits_v2`.
**Note that to avoid confusion, it is required to pass only named arguments to
this function.**
Args:
_sentinel: Used to prevent positional parameters. Internal, do not use.
labels: Each vector along the class dimension should hold a valid
probability distribution e.g. for the case in which labels are of shape
`[batch_size, num_classes]`, each row of `labels[i]` must be a valid
probability distribution.
logits: Per-label activations, typically a linear output. These activation
energies are interpreted as unnormalized log probabilities.
dim: The class dimension. Defaulted to -1 which is the last dimension.
name: A name for the operation (optional).
axis: Alias for dim.
Returns:
A `Tensor` that contains the softmax cross entropy loss. Its type is the
same as `logits` and its shape is the same as `labels` except that it does
not have the last dimension of `labels`.
"""
dim = deprecated_argument_lookup("axis", axis, "dim", dim)
_ensure_xent_args("softmax_cross_entropy_with_logits", _sentinel, labels,
logits)
with ops.name_scope(name, "softmax_cross_entropy_with_logits_sg",
[logits, labels]) as name:
labels = array_ops.stop_gradient(labels, name="labels_stop_gradient")
return softmax_cross_entropy_with_logits_v2(
labels=labels, logits=logits, axis=dim, name=name)
@tf_export(v1=["nn.sparse_softmax_cross_entropy_with_logits"])
@dispatch.add_dispatch_support
def sparse_softmax_cross_entropy_with_logits(
_sentinel=None, # pylint: disable=invalid-name
labels=None,
logits=None,
name=None):
"""Computes sparse softmax cross entropy between `logits` and `labels`.
Measures the probability error in discrete classification tasks in which the
classes are mutually exclusive (each entry is in exactly one class). For
example, each CIFAR-10 image is labeled with one and only one label: an image
can be a dog or a truck, but not both.
**NOTE:** For this operation, the probability of a given label is considered
exclusive. That is, soft classes are not allowed, and the `labels` vector
must provide a single specific index for the true class for each row of
`logits` (each minibatch entry). For soft softmax classification with
a probability distribution for each entry, see
`softmax_cross_entropy_with_logits_v2`.
**WARNING:** This op expects unscaled logits, since it performs a `softmax`
on `logits` internally for efficiency. Do not call this op with the
output of `softmax`, as it will produce incorrect results.
A common use case is to have logits of shape
`[batch_size, num_classes]` and have labels of shape
`[batch_size]`, but higher dimensions are supported, in which
case the `dim`-th dimension is assumed to be of size `num_classes`.
`logits` must have the dtype of `float16`, `float32`, or `float64`, and
`labels` must have the dtype of `int32` or `int64`.
**Note that to avoid confusion, it is required to pass only named arguments to
this function.**
Args:
_sentinel: Used to prevent positional parameters. Internal, do not use.
labels: `Tensor` of shape `[d_0, d_1, ..., d_{r-1}]` (where `r` is rank of
`labels` and result) and dtype `int32` or `int64`. Each entry in `labels`
must be an index in `[0, num_classes)`. Other values will raise an
exception when this op is run on CPU, and return `NaN` for corresponding
loss and gradient rows on GPU.
logits: Per-label activations (typically a linear output) of shape
`[d_0, d_1, ..., d_{r-1}, num_classes]` and dtype `float16`, `float32`, or
`float64`. These activation energies are interpreted as unnormalized log
probabilities.
name: A name for the operation (optional).
Returns:
A `Tensor` of the same shape as `labels` and of the same type as `logits`
with the softmax cross entropy loss.
Raises:
ValueError: If logits are scalars (need to have rank >= 1) or if the rank
of the labels is not equal to the rank of the logits minus one.
"""
_ensure_xent_args("sparse_softmax_cross_entropy_with_logits", _sentinel,
labels, logits)
# TODO(pcmurray) Raise an error when the label is not an index in
# [0, num_classes). Note: This could break users who call this with bad
# labels, but disregard the bad results.
# Reshape logits and labels to rank 2.
with ops.name_scope(name, "SparseSoftmaxCrossEntropyWithLogits",
[labels, logits]):
labels = ops.convert_to_tensor(labels)
logits = ops.convert_to_tensor(logits)
precise_logits = math_ops.cast(logits, dtypes.float32) if (dtypes.as_dtype(
logits.dtype) == dtypes.float16) else logits
# Store label shape for result later.
labels_static_shape = labels.get_shape()
labels_shape = array_ops.shape(labels)
static_shapes_fully_defined = (
labels_static_shape.is_fully_defined() and
logits.get_shape()[:-1].is_fully_defined())
if logits.get_shape().ndims is not None and logits.get_shape().ndims == 0:
raise ValueError(
"Logits cannot be scalars - received shape %s." % logits.get_shape())
if logits.get_shape().ndims is not None and (
labels_static_shape.ndims is not None and
labels_static_shape.ndims != logits.get_shape().ndims - 1):
raise ValueError("Rank mismatch: Rank of labels (received %s) should "
"equal rank of logits minus 1 (received %s)." %
(labels_static_shape.ndims, logits.get_shape().ndims))
if (static_shapes_fully_defined and
labels_static_shape != logits.get_shape()[:-1]):
raise ValueError("Shape mismatch: The shape of labels (received %s) "
"should equal the shape of logits except for the last "
"dimension (received %s)." % (labels_static_shape,
logits.get_shape()))
# Check if no reshapes are required.
if logits.get_shape().ndims == 2:
cost, _ = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
precise_logits, labels, name=name)
if logits.dtype == dtypes.float16:
return math_ops.cast(cost, dtypes.float16)
else:
return cost
# Perform a check of the dynamic shapes if the static shapes are not fully
# defined.
shape_checks = []
if not static_shapes_fully_defined:
shape_checks.append(
check_ops.assert_equal(
array_ops.shape(labels),
array_ops.shape(logits)[:-1]))
with ops.control_dependencies(shape_checks):
# Reshape logits to 2 dim, labels to 1 dim.
num_classes = array_ops.shape(logits)[array_ops.rank(logits) - 1]
precise_logits = array_ops.reshape(precise_logits, [-1, num_classes])
labels = array_ops.reshape(labels, [-1])
# The second output tensor contains the gradients. We use it in
# _CrossEntropyGrad() in nn_grad but not here.
cost, _ = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
precise_logits, labels, name=name)
cost = array_ops.reshape(cost, labels_shape)
cost.set_shape(labels_static_shape)
if logits.dtype == dtypes.float16:
return math_ops.cast(cost, dtypes.float16)
else:
return cost
@tf_export("nn.sparse_softmax_cross_entropy_with_logits", v1=[])
@dispatch.add_dispatch_support
def sparse_softmax_cross_entropy_with_logits_v2(labels, logits, name=None):
"""Computes sparse softmax cross entropy between `logits` and `labels`.
Measures the probability error in discrete classification tasks in which the
classes are mutually exclusive (each entry is in exactly one class). For
example, each CIFAR-10 image is labeled with one and only one label: an image
can be a dog or a truck, but not both.
**NOTE:** For this operation, the probability of a given label is considered
exclusive. That is, soft classes are not allowed, and the `labels` vector
must provide a single specific index for the true class for each row of
`logits` (each minibatch entry). For soft softmax classification with
a probability distribution for each entry, see
`softmax_cross_entropy_with_logits_v2`.
**WARNING:** This op expects unscaled logits, since it performs a `softmax`
on `logits` internally for efficiency. Do not call this op with the
output of `softmax`, as it will produce incorrect results.
A common use case is to have logits of shape
`[batch_size, num_classes]` and have labels of shape
`[batch_size]`, but higher dimensions are supported, in which
case the `dim`-th dimension is assumed to be of size `num_classes`.
`logits` must have the dtype of `float16`, `float32`, or `float64`, and
`labels` must have the dtype of `int32` or `int64`.
**Note that to avoid confusion, it is required to pass only named arguments to
this function.**
Args:
labels: `Tensor` of shape `[d_0, d_1, ..., d_{r-1}]` (where `r` is rank of
`labels` and result) and dtype `int32` or `int64`. Each entry in `labels`
must be an index in `[0, num_classes)`. Other values will raise an
exception when this op is run on CPU, and return `NaN` for corresponding
loss and gradient rows on GPU.
logits: Unscaled log probabilities of shape `[d_0, d_1, ..., d_{r-1},
num_classes]` and dtype `float16`, `float32`, or `float64`.
name: A name for the operation (optional).
Returns:
A `Tensor` of the same shape as `labels` and of the same type as `logits`
with the softmax cross entropy loss.
Raises:
ValueError: If logits are scalars (need to have rank >= 1) or if the rank
of the labels is not equal to the rank of the logits minus one.
"""
return sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name=name)
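# Illustrative sketch (assumed values): integer class indices instead of
# one-hot or probability rows.
#
#   logits = tf.constant([[4.0, 2.0, 1.0], [0.0, 5.0, 1.0]])
#   labels = tf.constant([0, 1])                    # true class per example
#   tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)
#   # -> a loss vector of shape (2,), one entry per example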
@tf_export("nn.avg_pool", v1=["nn.avg_pool_v2"])
@dispatch.add_dispatch_support
def avg_pool_v2(input, ksize, strides, padding, data_format=None, name=None): # pylint: disable=redefined-builtin
"""Performs the avg pooling on the input.
Each entry in `output` is the mean of the corresponding size `ksize`
window in `value`.
Args:
input: Tensor of rank N+2, of shape `[batch_size] + input_spatial_shape +
[num_channels]` if `data_format` does not start with "NC" (default), or
`[batch_size, num_channels] + input_spatial_shape` if data_format starts
with "NC". Pooling happens over the spatial dimensions only.
ksize: An int or list of `ints` that has length `1`, `N` or `N+2`. The size
of the window for each dimension of the input tensor.
strides: An int or list of `ints` that has length `1`, `N` or `N+2`. The
stride of the sliding window for each dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See
the "returns" section of `tf.nn.convolution` for details.
data_format: A string. Specifies the channel dimension. For N=1 it can be
either "NWC" (default) or "NCW", for N=2 it can be either "NHWC" (default)
or "NCHW" and for N=3 either "NDHWC" (default) or "NCDHW".
name: Optional name for the operation.
Returns:
A `Tensor` of format specified by `data_format`.
The average pooled output tensor.
"""
if input.shape is not None:
n = len(input.shape) - 2
elif data_format is not None:
n = len(data_format) - 2
else:
raise ValueError(
"The input must have a rank or a data format must be given.")
if not 1 <= n <= 3:
raise ValueError(
"Input tensor must be of rank 3, 4 or 5 but was {}.".format(n + 2))
if data_format is None:
channel_index = n + 1
else:
channel_index = 1 if data_format.startswith("NC") else n + 1
ksize = _get_sequence(ksize, n, channel_index, "ksize")
strides = _get_sequence(strides, n, channel_index, "strides")
avg_pooling_ops = {
1: avg_pool1d,
2: gen_nn_ops.avg_pool,
3: gen_nn_ops.avg_pool3d
}
op = avg_pooling_ops[n]
return op(
input,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
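# Illustrative sketch (assumed values): 2x2 average pooling on a 4x4 map.
#
#   x = tf.reshape(tf.range(16.0), [1, 4, 4, 1])
#   y = tf.nn.avg_pool(x, ksize=2, strides=2, padding="VALID")
#   # y.shape == (1, 2, 2, 1) and y[0, 0, 0, 0] == (0 + 1 + 4 + 5) / 4 == 2.5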
@tf_export(v1=["nn.avg_pool", "nn.avg_pool2d"])
@dispatch.add_dispatch_support
def avg_pool(value, ksize, strides, padding, data_format="NHWC",
name=None, input=None): # pylint: disable=redefined-builtin
"""Performs the average pooling on the input.
Each entry in `output` is the mean of the corresponding size `ksize`
window in `value`.
Args:
value: A 4-D `Tensor` of shape `[batch, height, width, channels]` and type
`float32`, `float64`, `qint8`, `quint8`, or `qint32`.
ksize: An int or list of `ints` that has length `1`, `2` or `4`. The size of
the window for each dimension of the input tensor.
strides: An int or list of `ints` that has length `1`, `2` or `4`. The
stride of the sliding window for each dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
See the "returns" section of `tf.nn.convolution` for details.
data_format: A string. 'NHWC' and 'NCHW' are supported.
name: Optional name for the operation.
input: Alias for value.
Returns:
A `Tensor` with the same type as `value`. The average pooled output tensor.
"""
with ops.name_scope(name, "AvgPool", [value]) as name:
value = deprecation.deprecated_argument_lookup(
"input", input, "value", value)
if data_format is None:
data_format = "NHWC"
channel_index = 1 if data_format.startswith("NC") else 3
ksize = _get_sequence(ksize, 2, channel_index, "ksize")
strides = _get_sequence(strides, 2, channel_index, "strides")
return gen_nn_ops.avg_pool(
value,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
@tf_export("nn.avg_pool2d", v1=[])
@dispatch.add_dispatch_support
def avg_pool2d(input, ksize, strides, padding, data_format="NHWC", name=None): # pylint: disable=redefined-builtin
"""Performs the average pooling on the input.
Each entry in `output` is the mean of the corresponding size `ksize`
window in `input`.
Args:
input: A 4-D `Tensor` of shape `[batch, height, width, channels]` and type
`float32`, `float64`, `qint8`, `quint8`, or `qint32`.
ksize: An int or list of `ints` that has length `1`, `2` or `4`. The size of
the window for each dimension of the input tensor.
strides: An int or list of `ints` that has length `1`, `2` or `4`. The
stride of the sliding window for each dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
See the "returns" section of `tf.nn.convolution` for details.
data_format: A string. 'NHWC' and 'NCHW' are supported.
name: Optional name for the operation.
Returns:
A `Tensor` with the same type as `input`. The average pooled output tensor.
"""
with ops.name_scope(name, "AvgPool2D", [input]) as name:
if data_format is None:
data_format = "NHWC"
channel_index = 1 if data_format.startswith("NC") else 3
ksize = _get_sequence(ksize, 2, channel_index, "ksize")
strides = _get_sequence(strides, 2, channel_index, "strides")
return gen_nn_ops.avg_pool(
input,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
@tf_export("nn.avg_pool1d")
@dispatch.add_dispatch_support
def avg_pool1d(input, ksize, strides, padding, data_format="NWC", name=None): # pylint: disable=redefined-builtin
"""Performs the average pooling on the input.
Each entry in `output` is the mean of the corresponding size `ksize`
window in `input`.
Note internally this op reshapes and uses the underlying 2d operation.
Args:
input: A 3-D `Tensor` of the format specified by `data_format`.
ksize: An int or list of `ints` that has length `1` or `3`. The size of the
window for each dimension of the input tensor.
strides: An int or list of `ints` that has length `1` or `3`. The stride of
the sliding window for each dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See
the "returns" section of `tf.nn.convolution` for details.
data_format: An optional string from: "NWC", "NCW". Defaults to "NWC".
name: A name for the operation (optional).
Returns:
A `Tensor` of format specified by `data_format`.
The average pooled output tensor.
"""
with ops.name_scope(name, "AvgPool1D", [input]) as name:
if data_format is None:
data_format = "NWC"
channel_index = 1 if data_format.startswith("NC") else 2
ksize = [1] + _get_sequence(ksize, 1, channel_index, "ksize")
strides = [1] + _get_sequence(strides, 1, channel_index, "strides")
expanding_dim = 1 if data_format == "NWC" else 2
data_format = "NHWC" if data_format == "NWC" else "NCHW"
input = array_ops.expand_dims_v2(input, expanding_dim)
result = gen_nn_ops.avg_pool(
input,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
return array_ops.squeeze(result, expanding_dim)
@tf_export("nn.avg_pool3d")
@dispatch.add_dispatch_support
def avg_pool3d(input, ksize, strides, padding, data_format="NDHWC", name=None): # pylint: disable=redefined-builtin
"""Performs the average pooling on the input.
Each entry in `output` is the mean of the corresponding size `ksize`
window in `input`.
Args:
input: A 5-D `Tensor` of shape `[batch, depth, height, width, channels]` and type
`float32`, `float64`, `qint8`, `quint8`, or `qint32`.
ksize: An int or list of `ints` that has length `1`, `3` or `5`. The size of
the window for each dimension of the input tensor.
strides: An int or list of `ints` that has length `1`, `3` or `5`. The
stride of the sliding window for each dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
See the "returns" section of `tf.nn.convolution` for details.
data_format: A string. 'NDHWC' and 'NCDHW' are supported.
name: Optional name for the operation.
Returns:
A `Tensor` with the same type as `input`. The average pooled output tensor.
"""
with ops.name_scope(name, "AvgPool3D", [input]) as name:
if data_format is None:
data_format = "NDHWC"
channel_index = 1 if data_format.startswith("NC") else 3
ksize = _get_sequence(ksize, 3, channel_index, "ksize")
strides = _get_sequence(strides, 3, channel_index, "strides")
return gen_nn_ops.avg_pool3d(
input,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
# pylint: disable=redefined-builtin
@tf_export("nn.max_pool", v1=["nn.max_pool_v2"])
@dispatch.add_dispatch_support
def max_pool_v2(input, ksize, strides, padding, data_format=None, name=None):
"""Performs the max pooling on the input.
Args:
input: Tensor of rank N+2, of shape `[batch_size] + input_spatial_shape +
[num_channels]` if `data_format` does not start with "NC" (default), or
`[batch_size, num_channels] + input_spatial_shape` if data_format starts
with "NC". Pooling happens over the spatial dimensions only.
ksize: An int or list of `ints` that has length `1`, `N` or `N+2`. The size
of the window for each dimension of the input tensor.
strides: An int or list of `ints` that has length `1`, `N` or `N+2`. The
stride of the sliding window for each dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See
the "returns" section of `tf.nn.convolution` for details.
data_format: A string. Specifies the channel dimension. For N=1 it can be
either "NWC" (default) or "NCW", for N=2 it can be either "NHWC" (default)
or "NCHW" and for N=3 either "NDHWC" (default) or "NCDHW".
name: Optional name for the operation.
Returns:
A `Tensor` of format specified by `data_format`.
The max pooled output tensor.
"""
if input.shape is not None:
n = len(input.shape) - 2
elif data_format is not None:
n = len(data_format) - 2
else:
raise ValueError(
"The input must have a rank or a data format must be given.")
if not 1 <= n <= 3:
raise ValueError(
"Input tensor must be of rank 3, 4 or 5 but was {}.".format(n + 2))
if data_format is None:
channel_index = n + 1
else:
channel_index = 1 if data_format.startswith("NC") else n + 1
ksize = _get_sequence(ksize, n, channel_index, "ksize")
strides = _get_sequence(strides, n, channel_index, "strides")
max_pooling_ops = {
1: max_pool1d,
2: gen_nn_ops.max_pool,
3: gen_nn_ops.max_pool3d
}
op = max_pooling_ops[n]
return op(
input,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
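# Illustrative sketch (assumed values): same input as the average-pooling
# example above, but keeping the maximum of each window.
#
#   x = tf.reshape(tf.range(16.0), [1, 4, 4, 1])
#   y = tf.nn.max_pool(x, ksize=2, strides=2, padding="VALID")
#   # tf.squeeze(y) == [[5., 7.], [13., 15.]]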
# pylint: enable=redefined-builtin
@tf_export(v1=["nn.max_pool"])
@dispatch.add_dispatch_support
def max_pool(value,
ksize,
strides,
padding,
data_format="NHWC",
name=None,
input=None): # pylint: disable=redefined-builtin
"""Performs the max pooling on the input.
Args:
value: A 4-D `Tensor` of the format specified by `data_format`.
ksize: An int or list of `ints` that has length `1`, `2` or `4`.
The size of the window for each dimension of the input tensor.
strides: An int or list of `ints` that has length `1`, `2` or `4`.
The stride of the sliding window for each dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
See the "returns" section of `tf.nn.convolution` for details.
data_format: A string. 'NHWC', 'NCHW' and 'NCHW_VECT_C' are supported.
name: Optional name for the operation.
input: Alias for value.
Returns:
A `Tensor` of format specified by `data_format`.
The max pooled output tensor.
"""
value = deprecation.deprecated_argument_lookup("input", input, "value", value)
with ops.name_scope(name, "MaxPool", [value]) as name:
if data_format is None:
data_format = "NHWC"
channel_index = 1 if data_format.startswith("NC") else 3
ksize = _get_sequence(ksize, 2, channel_index, "ksize")
strides = _get_sequence(strides, 2, channel_index, "strides")
if ((np.isscalar(ksize) and ksize == 0) or
(isinstance(ksize,
(list, tuple, np.ndarray)) and any(v == 0 for v in ksize))):
raise ValueError("ksize cannot be zero.")
return gen_nn_ops.max_pool(
value,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
# pylint: disable=redefined-builtin
@tf_export("nn.max_pool1d")
@dispatch.add_dispatch_support
def max_pool1d(input, ksize, strides, padding, data_format="NWC", name=None):
"""Performs the max pooling on the input.
Note internally this op reshapes and uses the underlying 2d operation.
Args:
input: A 3-D `Tensor` of the format specified by `data_format`.
ksize: An int or list of `ints` that has length `1` or `3`. The size of the
window for each dimension of the input tensor.
strides: An int or list of `ints` that has length `1` or `3`. The stride of
the sliding window for each dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See
the "returns" section of `tf.nn.convolution` for details.
data_format: An optional string from: "NWC", "NCW". Defaults to "NWC".
name: A name for the operation (optional).
Returns:
A `Tensor` of format specified by `data_format`.
The max pooled output tensor.
"""
with ops.name_scope(name, "MaxPool1d", [input]) as name:
if data_format is None:
data_format = "NWC"
channel_index = 1 if data_format.startswith("NC") else 2
ksize = [1] + _get_sequence(ksize, 1, channel_index, "ksize")
strides = [1] + _get_sequence(strides, 1, channel_index, "strides")
expanding_dim = 1 if data_format == "NWC" else 2
data_format = "NHWC" if data_format == "NWC" else "NCHW"
input = array_ops.expand_dims_v2(input, expanding_dim)
result = gen_nn_ops.max_pool(
input,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
return array_ops.squeeze(result, expanding_dim)
# pylint: enable=redefined-builtin
# pylint: disable=redefined-builtin
@tf_export("nn.max_pool2d")
@dispatch.add_dispatch_support
def max_pool2d(input, ksize, strides, padding, data_format="NHWC", name=None):
"""Performs the max pooling on the input.
Args:
input: A 4-D `Tensor` of the format specified by `data_format`.
ksize: An int or list of `ints` that has length `1`, `2` or `4`. The size of
the window for each dimension of the input tensor.
strides: An int or list of `ints` that has length `1`, `2` or `4`. The
stride of the sliding window for each dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See
the "returns" section of `tf.nn.convolution` for details.
data_format: A string. 'NHWC', 'NCHW' and 'NCHW_VECT_C' are supported.
name: Optional name for the operation.
Returns:
A `Tensor` of format specified by `data_format`.
The max pooled output tensor.
"""
with ops.name_scope(name, "MaxPool2d", [input]) as name:
if data_format is None:
data_format = "NHWC"
channel_index = 1 if data_format.startswith("NC") else 3
ksize = _get_sequence(ksize, 2, channel_index, "ksize")
strides = _get_sequence(strides, 2, channel_index, "strides")
return gen_nn_ops.max_pool(
input,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
# pylint: enable=redefined-builtin
# pylint: disable=redefined-builtin
@tf_export("nn.max_pool3d")
@dispatch.add_dispatch_support
def max_pool3d(input, ksize, strides, padding, data_format="NDHWC", name=None):
"""Performs the max pooling on the input.
Args:
input: A 5-D `Tensor` of the format specified by `data_format`.
ksize: An int or list of `ints` that has length `1`, `3` or `5`. The size of
the window for each dimension of the input tensor.
strides: An int or list of `ints` that has length `1`, `3` or `5`. The
stride of the sliding window for each dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See
the "returns" section of `tf.nn.convolution` for details.
data_format: An optional string from: "NDHWC", "NCDHW". Defaults to "NDHWC".
The data format of the input and output data. With the default format
"NDHWC", the data is stored in the order of: [batch, in_depth, in_height,
in_width, in_channels]. Alternatively, the format could be "NCDHW", the
data storage order is: [batch, in_channels, in_depth, in_height,
in_width].
name: A name for the operation (optional).
Returns:
A `Tensor` of format specified by `data_format`.
The max pooled output tensor.
"""
with ops.name_scope(name, "MaxPool3D", [input]) as name:
if data_format is None:
data_format = "NDHWC"
channel_index = 1 if data_format.startswith("NC") else 4
ksize = _get_sequence(ksize, 3, channel_index, "ksize")
strides = _get_sequence(strides, 3, channel_index, "strides")
return gen_nn_ops.max_pool3d(
input,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
# pylint: enable=redefined-builtin
@tf_export("nn.max_pool_with_argmax", v1=[])
@dispatch.add_dispatch_support
def max_pool_with_argmax_v2(
input, # pylint: disable=redefined-builtin
ksize,
strides,
padding,
data_format="NHWC",
output_dtype=dtypes.int64,
include_batch_in_index=False,
name=None):
"""Performs max pooling on the input and outputs both max values and indices.
The indices in `argmax` are flattened, so that a maximum value at position
`[b, y, x, c]` becomes flattened index: `(y * width + x) * channels + c` if
`include_batch_in_index` is False;
`((b * height + y) * width + x) * channels + c`
if `include_batch_in_index` is True.
The indices returned are always in `[0, height) x [0, width)` before
flattening, even if padding is involved and the mathematically correct answer
is outside (either negative or too large). This is a bug, but fixing it is
difficult to do in a safe backwards compatible way, especially due to
flattening.
Args:
input: A `Tensor`. Must be one of the following types: `float32`, `float64`,
`int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`,
`uint32`, `uint64`.
4-D with shape `[batch, height, width, channels]`. Input to pool over.
ksize: An int or list of `ints` that has length `1`, `2` or `4`.
The size of the window for each dimension of the input tensor.
strides: An int or list of `ints` that has length `1`, `2` or `4`.
The stride of the sliding window for each dimension of the
input tensor.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
data_format: An optional `string`, must be set to `"NHWC"`. Defaults to
`"NHWC"`.
Specify the data format of the input and output data.
output_dtype: An optional `tf.DType` from: `tf.int32, tf.int64`.
Defaults to `tf.int64`.
The dtype of the returned argmax tensor.
include_batch_in_index: An optional `boolean`. Defaults to `False`.
Whether to include batch dimension in flattened index of `argmax`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (output, argmax).
output: A `Tensor`. Has the same type as `input`.
argmax: A `Tensor` of type `output_dtype`.
"""
if data_format != "NHWC":
raise ValueError("Data formats other than 'NHWC' are not yet supported")
ksize = _get_sequence(ksize, 2, 3, "ksize")
strides = _get_sequence(strides, 2, 3, "strides")
return gen_nn_ops.max_pool_with_argmax(
input=input,
ksize=ksize,
strides=strides,
padding=padding,
Targmax=output_dtype,
include_batch_in_index=include_batch_in_index,
name=name)
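# Illustrative sketch (assumed values) of the flattened-index convention
# described above (include_batch_in_index=False).
#
#   x = tf.reshape(tf.range(16.0), [1, 4, 4, 1])
#   y, argmax = tf.nn.max_pool_with_argmax(x, ksize=2, strides=2,
#                                          padding="VALID")
#   # The first window's max (value 5.) sits at y=1, x=1, c=0, so its argmax
#   # entry is (1 * 4 + 1) * 1 + 0 == 5.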
@tf_export(v1=["nn.max_pool_with_argmax"])
@dispatch.add_dispatch_support
def max_pool_with_argmax_v1( # pylint: disable=missing-docstring,invalid-name
input, # pylint: disable=redefined-builtin
ksize,
strides,
padding,
data_format="NHWC",
Targmax=None,
name=None,
output_dtype=None,
include_batch_in_index=False):
if data_format != "NHWC":
raise ValueError("Data formats other than 'NHWC' are not yet supported")
Targmax = deprecated_argument_lookup(
"output_dtype", output_dtype, "Targmax", Targmax)
if Targmax is None:
Targmax = dtypes.int64
return gen_nn_ops.max_pool_with_argmax(
input=input,
ksize=ksize,
strides=strides,
padding=padding,
Targmax=Targmax,
include_batch_in_index=include_batch_in_index,
name=name)
max_pool_with_argmax_v1.__doc__ = gen_nn_ops.max_pool_with_argmax.__doc__
@ops.RegisterStatistics("Conv3D", "flops")
def _calc_conv3d_flops(graph, node):
"""Calculates the compute resources needed for Conv3D."""
input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
input_shape.assert_is_fully_defined()
filter_shape = graph_util.tensor_shape_from_node_def_name(
graph, node.input[1])
filter_shape.assert_is_fully_defined()
output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
output_shape.assert_is_fully_defined()
filter_time = int(filter_shape[0])
filter_height = int(filter_shape[1])
filter_width = int(filter_shape[2])
filter_in_depth = int(filter_shape[3])
output_count = np.prod(output_shape.as_list(), dtype=np.int64)
return ops.OpStats("flops", (output_count * filter_in_depth * filter_time *
filter_height * filter_width * 2))
@ops.RegisterStatistics("Conv2D", "flops")
def _calc_conv_flops(graph, node):
"""Calculates the compute resources needed for Conv2D."""
input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
input_shape.assert_is_fully_defined()
filter_shape = graph_util.tensor_shape_from_node_def_name(
graph, node.input[1])
filter_shape.assert_is_fully_defined()
output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
output_shape.assert_is_fully_defined()
filter_height = int(filter_shape[0])
filter_width = int(filter_shape[1])
filter_in_depth = int(filter_shape[2])
output_count = np.prod(output_shape.as_list(), dtype=np.int64)
return ops.OpStats(
"flops",
(output_count * filter_in_depth * filter_height * filter_width * 2))
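# Editor's note: quick arithmetic check of the Conv2D FLOPs formula above.
# For a hypothetical 3x3 filter with 64 input channels and an output of shape
# [1, 28, 28, 128]: output_count = 1 * 28 * 28 * 128 = 100352, so
# flops = 100352 * 64 * 3 * 3 * 2 = 115,605,504 (one multiply plus one add per tap).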
@ops.RegisterStatistics("DepthwiseConv2dNative", "flops")
def _calc_depthwise_conv_flops(graph, node):
"""Calculates the compute resources needed for DepthwiseConv2dNative."""
input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
input_shape.assert_is_fully_defined()
filter_shape = graph_util.tensor_shape_from_node_def_name(
graph, node.input[1])
filter_shape.assert_is_fully_defined()
output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
output_shape.assert_is_fully_defined()
filter_height = int(filter_shape[0])
filter_width = int(filter_shape[1])
output_count = np.prod(output_shape.as_list(), dtype=np.int64)
return ops.OpStats("flops", (output_count * filter_height * filter_width * 2))
@ops.RegisterStatistics("BiasAdd", "flops")
def _calc_bias_add_flops(graph, node):
"""Calculates the computing needed for BiasAdd."""
input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
input_shape.assert_is_fully_defined()
input_count = np.prod(input_shape.as_list())
return ops.OpStats("flops", input_count)
@tf_export(v1=["nn.xw_plus_b"])
@dispatch.add_dispatch_support
def xw_plus_b(x, weights, biases, name=None): # pylint: disable=invalid-name
"""Computes matmul(x, weights) + biases.
Args:
x: a 2D tensor. Dimensions typically: batch, in_units
weights: a 2D tensor. Dimensions typically: in_units, out_units
biases: a 1D tensor. Dimensions: out_units
name: A name for the operation (optional). If not specified
"xw_plus_b" is used.
Returns:
A 2-D Tensor computing matmul(x, weights) + biases.
Dimensions typically: batch, out_units.
"""
with ops.name_scope(name, "xw_plus_b", [x, weights, biases]) as name:
x = ops.convert_to_tensor(x, name="x")
weights = ops.convert_to_tensor(weights, name="weights")
biases = ops.convert_to_tensor(biases, name="biases")
mm = math_ops.matmul(x, weights)
return bias_add(mm, biases, name=name)
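# Editor's note: minimal numeric sketch of xw_plus_b. With x = [[1., 2.]],
# weights = [[1., 0.], [0., 1.]] (identity) and biases = [10., 20.], the result
# matmul(x, weights) + biases evaluates to [[11., 22.]].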
def xw_plus_b_v1(x, weights, biases, name=None):
"""Computes matmul(x, weights) + biases.
This is a deprecated version that will soon be removed.
Args:
x: a 2D tensor. Dimensions typically: batch, in_units
weights: a 2D tensor. Dimensions typically: in_units, out_units
biases: a 1D tensor. Dimensions: out_units
name: A name for the operation (optional). If not specified
"xw_plus_b_v1" is used.
Returns:
A 2-D Tensor computing matmul(x, weights) + biases.
Dimensions typically: batch, out_units.
"""
with ops.name_scope(name, "xw_plus_b_v1", [x, weights, biases]) as name:
x = ops.convert_to_tensor(x, name="x")
weights = ops.convert_to_tensor(weights, name="weights")
biases = ops.convert_to_tensor(biases, name="biases")
mm = math_ops.matmul(x, weights)
return bias_add_v1(mm, biases, name=name)
def _get_noise_shape(x, noise_shape):
# If noise_shape is none return immediately.
if noise_shape is None:
return array_ops.shape(x)
try:
# Best effort to figure out the intended shape.
# If not possible, let the op to handle it.
# In eager mode exception will show up.
noise_shape_ = tensor_shape.as_shape(noise_shape)
except (TypeError, ValueError):
return noise_shape
if x.shape.dims is not None and len(x.shape.dims) == len(noise_shape_.dims):
new_dims = []
for i, dim in enumerate(x.shape.dims):
if noise_shape_.dims[i].value is None and dim.value is not None:
new_dims.append(dim.value)
else:
new_dims.append(noise_shape_.dims[i].value)
return tensor_shape.TensorShape(new_dims)
return noise_shape
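# Editor's note: example of the partial-shape merge performed by _get_noise_shape.
# Dimensions left as None in `noise_shape` are filled in from `x` when known,
# e.g. x.shape == [32, 10, 128] with noise_shape == [None, 1, None] yields
# TensorShape([32, 1, 128]): the dropout mask is shared across the 10 timesteps
# but drawn independently per example and per feature.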
@tf_export(v1=["nn.dropout"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None, "Please use `rate` instead of `keep_prob`. "
"Rate should be set to `rate = 1 - keep_prob`.",
"keep_prob")
def dropout(x, keep_prob=None, noise_shape=None, seed=None, name=None,
rate=None):
"""Computes dropout.
For each element of `x`, with probability `rate`, outputs `0`, and otherwise
scales up the input by `1 / (1-rate)`. The scaling is such that the expected
sum is unchanged.
By default, each element is kept or dropped independently. If `noise_shape`
is specified, it must be
[broadcastable](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
to the shape of `x`, and only dimensions with `noise_shape[i] == shape(x)[i]`
will make independent decisions. For example, if `shape(x) = [k, l, m, n]`
and `noise_shape = [k, 1, 1, n]`, each batch and channel component will be
kept independently and each row and column will be kept or not kept together.
Args:
x: A floating point tensor.
keep_prob: (deprecated) A deprecated alias for `(1-rate)`.
noise_shape: A 1-D `Tensor` of type `int32`, representing the
shape for randomly generated keep/drop flags.
seed: A Python integer. Used to create random seeds. See
`tf.random.set_seed` for behavior.
name: A name for this operation (optional).
rate: A scalar `Tensor` with the same type as `x`. The probability that each
element of `x` is discarded.
Returns:
A Tensor of the same shape of `x`.
Raises:
ValueError: If `rate` is not in `[0, 1)` or if `x` is not a floating
point tensor.
"""
try:
keep = 1. - keep_prob if keep_prob is not None else None
except TypeError:
raise ValueError("keep_prob must be a floating point number or Tensor "
"(got %r)" % keep_prob)
rate = deprecation.deprecated_argument_lookup(
"rate", rate,
"keep_prob", keep)
if rate is None:
raise ValueError("You must provide a rate to dropout.")
return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)
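# Editor's note: migration sketch for the deprecated keep_prob argument.
# >>> # TF 1.x style:           tf.nn.dropout(x, keep_prob=0.8)
# >>> # equivalent TF 2.x call: tf.nn.dropout(x, rate=0.2)   # rate = 1 - keep_prob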
@tf_export("nn.dropout", v1=[])
@dispatch.add_dispatch_support
def dropout_v2(x, rate, noise_shape=None, seed=None, name=None):
"""Computes dropout: randomly sets elements to zero to prevent overfitting.
Note: The behavior of dropout has changed between TensorFlow 1.x and 2.x.
When converting 1.x code, please use named arguments to ensure behavior stays
consistent.
See also: `tf.keras.layers.Dropout` for a dropout layer.
[Dropout](https://arxiv.org/abs/1207.0580) is useful for regularizing DNN
models. Input elements are randomly set to zero (and the other elements are
rescaled). This encourages each node to be independently useful, as it cannot
rely on the output of other nodes.
More precisely: With probability `rate` elements of `x` are set to `0`.
The remaining elements are scaled up by `1.0 / (1 - rate)`, so that the
expected value is preserved.
>>> tf.random.set_seed(0)
>>> x = tf.ones([3,5])
>>> tf.nn.dropout(x, rate = 0.5, seed = 1).numpy()
array([[2., 0., 0., 2., 2.],
[2., 2., 2., 2., 2.],
[2., 0., 2., 0., 2.]], dtype=float32)
>>> tf.random.set_seed(0)
>>> x = tf.ones([3,5])
>>> tf.nn.dropout(x, rate = 0.8, seed = 1).numpy()
array([[0., 0., 0., 5., 5.],
[0., 5., 0., 5., 0.],
[5., 0., 5., 0., 5.]], dtype=float32)
>>> tf.nn.dropout(x, rate = 0.0) == x
<tf.Tensor: shape=(3, 5), dtype=bool, numpy=
array([[ True, True, True, True, True],
[ True, True, True, True, True],
[ True, True, True, True, True]])>
By default, each element is kept or dropped independently. If `noise_shape`
is specified, it must be
[broadcastable](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
to the shape of `x`, and only dimensions with `noise_shape[i] == shape(x)[i]`
will make independent decisions. This is useful for dropping whole
channels from an image or sequence. For example:
>>> tf.random.set_seed(0)
>>> x = tf.ones([3,10])
>>> tf.nn.dropout(x, rate = 2/3, noise_shape=[1,10], seed=1).numpy()
array([[0., 0., 0., 3., 3., 0., 3., 3., 3., 0.],
[0., 0., 0., 3., 3., 0., 3., 3., 3., 0.],
[0., 0., 0., 3., 3., 0., 3., 3., 3., 0.]], dtype=float32)
Args:
x: A floating point tensor.
rate: A scalar `Tensor` with the same type as x. The probability
that each element is dropped. For example, setting rate=0.1 would drop
10% of input elements.
noise_shape: A 1-D `Tensor` of type `int32`, representing the
shape for randomly generated keep/drop flags.
seed: A Python integer. Used to create random seeds. See
`tf.random.set_seed` for behavior.
name: A name for this operation (optional).
Returns:
A Tensor of the same shape of `x`.
Raises:
ValueError: If `rate` is not in `[0, 1)` or if `x` is not a floating point
tensor. `rate=1` is disallowed, because the output would be all zeros,
which is likely not what was intended.
"""
with ops.name_scope(name, "dropout", [x]) as name:
is_rate_number = isinstance(rate, numbers.Real)
if is_rate_number and (rate < 0 or rate >= 1):
raise ValueError("rate must be a scalar tensor or a float in the "
"range [0, 1), got %g" % rate)
x = ops.convert_to_tensor(x, name="x")
x_dtype = x.dtype
if not x_dtype.is_floating:
raise ValueError("x has to be a floating point tensor since it's going "
"to be scaled. Got a %s tensor instead." % x_dtype)
is_executing_eagerly = context.executing_eagerly()
if not tensor_util.is_tensor(rate):
if is_rate_number:
keep_prob = 1 - rate
scale = 1 / keep_prob
scale = ops.convert_to_tensor(scale, dtype=x_dtype)
ret = gen_math_ops.mul(x, scale)
else:
raise ValueError("rate is neither scalar nor scalar tensor %r" % rate)
else:
rate.get_shape().assert_has_rank(0)
rate_dtype = rate.dtype
if rate_dtype != x_dtype:
if not rate_dtype.is_compatible_with(x_dtype):
raise ValueError(
"Tensor dtype %s is incomptaible with Tensor dtype %s: %r" %
(x_dtype.name, rate_dtype.name, rate))
rate = gen_math_ops.cast(rate, x_dtype, name="rate")
one_tensor = constant_op.constant(1, dtype=x_dtype)
ret = gen_math_ops.real_div(x, gen_math_ops.sub(one_tensor, rate))
noise_shape = _get_noise_shape(x, noise_shape)
# Sample a uniform distribution on [0.0, 1.0) and select values larger
# than rate.
#
# NOTE: Random uniform can only generate 2^23 floats on [1.0, 2.0)
# and subtract 1.0.
random_tensor = random_ops.random_uniform(
noise_shape, seed=seed, dtype=x_dtype)
# NOTE: if (1.0 + rate) - 1 is equal to rate, then that float is selected,
# hence a >= comparison is used.
keep_mask = random_tensor >= rate
ret = gen_math_ops.mul(ret, gen_math_ops.cast(keep_mask, x_dtype))
if not is_executing_eagerly:
ret.set_shape(x.get_shape())
return ret
@tf_export("math.top_k", "nn.top_k")
@dispatch.add_dispatch_support
def top_k(input, k=1, sorted=True, name=None): # pylint: disable=redefined-builtin
"""Finds values and indices of the `k` largest entries for the last dimension.
If the input is a vector (rank=1), finds the `k` largest entries in the vector
and outputs their values and indices as vectors. Thus `values[j]` is the
`j`-th largest entry in `input`, and its index is `indices[j]`.
For matrices (resp. higher rank input), computes the top `k` entries in each
row (resp. vector along the last dimension). Thus,
values.shape = indices.shape = input.shape[:-1] + [k]
If two elements are equal, the lower-index element appears first.
Args:
input: 1-D or higher `Tensor` with last dimension at least `k`.
k: 0-D `int32` `Tensor`. Number of top elements to look for along the last
dimension (along each row for matrices).
sorted: If true the resulting `k` elements will be sorted by the values in
descending order.
name: Optional name for the operation.
Returns:
values: The `k` largest elements along each last dimensional slice.
indices: The indices of `values` within the last dimension of `input`.
"""
return gen_nn_ops.top_kv2(input, k=k, sorted=sorted, name=name)
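# Editor's note: illustrative doctest-style usage for top_k (assumes TF 2.x).
# >>> import tensorflow as tf
# >>> values, indices = tf.math.top_k([1., 5., 3., 4.], k=2)
# >>> values.numpy(), indices.numpy()
# (array([5., 4.], dtype=float32), array([1, 3], dtype=int32))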
def nth_element(input, n, reverse=False, name=None): # pylint: disable=redefined-builtin
r"""Finds values of the `n`-th smallest value for the last dimension.
Note that n is zero-indexed.
If the input is a vector (rank-1), finds the entry which is the nth-smallest
value in the vector and outputs its value as a scalar tensor.
For matrices (resp. higher rank input), computes the entry which is the
nth-smallest value in each row (resp. vector along the last dimension). Thus,
values.shape = input.shape[:-1]
Args:
input: 1-D or higher `Tensor` with last dimension at least `n+1`.
n: A `Tensor` of type `int32`.
0-D. Position of sorted vector to select along the last dimension (along
each row for matrices). Valid range of n is `[0, input.shape[-1])`
reverse: An optional `bool`. Defaults to `False`.
When set to True, find the nth-largest value in the vector and vice
versa.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
The `n`-th order statistic along each last dimensional slice.
"""
return gen_nn_ops.nth_element(input, n, reverse=reverse, name=name)
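# Editor's note: hedged usage sketch for nth_element (n is zero-indexed); the
# function is not publicly exported here, so it is called via this module.
# >>> from tensorflow.python.ops import nn_ops
# >>> nn_ops.nth_element([7., 2., 5., 9.], n=1).numpy()   # second-smallest value
# 5.0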
@tf_export(v1=["nn.fractional_max_pool"])
@dispatch.add_dispatch_support
@deprecation.deprecated(date=None, instructions="`seed2` and `deterministic` "
"args are deprecated. Use fractional_max_pool_v2.")
def fractional_max_pool(value,
pooling_ratio,
pseudo_random=False,
overlapping=False,
deterministic=False,
seed=0,
seed2=0,
name=None): # pylint: disable=redefined-builtin
r"""Performs fractional max pooling on the input.
This is a deprecated version of `fractional_max_pool`.
Fractional max pooling is slightly different than regular max pooling. In
regular max pooling, you downsize an input set by taking the maximum value of
smaller N x N subsections of the set (often 2x2), and try to reduce the set by
a factor of N, where N is an integer. Fractional max pooling, as you might
expect from the word "fractional", means that the overall reduction ratio N
does not have to be an integer.
The sizes of the pooling regions are generated randomly but are fairly
uniform. For example, let's look at the height dimension, and the constraints
on the list of rows that will be pool boundaries.
First we define the following:
1. input_row_length : the number of rows from the input set
2. output_row_length : which will be smaller than the input
3. alpha = input_row_length / output_row_length : our reduction ratio
4. K = floor(alpha)
5. row_pooling_sequence : this is the result list of pool boundary rows
Then, row_pooling_sequence should satisfy:
1. a[0] = 0 : the first value of the sequence is 0
2. a[end] = input_row_length : the last value of the sequence is the size
3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size
4. length(row_pooling_sequence) = output_row_length+1
Args:
value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.
pooling_ratio: A list of `floats` that has length >= 4. Pooling ratio for
each dimension of `value`, currently only supports row and col dimension
and should be >= 1.0. For example, a valid pooling ratio looks like [1.0,
1.44, 1.73, 1.0]. The first and last elements must be 1.0 because we don't
allow pooling on batch and channels dimensions. 1.44 and 1.73 are pooling
ratio on height and width dimensions respectively.
pseudo_random: An optional `bool`. Defaults to `False`. When set to `True`,
generates the pooling sequence in a pseudorandom fashion, otherwise, in a
random fashion. Check (Graham, 2015) for difference between
pseudorandom and random.
overlapping: An optional `bool`. Defaults to `False`. When set to `True`,
it means when pooling, the values at the boundary of adjacent pooling
cells are used by both cells. For example:
`index 0 1 2 3 4`
`value 20 5 16 3 7`
If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used
twice. The result would be [20, 16] for fractional max pooling.
deterministic: An optional `bool`. Deprecated; use `fractional_max_pool_v2`
instead.
seed: An optional `int`. Defaults to `0`. If set to be non-zero, the
random number generator is seeded by the given seed. Otherwise it is
seeded by a random seed.
seed2: An optional `int`. Deprecated; use `fractional_max_pool_v2` instead.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,
`col_pooling_sequence`).
output: Output `Tensor` after fractional max pooling. Has the same type as
`value`.
row_pooling_sequence: A `Tensor` of type `int64`.
col_pooling_sequence: A `Tensor` of type `int64`.
References:
Fractional Max-Pooling:
[Graham, 2015](https://arxiv.org/abs/1412.6071)
([pdf](https://arxiv.org/pdf/1412.6071.pdf))
"""
return gen_nn_ops.fractional_max_pool(value, pooling_ratio, pseudo_random,
overlapping, deterministic, seed, seed2,
name)
@tf_export("nn.fractional_max_pool", v1=[])
@dispatch.add_dispatch_support
def fractional_max_pool_v2(value,
pooling_ratio,
pseudo_random=False,
overlapping=False,
seed=0,
name=None): # pylint: disable=redefined-builtin
r"""Performs fractional max pooling on the input.
Fractional max pooling is slightly different than regular max pooling. In
regular max pooling, you downsize an input set by taking the maximum value of
smaller N x N subsections of the set (often 2x2), and try to reduce the set by
a factor of N, where N is an integer. Fractional max pooling, as you might
expect from the word "fractional", means that the overall reduction ratio N
does not have to be an integer.
The sizes of the pooling regions are generated randomly but are fairly
uniform. For example, let's look at the height dimension, and the constraints
on the list of rows that will be pool boundaries.
First we define the following:
1. input_row_length : the number of rows from the input set
2. output_row_length : which will be smaller than the input
3. alpha = input_row_length / output_row_length : our reduction ratio
4. K = floor(alpha)
5. row_pooling_sequence : this is the result list of pool boundary rows
Then, row_pooling_sequence should satisfy:
1. a[0] = 0 : the first value of the sequence is 0
2. a[end] = input_row_length : the last value of the sequence is the size
3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size
4. length(row_pooling_sequence) = output_row_length+1
Args:
value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.
pooling_ratio: An int or list of `ints` that has length `1`, `2` or `4`.
Pooling ratio for each dimension of `value`, currently only supports row
and col dimension and should be >= 1.0. For example, a valid pooling ratio
looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements must be 1.0
because we don't allow pooling on batch and channels dimensions. 1.44 and
1.73 are pooling ratio on height and width dimensions respectively.
pseudo_random: An optional `bool`. Defaults to `False`. When set to `True`,
generates the pooling sequence in a pseudorandom fashion, otherwise, in a
random fashion. Check paper (Graham, 2015) for difference between
pseudorandom and random.
overlapping: An optional `bool`. Defaults to `False`. When set to `True`,
it means when pooling, the values at the boundary of adjacent pooling
cells are used by both cells. For example:
`index 0 1 2 3 4`
`value 20 5 16 3 7`
If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used
twice. The result would be [20, 16] for fractional max pooling.
seed: An optional `int`. Defaults to `0`. If set to be non-zero, the
random number generator is seeded by the given seed. Otherwise it is
seeded by a random seed.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,
`col_pooling_sequence`).
output: Output `Tensor` after fractional max pooling. Has the same type as
`value`.
row_pooling_sequence: A `Tensor` of type `int64`.
col_pooling_sequence: A `Tensor` of type `int64`.
References:
Fractional Max-Pooling:
[Graham, 2015](https://arxiv.org/abs/1412.6071)
([pdf](https://arxiv.org/pdf/1412.6071.pdf))
"""
pooling_ratio = _get_sequence(pooling_ratio, 2, 3, "pooling_ratio")
if seed == 0:
return gen_nn_ops.fractional_max_pool(value, pooling_ratio, pseudo_random,
overlapping, deterministic=False,
seed=0, seed2=0, name=name)
else:
seed1, seed2 = random_seed.get_seed(seed)
return gen_nn_ops.fractional_max_pool(value, pooling_ratio, pseudo_random,
overlapping, deterministic=True,
seed=seed1, seed2=seed2, name=name)
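# Editor's note: worked example of the pooling-sequence constraints described
# above. With input_row_length = 10 and output_row_length = 7, alpha = 10/7 and
# K = floor(alpha) = 1, so one valid row_pooling_sequence of length 8 is
# [0, 1, 2, 4, 5, 7, 8, 10]: every interval is K or K+1 and the ends are 0 and 10.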
@tf_export(v1=["nn.fractional_avg_pool"])
@dispatch.add_dispatch_support
@deprecation.deprecated(date=None, instructions="`seed2` and `deterministic` "
"args are deprecated. Use fractional_avg_pool_v2.")
def fractional_avg_pool(value,
pooling_ratio,
pseudo_random=False,
overlapping=False,
deterministic=False,
seed=0,
seed2=0,
name=None): # pylint: disable=redefined-builtin
r"""Performs fractional average pooling on the input.
This is a deprecated version of `fractional_avg_pool`.
Fractional average pooling is similar to Fractional max pooling in the pooling
region generation step. The only difference is that after pooling regions are
generated, a mean operation is performed instead of a max operation in each
pooling region.
Args:
value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.
pooling_ratio: A list of `floats` that has length >= 4. Pooling ratio for
each dimension of `value`, currently only supports row and col dimension
and should be >= 1.0. For example, a valid pooling ratio looks like [1.0,
1.44, 1.73, 1.0]. The first and last elements must be 1.0 because we don't
allow pooling on batch and channels dimensions. 1.44 and 1.73 are pooling
ratio on height and width dimensions respectively.
pseudo_random: An optional `bool`. Defaults to `False`. When set to `True`,
generates the pooling sequence in a pseudorandom fashion, otherwise, in a
random fashion. Check paper (Graham, 2015) for difference between
pseudorandom and random.
overlapping: An optional `bool`. Defaults to `False`. When set to `True`,
it means when pooling, the values at the boundary of adjacent pooling
cells are used by both cells. For example:
`index 0 1 2 3 4`
`value 20 5 16 3 7`
If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used
twice. The result would be [20, 16] for fractional avg pooling.
deterministic: An optional `bool`. Deprecated; use `fractional_avg_pool_v2`
instead.
seed: An optional `int`. Defaults to `0`. If set to be non-zero, the
random number generator is seeded by the given seed. Otherwise it is
seeded by a random seed.
seed2: An optional `int`. Deprecated; use `fractional_avg_pool_v2` instead.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,
`col_pooling_sequence`).
output: Output `Tensor` after fractional avg pooling. Has the same type as
`value`.
row_pooling_sequence: A `Tensor` of type `int64`.
col_pooling_sequence: A `Tensor` of type `int64`.
References:
Fractional Max-Pooling:
[Graham, 2015](https://arxiv.org/abs/1412.6071)
([pdf](https://arxiv.org/pdf/1412.6071.pdf))
"""
return gen_nn_ops.fractional_avg_pool(value, pooling_ratio, pseudo_random,
overlapping, deterministic, seed, seed2,
name=name)
@tf_export("nn.fractional_avg_pool", v1=[])
@dispatch.add_dispatch_support
def fractional_avg_pool_v2(value,
pooling_ratio,
pseudo_random=False,
overlapping=False,
seed=0,
name=None): # pylint: disable=redefined-builtin
r"""Performs fractional average pooling on the input.
Fractional average pooling is similar to Fractional max pooling in the pooling
region generation step. The only difference is that after pooling regions are
generated, a mean operation is performed instead of a max operation in each
pooling region.
Args:
value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.
pooling_ratio: A list of `floats` that has length >= 4. Pooling ratio for
each dimension of `value`, currently only supports row and col dimension
and should be >= 1.0. For example, a valid pooling ratio looks like [1.0,
1.44, 1.73, 1.0]. The first and last elements must be 1.0 because we don't
allow pooling on batch and channels dimensions. 1.44 and 1.73 are pooling
ratio on height and width dimensions respectively.
pseudo_random: An optional `bool`. Defaults to `False`. When set to `True`,
generates the pooling sequence in a pseudorandom fashion, otherwise, in a
random fashion. Check paper (Graham, 2015) for difference between
pseudorandom and random.
overlapping: An optional `bool`. Defaults to `False`. When set to `True`,
it means when pooling, the values at the boundary of adjacent pooling
cells are used by both cells. For example:
`index 0 1 2 3 4`
`value 20 5 16 3 7`
If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used
twice. The result would be [20, 16] for fractional avg pooling.
seed: An optional `int`. Defaults to `0`. If set to be non-zero, the
random number generator is seeded by the given seed. Otherwise it is
seeded by a random seed.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,
`col_pooling_sequence`).
output: Output `Tensor` after fractional avg pooling. Has the same type as
`value`.
row_pooling_sequence: A `Tensor` of type `int64`.
col_pooling_sequence: A `Tensor` of type `int64`.
References:
Fractional Max-Pooling:
[Graham, 2015](https://arxiv.org/abs/1412.6071)
([pdf](https://arxiv.org/pdf/1412.6071.pdf))
"""
if seed == 0:
return gen_nn_ops.fractional_avg_pool(value, pooling_ratio, pseudo_random,
overlapping, deterministic=False,
seed=0, seed2=0, name=name)
else:
seed1, seed2 = random_seed.get_seed(seed)
return gen_nn_ops.fractional_avg_pool(value, pooling_ratio, pseudo_random,
overlapping, deterministic=True,
seed=seed1, seed2=seed2, name=name)
@ops.RegisterStatistics("Dilation2D", "flops")
def _calc_dilation2d_flops(graph, node):
"""Calculates the compute resources needed for Dilation2D."""
input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
input_shape.assert_is_fully_defined()
filter_shape = graph_util.tensor_shape_from_node_def_name(
graph, node.input[1])
filter_shape.assert_is_fully_defined()
output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
output_shape.assert_is_fully_defined()
filter_height = int(filter_shape[0])
filter_width = int(filter_shape[1])
output_count = np.prod(output_shape.as_list(), dtype=np.int64)
return ops.OpStats("flops", (output_count * filter_height * filter_width * 2))
@tf_export(v1=["nn.erosion2d"])
@dispatch.add_dispatch_support
def erosion2d(value, kernel, strides, rates, padding, name=None):
"""Computes the grayscale erosion of 4-D `value` and 3-D `kernel` tensors.
The `value` tensor has shape `[batch, in_height, in_width, depth]` and the
`kernel` tensor has shape `[kernel_height, kernel_width, depth]`, i.e.,
each input channel is processed independently of the others with its own
structuring function. The `output` tensor has shape
`[batch, out_height, out_width, depth]`. The spatial dimensions of the
output tensor depend on the `padding` algorithm. We currently only support the
default "NHWC" `data_format`.
In detail, the grayscale morphological 2-D erosion is given by:
output[b, y, x, c] =
min_{dy, dx} value[b,
strides[1] * y - rates[1] * dy,
strides[2] * x - rates[2] * dx,
c] -
kernel[dy, dx, c]
Duality: The erosion of `value` by the `kernel` is equal to the negation of
the dilation of `-value` by the reflected `kernel`.
Args:
value: A `Tensor`. 4-D with shape `[batch, in_height, in_width, depth]`.
kernel: A `Tensor`. Must have the same type as `value`.
3-D with shape `[kernel_height, kernel_width, depth]`.
strides: A list of `ints` that has length `>= 4`.
1-D of length 4. The stride of the sliding window for each dimension of
the input tensor. Must be: `[1, stride_height, stride_width, 1]`.
rates: A list of `ints` that has length `>= 4`.
1-D of length 4. The input stride for atrous morphological dilation.
Must be: `[1, rate_height, rate_width, 1]`.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
name: A name for the operation (optional). If not specified "erosion2d"
is used.
Returns:
A `Tensor`. Has the same type as `value`.
4-D with shape `[batch, out_height, out_width, depth]`.
Raises:
ValueError: If the `value` depth does not match `kernel`'s shape, or if
padding is other than `'VALID'` or `'SAME'`.
"""
with ops.name_scope(name, "erosion2d", [value, kernel]) as name:
# Reduce erosion to dilation by duality.
return math_ops.negative(
gen_nn_ops.dilation2d(
input=math_ops.negative(value),
filter=array_ops.reverse_v2(kernel, [0, 1]),
strides=strides,
rates=rates,
padding=padding,
name=name))
@tf_export("nn.erosion2d", v1=[])
@dispatch.add_dispatch_support
def erosion2d_v2(value,
filters,
strides,
padding,
data_format,
dilations,
name=None):
"""Computes the grayscale erosion of 4-D `value` and 3-D `filters` tensors.
The `value` tensor has shape `[batch, in_height, in_width, depth]` and the
`filters` tensor has shape `[filters_height, filters_width, depth]`, i.e.,
each input channel is processed independently of the others with its own
structuring function. The `output` tensor has shape
`[batch, out_height, out_width, depth]`. The spatial dimensions of the
output tensor depend on the `padding` algorithm. We currently only support the
default "NHWC" `data_format`.
In detail, the grayscale morphological 2-D erosion is given by:
output[b, y, x, c] =
min_{dy, dx} value[b,
strides[1] * y - dilations[1] * dy,
strides[2] * x - dilations[2] * dx,
c] -
filters[dy, dx, c]
Duality: The erosion of `value` by the `filters` is equal to the negation of
the dilation of `-value` by the reflected `filters`.
Args:
value: A `Tensor`. 4-D with shape `[batch, in_height, in_width, depth]`.
filters: A `Tensor`. Must have the same type as `value`.
3-D with shape `[filters_height, filters_width, depth]`.
strides: A list of `ints` that has length `>= 4`.
1-D of length 4. The stride of the sliding window for each dimension of
the input tensor. Must be: `[1, stride_height, stride_width, 1]`.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
data_format: A `string`, only `"NHWC"` is currently supported.
dilations: A list of `ints` that has length `>= 4`.
1-D of length 4. The input stride for atrous morphological dilation.
Must be: `[1, rate_height, rate_width, 1]`.
name: A name for the operation (optional). If not specified "erosion2d"
is used.
Returns:
A `Tensor`. Has the same type as `value`.
4-D with shape `[batch, out_height, out_width, depth]`.
Raises:
ValueError: If the `value` depth does not match `filters`' shape, or if
padding is other than `'VALID'` or `'SAME'`.
"""
if data_format != "NHWC":
raise ValueError("Data formats other than NHWC are not yet supported")
with ops.name_scope(name, "erosion2d", [value, filters]) as name:
# Reduce erosion to dilation by duality.
return math_ops.negative(
gen_nn_ops.dilation2d(
input=math_ops.negative(value),
filter=array_ops.reverse_v2(filters, [0, 1]),
strides=strides,
rates=dilations,
padding=padding,
name=name))
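# Editor's note: hedged usage sketch for erosion2d_v2 (assumes TF 2.x); the flat
# (all-zero) structuring element below is hypothetical and reduces erosion to a
# plain 3x3 minimum filter.
# >>> import tensorflow as tf
# >>> img = tf.random.uniform([1, 32, 32, 1])
# >>> se = tf.zeros([3, 3, 1])
# >>> eroded = tf.nn.erosion2d(img, se, strides=[1, 1, 1, 1], padding="SAME",
# ...                          data_format="NHWC", dilations=[1, 1, 1, 1])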
@tf_export(v1=["math.in_top_k", "nn.in_top_k"])
@dispatch.add_dispatch_support
def in_top_k(predictions, targets, k, name=None):
r"""Says whether the targets are in the top `K` predictions.
This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the
prediction for the target class is finite (not inf, -inf, or nan) and among
the top `k` predictions among all predictions for example `i`. Note that the
behavior of `InTopK` differs from the `TopK` op in its handling of ties; if
multiple classes have the same prediction value and straddle the top-`k`
boundary, all of those classes are considered to be in the top `k`.
More formally, let
\\(predictions_i\\) be the predictions for all classes for example `i`,
\\(targets_i\\) be the target class for example `i`,
\\(out_i\\) be the output for example `i`,
$$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$
Args:
predictions: A `Tensor` of type `float32`.
A `batch_size` x `classes` tensor.
targets: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A `batch_size` vector of class ids.
k: An `int`. Number of top elements to look at for computing precision.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `bool`. Computed Precision at `k` as a `bool Tensor`.
"""
with ops.name_scope(name, "in_top_k"):
return gen_nn_ops.in_top_kv2(predictions, targets, k, name=name)
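# Editor's note: doctest-style illustration of the tie handling described above
# (assumes TF 2.x); classes 1 and 2 tie at the top, so both count as top-1.
# >>> import tensorflow as tf
# >>> preds = tf.constant([[0.1, 0.7, 0.7, 0.1]])
# >>> tf.math.in_top_k(targets=[2], predictions=preds, k=1).numpy()
# array([ True])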
@tf_export("math.in_top_k", "nn.in_top_k", v1=[])
@dispatch.add_dispatch_support
def in_top_k_v2(targets, predictions, k, name=None):
return in_top_k(predictions, targets, k, name)
in_top_k_v2.__doc__ = in_top_k.__doc__
tf_export(v1=["nn.quantized_avg_pool"])(
dispatch.add_dispatch_support(gen_nn_ops.quantized_avg_pool))
tf_export(v1=["nn.quantized_conv2d"])(
dispatch.add_dispatch_support(gen_nn_ops.quantized_conv2d))
tf_export(v1=["nn.quantized_relu_x"])(
dispatch.add_dispatch_support(gen_nn_ops.quantized_relu_x))
tf_export(v1=["nn.quantized_max_pool"])(
dispatch.add_dispatch_support(gen_nn_ops.quantized_max_pool))
|
[] |
[] |
[
"TF_DETERMINISTIC_OPS"
] |
[]
|
["TF_DETERMINISTIC_OPS"]
|
python
| 1 | 0 | |
main.go
|
package main
import (
"bytes"
"encoding/gob"
"encoding/json"
"flag"
"fmt"
"html/template"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"os/exec"
"path/filepath"
"strconv"
"time"
"github.com/boltdb/bolt"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
"github.com/shurcooL/github_flavored_markdown"
)
const (
maxQueueLen = 100 // maxQueueLen is the maximum length of the queue
dbFilename = "results.db" // dbFilename is the name of the bolt database
resultBucket = "results" // resultBucket is the name of the bolt bucket containing results
)
// Globals
var (
queue chan string // queue contains the names of all the jobs that need to be processed
db *bolt.DB // db is bolt db for persistent storage
tmpls *template.Template // tmpls contains all the html templates
)
func init() {
queue = make(chan string, maxQueueLen)
}
func main() {
log.Println("Starting...")
listen := flag.String("listen", ":80", "address:port to listen to, leave address blank for all addresses")
flag.Parse()
// open database
log.Println("Opening database...")
var err error
db, err = bolt.Open(dbFilename, 0600, &bolt.Options{Timeout: 1 * time.Second})
if err != nil {
log.Fatal(err)
}
defer db.Close()
// initialise buckets
log.Println("Initialising buckets...")
err = db.Update(func(tx *bolt.Tx) error {
_, err := tx.CreateBucketIfNotExists([]byte(resultBucket))
return err
})
if err != nil {
log.Fatalf("count not initalise %s: %s", dbFilename, err)
}
// fetch readme.md
log.Println("Fetching README.md...")
if err := generateReadme(); err != nil {
log.Fatalf("could not fetch readme: %s", err)
}
// initialise html templates
log.Println("Parsing templates...")
if tmpls, err = template.ParseGlob("tmpl/*.tmpl"); err != nil {
log.Fatalf("could not parse html templates: %s", err)
}
// Start the runner
go runner()
r := mux.NewRouter()
r.NotFoundHandler = http.HandlerFunc(notFoundHandler)
r.HandleFunc("/", homeHandler)
r.HandleFunc("/submit", submitHandler)
r.HandleFunc("/result/{pkg:.+}", resultHandler)
r.HandleFunc("/api/status/{pkg:.+}", statusHandler)
r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir("./static/"))))
// TODO panic handler? per ip (and package?) rate limiter?
h := handlers.CombinedLoggingHandler(os.Stdout, r)
h = handlers.CompressHandler(h)
log.Println("Listening on", *listen)
log.Fatal(http.ListenAndServe(*listen, h))
}
// runner listens for jobs in the queue and runs them
func runner() {
log.Println("Starting runner")
for {
// block waiting for items from the queue
pkg := <-queue
log.Println("Running pkg:", pkg)
// TODO run all as exec commands, not external
cmd := exec.Command("./runGodzilla.sh", pkg)
// Load the stored result and send all stdout/stderr to its Write method
result, err := ResultFromDB(pkg)
if err != nil || result == nil {
log.Println("could not load result for", pkg, ":", err)
continue
}
cmd.Stdout, cmd.Stderr = result, result
// Start and block until finished
if err := cmd.Run(); err != nil {
// TODO non-zero should be OK, probably means it found an error
log.Println("error running godzilla:", err)
}
result.Finished = true
result.Save()
log.Println("finished:", pkg)
}
}
type result struct {
Package string // Package is the name of the package being tested
Finished bool // whether the job has finished
Results []byte // partial or full output of the job
}
// NewResult creates a new result with name of pkg, stores the new result and
// returns it or an error. If the result already exists in storage, it will be
// overwritten.
func NewResult(pkg string) (*result, error) {
r := &result{Package: pkg}
err := r.Save()
return r, err
}
// ResultFromDB looks up the result stored for pkg in the bolt datastore.
// If no result is stored for pkg, the returned result will be nil.
func ResultFromDB(pkg string) (*result, error) {
var result *result
err := db.View(func(tx *bolt.Tx) error {
val := tx.Bucket([]byte(resultBucket)).Get([]byte(pkg))
if val == nil {
// not found so just leave result
return nil
}
var buf bytes.Buffer
if _, err := buf.Write(val); err != nil {
return fmt.Errorf("could not write result to buffer: %s", err)
}
dec := gob.NewDecoder(&buf)
if err := dec.Decode(&result); err != nil {
log.Printf("bytes: %s", buf.Bytes())
return fmt.Errorf("could not decode result %s: %s", val, err)
}
return nil
})
return result, err
}
// Save the current result to storage
func (r *result) Save() error {
_, err := r.Write(nil)
return err
}
// Write implements the io.Writer interface and writes the results to
// persistent storage
func (r *result) Write(p []byte) (int, error) {
r.Results = append(r.Results, p...)
err := db.Update(func(tx *bolt.Tx) error {
var buf bytes.Buffer
enc := gob.NewEncoder(&buf)
if err := enc.Encode(r); err != nil {
return fmt.Errorf("could not decode result: %s", err)
}
return tx.Bucket([]byte(resultBucket)).Put([]byte(r.Package), buf.Bytes())
})
if err != nil {
return 0, err
}
return len(p), nil
}
// generateReadme gets the README.md file, converts to HTML and writes out to a template
func generateReadme() error {
log.Println("GOPATH:", os.Getenv("GOPATH"))
wd, err := os.Getwd()
if err != nil {
return err
}
log.Println("CWD:", wd)
md, err := ioutil.ReadFile(filepath.Join(os.Getenv("GOPATH"), "src/github.com/hydroflame/godzilla/README.md"))
if err != nil {
return err
}
html := []byte(`{{define "generated-readme"}}`)
html = append(html, github_flavored_markdown.Markdown(md)...)
html = append(html, []byte(`{{- end}}`)...)
return ioutil.WriteFile("tmpl/generated-readme.tmpl", html, 0644)
}
// notFoundHandler displays a 404 not found error
func notFoundHandler(w http.ResponseWriter, r *http.Request) {
errorHandler(w, r, http.StatusNotFound, "")
}
// errorHandler handles an error message, with an optional description
func errorHandler(w http.ResponseWriter, r *http.Request, code int, desc string) {
page := struct {
Title string
Code string // eg 400
Status string // eg Bad Request
Desc string // eg Missing key foo
}{fmt.Sprintf("%d - %s", code, http.StatusText(code)), strconv.Itoa(code), http.StatusText(code), desc}
w.Header().Set("Content-Type", "text/html")
w.WriteHeader(code)
if err := tmpls.ExecuteTemplate(w, "error.tmpl", page); err != nil {
fmt.Fprintf(os.Stderr, "error parsing home template: %s", err)
}
}
// homeHandler displays the home page
func homeHandler(w http.ResponseWriter, r *http.Request) {
page := struct {
Title string
}{"Mutation Testing Tool for Go"}
if err := tmpls.ExecuteTemplate(w, "home.tmpl", page); err != nil {
fmt.Fprintf(os.Stderr, "error parsing home template: %s", err)
}
}
// submitHandler handles submissions of packages to be checked and places them
// on the queue, redirecting clients to the results page
func submitHandler(w http.ResponseWriter, r *http.Request) {
if err := r.ParseForm(); err != nil {
errorHandler(w, r, http.StatusInternalServerError, "")
return
}
pkg := r.Form.Get("pkg")
if pkg == "" {
errorHandler(w, r, http.StatusBadRequest, "pkg not set")
return
}
// there's obviously a race here, where checking the length of the queue and
// adding to the queue are different operations, this isn't a big concern atm
if len(queue) > maxQueueLen*0.75 {
errorHandler(w, r, http.StatusInternalServerError, "server too busy")
return
}
// overwrite old entry and store a new one
_, err := NewResult(pkg)
if err != nil {
errorHandler(w, r, http.StatusInternalServerError, "could not store placeholder result")
return
}
// add to the queue
queue <- pkg
// return with a redirect to the result page
redirect := url.URL{
Scheme: r.URL.Scheme,
Host: r.URL.Host,
Path: fmt.Sprintf("/result/%s", pkg),
}
http.Redirect(w, r, redirect.String(), http.StatusFound)
}
// resultHandler shows the result, which may be still running or finished
func resultHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
res, err := ResultFromDB(vars["pkg"])
if err != nil {
fmt.Fprintln(os.Stderr, "error fetching result:", err)
errorHandler(w, r, http.StatusInternalServerError, "error fetching result")
return
}
page := struct {
Title string
Result *result
}{vars["pkg"], res}
// return html
if err := tmpls.ExecuteTemplate(w, "results.tmpl", page); err != nil {
fmt.Fprintln(os.Stderr, "error parsing results template:", err)
}
}
// statusHandler is the API endpoint to check on the status of a job
func statusHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
res, err := ResultFromDB(vars["pkg"])
if err != nil {
fmt.Fprintln(os.Stderr, "error fetching result:", err)
errorHandler(w, r, http.StatusInternalServerError, "error fetching result")
return
}
status := struct {
Finished bool
Result string
}{
Finished: res.Finished,
Result: string(res.Results),
}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(status)
}
|
[
"\"GOPATH\"",
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
main.py
|
import aiohttp
import asyncio
import box
import config
import datetime
import discord
import jishaku
import json
import nekos
import os
import platform
import random
import requests
import re
import typing
from dadjokes import *
from datetime import datetime
from discord.ext import commands
from webserver import keepalive
#Core Bot
client = commands.Bot(description = "Rob Bot", command_prefix = "!")
client.remove_command('help')
client.remove_command('frizzy')
client.launch_time = datetime.utcnow()
EQUAL_REGEX = re.compile(r"""(\w+)\s*=\s*["'](.+?)["']""")
#Starting Bot
@client.event
async def on_ready():
await client.change_presence(activity=discord.Game(name='A Netplay Game'))
print("#################\n# Bot is online #\n#################")
print("Running as: " + client.user.name)
print("Discord.py: " + discord.__version__)
print("Created by Cranky Supertoon#7376")
#Join Logs
@client.event
async def on_member_join(member):
welcomechannel = client.get_channel(684180084307001423)
staffwelcomechannel = client.get_channel(691010936500256859)
jl = [f"We've got cookies {member.mention}!",
f"Isn't there a discord server for memes like {member.mention}?",
f"October 31st, Halloween, November 1st, beginning of Hanukkah and Christmas, what is this {member.mention}!",
f"{member.mention}, Do you like spooky? I like spooky, SPOOOOOKY!!!",
f"The cake is a lie, or is it {member.mention}?",
f"There’s a fire burning {member.mention}! Anybody got marshmallows?",
f"Minecraft 1.13 is here {member.mention}! It took a long time for you guys to add water animals Mojang!",
f"You like games {member.mention}? Hopefully!",
f"Once you get here {member.mention}, you just keep going and going and going...!",
f"Every {member.mention} is like a bird, they just fly in out of nowhere and poop on your head! Not really though!",
f"Never enough {member.mention}'s, or maybe too many I don’t know!",
f"Free Advice From Phantom_storm {member.mention} don't eat your mic, it's too...salty.",
f"I see a message in the sky it says, “welcome {member.mention}!",
f":notes:I see trees of green, {member.mention} too:notes: and i think to myself what a wonderful sever!:notes:",
f"{member.mention} came prepared, with absolutely nothing!",
f"A new player has entered the ring, {member.mention} , show us what you can do!",
f"We have free icecream {member.mention}! But it may melt, so hurry fast!",
f"It’s time to do do do do do do do do do do do do DOOO ITTTT {member.mention}!!!!",
f"Made with 100% dank memes {member.mention}!",
f"This match will get red hot {member.mention}!",
f"Wonder what this button does {member.mention}, oh, another member, amazing!!!",
f"A brawl is surely brewing {member.mention}!",
f"The Man, The Myth, The Legend, {member.mention} has entered the building!",
f"Do you knew the wae {member.mention}? We do know the wae!",
f"Old friends new friends like {member.mention} they’re all my friends!",
f"We were expecting you {member.mention} ( ͡° ͜ʖ ͡°)",
f"We count by friends not members {member.mention}!:grin:",
f"I wonder how many people are on the server? Oh wait, here comes {member.mention}!",
f"Obviously {member.mention} is not an alt account, am I right or am I right! :sunglasses:"
]
jlrandom = random.choice(jl)
await welcomechannel.send(f"{jlrandom}")
await staffwelcomechannel.send(f"{member} Joined. Account created on {member.created_at}")
#Info Command
@client.command("info")
async def s_info(ctx):
server = ctx.guild
icon = server.icon_url_as(size=256)
embed = discord.Embed(title=f"Server info for {server.name}", description=None, colour=0x98FB98)
embed.set_thumbnail(url=icon)
# Basic info -- Name, Region, ID, Owner (USER+descrim, ID), Creation date, member count
embed.add_field(name="Name", value=server.name, inline=False)
embed.add_field(name="Region", value=server.region, inline=True)
embed.add_field(name="ID", value=server.id, inline=True)
embed.add_field(name="Owner", value=f"{server.owner.name}**-**{server.owner.id}", inline=True)
embed.add_field(name="Creation Date", value=f"{server.created_at}", inline=True)
embed.add_field(name="Server Icon Url", value=server.icon_url, inline=False)
embed.add_field(name="Member Count", value=server.member_count, inline=True)
await ctx.send(content=None, embed=embed)
@client.command()
async def help(ctx):
author = ctx.message.author
embed = discord.Embed(color = discord.Color.orange())
embed.set_author(name="Commands:")
embed.add_field(name="General", value="!help - Shows This Message\n\n!ping - Says Pong Back To You\n\n!uptime - Bot Uptime Counter\n\n!toss - Coin Flip\n\n!Dadjoke - Give a Dad Joke\n\n!dice - Roll 1-6", inline=False)
embed.add_field(name="Nintendo Emulators", value="!nes - Shows the Different NES Emulators\n\n!snes - Shows the Different SNES Emulators\n\n!n64 - Shows the Different N64 Emulators\n\n!gc - Shows the Different GameCube Emulators\n\n!wii - Shows the Different Wii Emulators\n\n!wiiu - Shows the Different Wii U Emulators\n\n!switch - Shows the Different Switch Emulators")
embed.add_field(name="Sega Emulators", value="!mastersystem - Shows the Different Master System Emulators\n\n!megadrive - Shows the Different Mega Drive Emulators\n\n!32x - Shows the Different 32X Emulators\n\n!saturn - Shows the Different Sega CD Emulators\n\n!saturn - Shows the Different Saturn Emulators\n\n!dreamcast - Shows the Different Dreamcast Emulators\n\n!gamegear - Shows the Different Game Gear Emulators and their Qualities\n")
embed.add_field(name="PlayStation Emulators", value="!ps1 - Shows the Different PS1 Emulators\n\n!ps2 - Shows the Different PS2 Emulators\n\n!ps3 - Shows the Different PS3 Emulators\n\n!ps4 - Shows the Different PS4 Emulators\n\n!psp - Shows the Different PSP Emulators\n\n!vita - Shows the Different Vita Emulators")
embed.add_field(name="Xbox Emulators", value="!ogxbox - Shows the Different Original Xbox Emulators\n\n!xbox360 - Shows the Different Xbox 360 Emulators\n\n!xbox1 - Shows the Different Xbox 1 Emulators")
embed.add_field(name="Atari Emulators", value="!2600 - Shows the Different Atari 2600 Emulators\n\n!5200 - Shows the Different Atari 5200 Emulators\n\n!7800 - Shows the Different Atari 7800 Emulators\n\n!lynx - Shows the Different Atari Lynx Emulators\n\n!jaguar - Shows the Different Atari Jaguar Emulator")
embed.add_field(name="Other Emulators", value="!pcengine - Shows the Different PC Engine")
await ctx.send(author, embed=embed)
@client.command()
async def nes(ctx):
#author = ctx.message.author
embed = discord.Embed(color = discord.Color.orange())
#embed.set_author(name="NES Emulators")
embed.add_field(name="NES Emulators", value="!mesen - A New Open Source Cycle Accurate NES Emulator with a Clean Interface and Compatibility. Supports Netplay\n\n!nestopia - A Open Source Cycle Accurate NES Emulator that has Excellant Compatibility and is Trusted for being around for a decade\n\n!fceux - FCEUX is an old Open Source Mid Accurate Emulator that has good Compatibility but was surpassed by alot of others. It has really good debugging tools.\n\n!punes - puNES is a Semi New Cycle Accurate NES Emulator. It has some really nice features like a excellant Rewind function.\n\n!virtuanes - VirtuaNES is an Open Source Low Accurate Japaneese Emulator. It is famous for its stupid amount of accessory support but should only be used by the games that require said accessories.", inline=False)
await ctx.send(embed=embed)
#Uptime Command
@client.command()
async def uptime(ctx):
delta_uptime = datetime.utcnow() - client.launch_time
hours, remainder = divmod(int(delta_uptime.total_seconds()), 3600)
minutes, seconds = divmod(remainder, 60)
days, hours = divmod(hours, 24)
await ctx.send(f"{days}d, {hours}h, {minutes}m, {seconds}s")
#Ping Command
@client.command()
async def ping(ctx):
"""Ping Pong"""
await ctx.send('Pong!')
#Dice Command
@client.command()
async def dice(ctx):
"""Rolls the dice"""
cont = random.randint(1, 6)
await ctx.send("You Rolled **{}**".format(cont))
#Toss Command
@client.command()
async def toss(ctx):
"""Put the toss"""
ch = ["Heads", "Tails"]
rch = random.choice(ch)
await ctx.send(f"You got **{rch}**")
#Dadjoke Command
@client.command()
async def dadjoke(ctx):
"""Sends the dadjokes"""
async with ctx.typing():
await ctx.send(Dadjoke().joke)
#PCSX2 Emulator Command
@client.command()
async def pcsx2(ctx):
"""Sends a link to the PCSX2 Download Page and its BIOS"""
async with ctx.typing():
await ctx.send('**PCSX2 Stable Builds:**\n<https://pcsx2.net/download.html>\n\n**PCSX2 Development Builds:**\n<https://buildbot.orphis.net/pcsx2/>\n\n**BIOS:**\n<https://romsmania.cc/bios/pcsx2-playstation-2-bios-3>')
#RPCS3 Emulator Command
@client.command()
async def rpcs3(ctx):
"""Sends a link to the RPCS3 Download Page and its Bios"""
async with ctx.typing():
await ctx.send('**RPCS3 Stable Builds:**\n<https://rpcs3.net/download>\n\n**RPCS3 Development Builds:**\n<https://rpcs3.net/compatibility?b> \n\n**Firmware:**\n<https://www.playstation.com/en-us/support/system-updates/ps3>')
#Citra Emulator Command
@client.command()
async def citra(ctx):
"""Sends a link to the Citra Download Page"""
async with ctx.typing():
await ctx.send('**Citra Builds:**\n<https://citra-emu.org/download/>')
#PPSSPP Emulator Command
@client.command()
async def vita3k(ctx):
"""Sends a link to the Vita3K Download Page and its Bios"""
async with ctx.typing():
await ctx.send('**Vita3K Development Builds:**\n<https://vita3k.org/#download>\n\n**Firmware:**\n<https://www.playstation.com/en-us/support/system-updates/ps-vita/>')
#PPSSPP Emulator Command
@client.command()
async def ppsspp(ctx):
"""Sends a link to the PPSSPP Download Page"""
async with ctx.typing():
await ctx.send('**PPSSPP Stable Builds:**\n<https://www.ppsspp.org/downloads.html>\n\n**PPSSPP Development Builds:**\n<https://buildbot.orphis.net/ppsspp/>')
#Mednafen Emulator Command
@client.command()
async def mednafen(ctx):
"""Sends a link to the Mednafen Download Page"""
async with ctx.typing():
await ctx.send('**Mednafen Stable Builds:**\n<https://mednafen.github.io/releases/>')
#Higan Emulator Command
@client.command()
async def higan(ctx):
"""Sends a link to the Higan Download Page"""
async with ctx.typing():
await ctx.send('**Higan Stable Builds:**\n<https://byuu.org/higan#download>\n\n**Higan Development Builds**\n<https://cirrus-ci.com/github/higan-emu/higan/master>')
#PuNES Emulator Command
@client.command()
async def punes(ctx):
"""Sends a link to the PuNES Emulator Download Page"""
async with ctx.typing():
await ctx.send('**PuNES Stable Builds:**\n<https://github.com/punesemu/puNES/releases>\n\n**PuNES Development Builds:**\n<https://ci.appveyor.com/project/punesemu/punes/build/artifacts>')
#FCEUX Emulator Command
@client.command()
async def fceux(ctx):
"""Sends a link to the FCEUX Emulator Download Page"""
async with ctx.typing():
await ctx.send('**FCEUX Stable Builds:**\n<http://www.fceux.com/web/download.html>\n\n**FCEUX Development Builds:**\n<https://ci.appveyor.com/project/zeromus/fceux/build/artifacts>')
#Mesen Emulator Command
@client.command()
async def mesen(ctx):
"""Sends a link to the Mesen Emulator Download Page"""
async with ctx.typing():
await ctx.send('**Mesen Stable Builds:**\n<https://www.mesen.ca/#Downloads>\n\n**Mesen Development Builds:**\n<https://ci.appveyor.com/project/Sour/mesen/build/artifacts>')
#VirtuaNES Emulator Command
@client.command()
async def virtuanes(ctx):
"""Sends a link to the VirtuaNES Emulator Download Page"""
async with ctx.typing():
await ctx.send('**VirtuaNES Stable Builds:**\n<http://virtuanes.s1.xrea.com/vnes_dl.php>')
#Nestopia Emulator Command
@client.command()
async def nestopia(ctx):
"""Sends a link to the Nestopia Emulator Download Page"""
async with ctx.typing():
await ctx.send('**Nestopia Stable Builds:**\n<https://sourceforge.net/projects/nestopiaue/files/>')
#Mesen-S Emulator Command
@client.command(aliases=['mesen-s'])
async def mesensnes(ctx):
"""Sends a link to the Mesen-S Emulator Download Page"""
async with ctx.typing():
await ctx.send('**Mesen-S Stable Builds:**\n<https://github.com/SourMesen/Mesen-S/releases>\n\n**Mesen-S Developement Builds:**\n<https://ci.appveyor.com/project/Sour/mesen-s/build/artifacts>')
#bsnes Emulator Command
@client.command()
async def bsnes(ctx):
"""Sends a link to the bsnes Emulator Download Page"""
async with ctx.typing():
await ctx.send('**bsnes Stable Builds:**\n<https://byuu.org/bsnes#download>\n\n**bsnes Builds Download:**\n<https://cirrus-ci.com/github/bsnes-emu/bsnes/master>')
#ZSNES Emulator Command
@client.command()
async def zsnes(ctx):
"""Sends a link to the zsnes Emulator Download Page"""
async with ctx.typing():
await ctx.send('**zsnes Stable Builds:**\n<https://www.zsnes.com/index.php?page=files>')
#ZSNES Emulator Command
@client.command()
async def snes9x(ctx):
"""Sends a link to the Snes9x Emulator Download Page"""
async with ctx.typing():
await ctx.send('**Snes9x Stable Builds:**\n<http://www.s9x-w32.de/dl/>\n\n**Snes9x Development Builds:**\n<https://ci.appveyor.com/project/snes9x/snes9x>')
#Project64 Emulator Command
@client.command()
async def project64(ctx):
"""Sends a link to the Project64 Download Page"""
async with ctx.typing():
await ctx.send('**Project64 Stable Builds:**\n<https://www.pj64-emu.com/public-releases>\n\n**Project64 Development Builds:**\nPlease Use These, The Stable Builds are Super Old\n<https://www.pj64-emu.com/nightly-builds>')
#Project64 Netplay Emulator Command
@client.command()
async def project64netplay(ctx):
"""Sends a link to the Project64 Netplay Download Page"""
async with ctx.typing():
await ctx.send('**Project64 Netplay Stable Builds:**\n<https://pj64netplay-emu.ml/download.html>')
#Mupen64Plus Emulator Command
@client.command(aliases=['mupen64'])
async def mupen64plus(ctx):
"""Sends a link to the Mupen64Plus Download Page"""
async with ctx.typing():
        await ctx.send('**Mupen64Plus Stable Builds:**\nNot Recommended For The Average User\n<https://github.com/mupen64plus/mupen64plus-core/releases/>\n\n**m64p (Mupen64Plus with a GUI) Builds:**\nRecommended for its Custom Plugins that fit well with its GUI\n<https://github.com/loganmc10/m64p/releases>\n\n**M64Py (Mupen64Plus Python) Builds:**\nHas a Decent GUI and good Plugin Support\n<https://sourceforge.net/projects/m64py/files/>')
#CEN64 Emulator Command
@client.command()
async def cen64(ctx):
"""Sends a link to the CEN64 Download Page"""
async with ctx.typing():
await ctx.send('**CEN64 Stable Builds:**\n<https://cen64.com/>\n\n**CEN64-QT Builds:**\nGUI for CEN64\n<https://github.com/dh4/cen64-qt/releases>')
#Nemu64 Emulator Command
@client.command()
async def nemu64(ctx):
"""Sends a link to the Nemu64 Download Page"""
async with ctx.typing():
        await ctx.send('**Nemu64 0.8 Mirror Link:**\nOnly Use for Its Extensive Set of Plugins. The Official Website is long dead\n<https://www.majorgeeks.com/files/details/nemu64.html/>')
#Dolphin Emulator Command
@client.command()
async def dolphin(ctx):
"""Sends a link to the Dolphin Emulator Download Page"""
async with ctx.typing():
        await ctx.send('**Dolphin Stable 5.0:**\n<https://dl-mirror.dolphin-emu.org/5.0/dolphin-x64-5.0.exe>\n\n**Dolphin Development Builds:**\n<https://dolphin-emu.org/download/list/master/1/>')
#Cemu Emulator Command
@client.command()
async def cemu(ctx):
"""Sends a link to the Cemu Download Page"""
async with ctx.typing():
await ctx.send('**Cemu Stable Build:**\n<http://cemu.info/#download>')
#Run Bot
keepalive()
TOKEN = os.environ.get("DISCORD_BOT_SECRET")
client.run(TOKEN)
|
[] |
[] |
[
"DISCORD_BOT_SECRET"
] |
[]
|
["DISCORD_BOT_SECRET"]
|
python
| 1 | 0 | |
localization_service/venv/lib/python3.5/site-packages/matplotlib/__init__.py
|
"""
This is an object-oriented plotting library.
A procedural interface is provided by the companion pyplot module,
which may be imported directly, e.g.::
import matplotlib.pyplot as plt
or using ipython::
ipython
at your terminal, followed by::
In [1]: %matplotlib
In [2]: import matplotlib.pyplot as plt
at the ipython shell prompt.
For the most part, direct use of the object-oriented library is
encouraged when programming; pyplot is primarily for working
interactively. The
exceptions are the pyplot commands :func:`~matplotlib.pyplot.figure`,
:func:`~matplotlib.pyplot.subplot`,
:func:`~matplotlib.pyplot.subplots`, and
:func:`~pyplot.savefig`, which can greatly simplify scripting.
Modules include:
:mod:`matplotlib.axes`
defines the :class:`~matplotlib.axes.Axes` class. Most pyplot
commands are wrappers for :class:`~matplotlib.axes.Axes`
methods. The axes module is the highest level of OO access to
the library.
:mod:`matplotlib.figure`
defines the :class:`~matplotlib.figure.Figure` class.
:mod:`matplotlib.artist`
defines the :class:`~matplotlib.artist.Artist` base class for
all classes that draw things.
:mod:`matplotlib.lines`
defines the :class:`~matplotlib.lines.Line2D` class for
drawing lines and markers
:mod:`matplotlib.patches`
defines classes for drawing polygons
:mod:`matplotlib.text`
defines the :class:`~matplotlib.text.Text`,
:class:`~matplotlib.text.TextWithDash`, and
:class:`~matplotlib.text.Annotate` classes
:mod:`matplotlib.image`
defines the :class:`~matplotlib.image.AxesImage` and
:class:`~matplotlib.image.FigureImage` classes
:mod:`matplotlib.collections`
classes for efficient drawing of groups of lines or polygons
:mod:`matplotlib.colors`
classes for interpreting color specifications and for making
colormaps
:mod:`matplotlib.cm`
colormaps and the :class:`~matplotlib.image.ScalarMappable`
mixin class for providing color mapping functionality to other
classes
:mod:`matplotlib.ticker`
classes for calculating tick mark locations and for formatting
tick labels
:mod:`matplotlib.backends`
a subpackage with modules for various gui libraries and output
formats
The base matplotlib namespace includes:
:data:`~matplotlib.rcParams`
a global dictionary of default configuration settings. It is
initialized by code which may be overridden by a matplotlibrc
file.
:func:`~matplotlib.rc`
a function for setting groups of rcParams values
:func:`~matplotlib.use`
a function for setting the matplotlib backend. If used, this
function must be called immediately after importing matplotlib
for the first time. In particular, it must be called
**before** importing pyplot (if pyplot is imported).
matplotlib was initially written by John D. Hunter (1968-2012) and is now
developed and maintained by a host of others.
Occasionally the internal documentation (python docstrings) will refer
to MATLAB®, a registered trademark of The MathWorks, Inc.
"""
# NOTE: This file must remain Python 2 compatible for the foreseeable future,
# to ensure that we error out properly for existing editable installs.
import sys
if sys.version_info < (3, 5): # noqa: E402
raise ImportError("""
Matplotlib 3.0+ does not support Python 2.x, 3.0, 3.1, 3.2, 3.3, or 3.4.
Beginning with Matplotlib 3.0, Python 3.5 and above is required.
See Matplotlib `INSTALL.rst` file for more information:
https://github.com/matplotlib/matplotlib/blob/master/INSTALL.rst
""")
import atexit
from collections.abc import MutableMapping
import contextlib
import distutils.version
import functools
import io
import importlib
import inspect
from inspect import Parameter
import locale
import logging
import os
from pathlib import Path
import pprint
import re
import shutil
import stat
import subprocess
import tempfile
import urllib.request
import warnings
# cbook must import matplotlib only within function
# definitions, so it is safe to import from it here.
from . import cbook, rcsetup
from matplotlib.cbook import (
MatplotlibDeprecationWarning, dedent, get_label, sanitize_sequence)
from matplotlib.cbook import mplDeprecation # deprecated
from matplotlib.rcsetup import defaultParams, validate_backend, cycler
import numpy
# Get the version from the _version.py versioneer file. For a git checkout,
# this is computed based on the number of commits since the last tag.
from ._version import get_versions
__version__ = str(get_versions()['version'])
del get_versions
_log = logging.getLogger(__name__)
__version__numpy__ = '1.10.0' # minimum required numpy version
__bibtex__ = r"""@Article{Hunter:2007,
Author = {Hunter, J. D.},
Title = {Matplotlib: A 2D graphics environment},
Journal = {Computing In Science \& Engineering},
Volume = {9},
Number = {3},
Pages = {90--95},
abstract = {Matplotlib is a 2D graphics package used for Python
for application development, interactive scripting, and
publication-quality image generation across user
interfaces and operating systems.},
publisher = {IEEE COMPUTER SOC},
year = 2007
}"""
def compare_versions(a, b):
"return True if a is greater than or equal to b"
if isinstance(a, bytes):
cbook.warn_deprecated(
"3.0", "compare_version arguments should be strs.")
a = a.decode('ascii')
if isinstance(b, bytes):
cbook.warn_deprecated(
"3.0", "compare_version arguments should be strs.")
b = b.decode('ascii')
if a:
a = distutils.version.LooseVersion(a)
b = distutils.version.LooseVersion(b)
return a >= b
else:
return False
try:
import dateutil
except ImportError:
raise ImportError("Matplotlib requires dateutil")
try:
import pyparsing
except ImportError:
raise ImportError("Matplotlib requires pyparsing")
else:
if not compare_versions(pyparsing.__version__, '2.0.1'):
raise ImportError(
"Matplotlib requires pyparsing>=2.0.1; you have %s"
% pyparsing.__version__)
if not compare_versions(numpy.__version__, __version__numpy__):
raise ImportError(
"Matplotlib requires numpy>=%s; you have %s" % (
__version__numpy__, numpy.__version__))
if not hasattr(sys, 'argv'): # for modpython
sys.argv = ['modpython']
_verbose_msg = """\
matplotlib.verbose is deprecated;
Command line argument --verbose-LEVEL is deprecated.
This functionality is now provided by the standard
python logging library. To get more (or less) logging output:
import logging
logger = logging.getLogger('matplotlib')
    logger.setLevel(logging.INFO)"""
def _set_logger_verbose_level(level_str='silent', file_str='sys.stdout'):
"""
Use a --verbose-LEVEL level to set the logging level:
"""
levelmap = {'silent': logging.WARNING, 'helpful': logging.INFO,
'debug': logging.DEBUG, 'debug-annoying': logging.DEBUG,
'info': logging.INFO, 'warning': logging.WARNING}
# Check that current state of logger isn't already more verbose
# than the requested level. If it is more verbose, then leave more
# verbose.
newlev = levelmap[level_str]
oldlev = _log.getEffectiveLevel()
if newlev < oldlev:
_log.setLevel(newlev)
std = {
'sys.stdout': sys.stdout,
'sys.stderr': sys.stderr,
}
if file_str in std:
fileo = std[file_str]
else:
fileo = sys.stdout
try:
fileo = open(file_str, 'w')
# if this fails, we will just write to stdout
except IOError:
            warnings.warn('could not open log file "{0}" '
'for writing. Check your '
'matplotlibrc'.format(file_str))
console = logging.StreamHandler(fileo)
console.setLevel(newlev)
_log.addHandler(console)
def _parse_commandline():
"""
Check for --verbose-LEVEL type command line arguments and
set logging level appropriately.
"""
levels = ('silent', 'helpful', 'debug', 'debug-annoying',
'info', 'warning')
for arg in sys.argv[1:]:
if arg.startswith('--verbose-'):
level_str = arg[10:]
# If it doesn't match one of ours, then don't even
# bother noting it, we are just a 3rd-party library
# to somebody else's script.
if level_str in levels:
_set_logger_verbose_level(level_str)
_parse_commandline()
class Verbose(object):
"""
A class to handle reporting. Set the fileo attribute to any file
instance to handle the output. Default is sys.stdout
"""
levels = ('silent', 'helpful', 'debug', 'debug-annoying')
vald = {level: i for i, level in enumerate(levels)}
# parse the verbosity from the command line; flags look like
# --verbose-silent or --verbose-helpful
_commandLineVerbose = None
for arg in sys.argv[1:]:
if not arg.startswith('--verbose-'):
continue
level_str = arg[10:]
# If it doesn't match one of ours, then don't even
# bother noting it, we are just a 3rd-party library
# to somebody else's script.
if level_str in levels:
_commandLineVerbose = level_str
@cbook.deprecated("2.2", message=_verbose_msg)
def __init__(self):
self.set_level('silent')
self.fileo = sys.stdout
@cbook.deprecated("2.2", message=_verbose_msg)
def set_level(self, level):
'set the verbosity to one of the Verbose.levels strings'
if self._commandLineVerbose is not None:
level = self._commandLineVerbose
if level not in self.levels:
warnings.warn('matplotlib: unrecognized --verbose-* string "%s".'
' Legal values are %s' % (level, self.levels))
else:
self.level = level
@cbook.deprecated("2.2", message=_verbose_msg)
def set_fileo(self, fname):
std = {
'sys.stdout': sys.stdout,
'sys.stderr': sys.stderr,
}
if fname in std:
self.fileo = std[fname]
else:
try:
fileo = open(fname, 'w')
except IOError:
raise ValueError('Verbose object could not open log file "{0}"'
' for writing.\nCheck your matplotlibrc '
'verbose.fileo setting'.format(fname))
else:
self.fileo = fileo
@cbook.deprecated("2.2", message=_verbose_msg)
def report(self, s, level='helpful'):
"""
print message s to self.fileo if self.level>=level. Return
value indicates whether a message was issued
"""
if self.ge(level):
print(s, file=self.fileo)
return True
return False
@cbook.deprecated("2.2", message=_verbose_msg)
def wrap(self, fmt, func, level='helpful', always=True):
"""
        return a callable function that wraps func and reports its
        output through the verbose handler if the current verbosity level
        is higher than level
if always is True, the report will occur on every function
call; otherwise only on the first time the function is called
"""
assert callable(func)
def wrapper(*args, **kwargs):
ret = func(*args, **kwargs)
if (always or not wrapper._spoke):
spoke = self.report(fmt % ret, level)
if not wrapper._spoke:
wrapper._spoke = spoke
return ret
wrapper._spoke = False
wrapper.__doc__ = func.__doc__
return wrapper
@cbook.deprecated("2.2", message=_verbose_msg)
def ge(self, level):
'return true if self.level is >= level'
return self.vald[self.level] >= self.vald[level]
with warnings.catch_warnings():
warnings.simplefilter("ignore")
verbose = Verbose()
def _logged_cached(fmt, func=None):
"""
Decorator that logs a function's return value, and memoizes that value.
After ::
@_logged_cached(fmt)
def func(): ...
the first call to *func* will log its return value at the DEBUG level using
%-format string *fmt*, and memoize it; later calls to *func* will directly
return that value.
"""
if func is None: # Return the actual decorator.
return functools.partial(_logged_cached, fmt)
called = False
ret = None
@functools.wraps(func)
def wrapper():
nonlocal called, ret
if not called:
ret = func()
called = True
_log.debug(fmt, ret)
return ret
return wrapper
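# --- Illustrative usage sketch (editor's note, not part of matplotlib) ------
# Shows the behaviour documented above: the wrapped function runs once, its
# return value is logged at DEBUG level via the %-format string *fmt*, and
# later calls return the memoized value. `_demo_value` is a hypothetical name.
#
#     @_logged_cached('demo value: %s')
#     def _demo_value():
#         return 42
#
#     _demo_value()   # computes 42, logs "demo value: 42", and caches it
#     _demo_value()   # returns the cached 42; the body is not re-run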
def checkdep_dvipng():
try:
s = subprocess.Popen(['dvipng', '-version'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = s.communicate()
line = stdout.decode('ascii').split('\n')[1]
v = line.split()[-1]
return v
except (IndexError, ValueError, OSError):
return None
def checkdep_ghostscript():
if checkdep_ghostscript.executable is None:
if sys.platform == 'win32':
# mgs is the name in miktex
gs_execs = ['gswin32c', 'gswin64c', 'mgs', 'gs']
else:
gs_execs = ['gs']
for gs_exec in gs_execs:
try:
s = subprocess.Popen(
[gs_exec, '--version'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = s.communicate()
if s.returncode == 0:
v = stdout[:-1].decode('ascii')
checkdep_ghostscript.executable = gs_exec
checkdep_ghostscript.version = v
except (IndexError, ValueError, OSError):
pass
return checkdep_ghostscript.executable, checkdep_ghostscript.version
checkdep_ghostscript.executable = None
checkdep_ghostscript.version = None
def checkdep_pdftops():
try:
s = subprocess.Popen(['pdftops', '-v'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = s.communicate()
lines = stderr.decode('ascii').split('\n')
for line in lines:
if 'version' in line:
v = line.split()[-1]
return v
except (IndexError, ValueError, UnboundLocalError, OSError):
return None
def checkdep_inkscape():
if checkdep_inkscape.version is None:
try:
s = subprocess.Popen(['inkscape', '-V'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = s.communicate()
lines = stdout.decode('ascii').split('\n')
for line in lines:
if 'Inkscape' in line:
v = line.split()[1]
break
checkdep_inkscape.version = v
except (IndexError, ValueError, UnboundLocalError, OSError):
pass
return checkdep_inkscape.version
checkdep_inkscape.version = None
def checkdep_ps_distiller(s):
if not s:
return False
flag = True
gs_req = '8.60'
gs_exec, gs_v = checkdep_ghostscript()
if not compare_versions(gs_v, gs_req):
flag = False
warnings.warn(('matplotlibrc ps.usedistiller option can not be used '
'unless ghostscript-%s or later is installed on your '
'system') % gs_req)
if s == 'xpdf':
pdftops_req = '3.0'
pdftops_req_alt = '0.9' # poppler version numbers, ugh
pdftops_v = checkdep_pdftops()
if compare_versions(pdftops_v, pdftops_req):
pass
elif (compare_versions(pdftops_v, pdftops_req_alt) and not
compare_versions(pdftops_v, '1.0')):
pass
else:
flag = False
warnings.warn(('matplotlibrc ps.usedistiller can not be set to '
'xpdf unless xpdf-%s or later is installed on '
'your system') % pdftops_req)
if flag:
return s
else:
return False
def checkdep_usetex(s):
if not s:
return False
gs_req = '8.60'
dvipng_req = '1.6'
flag = True
if shutil.which("tex") is None:
flag = False
warnings.warn('matplotlibrc text.usetex option can not be used unless '
'TeX is installed on your system')
dvipng_v = checkdep_dvipng()
if not compare_versions(dvipng_v, dvipng_req):
flag = False
warnings.warn('matplotlibrc text.usetex can not be used with *Agg '
'backend unless dvipng-%s or later is installed on '
'your system' % dvipng_req)
gs_exec, gs_v = checkdep_ghostscript()
if not compare_versions(gs_v, gs_req):
flag = False
warnings.warn('matplotlibrc text.usetex can not be used unless '
'ghostscript-%s or later is installed on your system'
% gs_req)
return flag
@_logged_cached('$HOME=%s')
def get_home():
"""
Return the user's home directory.
If the user's home directory cannot be found, return None.
"""
try:
return str(Path.home())
except Exception:
return None
def _create_tmp_config_dir():
"""
If the config directory can not be created, create a temporary directory.
"""
configdir = os.environ['MPLCONFIGDIR'] = (
tempfile.mkdtemp(prefix='matplotlib-'))
atexit.register(shutil.rmtree, configdir)
return configdir
def _get_xdg_config_dir():
"""
Returns the XDG configuration directory, according to the `XDG
base directory spec
<http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html>`_.
"""
return (os.environ.get('XDG_CONFIG_HOME')
or (str(Path(get_home(), ".config"))
if get_home()
else None))
def _get_xdg_cache_dir():
"""
Returns the XDG cache directory, according to the `XDG
base directory spec
<http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html>`_.
"""
return (os.environ.get('XDG_CACHE_HOME')
or (str(Path(get_home(), ".cache"))
if get_home()
else None))
def _get_config_or_cache_dir(xdg_base):
configdir = os.environ.get('MPLCONFIGDIR')
if configdir:
configdir = Path(configdir).resolve()
elif sys.platform.startswith(('linux', 'freebsd')) and xdg_base:
configdir = Path(xdg_base, "matplotlib")
elif get_home():
configdir = Path(get_home(), ".matplotlib")
else:
configdir = None
if configdir:
try:
configdir.mkdir(parents=True, exist_ok=True)
except OSError:
pass
else:
if os.access(str(configdir), os.W_OK) and configdir.is_dir():
return str(configdir)
return _create_tmp_config_dir()
@_logged_cached('CONFIGDIR=%s')
def get_configdir():
"""
Return the string representing the configuration directory.
The directory is chosen as follows:
1. If the MPLCONFIGDIR environment variable is supplied, choose that.
2a. On Linux, follow the XDG specification and look first in
`$XDG_CONFIG_HOME`, if defined, or `$HOME/.config`.
2b. On other platforms, choose `$HOME/.matplotlib`.
3. If the chosen directory exists and is writable, use that as the
configuration directory.
4. If possible, create a temporary directory, and use it as the
configuration directory.
5. A writable directory could not be found or created; return None.
"""
return _get_config_or_cache_dir(_get_xdg_config_dir())
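# Illustrative example (editor's note): MPLCONFIGDIR, when set, takes
# precedence over the XDG/home-directory fallbacks described above. The path
# below is hypothetical and assumed to be writable.
#
#     $ MPLCONFIGDIR=/tmp/mpl-demo python -c \
#         "import matplotlib; print(matplotlib.get_configdir())"
#     /tmp/mpl-demo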
@_logged_cached('CACHEDIR=%s')
def get_cachedir():
"""
Return the location of the cache directory.
The procedure used to find the directory is the same as for
_get_config_dir, except using `$XDG_CACHE_HOME`/`~/.cache` instead.
"""
return _get_config_or_cache_dir(_get_xdg_cache_dir())
def _get_data_path():
'get the path to matplotlib data'
if 'MATPLOTLIBDATA' in os.environ:
path = os.environ['MATPLOTLIBDATA']
if not os.path.isdir(path):
raise RuntimeError('Path in environment MATPLOTLIBDATA not a '
'directory')
return path
def get_candidate_paths():
yield Path(__file__).with_name('mpl-data')
# setuptools' namespace_packages may highjack this init file
# so need to try something known to be in Matplotlib, not basemap.
import matplotlib.afm
yield Path(matplotlib.afm.__file__).with_name('mpl-data')
# py2exe zips pure python, so still need special check.
if getattr(sys, 'frozen', None):
yield Path(sys.executable).with_name('mpl-data')
# Try again assuming we need to step up one more directory.
yield Path(sys.executable).parent.with_name('mpl-data')
# Try again assuming sys.path[0] is a dir not a exe.
yield Path(sys.path[0]) / 'mpl-data'
for path in get_candidate_paths():
if path.is_dir():
return str(path)
raise RuntimeError('Could not find the matplotlib data files')
@_logged_cached('matplotlib data path: %s')
def get_data_path():
if defaultParams['datapath'][0] is None:
defaultParams['datapath'][0] = _get_data_path()
return defaultParams['datapath'][0]
def get_py2exe_datafiles():
data_path = Path(get_data_path())
d = {}
for path in filter(Path.is_file, data_path.glob("**/*")):
(d.setdefault(str(path.parent.relative_to(data_path.parent)), [])
.append(str(path)))
return list(d.items())
def matplotlib_fname():
"""
Get the location of the config file.
The file location is determined in the following order
- `$PWD/matplotlibrc`
- `$MATPLOTLIBRC` if it is a file (or a named pipe, which can be created
e.g. by process substitution)
- `$MATPLOTLIBRC/matplotlibrc`
- `$MPLCONFIGDIR/matplotlibrc`
- On Linux,
- `$XDG_CONFIG_HOME/matplotlib/matplotlibrc` (if
$XDG_CONFIG_HOME is defined)
- or `$HOME/.config/matplotlib/matplotlibrc` (if
$XDG_CONFIG_HOME is not defined)
- On other platforms,
- `$HOME/.matplotlib/matplotlibrc` if `$HOME` is defined.
- Lastly, it looks in `$MATPLOTLIBDATA/matplotlibrc` for a
system-defined copy.
"""
def gen_candidates():
yield os.path.join(os.getcwd(), 'matplotlibrc')
try:
matplotlibrc = os.environ['MATPLOTLIBRC']
except KeyError:
pass
else:
yield matplotlibrc
yield os.path.join(matplotlibrc, 'matplotlibrc')
yield os.path.join(get_configdir(), 'matplotlibrc')
yield os.path.join(get_data_path(), 'matplotlibrc')
for fname in gen_candidates():
if os.path.exists(fname):
st_mode = os.stat(fname).st_mode
if stat.S_ISREG(st_mode) or stat.S_ISFIFO(st_mode):
break
# Return first candidate that is a file, or last candidate if none is
# valid (in that case, a warning is raised at startup by `rc_params`).
return fname
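# Illustrative example (editor's note): the MATPLOTLIBRC environment variable
# from the search order above may point at a file or a directory. The path is
# hypothetical; the result assumes that file exists and that there is no
# ./matplotlibrc in the working directory.
#
#     $ MATPLOTLIBRC=/etc/mpl/matplotlibrc python -c \
#         "import matplotlib; print(matplotlib.matplotlib_fname())"
#     /etc/mpl/matplotlibrc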
# rcParams deprecated and automatically mapped to another key.
# Values are tuples of (version, new_name, f_old2new, f_new2old).
_deprecated_map = {}
# rcParams deprecated; some can manually be mapped to another key.
# Values are tuples of (version, new_name_or_None).
_deprecated_ignore_map = {
'text.dvipnghack': ('2.1', None),
'nbagg.transparent': ('2.2', 'figure.facecolor'),
'plugins.directory': ('2.2', None),
'pgf.debug': ('3.0', None),
}
# rcParams deprecated; can use None to suppress warnings; remain actually
# listed in the rcParams (not included in _all_deprecated).
# Values are tuples of (version,)
_deprecated_remain_as_none = {
'axes.hold': ('2.1',),
'backend.qt4': ('2.2',),
'backend.qt5': ('2.2',),
'text.latex.unicode': ('3.0',),
}
_all_deprecated = {*_deprecated_map, *_deprecated_ignore_map}
class RcParams(MutableMapping, dict):
"""
A dictionary object including validation
validating functions are defined and associated with rc parameters in
:mod:`matplotlib.rcsetup`
"""
validate = {key: converter
for key, (default, converter) in defaultParams.items()
if key not in _all_deprecated}
@property
@cbook.deprecated("3.0")
def msg_depr(self):
return "%s is deprecated and replaced with %s; please use the latter."
@property
@cbook.deprecated("3.0")
def msg_depr_ignore(self):
return "%s is deprecated and ignored. Use %s instead."
@property
@cbook.deprecated("3.0")
def msg_depr_set(self):
return ("%s is deprecated. Please remove it from your matplotlibrc "
"and/or style files.")
@property
@cbook.deprecated("3.0")
def msg_obsolete(self):
return ("%s is obsolete. Please remove it from your matplotlibrc "
"and/or style files.")
@property
@cbook.deprecated("3.0")
def msg_backend_obsolete(self):
return ("The {} rcParam was deprecated in version 2.2. In order to "
"force the use of a specific Qt binding, either import that "
"binding first, or set the QT_API environment variable.")
# validate values on the way in
def __init__(self, *args, **kwargs):
self.update(*args, **kwargs)
def __setitem__(self, key, val):
try:
if key in _deprecated_map:
version, alt_key, alt_val, inverse_alt = _deprecated_map[key]
cbook.warn_deprecated(
version, key, obj_type="rcparam", alternative=alt_key)
key = alt_key
val = alt_val(val)
elif key in _deprecated_remain_as_none and val is not None:
version, = _deprecated_remain_as_none[key]
addendum = ''
if key.startswith('backend'):
addendum = (
"In order to force the use of a specific Qt binding, "
"either import that binding first, or set the QT_API "
"environment variable.")
cbook.warn_deprecated(
"2.2", name=key, obj_type="rcparam", addendum=addendum)
elif key in _deprecated_ignore_map:
version, alt_key = _deprecated_ignore_map[key]
cbook.warn_deprecated(
version, name=key, obj_type="rcparam", alternative=alt_key)
return
elif key == 'examples.directory':
cbook.warn_deprecated(
"3.0", "{} is deprecated; in the future, examples will be "
"found relative to the 'datapath' directory.".format(key))
elif key == 'backend':
if val is rcsetup._auto_backend_sentinel:
if 'backend' in self:
return
try:
cval = self.validate[key](val)
except ValueError as ve:
raise ValueError("Key %s: %s" % (key, str(ve)))
dict.__setitem__(self, key, cval)
except KeyError:
raise KeyError(
'%s is not a valid rc parameter. See rcParams.keys() for a '
'list of valid parameters.' % (key,))
def __getitem__(self, key):
if key in _deprecated_map:
version, alt_key, alt_val, inverse_alt = _deprecated_map[key]
cbook.warn_deprecated(
version, key, obj_type="rcparam", alternative=alt_key)
return inverse_alt(dict.__getitem__(self, alt_key))
elif key in _deprecated_ignore_map:
version, alt_key = _deprecated_ignore_map[key]
cbook.warn_deprecated(
version, key, obj_type="rcparam", alternative=alt_key)
return dict.__getitem__(self, alt_key) if alt_key else None
elif key == 'examples.directory':
cbook.warn_deprecated(
"3.0", "{} is deprecated; in the future, examples will be "
"found relative to the 'datapath' directory.".format(key))
elif key == "backend":
val = dict.__getitem__(self, key)
if val is rcsetup._auto_backend_sentinel:
from matplotlib import pyplot as plt
plt.switch_backend(rcsetup._auto_backend_sentinel)
return dict.__getitem__(self, key)
def __repr__(self):
class_name = self.__class__.__name__
indent = len(class_name) + 1
repr_split = pprint.pformat(dict(self), indent=1,
width=80 - indent).split('\n')
repr_indented = ('\n' + ' ' * indent).join(repr_split)
return '{}({})'.format(class_name, repr_indented)
def __str__(self):
return '\n'.join(map('{0[0]}: {0[1]}'.format, sorted(self.items())))
def __iter__(self):
"""Yield sorted list of keys."""
yield from sorted(dict.__iter__(self))
def find_all(self, pattern):
"""
Return the subset of this RcParams dictionary whose keys match,
using :func:`re.search`, the given ``pattern``.
.. note::
Changes to the returned dictionary are *not* propagated to
the parent RcParams dictionary.
"""
pattern_re = re.compile(pattern)
return RcParams((key, value)
for key, value in self.items()
if pattern_re.search(key))
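# Illustrative sketch (editor's note, not executed here): RcParams validates
# values on assignment, as described in the class docstring above.
#
#     rc = RcParams({'lines.linewidth': 2.5})   # accepted after validation
#     rc['lines.linewidth'] = 'thick'           # raises ValueError
#     rc['lines.no_such_param'] = 1             # raises KeyError
#     rc.find_all('lines')                      # sub-dict of matching keys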
def rc_params(fail_on_error=False):
"""Return a :class:`matplotlib.RcParams` instance from the
default matplotlib rc file.
"""
fname = matplotlib_fname()
if not os.path.exists(fname):
# this should never happen, default in mpl-data should always be found
message = 'could not find rc file; returning defaults'
ret = RcParams([(key, default) for key, (default, _) in
defaultParams.items()
if key not in _all_deprecated])
warnings.warn(message)
return ret
return rc_params_from_file(fname, fail_on_error)
URL_REGEX = re.compile(r'http://|https://|ftp://|file://|file:\\')
def is_url(filename):
"""Return True if string is an http, ftp, or file URL path."""
return URL_REGEX.match(filename) is not None
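# Examples (editor's note, illustrative only):
#
#     is_url('https://matplotlib.org/matplotlibrc')   # -> True
#     is_url('/home/user/.config/matplotlib')         # -> False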
@contextlib.contextmanager
def _open_file_or_url(fname):
if is_url(fname):
with urllib.request.urlopen(fname) as f:
yield (line.decode('utf-8') for line in f)
else:
fname = os.path.expanduser(fname)
encoding = locale.getpreferredencoding(do_setlocale=False)
if encoding is None:
encoding = "utf-8"
with open(fname, encoding=encoding) as f:
yield f
_error_details_fmt = 'line #%d\n\t"%s"\n\tin file "%s"'
def _rc_params_in_file(fname, fail_on_error=False):
"""Return :class:`matplotlib.RcParams` from the contents of the given file.
Unlike `rc_params_from_file`, the configuration class only contains the
parameters specified in the file (i.e. default values are not filled in).
"""
cnt = 0
rc_temp = {}
with _open_file_or_url(fname) as fd:
try:
for line in fd:
cnt += 1
strippedline = line.split('#', 1)[0].strip()
if not strippedline:
continue
tup = strippedline.split(':', 1)
if len(tup) != 2:
error_details = _error_details_fmt % (cnt, line, fname)
warnings.warn('Illegal %s' % error_details)
continue
key, val = tup
key = key.strip()
val = val.strip()
if key in rc_temp:
warnings.warn('Duplicate key in file "%s", line #%d' %
(fname, cnt))
rc_temp[key] = (val, line, cnt)
except UnicodeDecodeError:
warnings.warn(
('Cannot decode configuration file %s with '
'encoding %s, check LANG and LC_* variables')
% (fname, locale.getpreferredencoding(do_setlocale=False) or
'utf-8 (default)'))
raise
config = RcParams()
for key in ('verbose.level', 'verbose.fileo'):
if key in rc_temp:
val, line, cnt = rc_temp.pop(key)
if fail_on_error:
config[key] = val # try to convert to proper type or raise
else:
try:
config[key] = val # try to convert to proper type or skip
except Exception as msg:
error_details = _error_details_fmt % (cnt, line, fname)
warnings.warn('Bad val "%s" on %s\n\t%s' %
(val, error_details, msg))
for key, (val, line, cnt) in rc_temp.items():
if key in defaultParams:
if fail_on_error:
config[key] = val # try to convert to proper type or raise
else:
try:
config[key] = val # try to convert to proper type or skip
except Exception as msg:
error_details = _error_details_fmt % (cnt, line, fname)
warnings.warn('Bad val "%s" on %s\n\t%s' %
(val, error_details, msg))
elif key in _deprecated_ignore_map:
version, alt_key = _deprecated_ignore_map[key]
cbook.warn_deprecated(
version, key, alternative=alt_key,
addendum="Please update your matplotlibrc.")
else:
print("""
Bad key "%s" on line %d in
%s.
You probably need to get an updated matplotlibrc file from
http://github.com/matplotlib/matplotlib/blob/master/matplotlibrc.template
or from the matplotlib source distribution""" % (key, cnt, fname),
file=sys.stderr)
return config
def rc_params_from_file(fname, fail_on_error=False, use_default_template=True):
"""Return :class:`matplotlib.RcParams` from the contents of the given file.
Parameters
----------
fname : str
Name of file parsed for matplotlib settings.
fail_on_error : bool
If True, raise an error when the parser fails to convert a parameter.
use_default_template : bool
If True, initialize with default parameters before updating with those
in the given file. If False, the configuration class only contains the
parameters specified in the file. (Useful for updating dicts.)
"""
config_from_file = _rc_params_in_file(fname, fail_on_error)
if not use_default_template:
return config_from_file
iter_params = defaultParams.items()
with warnings.catch_warnings():
warnings.simplefilter("ignore", MatplotlibDeprecationWarning)
config = RcParams([(key, default) for key, (default, _) in iter_params
if key not in _all_deprecated])
config.update(config_from_file)
if config['datapath'] is None:
config['datapath'] = get_data_path()
if "".join(config['text.latex.preamble']):
_log.info("""
*****************************************************************
You have the following UNSUPPORTED LaTeX preamble customizations:
%s
Please do not ask for support with these customizations active.
*****************************************************************
""", '\n'.join(config['text.latex.preamble']))
_log.debug('loaded rc file %s', fname)
return config
# this is the instance used by the matplotlib classes
rcParams = rc_params()
# Don't trigger deprecation warning when just fetching.
if dict.__getitem__(rcParams, 'examples.directory'):
# paths that are intended to be relative to matplotlib_fname()
# are allowed for the examples.directory parameter.
# However, we will need to fully qualify the path because
# Sphinx requires absolute paths.
if not os.path.isabs(rcParams['examples.directory']):
_basedir, _fname = os.path.split(matplotlib_fname())
# Sometimes matplotlib_fname() can return relative paths,
# Also, using realpath() guarantees that Sphinx will use
# the same path that matplotlib sees (in case of weird symlinks).
_basedir = os.path.realpath(_basedir)
_fullpath = os.path.join(_basedir, rcParams['examples.directory'])
rcParams['examples.directory'] = _fullpath
with warnings.catch_warnings():
warnings.simplefilter("ignore", MatplotlibDeprecationWarning)
rcParamsOrig = RcParams(rcParams.copy())
rcParamsDefault = RcParams([(key, default) for key, (default, converter) in
defaultParams.items()
if key not in _all_deprecated])
rcParams['ps.usedistiller'] = checkdep_ps_distiller(
rcParams['ps.usedistiller'])
rcParams['text.usetex'] = checkdep_usetex(rcParams['text.usetex'])
if rcParams['axes.formatter.use_locale']:
locale.setlocale(locale.LC_ALL, '')
def rc(group, **kwargs):
"""
Set the current rc params. *group* is the grouping for the rc, e.g.,
for ``lines.linewidth`` the group is ``lines``, for
``axes.facecolor``, the group is ``axes``, and so on. Group may
also be a list or tuple of group names, e.g., (*xtick*, *ytick*).
    *kwargs* is a dictionary of attribute name/value pairs, e.g.,::
rc('lines', linewidth=2, color='r')
sets the current rc params and is equivalent to::
rcParams['lines.linewidth'] = 2
rcParams['lines.color'] = 'r'
The following aliases are available to save typing for interactive
users:
===== =================
Alias Property
===== =================
'lw' 'linewidth'
'ls' 'linestyle'
'c' 'color'
'fc' 'facecolor'
'ec' 'edgecolor'
'mew' 'markeredgewidth'
'aa' 'antialiased'
===== =================
Thus you could abbreviate the above rc command as::
rc('lines', lw=2, c='r')
Note you can use python's kwargs dictionary facility to store
dictionaries of default parameters. e.g., you can customize the
font rc as follows::
font = {'family' : 'monospace',
'weight' : 'bold',
'size' : 'larger'}
rc('font', **font) # pass in the font dict as kwargs
This enables you to easily switch between several configurations. Use
``matplotlib.style.use('default')`` or :func:`~matplotlib.rcdefaults` to
restore the default rc params after changes.
"""
aliases = {
'lw': 'linewidth',
'ls': 'linestyle',
'c': 'color',
'fc': 'facecolor',
'ec': 'edgecolor',
'mew': 'markeredgewidth',
'aa': 'antialiased',
}
if isinstance(group, str):
group = (group,)
for g in group:
for k, v in kwargs.items():
name = aliases.get(k) or k
key = '%s.%s' % (g, name)
try:
rcParams[key] = v
except KeyError:
raise KeyError(('Unrecognized key "%s" for group "%s" and '
'name "%s"') % (key, g, name))
def rcdefaults():
"""
Restore the rc params from Matplotlib's internal default style.
Style-blacklisted rc params (defined in
`matplotlib.style.core.STYLE_BLACKLIST`) are not updated.
See Also
--------
rc_file_defaults :
Restore the rc params from the rc file originally loaded by Matplotlib.
matplotlib.style.use :
Use a specific style file. Call ``style.use('default')`` to restore
the default style.
"""
# Deprecation warnings were already handled when creating rcParamsDefault,
# no need to reemit them here.
with warnings.catch_warnings():
warnings.simplefilter("ignore", mplDeprecation)
from .style.core import STYLE_BLACKLIST
rcParams.clear()
rcParams.update({k: v for k, v in rcParamsDefault.items()
if k not in STYLE_BLACKLIST})
def rc_file_defaults():
"""
Restore the rc params from the original rc file loaded by Matplotlib.
Style-blacklisted rc params (defined in
`matplotlib.style.core.STYLE_BLACKLIST`) are not updated.
"""
# Deprecation warnings were already handled when creating rcParamsOrig, no
# need to reemit them here.
with warnings.catch_warnings():
warnings.simplefilter("ignore", mplDeprecation)
from .style.core import STYLE_BLACKLIST
rcParams.update({k: rcParamsOrig[k] for k in rcParamsOrig
if k not in STYLE_BLACKLIST})
def rc_file(fname):
"""
Update rc params from file.
Style-blacklisted rc params (defined in
`matplotlib.style.core.STYLE_BLACKLIST`) are not updated.
"""
# Deprecation warnings were already handled in rc_params_from_file, no need
# to reemit them here.
with warnings.catch_warnings():
warnings.simplefilter("ignore", mplDeprecation)
from .style.core import STYLE_BLACKLIST
rc_from_file = rc_params_from_file(fname)
rcParams.update({k: rc_from_file[k] for k in rc_from_file
if k not in STYLE_BLACKLIST})
class rc_context:
"""
Return a context manager for managing rc settings.
This allows one to do::
with mpl.rc_context(fname='screen.rc'):
plt.plot(x, a)
with mpl.rc_context(fname='print.rc'):
plt.plot(x, b)
plt.plot(x, c)
The 'a' vs 'x' and 'c' vs 'x' plots would have settings from
'screen.rc', while the 'b' vs 'x' plot would have settings from
'print.rc'.
A dictionary can also be passed to the context manager::
with mpl.rc_context(rc={'text.usetex': True}, fname='screen.rc'):
plt.plot(x, a)
The 'rc' dictionary takes precedence over the settings loaded from
'fname'. Passing a dictionary only is also valid. For example a
common usage is::
with mpl.rc_context(rc={'interactive': False}):
fig, ax = plt.subplots()
ax.plot(range(3), range(3))
fig.savefig('A.png', format='png')
plt.close(fig)
"""
# While it may seem natural to implement rc_context using
# contextlib.contextmanager, that would entail always calling the finally:
# clause of the contextmanager (which restores the original rcs) including
# during garbage collection; as a result, something like `plt.xkcd();
# gc.collect()` would result in the style being lost (as `xkcd()` is
    # implemented on top of rc_context, and nothing is holding onto the context
    # manager except possibly circular references).
def __init__(self, rc=None, fname=None):
self._orig = rcParams.copy()
try:
if fname:
rc_file(fname)
if rc:
rcParams.update(rc)
except Exception:
self.__fallback()
raise
def __fallback(self):
# If anything goes wrong, revert to the original rcs.
updated_backend = self._orig['backend']
dict.update(rcParams, self._orig)
        # except for the backend. If the context block triggered resolving
        # the auto backend, keep that resolved value around.
if self._orig['backend'] is rcsetup._auto_backend_sentinel:
rcParams['backend'] = updated_backend
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.__fallback()
def use(arg, warn=True, force=False):
"""
Set the matplotlib backend to one of the known backends.
To find out which backend is currently set, see
:func:`matplotlib.get_backend`.
Parameters
----------
arg : str
The backend to switch to. This can either be one of the
'standard' backend names or a string of the form
``module://my.module.name``. This value is case-insensitive.
warn : bool, optional
If True, warn if this is called after pyplot has been imported
and a backend is set up.
defaults to True
force : bool, optional
If True, attempt to switch the backend. This defaults to
False.
"""
name = validate_backend(arg)
# if setting back to the same thing, do nothing
if (dict.__getitem__(rcParams, 'backend') == name):
pass
# Check if we have already imported pyplot and triggered
# backend selection, do a bit more work
elif 'matplotlib.pyplot' in sys.modules:
        # If we are here, the requested backend differs from the current one.
# If we are going to force the switch, never warn, else, if warn
# is True, then direct users to `plt.switch_backend`
if (not force) and warn:
warnings.warn(
("matplotlib.pyplot as already been imported, "
"this call will have no effect."),
stacklevel=2)
# if we are going to force switching the backend, pull in
# `switch_backend` from pyplot. This will only happen if
# pyplot is already imported.
if force:
from matplotlib.pyplot import switch_backend
switch_backend(name)
# Finally if pyplot is not imported update both rcParams and
# rcDefaults so restoring the defaults later with rcdefaults
# won't change the backend. This is a bit of overkill as 'backend'
# is already in style.core.STYLE_BLACKLIST, but better to be safe.
else:
rcParams['backend'] = rcParamsDefault['backend'] = name
if os.environ.get('MPLBACKEND'):
rcParams['backend'] = os.environ.get('MPLBACKEND')
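# Illustrative usage sketch (editor's note): pick a backend before importing
# pyplot, as the use() docstring above describes; 'Agg' is a non-interactive
# backend shipped with matplotlib.
#
#     import matplotlib
#     matplotlib.use('Agg')            # must come before importing pyplot
#     import matplotlib.pyplot as plt  # pyplot now uses the Agg backend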
def get_backend():
"""Return the name of the current backend."""
return rcParams['backend']
def interactive(b):
"""
Set interactive mode to boolean b.
If b is True, then draw after every plotting command, e.g., after xlabel
"""
rcParams['interactive'] = b
def is_interactive():
'Return true if plot mode is interactive'
return rcParams['interactive']
def tk_window_focus():
"""Return true if focus maintenance under TkAgg on win32 is on.
This currently works only for python.exe and IPython.exe.
Both IDLE and Pythonwin.exe fail badly when tk_window_focus is on."""
if rcParams['backend'] != 'TkAgg':
return False
return rcParams['tk.window_focus']
default_test_modules = [
'matplotlib.tests',
'matplotlib.sphinxext.tests',
'mpl_toolkits.tests',
]
def _init_tests():
# CPython's faulthandler since v3.6 handles exceptions on Windows
# https://bugs.python.org/issue23848 but until v3.6.4 it was printing
# non-fatal exceptions https://bugs.python.org/issue30557
import platform
if not (sys.platform == 'win32' and
(3, 6) < sys.version_info < (3, 6, 4) and
platform.python_implementation() == 'CPython'):
import faulthandler
faulthandler.enable()
# The version of FreeType to install locally for running the
# tests. This must match the value in `setupext.py`
LOCAL_FREETYPE_VERSION = '2.6.1'
from matplotlib import ft2font
if (ft2font.__freetype_version__ != LOCAL_FREETYPE_VERSION or
ft2font.__freetype_build_type__ != 'local'):
warnings.warn(
"Matplotlib is not built with the correct FreeType version to run "
"tests. Set local_freetype=True in setup.cfg and rebuild. "
"Expect many image comparison failures below. "
"Expected freetype version {0}. "
"Found freetype version {1}. "
"Freetype build type is {2}local".format(
LOCAL_FREETYPE_VERSION,
ft2font.__freetype_version__,
"" if ft2font.__freetype_build_type__ == 'local' else "not "
)
)
try:
import pytest
except ImportError:
print("matplotlib.test requires pytest to run.")
raise
def test(verbosity=None, coverage=False, switch_backend_warn=True,
recursionlimit=0, **kwargs):
"""run the matplotlib test suite"""
_init_tests()
if not os.path.isdir(os.path.join(os.path.dirname(__file__), 'tests')):
raise ImportError("Matplotlib test data is not installed")
old_backend = get_backend()
old_recursionlimit = sys.getrecursionlimit()
try:
use('agg')
if recursionlimit:
sys.setrecursionlimit(recursionlimit)
import pytest
args = kwargs.pop('argv', [])
provide_default_modules = True
use_pyargs = True
for arg in args:
if any(arg.startswith(module_path)
for module_path in default_test_modules):
provide_default_modules = False
break
if os.path.exists(arg):
provide_default_modules = False
use_pyargs = False
break
if use_pyargs:
args += ['--pyargs']
if provide_default_modules:
args += default_test_modules
if coverage:
args += ['--cov']
if verbosity:
args += ['-' + 'v' * verbosity]
retcode = pytest.main(args, **kwargs)
finally:
if old_backend.lower() != 'agg':
use(old_backend, warn=switch_backend_warn)
if recursionlimit:
sys.setrecursionlimit(old_recursionlimit)
return retcode
test.__test__ = False # pytest: this function is not a test
def _replacer(data, key):
"""Either returns data[key] or passes data back. Also
converts input data to a sequence as needed.
"""
# if key isn't a string don't bother
if not isinstance(key, str):
return key
# try to use __getitem__
try:
return sanitize_sequence(data[key])
# key does not exist, silently fall back to key
except KeyError:
return key
_DATA_DOC_APPENDIX = """
.. note::
In addition to the above described arguments, this function can take a
**data** keyword argument. If such a **data** argument is given, the
following arguments are replaced by **data[<arg>]**:
{replaced}
Objects passed as **data** must support item access (``data[<arg>]``) and
membership test (``<arg> in data``).
"""
def _add_data_doc(docstring, replace_names, replace_all_args):
"""Add documentation for a *data* field to the given docstring.
Parameters
----------
docstring : str
The input docstring.
replace_names : list of strings or None
The list of parameter names which arguments should be replaced by
`data[name]`. If None, all arguments are replaced if they are
included in `data`.
replace_all_args : bool
If True, all arguments in *args get replaced, even if they are not
in replace_names.
Returns
-------
The augmented docstring.
"""
if docstring is None:
docstring = ''
else:
docstring = dedent(docstring)
_repl = ""
if replace_names is None:
_repl = "* All positional and all keyword arguments."
else:
if len(replace_names) != 0:
_repl = "* All arguments with the following names: '{names}'."
if replace_all_args:
_repl += "\n * All positional arguments."
_repl = _repl.format(names="', '".join(sorted(replace_names)))
return docstring + _DATA_DOC_APPENDIX.format(replaced=_repl)
def _preprocess_data(replace_names=None, replace_all_args=False,
label_namer=None, positional_parameter_names=None):
"""
    A decorator to add a 'data' kwarg to any function. The signature
of the input function must include the ax argument at the first position ::
def foo(ax, *args, **kwargs)
so this is suitable for use with Axes methods.
Parameters
----------
replace_names : list of strings, optional, default: None
The list of parameter names which arguments should be replaced by
`data[name]`. If None, all arguments are replaced if they are
included in `data`.
replace_all_args : bool, default: False
If True, all arguments in *args get replaced, even if they are not
in replace_names.
label_namer : string, optional, default: None
The name of the parameter which argument should be used as label, if
label is not set. If None, the label keyword argument is not set.
positional_parameter_names : list of strings or callable, optional
The full list of positional parameter names (excluding an explicit
`ax`/'self' argument at the first place and including all possible
        positional parameters in `*args`), in the right order. Can also include
        all other keyword parameters. Only needed if the wrapped function does
contain `*args` and (replace_names is not None or replace_all_args is
False). If it is a callable, it will be called with the actual
tuple of *args and the data and should return a list like
above.
NOTE: callables should only be used when the names and order of *args
        can only be determined at runtime. Please use a list of names
        when the order and names of *args are clear before runtime!
.. note:: decorator also converts MappingView input data to list.
"""
if replace_names is not None:
replace_names = set(replace_names)
def param(func):
sig = inspect.signature(func)
_has_varargs = False
_has_varkwargs = False
_arg_names = []
params = list(sig.parameters.values())
for p in params:
if p.kind is Parameter.VAR_POSITIONAL:
_has_varargs = True
elif p.kind is Parameter.VAR_KEYWORD:
_has_varkwargs = True
else:
_arg_names.append(p.name)
data_param = Parameter('data', Parameter.KEYWORD_ONLY, default=None)
if _has_varkwargs:
params.insert(-1, data_param)
else:
params.append(data_param)
new_sig = sig.replace(parameters=params)
# Import-time check: do we have enough information to replace *args?
arg_names_at_runtime = False
# there can't be any positional arguments behind *args and no
# positional args can end up in **kwargs, so only *varargs make
# problems.
# http://stupidpythonideas.blogspot.de/2013/08/arguments-and-parameters.html
if not _has_varargs:
# all args are "named", so no problem
# remove the first "ax" / self arg
arg_names = _arg_names[1:]
else:
# Here we have "unnamed" variables and we need a way to determine
            # whether to replace an arg or not
if replace_names is None:
# all argnames should be replaced
arg_names = None
elif len(replace_names) == 0:
# No argnames should be replaced
arg_names = []
elif len(_arg_names) > 1 and (positional_parameter_names is None):
# we got no manual parameter names but more than an 'ax' ...
if len(replace_names - set(_arg_names[1:])) == 0:
# all to be replaced arguments are in the list
arg_names = _arg_names[1:]
else:
raise AssertionError(
"Got unknown 'replace_names' and wrapped function "
"{!r} uses '*args', need 'positional_parameter_names'"
.format(func.__name__))
else:
if positional_parameter_names is not None:
if callable(positional_parameter_names):
# determined by the function at runtime
arg_names_at_runtime = True
# so that we don't compute the label_pos at import time
arg_names = []
else:
arg_names = positional_parameter_names
else:
if replace_all_args:
arg_names = []
else:
raise AssertionError(
"Got 'replace_names' and wrapped function {!r} "
"uses *args, need 'positional_parameter_names' or "
"'replace_all_args'".format(func.__name__))
# compute the possible label_namer and label position in positional
# arguments
label_pos = 9999 # bigger than all "possible" argument lists
label_namer_pos = 9999 # bigger than all "possible" argument lists
if (label_namer and # we actually want a label here ...
arg_names and # and we can determine a label in *args ...
label_namer in arg_names): # and it is in *args
label_namer_pos = arg_names.index(label_namer)
if "label" in arg_names:
label_pos = arg_names.index("label")
        # Check the case where we know a label_namer but we can't find it in
        # the arg_names... Unfortunately the label_namer can be in **kwargs,
# which we can't detect here and which results in a non-set label
# which might surprise the user :-(
if label_namer and not arg_names_at_runtime and not _has_varkwargs:
if not arg_names:
raise AssertionError(
"label_namer {!r} can't be found as the parameter without "
"'positional_parameter_names'".format(label_namer))
elif label_namer not in arg_names:
raise AssertionError(
"label_namer {!r} can't be found in the parameter names "
"(known argnames: %s).".format(label_namer, arg_names))
else:
# this is the case when the name is in arg_names
pass
@functools.wraps(func)
def inner(ax, *args, data=None, **kwargs):
# this is needed because we want to change these values if
# arg_names_at_runtime==True, but python does not allow assigning
            # to a variable in an outer scope. So use some new local ones and
# set them to the already computed values.
_label_pos = label_pos
_label_namer_pos = label_namer_pos
_arg_names = arg_names
label = None
if data is None: # data validation
args = tuple(sanitize_sequence(a) for a in args)
else:
if arg_names_at_runtime:
# update the information about replace names and
# label position
_arg_names = positional_parameter_names(args, data)
if (label_namer and # we actually want a label here ...
_arg_names and # and we can find a label in *args
(label_namer in _arg_names)): # and it is in *args
_label_namer_pos = _arg_names.index(label_namer)
if "label" in _arg_names:
                        _label_pos = _arg_names.index("label")
# save the current label_namer value so that it can be used as
# a label
if _label_namer_pos < len(args):
label = args[_label_namer_pos]
else:
label = kwargs.get(label_namer, None)
# ensure a string, as label can't be anything else
if not isinstance(label, str):
label = None
if (replace_names is None) or (replace_all_args is True):
# all should be replaced
args = tuple(_replacer(data, a) for
j, a in enumerate(args))
else:
# An arg is replaced if the arg_name of that position is
# in replace_names ...
if len(_arg_names) < len(args):
raise RuntimeError(
"Got more args than function expects")
args = tuple(_replacer(data, a)
if _arg_names[j] in replace_names else a
for j, a in enumerate(args))
if replace_names is None:
# replace all kwargs ...
kwargs = {k: _replacer(data, v) for k, v in kwargs.items()}
else:
# ... or only if a kwarg of that name is in replace_names
kwargs = {
k: _replacer(data, v) if k in replace_names else v
for k, v in kwargs.items()}
# replace the label if this func "wants" a label arg and the user
# didn't set one. Note: if the user puts in "label=None", it does
# *NOT* get replaced!
user_supplied_label = (
len(args) >= _label_pos or # label is included in args
'label' in kwargs # ... or in kwargs
)
if label_namer and not user_supplied_label:
if _label_namer_pos < len(args):
kwargs['label'] = get_label(args[_label_namer_pos], label)
elif label_namer in kwargs:
kwargs['label'] = get_label(kwargs[label_namer], label)
else:
warnings.warn(
"Tried to set a label via parameter %r in func %r but "
"couldn't find such an argument.\n"
"(This is a programming error, please report to "
"the Matplotlib list!)" % (label_namer, func.__name__),
RuntimeWarning, stacklevel=2)
return func(ax, *args, **kwargs)
inner.__doc__ = _add_data_doc(inner.__doc__,
replace_names, replace_all_args)
inner.__signature__ = new_sig
return inner
return param
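# Illustrative sketch (editor's note): from the caller's side, the 'data'
# kwarg added by this decorator lets string arguments be looked up in a
# container; 'ax' is assumed to be an Axes and 'd' is a hypothetical dict.
#
#     d = {'xs': [1, 2, 3], 'ys': [4, 5, 6]}
#     ax.plot('xs', 'ys', data=d)   # roughly ax.plot(d['xs'], d['ys'])
#                                   # with 'ys' used as the default label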
_log.debug('matplotlib version %s', __version__)
_log.debug('interactive is %s', is_interactive())
_log.debug('platform is %s', sys.platform)
_log.debug('loaded modules: %s', list(sys.modules))
|
[] |
[] |
[
"MPLCONFIGDIR",
"XDG_CACHE_HOME",
"MATPLOTLIBRC",
"MPLBACKEND",
"XDG_CONFIG_HOME",
"MATPLOTLIBDATA"
] |
[]
|
["MPLCONFIGDIR", "XDG_CACHE_HOME", "MATPLOTLIBRC", "MPLBACKEND", "XDG_CONFIG_HOME", "MATPLOTLIBDATA"]
|
python
| 6 | 0 | |
soracom/generated/cmd/devices_get_object_model.go
|
// Code generated by soracom-cli generate-cmd. DO NOT EDIT.
package cmd
import (
"fmt"
"net/url"
"os"
"github.com/spf13/cobra"
)
// DevicesGetObjectModelCmdModelId holds value of 'model_id' option
var DevicesGetObjectModelCmdModelId string
func init() {
DevicesGetObjectModelCmd.Flags().StringVar(&DevicesGetObjectModelCmdModelId, "model-id", "", TRAPI("Device object model ID"))
DevicesCmd.AddCommand(DevicesGetObjectModelCmd)
}
// DevicesGetObjectModelCmd defines 'get-object-model' subcommand
var DevicesGetObjectModelCmd = &cobra.Command{
Use: "get-object-model",
Short: TRAPI("/device_object_models/{model_id}:get:summary"),
Long: TRAPI(`/device_object_models/{model_id}:get:description`),
RunE: func(cmd *cobra.Command, args []string) error {
opt := &apiClientOptions{
BasePath: "/v1",
Language: getSelectedLanguage(),
}
ac := newAPIClient(opt)
if v := os.Getenv("SORACOM_VERBOSE"); v != "" {
ac.SetVerbose(true)
}
err := authHelper(ac, cmd, args)
if err != nil {
cmd.SilenceUsage = true
return err
}
param, err := collectDevicesGetObjectModelCmdParams(ac)
if err != nil {
return err
}
body, err := ac.callAPI(param)
if err != nil {
cmd.SilenceUsage = true
return err
}
if body == "" {
return nil
}
if rawOutput {
_, err = os.Stdout.Write([]byte(body))
} else {
return prettyPrintStringAsJSON(body)
}
return err
},
}
func collectDevicesGetObjectModelCmdParams(ac *apiClient) (*apiParams, error) {
if DevicesGetObjectModelCmdModelId == "" {
return nil, fmt.Errorf("required parameter '%s' is not specified", "model-id")
}
return &apiParams{
method: "GET",
path: buildPathForDevicesGetObjectModelCmd("/device_object_models/{model_id}"),
query: buildQueryForDevicesGetObjectModelCmd(),
noRetryOnError: noRetryOnError,
}, nil
}
func buildPathForDevicesGetObjectModelCmd(path string) string {
escapedModelId := url.PathEscape(DevicesGetObjectModelCmdModelId)
path = strReplace(path, "{"+"model_id"+"}", escapedModelId, -1)
return path
}
func buildQueryForDevicesGetObjectModelCmd() url.Values {
result := url.Values{}
return result
}
|
[
"\"SORACOM_VERBOSE\""
] |
[] |
[
"SORACOM_VERBOSE"
] |
[]
|
["SORACOM_VERBOSE"]
|
go
| 1 | 0 | |
pygeoapi/flask_app.py
|
# =================================================================
#
# Authors: Tom Kralidis <[email protected]>
# Norman Barker <[email protected]>
#
# Copyright (c) 2022 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
""" Flask module providing the route paths to the api"""
import os
import click
from flask import Flask, Blueprint, make_response, request, send_from_directory
from pygeoapi.api import API
from pygeoapi.util import get_mimetype, yaml_load
CONFIG = None
if 'PYGEOAPI_CONFIG' not in os.environ:
raise RuntimeError('PYGEOAPI_CONFIG environment variable not set')
with open(os.environ.get('PYGEOAPI_CONFIG'), encoding='utf8') as fh:
CONFIG = yaml_load(fh)
STATIC_FOLDER = 'static'
if 'templates' in CONFIG['server']:
STATIC_FOLDER = CONFIG['server']['templates'].get('static', 'static')
APP = Flask(__name__, static_folder=STATIC_FOLDER, static_url_path='/static')
APP.url_map.strict_slashes = False
BLUEPRINT = Blueprint('pygeoapi', __name__, static_folder=STATIC_FOLDER)
# CORS: optionally enable from config.
if CONFIG['server'].get('cors', False):
from flask_cors import CORS
CORS(APP)
APP.config['JSONIFY_PRETTYPRINT_REGULAR'] = CONFIG['server'].get(
'pretty_print', True)
api_ = API(CONFIG)
OGC_SCHEMAS_LOCATION = CONFIG['server'].get('ogc_schemas_location', None)
if (OGC_SCHEMAS_LOCATION is not None and
not OGC_SCHEMAS_LOCATION.startswith('http')):
# serve the OGC schemas locally
if not os.path.exists(OGC_SCHEMAS_LOCATION):
raise RuntimeError('OGC schemas misconfigured')
@BLUEPRINT.route('/schemas/<path:path>', methods=['GET'])
def schemas(path):
"""
Serve OGC schemas locally
:param path: path of the OGC schema document
:returns: HTTP response
"""
full_filepath = os.path.join(OGC_SCHEMAS_LOCATION, path)
dirname_ = os.path.dirname(full_filepath)
basename_ = os.path.basename(full_filepath)
# TODO: better sanitization?
path_ = dirname_.replace('..', '').replace('//', '')
return send_from_directory(path_, basename_,
mimetype=get_mimetype(basename_))
def get_response(result: tuple):
"""
Creates a Flask Response object and updates matching headers.
:param result: The result of the API call.
This should be a tuple of (headers, status, content).
:returns: A Response instance.
"""
headers, status, content = result
response = make_response(content, status)
if headers:
response.headers = headers
return response
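# Illustrative only: every route below delegates to the API instance and wraps
# the (headers, status, content) tuple it returns, e.g.
#   return get_response(api_.landing_page(request))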
@BLUEPRINT.route('/')
def landing_page():
"""
OGC API landing page endpoint
:returns: HTTP response
"""
return get_response(api_.landing_page(request))
@BLUEPRINT.route('/openapi')
def openapi():
"""
OpenAPI endpoint
:returns: HTTP response
"""
with open(os.environ.get('PYGEOAPI_OPENAPI'), encoding='utf8') as ff:
if os.environ.get('PYGEOAPI_OPENAPI').endswith(('.yaml', '.yml')):
openapi_ = yaml_load(ff)
else: # JSON string, do not transform
openapi_ = ff.read()
return get_response(api_.openapi(request, openapi_))
@BLUEPRINT.route('/conformance')
def conformance():
"""
OGC API conformance endpoint
:returns: HTTP response
"""
return get_response(api_.conformance(request))
@BLUEPRINT.route('/collections')
@BLUEPRINT.route('/collections/<collection_id>')
def collections(collection_id=None):
"""
OGC API collections endpoint
:param collection_id: collection identifier
:returns: HTTP response
"""
return get_response(api_.describe_collections(request, collection_id))
@BLUEPRINT.route('/collections/<collection_id>/queryables')
def collection_queryables(collection_id=None):
"""
    OGC API collections queryables endpoint
:param collection_id: collection identifier
:returns: HTTP response
"""
return get_response(api_.get_collection_queryables(request, collection_id))
@BLUEPRINT.route('/collections/<collection_id>/items', methods=['GET', 'POST'])
@BLUEPRINT.route('/collections/<collection_id>/items/<item_id>')
def collection_items(collection_id, item_id=None):
"""
OGC API collections items endpoint
:param collection_id: collection identifier
:param item_id: item identifier
:returns: HTTP response
"""
if item_id is None:
if request.method == 'GET': # list items
return get_response(
api_.get_collection_items(request, collection_id))
elif request.method == 'POST': # filter items
return get_response(
api_.post_collection_items(request, collection_id))
else:
return get_response(
api_.get_collection_item(request, collection_id, item_id))
@BLUEPRINT.route('/collections/<collection_id>/coverage')
def collection_coverage(collection_id):
"""
OGC API - Coverages coverage endpoint
:param collection_id: collection identifier
:returns: HTTP response
"""
return get_response(api_.get_collection_coverage(request, collection_id))
@BLUEPRINT.route('/collections/<collection_id>/coverage/domainset')
def collection_coverage_domainset(collection_id):
"""
OGC API - Coverages coverage domainset endpoint
:param collection_id: collection identifier
:returns: HTTP response
"""
return get_response(api_.get_collection_coverage_domainset(
request, collection_id))
@BLUEPRINT.route('/collections/<collection_id>/coverage/rangetype')
def collection_coverage_rangetype(collection_id):
"""
OGC API - Coverages coverage rangetype endpoint
:param collection_id: collection identifier
:returns: HTTP response
"""
return get_response(api_.get_collection_coverage_rangetype(
request, collection_id))
@BLUEPRINT.route('/collections/<collection_id>/tiles')
def get_collection_tiles(collection_id=None):
"""
OGC open api collections tiles access point
:param collection_id: collection identifier
:returns: HTTP response
"""
return get_response(api_.get_collection_tiles(
request, collection_id))
@BLUEPRINT.route('/collections/<collection_id>/tiles/<tileMatrixSetId>/metadata') # noqa
def get_collection_tiles_metadata(collection_id=None, tileMatrixSetId=None):
"""
OGC open api collection tiles service metadata
:param collection_id: collection identifier
:param tileMatrixSetId: identifier of tile matrix set
:returns: HTTP response
"""
return get_response(api_.get_collection_tiles_metadata(
request, collection_id, tileMatrixSetId))
@BLUEPRINT.route('/collections/<collection_id>/tiles/\
<tileMatrixSetId>/<tileMatrix>/<tileRow>/<tileCol>')
def get_collection_tiles_data(collection_id=None, tileMatrixSetId=None,
tileMatrix=None, tileRow=None, tileCol=None):
"""
OGC open api collection tiles service data
:param collection_id: collection identifier
:param tileMatrixSetId: identifier of tile matrix set
:param tileMatrix: identifier of {z} matrix index
:param tileRow: identifier of {y} matrix index
:param tileCol: identifier of {x} matrix index
:returns: HTTP response
"""
return get_response(api_.get_collection_tiles_data(
request, collection_id, tileMatrixSetId, tileMatrix, tileRow, tileCol))
@BLUEPRINT.route('/processes')
@BLUEPRINT.route('/processes/<process_id>')
def get_processes(process_id=None):
"""
OGC API - Processes description endpoint
:param process_id: process identifier
:returns: HTTP response
"""
return get_response(api_.describe_processes(request, process_id))
@BLUEPRINT.route('/jobs')
@BLUEPRINT.route('/jobs/<job_id>',
methods=['GET', 'DELETE'])
def get_jobs(job_id=None):
"""
OGC API - Processes jobs endpoint
:param job_id: job identifier
:returns: HTTP response
"""
if job_id is None:
return get_response(api_.get_jobs(request))
else:
if request.method == 'DELETE': # dismiss job
return get_response(api_.delete_job(job_id))
else: # Return status of a specific job
return get_response(api_.get_jobs(request, job_id))
@BLUEPRINT.route('/processes/<process_id>/execution', methods=['POST'])
def execute_process_jobs(process_id):
"""
OGC API - Processes execution endpoint
:param process_id: process identifier
:returns: HTTP response
"""
return get_response(api_.execute_process(request, process_id))
@BLUEPRINT.route('/jobs/<job_id>/results',
methods=['GET'])
def get_job_result(job_id=None):
"""
OGC API - Processes job result endpoint
:param job_id: job identifier
:returns: HTTP response
"""
return get_response(api_.get_job_result(request, job_id))
@BLUEPRINT.route('/jobs/<job_id>/results/<resource>',
methods=['GET'])
def get_job_result_resource(job_id, resource):
"""
OGC API - Processes job result resource endpoint
:param job_id: job identifier
:param resource: job resource
:returns: HTTP response
"""
return get_response(api_.get_job_result_resource(
request, job_id, resource))
@BLUEPRINT.route('/collections/<collection_id>/position')
@BLUEPRINT.route('/collections/<collection_id>/area')
@BLUEPRINT.route('/collections/<collection_id>/cube')
@BLUEPRINT.route('/collections/<collection_id>/trajectory')
@BLUEPRINT.route('/collections/<collection_id>/corridor')
@BLUEPRINT.route('/collections/<collection_id>/instances/<instance_id>/position') # noqa
@BLUEPRINT.route('/collections/<collection_id>/instances/<instance_id>/area')
@BLUEPRINT.route('/collections/<collection_id>/instances/<instance_id>/cube')
@BLUEPRINT.route('/collections/<collection_id>/instances/<instance_id>/trajectory') # noqa
@BLUEPRINT.route('/collections/<collection_id>/instances/<instance_id>/corridor') # noqa
def get_collection_edr_query(collection_id, instance_id=None):
"""
OGC EDR API endpoints
:param collection_id: collection identifier
:param instance_id: instance identifier
:returns: HTTP response
"""
query_type = request.path.split('/')[-1]
return get_response(api_.get_collection_edr_query(request, collection_id,
instance_id, query_type))
@BLUEPRINT.route('/stac')
def stac_catalog_root():
"""
STAC root endpoint
:returns: HTTP response
"""
return get_response(api_.get_stac_root(request))
@BLUEPRINT.route('/stac/<path:path>')
def stac_catalog_path(path):
"""
STAC path endpoint
:param path: path
:returns: HTTP response
"""
return get_response(api_.get_stac_path(request, path))
APP.register_blueprint(BLUEPRINT)
@click.command()
@click.pass_context
@click.option('--debug', '-d', default=False, is_flag=True, help='debug')
def serve(ctx, server=None, debug=False):
"""
Serve pygeoapi via Flask. Runs pygeoapi
    as a Flask server. Not recommended for production.
:param server: `string` of server type
:param debug: `bool` of whether to run in debug mode
:returns: void
"""
# setup_logger(CONFIG['logging'])
APP.run(debug=True, host=api_.config['server']['bind']['host'],
port=api_.config['server']['bind']['port'])
if __name__ == '__main__': # run locally, for testing
serve()
|
[] |
[] |
[
"PYGEOAPI_OPENAPI",
"PYGEOAPI_CONFIG"
] |
[]
|
["PYGEOAPI_OPENAPI", "PYGEOAPI_CONFIG"]
|
python
| 2 | 0 | |
Sender.py
|
import threading
import pyminizip
import os
import datetime
import smtplib
import ssl
import Main
import wx
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.application import MIMEApplication
# for i18n support
_ = wx.GetTranslation
class SampleFiles:
"""
    Class representing sample files for compressing
"""
file_list: list = None # All file paths
password: str = None # password for compressing
output_path: str = None # output path
def __init__(self, file_list: str, password: str):
"""
Initialize compressing
:param file_list: file paths
:param password: compress password
"""
self.file_list = file_list.split('\n')
self.password = password
def compress(self):
"""
Compress files to user dir
:return: output path
"""
cur_time = str(datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d-%H-%M-%S'))
user_path = os.getenv('APPDATA') + "\\VirusSampleSubmitter"
output_path = '{user_path}\\SamplePack[{time}].zip'.format(user_path=user_path, time=cur_time)
self.output_path = output_path
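        # Arguments as used here: source paths, no in-archive prefixes, the
        # destination zip, the user-supplied password, and compression level 8.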
pyminizip.compress_multiple(self.file_list, [], output_path, self.password, 8)
return output_path
def delete_zip(self):
"""Delete self after compressing"""
if self.output_path is not None:
os.remove(self.output_path)
def compress_to_desktop(self):
"""
Compress files to user desktop
:return: output path
"""
cur_time = str(datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d-%H-%M-%S'))
output_path = '{desktop_path}\\SamplePack[{time}].zip'.format(desktop_path=os.path.expanduser("~\\Desktop"),
time=cur_time)
self.output_path = output_path
pyminizip.compress_multiple(self.file_list, [], output_path, self.password, 8)
return output_path
class Mail:
"""
    Class representing an email body
"""
mail: MIMEMultipart = None # mail body
mail_dst_list: list = None # vendor list
def __init__(self, mail_src, mail_dst, mail_type, mail_content, attach_path):
"""
Initialize mail obj
:param mail_src: sender
:param mail_dst: receivers
:param mail_type: false negative or positive
:param mail_content: content
:param attach_path: sample pack path
"""
mail = MIMEMultipart()
mail['From'] = mail_src
mail['To'] = ';'.join(self._extract_mails(mail_dst))
mail['Subject'] = self._get_mail_title(mail_type)
mail.attach(MIMEText(mail_content, 'plain', 'utf-8'))
with open(attach_path, 'rb') as f:
attachment = MIMEApplication(f.read())
attachment.add_header('Content-Disposition', 'attachment',
filename=str(attach_path)[str(attach_path).rfind("\\") + 1:len(attach_path)])
mail.attach(attachment)
self.mail = mail
self.mail_dst_list = self._extract_mails(mail_dst)
return
def _extract_mails(self, mails_string_array):
"""
Extract email address from vendor list
:param mails_string_array:
:return:
"""
result_list: list = []
for mail_string in mails_string_array:
address = mail_string[mail_string.find(";") + 1: len(mail_string)]
result_list.append(address)
return result_list
def _get_mail_title(self, mail_type):
"""
Get the title of mail based on type
:param mail_type: fn or fp
:return:
"""
cur_time = str(datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d-%H-%M-%S'))
if mail_type == 0:
return '[Malware]{time}'.format(time=cur_time)
else:
return '[False Positive]{time}'.format(time=cur_time)
def get_mail(self):
return self.mail
def get_mail_dst_list(self):
return self.mail_dst_list
class SendingThread(threading.Thread):
"""
    Class for sending sample files
"""
frame: Main.AppMainFrame = None # main frame
def __init__(self, frame):
"""
Start thread
:param frame: main frame
"""
threading.Thread.__init__(self)
self.frame = frame
self.start()
def run(self):
"""Sending sample"""
status = self.frame.progress_bar
if not self._basic_check(): # check if parameters are valid
            wx.MessageBox(message=_('Missing or incorrect parameters. Check that all info is filled in correctly.'),
caption=_('ERROR'),
style=wx.OK | wx.ICON_ERROR)
status.Destroy()
return
# Compressing files
status.Update(value=25,
newmsg=_('Compressing Files...'))
sample = SampleFiles(self.frame.file_input.GetValue(), self.frame.zip_password)
try:
output_path = sample.compress()
except Exception as e:
wx.MessageBox(message=_('Cannot Access Sample File(s). Check paths.\n') +
'Error: {error}\nInfo: {info}'.format(error=e.__class__.__name__, info=str(e)),
caption=_('ERROR'),
style=wx.OK | wx.ICON_ERROR)
sample.delete_zip()
status.Destroy()
return
# Build email body
status.Update(value=50,
newmsg=_('Composing Mail...'))
mail = Mail(mail_src=self.frame.email_account.GetValue(),
mail_dst=self.frame.selected_vendors.GetStrings(),
mail_type=self._get_mail_type(),
mail_content=self._get_mail_content(),
attach_path=output_path)
# Login to email account and send email
status.Update(value=75,
newmsg=_('Login To Your Email...'))
mail_body = mail.get_mail()
mail_src = self.frame.email_account.GetValue()
mail_password = self.frame.password_input.GetValue()
mail_smtp = self.frame.smtp_input.GetValue()
mail_port = self.frame.port_input.GetValue()
try:
context = ssl.SSLContext(ssl.PROTOCOL_TLS) # use ssl
mail_main = smtplib.SMTP(host=mail_smtp, port=mail_port)
mail_main.ehlo()
mail_main.starttls(context=context)
mail_main.ehlo()
mail_main.login(user=mail_src, password=mail_password)
status.Update(value=90,
newmsg=_('Sending Email...'))
mail_main.sendmail(from_addr=mail_src, to_addrs=mail.get_mail_dst_list(), msg=mail_body.as_string())
except Exception as e:
            wx.MessageBox(message=_('Login failed. Check your Internet connection, your login info, or other config.\n') +
'Error: {error}\nInfo: {info}'.format(error=e.__class__.__name__, info=str(e)),
caption=_('ERROR'),
style=wx.OK | wx.ICON_ERROR)
status.Destroy()
sample.delete_zip()
return
status.Update(value=100,
newmsg=_('SUCCEED!'))
wx.MessageBox(message=_('Email Sent. You may login your email to check the status.'),
caption=_('INFO'),
style=wx.OK | wx.ICON_INFORMATION)
status.Destroy()
self.frame.file_input.SetValue(_(u"#Drag all file(s) here. One line per file."))
sample.delete_zip()
return
def _get_mail_type(self):
"""Get mail type; 0 for fn, 1 for fp"""
if self.frame.false_neg_select.GetValue():
return 0
if self.frame.false_positive_select.GetValue():
return 1
def _get_mail_content(self):
"""Get mail content based on type"""
if self._get_mail_type() == 0:
return self.frame.false_negative_content.format(password=self.frame.zip_password)
if self._get_mail_type() == 1:
return self.frame.false_positive_content.format(password=self.frame.zip_password)
def _basic_check(self):
"""Check if parameters are valid"""
f = self.frame
if (f.email_account.GetValue() == '' or
f.password_input.GetValue() == '' or
f.smtp_input.GetValue() == '' or
f.port_input.GetValue() == '' or
f.zip_password == '' or
f.selected_vendors.GetStrings() == []):
return False
return True
|
[] |
[] |
[
"APPDATA"
] |
[]
|
["APPDATA"]
|
python
| 1 | 0 | |
src/main/java/com/amazonaws/config/BetModule.java
|
package com.amazonaws.config;
import com.amazonaws.dao.BetDao;
import com.fasterxml.jackson.databind.ObjectMapper;
import dagger.Module;
import dagger.Provides;
import software.amazon.awssdk.http.apache.ApacheHttpClient;
import software.amazon.awssdk.services.dynamodb.DynamoDbClient;
import software.amazon.awssdk.services.dynamodb.DynamoDbClientBuilder;
import java.net.URI;
import java.util.Optional;
import javax.inject.Named;
import javax.inject.Singleton;
@Module
public class BetModule {
@Singleton
@Provides
@Named("tableName")
String tableName() {
return Optional.ofNullable(System.getenv("TABLE_NAME")).orElse("bet");
}
@Singleton
@Provides
DynamoDbClient dynamoDb() {
final String endpoint = System.getenv("ENDPOINT_OVERRIDE");
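        // ENDPOINT_OVERRIDE is optional; when set (for example to a local
        // DynamoDB endpoint such as http://localhost:8000, an illustrative
        // value), the client is pointed at it instead of the default endpoint.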
DynamoDbClientBuilder builder = DynamoDbClient.builder();
builder.httpClient(ApacheHttpClient.builder().build());
if (endpoint != null && !endpoint.isEmpty()) {
builder.endpointOverride(URI.create(endpoint));
}
return builder.build();
}
@Singleton
@Provides
ObjectMapper objectMapper() {
return new ObjectMapper();
}
@Singleton
@Provides
public BetDao betDao(DynamoDbClient dynamoDb, @Named("tableName") String tableName) {
        return new BetDao(dynamoDb, tableName, 10);
}
}
|
[
"\"TABLE_NAME\"",
"\"ENDPOINT_OVERRIDE\""
] |
[] |
[
"TABLE_NAME",
"ENDPOINT_OVERRIDE"
] |
[]
|
["TABLE_NAME", "ENDPOINT_OVERRIDE"]
|
java
| 2 | 0 | |
fhir/resources/STU3/tests/test_procedure.py
|
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/Procedure
Release: STU3
Version: 3.0.2
Revision: 11917
Last updated: 2019-10-24T11:53:00+11:00
"""
import io
import json
import os
import unittest
import pytest
from .. import procedure
from ..fhirdate import FHIRDate
from .fixtures import force_bytes
@pytest.mark.usefixtures("base_settings")
class ProcedureTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get("FHIR_UNITTEST_DATADIR") or ""
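        # FHIR_UNITTEST_DATADIR, if set, is the directory holding the example
        # JSON fixtures; otherwise filenames resolve against the working directory.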
with io.open(os.path.join(datadir, filename), "r", encoding="utf-8") as handle:
js = json.load(handle)
self.assertEqual("Procedure", js["resourceType"])
return procedure.Procedure(js)
def testProcedure1(self):
inst = self.instantiate_from("procedure-example-f201-tpf.json")
self.assertIsNotNone(inst, "Must have instantiated a Procedure instance")
self.implProcedure1(inst)
js = inst.as_json()
self.assertEqual("Procedure", js["resourceType"])
inst2 = procedure.Procedure(js)
self.implProcedure1(inst2)
def implProcedure1(self, inst):
self.assertEqual(
force_bytes(inst.bodySite[0].coding[0].code), force_bytes("272676008")
)
self.assertEqual(
force_bytes(inst.bodySite[0].coding[0].display),
force_bytes("Sphenoid bone"),
)
self.assertEqual(
force_bytes(inst.bodySite[0].coding[0].system),
force_bytes("http://snomed.info/sct"),
)
self.assertEqual(
force_bytes(inst.code.coding[0].code), force_bytes("367336001")
)
self.assertEqual(
force_bytes(inst.code.coding[0].display), force_bytes("Chemotherapy")
)
self.assertEqual(
force_bytes(inst.code.coding[0].system),
force_bytes("http://snomed.info/sct"),
)
self.assertEqual(force_bytes(inst.id), force_bytes("f201"))
self.assertEqual(
force_bytes(inst.note[0].text),
force_bytes(
"Eerste neo-adjuvante TPF-kuur bij groot proces in sphenoid met intracraniale uitbreiding."
),
)
self.assertEqual(
inst.performedPeriod.end.date, FHIRDate("2013-01-28T14:27:00+01:00").date
)
self.assertEqual(
inst.performedPeriod.end.as_json(), "2013-01-28T14:27:00+01:00"
)
self.assertEqual(
inst.performedPeriod.start.date, FHIRDate("2013-01-28T13:31:00+01:00").date
)
self.assertEqual(
inst.performedPeriod.start.as_json(), "2013-01-28T13:31:00+01:00"
)
self.assertEqual(
force_bytes(inst.performer[0].role.coding[0].code), force_bytes("310512001")
)
self.assertEqual(
force_bytes(inst.performer[0].role.coding[0].display),
force_bytes("Medical oncologist"),
)
self.assertEqual(
force_bytes(inst.performer[0].role.coding[0].system),
force_bytes("http://snomed.info/sct"),
)
self.assertEqual(
force_bytes(inst.reasonCode[0].text), force_bytes("DiagnosticReport/f201")
)
self.assertEqual(force_bytes(inst.status), force_bytes("completed"))
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
def testProcedure2(self):
inst = self.instantiate_from("procedure-example-ambulation.json")
self.assertIsNotNone(inst, "Must have instantiated a Procedure instance")
self.implProcedure2(inst)
js = inst.as_json()
self.assertEqual("Procedure", js["resourceType"])
inst2 = procedure.Procedure(js)
self.implProcedure2(inst2)
def implProcedure2(self, inst):
self.assertEqual(force_bytes(inst.code.coding[0].code), force_bytes("62013009"))
self.assertEqual(
force_bytes(inst.code.coding[0].display),
force_bytes("Ambulating patient (procedure)"),
)
self.assertEqual(
force_bytes(inst.code.coding[0].system),
force_bytes("http://snomed.info/sct"),
)
self.assertEqual(force_bytes(inst.code.text), force_bytes("Ambulation"))
self.assertEqual(force_bytes(inst.id), force_bytes("ambulation"))
self.assertEqual(force_bytes(inst.identifier[0].value), force_bytes("12345"))
self.assertTrue(inst.notDone)
self.assertEqual(
force_bytes(inst.notDoneReason.coding[0].code), force_bytes("398254007")
)
self.assertEqual(
force_bytes(inst.notDoneReason.coding[0].display),
force_bytes(" Pre-eclampsia (disorder)"),
)
self.assertEqual(
force_bytes(inst.notDoneReason.coding[0].system),
force_bytes("http://snomed.info/sct"),
)
self.assertEqual(
force_bytes(inst.notDoneReason.text), force_bytes("Pre-eclampsia")
)
self.assertEqual(force_bytes(inst.status), force_bytes("suspended"))
self.assertEqual(
force_bytes(inst.text.div),
force_bytes(
'<div xmlns="http://www.w3.org/1999/xhtml">Ambulation procedure was not done</div>'
),
)
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
def testProcedure3(self):
inst = self.instantiate_from("procedure-example-implant.json")
self.assertIsNotNone(inst, "Must have instantiated a Procedure instance")
self.implProcedure3(inst)
js = inst.as_json()
self.assertEqual("Procedure", js["resourceType"])
inst2 = procedure.Procedure(js)
self.implProcedure3(inst2)
def implProcedure3(self, inst):
self.assertEqual(force_bytes(inst.code.coding[0].code), force_bytes("25267002"))
self.assertEqual(
force_bytes(inst.code.coding[0].display),
force_bytes("Insertion of intracardiac pacemaker (procedure)"),
)
self.assertEqual(
force_bytes(inst.code.coding[0].system),
force_bytes("http://snomed.info/sct"),
)
self.assertEqual(force_bytes(inst.code.text), force_bytes("Implant Pacemaker"))
self.assertEqual(
force_bytes(inst.focalDevice[0].action.coding[0].code),
force_bytes("implanted"),
)
self.assertEqual(
force_bytes(inst.focalDevice[0].action.coding[0].system),
force_bytes("http://hl7.org/fhir/device-action"),
)
self.assertEqual(
force_bytes(inst.followUp[0].text), force_bytes("ROS 5 days - 2013-04-10")
)
self.assertEqual(force_bytes(inst.id), force_bytes("example-implant"))
self.assertEqual(
force_bytes(inst.note[0].text),
force_bytes(
"Routine Appendectomy. Appendix was inflamed and in retro-caecal position"
),
)
self.assertEqual(inst.performedDateTime.date, FHIRDate("2015-04-05").date)
self.assertEqual(inst.performedDateTime.as_json(), "2015-04-05")
self.assertEqual(
force_bytes(inst.reasonCode[0].text), force_bytes("Bradycardia")
)
self.assertEqual(force_bytes(inst.status), force_bytes("completed"))
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
def testProcedure4(self):
inst = self.instantiate_from("procedure-example-colon-biopsy.json")
self.assertIsNotNone(inst, "Must have instantiated a Procedure instance")
self.implProcedure4(inst)
js = inst.as_json()
self.assertEqual("Procedure", js["resourceType"])
inst2 = procedure.Procedure(js)
self.implProcedure4(inst2)
def implProcedure4(self, inst):
self.assertEqual(force_bytes(inst.code.coding[0].code), force_bytes("76164006"))
self.assertEqual(
force_bytes(inst.code.coding[0].display),
force_bytes("Biopsy of colon (procedure)"),
)
self.assertEqual(
force_bytes(inst.code.coding[0].system),
force_bytes("http://snomed.info/sct"),
)
self.assertEqual(force_bytes(inst.code.text), force_bytes("Biopsy of colon"))
self.assertEqual(force_bytes(inst.id), force_bytes("colon-biopsy"))
self.assertEqual(force_bytes(inst.identifier[0].value), force_bytes("12345"))
self.assertFalse(inst.notDone)
self.assertEqual(force_bytes(inst.status), force_bytes("completed"))
self.assertEqual(
force_bytes(inst.text.div),
force_bytes(
'<div xmlns="http://www.w3.org/1999/xhtml">Biopsy of colon, which was part of colonoscopy</div>'
),
)
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
def testProcedure5(self):
inst = self.instantiate_from("procedure-example-f004-tracheotomy.json")
self.assertIsNotNone(inst, "Must have instantiated a Procedure instance")
self.implProcedure5(inst)
js = inst.as_json()
self.assertEqual("Procedure", js["resourceType"])
inst2 = procedure.Procedure(js)
self.implProcedure5(inst2)
def implProcedure5(self, inst):
self.assertEqual(
force_bytes(inst.bodySite[0].coding[0].code), force_bytes("83030008")
)
self.assertEqual(
force_bytes(inst.bodySite[0].coding[0].display),
force_bytes("Retropharyngeal area"),
)
self.assertEqual(
force_bytes(inst.bodySite[0].coding[0].system),
force_bytes("http://snomed.info/sct"),
)
self.assertEqual(force_bytes(inst.code.coding[0].code), force_bytes("48387007"))
self.assertEqual(
force_bytes(inst.code.coding[0].display), force_bytes("Tracheotomy")
)
self.assertEqual(
force_bytes(inst.code.coding[0].system),
force_bytes("http://snomed.info/sct"),
)
self.assertEqual(
force_bytes(inst.followUp[0].text), force_bytes("described in care plan")
)
self.assertEqual(force_bytes(inst.id), force_bytes("f004"))
self.assertEqual(
force_bytes(inst.outcome.text),
force_bytes("removal of the retropharyngeal abscess"),
)
self.assertEqual(
inst.performedPeriod.end.date, FHIRDate("2013-03-22T10:30:10+01:00").date
)
self.assertEqual(
inst.performedPeriod.end.as_json(), "2013-03-22T10:30:10+01:00"
)
self.assertEqual(
inst.performedPeriod.start.date, FHIRDate("2013-03-22T09:30:10+01:00").date
)
self.assertEqual(
inst.performedPeriod.start.as_json(), "2013-03-22T09:30:10+01:00"
)
self.assertEqual(
force_bytes(inst.performer[0].role.coding[0].code), force_bytes("01.000")
)
self.assertEqual(
force_bytes(inst.performer[0].role.coding[0].display), force_bytes("Arts")
)
self.assertEqual(
force_bytes(inst.performer[0].role.coding[0].system),
force_bytes("urn:oid:2.16.840.1.113883.2.4.15.111"),
)
self.assertEqual(
force_bytes(inst.performer[0].role.text), force_bytes("Care role")
)
self.assertEqual(
force_bytes(inst.reasonCode[0].text),
force_bytes("ensure breathing during surgery"),
)
self.assertEqual(force_bytes(inst.status), force_bytes("completed"))
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
def testProcedure6(self):
inst = self.instantiate_from("procedure-example-education.json")
self.assertIsNotNone(inst, "Must have instantiated a Procedure instance")
self.implProcedure6(inst)
js = inst.as_json()
self.assertEqual("Procedure", js["resourceType"])
inst2 = procedure.Procedure(js)
self.implProcedure6(inst2)
def implProcedure6(self, inst):
self.assertEqual(
force_bytes(inst.category.coding[0].code), force_bytes("311401005")
)
self.assertEqual(
force_bytes(inst.category.coding[0].display),
force_bytes("Patient education (procedure)"),
)
self.assertEqual(
force_bytes(inst.category.coding[0].system),
force_bytes("http://snomed.info/sct"),
)
self.assertEqual(force_bytes(inst.category.text), force_bytes("Education"))
self.assertEqual(force_bytes(inst.code.coding[0].code), force_bytes("48023004"))
self.assertEqual(
force_bytes(inst.code.coding[0].display),
force_bytes("Breast self-examination technique education (procedure)"),
)
self.assertEqual(
force_bytes(inst.code.coding[0].system),
force_bytes("http://snomed.info/sct"),
)
self.assertEqual(
force_bytes(inst.code.text),
force_bytes("Health education - breast examination"),
)
self.assertEqual(force_bytes(inst.id), force_bytes("education"))
self.assertEqual(inst.performedDateTime.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.performedDateTime.as_json(), "2014-08-16")
self.assertEqual(
force_bytes(inst.reasonCode[0].text),
force_bytes("early detection of breast mass"),
)
self.assertEqual(force_bytes(inst.status), force_bytes("completed"))
self.assertEqual(
force_bytes(inst.text.div),
force_bytes(
'<div xmlns="http://www.w3.org/1999/xhtml">Health education - breast examination for early detection of breast mass</div>'
),
)
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
def testProcedure7(self):
inst = self.instantiate_from("procedure-example-colonoscopy.json")
self.assertIsNotNone(inst, "Must have instantiated a Procedure instance")
self.implProcedure7(inst)
js = inst.as_json()
self.assertEqual("Procedure", js["resourceType"])
inst2 = procedure.Procedure(js)
self.implProcedure7(inst2)
def implProcedure7(self, inst):
self.assertEqual(force_bytes(inst.code.coding[0].code), force_bytes("73761001"))
self.assertEqual(
force_bytes(inst.code.coding[0].display),
force_bytes("Colonoscopy (procedure)"),
)
self.assertEqual(
force_bytes(inst.code.coding[0].system),
force_bytes("http://snomed.info/sct"),
)
self.assertEqual(force_bytes(inst.code.text), force_bytes("Colonoscopy"))
self.assertEqual(force_bytes(inst.id), force_bytes("colonoscopy"))
self.assertEqual(force_bytes(inst.identifier[0].value), force_bytes("12345"))
self.assertFalse(inst.notDone)
self.assertEqual(force_bytes(inst.status), force_bytes("completed"))
self.assertEqual(
force_bytes(inst.text.div),
force_bytes(
'<div xmlns="http://www.w3.org/1999/xhtml">Colonoscopy with complication</div>'
),
)
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
def testProcedure8(self):
inst = self.instantiate_from("procedure-example-physical-therapy.json")
self.assertIsNotNone(inst, "Must have instantiated a Procedure instance")
self.implProcedure8(inst)
js = inst.as_json()
self.assertEqual("Procedure", js["resourceType"])
inst2 = procedure.Procedure(js)
self.implProcedure8(inst2)
def implProcedure8(self, inst):
self.assertEqual(
force_bytes(inst.bodySite[0].coding[0].code), force_bytes("36701003")
)
self.assertEqual(
force_bytes(inst.bodySite[0].coding[0].display),
force_bytes("Both knees (body structure)"),
)
self.assertEqual(
force_bytes(inst.bodySite[0].coding[0].system),
force_bytes("http://snomed.info/sct"),
)
self.assertEqual(force_bytes(inst.bodySite[0].text), force_bytes("Both knees"))
self.assertEqual(
force_bytes(inst.category.coding[0].code), force_bytes("386053000")
)
self.assertEqual(
force_bytes(inst.category.coding[0].display),
force_bytes("Evaluation procedure (procedure)"),
)
self.assertEqual(
force_bytes(inst.category.coding[0].system),
force_bytes("http://snomed.info/sct"),
)
self.assertEqual(force_bytes(inst.category.text), force_bytes("Evaluation"))
self.assertEqual(
force_bytes(inst.code.coding[0].code), force_bytes("710830005")
)
self.assertEqual(
force_bytes(inst.code.coding[0].display),
force_bytes("Assessment of passive range of motion (procedure)"),
)
self.assertEqual(
force_bytes(inst.code.coding[0].system),
force_bytes("http://snomed.info/sct"),
)
self.assertEqual(
force_bytes(inst.code.text),
force_bytes("Assessment of passive range of motion"),
)
self.assertEqual(force_bytes(inst.id), force_bytes("physical-therapy"))
self.assertEqual(inst.performedDateTime.date, FHIRDate("2016-09-27").date)
self.assertEqual(inst.performedDateTime.as_json(), "2016-09-27")
self.assertEqual(
force_bytes(inst.reasonCode[0].text),
force_bytes("assessment of mobility limitations due to osteoarthritis"),
)
self.assertEqual(force_bytes(inst.status), force_bytes("completed"))
self.assertEqual(
force_bytes(inst.text.div),
force_bytes(
'<div xmlns="http://www.w3.org/1999/xhtml">Assessment of passive range of motion for both knees on Sept 27, 2016 due to osteoarthritis</div>'
),
)
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
def testProcedure9(self):
inst = self.instantiate_from("procedure-example-f003-abscess.json")
self.assertIsNotNone(inst, "Must have instantiated a Procedure instance")
self.implProcedure9(inst)
js = inst.as_json()
self.assertEqual("Procedure", js["resourceType"])
inst2 = procedure.Procedure(js)
self.implProcedure9(inst2)
def implProcedure9(self, inst):
self.assertEqual(
force_bytes(inst.bodySite[0].coding[0].code), force_bytes("83030008")
)
self.assertEqual(
force_bytes(inst.bodySite[0].coding[0].display),
force_bytes("Retropharyngeal area"),
)
self.assertEqual(
force_bytes(inst.bodySite[0].coding[0].system),
force_bytes("http://snomed.info/sct"),
)
self.assertEqual(
force_bytes(inst.code.coding[0].code), force_bytes("172960003")
)
self.assertEqual(
force_bytes(inst.code.coding[0].display),
force_bytes("Incision of retropharyngeal abscess"),
)
self.assertEqual(
force_bytes(inst.code.coding[0].system),
force_bytes("http://snomed.info/sct"),
)
self.assertEqual(
force_bytes(inst.followUp[0].text), force_bytes("described in care plan")
)
self.assertEqual(force_bytes(inst.id), force_bytes("f003"))
self.assertEqual(
force_bytes(inst.outcome.text),
force_bytes("removal of the retropharyngeal abscess"),
)
self.assertEqual(
inst.performedPeriod.end.date, FHIRDate("2013-03-24T10:30:10+01:00").date
)
self.assertEqual(
inst.performedPeriod.end.as_json(), "2013-03-24T10:30:10+01:00"
)
self.assertEqual(
inst.performedPeriod.start.date, FHIRDate("2013-03-24T09:30:10+01:00").date
)
self.assertEqual(
inst.performedPeriod.start.as_json(), "2013-03-24T09:30:10+01:00"
)
self.assertEqual(
force_bytes(inst.performer[0].role.coding[0].code), force_bytes("01.000")
)
self.assertEqual(
force_bytes(inst.performer[0].role.coding[0].display), force_bytes("Arts")
)
self.assertEqual(
force_bytes(inst.performer[0].role.coding[0].system),
force_bytes("urn:oid:2.16.840.1.113883.2.4.15.111"),
)
self.assertEqual(
force_bytes(inst.performer[0].role.text), force_bytes("Care role")
)
self.assertEqual(
force_bytes(inst.reasonCode[0].text),
force_bytes("abcess in retropharyngeal area"),
)
self.assertEqual(force_bytes(inst.status), force_bytes("completed"))
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
def testProcedure10(self):
inst = self.instantiate_from("procedure-example.json")
self.assertIsNotNone(inst, "Must have instantiated a Procedure instance")
self.implProcedure10(inst)
js = inst.as_json()
self.assertEqual("Procedure", js["resourceType"])
inst2 = procedure.Procedure(js)
self.implProcedure10(inst2)
def implProcedure10(self, inst):
self.assertEqual(force_bytes(inst.code.coding[0].code), force_bytes("80146002"))
self.assertEqual(
force_bytes(inst.code.coding[0].display),
force_bytes("Appendectomy (Procedure)"),
)
self.assertEqual(
force_bytes(inst.code.coding[0].system),
force_bytes("http://snomed.info/sct"),
)
self.assertEqual(force_bytes(inst.code.text), force_bytes("Appendectomy"))
self.assertEqual(
force_bytes(inst.followUp[0].text), force_bytes("ROS 5 days - 2013-04-10")
)
self.assertEqual(force_bytes(inst.id), force_bytes("example"))
self.assertEqual(
force_bytes(inst.note[0].text),
force_bytes(
"Routine Appendectomy. Appendix was inflamed and in retro-caecal position"
),
)
self.assertEqual(inst.performedDateTime.date, FHIRDate("2013-04-05").date)
self.assertEqual(inst.performedDateTime.as_json(), "2013-04-05")
self.assertEqual(
force_bytes(inst.reasonCode[0].text),
force_bytes(
"Generalized abdominal pain 24 hours. Localized in RIF with rebound and guarding"
),
)
self.assertEqual(force_bytes(inst.status), force_bytes("completed"))
self.assertEqual(
force_bytes(inst.text.div),
force_bytes(
'<div xmlns="http://www.w3.org/1999/xhtml">Routine Appendectomy</div>'
),
)
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
|
[] |
[] |
[
"FHIR_UNITTEST_DATADIR"
] |
[]
|
["FHIR_UNITTEST_DATADIR"]
|
python
| 1 | 0 | |
server/opts.go
|
// Copyright 2012-2020 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"crypto/tls"
"crypto/x509"
"errors"
"flag"
"fmt"
"io/ioutil"
"net"
"net/url"
"os"
"path"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"sync/atomic"
"time"
"github.com/nats-io/jwt/v2"
"github.com/nats-io/nkeys"
"github.com/nats-io/nats-server/v2/conf"
)
var allowUnknownTopLevelField = int32(0)
// NoErrOnUnknownFields can be used to change the behavior of the processing
// of a configuration file. By default, an error is reported if unknown
// fields are found. If `noError` is set to true, no error will be reported
// if top-level unknown fields are found.
func NoErrOnUnknownFields(noError bool) {
var val int32
if noError {
val = int32(1)
}
atomic.StoreInt32(&allowUnknownTopLevelField, val)
}
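// Illustrative usage (an assumption, not part of the original file):
//
//	server.NoErrOnUnknownFields(true)
//	opts, err := server.ProcessConfigFile("nats.conf") // "nats.conf" is a placeholder path
//
// With the flag set, unknown top-level fields no longer cause an error to be
// reported when the configuration file is processed.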
// ClusterOpts are options for clusters.
// NOTE: This structure is no longer used for monitoring endpoints
// and json tags are deprecated and may be removed in the future.
type ClusterOpts struct {
Name string `json:"-"`
Host string `json:"addr,omitempty"`
Port int `json:"cluster_port,omitempty"`
Username string `json:"-"`
Password string `json:"-"`
AuthTimeout float64 `json:"auth_timeout,omitempty"`
Permissions *RoutePermissions `json:"-"`
TLSTimeout float64 `json:"-"`
TLSConfig *tls.Config `json:"-"`
TLSMap bool `json:"-"`
ListenStr string `json:"-"`
Advertise string `json:"-"`
NoAdvertise bool `json:"-"`
ConnectRetries int `json:"-"`
}
// GatewayOpts are options for gateways.
// NOTE: This structure is no longer used for monitoring endpoints
// and json tags are deprecated and may be removed in the future.
type GatewayOpts struct {
Name string `json:"name"`
Host string `json:"addr,omitempty"`
Port int `json:"port,omitempty"`
Username string `json:"-"`
Password string `json:"-"`
AuthTimeout float64 `json:"auth_timeout,omitempty"`
TLSConfig *tls.Config `json:"-"`
TLSTimeout float64 `json:"tls_timeout,omitempty"`
TLSMap bool `json:"-"`
Advertise string `json:"advertise,omitempty"`
ConnectRetries int `json:"connect_retries,omitempty"`
Gateways []*RemoteGatewayOpts `json:"gateways,omitempty"`
RejectUnknown bool `json:"reject_unknown,omitempty"`
// Not exported, for tests.
resolver netResolver
sendQSubsBufSize int
}
// RemoteGatewayOpts are options for connecting to a remote gateway
// NOTE: This structure is no longer used for monitoring endpoints
// and json tags are deprecated and may be removed in the future.
type RemoteGatewayOpts struct {
Name string `json:"name"`
TLSConfig *tls.Config `json:"-"`
TLSTimeout float64 `json:"tls_timeout,omitempty"`
URLs []*url.URL `json:"urls,omitempty"`
}
// LeafNodeOpts are options for a given server to accept leaf node connections and/or connect to a remote cluster.
type LeafNodeOpts struct {
Host string `json:"addr,omitempty"`
Port int `json:"port,omitempty"`
Username string `json:"-"`
Password string `json:"-"`
Account string `json:"-"`
Users []*User `json:"-"`
AuthTimeout float64 `json:"auth_timeout,omitempty"`
TLSConfig *tls.Config `json:"-"`
TLSTimeout float64 `json:"tls_timeout,omitempty"`
TLSMap bool `json:"-"`
Advertise string `json:"-"`
NoAdvertise bool `json:"-"`
ReconnectInterval time.Duration `json:"-"`
// For solicited connections to other clusters/superclusters.
Remotes []*RemoteLeafOpts `json:"remotes,omitempty"`
// Not exported, for tests.
resolver netResolver
dialTimeout time.Duration
connDelay time.Duration
}
// RemoteLeafOpts are options for connecting to a remote server as a leaf node.
type RemoteLeafOpts struct {
LocalAccount string `json:"local_account,omitempty"`
URLs []*url.URL `json:"urls,omitempty"`
Credentials string `json:"-"`
TLS bool `json:"-"`
TLSConfig *tls.Config `json:"-"`
TLSTimeout float64 `json:"tls_timeout,omitempty"`
Hub bool `json:"hub,omitempty"`
DenyImports []string `json:"-"`
DenyExports []string `json:"-"`
}
// Options block for nats-server.
// NOTE: This structure is no longer used for monitoring endpoints
// and json tags are deprecated and may be removed in the future.
type Options struct {
ConfigFile string `json:"-"`
ServerName string `json:"server_name"`
Host string `json:"addr"`
Port int `json:"port"`
ClientAdvertise string `json:"-"`
Trace bool `json:"-"`
Debug bool `json:"-"`
TraceVerbose bool `json:"-"`
NoLog bool `json:"-"`
NoSigs bool `json:"-"`
NoSublistCache bool `json:"-"`
NoHeaderSupport bool `json:"-"`
DisableShortFirstPing bool `json:"-"`
Logtime bool `json:"-"`
MaxConn int `json:"max_connections"`
MaxSubs int `json:"max_subscriptions,omitempty"`
Nkeys []*NkeyUser `json:"-"`
Users []*User `json:"-"`
Accounts []*Account `json:"-"`
NoAuthUser string `json:"-"`
SystemAccount string `json:"-"`
NoSystemAccount bool `json:"-"`
AllowNewAccounts bool `json:"-"`
Username string `json:"-"`
Password string `json:"-"`
Authorization string `json:"-"`
PingInterval time.Duration `json:"ping_interval"`
MaxPingsOut int `json:"ping_max"`
HTTPHost string `json:"http_host"`
HTTPPort int `json:"http_port"`
HTTPBasePath string `json:"http_base_path"`
HTTPSPort int `json:"https_port"`
AuthTimeout float64 `json:"auth_timeout"`
MaxControlLine int32 `json:"max_control_line"`
MaxPayload int32 `json:"max_payload"`
MaxPending int64 `json:"max_pending"`
Cluster ClusterOpts `json:"cluster,omitempty"`
Gateway GatewayOpts `json:"gateway,omitempty"`
LeafNode LeafNodeOpts `json:"leaf,omitempty"`
JetStream bool `json:"jetstream"`
JetStreamMaxMemory int64 `json:"-"`
JetStreamMaxStore int64 `json:"-"`
StoreDir string `json:"-"`
Websocket WebsocketOpts `json:"-"`
ProfPort int `json:"-"`
PidFile string `json:"-"`
PortsFileDir string `json:"-"`
LogFile string `json:"-"`
LogSizeLimit int64 `json:"-"`
Syslog bool `json:"-"`
RemoteSyslog string `json:"-"`
Routes []*url.URL `json:"-"`
RoutesStr string `json:"-"`
TLSTimeout float64 `json:"tls_timeout"`
TLS bool `json:"-"`
TLSVerify bool `json:"-"`
TLSMap bool `json:"-"`
TLSCert string `json:"-"`
TLSKey string `json:"-"`
TLSCaCert string `json:"-"`
TLSConfig *tls.Config `json:"-"`
AllowNonTLS bool `json:"-"`
WriteDeadline time.Duration `json:"-"`
MaxClosedClients int `json:"-"`
LameDuckDuration time.Duration `json:"-"`
LameDuckGracePeriod time.Duration `json:"-"`
// MaxTracedMsgLen is the maximum printable length for traced messages.
MaxTracedMsgLen int `json:"-"`
// Operating a trusted NATS server
TrustedKeys []string `json:"-"`
TrustedOperators []*jwt.OperatorClaims `json:"-"`
AccountResolver AccountResolver `json:"-"`
AccountResolverTLSConfig *tls.Config `json:"-"`
resolverPreloads map[string]string
CustomClientAuthentication Authentication `json:"-"`
CustomRouterAuthentication Authentication `json:"-"`
	// CheckConfig indicates that only a configuration file syntax check should be performed, then exit.
CheckConfig bool `json:"-"`
// ConnectErrorReports specifies the number of failed attempts
// at which point server should report the failure of an initial
// connection to a route, gateway or leaf node.
// See DEFAULT_CONNECT_ERROR_REPORTS for default value.
ConnectErrorReports int
// ReconnectErrorReports is similar to ConnectErrorReports except
// that this applies to reconnect events.
ReconnectErrorReports int
// private fields, used to know if bool options are explicitly
// defined in config and/or command line params.
inConfig map[string]bool
inCmdLine map[string]bool
// private fields, used for testing
gatewaysSolicitDelay time.Duration
routeProto int
}
// WebsocketOpts are options for websocket connections.
type WebsocketOpts struct {
// The server will accept websocket client connections on this hostname/IP.
Host string
// The server will accept websocket client connections on this port.
Port int
// The host:port to advertise to websocket clients in the cluster.
Advertise string
// If no user is provided when a client connects, will default to this
// user and associated account. This user has to exist either in the
// Users defined here or in the global options.
NoAuthUser string
// Name of the cookie, which if present in WebSocket upgrade headers,
// will be treated as JWT during CONNECT phase as long as
// "jwt" specified in the CONNECT options is missing or empty.
JWTCookie string
// Authentication section. If anything is configured in this section,
// it will override the authorization configuration for regular clients.
Username string
Password string
Token string
Users []*User
Nkeys []*NkeyUser
// Timeout for the authentication process.
AuthTimeout float64
// TLS configuration is required.
TLSConfig *tls.Config
// If true, map certificate values for authentication purposes.
TLSMap bool
// If true, the Origin header must match the request's host.
SameOrigin bool
// Only origins in this list will be accepted. If empty and
// SameOrigin is false, any origin is accepted.
AllowedOrigins []string
// If set to true, the server will negotiate with clients
// if compression can be used. If this is false, no compression
// will be used (both in server and clients) since it has to
// be negotiated between both endpoints
Compression bool
// Total time allowed for the server to read the client request
// and write the response back to the client. This include the
// time needed for the TLS Handshake.
HandshakeTimeout time.Duration
}
type netResolver interface {
LookupHost(ctx context.Context, host string) ([]string, error)
}
// Clone performs a deep copy of the Options struct, returning a new clone
// with all values copied.
func (o *Options) Clone() *Options {
if o == nil {
return nil
}
clone := &Options{}
*clone = *o
if o.Users != nil {
clone.Users = make([]*User, len(o.Users))
for i, user := range o.Users {
clone.Users[i] = user.clone()
}
}
if o.Nkeys != nil {
clone.Nkeys = make([]*NkeyUser, len(o.Nkeys))
for i, nkey := range o.Nkeys {
clone.Nkeys[i] = nkey.clone()
}
}
if o.Routes != nil {
clone.Routes = deepCopyURLs(o.Routes)
}
if o.TLSConfig != nil {
clone.TLSConfig = o.TLSConfig.Clone()
}
if o.Cluster.TLSConfig != nil {
clone.Cluster.TLSConfig = o.Cluster.TLSConfig.Clone()
}
if o.Gateway.TLSConfig != nil {
clone.Gateway.TLSConfig = o.Gateway.TLSConfig.Clone()
}
if len(o.Gateway.Gateways) > 0 {
clone.Gateway.Gateways = make([]*RemoteGatewayOpts, len(o.Gateway.Gateways))
for i, g := range o.Gateway.Gateways {
clone.Gateway.Gateways[i] = g.clone()
}
}
// FIXME(dlc) - clone leaf node stuff.
return clone
}
func deepCopyURLs(urls []*url.URL) []*url.URL {
if urls == nil {
return nil
}
curls := make([]*url.URL, len(urls))
for i, u := range urls {
cu := &url.URL{}
*cu = *u
curls[i] = cu
}
return curls
}
// Configuration file authorization section.
type authorization struct {
// Singles
user string
pass string
token string
acc string
// Multiple Nkeys/Users
nkeys []*NkeyUser
users []*User
timeout float64
defaultPermissions *Permissions
}
// TLSConfigOpts holds the parsed tls config information,
// used with flag parsing
type TLSConfigOpts struct {
CertFile string
KeyFile string
CaFile string
Verify bool
Insecure bool
Map bool
Timeout float64
Ciphers []uint16
CurvePreferences []tls.CurveID
}
var tlsUsage = `
TLS configuration is specified in the tls section of a configuration file:
e.g.
tls {
cert_file: "./certs/server-cert.pem"
key_file: "./certs/server-key.pem"
ca_file: "./certs/ca.pem"
verify: true
verify_and_map: true
cipher_suites: [
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"
]
curve_preferences: [
"CurveP256",
"CurveP384",
"CurveP521"
]
}
Available cipher suites include:
`
// ProcessConfigFile processes a configuration file.
// FIXME(dlc): A bit hacky
func ProcessConfigFile(configFile string) (*Options, error) {
opts := &Options{}
if err := opts.ProcessConfigFile(configFile); err != nil {
// If only warnings then continue and return the options.
if cerr, ok := err.(*processConfigErr); ok && len(cerr.Errors()) == 0 {
return opts, nil
}
return nil, err
}
return opts, nil
}
// token is an item parsed from the configuration.
type token interface {
Value() interface{}
Line() int
IsUsedVariable() bool
SourceFile() string
Position() int
}
// unwrapValue can be used to get the token and value from an item
// to be able to report the line number in case of an incorrect
// configuration.
// also stores the token in lastToken for use in convertPanicToError
func unwrapValue(v interface{}, lastToken *token) (token, interface{}) {
switch tk := v.(type) {
case token:
if lastToken != nil {
*lastToken = tk
}
return tk, tk.Value()
default:
return nil, v
}
}
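// Illustrative usage (an assumption, mirroring the calls in processConfigFileLine):
//
//	var lt token
//	tk, v := unwrapValue(item, &lt)
//
// The returned token (when non-nil) exposes Line(), Position() and SourceFile(),
// so type errors can be reported against the exact location in the config file.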
// use in defer to recover from panic and turn it into an error associated with last token
func convertPanicToErrorList(lastToken *token, errors *[]error) {
// only recover if an error can be stored
if errors == nil {
return
} else if err := recover(); err == nil {
return
} else if lastToken != nil && *lastToken != nil {
*errors = append(*errors, &configErr{*lastToken, fmt.Sprint(err)})
} else {
*errors = append(*errors, fmt.Errorf("encountered panic without a token %v", err))
}
}
// use in defer to recover from panic and turn it into an error associated with last token
func convertPanicToError(lastToken *token, e *error) {
// only recover if an error can be stored
if e == nil || *e != nil {
return
} else if err := recover(); err == nil {
return
} else if lastToken != nil && *lastToken != nil {
*e = &configErr{*lastToken, fmt.Sprint(err)}
} else {
*e = fmt.Errorf("%v", err)
}
}
// configureSystemAccount configures a system account
// if present in the configuration.
func configureSystemAccount(o *Options, m map[string]interface{}) (retErr error) {
var lt token
	defer convertPanicToError(&lt, &retErr)
configure := func(v interface{}) error {
		tk, v := unwrapValue(v, &lt)
sa, ok := v.(string)
if !ok {
return &configErr{tk, "system account name must be a string"}
}
o.SystemAccount = sa
return nil
}
if v, ok := m["system_account"]; ok {
return configure(v)
} else if v, ok := m["system"]; ok {
return configure(v)
}
return nil
}
// ProcessConfigFile updates the Options structure with options
// present in the given configuration file.
// This version is convenient if one wants to set some default
// options and then override them with what is in the config file.
// For instance, this version allows you to do something such as:
//
// opts := &Options{Debug: true}
// opts.ProcessConfigFile(myConfigFile)
//
// If the config file contains "debug: false", after this call,
// opts.Debug would really be false. It would be impossible to
// achieve that with the non receiver ProcessConfigFile() version,
// since one would not know after the call if "debug" was not present
// or was present but set to false.
func (o *Options) ProcessConfigFile(configFile string) error {
o.ConfigFile = configFile
if configFile == "" {
return nil
}
m, err := conf.ParseFileWithChecks(configFile)
if err != nil {
return err
}
// Collect all errors and warnings and report them all together.
errors := make([]error, 0)
warnings := make([]error, 0)
// First check whether a system account has been defined,
// as that is a condition for other features to be enabled.
if err := configureSystemAccount(o, m); err != nil {
errors = append(errors, err)
}
for k, v := range m {
o.processConfigFileLine(k, v, &errors, &warnings)
}
if len(errors) > 0 || len(warnings) > 0 {
return &processConfigErr{
errors: errors,
warnings: warnings,
}
}
return nil
}
func (o *Options) processConfigFileLine(k string, v interface{}, errors *[]error, warnings *[]error) {
var lt token
	defer convertPanicToErrorList(&lt, errors)
	tk, v := unwrapValue(v, &lt)
switch strings.ToLower(k) {
case "listen":
hp, err := parseListen(v)
if err != nil {
*errors = append(*errors, &configErr{tk, err.Error()})
return
}
o.Host = hp.host
o.Port = hp.port
case "client_advertise":
o.ClientAdvertise = v.(string)
case "port":
o.Port = int(v.(int64))
case "server_name":
o.ServerName = v.(string)
case "host", "net":
o.Host = v.(string)
case "debug":
o.Debug = v.(bool)
trackExplicitVal(o, &o.inConfig, "Debug", o.Debug)
case "trace":
o.Trace = v.(bool)
trackExplicitVal(o, &o.inConfig, "Trace", o.Trace)
case "trace_verbose":
o.TraceVerbose = v.(bool)
o.Trace = v.(bool)
trackExplicitVal(o, &o.inConfig, "TraceVerbose", o.TraceVerbose)
trackExplicitVal(o, &o.inConfig, "Trace", o.Trace)
case "logtime":
o.Logtime = v.(bool)
trackExplicitVal(o, &o.inConfig, "Logtime", o.Logtime)
case "disable_sublist_cache", "no_sublist_cache":
o.NoSublistCache = v.(bool)
case "accounts":
err := parseAccounts(tk, o, errors, warnings)
if err != nil {
*errors = append(*errors, err)
return
}
case "authorization":
auth, err := parseAuthorization(tk, o, errors, warnings)
if err != nil {
*errors = append(*errors, err)
return
}
o.Username = auth.user
o.Password = auth.pass
o.Authorization = auth.token
if (auth.user != "" || auth.pass != "") && auth.token != "" {
err := &configErr{tk, "Cannot have a user/pass and token"}
*errors = append(*errors, err)
return
}
o.AuthTimeout = auth.timeout
// Check for multiple users defined
if auth.users != nil {
if auth.user != "" {
err := &configErr{tk, "Can not have a single user/pass and a users array"}
*errors = append(*errors, err)
return
}
if auth.token != "" {
err := &configErr{tk, "Can not have a token and a users array"}
*errors = append(*errors, err)
return
}
// Users may have been added from Accounts parsing, so do an append here
o.Users = append(o.Users, auth.users...)
}
// Check for nkeys
if auth.nkeys != nil {
// NKeys may have been added from Accounts parsing, so do an append here
o.Nkeys = append(o.Nkeys, auth.nkeys...)
}
case "http":
hp, err := parseListen(v)
if err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
return
}
o.HTTPHost = hp.host
o.HTTPPort = hp.port
case "https":
hp, err := parseListen(v)
if err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
return
}
o.HTTPHost = hp.host
o.HTTPSPort = hp.port
case "http_port", "monitor_port":
o.HTTPPort = int(v.(int64))
case "https_port":
o.HTTPSPort = int(v.(int64))
case "http_base_path":
o.HTTPBasePath = v.(string)
case "cluster":
err := parseCluster(tk, o, errors, warnings)
if err != nil {
*errors = append(*errors, err)
return
}
case "gateway":
if err := parseGateway(tk, o, errors, warnings); err != nil {
*errors = append(*errors, err)
return
}
case "leaf", "leafnodes":
err := parseLeafNodes(tk, o, errors, warnings)
if err != nil {
*errors = append(*errors, err)
return
}
case "jetstream":
err := parseJetStream(tk, o, errors, warnings)
if err != nil {
*errors = append(*errors, err)
return
}
case "logfile", "log_file":
o.LogFile = v.(string)
case "logfile_size_limit", "log_size_limit":
o.LogSizeLimit = v.(int64)
case "syslog":
o.Syslog = v.(bool)
trackExplicitVal(o, &o.inConfig, "Syslog", o.Syslog)
case "remote_syslog":
o.RemoteSyslog = v.(string)
case "pidfile", "pid_file":
o.PidFile = v.(string)
case "ports_file_dir":
o.PortsFileDir = v.(string)
case "prof_port":
o.ProfPort = int(v.(int64))
case "max_control_line":
if v.(int64) > 1<<31-1 {
err := &configErr{tk, fmt.Sprintf("%s value is too big", k)}
*errors = append(*errors, err)
return
}
o.MaxControlLine = int32(v.(int64))
case "max_payload":
if v.(int64) > 1<<31-1 {
err := &configErr{tk, fmt.Sprintf("%s value is too big", k)}
*errors = append(*errors, err)
return
}
o.MaxPayload = int32(v.(int64))
case "max_pending":
o.MaxPending = v.(int64)
case "max_connections", "max_conn":
o.MaxConn = int(v.(int64))
case "max_traced_msg_len":
o.MaxTracedMsgLen = int(v.(int64))
case "max_subscriptions", "max_subs":
o.MaxSubs = int(v.(int64))
case "ping_interval":
o.PingInterval = parseDuration("ping_interval", tk, v, errors, warnings)
case "ping_max":
o.MaxPingsOut = int(v.(int64))
case "tls":
tc, err := parseTLS(tk)
if err != nil {
*errors = append(*errors, err)
return
}
if o.TLSConfig, err = GenTLSConfig(tc); err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
return
}
o.TLSTimeout = tc.Timeout
o.TLSMap = tc.Map
case "allow_non_tls":
o.AllowNonTLS = v.(bool)
case "write_deadline":
o.WriteDeadline = parseDuration("write_deadline", tk, v, errors, warnings)
case "lame_duck_duration":
dur, err := time.ParseDuration(v.(string))
if err != nil {
err := &configErr{tk, fmt.Sprintf("error parsing lame_duck_duration: %v", err)}
*errors = append(*errors, err)
return
}
if dur < 30*time.Second {
err := &configErr{tk, fmt.Sprintf("invalid lame_duck_duration of %v, minimum is 30 seconds", dur)}
*errors = append(*errors, err)
return
}
o.LameDuckDuration = dur
case "lame_duck_grace_period":
dur, err := time.ParseDuration(v.(string))
if err != nil {
err := &configErr{tk, fmt.Sprintf("error parsing lame_duck_grace_period: %v", err)}
*errors = append(*errors, err)
return
}
if dur < 0 {
err := &configErr{tk, "invalid lame_duck_grace_period, needs to be positive"}
*errors = append(*errors, err)
return
}
o.LameDuckGracePeriod = dur
case "operator", "operators", "roots", "root", "root_operators", "root_operator":
opFiles := []string{}
switch v := v.(type) {
case string:
opFiles = append(opFiles, v)
case []string:
opFiles = append(opFiles, v...)
default:
err := &configErr{tk, fmt.Sprintf("error parsing operators: unsupported type %T", v)}
*errors = append(*errors, err)
}
// Assume for now these are file names, but they can also be the JWT itself inline.
o.TrustedOperators = make([]*jwt.OperatorClaims, 0, len(opFiles))
for _, fname := range opFiles {
opc, err := ReadOperatorJWT(fname)
if err != nil {
err := &configErr{tk, fmt.Sprintf("error parsing operator JWT: %v", err)}
*errors = append(*errors, err)
continue
}
o.TrustedOperators = append(o.TrustedOperators, opc)
}
if len(o.TrustedOperators) == 1 {
// In case "resolver" is defined as well, it takes precedence
if o.AccountResolver == nil {
if accUrl, err := parseURL(o.TrustedOperators[0].AccountServerURL, "account resolver"); err == nil {
// nsc automatically appends "/accounts" during nsc push
o.AccountResolver, _ = NewURLAccResolver(accUrl.String() + "/accounts")
}
}
// In case "system_account" is defined as well, it takes precedence
if o.SystemAccount == "" {
o.SystemAccount = o.TrustedOperators[0].SystemAccount
}
}
case "resolver", "account_resolver", "accounts_resolver":
// "resolver" takes precedence over value obtained from "operator".
// Clear so that parsing errors are not silently ignored.
o.AccountResolver = nil
var memResolverRe = regexp.MustCompile(`(MEM|MEMORY|mem|memory)\s*`)
var resolverRe = regexp.MustCompile(`(?:URL|url){1}(?:\({1}\s*"?([^\s"]*)"?\s*\){1})?\s*`)
str, ok := v.(string)
if !ok {
err := &configErr{tk, fmt.Sprintf("error parsing operator resolver, wrong type %T", v)}
*errors = append(*errors, err)
return
}
if memResolverRe.MatchString(str) {
o.AccountResolver = &MemAccResolver{}
} else {
items := resolverRe.FindStringSubmatch(str)
if len(items) == 2 {
url := items[1]
_, err := parseURL(url, "account resolver")
if err != nil {
*errors = append(*errors, &configErr{tk, err.Error()})
return
}
if ur, err := NewURLAccResolver(url); err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
return
} else {
o.AccountResolver = ur
}
}
}
if o.AccountResolver == nil {
err := &configErr{tk, "error parsing account resolver, should be MEM or URL(\"url\")"}
*errors = append(*errors, err)
}
case "resolver_tls":
tc, err := parseTLS(tk)
if err != nil {
*errors = append(*errors, err)
return
}
if o.AccountResolverTLSConfig, err = GenTLSConfig(tc); err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
return
}
case "resolver_preload":
mp, ok := v.(map[string]interface{})
if !ok {
err := &configErr{tk, "preload should be a map of account_public_key:account_jwt"}
*errors = append(*errors, err)
return
}
o.resolverPreloads = make(map[string]string)
for key, val := range mp {
tk, val = unwrapValue(val, &lt)
if jwtstr, ok := val.(string); !ok {
err := &configErr{tk, "preload map value should be a string JWT"}
*errors = append(*errors, err)
continue
} else {
// Make sure this is a valid account JWT, that is a config error.
// We will warn of expirations, etc later.
if _, err := jwt.DecodeAccountClaims(jwtstr); err != nil {
err := &configErr{tk, "invalid account JWT"}
*errors = append(*errors, err)
continue
}
o.resolverPreloads[key] = jwtstr
}
}
case "no_auth_user":
o.NoAuthUser = v.(string)
case "system_account", "system":
// Already processed at the beginning so we just skip them
// to not treat them as unknown values.
return
case "no_system_account", "no_system", "no_sys_acc":
o.NoSystemAccount = v.(bool)
case "trusted", "trusted_keys":
switch v := v.(type) {
case string:
o.TrustedKeys = []string{v}
case []string:
o.TrustedKeys = v
case []interface{}:
keys := make([]string, 0, len(v))
for _, mv := range v {
tk, mv = unwrapValue(mv, &lt)
if key, ok := mv.(string); ok {
keys = append(keys, key)
} else {
err := &configErr{tk, fmt.Sprintf("error parsing trusted: unsupported type in array %T", mv)}
*errors = append(*errors, err)
continue
}
}
o.TrustedKeys = keys
default:
err := &configErr{tk, fmt.Sprintf("error parsing trusted: unsupported type %T", v)}
*errors = append(*errors, err)
}
// Do a quick sanity check on keys
for _, key := range o.TrustedKeys {
if !nkeys.IsValidPublicOperatorKey(key) {
err := &configErr{tk, fmt.Sprintf("trust key %q required to be a valid public operator nkey", key)}
*errors = append(*errors, err)
}
}
case "connect_error_reports":
o.ConnectErrorReports = int(v.(int64))
case "reconnect_error_reports":
o.ReconnectErrorReports = int(v.(int64))
case "websocket", "ws":
if err := parseWebsocket(tk, o, errors, warnings); err != nil {
*errors = append(*errors, err)
return
}
default:
if au := atomic.LoadInt32(&allowUnknownTopLevelField); au == 0 && !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: k,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
}
}
}
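// parseDuration parses a config value that is either a duration string or,
// for backward compatibility, a bare integer interpreted as seconds (which
// also emits a conversion warning). Illustrative forms for the fields that
// use it (e.g. write_deadline, ping_interval):
//
//    write_deadline: "10s"
//    write_deadline: 10    # legacy form, read as 10 seconds with a warning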
func parseDuration(field string, tk token, v interface{}, errors *[]error, warnings *[]error) time.Duration {
if wd, ok := v.(string); ok {
if dur, err := time.ParseDuration(wd); err != nil {
err := &configErr{tk, fmt.Sprintf("error parsing %s: %v", field, err)}
*errors = append(*errors, err)
return 0
} else {
return dur
}
} else {
// Backward compatible with old type, assume this is the
// number of seconds.
err := &configWarningErr{
field: field,
configErr: configErr{
token: tk,
reason: field + " should be converted to a duration",
},
}
*warnings = append(*warnings, err)
return time.Duration(v.(int64)) * time.Second
}
}
func trackExplicitVal(opts *Options, pm *map[string]bool, name string, val bool) {
m := *pm
if m == nil {
m = make(map[string]bool)
*pm = m
}
m[name] = val
}
// hostPort is simple struct to hold parsed listen/addr strings.
type hostPort struct {
host string
port int
}
// parseListen will parse the listen option, which replaces host/net and port.
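// For illustration, both of these forms are accepted:
//
//    listen: 4222              # port only
//    listen: "0.0.0.0:4222"    # host and port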
func parseListen(v interface{}) (*hostPort, error) {
hp := &hostPort{}
switch vv := v.(type) {
// Only a port
case int64:
hp.port = int(vv)
case string:
host, port, err := net.SplitHostPort(vv)
if err != nil {
return nil, fmt.Errorf("could not parse address string %q", vv)
}
hp.port, err = strconv.Atoi(port)
if err != nil {
return nil, fmt.Errorf("could not parse port %q", port)
}
hp.host = host
default:
return nil, fmt.Errorf("expected port or host:port, got %T", vv)
}
return hp, nil
}
// parseCluster will parse the cluster config.
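// An illustrative cluster block covering fields handled here:
//
//    cluster {
//      name: "my-cluster"
//      listen: "0.0.0.0:6222"
//      routes: ["nats-route://127.0.0.1:6222"]
//      no_advertise: true
//    }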
func parseCluster(v interface{}, opts *Options, errors *[]error, warnings *[]error) error {
var lt token
defer convertPanicToErrorList(&lt, errors)
tk, v := unwrapValue(v, &lt)
cm, ok := v.(map[string]interface{})
if !ok {
return &configErr{tk, fmt.Sprintf("Expected map to define cluster, got %T", v)}
}
for mk, mv := range cm {
// Again, unwrap token value if line check is required.
tk, mv = unwrapValue(mv, &lt)
switch strings.ToLower(mk) {
case "name":
opts.Cluster.Name = mv.(string)
case "listen":
hp, err := parseListen(mv)
if err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
continue
}
opts.Cluster.Host = hp.host
opts.Cluster.Port = hp.port
case "port":
opts.Cluster.Port = int(mv.(int64))
case "host", "net":
opts.Cluster.Host = mv.(string)
case "authorization":
auth, err := parseAuthorization(tk, opts, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
if auth.users != nil {
err := &configErr{tk, "Cluster authorization does not allow multiple users"}
*errors = append(*errors, err)
continue
}
opts.Cluster.Username = auth.user
opts.Cluster.Password = auth.pass
opts.Cluster.AuthTimeout = auth.timeout
if auth.defaultPermissions != nil {
err := &configWarningErr{
field: mk,
configErr: configErr{
token: tk,
reason: `setting "permissions" within cluster authorization block is deprecated`,
},
}
*warnings = append(*warnings, err)
// Do not set permissions if they were specified in top-level cluster block.
if opts.Cluster.Permissions == nil {
setClusterPermissions(&opts.Cluster, auth.defaultPermissions)
}
}
case "routes":
ra := mv.([]interface{})
routes, errs := parseURLs(ra, "route")
if errs != nil {
*errors = append(*errors, errs...)
continue
}
opts.Routes = routes
case "tls":
config, tlsopts, err := getTLSConfig(tk)
if err != nil {
*errors = append(*errors, err)
continue
}
opts.Cluster.TLSConfig = config
opts.Cluster.TLSTimeout = tlsopts.Timeout
opts.Cluster.TLSMap = tlsopts.Map
case "cluster_advertise", "advertise":
opts.Cluster.Advertise = mv.(string)
case "no_advertise":
opts.Cluster.NoAdvertise = mv.(bool)
trackExplicitVal(opts, &opts.inConfig, "Cluster.NoAdvertise", opts.Cluster.NoAdvertise)
case "connect_retries":
opts.Cluster.ConnectRetries = int(mv.(int64))
case "permissions":
perms, err := parseUserPermissions(mv, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
// Dynamic response permissions do not make sense here.
if perms.Response != nil {
err := &configErr{tk, "Cluster permissions do not support dynamic responses"}
*errors = append(*errors, err)
continue
}
// This will possibly override permissions that were defined in the auth block.
setClusterPermissions(&opts.Cluster, perms)
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
return nil
}
func parseURLs(a []interface{}, typ string) (urls []*url.URL, errors []error) {
urls = make([]*url.URL, 0, len(a))
var lt token
defer convertPanicToErrorList(&lt, &errors)
for _, u := range a {
tk, u := unwrapValue(u, &lt)
sURL := u.(string)
url, err := parseURL(sURL, typ)
if err != nil {
err := &configErr{tk, err.Error()}
errors = append(errors, err)
continue
}
urls = append(urls, url)
}
return urls, errors
}
func parseURL(u string, typ string) (*url.URL, error) {
urlStr := strings.TrimSpace(u)
url, err := url.Parse(urlStr)
if err != nil {
return nil, fmt.Errorf("error parsing %s url [%q]", typ, urlStr)
}
return url, nil
}
func parseGateway(v interface{}, o *Options, errors *[]error, warnings *[]error) error {
var lt token
defer convertPanicToErrorList(&lt, errors)
tk, v := unwrapValue(v, &lt)
gm, ok := v.(map[string]interface{})
if !ok {
return &configErr{tk, fmt.Sprintf("Expected gateway to be a map, got %T", v)}
}
for mk, mv := range gm {
// Again, unwrap token value if line check is required.
tk, mv = unwrapValue(mv, &lt)
switch strings.ToLower(mk) {
case "name":
o.Gateway.Name = mv.(string)
case "listen":
hp, err := parseListen(mv)
if err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
continue
}
o.Gateway.Host = hp.host
o.Gateway.Port = hp.port
case "port":
o.Gateway.Port = int(mv.(int64))
case "host", "net":
o.Gateway.Host = mv.(string)
case "authorization":
auth, err := parseAuthorization(tk, o, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
if auth.users != nil {
*errors = append(*errors, &configErr{tk, "Gateway authorization does not allow multiple users"})
continue
}
o.Gateway.Username = auth.user
o.Gateway.Password = auth.pass
o.Gateway.AuthTimeout = auth.timeout
case "tls":
config, tlsopts, err := getTLSConfig(tk)
if err != nil {
*errors = append(*errors, err)
continue
}
o.Gateway.TLSConfig = config
o.Gateway.TLSTimeout = tlsopts.Timeout
o.Gateway.TLSMap = tlsopts.Map
case "advertise":
o.Gateway.Advertise = mv.(string)
case "connect_retries":
o.Gateway.ConnectRetries = int(mv.(int64))
case "gateways":
gateways, err := parseGateways(mv, errors, warnings)
if err != nil {
return err
}
o.Gateway.Gateways = gateways
case "reject_unknown":
o.Gateway.RejectUnknown = mv.(bool)
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
return nil
}
var dynamicJSAccountLimits = &JetStreamAccountLimits{-1, -1, -1, -1}
// Parses jetstream account limits for an account. Simple setup with boolean is allowed, and we will
// use dynamic account limits.
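// Illustrative account-level forms accepted here:
//
//    jetstream: enabled
//
//    jetstream {
//      max_memory: 1073741824
//      max_file: 10737418240
//      max_streams: 10
//      max_consumers: 100
//    }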
func parseJetStreamForAccount(v interface{}, acc *Account, errors *[]error, warnings *[]error) error {
var lt token
tk, v := unwrapValue(v, &lt)
// Value here can be bool, or string "enabled" or a map.
switch vv := v.(type) {
case bool:
if vv {
acc.jsLimits = dynamicJSAccountLimits
}
case string:
switch strings.ToLower(vv) {
case "enabled", "enable":
acc.jsLimits = dynamicJSAccountLimits
case "disabled", "disable":
acc.jsLimits = nil
default:
return &configErr{tk, fmt.Sprintf("Expected 'enabled' or 'disabled' for string value, got '%s'", vv)}
}
case map[string]interface{}:
jsLimits := &JetStreamAccountLimits{-1, -1, -1, -1}
for mk, mv := range vv {
tk, mv = unwrapValue(mv, &lt)
switch strings.ToLower(mk) {
case "max_memory", "max_mem", "mem", "memory":
vv, ok := mv.(int64)
if !ok {
return &configErr{tk, fmt.Sprintf("Expected a parseable size for %q, got %v", mk, mv)}
}
jsLimits.MaxMemory = int64(vv)
case "max_store", "max_file", "max_disk", "store", "disk":
vv, ok := mv.(int64)
if !ok {
return &configErr{tk, fmt.Sprintf("Expected a parseable size for %q, got %v", mk, mv)}
}
jsLimits.MaxStore = int64(vv)
case "max_streams", "streams":
vv, ok := mv.(int64)
if !ok {
return &configErr{tk, fmt.Sprintf("Expected a parseable size for %q, got %v", mk, mv)}
}
jsLimits.MaxStreams = int(vv)
case "max_consumers", "consumers":
vv, ok := mv.(int64)
if !ok {
return &configErr{tk, fmt.Sprintf("Expected a parseable size for %q, got %v", mk, mv)}
}
jsLimits.MaxConsumers = int(vv)
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
acc.jsLimits = jsLimits
default:
return &configErr{tk, fmt.Sprintf("Expected map, bool or string to define JetStream, got %T", v)}
}
return nil
}
// Parse enablement of jetstream for a server.
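// Illustrative server-level forms accepted here:
//
//    jetstream: enabled
//
//    jetstream {
//      store_dir: "/var/lib/nats/jetstream"
//      max_memory_store: 1073741824
//      max_file_store: 10737418240
//    }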
func parseJetStream(v interface{}, opts *Options, errors *[]error, warnings *[]error) error {
var lt token
tk, v := unwrapValue(v, &lt)
// Value here can be bool, or string "enabled" or a map.
switch vv := v.(type) {
case bool:
opts.JetStream = v.(bool)
case string:
switch strings.ToLower(vv) {
case "enabled", "enable":
opts.JetStream = true
case "disabled", "disable":
opts.JetStream = false
default:
return &configErr{tk, fmt.Sprintf("Expected 'enabled' or 'disabled' for string value, got '%s'", vv)}
}
case map[string]interface{}:
for mk, mv := range vv {
tk, mv = unwrapValue(mv, &lt)
switch strings.ToLower(mk) {
case "store_dir", "storedir":
opts.StoreDir = mv.(string)
case "max_memory_store", "max_mem_store", "max_mem":
opts.JetStreamMaxMemory = mv.(int64)
case "max_file_store", "max_file":
opts.JetStreamMaxStore = mv.(int64)
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
opts.JetStream = true
default:
return &configErr{tk, fmt.Sprintf("Expected map, bool or string to define JetStream, got %T", v)}
}
return nil
}
// parseLeafNodes will parse the leaf node config.
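// An illustrative leafnodes block covering fields handled here:
//
//    leafnodes {
//      listen: "0.0.0.0:7422"
//      authorization {
//        user: leaf
//        password: secret
//      }
//      remotes: [
//        {url: "nats-leaf://127.0.0.1:7422", account: "APP"}
//      ]
//    }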
func parseLeafNodes(v interface{}, opts *Options, errors *[]error, warnings *[]error) error {
var lt token
defer convertPanicToErrorList(&lt, errors)
tk, v := unwrapValue(v, &lt)
cm, ok := v.(map[string]interface{})
if !ok {
return &configErr{tk, fmt.Sprintf("Expected map to define a leafnode, got %T", v)}
}
for mk, mv := range cm {
// Again, unwrap token value if line check is required.
tk, mv = unwrapValue(mv, &lt)
switch strings.ToLower(mk) {
case "listen":
hp, err := parseListen(mv)
if err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
continue
}
opts.LeafNode.Host = hp.host
opts.LeafNode.Port = hp.port
case "port":
opts.LeafNode.Port = int(mv.(int64))
case "host", "net":
opts.LeafNode.Host = mv.(string)
case "authorization":
auth, err := parseLeafAuthorization(tk, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
opts.LeafNode.Username = auth.user
opts.LeafNode.Password = auth.pass
opts.LeafNode.AuthTimeout = auth.timeout
opts.LeafNode.Account = auth.acc
opts.LeafNode.Users = auth.users
// Validate user info config for leafnode authorization
if err := validateLeafNodeAuthOptions(opts); err != nil {
*errors = append(*errors, &configErr{tk, err.Error()})
continue
}
case "remotes":
// Parse the remote options here.
remotes, err := parseRemoteLeafNodes(mv, errors, warnings)
if err != nil {
continue
}
opts.LeafNode.Remotes = remotes
case "reconnect", "reconnect_delay", "reconnect_interval":
opts.LeafNode.ReconnectInterval = time.Duration(int(mv.(int64))) * time.Second
case "tls":
tc, err := parseTLS(tk)
if err != nil {
*errors = append(*errors, err)
continue
}
if opts.LeafNode.TLSConfig, err = GenTLSConfig(tc); err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
continue
}
opts.LeafNode.TLSTimeout = tc.Timeout
case "leafnode_advertise", "advertise":
opts.LeafNode.Advertise = mv.(string)
case "no_advertise":
opts.LeafNode.NoAdvertise = mv.(bool)
trackExplicitVal(opts, &opts.inConfig, "LeafNode.NoAdvertise", opts.LeafNode.NoAdvertise)
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
return nil
}
// This is the authorization parser adapter for the leafnode's
// authorization config.
func parseLeafAuthorization(v interface{}, errors *[]error, warnings *[]error) (*authorization, error) {
var (
am map[string]interface{}
tk token
lt token
auth = &authorization{}
)
defer convertPanicToErrorList(&lt, errors)
_, v = unwrapValue(v, &lt)
am = v.(map[string]interface{})
for mk, mv := range am {
tk, mv = unwrapValue(mv, &lt)
switch strings.ToLower(mk) {
case "user", "username":
auth.user = mv.(string)
case "pass", "password":
auth.pass = mv.(string)
case "timeout":
at := float64(1)
switch mv := mv.(type) {
case int64:
at = float64(mv)
case float64:
at = mv
}
auth.timeout = at
case "users":
users, err := parseLeafUsers(tk, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
auth.users = users
case "account":
auth.acc = mv.(string)
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
}
continue
}
}
return auth, nil
}
// This is a trimmed down version of parseUsers that is adapted
// for the users possibly defined in the authorization{} section
// of leafnodes {}.
func parseLeafUsers(mv interface{}, errors *[]error, warnings *[]error) ([]*User, error) {
var (
tk token
lt token
users = []*User{}
)
defer convertPanicToErrorList(&lt, errors)
tk, mv = unwrapValue(mv, &lt)
// Make sure we have an array
uv, ok := mv.([]interface{})
if !ok {
return nil, &configErr{tk, fmt.Sprintf("Expected users field to be an array, got %v", mv)}
}
for _, u := range uv {
tk, u = unwrapValue(u, &lt)
// Check it's a map/struct
um, ok := u.(map[string]interface{})
if !ok {
err := &configErr{tk, fmt.Sprintf("Expected user entry to be a map/struct, got %v", u)}
*errors = append(*errors, err)
continue
}
user := &User{}
for k, v := range um {
tk, v = unwrapValue(v, &lt)
switch strings.ToLower(k) {
case "user", "username":
user.Username = v.(string)
case "pass", "password":
user.Password = v.(string)
case "account":
// We really want to save just the account name here, but
// the User object is *Account. So we create an account object
// but it won't be registered anywhere. The server will just
// use opts.LeafNode.Users[].Account.Name. Alternatively
// we need to create internal objects to store u/p and account
// name and have a server structure to hold that.
user.Account = NewAccount(v.(string))
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: k,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
users = append(users, user)
}
return users, nil
}
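// parseRemoteLeafNodes parses the remotes array of a leafnodes block.
// An illustrative entry covering fields handled here:
//
//    remotes: [
//      {
//        urls: ["nats-leaf://host1:7422", "nats-leaf://host2:7422"]
//        account: "APP"                      # local account to bind to
//        credentials: "/etc/nats/leaf.creds"
//        deny_imports: ["internal.>"]
//      }
//    ]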
func parseRemoteLeafNodes(v interface{}, errors *[]error, warnings *[]error) ([]*RemoteLeafOpts, error) {
var lt token
defer convertPanicToErrorList(&lt, errors)
tk, v := unwrapValue(v, &lt)
ra, ok := v.([]interface{})
if !ok {
return nil, &configErr{tk, fmt.Sprintf("Expected remotes field to be an array, got %T", v)}
}
remotes := make([]*RemoteLeafOpts, 0, len(ra))
for _, r := range ra {
tk, r = unwrapValue(r, &lt)
// Check it's a map/struct
rm, ok := r.(map[string]interface{})
if !ok {
*errors = append(*errors, &configErr{tk, fmt.Sprintf("Expected remote leafnode entry to be a map/struct, got %v", r)})
continue
}
remote := &RemoteLeafOpts{}
for k, v := range rm {
tk, v = unwrapValue(v, &lt)
switch strings.ToLower(k) {
case "url", "urls":
switch v := v.(type) {
case []interface{}, []string:
urls, errs := parseURLs(v.([]interface{}), "leafnode")
if errs != nil {
*errors = append(*errors, errs...)
continue
}
remote.URLs = urls
case string:
url, err := parseURL(v, "leafnode")
if err != nil {
*errors = append(*errors, &configErr{tk, err.Error()})
continue
}
remote.URLs = append(remote.URLs, url)
}
case "account", "local":
remote.LocalAccount = v.(string)
case "creds", "credentials":
p, err := expandPath(v.(string))
if err != nil {
*errors = append(*errors, &configErr{tk, err.Error()})
continue
}
remote.Credentials = p
case "tls":
tc, err := parseTLS(tk)
if err != nil {
*errors = append(*errors, err)
continue
}
if remote.TLSConfig, err = GenTLSConfig(tc); err != nil {
*errors = append(*errors, &configErr{tk, err.Error()})
continue
}
// If ca_file is defined, GenTLSConfig() sets TLSConfig.ClientCAs.
// Set RootCAs since this tls.Config is used when soliciting
// a connection (therefore behaves as a client).
remote.TLSConfig.RootCAs = remote.TLSConfig.ClientCAs
if tc.Timeout > 0 {
remote.TLSTimeout = tc.Timeout
} else {
remote.TLSTimeout = float64(DEFAULT_LEAF_TLS_TIMEOUT)
}
case "hub":
remote.Hub = v.(bool)
case "deny_imports", "deny_import":
subjects, err := parseSubjects(tk, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
remote.DenyImports = subjects
case "deny_exports", "deny_export":
subjects, err := parseSubjects(tk, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
remote.DenyExports = subjects
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: k,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
remotes = append(remotes, remote)
}
return remotes, nil
}
// Parse TLS and returns a TLSConfig and TLSTimeout.
// Used by cluster and gateway parsing.
func getTLSConfig(tk token) (*tls.Config, *TLSConfigOpts, error) {
tc, err := parseTLS(tk)
if err != nil {
return nil, nil, err
}
config, err := GenTLSConfig(tc)
if err != nil {
err := &configErr{tk, err.Error()}
return nil, nil, err
}
// For clusters/gateways, we will force strict verification. We also act
// as both client and server, so we mirror the CA pool loaded into
// ClientCAs over to RootCAs.
config.ClientAuth = tls.RequireAndVerifyClientCert
config.RootCAs = config.ClientCAs
return config, tc, nil
}
func parseGateways(v interface{}, errors *[]error, warnings *[]error) ([]*RemoteGatewayOpts, error) {
var lt token
defer convertPanicToErrorList(&lt, errors)
tk, v := unwrapValue(v, &lt)
// Make sure we have an array
ga, ok := v.([]interface{})
if !ok {
return nil, &configErr{tk, fmt.Sprintf("Expected gateways field to be an array, got %T", v)}
}
gateways := []*RemoteGatewayOpts{}
for _, g := range ga {
tk, g = unwrapValue(g, &lt)
// Check it's a map/struct
gm, ok := g.(map[string]interface{})
if !ok {
*errors = append(*errors, &configErr{tk, fmt.Sprintf("Expected gateway entry to be a map/struct, got %v", g)})
continue
}
gateway := &RemoteGatewayOpts{}
for k, v := range gm {
tk, v = unwrapValue(v, &lt)
switch strings.ToLower(k) {
case "name":
gateway.Name = v.(string)
case "tls":
tls, tlsopts, err := getTLSConfig(tk)
if err != nil {
*errors = append(*errors, err)
continue
}
gateway.TLSConfig = tls
gateway.TLSTimeout = tlsopts.Timeout
case "url":
url, err := parseURL(v.(string), "gateway")
if err != nil {
*errors = append(*errors, &configErr{tk, err.Error()})
continue
}
gateway.URLs = append(gateway.URLs, url)
case "urls":
urls, errs := parseURLs(v.([]interface{}), "gateway")
if errs != nil {
*errors = append(*errors, errs...)
continue
}
gateway.URLs = urls
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: k,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
gateways = append(gateways, gateway)
}
return gateways, nil
}
// Sets cluster's permissions based on given pub/sub permissions,
// doing the appropriate translation.
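// For example, an illustrative cluster block of
//
//    permissions {import: "this.>", export: "that.>"}
//
// is parsed into Publish ("import") and Subscribe ("export") by
// parseUserPermissions and translated here into RoutePermissions, so the
// route only registers interest for "this.>" with the remote and only
// accepts the remote's interest in "that.>".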
func setClusterPermissions(opts *ClusterOpts, perms *Permissions) {
// Import is whether or not we will send a SUB for interest to the other side.
// Export is whether or not we will accept a SUB from the remote for a given subject.
// Both only affect interest registration.
// The parsing sets Import into Publish and Export into Subscribe, convert
// accordingly.
opts.Permissions = &RoutePermissions{
Import: perms.Publish,
Export: perms.Subscribe,
}
}
// Temp structures to hold account import and export definitions since they need
// to be processed after being parsed.
type export struct {
acc *Account
sub string
accs []string
rt ServiceRespType
lat *serviceLatency
rthr time.Duration
}
type importStream struct {
acc *Account
an string
sub string
pre string
}
type importService struct {
acc *Account
an string
sub string
to string
share bool
}
// Checks if an account name is reserved.
func isReservedAccount(name string) bool {
return name == globalAccountName
}
// parseAccounts will parse the different accounts syntax.
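// Illustrative forms of both syntaxes handled here:
//
//    accounts: [A, B]
//
//    accounts {
//      A: {
//        users: [{user: a, password: a}]
//        exports: [{stream: "a.>"}]
//      }
//      B: {
//        users: [{user: b, password: b}]
//        imports: [{stream: {account: A, subject: "a.>"}}]
//      }
//    }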
func parseAccounts(v interface{}, opts *Options, errors *[]error, warnings *[]error) error {
var (
importStreams []*importStream
importServices []*importService
exportStreams []*export
exportServices []*export
lt token
)
defer convertPanicToErrorList(&lt, errors)
tk, v := unwrapValue(v, &lt)
switch vv := v.(type) {
// Simple array of account names.
case []interface{}, []string:
m := make(map[string]struct{}, len(v.([]interface{})))
for _, n := range v.([]interface{}) {
tk, name := unwrapValue(n, &lt)
ns := name.(string)
// Check for reserved names.
if isReservedAccount(ns) {
err := &configErr{tk, fmt.Sprintf("%q is a Reserved Account", ns)}
*errors = append(*errors, err)
continue
}
if _, ok := m[ns]; ok {
err := &configErr{tk, fmt.Sprintf("Duplicate Account Entry: %s", ns)}
*errors = append(*errors, err)
continue
}
opts.Accounts = append(opts.Accounts, NewAccount(ns))
m[ns] = struct{}{}
}
// More common map entry
case map[string]interface{}:
// Track users across accounts, must be unique across
// accounts and nkeys vs users.
uorn := make(map[string]struct{})
for aname, mv := range vv {
tk, amv := unwrapValue(mv, &lt)
// Skip referenced config vars within the account block.
if tk.IsUsedVariable() {
continue
}
// These should be maps.
mv, ok := amv.(map[string]interface{})
if !ok {
err := &configErr{tk, "Expected map entries for accounts"}
*errors = append(*errors, err)
continue
}
if isReservedAccount(aname) {
err := &configErr{tk, fmt.Sprintf("%q is a Reserved Account", aname)}
*errors = append(*errors, err)
continue
}
var (
users []*User
nkeyUsr []*NkeyUser
usersTk token
)
acc := NewAccount(aname)
opts.Accounts = append(opts.Accounts, acc)
for k, v := range mv {
tk, mv := unwrapValue(v, &lt)
switch strings.ToLower(k) {
case "nkey":
nk, ok := mv.(string)
if !ok || !nkeys.IsValidPublicAccountKey(nk) {
err := &configErr{tk, fmt.Sprintf("Not a valid public nkey for an account: %q", mv)}
*errors = append(*errors, err)
continue
}
acc.Nkey = nk
case "imports":
streams, services, err := parseAccountImports(tk, acc, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
importStreams = append(importStreams, streams...)
importServices = append(importServices, services...)
case "exports":
streams, services, err := parseAccountExports(tk, acc, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
exportStreams = append(exportStreams, streams...)
exportServices = append(exportServices, services...)
case "jetstream":
err := parseJetStreamForAccount(mv, acc, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
case "users":
var err error
usersTk = tk
nkeyUsr, users, err = parseUsers(mv, opts, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
case "default_permissions":
permissions, err := parseUserPermissions(tk, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
acc.defaultPerms = permissions
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: k,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
}
}
}
applyDefaultPermissions(users, nkeyUsr, acc.defaultPerms)
for _, u := range nkeyUsr {
if _, ok := uorn[u.Nkey]; ok {
err := &configErr{usersTk, fmt.Sprintf("Duplicate nkey %q detected", u.Nkey)}
*errors = append(*errors, err)
continue
}
uorn[u.Nkey] = struct{}{}
u.Account = acc
}
opts.Nkeys = append(opts.Nkeys, nkeyUsr...)
for _, u := range users {
if _, ok := uorn[u.Username]; ok {
err := &configErr{usersTk, fmt.Sprintf("Duplicate user %q detected", u.Username)}
*errors = append(*errors, err)
continue
}
uorn[u.Username] = struct{}{}
u.Account = acc
}
opts.Users = append(opts.Users, users...)
}
}
lt = tk
// Bail already if there are previous errors.
if len(*errors) > 0 {
return nil
}
// Parse Imports and Exports here after all accounts defined.
// Do exports first since they need to be defined for imports to succeed
// since we do permissions checks.
// Create a lookup map for accounts lookups.
am := make(map[string]*Account, len(opts.Accounts))
for _, a := range opts.Accounts {
am[a.Name] = a
}
// Do stream exports
for _, stream := range exportStreams {
// Make array of accounts if applicable.
var accounts []*Account
for _, an := range stream.accs {
ta := am[an]
if ta == nil {
msg := fmt.Sprintf("%q account not defined for stream export", an)
*errors = append(*errors, &configErr{tk, msg})
continue
}
accounts = append(accounts, ta)
}
if err := stream.acc.AddStreamExport(stream.sub, accounts); err != nil {
msg := fmt.Sprintf("Error adding stream export %q: %v", stream.sub, err)
*errors = append(*errors, &configErr{tk, msg})
continue
}
}
for _, service := range exportServices {
// Make array of accounts if applicable.
var accounts []*Account
for _, an := range service.accs {
ta := am[an]
if ta == nil {
msg := fmt.Sprintf("%q account not defined for service export", an)
*errors = append(*errors, &configErr{tk, msg})
continue
}
accounts = append(accounts, ta)
}
if err := service.acc.AddServiceExportWithResponse(service.sub, service.rt, accounts); err != nil {
msg := fmt.Sprintf("Error adding service export %q: %v", service.sub, err)
*errors = append(*errors, &configErr{tk, msg})
continue
}
if service.rthr != 0 {
// Response threshold was set in options.
if err := service.acc.SetServiceExportResponseThreshold(service.sub, service.rthr); err != nil {
msg := fmt.Sprintf("Error adding service export response threshold for %q: %v", service.sub, err)
*errors = append(*errors, &configErr{tk, msg})
continue
}
}
if service.lat != nil {
if opts.SystemAccount == "" {
msg := fmt.Sprintf("Error adding service latency sampling for %q: %v", service.sub, ErrNoSysAccount.Error())
*errors = append(*errors, &configErr{tk, msg})
continue
}
if err := service.acc.TrackServiceExportWithSampling(service.sub, service.lat.subject, int(service.lat.sampling)); err != nil {
msg := fmt.Sprintf("Error adding service latency sampling for %q on subject %q: %v", service.sub, service.lat.subject, err)
*errors = append(*errors, &configErr{tk, msg})
continue
}
}
}
for _, stream := range importStreams {
ta := am[stream.an]
if ta == nil {
msg := fmt.Sprintf("%q account not defined for stream import", stream.an)
*errors = append(*errors, &configErr{tk, msg})
continue
}
if err := stream.acc.AddStreamImport(ta, stream.sub, stream.pre); err != nil {
msg := fmt.Sprintf("Error adding stream import %q: %v", stream.sub, err)
*errors = append(*errors, &configErr{tk, msg})
continue
}
}
for _, service := range importServices {
ta := am[service.an]
if ta == nil {
msg := fmt.Sprintf("%q account not defined for service import", service.an)
*errors = append(*errors, &configErr{tk, msg})
continue
}
if service.to == "" {
service.to = service.sub
}
if err := service.acc.AddServiceImport(ta, service.to, service.sub); err != nil {
msg := fmt.Sprintf("Error adding service import %q: %v", service.sub, err)
*errors = append(*errors, &configErr{tk, msg})
continue
}
if err := service.acc.SetServiceImportSharing(ta, service.sub, service.share); err != nil {
msg := fmt.Sprintf("Error setting service import sharing %q: %v", service.sub, err)
*errors = append(*errors, &configErr{tk, msg})
continue
}
}
return nil
}
// Parse the account exports
func parseAccountExports(v interface{}, acc *Account, errors, warnings *[]error) ([]*export, []*export, error) {
var lt token
defer convertPanicToErrorList(&lt, errors)
// This should be an array of objects/maps.
tk, v := unwrapValue(v, &lt)
ims, ok := v.([]interface{})
if !ok {
return nil, nil, &configErr{tk, fmt.Sprintf("Exports should be an array, got %T", v)}
}
var services []*export
var streams []*export
for _, v := range ims {
// Should have stream or service
stream, service, err := parseExportStreamOrService(v, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
if service != nil {
service.acc = acc
services = append(services, service)
}
if stream != nil {
stream.acc = acc
streams = append(streams, stream)
}
}
return streams, services, nil
}
// Parse the account imports
func parseAccountImports(v interface{}, acc *Account, errors, warnings *[]error) ([]*importStream, []*importService, error) {
var lt token
defer convertPanicToErrorList(&lt, errors)
// This should be an array of objects/maps.
tk, v := unwrapValue(v, &lt)
ims, ok := v.([]interface{})
if !ok {
return nil, nil, &configErr{tk, fmt.Sprintf("Imports should be an array, got %T", v)}
}
var services []*importService
var streams []*importStream
svcSubjects := map[string]*importService{}
for _, v := range ims {
// Should have stream or service
stream, service, err := parseImportStreamOrService(v, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
if service != nil {
if dup := svcSubjects[service.to]; dup != nil {
tk, _ := unwrapValue(v, &lt)
err := &configErr{tk,
fmt.Sprintf("Duplicate service import subject %q, previously used in import for account %q, subject %q",
service.to, dup.an, dup.sub)}
*errors = append(*errors, err)
continue
}
svcSubjects[service.to] = service
service.acc = acc
services = append(services, service)
}
if stream != nil {
stream.acc = acc
streams = append(streams, stream)
}
}
return streams, services, nil
}
// Helper to parse an embedded account description for imported services or streams.
func parseAccount(v map[string]interface{}, errors, warnings *[]error) (string, string, error) {
var lt token
defer convertPanicToErrorList(&lt, errors)
var accountName, subject string
for mk, mv := range v {
tk, mv := unwrapValue(mv, &lt)
switch strings.ToLower(mk) {
case "account":
accountName = mv.(string)
case "subject":
subject = mv.(string)
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
}
}
}
return accountName, subject, nil
}
// Parse an export stream or service.
// e.g.
// {stream: "public.>"} # No accounts means public.
// {stream: "synadia.private.>", accounts: [cncf, natsio]}
// {service: "pub.request"} # No accounts means public.
// {service: "pub.special.request", accounts: [nats.io]}
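// Illustrative service exports using the optional fields handled below:
// {service: "pub.special.request", response_type: "stream", threshold: "2s"}
// {service: "pub.special.request", latency: {sampling: 50%, subject: "latency.svc"}}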
func parseExportStreamOrService(v interface{}, errors, warnings *[]error) (*export, *export, error) {
var (
curStream *export
curService *export
accounts []string
rt ServiceRespType
rtSeen bool
rtToken token
lat *serviceLatency
threshSeen bool
thresh time.Duration
latToken token
lt token
)
defer convertPanicToErrorList(&lt, errors)
tk, v := unwrapValue(v, &lt)
vv, ok := v.(map[string]interface{})
if !ok {
return nil, nil, &configErr{tk, fmt.Sprintf("Export Items should be a map with type entry, got %T", v)}
}
for mk, mv := range vv {
tk, mv := unwrapValue(mv, &lt)
switch strings.ToLower(mk) {
case "stream":
if curService != nil {
err := &configErr{tk, fmt.Sprintf("Detected stream %q but already saw a service", mv)}
*errors = append(*errors, err)
continue
}
if rtToken != nil {
err := &configErr{rtToken, "Detected response directive on non-service"}
*errors = append(*errors, err)
continue
}
if latToken != nil {
err := &configErr{latToken, "Detected latency directive on non-service"}
*errors = append(*errors, err)
continue
}
mvs, ok := mv.(string)
if !ok {
err := &configErr{tk, fmt.Sprintf("Expected stream name to be string, got %T", mv)}
*errors = append(*errors, err)
continue
}
curStream = &export{sub: mvs}
if accounts != nil {
curStream.accs = accounts
}
case "service":
if curStream != nil {
err := &configErr{tk, fmt.Sprintf("Detected service %q but already saw a stream", mv)}
*errors = append(*errors, err)
continue
}
mvs, ok := mv.(string)
if !ok {
err := &configErr{tk, fmt.Sprintf("Expected service name to be string, got %T", mv)}
*errors = append(*errors, err)
continue
}
curService = &export{sub: mvs}
if accounts != nil {
curService.accs = accounts
}
if rtSeen {
curService.rt = rt
}
if lat != nil {
curService.lat = lat
}
if threshSeen {
curService.rthr = thresh
}
case "response", "response_type":
if rtSeen {
err := &configErr{tk, "Duplicate response type definition"}
*errors = append(*errors, err)
continue
}
rtSeen = true
rtToken = tk
mvs, ok := mv.(string)
if !ok {
err := &configErr{tk, fmt.Sprintf("Expected response type to be string, got %T", mv)}
*errors = append(*errors, err)
continue
}
switch strings.ToLower(mvs) {
case "single", "singleton":
rt = Singleton
case "stream":
rt = Streamed
case "chunk", "chunked":
rt = Chunked
default:
err := &configErr{tk, fmt.Sprintf("Unknown response type: %q", mvs)}
*errors = append(*errors, err)
continue
}
if curService != nil {
curService.rt = rt
}
if curStream != nil {
err := &configErr{tk, "Detected response directive on non-service"}
*errors = append(*errors, err)
}
case "threshold", "response_threshold", "response_max_time", "response_time":
if threshSeen {
err := &configErr{tk, "Duplicate response threshold detected"}
*errors = append(*errors, err)
continue
}
threshSeen = true
mvs, ok := mv.(string)
if !ok {
err := &configErr{tk, fmt.Sprintf("Expected response threshold to be a parseable time duration, got %T", mv)}
*errors = append(*errors, err)
continue
}
var err error
thresh, err = time.ParseDuration(mvs)
if err != nil {
err := &configErr{tk, fmt.Sprintf("Expected response threshold to be a parseable time duration, got %q", mvs)}
*errors = append(*errors, err)
continue
}
if curService != nil {
curService.rthr = thresh
}
if curStream != nil {
err := &configErr{tk, "Detected response directive on non-service"}
*errors = append(*errors, err)
}
case "accounts":
for _, iv := range mv.([]interface{}) {
_, mv := unwrapValue(iv, &lt)
accounts = append(accounts, mv.(string))
}
if curStream != nil {
curStream.accs = accounts
} else if curService != nil {
curService.accs = accounts
}
case "latency":
latToken = tk
var err error
lat, err = parseServiceLatency(tk, mv)
if err != nil {
*errors = append(*errors, err)
continue
}
if curStream != nil {
err = &configErr{tk, "Detected latency directive on non-service"}
*errors = append(*errors, err)
continue
}
if curService != nil {
curService.lat = lat
}
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
}
}
}
return curStream, curService, nil
}
// parseServiceLatency returns a latency config block.
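// Illustrative forms accepted here:
//
//    latency: "latency.svc"                            # subject only, default sampling
//    latency: {sampling: 50%, subject: "latency.svc"}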
func parseServiceLatency(root token, v interface{}) (l *serviceLatency, retErr error) {
var lt token
defer convertPanicToError(&lt, &retErr)
if subject, ok := v.(string); ok {
return &serviceLatency{
subject: subject,
sampling: DEFAULT_SERVICE_LATENCY_SAMPLING,
}, nil
}
latency, ok := v.(map[string]interface{})
if !ok {
return nil, &configErr{token: root,
reason: fmt.Sprintf("Expected latency entry to be a map/struct or string, got %T", v)}
}
sl := serviceLatency{
sampling: DEFAULT_SERVICE_LATENCY_SAMPLING,
}
// Read sampling value.
if v, ok := latency["sampling"]; ok {
tk, v := unwrapValue(v, &lt)
var sample int64
switch vv := v.(type) {
case int64:
// Sample is an int, like 50.
sample = vv
case string:
// Sample is a string, like "50%".
s := strings.TrimSuffix(vv, "%")
n, err := strconv.Atoi(s)
if err != nil {
return nil, &configErr{token: tk,
reason: fmt.Sprintf("Failed to parse latency sample: %v", err)}
}
sample = int64(n)
default:
return nil, &configErr{token: tk,
reason: fmt.Sprintf("Expected latency sample to be a string or map/struct, got %T", v)}
}
if sample < 1 || sample > 100 {
return nil, &configErr{token: tk,
reason: ErrBadSampling.Error()}
}
sl.sampling = int8(sample)
}
// Read subject value.
v, ok = latency["subject"]
if !ok {
return nil, &configErr{token: root,
reason: "Latency subject required, but missing"}
}
tk, v := unwrapValue(v, &lt)
subject, ok := v.(string)
if !ok {
return nil, &configErr{token: tk,
reason: fmt.Sprintf("Expected latency subject to be a string, got %T", subject)}
}
sl.subject = subject
return &sl, nil
}
// Parse an import stream or service.
// e.g.
// {stream: {account: "synadia", subject:"public.synadia"}, prefix: "imports.synadia"}
// {stream: {account: "synadia", subject:"synadia.private.*"}}
// {service: {account: "synadia", subject: "pub.special.request"}, to: "synadia.request"}
func parseImportStreamOrService(v interface{}, errors, warnings *[]error) (*importStream, *importService, error) {
var (
curStream *importStream
curService *importService
pre, to string
share bool
lt token
)
defer convertPanicToErrorList(&lt, errors)
tk, mv := unwrapValue(v, &lt)
vv, ok := mv.(map[string]interface{})
if !ok {
return nil, nil, &configErr{tk, fmt.Sprintf("Import Items should be a map with type entry, got %T", mv)}
}
for mk, mv := range vv {
tk, mv := unwrapValue(mv, &lt)
switch strings.ToLower(mk) {
case "stream":
if curService != nil {
err := &configErr{tk, "Detected stream but already saw a service"}
*errors = append(*errors, err)
continue
}
ac, ok := mv.(map[string]interface{})
if !ok {
err := &configErr{tk, fmt.Sprintf("Stream entry should be an account map, got %T", mv)}
*errors = append(*errors, err)
continue
}
// Make sure this is a map with account and subject
accountName, subject, err := parseAccount(ac, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
if accountName == "" || subject == "" {
err := &configErr{tk, "Expect an account name and a subject"}
*errors = append(*errors, err)
continue
}
curStream = &importStream{an: accountName, sub: subject}
if pre != "" {
curStream.pre = pre
}
case "service":
if curStream != nil {
err := &configErr{tk, "Detected service but already saw a stream"}
*errors = append(*errors, err)
continue
}
ac, ok := mv.(map[string]interface{})
if !ok {
err := &configErr{tk, fmt.Sprintf("Service entry should be an account map, got %T", mv)}
*errors = append(*errors, err)
continue
}
// Make sure this is a map with account and subject
accountName, subject, err := parseAccount(ac, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
if accountName == "" || subject == "" {
err := &configErr{tk, "Expect an account name and a subject"}
*errors = append(*errors, err)
continue
}
curService = &importService{an: accountName, sub: subject}
if to != "" {
curService.to = to
} else {
curService.to = subject
}
curService.share = share
case "prefix":
pre = mv.(string)
if curStream != nil {
curStream.pre = pre
}
case "to":
to = mv.(string)
if curService != nil {
curService.to = to
}
case "share":
share = mv.(bool)
if curService != nil {
curService.share = share
}
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
}
}
}
return curStream, curService, nil
}
// Apply permission defaults to users/nkeyuser that don't have their own.
func applyDefaultPermissions(users []*User, nkeys []*NkeyUser, defaultP *Permissions) {
if defaultP == nil {
return
}
for _, user := range users {
if user.Permissions == nil {
user.Permissions = defaultP
}
}
for _, user := range nkeys {
if user.Permissions == nil {
user.Permissions = defaultP
}
}
}
// Helper function to parse Authorization configs.
func parseAuthorization(v interface{}, opts *Options, errors *[]error, warnings *[]error) (*authorization, error) {
var (
am map[string]interface{}
tk token
lt token
auth = &authorization{}
)
defer convertPanicToErrorList(&lt, errors)
_, v = unwrapValue(v, &lt)
am = v.(map[string]interface{})
for mk, mv := range am {
tk, mv = unwrapValue(mv, &lt)
switch strings.ToLower(mk) {
case "user", "username":
auth.user = mv.(string)
case "pass", "password":
auth.pass = mv.(string)
case "token":
auth.token = mv.(string)
case "timeout":
at := float64(1)
switch mv := mv.(type) {
case int64:
at = float64(mv)
case float64:
at = mv
}
auth.timeout = at
case "users":
nkeys, users, err := parseUsers(tk, opts, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
auth.users = users
auth.nkeys = nkeys
case "default_permission", "default_permissions", "permissions":
permissions, err := parseUserPermissions(tk, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
auth.defaultPermissions = permissions
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
}
continue
}
applyDefaultPermissions(auth.users, auth.nkeys, auth.defaultPermissions)
}
return auth, nil
}
// Helper function to parse multiple users array with optional permissions.
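// An illustrative users array handled here, mixing a password user and an
// nkey user (public user nkeys start with "U"):
//
//    users: [
//      {user: alice, password: secret, permissions: {publish: "alice.>"}}
//      {nkey: U...PUBLICUSERNKEY...}
//    ]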
func parseUsers(mv interface{}, opts *Options, errors *[]error, warnings *[]error) ([]*NkeyUser, []*User, error) {
var (
tk token
lt token
keys []*NkeyUser
users = []*User{}
)
defer convertPanicToErrorList(&lt, errors)
tk, mv = unwrapValue(mv, &lt)
// Make sure we have an array
uv, ok := mv.([]interface{})
if !ok {
return nil, nil, &configErr{tk, fmt.Sprintf("Expected users field to be an array, got %v", mv)}
}
for _, u := range uv {
tk, u = unwrapValue(u, &lt)
// Check it's a map/struct
um, ok := u.(map[string]interface{})
if !ok {
err := &configErr{tk, fmt.Sprintf("Expected user entry to be a map/struct, got %v", u)}
*errors = append(*errors, err)
continue
}
var (
user = &User{}
nkey = &NkeyUser{}
perms *Permissions
err error
)
for k, v := range um {
// Also needs to unwrap first
tk, v = unwrapValue(v, &lt)
switch strings.ToLower(k) {
case "nkey":
nkey.Nkey = v.(string)
case "user", "username":
user.Username = v.(string)
case "pass", "password":
user.Password = v.(string)
case "permission", "permissions", "authorization":
perms, err = parseUserPermissions(tk, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: k,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
// Place perms if we have them.
if perms != nil {
// nkey takes precedence.
if nkey.Nkey != "" {
nkey.Permissions = perms
} else {
user.Permissions = perms
}
}
// Check to make sure we have at least an nkey or username <password> defined.
if nkey.Nkey == "" && user.Username == "" {
return nil, nil, &configErr{tk, "User entry requires a user"}
} else if nkey.Nkey != "" {
// Make sure the nkey is a proper public nkey for a user.
if !nkeys.IsValidPublicUserKey(nkey.Nkey) {
return nil, nil, &configErr{tk, "Not a valid public nkey for a user"}
}
// If we have user or password defined here that is an error.
if user.Username != "" || user.Password != "" {
return nil, nil, &configErr{tk, "Nkey users do not take usernames or passwords"}
}
keys = append(keys, nkey)
} else {
users = append(users, user)
}
}
return keys, users, nil
}
// Helper function to parse user/account permissions
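// An illustrative permissions block covering fields handled here:
//
//    permissions {
//      publish: {allow: ["foo.>"], deny: ["foo.private.>"]}
//      subscribe: "bar.>"
//      allow_responses: {max: 5, expires: "1m"}
//    }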
func parseUserPermissions(mv interface{}, errors, warnings *[]error) (*Permissions, error) {
var (
tk token
lt token
p = &Permissions{}
)
defer convertPanicToErrorList(&lt, errors)
tk, mv = unwrapValue(mv, &lt)
pm, ok := mv.(map[string]interface{})
if !ok {
return nil, &configErr{tk, fmt.Sprintf("Expected permissions to be a map/struct, got %+v", mv)}
}
for k, v := range pm {
tk, mv = unwrapValue(v, &lt)
switch strings.ToLower(k) {
// For routes:
// Import is Publish
// Export is Subscribe
case "pub", "publish", "import":
perms, err := parseVariablePermissions(mv, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
p.Publish = perms
case "sub", "subscribe", "export":
perms, err := parseVariablePermissions(mv, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
p.Subscribe = perms
case "publish_allow_responses", "allow_responses":
rp := &ResponsePermission{
MaxMsgs: DEFAULT_ALLOW_RESPONSE_MAX_MSGS,
Expires: DEFAULT_ALLOW_RESPONSE_EXPIRATION,
}
// Try boolean first
responses, ok := mv.(bool)
if ok {
if responses {
p.Response = rp
}
} else {
p.Response = parseAllowResponses(v, errors, warnings)
}
if p.Response != nil {
if p.Publish == nil {
p.Publish = &SubjectPermission{}
}
if p.Publish.Allow == nil {
// We turn off the blanket allow statement.
p.Publish.Allow = []string{}
}
}
default:
if !tk.IsUsedVariable() {
err := &configErr{tk, fmt.Sprintf("Unknown field %q parsing permissions", k)}
*errors = append(*errors, err)
}
}
}
return p, nil
}
// Top level parser for authorization configurations.
func parseVariablePermissions(v interface{}, errors, warnings *[]error) (*SubjectPermission, error) {
switch vv := v.(type) {
case map[string]interface{}:
// New style with allow and/or deny properties.
return parseSubjectPermission(vv, errors, warnings)
default:
// Old style
return parseOldPermissionStyle(v, errors, warnings)
}
}
// Helper function to parse subject singletons and/or arrays
func parseSubjects(v interface{}, errors, warnings *[]error) ([]string, error) {
var lt token
defer convertPanicToErrorList(&lt, errors)
tk, v := unwrapValue(v, &lt)
var subjects []string
switch vv := v.(type) {
case string:
subjects = append(subjects, vv)
case []string:
subjects = vv
case []interface{}:
for _, i := range vv {
tk, i := unwrapValue(i, &lt)
subject, ok := i.(string)
if !ok {
return nil, &configErr{tk, "Subject in permissions array cannot be cast to string"}
}
subjects = append(subjects, subject)
}
default:
return nil, &configErr{tk, fmt.Sprintf("Expected subject permissions to be a subject, or array of subjects, got %T", v)}
}
if err := checkSubjectArray(subjects); err != nil {
return nil, &configErr{tk, err.Error()}
}
return subjects, nil
}
// Helper function to parse a ResponsePermission.
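// Illustrative forms (a plain boolean is handled by the caller):
//
//    allow_responses: true
//    allow_responses: {max: 5, expires: "2m"}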
func parseAllowResponses(v interface{}, errors, warnings *[]error) *ResponsePermission {
var lt token
defer convertPanicToErrorList(&lt, errors)
tk, v := unwrapValue(v, &lt)
// Check if this is a map.
pm, ok := v.(map[string]interface{})
if !ok {
err := &configErr{tk, "error parsing response permissions, expected a boolean or a map"}
*errors = append(*errors, err)
return nil
}
rp := &ResponsePermission{
MaxMsgs: DEFAULT_ALLOW_RESPONSE_MAX_MSGS,
Expires: DEFAULT_ALLOW_RESPONSE_EXPIRATION,
}
for k, v := range pm {
tk, v = unwrapValue(v, &lt)
switch strings.ToLower(k) {
case "max", "max_msgs", "max_messages", "max_responses":
max := int(v.(int64))
// Negative values are accepted (mean infinite), and 0
// means default value (set above).
if max != 0 {
rp.MaxMsgs = max
}
case "expires", "expiration", "ttl":
wd, ok := v.(string)
if ok {
ttl, err := time.ParseDuration(wd)
if err != nil {
err := &configErr{tk, fmt.Sprintf("error parsing expires: %v", err)}
*errors = append(*errors, err)
return nil
}
// Negative values are accepted (mean infinite), and 0
// means default value (set above).
if ttl != 0 {
rp.Expires = ttl
}
} else {
err := &configErr{tk, "error parsing expires, not a duration string"}
*errors = append(*errors, err)
return nil
}
default:
if !tk.IsUsedVariable() {
err := &configErr{tk, fmt.Sprintf("Unknown field %q parsing permissions", k)}
*errors = append(*errors, err)
}
}
}
return rp
}
// Helper function to parse old style authorization configs.
func parseOldPermissionStyle(v interface{}, errors, warnings *[]error) (*SubjectPermission, error) {
subjects, err := parseSubjects(v, errors, warnings)
if err != nil {
return nil, err
}
return &SubjectPermission{Allow: subjects}, nil
}
// Helper function to parse new style authorization into a SubjectPermission with Allow and Deny.
func parseSubjectPermission(v interface{}, errors, warnings *[]error) (*SubjectPermission, error) {
var lt token
defer convertPanicToErrorList(&lt, errors)
m := v.(map[string]interface{})
if len(m) == 0 {
return nil, nil
}
p := &SubjectPermission{}
for k, v := range m {
tk, _ := unwrapValue(v, &lt)
switch strings.ToLower(k) {
case "allow":
subjects, err := parseSubjects(tk, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
p.Allow = subjects
case "deny":
subjects, err := parseSubjects(tk, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
p.Deny = subjects
default:
if !tk.IsUsedVariable() {
err := &configErr{tk, fmt.Sprintf("Unknown field name %q parsing subject permissions, only 'allow' or 'deny' are permitted", k)}
*errors = append(*errors, err)
}
}
}
return p, nil
}
// Helper function to validate subjects, etc for account permissioning.
func checkSubjectArray(sa []string) error {
for _, s := range sa {
if !IsValidSubject(s) {
return fmt.Errorf("subject %q is not a valid subject", s)
}
}
return nil
}
// PrintTLSHelpAndDie prints TLS usage and exits.
func PrintTLSHelpAndDie() {
fmt.Printf("%s", tlsUsage)
for k := range cipherMap {
fmt.Printf(" %s\n", k)
}
fmt.Printf("\nAvailable curve preferences include:\n")
for k := range curvePreferenceMap {
fmt.Printf(" %s\n", k)
}
os.Exit(0)
}
func parseCipher(cipherName string) (uint16, error) {
cipher, exists := cipherMap[cipherName]
if !exists {
return 0, fmt.Errorf("unrecognized cipher %s", cipherName)
}
return cipher, nil
}
func parseCurvePreferences(curveName string) (tls.CurveID, error) {
curve, exists := curvePreferenceMap[curveName]
if !exists {
return 0, fmt.Errorf("unrecognized curve preference %s", curveName)
}
return curve, nil
}
// Helper function to parse TLS configs.
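// An illustrative tls block covering fields handled here:
//
//    tls {
//      cert_file: "./server-cert.pem"
//      key_file:  "./server-key.pem"
//      ca_file:   "./ca.pem"
//      verify:    true
//      timeout:   2
//    }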
func parseTLS(v interface{}) (t *TLSConfigOpts, retErr error) {
var (
tlsm map[string]interface{}
tc = TLSConfigOpts{}
lt token
)
defer convertPanicToError(&lt, &retErr)
_, v = unwrapValue(v, &lt)
tlsm = v.(map[string]interface{})
for mk, mv := range tlsm {
tk, mv := unwrapValue(mv, &lt)
switch strings.ToLower(mk) {
case "cert_file":
certFile, ok := mv.(string)
if !ok {
return nil, &configErr{tk, "error parsing tls config, expected 'cert_file' to be filename"}
}
tc.CertFile = certFile
case "key_file":
keyFile, ok := mv.(string)
if !ok {
return nil, &configErr{tk, "error parsing tls config, expected 'key_file' to be filename"}
}
tc.KeyFile = keyFile
case "ca_file":
caFile, ok := mv.(string)
if !ok {
return nil, &configErr{tk, "error parsing tls config, expected 'ca_file' to be filename"}
}
tc.CaFile = caFile
case "insecure":
insecure, ok := mv.(bool)
if !ok {
return nil, &configErr{tk, "error parsing tls config, expected 'insecure' to be a boolean"}
}
tc.Insecure = insecure
case "verify":
verify, ok := mv.(bool)
if !ok {
return nil, &configErr{tk, "error parsing tls config, expected 'verify' to be a boolean"}
}
tc.Verify = verify
case "verify_and_map":
verify, ok := mv.(bool)
if !ok {
return nil, &configErr{tk, "error parsing tls config, expected 'verify_and_map' to be a boolean"}
}
tc.Verify = verify
tc.Map = verify
case "cipher_suites":
ra := mv.([]interface{})
if len(ra) == 0 {
return nil, &configErr{tk, "error parsing tls config, 'cipher_suites' cannot be empty"}
}
tc.Ciphers = make([]uint16, 0, len(ra))
for _, r := range ra {
tk, r := unwrapValue(r, &lt)
cipher, err := parseCipher(r.(string))
if err != nil {
return nil, &configErr{tk, err.Error()}
}
tc.Ciphers = append(tc.Ciphers, cipher)
}
case "curve_preferences":
ra := mv.([]interface{})
if len(ra) == 0 {
return nil, &configErr{tk, "error parsing tls config, 'curve_preferences' cannot be empty"}
}
tc.CurvePreferences = make([]tls.CurveID, 0, len(ra))
for _, r := range ra {
tk, r := unwrapValue(r, &lt)
cps, err := parseCurvePreferences(r.(string))
if err != nil {
return nil, &configErr{tk, err.Error()}
}
tc.CurvePreferences = append(tc.CurvePreferences, cps)
}
case "timeout":
at := float64(0)
switch mv := mv.(type) {
case int64:
at = float64(mv)
case float64:
at = mv
}
tc.Timeout = at
default:
return nil, &configErr{tk, fmt.Sprintf("error parsing tls config, unknown field [%q]", mk)}
}
}
// If cipher suites were not specified then use the defaults
if tc.Ciphers == nil {
tc.Ciphers = defaultCipherSuites()
}
// If curve preferences were not specified, then use the defaults
if tc.CurvePreferences == nil {
tc.CurvePreferences = defaultCurvePreferences()
}
return &tc, nil
}
func parseWebsocket(v interface{}, o *Options, errors *[]error, warnings *[]error) error {
var lt token
defer convertPanicToErrorList(&lt, errors)
tk, v := unwrapValue(v, &lt)
gm, ok := v.(map[string]interface{})
if !ok {
return &configErr{tk, fmt.Sprintf("Expected websocket to be a map, got %T", v)}
}
for mk, mv := range gm {
// Again, unwrap token value if line check is required.
tk, mv = unwrapValue(mv, &lt)
switch strings.ToLower(mk) {
case "listen":
hp, err := parseListen(mv)
if err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
continue
}
o.Websocket.Host = hp.host
o.Websocket.Port = hp.port
case "port":
o.Websocket.Port = int(mv.(int64))
case "host", "net":
o.Websocket.Host = mv.(string)
case "advertise":
o.Websocket.Advertise = mv.(string)
case "tls":
tc, err := parseTLS(tk)
if err != nil {
*errors = append(*errors, err)
continue
}
if o.Websocket.TLSConfig, err = GenTLSConfig(tc); err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
continue
}
o.Websocket.TLSMap = tc.Map
case "same_origin":
o.Websocket.SameOrigin = mv.(bool)
case "allowed_origins", "allowed_origin", "allow_origins", "allow_origin", "origins", "origin":
switch mv := mv.(type) {
case string:
o.Websocket.AllowedOrigins = []string{mv}
case []interface{}:
keys := make([]string, 0, len(mv))
for _, val := range mv {
tk, val = unwrapValue(val, &lt)
if key, ok := val.(string); ok {
keys = append(keys, key)
} else {
err := &configErr{tk, fmt.Sprintf("error parsing allowed origins: unsupported type in array %T", val)}
*errors = append(*errors, err)
continue
}
}
o.Websocket.AllowedOrigins = keys
default:
err := &configErr{tk, fmt.Sprintf("error parsing allowed origins: unsupported type %T", mv)}
*errors = append(*errors, err)
}
case "handshake_timeout":
ht := time.Duration(0)
switch mv := mv.(type) {
case int64:
ht = time.Duration(mv) * time.Second
case string:
var err error
ht, err = time.ParseDuration(mv)
if err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
continue
}
default:
err := &configErr{tk, fmt.Sprintf("error parsing handshake timeout: unsupported type %T", mv)}
*errors = append(*errors, err)
}
o.Websocket.HandshakeTimeout = ht
case "compression":
o.Websocket.Compression = mv.(bool)
case "authorization", "authentication":
auth, err := parseAuthorization(tk, o, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
o.Websocket.Username = auth.user
o.Websocket.Password = auth.pass
o.Websocket.Token = auth.token
if (auth.user != "" || auth.pass != "") && auth.token != "" {
err := &configErr{tk, "Cannot have a user/pass and token"}
*errors = append(*errors, err)
continue
}
o.Websocket.AuthTimeout = auth.timeout
// Check for multiple users defined
if auth.users != nil {
if auth.user != "" {
err := &configErr{tk, "Can not have a single user/pass and a users array"}
*errors = append(*errors, err)
continue
}
if auth.token != "" {
err := &configErr{tk, "Can not have a token and a users array"}
*errors = append(*errors, err)
continue
}
// Users may have been added from Accounts parsing, so do an append here
o.Websocket.Users = append(o.Websocket.Users, auth.users...)
}
// Check for nkeys
if auth.nkeys != nil {
o.Websocket.Nkeys = append(o.Websocket.Nkeys, auth.nkeys...)
}
case "jwt_cookie":
o.Websocket.JWTCookie = mv.(string)
case "no_auth_user":
o.Websocket.NoAuthUser = mv.(string)
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
return nil
}
// GenTLSConfig loads TLS related configuration parameters.
func GenTLSConfig(tc *TLSConfigOpts) (*tls.Config, error) {
// Create the tls.Config from our options before including the certs.
// It will determine the cipher suites that we prefer.
// FIXME(dlc) change if ARM based.
config := tls.Config{
MinVersion: tls.VersionTLS12,
CipherSuites: tc.Ciphers,
PreferServerCipherSuites: true,
CurvePreferences: tc.CurvePreferences,
InsecureSkipVerify: tc.Insecure,
}
switch {
case tc.CertFile != "" && tc.KeyFile == "":
return nil, fmt.Errorf("missing 'key_file' in TLS configuration")
case tc.CertFile == "" && tc.KeyFile != "":
return nil, fmt.Errorf("missing 'cert_file' in TLS configuration")
case tc.CertFile != "" && tc.KeyFile != "":
// Now load in cert and private key
cert, err := tls.LoadX509KeyPair(tc.CertFile, tc.KeyFile)
if err != nil {
return nil, fmt.Errorf("error parsing X509 certificate/key pair: %v", err)
}
cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0])
if err != nil {
return nil, fmt.Errorf("error parsing certificate: %v", err)
}
config.Certificates = []tls.Certificate{cert}
}
// Require client certificates as needed
if tc.Verify {
config.ClientAuth = tls.RequireAndVerifyClientCert
}
// Add in CAs if applicable.
if tc.CaFile != "" {
rootPEM, err := ioutil.ReadFile(tc.CaFile)
if err != nil || rootPEM == nil {
return nil, err
}
pool := x509.NewCertPool()
ok := pool.AppendCertsFromPEM(rootPEM)
if !ok {
return nil, fmt.Errorf("failed to parse root ca certificate")
}
config.ClientCAs = pool
}
return &config, nil
}
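// exampleGenTLSConfig is an illustrative sketch, not part of the original
// source: it shows how a hand-built TLSConfigOpts is turned into a
// *tls.Config via GenTLSConfig, the same path used by the config-file and
// flag handling in this file. The certificate paths are placeholders.
func exampleGenTLSConfig() (*tls.Config, error) {
	tc := &TLSConfigOpts{
		CertFile: "./certs/server-cert.pem", // assumed path
		KeyFile:  "./certs/server-key.pem",  // assumed path
		CaFile:   "./certs/ca.pem",          // assumed path
		Verify:   true,                      // require and verify client certificates
	}
	return GenTLSConfig(tc)
}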
// MergeOptions will merge two options giving preference to the flagOpts
// if the item is present.
func MergeOptions(fileOpts, flagOpts *Options) *Options {
if fileOpts == nil {
return flagOpts
}
if flagOpts == nil {
return fileOpts
}
// Merge the two, flagOpts override
opts := *fileOpts
if flagOpts.Port != 0 {
opts.Port = flagOpts.Port
}
if flagOpts.Host != "" {
opts.Host = flagOpts.Host
}
if flagOpts.ClientAdvertise != "" {
opts.ClientAdvertise = flagOpts.ClientAdvertise
}
if flagOpts.Username != "" {
opts.Username = flagOpts.Username
}
if flagOpts.Password != "" {
opts.Password = flagOpts.Password
}
if flagOpts.Authorization != "" {
opts.Authorization = flagOpts.Authorization
}
if flagOpts.HTTPPort != 0 {
opts.HTTPPort = flagOpts.HTTPPort
}
if flagOpts.HTTPBasePath != "" {
opts.HTTPBasePath = flagOpts.HTTPBasePath
}
if flagOpts.Debug {
opts.Debug = true
}
if flagOpts.Trace {
opts.Trace = true
}
if flagOpts.Logtime {
opts.Logtime = true
}
if flagOpts.LogFile != "" {
opts.LogFile = flagOpts.LogFile
}
if flagOpts.PidFile != "" {
opts.PidFile = flagOpts.PidFile
}
if flagOpts.PortsFileDir != "" {
opts.PortsFileDir = flagOpts.PortsFileDir
}
if flagOpts.ProfPort != 0 {
opts.ProfPort = flagOpts.ProfPort
}
if flagOpts.Cluster.ListenStr != "" {
opts.Cluster.ListenStr = flagOpts.Cluster.ListenStr
}
if flagOpts.Cluster.NoAdvertise {
opts.Cluster.NoAdvertise = true
}
if flagOpts.Cluster.ConnectRetries != 0 {
opts.Cluster.ConnectRetries = flagOpts.Cluster.ConnectRetries
}
if flagOpts.Cluster.Advertise != "" {
opts.Cluster.Advertise = flagOpts.Cluster.Advertise
}
if flagOpts.RoutesStr != "" {
mergeRoutes(&opts, flagOpts)
}
return &opts
}
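// exampleMergeOptions is an illustrative sketch, not part of the original
// source: fields set via flags win over the configuration file, while
// untouched fields keep their file values.
func exampleMergeOptions() *Options {
	fileOpts := &Options{Host: "0.0.0.0", Port: 4222}
	flagOpts := &Options{Port: 5222}
	merged := MergeOptions(fileOpts, flagOpts)
	// merged.Port == 5222 (from flags), merged.Host == "0.0.0.0" (from file)
	return merged
}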
// RoutesFromStr parses route URLs from a string
func RoutesFromStr(routesStr string) []*url.URL {
routes := strings.Split(routesStr, ",")
if len(routes) == 0 {
return nil
}
routeUrls := []*url.URL{}
for _, r := range routes {
r = strings.TrimSpace(r)
u, _ := url.Parse(r)
routeUrls = append(routeUrls, u)
}
return routeUrls
}
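// exampleRoutesFromStr is an illustrative sketch, not part of the original
// source: the comma-separated value of the -routes flag is split, trimmed and
// parsed into URLs. Hosts and ports here are placeholders.
func exampleRoutesFromStr() []*url.URL {
	return RoutesFromStr("nats://10.0.0.1:6222, nats://10.0.0.2:6222")
}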
// This will merge the flag routes and override anything that was present.
func mergeRoutes(opts, flagOpts *Options) {
routeUrls := RoutesFromStr(flagOpts.RoutesStr)
if routeUrls == nil {
return
}
opts.Routes = routeUrls
opts.RoutesStr = flagOpts.RoutesStr
}
// RemoveSelfReference removes this server from an array of routes
func RemoveSelfReference(clusterPort int, routes []*url.URL) ([]*url.URL, error) {
var cleanRoutes []*url.URL
cport := strconv.Itoa(clusterPort)
selfIPs, err := getInterfaceIPs()
if err != nil {
return nil, err
}
for _, r := range routes {
host, port, err := net.SplitHostPort(r.Host)
if err != nil {
return nil, err
}
ipList, err := getURLIP(host)
if err != nil {
return nil, err
}
if cport == port && isIPInList(selfIPs, ipList) {
continue
}
cleanRoutes = append(cleanRoutes, r)
}
return cleanRoutes, nil
}
func isIPInList(list1 []net.IP, list2 []net.IP) bool {
for _, ip1 := range list1 {
for _, ip2 := range list2 {
if ip1.Equal(ip2) {
return true
}
}
}
return false
}
func getURLIP(ipStr string) ([]net.IP, error) {
ipList := []net.IP{}
ip := net.ParseIP(ipStr)
if ip != nil {
ipList = append(ipList, ip)
return ipList, nil
}
hostAddr, err := net.LookupHost(ipStr)
if err != nil {
return nil, fmt.Errorf("Error looking up host with route hostname: %v", err)
}
for _, addr := range hostAddr {
ip = net.ParseIP(addr)
if ip != nil {
ipList = append(ipList, ip)
}
}
return ipList, nil
}
func getInterfaceIPs() ([]net.IP, error) {
var localIPs []net.IP
interfaceAddr, err := net.InterfaceAddrs()
if err != nil {
return nil, fmt.Errorf("Error getting self referencing address: %v", err)
}
for i := 0; i < len(interfaceAddr); i++ {
interfaceIP, _, _ := net.ParseCIDR(interfaceAddr[i].String())
if net.ParseIP(interfaceIP.String()) != nil {
localIPs = append(localIPs, interfaceIP)
} else {
return nil, fmt.Errorf("Error parsing self referencing address: %v", err)
}
}
return localIPs, nil
}
func setBaselineOptions(opts *Options) {
// Setup non-standard Go defaults
if opts.Host == "" {
opts.Host = DEFAULT_HOST
}
if opts.HTTPHost == "" {
// Default to same bind from server if left undefined
opts.HTTPHost = opts.Host
}
if opts.Port == 0 {
opts.Port = DEFAULT_PORT
} else if opts.Port == RANDOM_PORT {
// Choose randomly inside of net.Listen
opts.Port = 0
}
if opts.MaxConn == 0 {
opts.MaxConn = DEFAULT_MAX_CONNECTIONS
}
if opts.PingInterval == 0 {
opts.PingInterval = DEFAULT_PING_INTERVAL
}
if opts.MaxPingsOut == 0 {
opts.MaxPingsOut = DEFAULT_PING_MAX_OUT
}
if opts.TLSTimeout == 0 {
opts.TLSTimeout = float64(TLS_TIMEOUT) / float64(time.Second)
}
if opts.AuthTimeout == 0 {
opts.AuthTimeout = float64(AUTH_TIMEOUT) / float64(time.Second)
}
if opts.Cluster.Port != 0 {
if opts.Cluster.Host == "" {
opts.Cluster.Host = DEFAULT_HOST
}
if opts.Cluster.TLSTimeout == 0 {
opts.Cluster.TLSTimeout = float64(TLS_TIMEOUT) / float64(time.Second)
}
if opts.Cluster.AuthTimeout == 0 {
opts.Cluster.AuthTimeout = float64(AUTH_TIMEOUT) / float64(time.Second)
}
}
if opts.LeafNode.Port != 0 {
if opts.LeafNode.Host == "" {
opts.LeafNode.Host = DEFAULT_HOST
}
if opts.LeafNode.TLSTimeout == 0 {
opts.LeafNode.TLSTimeout = float64(TLS_TIMEOUT) / float64(time.Second)
}
if opts.LeafNode.AuthTimeout == 0 {
opts.LeafNode.AuthTimeout = float64(AUTH_TIMEOUT) / float64(time.Second)
}
}
// Set baseline connect port for remotes.
for _, r := range opts.LeafNode.Remotes {
if r != nil {
for _, u := range r.URLs {
if u.Port() == "" {
u.Host = net.JoinHostPort(u.Host, strconv.Itoa(DEFAULT_LEAFNODE_PORT))
}
}
}
}
// Set this regardless of opts.LeafNode.Port
if opts.LeafNode.ReconnectInterval == 0 {
opts.LeafNode.ReconnectInterval = DEFAULT_LEAF_NODE_RECONNECT
}
if opts.MaxControlLine == 0 {
opts.MaxControlLine = MAX_CONTROL_LINE_SIZE
}
if opts.MaxPayload == 0 {
opts.MaxPayload = MAX_PAYLOAD_SIZE
}
if opts.MaxPending == 0 {
opts.MaxPending = MAX_PENDING_SIZE
}
if opts.WriteDeadline == time.Duration(0) {
opts.WriteDeadline = DEFAULT_FLUSH_DEADLINE
}
if opts.MaxClosedClients == 0 {
opts.MaxClosedClients = DEFAULT_MAX_CLOSED_CLIENTS
}
if opts.LameDuckDuration == 0 {
opts.LameDuckDuration = DEFAULT_LAME_DUCK_DURATION
}
if opts.LameDuckGracePeriod == 0 {
opts.LameDuckGracePeriod = DEFAULT_LAME_DUCK_GRACE_PERIOD
}
if opts.Gateway.Port != 0 {
if opts.Gateway.Host == "" {
opts.Gateway.Host = DEFAULT_HOST
}
if opts.Gateway.TLSTimeout == 0 {
opts.Gateway.TLSTimeout = float64(TLS_TIMEOUT) / float64(time.Second)
}
if opts.Gateway.AuthTimeout == 0 {
opts.Gateway.AuthTimeout = float64(AUTH_TIMEOUT) / float64(time.Second)
}
}
if opts.ConnectErrorReports == 0 {
opts.ConnectErrorReports = DEFAULT_CONNECT_ERROR_REPORTS
}
if opts.ReconnectErrorReports == 0 {
opts.ReconnectErrorReports = DEFAULT_RECONNECT_ERROR_REPORTS
}
if opts.Websocket.Port != 0 {
if opts.Websocket.Host == "" {
opts.Websocket.Host = DEFAULT_HOST
}
}
// JetStream
if opts.JetStreamMaxMemory == 0 {
opts.JetStreamMaxMemory = -1
}
if opts.JetStreamMaxStore == 0 {
opts.JetStreamMaxStore = -1
}
}
// ConfigureOptions accepts a flag set and augments it with NATS Server
// specific flags. On success, an options structure is returned configured
// based on the selected flags and/or configuration file.
// The command line options take precedence to the ones in the configuration file.
func ConfigureOptions(fs *flag.FlagSet, args []string, printVersion, printHelp, printTLSHelp func()) (*Options, error) {
opts := &Options{}
var (
showVersion bool
showHelp bool
showTLSHelp bool
signal string
configFile string
dbgAndTrace bool
trcAndVerboseTrc bool
dbgAndTrcAndVerboseTrc bool
err error
)
fs.BoolVar(&showHelp, "h", false, "Show this message.")
fs.BoolVar(&showHelp, "help", false, "Show this message.")
fs.IntVar(&opts.Port, "port", 0, "Port to listen on.")
fs.IntVar(&opts.Port, "p", 0, "Port to listen on.")
fs.StringVar(&opts.Host, "addr", "", "Network host to listen on.")
fs.StringVar(&opts.Host, "a", "", "Network host to listen on.")
fs.StringVar(&opts.Host, "net", "", "Network host to listen on.")
fs.StringVar(&opts.ClientAdvertise, "client_advertise", "", "Client URL to advertise to other servers.")
fs.BoolVar(&opts.Debug, "D", false, "Enable Debug logging.")
fs.BoolVar(&opts.Debug, "debug", false, "Enable Debug logging.")
fs.BoolVar(&opts.Trace, "V", false, "Enable Trace logging.")
fs.BoolVar(&trcAndVerboseTrc, "VV", false, "Enable Verbose Trace logging. (Traces system account as well)")
fs.BoolVar(&opts.Trace, "trace", false, "Enable Trace logging.")
fs.BoolVar(&dbgAndTrace, "DV", false, "Enable Debug and Trace logging.")
fs.BoolVar(&dbgAndTrcAndVerboseTrc, "DVV", false, "Enable Debug and Verbose Trace logging. (Traces system account as well)")
fs.BoolVar(&opts.Logtime, "T", true, "Timestamp log entries.")
fs.BoolVar(&opts.Logtime, "logtime", true, "Timestamp log entries.")
fs.StringVar(&opts.Username, "user", "", "Username required for connection.")
fs.StringVar(&opts.Password, "pass", "", "Password required for connection.")
fs.StringVar(&opts.Authorization, "auth", "", "Authorization token required for connection.")
fs.IntVar(&opts.HTTPPort, "m", 0, "HTTP Port for /varz, /connz endpoints.")
fs.IntVar(&opts.HTTPPort, "http_port", 0, "HTTP Port for /varz, /connz endpoints.")
fs.IntVar(&opts.HTTPSPort, "ms", 0, "HTTPS Port for /varz, /connz endpoints.")
fs.IntVar(&opts.HTTPSPort, "https_port", 0, "HTTPS Port for /varz, /connz endpoints.")
fs.StringVar(&configFile, "c", "", "Configuration file.")
fs.StringVar(&configFile, "config", "", "Configuration file.")
fs.BoolVar(&opts.CheckConfig, "t", false, "Check configuration and exit.")
fs.StringVar(&signal, "sl", "", "Send signal to nats-server process (stop, quit, reopen, reload).")
fs.StringVar(&signal, "signal", "", "Send signal to nats-server process (stop, quit, reopen, reload).")
fs.StringVar(&opts.PidFile, "P", "", "File to store process pid.")
fs.StringVar(&opts.PidFile, "pid", "", "File to store process pid.")
fs.StringVar(&opts.PortsFileDir, "ports_file_dir", "", "Creates a ports file in the specified directory (<executable_name>_<pid>.ports).")
fs.StringVar(&opts.LogFile, "l", "", "File to store logging output.")
fs.StringVar(&opts.LogFile, "log", "", "File to store logging output.")
fs.Int64Var(&opts.LogSizeLimit, "log_size_limit", 0, "Logfile size limit being auto-rotated")
fs.BoolVar(&opts.Syslog, "s", false, "Enable syslog as log method.")
fs.BoolVar(&opts.Syslog, "syslog", false, "Enable syslog as log method.")
fs.StringVar(&opts.RemoteSyslog, "r", "", "Syslog server addr (udp://127.0.0.1:514).")
fs.StringVar(&opts.RemoteSyslog, "remote_syslog", "", "Syslog server addr (udp://127.0.0.1:514).")
fs.BoolVar(&showVersion, "version", false, "Print version information.")
fs.BoolVar(&showVersion, "v", false, "Print version information.")
fs.IntVar(&opts.ProfPort, "profile", 0, "Profiling HTTP port.")
fs.StringVar(&opts.RoutesStr, "routes", "", "Routes to actively solicit a connection.")
fs.StringVar(&opts.Cluster.ListenStr, "cluster", "", "Cluster url from which members can solicit routes.")
fs.StringVar(&opts.Cluster.ListenStr, "cluster_listen", "", "Cluster url from which members can solicit routes.")
fs.StringVar(&opts.Cluster.Advertise, "cluster_advertise", "", "Cluster URL to advertise to other servers.")
fs.BoolVar(&opts.Cluster.NoAdvertise, "no_advertise", false, "Advertise known cluster IPs to clients.")
fs.IntVar(&opts.Cluster.ConnectRetries, "connect_retries", 0, "For implicit routes, number of connect retries.")
fs.BoolVar(&showTLSHelp, "help_tls", false, "TLS help.")
fs.BoolVar(&opts.TLS, "tls", false, "Enable TLS.")
fs.BoolVar(&opts.TLSVerify, "tlsverify", false, "Enable TLS with client verification.")
fs.StringVar(&opts.TLSCert, "tlscert", "", "Server certificate file.")
fs.StringVar(&opts.TLSKey, "tlskey", "", "Private key for server certificate.")
fs.StringVar(&opts.TLSCaCert, "tlscacert", "", "Client certificate CA for verification.")
fs.IntVar(&opts.MaxTracedMsgLen, "max_traced_msg_len", 0, "Maximum printable length for traced messages. 0 for unlimited.")
fs.BoolVar(&opts.JetStream, "js", false, "Enable JetStream.")
fs.BoolVar(&opts.JetStream, "jetstream", false, "Enable JetStream.")
fs.StringVar(&opts.StoreDir, "sd", "", "Storage directory.")
fs.StringVar(&opts.StoreDir, "store_dir", "", "Storage directory.")
// The flags definition above set "default" values to some of the options.
// Calling Parse() here will override the default options with any value
// specified from the command line. This is ok. We will then update the
// options with the content of the configuration file (if present), and then,
// call Parse() again to override the default+config with command line values.
// Calling Parse() before processing config file is necessary since configFile
// itself is a command line argument, and also Parse() is required in order
// to know if user wants simply to show "help" or "version", etc...
if err := fs.Parse(args); err != nil {
return nil, err
}
if showVersion {
printVersion()
return nil, nil
}
if showHelp {
printHelp()
return nil, nil
}
if showTLSHelp {
printTLSHelp()
return nil, nil
}
// Process args looking for non-flag options,
// 'version' and 'help' only for now
showVersion, showHelp, err = ProcessCommandLineArgs(fs)
if err != nil {
return nil, err
} else if showVersion {
printVersion()
return nil, nil
} else if showHelp {
printHelp()
return nil, nil
}
// Snapshot flag options.
FlagSnapshot = opts.Clone()
// Keep track of the boolean flags that were explicitly set with their value.
fs.Visit(func(f *flag.Flag) {
switch f.Name {
case "DVV":
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "Debug", dbgAndTrcAndVerboseTrc)
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "Trace", dbgAndTrcAndVerboseTrc)
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "TraceVerbose", dbgAndTrcAndVerboseTrc)
case "DV":
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "Debug", dbgAndTrace)
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "Trace", dbgAndTrace)
case "D":
fallthrough
case "debug":
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "Debug", FlagSnapshot.Debug)
case "VV":
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "Trace", trcAndVerboseTrc)
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "TraceVerbose", trcAndVerboseTrc)
case "V":
fallthrough
case "trace":
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "Trace", FlagSnapshot.Trace)
case "T":
fallthrough
case "logtime":
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "Logtime", FlagSnapshot.Logtime)
case "s":
fallthrough
case "syslog":
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "Syslog", FlagSnapshot.Syslog)
case "no_advertise":
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "Cluster.NoAdvertise", FlagSnapshot.Cluster.NoAdvertise)
}
})
// Process signal control.
if signal != "" {
if err := processSignal(signal); err != nil {
return nil, err
}
}
// Parse config if given
if configFile != "" {
// This will update the options with values from the config file.
err := opts.ProcessConfigFile(configFile)
if err != nil {
if opts.CheckConfig {
return nil, err
}
if cerr, ok := err.(*processConfigErr); !ok || len(cerr.Errors()) != 0 {
return nil, err
}
// If we get here we only have warnings and can still continue
fmt.Fprint(os.Stderr, err)
} else if opts.CheckConfig {
// Report configuration file syntax test was successful and exit.
return opts, nil
}
// Call this again to override config file options with options from command line.
// Note: We don't need to check error here since if there was an error, it would
// have been caught the first time this function was called (after setting up the
// flags).
fs.Parse(args)
} else if opts.CheckConfig {
return nil, fmt.Errorf("must specify [-c, --config] option to check configuration file syntax")
}
// Special handling of some flags
var (
flagErr error
tlsDisabled bool
tlsOverride bool
)
fs.Visit(func(f *flag.Flag) {
// short-circuit if an error was encountered
if flagErr != nil {
return
}
if strings.HasPrefix(f.Name, "tls") {
if f.Name == "tls" {
if !opts.TLS {
// User has specified "-tls=false", we need to disable TLS
opts.TLSConfig = nil
tlsDisabled = true
tlsOverride = false
return
}
tlsOverride = true
} else if !tlsDisabled {
tlsOverride = true
}
} else {
switch f.Name {
case "VV":
opts.Trace, opts.TraceVerbose = trcAndVerboseTrc, trcAndVerboseTrc
case "DVV":
opts.Trace, opts.Debug, opts.TraceVerbose = dbgAndTrcAndVerboseTrc, dbgAndTrcAndVerboseTrc, dbgAndTrcAndVerboseTrc
case "DV":
// Check value to support -DV=false
opts.Trace, opts.Debug = dbgAndTrace, dbgAndTrace
case "cluster", "cluster_listen":
// Override cluster config if explicitly set via flags.
flagErr = overrideCluster(opts)
case "routes":
// Keep in mind that the flag has updated opts.RoutesStr at this point.
if opts.RoutesStr == "" {
// Set routes array to nil since routes string is empty
opts.Routes = nil
return
}
routeUrls := RoutesFromStr(opts.RoutesStr)
opts.Routes = routeUrls
}
}
})
if flagErr != nil {
return nil, flagErr
}
// This will be true if some of the `-tls` params have been set and
// `-tls=false` has not been set.
if tlsOverride {
if err := overrideTLS(opts); err != nil {
return nil, err
}
}
// If we don't have cluster defined in the configuration
// file and no cluster listen string override, but we do
// have a routes override, we need to report misconfiguration.
if opts.RoutesStr != "" && opts.Cluster.ListenStr == "" && opts.Cluster.Host == "" && opts.Cluster.Port == 0 {
return nil, errors.New("solicited routes require cluster capabilities, e.g. --cluster")
}
return opts, nil
}
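// exampleConfigureOptions is an illustrative sketch, not part of the original
// source, of the typical call sequence: build a FlagSet, let ConfigureOptions
// merge command-line flags with an optional configuration file, then fill in
// defaults with setBaselineOptions. The version/help callbacks are stand-ins.
func exampleConfigureOptions(args []string) (*Options, error) {
	fs := flag.NewFlagSet("nats-server", flag.ExitOnError)
	opts, err := ConfigureOptions(fs, args,
		func() {},                    // printVersion stand-in
		func() { fs.PrintDefaults() }, // printHelp stand-in
		PrintTLSHelpAndDie)
	if err != nil || opts == nil { // opts is nil when version/help was requested
		return nil, err
	}
	setBaselineOptions(opts)
	return opts, nil
}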
func normalizeBasePath(p string) string {
if len(p) == 0 {
return "/"
}
// add leading slash
if p[0] != '/' {
p = "/" + p
}
return path.Clean(p)
}
// overrideTLS is called when at least "-tls=true" has been set.
func overrideTLS(opts *Options) error {
if opts.TLSCert == "" {
return errors.New("TLS Server certificate must be present and valid")
}
if opts.TLSKey == "" {
return errors.New("TLS Server private key must be present and valid")
}
tc := TLSConfigOpts{}
tc.CertFile = opts.TLSCert
tc.KeyFile = opts.TLSKey
tc.CaFile = opts.TLSCaCert
tc.Verify = opts.TLSVerify
var err error
opts.TLSConfig, err = GenTLSConfig(&tc)
return err
}
// overrideCluster updates Options.Cluster if that flag "cluster" (or "cluster_listen")
// has explicitly be set in the command line. If it is set to empty string, it will
// clear the Cluster options.
func overrideCluster(opts *Options) error {
if opts.Cluster.ListenStr == "" {
// This one is enough to disable clustering.
opts.Cluster.Port = 0
return nil
}
// -1 will fail url.Parse, so if we have -1, change it to
// 0, and then after parse, replace the port with -1 so we get
// automatic port allocation
wantsRandom := false
if strings.HasSuffix(opts.Cluster.ListenStr, ":-1") {
wantsRandom = true
cls := fmt.Sprintf("%s:0", opts.Cluster.ListenStr[0:len(opts.Cluster.ListenStr)-3])
opts.Cluster.ListenStr = cls
}
clusterURL, err := url.Parse(opts.Cluster.ListenStr)
if err != nil {
return err
}
h, p, err := net.SplitHostPort(clusterURL.Host)
if err != nil {
return err
}
if wantsRandom {
p = "-1"
}
opts.Cluster.Host = h
_, err = fmt.Sscan(p, &opts.Cluster.Port)
if err != nil {
return err
}
if clusterURL.User != nil {
pass, hasPassword := clusterURL.User.Password()
if !hasPassword {
return errors.New("expected cluster password to be set")
}
opts.Cluster.Password = pass
user := clusterURL.User.Username()
opts.Cluster.Username = user
} else {
// Since we override from flag and there is no user/pwd, make
// sure we clear what we may have gotten from config file.
opts.Cluster.Username = ""
opts.Cluster.Password = ""
}
return nil
}
func processSignal(signal string) error {
var (
pid string
commandAndPid = strings.Split(signal, "=")
)
if l := len(commandAndPid); l == 2 {
pid = maybeReadPidFile(commandAndPid[1])
} else if l > 2 {
return fmt.Errorf("invalid signal parameters: %v", commandAndPid[2:])
}
if err := ProcessSignal(Command(commandAndPid[0]), pid); err != nil {
return err
}
os.Exit(0)
return nil
}
// maybeReadPidFile returns a PID or Windows service name obtained via the following method:
// 1. Try to open a file with path "pidStr" (absolute or relative).
// 2. If such a file exists and can be read, return its contents.
// 3. Otherwise, return the original "pidStr" string.
func maybeReadPidFile(pidStr string) string {
if b, err := ioutil.ReadFile(pidStr); err == nil {
return string(b)
}
return pidStr
}
func homeDir() (string, error) {
if runtime.GOOS == "windows" {
homeDrive, homePath := os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH")
userProfile := os.Getenv("USERPROFILE")
home := filepath.Join(homeDrive, homePath)
if homeDrive == "" || homePath == "" {
if userProfile == "" {
return "", errors.New("nats: failed to get home dir, require %HOMEDRIVE% and %HOMEPATH% or %USERPROFILE%")
}
home = userProfile
}
return home, nil
}
home := os.Getenv("HOME")
if home == "" {
return "", errors.New("failed to get home dir, require $HOME")
}
return home, nil
}
func expandPath(p string) (string, error) {
p = os.ExpandEnv(p)
if !strings.HasPrefix(p, "~") {
return p, nil
}
home, err := homeDir()
if err != nil {
return "", err
}
return filepath.Join(home, p[1:]), nil
}
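// examplePidFilePath is an illustrative sketch, not part of the original
// source: expandPath first expands environment variables and then resolves a
// leading "~" against the home directory returned by homeDir. The path is a
// placeholder.
func examplePidFilePath() (string, error) {
	return expandPath("~/nats/nats-server.pid")
}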
|
[
"\"HOMEDRIVE\"",
"\"HOMEPATH\"",
"\"USERPROFILE\"",
"\"HOME\""
] |
[] |
[
"USERPROFILE",
"HOME",
"HOMEPATH",
"HOMEDRIVE"
] |
[]
|
["USERPROFILE", "HOME", "HOMEPATH", "HOMEDRIVE"]
|
go
| 4 | 0 | |
pkg/etcd/etcd.go
|
package etcd
import (
"context"
"github.com/busgo/pink/pkg/log"
"go.etcd.io/etcd/api/v3/mvccpb"
clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/concurrency"
"sync"
"time"
)
type KeyChangeEvent int32
type KeyChangeChan <-chan *KeyChange
const (
KeyCreateChangeEvent = iota + 1 // create event
KeyUpdateChangeEvent // update event
KeyDeleteChangeEvent // delete event
KeyCancelChangeEvent // cancel event
defaultKeyChangeSize = 32
)
// etcd cli
type Cli struct {
c *clientv3.Client
kv clientv3.KV
lease clientv3.Lease
elections map[string]*concurrency.Election
sync.RWMutex
}
// etcd cli config
type CliConfig struct {
Endpoints []string
UserName string
Password string
DialTimeout time.Duration
}
type WatchKeyResponse struct {
Watcher clientv3.Watcher
KeyChangeCh chan *KeyChange
}
type KeyChange struct {
Event KeyChangeEvent
Key string
Value string
}
// new etcd cli
func NewEtcdCli(config *CliConfig) (*Cli, error) {
c, err := clientv3.New(clientv3.Config{
Endpoints: config.Endpoints,
Username: config.UserName,
Password: config.Password,
DialTimeout: config.DialTimeout,
})
if err != nil {
return nil, err
}
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
defer cancel()
_, err = c.Get(ctx, "one")
if err != nil {
log.Errorf("can not connect the etcd endpoints %+v.......%+v", config.Endpoints, err)
return nil, err
}
return &Cli{
c: c,
kv: clientv3.NewKV(c),
lease: clientv3.NewLease(c),
elections: make(map[string]*concurrency.Election),
}, err
}
// get with key
func (cli *Cli) Get(ctx context.Context, key string) (string, error) {
resp, err := cli.kv.Get(ctx, key)
if err != nil {
return "", err
}
if len(resp.Kvs) == 0 {
return "", nil
}
return string(resp.Kvs[0].Value), err
}
// delete a key
func (cli *Cli) Delete(ctx context.Context, key string) error {
_, err := cli.kv.Delete(ctx, key, clientv3.WithPrevKV())
return err
}
// transfer a key
func (cli *Cli) Transfer(ctx context.Context, from, to string, value string) error {
_, err := cli.c.Txn(ctx).Then(clientv3.OpDelete(from), clientv3.OpPut(to, value)).Else(clientv3.OpPut(to, value)).Commit()
return err
}
// get with prefix
func (cli *Cli) GetWithPrefix(ctx context.Context, prefix string) ([]string, []string, error) {
resp, err := cli.kv.Get(ctx, prefix, clientv3.WithPrefix(), clientv3.WithSort(clientv3.SortByKey, clientv3.SortAscend))
if err != nil {
return make([]string, 0), make([]string, 0), err
}
if len(resp.Kvs) == 0 {
return make([]string, 0), make([]string, 0), nil
}
keys := make([]string, 0)
values := make([]string, 0)
for _, kv := range resp.Kvs {
keys = append(keys, string(kv.Key))
values = append(values, string(kv.Value))
}
return keys, values, err
}
// put a key
func (cli *Cli) Put(ctx context.Context, key, value string) error {
_, err := cli.kv.Put(ctx, key, value)
return err
}
// put a key with ttl
func (cli *Cli) PutWithTTL(ctx context.Context, key, value string, ttl int64) (int64, error) {
leaseResponse, err := cli.lease.Grant(ctx, ttl)
if err != nil {
return 0, err
}
_, err = cli.kv.Put(ctx, key, value, clientv3.WithLease(leaseResponse.ID))
return int64(leaseResponse.ID), err
}
func (cli *Cli) PutWithNotExist(ctx context.Context, key, value string) error {
tx := cli.c.Txn(ctx).If(clientv3.Compare(clientv3.Version(key), "=", 0)).
Then(clientv3.OpPut(key, value))
_, err := tx.Commit()
return err
}
func (cli *Cli) PutWithNotExistTTL(ctx context.Context, key, value string, ttl int64) (int64, error) {
leaseResponse, err := cli.lease.Grant(ctx, ttl)
if err != nil {
return 0, err
}
_, err = cli.c.Txn(ctx).If(clientv3.Compare(clientv3.Version(key), "=", 0)).
Then(clientv3.OpPut(key, value, clientv3.WithLease(leaseResponse.ID))).
Commit()
return int64(leaseResponse.ID), err
}
func (cli *Cli) Revoke(ctx context.Context, leaseId int64) error {
if leaseId <= 0 {
return nil
}
_, err := cli.lease.Revoke(ctx, clientv3.LeaseID(leaseId))
return err
}
func (cli *Cli) Keepalive(ctx context.Context, key, value string, ttl int64) (int64, error) {
resp, err := cli.lease.Grant(ctx, ttl)
if err != nil {
return 0, err
}
_, err = cli.kv.Put(ctx, key, value, clientv3.WithLease(resp.ID))
if err != nil {
return 0, err
}
// the key 'foo' will be kept forever
ch, err := cli.lease.KeepAlive(context.Background(), resp.ID)
if err != nil {
return 0, err
}
go keepaliveHandle(key, ch)
return int64(resp.ID), nil
}
// handle keep alive
func keepaliveHandle(key string, ch <-chan *clientv3.LeaseKeepAliveResponse) {
for {
select {
case c := <-ch:
if c == nil {
log.Warnf("the keep alive key:%s has closed", key)
return
}
}
}
}
func (cli *Cli) Watch(key string) *WatchKeyResponse {
watcher := clientv3.NewWatcher(cli.c)
watchChan := watcher.Watch(context.Background(), key)
keyChangeCh := make(chan *KeyChange, defaultKeyChangeSize)
// start watch
go keyChangeHandle(key, watchChan, keyChangeCh)
return &WatchKeyResponse{
Watcher: watcher,
KeyChangeCh: keyChangeCh,
}
}
func (cli *Cli) WatchWithPrefix(prefix string) *WatchKeyResponse {
watcher := clientv3.NewWatcher(cli.c)
watchChan := watcher.Watch(context.Background(), prefix, clientv3.WithPrefix())
keyChangeCh := make(chan *KeyChange, defaultKeyChangeSize)
// start watch
go keyChangeHandle(prefix, watchChan, keyChangeCh)
return &WatchKeyResponse{
Watcher: watcher,
KeyChangeCh: keyChangeCh,
}
}
func keyChangeHandle(prefix string, watchChan clientv3.WatchChan, keyChangeCh chan *KeyChange) {
for {
select {
case ch, ok := <-watchChan:
if !ok {
log.Warnf("the watch prefix key:%s has cancel", prefix)
keyChangeCh <- &KeyChange{
Event: KeyCancelChangeEvent,
Key: prefix,
}
return
}
for _, event := range ch.Events {
keyChangeEventHandle(event, keyChangeCh)
}
}
}
}
func keyChangeEventHandle(event *clientv3.Event, ch chan *KeyChange) {
c := &KeyChange{
Key: string(event.Kv.Key),
Value: "",
}
switch event.Type {
case mvccpb.PUT:
c.Value = string(event.Kv.Value)
c.Event = KeyCreateChangeEvent
if event.IsModify() {
c.Event = KeyUpdateChangeEvent
}
case mvccpb.DELETE:
c.Event = KeyDeleteChangeEvent
}
ch <- c
}
// campaign become leader
func (cli *Cli) Campaign(ctx context.Context, id, prefix string, ttl int64) error {
// create a session
session, err := concurrency.NewSession(cli.c, concurrency.WithTTL(int(ttl)))
if err != nil {
log.Errorf("new session fail,id:%s,prefix:%s,%+v", id, prefix, err)
return err
}
election := concurrency.NewElection(session, prefix)
// protect the elections map (guarded by the embedded RWMutex)
cli.Lock()
cli.elections[prefix] = election
cli.Unlock()
return election.Campaign(ctx, id)
}
func (cli *Cli) getElection(prefix string) (*concurrency.Election, error) {
cli.Lock()
defer cli.Unlock()
election := cli.elections[prefix]
if election != nil {
return election, nil
}
// create a session
session, err := concurrency.NewSession(cli.c)
if err != nil {
log.Errorf("new session fail,prefix:%s,%+v", prefix, err)
return nil, err
}
election = concurrency.NewElection(session, prefix)
cli.elections[prefix] = election
return election, nil
}
// find leader
func (cli *Cli) Leader(ctx context.Context, prefix string) (id string, err error) {
election, err := cli.getElection(prefix)
if err != nil {
return
}
resp, err := election.Leader(ctx)
if err != nil {
return
}
return string(resp.Kvs[0].Value), nil
}
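// exampleUsage is an illustrative sketch, not part of the original source,
// showing the intended flow of this client: connect, write a key with a TTL
// and watch a prefix for changes. The endpoint and keys are placeholders.
func exampleUsage() error {
	cli, err := NewEtcdCli(&CliConfig{
		Endpoints:   []string{"127.0.0.1:2379"}, // assumed local etcd endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		return err
	}
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	if _, err = cli.PutWithTTL(ctx, "/pink/demo", "v1", 10); err != nil {
		return err
	}
	resp := cli.WatchWithPrefix("/pink/")
	go func() {
		for change := range resp.KeyChangeCh {
			// react to create/update/delete/cancel events here
			_ = change
		}
	}()
	return nil
}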
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
mixer/test/client/env/envoy.go
|
// Copyright 2017 Istio Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package env
import (
"fmt"
"log"
"os"
"os/exec"
"path/filepath"
"strconv"
"istio.io/istio/tests/util"
)
// Envoy stores data for Envoy process
type Envoy struct {
cmd *exec.Cmd
ports *Ports
}
// NewEnvoy creates a new Envoy struct and starts envoy.
func (s *TestSetup) NewEnvoy(stress bool, filtersBeforeMixer string, mfConf *MixerFilterConf, ports *Ports, epoch int,
confVersion string, disableHotRestart bool) (*Envoy, error) {
confPath := filepath.Join(util.IstioOut, fmt.Sprintf("config.conf.%v.json", ports.AdminPort))
log.Printf("Envoy config: in %v\n", confPath)
if err := s.CreateEnvoyConf(confPath, stress, filtersBeforeMixer, mfConf, ports, confVersion); err != nil {
return nil, err
}
debugLevel := os.Getenv("ENVOY_DEBUG")
if len(debugLevel) == 0 {
debugLevel = "info"
}
// Don't use hot-start, each Envoy re-start use different base-id
args := []string{"-c", confPath,
"--base-id", strconv.Itoa(int(ports.AdminPort) + epoch)}
if stress {
args = append(args, "--concurrency", "10")
} else {
// debug is far too verbose.
args = append(args, "-l", debugLevel, "--concurrency", "1")
}
if disableHotRestart {
args = append(args, "--disable-hot-restart")
}
if s.EnvoyParams != nil {
args = append(args, s.EnvoyParams...)
}
/* #nosec */
envoyPath := filepath.Join(util.IstioBin, "envoy")
cmd := exec.Command(envoyPath, args...)
cmd.Stderr = os.Stderr
cmd.Stdout = os.Stdout
return &Envoy{
cmd: cmd,
ports: ports,
}, nil
}
// Start starts the envoy process
func (s *Envoy) Start() error {
err := s.cmd.Start()
if err != nil {
return err
}
url := fmt.Sprintf("http://localhost:%v/server_info", s.ports.AdminPort)
WaitForHTTPServer(url)
return nil
}
// Stop stops the envoy process
func (s *Envoy) Stop() error {
log.Printf("Kill Envoy ...\n")
err := s.cmd.Process.Kill()
log.Printf("Kill Envoy ... Done\n")
return err
}
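// exampleDebugLevel is an illustrative sketch, not part of the original
// source, isolating the ENVOY_DEBUG handling used in NewEnvoy above: the
// environment variable overrides the default "info" log level passed to
// Envoy through the -l flag.
func exampleDebugLevel() string {
	level := os.Getenv("ENVOY_DEBUG")
	if level == "" {
		level = "info"
	}
	return level
}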
|
[
"\"ENVOY_DEBUG\""
] |
[] |
[
"ENVOY_DEBUG"
] |
[]
|
["ENVOY_DEBUG"]
|
go
| 1 | 0 | |
google-datacatalog-oracle-connector/system_tests/execution_results_test.py
|
#!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from google.cloud import datacatalog_v1beta1
datacatalog = datacatalog_v1beta1.DataCatalogClient()
class ExecutionResultsTest(unittest.TestCase):
def test_oracle_entries_should_exist_after_connector_execution(self):
query = 'system=oracle'
scope = datacatalog_v1beta1.types.SearchCatalogRequest.Scope()
scope.include_project_ids.append(
os.environ['ORACLE2DC_DATACATALOG_PROJECT_ID'])
search_results = [
result for result in datacatalog.search_catalog(
scope=scope, query=query, order_by='relevance', page_size=1000)
]
self.assertGreater(len(search_results), 0)
|
[] |
[] |
[
"ORACLE2DC_DATACATALOG_PROJECT_ID"
] |
[]
|
["ORACLE2DC_DATACATALOG_PROJECT_ID"]
|
python
| 1 | 0 | |
strava_analysis/__main__.py
|
import os
import sys
import get_data.get_data as get_data
import analyze_data.analyze_data as analyze_data
#import strava_analysis.get_data.get_data as get_data
#import strava_analysis.analyze_data.analyze_data as analyze_data
def main():
if sys.platform == "linux" or sys.platform == "linux2":
# linux
# define pre-existing variables
os.environ['historic_activities_raw'] =r'/Users/anna/Google Drive/Projects/strava_analysis/strava_analysis/data/historic_activities_raw.csv'
elif sys.platform == "darwin":
# OS X
# define pre-existing variables
os.environ['historic_activities_raw'] =r'/Users/anna/Google Drive/Projects/strava_analysis/strava_analysis/data/historic_activities_raw.csv'
elif sys.platform == "win32":
# Windows...
# define pre-existing variables
os.environ['historic_activities_raw'] =r'C:\Users\annab\Google Drive\Projects\strava_analysis\strava_analysis\data\historic_activities_raw.csv'
# gets the strava data via api requests and saves it to csv
get_data.run()
# analyzes the data from strava
analyze_data.run()
if __name__ == '__main__':
main()
|
[] |
[] |
[
"historic_activities_raw"
] |
[]
|
["historic_activities_raw"]
|
python
| 1 | 0 | |
instagram/wsgi.py
|
"""
WSGI config for instagram project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'instagram.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 |