filename | content | environment | variablearg | constarg | variableargjson | constargjson | lang | constargcount | variableargcount | sentence |
---|---|---|---|---|---|---|---|---|---|---|
docs/source/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# estimagic documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 18 10:59:27 2019.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# Add custom CSS
def setup(app):
app.add_stylesheet("css/custom.css")
sys.path.insert(0, os.path.abspath("../.."))
# Set variable so that todos are shown in local build
on_rtd = os.environ.get("READTHEDOCS") == "True"
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.extlinks",
"sphinx.ext.intersphinx",
"sphinx.ext.mathjax",
"sphinx.ext.viewcode",
"sphinx.ext.napoleon",
"nbsphinx",
"sphinxcontrib.bibtex",
]
autodoc_member_order = "bysource"
autodoc_mock_imports = [
"bokeh",
"cloudpickle",
"fuzzywuzzy",
"joblib",
"numba",
"numdifftools",
"numpy",
"pandas",
"pytest",
"pygmo",
"scipy",
"sqlalchemy",
"tornado",
"petsc4py",
]
extlinks = {
"ghuser": ("https://github.com/%s", "@"),
"gh": ("https://github.com/OpenSourceEconomics/estimagic/pulls/%s", "#"),
}
intersphinx_mapping = {
"numpy": ("https://docs.scipy.org/doc/numpy", None),
"pandas": ("https://pandas.pydata.org/pandas-docs/stable", None),
"python": ("https://docs.python.org/3.6", None),
}
linkcheck_ignore = [
r"https://tinyurl\.com/*.",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
html_static_path = ["_static"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "estimagic"
copyright = "2020, Janos Gabler" # noqa
author = "Janos Gabler"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = "0.0.30"
version = ".".join(release.split(".")[:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "**.ipynb_checkpoints"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
if on_rtd:
pass
else:
todo_include_todos = True
todo_emit_warnings = True
# -- Options for nbsphinx ----------------------------------------
# Execute notebooks before conversion: 'always', 'never', 'auto' (default)
nbsphinx_execute = "never"
nbsphinx_prolog = r"""
{% set docname = 'docs/source/' + env.doc2path(env.docname, base=None) %}
.. only:: html
.. nbinfo::
Download the notebook :download:`here <https://nbviewer.jupyter.org/github/OpenSourceEconomics/estimagic/blob/master/{{ docname }}>`!
"""
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"] # noqa: E800
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
"**": [
"relations.html", # needs 'show_related': True theme option to display
"searchbox.html",
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "estimagicdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
"papersize": "letterpaper",
# The font size ('10pt', '11pt' or '12pt').
"pointsize": "11pt",
# Latex figure (float) alignment
"figure_align": "htbp",
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "estimagic.tex", "estimagic Documentation", "Janos Gabler", "manual")
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "estimagic", "estimagic Documentation", [author], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"estimagic",
"estimagic Documentation",
author,
"estimagic",
"One line description of project.",
"Miscellaneous",
)
]
| [] | [] | ["READTHEDOCS"] | [] | ["READTHEDOCS"] | python | 1 | 0 | |
cmd/abapEnvironmentAssembleConfirm_generated.go | // Code generated by piper's step-generator. DO NOT EDIT.
package cmd
import (
"fmt"
"os"
"path/filepath"
"time"
"github.com/SAP/jenkins-library/pkg/config"
"github.com/SAP/jenkins-library/pkg/log"
"github.com/SAP/jenkins-library/pkg/piperenv"
"github.com/SAP/jenkins-library/pkg/splunk"
"github.com/SAP/jenkins-library/pkg/telemetry"
"github.com/spf13/cobra"
)
type abapEnvironmentAssembleConfirmOptions struct {
CfAPIEndpoint string `json:"cfApiEndpoint,omitempty"`
CfOrg string `json:"cfOrg,omitempty"`
CfSpace string `json:"cfSpace,omitempty"`
CfServiceInstance string `json:"cfServiceInstance,omitempty"`
CfServiceKeyName string `json:"cfServiceKeyName,omitempty"`
Host string `json:"host,omitempty"`
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
AddonDescriptor string `json:"addonDescriptor,omitempty"`
MaxRuntimeInMinutes int `json:"maxRuntimeInMinutes,omitempty"`
}
type abapEnvironmentAssembleConfirmCommonPipelineEnvironment struct {
abap struct {
addonDescriptor string
}
}
func (p *abapEnvironmentAssembleConfirmCommonPipelineEnvironment) persist(path, resourceName string) {
content := []struct {
category string
name string
value interface{}
}{
{category: "abap", name: "addonDescriptor", value: p.abap.addonDescriptor},
}
errCount := 0
for _, param := range content {
err := piperenv.SetResourceParameter(path, resourceName, filepath.Join(param.category, param.name), param.value)
if err != nil {
log.Entry().WithError(err).Error("Error persisting piper environment.")
errCount++
}
}
if errCount > 0 {
log.Entry().Fatal("failed to persist Piper environment")
}
}
// AbapEnvironmentAssembleConfirmCommand Confirm the Delivery of Assembly for installation, support package or patch in SAP Cloud Platform ABAP Environment system
func AbapEnvironmentAssembleConfirmCommand() *cobra.Command {
const STEP_NAME = "abapEnvironmentAssembleConfirm"
metadata := abapEnvironmentAssembleConfirmMetadata()
var stepConfig abapEnvironmentAssembleConfirmOptions
var startTime time.Time
var commonPipelineEnvironment abapEnvironmentAssembleConfirmCommonPipelineEnvironment
var logCollector *log.CollectorHook
var createAbapEnvironmentAssembleConfirmCmd = &cobra.Command{
Use: STEP_NAME,
Short: "Confirm the Delivery of Assembly for installation, support package or patch in SAP Cloud Platform ABAP Environment system",
Long: `This step confirms the assemblies of provided [installations, support packages or patches] in SAP Cloud Platform ABAP Environment system`,
PreRunE: func(cmd *cobra.Command, _ []string) error {
startTime = time.Now()
log.SetStepName(STEP_NAME)
log.SetVerbose(GeneralConfig.Verbose)
path, _ := os.Getwd()
fatalHook := &log.FatalHook{CorrelationID: GeneralConfig.CorrelationID, Path: path}
log.RegisterHook(fatalHook)
err := PrepareConfig(cmd, &metadata, STEP_NAME, &stepConfig, config.OpenPiperFile)
if err != nil {
log.SetErrorCategory(log.ErrorConfiguration)
return err
}
log.RegisterSecret(stepConfig.Username)
log.RegisterSecret(stepConfig.Password)
if len(GeneralConfig.HookConfig.SentryConfig.Dsn) > 0 {
sentryHook := log.NewSentryHook(GeneralConfig.HookConfig.SentryConfig.Dsn, GeneralConfig.CorrelationID)
log.RegisterHook(&sentryHook)
}
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
logCollector = &log.CollectorHook{CorrelationID: GeneralConfig.CorrelationID}
log.RegisterHook(logCollector)
}
return nil
},
Run: func(_ *cobra.Command, _ []string) {
telemetryData := telemetry.CustomData{}
telemetryData.ErrorCode = "1"
handler := func() {
config.RemoveVaultSecretFiles()
commonPipelineEnvironment.persist(GeneralConfig.EnvRootPath, "commonPipelineEnvironment")
telemetryData.Duration = fmt.Sprintf("%v", time.Since(startTime).Milliseconds())
telemetryData.ErrorCategory = log.GetErrorCategory().String()
telemetry.Send(&telemetryData)
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunk.Send(&telemetryData, logCollector)
}
}
log.DeferExitHandler(handler)
defer handler()
telemetry.Initialize(GeneralConfig.NoTelemetry, STEP_NAME)
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunk.Initialize(GeneralConfig.CorrelationID,
GeneralConfig.HookConfig.SplunkConfig.Dsn,
GeneralConfig.HookConfig.SplunkConfig.Token,
GeneralConfig.HookConfig.SplunkConfig.Index,
GeneralConfig.HookConfig.SplunkConfig.SendLogs)
}
abapEnvironmentAssembleConfirm(stepConfig, &telemetryData, &commonPipelineEnvironment)
telemetryData.ErrorCode = "0"
log.Entry().Info("SUCCESS")
},
}
addAbapEnvironmentAssembleConfirmFlags(createAbapEnvironmentAssembleConfirmCmd, &stepConfig)
return createAbapEnvironmentAssembleConfirmCmd
}
func addAbapEnvironmentAssembleConfirmFlags(cmd *cobra.Command, stepConfig *abapEnvironmentAssembleConfirmOptions) {
cmd.Flags().StringVar(&stepConfig.CfAPIEndpoint, "cfApiEndpoint", os.Getenv("PIPER_cfApiEndpoint"), "Cloud Foundry API endpoint")
cmd.Flags().StringVar(&stepConfig.CfOrg, "cfOrg", os.Getenv("PIPER_cfOrg"), "Cloud Foundry target organization")
cmd.Flags().StringVar(&stepConfig.CfSpace, "cfSpace", os.Getenv("PIPER_cfSpace"), "Cloud Foundry target space")
cmd.Flags().StringVar(&stepConfig.CfServiceInstance, "cfServiceInstance", os.Getenv("PIPER_cfServiceInstance"), "Cloud Foundry Service Instance")
cmd.Flags().StringVar(&stepConfig.CfServiceKeyName, "cfServiceKeyName", os.Getenv("PIPER_cfServiceKeyName"), "Cloud Foundry Service Key")
cmd.Flags().StringVar(&stepConfig.Host, "host", os.Getenv("PIPER_host"), "Specifies the host address of the SAP Cloud Platform ABAP Environment system")
cmd.Flags().StringVar(&stepConfig.Username, "username", os.Getenv("PIPER_username"), "User for either the Cloud Foundry API or the Communication Arrangement for SAP_COM_0582")
cmd.Flags().StringVar(&stepConfig.Password, "password", os.Getenv("PIPER_password"), "Password for either the Cloud Foundry API or the Communication Arrangement for SAP_COM_0582")
cmd.Flags().StringVar(&stepConfig.AddonDescriptor, "addonDescriptor", os.Getenv("PIPER_addonDescriptor"), "Structure in the commonPipelineEnvironment containing information about the Product Version and corresponding Software Component Versions")
cmd.Flags().IntVar(&stepConfig.MaxRuntimeInMinutes, "maxRuntimeInMinutes", 360, "maximal runtime of the step")
cmd.MarkFlagRequired("username")
cmd.MarkFlagRequired("password")
cmd.MarkFlagRequired("addonDescriptor")
cmd.MarkFlagRequired("maxRuntimeInMinutes")
}
// retrieve step metadata
func abapEnvironmentAssembleConfirmMetadata() config.StepData {
var theMetaData = config.StepData{
Metadata: config.StepMetadata{
Name: "abapEnvironmentAssembleConfirm",
Aliases: []config.Alias{},
Description: "Confirm the Delivery of Assembly for installation, support package or patch in SAP Cloud Platform ABAP Environment system",
},
Spec: config.StepSpec{
Inputs: config.StepInputs{
Parameters: []config.StepParameters{
{
Name: "cfApiEndpoint",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "cloudFoundry/apiEndpoint"}},
},
{
Name: "cfOrg",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "cloudFoundry/org"}},
},
{
Name: "cfSpace",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "cloudFoundry/space"}},
},
{
Name: "cfServiceInstance",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "cloudFoundry/serviceInstance"}},
},
{
Name: "cfServiceKeyName",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "cloudFoundry/serviceKey"}, {Name: "cloudFoundry/serviceKeyName"}, {Name: "cfServiceKey"}},
},
{
Name: "host",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "username",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
},
{
Name: "password",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
},
{
Name: "addonDescriptor",
ResourceRef: []config.ResourceReference{
{
Name: "commonPipelineEnvironment",
Param: "abap/addonDescriptor",
},
},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
},
{
Name: "maxRuntimeInMinutes",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "int",
Mandatory: true,
Aliases: []config.Alias{},
},
},
},
Containers: []config.Container{
{Name: "cf", Image: "ppiper/cf-cli:7"},
},
Outputs: config.StepOutputs{
Resources: []config.StepResources{
{
Name: "commonPipelineEnvironment",
Type: "piperEnvironment",
Parameters: []map[string]interface{}{
{"Name": "abap/addonDescriptor"},
},
},
},
},
},
}
return theMetaData
}
| ["\"PIPER_cfApiEndpoint\"", "\"PIPER_cfOrg\"", "\"PIPER_cfSpace\"", "\"PIPER_cfServiceInstance\"", "\"PIPER_cfServiceKeyName\"", "\"PIPER_host\"", "\"PIPER_username\"", "\"PIPER_password\"", "\"PIPER_addonDescriptor\""] | [] | ["PIPER_addonDescriptor", "PIPER_cfSpace", "PIPER_host", "PIPER_cfApiEndpoint", "PIPER_password", "PIPER_username", "PIPER_cfServiceInstance", "PIPER_cfServiceKeyName", "PIPER_cfOrg"] | [] | ["PIPER_addonDescriptor", "PIPER_cfSpace", "PIPER_host", "PIPER_cfApiEndpoint", "PIPER_password", "PIPER_username", "PIPER_cfServiceInstance", "PIPER_cfServiceKeyName", "PIPER_cfOrg"] | go | 9 | 0 | |
tools/test_init.py | # encoding: utf-8
"""
@author: sherlock
@contact: [email protected]
"""
import argparse
import os
import sys
from os import mkdir
import torch
from torch.backends import cudnn
sys.path.append('.')
from config import cfg
from data import make_data_loader
from engine.inference import inference
from modeling import build_model
from utils.logger import setup_logger
import functions
def main():
parser = argparse.ArgumentParser(description="ReID Baseline Inference")
parser.add_argument(
"--config_file", default="", help="path to config file", type=str
)
parser.add_argument("opts", help="Modify config options using the command-line", default=None,
nargs=argparse.REMAINDER)
args = parser.parse_args()
num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
if args.config_file != "":
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
output_dir = cfg.OUTPUT_DIR
if output_dir and not os.path.exists(output_dir):
mkdir(output_dir)
logger = setup_logger("reid_baseline", output_dir, 0)
logger.info("Using {} GPUS".format(num_gpus))
logger.info(args)
if args.config_file != "":
logger.info("Loaded configuration file {}".format(args.config_file))
with open(args.config_file, 'r') as cf:
config_str = "\n" + cf.read()
logger.info(config_str)
logger.info("Running with config:\n{}".format(cfg))
if cfg.MODEL.DEVICE == "cuda":
os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
cudnn.benchmark = True
#train_loader, val_loader, num_query, num_classes = make_data_loader(cfg)
#model = build_model(cfg, num_classes)
#model.load_param(cfg.TEST.WEIGHT)
train_loader, val_loader, num_query, num_classes, num_classes2, image_map_label2 = make_data_loader(cfg)
model = build_model(cfg, num_classes, num_classes2)
print('--- resume from ', cfg.MODEL.PRETRAIN_PATH2)
if cfg.MODEL.ONCE_LOAD == 'yes':
print('\n---ONCE_LOAD...\n')
model.load_state_dict(torch.load(cfg.MODEL.PRETRAIN_PATH2, map_location=lambda storage, loc: storage))
else:
functions.load_state_dict(model, cfg.MODEL.PRETRAIN_PATH2, cfg.MODEL.ONLY_BASE, cfg.MODEL.WITHOUT_FC)
inference(cfg, model, val_loader, num_query)
if __name__ == '__main__':
main()
| [] | [] | ["CUDA_VISIBLE_DEVICES", "WORLD_SIZE"] | [] | ["CUDA_VISIBLE_DEVICES", "WORLD_SIZE"] | python | 2 | 0 | |
solutions/Neo4jSolution/src/main/java/ttc2018/Solution.java | package ttc2018;
import com.google.common.collect.Iterators;
import org.neo4j.configuration.GraphDatabaseSettings;
import org.neo4j.dbms.api.DatabaseManagementService;
import org.neo4j.dbms.api.DatabaseManagementServiceBuilder;
import org.neo4j.exceptions.KernelException;
import org.neo4j.graphdb.*;
import org.neo4j.io.fs.FileUtils;
import org.neo4j.kernel.api.procedure.GlobalProcedures;
import org.neo4j.kernel.internal.GraphDatabaseAPI;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.regex.Pattern;
import java.util.stream.Stream;
import static ttc2018.Labels.*;
import static ttc2018.Query.ID_COLUMN_NAME;
import static ttc2018.Query.SCORE_COLUMN_NAME;
import static ttc2018.RelationshipTypes.*;
public abstract class Solution implements AutoCloseable {
DatabaseManagementService managementService;
GraphDatabaseService graphDb;
public abstract String Initial();
/**
* Update reading changes from CSV file
*/
public abstract String Update(File changes);
private final static String NEO4J_HOME = System.getenv("NEO4J_HOME");
private final static Path DB_DIR = new File(NEO4J_HOME + "/data").toPath();
private final static String LOAD_SCRIPT = "load-scripts/load.sh";
private String DataPath;
Solution(String DataPath) throws IOException, InterruptedException {
this.DataPath = new File(DataPath).getCanonicalPath();
}
public GraphDatabaseService getDbConnection() {
if (graphDb == null) {
try {
initializeDb();
} catch (KernelException e) {
throw new RuntimeException(e);
}
}
return graphDb;
}
protected void initializeDb() throws KernelException {
managementService = new DatabaseManagementServiceBuilder(new File(NEO4J_HOME).toPath())
.setConfig(GraphDatabaseSettings.procedure_unrestricted, List.of("apoc.*", "gds.*"))
.build();
graphDb = managementService.database(GraphDatabaseSettings.DEFAULT_DATABASE_NAME);
Runtime.getRuntime().addShutdownHook(new Thread(this::close));
}
@Override
public void close() {
if (managementService != null) {
managementService.shutdown();
managementService = null;
}
}
// https://github.com/neo4j-contrib/neo4j-apoc-procedures/blob/3.5/src/test/java/apoc/util/TestUtil.java#L95
public static void registerProcedure(GraphDatabaseService db, Class<?>... procedures) throws KernelException {
GlobalProcedures proceduresService = ((GraphDatabaseAPI) db).getDependencyResolver().resolveDependency(GlobalProcedures.class);
for (Class<?> procedure : procedures) {
proceduresService.registerProcedure(procedure, true);
proceduresService.registerFunction(procedure, true);
proceduresService.registerAggregationFunction(procedure, true);
}
}
String runReadQuery(Query q) {
return runReadQuery(q, Collections.emptyMap());
}
String runReadQuery(Query q, Map<String, Object> parameters) {
try (Transaction tx = graphDb.beginTx()) {
return runReadQuery(tx, q, parameters);
}
}
protected static final int resultLimit = 3;
String runReadQuery(Transaction tx, Query q, Map<String, Object> parameters) {
try (Result rs = q.execute(tx, parameters)) {
List<String> result = new ArrayList<>();
int rowCount = 0;
while (rs.hasNext()) {
Map<String, Object> row = rs.next();
String id = row.get(ID_COLUMN_NAME).toString();
if (LiveContestDriver.ShowScoresForValidation) {
result.add(String.format("%1$s,%2$s", id, row.get(SCORE_COLUMN_NAME)));
} else {
result.add(id);
}
++rowCount;
if (rowCount >= resultLimit)
break;
}
return String.join("|", result);
}
}
void runAndCommitVoidQuery(Query q) {
runAndCommitVoidQuery(q, Collections.emptyMap());
}
void runAndCommitVoidQuery(Query q, Map<String, Object> parameters) {
try (Transaction tx = graphDb.beginTx()) {
runVoidQuery(tx, q, parameters);
tx.commit();
}
}
void runVoidQuery(Transaction tx, Query q, Map<String, Object> parameters) {
try (Result rs = q.execute(tx, parameters)) {
rs.accept(row -> true);
}
}
void loadData() throws IOException, InterruptedException {
if (System.getenv("NEO4J_HOME") == null)
throw new RuntimeException("$NEO4J_HOME is not defined.");
// delete previous DB
FileUtils.deleteDirectory(DB_DIR);
ProcessBuilder pb = new ProcessBuilder(LOAD_SCRIPT);
Map<String, String> env = pb.environment();
env.put("NEO4J_DATA_DIR", DataPath);
File log = new File("log.txt");
pb.redirectErrorStream(true);
pb.redirectOutput(ProcessBuilder.Redirect.appendTo(log));
Process p = pb.start();
p.waitFor();
// DB initialization
GraphDatabaseService dbConnection = getDbConnection();
// add uniqueness constraints and indices
try (Transaction tx = dbConnection.beginTx()) {
addConstraintsAndIndicesInTx(tx);
tx.commit();
}
try (Transaction tx = dbConnection.beginTx()) {
tx.schema().awaitIndexesOnline(Long.MAX_VALUE, TimeUnit.NANOSECONDS); // TODO: meaningful timeout
tx.commit();
}
}
protected void addConstraintsAndIndicesInTx(Transaction tx) {
for (Labels label : Labels.values()) {
tx.schema()
.constraintFor(label)
.assertPropertyIsUnique(NODE_ID_PROPERTY)
.create();
}
}
void beforeUpdate(File changes) {
try (Transaction tx = getDbConnection().beginTx()) {
processChangeSet(tx, changes);
tx.commit();
}
}
public static final String SEPARATOR = "|";
public static final String COMMENTS_CHANGE_TYPE = "Comments";
public static final String NODE_ID_PROPERTY = "id";
public static final String USER_NAME_PROPERTY = "name";
public static final String SUBMISSION_TIMESTAMP_PROPERTY = "timestamp";
public static final String SUBMISSION_CONTENT_PROPERTY = "content";
public static final String SUBMISSION_SCORE_PROPERTY = "score";
public static final long SUBMISSION_SCORE_DEFAULT = 0L;
public static final String FRIEND_OVERLAY_EDGE_COMMENT_ID_PROPERTY = "commentId";
private void processChangeSet(Transaction tx, File changeSet) {
try (Stream<String> stream = Files.lines(changeSet.toPath())) {
stream.forEachOrdered(s -> {
String[] line = s.split(Pattern.quote(SEPARATOR));
switch (line[0]) {
case "Posts":
case COMMENTS_CHANGE_TYPE: {
long id = Long.parseLong(line[1]);
addSubmissionVertex(tx, line);
break;
}
case "Friends": {
// add edges only once
if (Long.parseLong(line[1]) <= Long.parseLong(line[2])) {
addFriendEdge(tx, line);
}
break;
}
case "Likes": {
addLikesEdge(tx, line);
break;
}
case "Users": {
addUserVertex(tx, line);
break;
}
default:
throw new RuntimeException("Invalid record type received from CSV input: " + line[0]);
}
});
} catch (IOException e) {
throw new RuntimeException(e);
}
}
protected Node addSubmissionVertex(Transaction tx, String[] line) {
long id = Long.parseLong(line[1]);
String timestamp = line[2];
String content = line[3];
long submitterId = Long.parseLong(line[4]);
Node submitter = findSingleNodeByIdProperty(tx, User, submitterId);
Label[] labels = line[0].equals(COMMENTS_CHANGE_TYPE) ? CommentLabelSet : PostLabelSet;
Node submission = tx.createNode(labels);
submission.setProperty(NODE_ID_PROPERTY, id);
submission.setProperty(SUBMISSION_TIMESTAMP_PROPERTY, timestamp);
submission.setProperty(SUBMISSION_CONTENT_PROPERTY, content);
submission.createRelationshipTo(submitter, SUBMITTER);
if (line[0].equals(COMMENTS_CHANGE_TYPE)) {
long previousSubmissionId = Long.parseLong(line[5]);
Node previousSubmission = findSingleNodeByIdProperty(tx, Submission, previousSubmissionId);
submission.createRelationshipTo(previousSubmission, COMMENT_TO);
afterNewComment(tx, submission, submitter, previousSubmission);
} else {
afterNewPost(tx, submission, submitter);
}
return submission;
}
protected void afterNewComment(Transaction tx, Node comment, Node submitter, Node previousSubmission) {
}
protected void afterNewPost(Transaction tx, Node post, Node submitter) {
}
protected Relationship addFriendEdge(Transaction tx, String[] line) {
return insertEdge(tx, line, FRIEND, User, User);
}
protected Relationship addLikesEdge(Transaction tx, String[] line) {
return insertEdge(tx, line, LIKES, User, Comment);
}
protected Node addUserVertex(Transaction tx, String[] line) {
long id = Long.parseLong(line[1]);
String name = line[2];
Node user = tx.createNode(User);
user.setProperty(NODE_ID_PROPERTY, id);
user.setProperty(USER_NAME_PROPERTY, name);
return user;
}
private Node findSingleNodeByIdProperty(Transaction tx, Labels label, long id) {
try (ResourceIterator<Node> nodes = tx.findNodes(label, NODE_ID_PROPERTY, id)) {
return Iterators.getOnlyElement(nodes);
}
}
private Relationship insertEdge(Transaction tx, String[] line, RelationshipTypes relationshipType, Labels sourceLabel, Labels targetLabel) {
long sourceId = Long.parseLong(line[1]);
long targetId = Long.parseLong(line[2]);
Node source = findSingleNodeByIdProperty(tx, sourceLabel, sourceId);
Node target = findSingleNodeByIdProperty(tx, targetLabel, targetId);
return source.createRelationshipTo(target, relationshipType);
}
}
| ["\"NEO4J_HOME\"", "\"NEO4J_HOME\""] | [] | ["NEO4J_HOME"] | [] | ["NEO4J_HOME"] | java | 1 | 0 | |
net/net_linux_test.go | package net
import (
"fmt"
"io/ioutil"
"net"
"os"
"strings"
"syscall"
"testing"
"github.com/f00stx/gopsutil/internal/common"
"github.com/stretchr/testify/assert"
)
func TestIOCountersByFileParsing(t *testing.T) {
// Prepare a temporary file, which will be read during the test
tmpfile, err := ioutil.TempFile("", "proc_dev_net")
defer os.Remove(tmpfile.Name()) // clean up
assert.Nil(t, err, "Temporary file creation failed: ", err)
cases := [4][2]string{
[2]string{"eth0: ", "eth1: "},
[2]string{"eth0:0: ", "eth1:0: "},
[2]string{"eth0:", "eth1:"},
[2]string{"eth0:0:", "eth1:0:"},
}
for _, testCase := range cases {
err = tmpfile.Truncate(0)
assert.Nil(t, err, "Temporary file truncating problem: ", err)
// Parse interface name for assertion
interface0 := strings.TrimSpace(testCase[0])
interface0 = interface0[:len(interface0)-1]
interface1 := strings.TrimSpace(testCase[1])
interface1 = interface1[:len(interface1)-1]
// Replace the interfaces from the test case
proc := []byte(fmt.Sprintf("Inter-| Receive | Transmit\n face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed\n %s1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16\n %s100 200 300 400 500 600 700 800 900 1000 1100 1200 1300 1400 1500 1600\n", testCase[0], testCase[1]))
// Write /proc/net/dev sample output
_, err = tmpfile.Write(proc)
assert.Nil(t, err, "Temporary file writing failed: ", err)
counters, err := IOCountersByFile(true, tmpfile.Name())
assert.Nil(t, err)
assert.NotEmpty(t, counters)
assert.Equal(t, 2, len(counters))
assert.Equal(t, interface0, counters[0].Name)
assert.Equal(t, 1, int(counters[0].BytesRecv))
assert.Equal(t, 2, int(counters[0].PacketsRecv))
assert.Equal(t, 3, int(counters[0].Errin))
assert.Equal(t, 4, int(counters[0].Dropin))
assert.Equal(t, 5, int(counters[0].Fifoin))
assert.Equal(t, 9, int(counters[0].BytesSent))
assert.Equal(t, 10, int(counters[0].PacketsSent))
assert.Equal(t, 11, int(counters[0].Errout))
assert.Equal(t, 12, int(counters[0].Dropout))
assert.Equal(t, 13, int(counters[0].Fifoout))
assert.Equal(t, interface1, counters[1].Name)
assert.Equal(t, 100, int(counters[1].BytesRecv))
assert.Equal(t, 200, int(counters[1].PacketsRecv))
assert.Equal(t, 300, int(counters[1].Errin))
assert.Equal(t, 400, int(counters[1].Dropin))
assert.Equal(t, 500, int(counters[1].Fifoin))
assert.Equal(t, 900, int(counters[1].BytesSent))
assert.Equal(t, 1000, int(counters[1].PacketsSent))
assert.Equal(t, 1100, int(counters[1].Errout))
assert.Equal(t, 1200, int(counters[1].Dropout))
assert.Equal(t, 1300, int(counters[1].Fifoout))
}
err = tmpfile.Close()
assert.Nil(t, err, "Temporary file closing failed: ", err)
}
func TestGetProcInodesAll(t *testing.T) {
waitForServer := make(chan bool)
go func() { // TCP listening goroutine to have some opened inodes even in CI
addr, err := net.ResolveTCPAddr("tcp", "localhost:0") // dynamically get a random open port from OS
if err != nil {
t.Skip("unable to resolve localhost:", err)
}
l, err := net.ListenTCP(addr.Network(), addr)
if err != nil {
t.Skip(fmt.Sprintf("unable to listen on %v: %v", addr, err))
}
defer l.Close()
waitForServer <- true
for {
conn, err := l.Accept()
if err != nil {
t.Skip("unable to accept connection:", err)
}
defer conn.Close()
}
}()
<-waitForServer
root := common.HostProc("")
v, err := getProcInodesAll(root, 0)
assert.Nil(t, err)
assert.NotEmpty(t, v)
}
func TestConnectionsMax(t *testing.T) {
if os.Getenv("CI") != "" {
t.Skip("Skip CI")
}
max := 10
v, err := ConnectionsMax("tcp", max)
assert.Nil(t, err)
assert.NotEmpty(t, v)
cxByPid := map[int32]int{}
for _, cx := range v {
if cx.Pid > 0 {
cxByPid[cx.Pid]++
}
}
for _, c := range cxByPid {
assert.True(t, c <= max)
}
}
type AddrTest struct {
IP string
Port int
Error bool
}
func TestDecodeAddress(t *testing.T) {
assert := assert.New(t)
addr := map[string]AddrTest{
"0500000A:0016": {
IP: "10.0.0.5",
Port: 22,
},
"0100007F:D1C2": {
IP: "127.0.0.1",
Port: 53698,
},
"11111:0035": {
Error: true,
},
"0100007F:BLAH": {
Error: true,
},
"0085002452100113070057A13F025401:0035": {
IP: "2400:8500:1301:1052:a157:7:154:23f",
Port: 53,
},
"00855210011307F025401:0035": {
Error: true,
},
}
for src, dst := range addr {
family := syscall.AF_INET
if len(src) > 13 {
family = syscall.AF_INET6
}
addr, err := decodeAddress(uint32(family), src)
if dst.Error {
assert.NotNil(err, src)
} else {
assert.Nil(err, src)
assert.Equal(dst.IP, addr.IP, src)
assert.Equal(dst.Port, int(addr.Port), src)
}
}
}
func TestReverse(t *testing.T) {
src := []byte{0x01, 0x02, 0x03}
assert.Equal(t, []byte{0x03, 0x02, 0x01}, Reverse(src))
}
func TestConntrackStatFileParsing(t *testing.T) {
tmpfile, err := ioutil.TempFile("", "proc_net_stat_conntrack")
defer os.Remove(tmpfile.Name())
assert.Nil(t, err, "Temporary file creation failed: ", err)
data := []byte(`
entries searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error expect_new expect_create expect_delete search_restart
0000007b 00000000 00000000 00000000 000b115a 00000084 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 0000004a
0000007b 00000000 00000000 00000000 0007eee5 00000068 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000035
0000007b 00000000 00000000 00000000 0090346b 00000057 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000025
0000007b 00000000 00000000 00000000 0005920f 00000069 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000064
0000007b 00000000 00000000 00000000 000331ff 00000059 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 0000003b
0000007b 00000000 00000000 00000000 000314ea 00000066 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000054
0000007b 00000000 00000000 00000000 0002b270 00000055 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 0000003d
0000007b 00000000 00000000 00000000 0002f67d 00000057 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000042
`)
// Expected results
slist := NewConntrackStatList()
slist.Append(&ConntrackStat{
Entries: 123,
Searched: 0,
Found: 0,
New: 0,
Invalid: 725338,
Ignore: 132,
Delete: 0,
DeleteList: 0,
Insert: 0,
InsertFailed: 0,
Drop: 0,
EarlyDrop: 0,
IcmpError: 0,
ExpectNew: 0,
ExpectCreate: 0,
ExpectDelete: 0,
SearchRestart: 74,
})
slist.Append(&ConntrackStat{123, 0, 0, 0, 519909, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 53})
slist.Append(&ConntrackStat{123, 0, 0, 0, 9450603, 87, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 37})
slist.Append(&ConntrackStat{123, 0, 0, 0, 365071, 105, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100})
slist.Append(&ConntrackStat{123, 0, 0, 0, 209407, 89, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 59})
slist.Append(&ConntrackStat{123, 0, 0, 0, 201962, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 84})
slist.Append(&ConntrackStat{123, 0, 0, 0, 176752, 85, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 61})
slist.Append(&ConntrackStat{123, 0, 0, 0, 194173, 87, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 66})
// Write data to tempfile
_, err = tmpfile.Write(data)
assert.Nil(t, err, "Temporary file writing failed: ", err)
// Function under test
stats, err := conntrackStatsFromFile(tmpfile.Name(), true)
assert.Equal(t, 8, len(stats), "Expected 8 results")
summary := &ConntrackStat{}
for i, exp := range slist.Items() {
st := stats[i]
assert.Equal(t, exp.Entries, st.Entries)
summary.Entries += st.Entries
assert.Equal(t, exp.Searched, st.Searched)
summary.Searched += st.Searched
assert.Equal(t, exp.Found, st.Found)
summary.Found += st.Found
assert.Equal(t, exp.New, st.New)
summary.New += st.New
assert.Equal(t, exp.Invalid, st.Invalid)
summary.Invalid += st.Invalid
assert.Equal(t, exp.Ignore, st.Ignore)
summary.Ignore += st.Ignore
assert.Equal(t, exp.Delete, st.Delete)
summary.Delete += st.Delete
assert.Equal(t, exp.DeleteList, st.DeleteList)
summary.DeleteList += st.DeleteList
assert.Equal(t, exp.Insert, st.Insert)
summary.Insert += st.Insert
assert.Equal(t, exp.InsertFailed, st.InsertFailed)
summary.InsertFailed += st.InsertFailed
assert.Equal(t, exp.Drop, st.Drop)
summary.Drop += st.Drop
assert.Equal(t, exp.EarlyDrop, st.EarlyDrop)
summary.EarlyDrop += st.EarlyDrop
assert.Equal(t, exp.IcmpError, st.IcmpError)
summary.IcmpError += st.IcmpError
assert.Equal(t, exp.ExpectNew, st.ExpectNew)
summary.ExpectNew += st.ExpectNew
assert.Equal(t, exp.ExpectCreate, st.ExpectCreate)
summary.ExpectCreate += st.ExpectCreate
assert.Equal(t, exp.ExpectDelete, st.ExpectDelete)
summary.ExpectDelete += st.ExpectDelete
assert.Equal(t, exp.SearchRestart, st.SearchRestart)
summary.SearchRestart += st.SearchRestart
}
// Test summary grouping
totals, err := conntrackStatsFromFile(tmpfile.Name(), false)
for i, st := range totals {
assert.Equal(t, summary.Entries, st.Entries)
assert.Equal(t, summary.Searched, st.Searched)
assert.Equal(t, summary.Found, st.Found)
assert.Equal(t, summary.New, st.New)
assert.Equal(t, summary.Invalid, st.Invalid)
assert.Equal(t, summary.Ignore, st.Ignore)
assert.Equal(t, summary.Delete, st.Delete)
assert.Equal(t, summary.DeleteList, st.DeleteList)
assert.Equal(t, summary.Insert, st.Insert)
assert.Equal(t, summary.InsertFailed, st.InsertFailed)
assert.Equal(t, summary.Drop, st.Drop)
assert.Equal(t, summary.EarlyDrop, st.EarlyDrop)
assert.Equal(t, summary.IcmpError, st.IcmpError)
assert.Equal(t, summary.ExpectNew, st.ExpectNew)
assert.Equal(t, summary.ExpectCreate, st.ExpectCreate)
assert.Equal(t, summary.ExpectDelete, st.ExpectDelete)
assert.Equal(t, summary.SearchRestart, st.SearchRestart)
assert.Equal(t, 0, i) // Should only have one element
}
}
| ["\"CI\""] | [] | ["CI"] | [] | ["CI"] | go | 1 | 0 | |
r3x.go | package r3x
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
)
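// Execute exposes the given handler function as an HTTP service; it currently delegates to HTTPStream.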
func Execute (r3xFunc func(map[string]interface{}) []byte) {
HTTPStream(r3xFunc)
}
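// HTTPStream serves r3xFunc over HTTP on the port given by the PORT environment variable: non-POST requests are rejected, each POST body is decoded as JSON, passed to r3xFunc, and the returned JSON bytes are re-indented and written back to the client.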
func HTTPStream(r3xFunc func(map[string]interface{}) []byte){
port := os.Getenv("PORT")
if port == "" {
log.Fatal("PORT environment variable was not set")
}
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request){
if r.Method != "POST" {
errorHandler(w, "Invalid Request", http.StatusInternalServerError)
return
}
m := jsonHandler(w, r)
b := r3xFunc(m)
var f interface{}
err := json.Unmarshal(b, &f)
if err != nil {
errorHandler(w, err.Error(), http.StatusInternalServerError)
return
}
js, err := json.MarshalIndent(&f, "", "\t")
if err != nil {
errorHandler(w, err.Error(), http.StatusInternalServerError)
return
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
w.Write(js)
})
err := http.ListenAndServe(":"+port, nil)
if err != nil {
log.Fatal("Could not listen: ", err)
}
}
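// jsonHandler reads the request body and decodes it into a generic map; an empty body yields a nil map.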
func jsonHandler(w http.ResponseWriter, r *http.Request) map[string]interface{} {
body, err := ioutil.ReadAll(r.Body)
defer r.Body.Close()
if err != nil {
errorHandler(w, err.Error(), http.StatusInternalServerError)
}
var m map[string]interface{}
if len(body) > 0 {
var bf interface{}
err = json.Unmarshal(body, &bf)
if err != nil {
errorHandler(w, err.Error(), http.StatusInternalServerError)
}
m = bf.(map[string]interface{})
}
return m
}
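// errorHandler logs the error and writes it to the client with the given HTTP status code.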
func errorHandler(w http.ResponseWriter, error string, num int){
fmt.Println("Error : " , error)
http.Error(w,error, num)
}
| ["\"PORT\""] | [] | ["PORT"] | [] | ["PORT"] | go | 1 | 0 | |
web/bottle.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Bottle is a fast and simple micro-framework for small web applications. It
offers request dispatching (Routes) with URL parameter support, templates,
a built-in HTTP Server and adapters for many third party WSGI/HTTP-server and
template engines - all in a single file and with no dependencies other than the
Python Standard Library.
Homepage and documentation: http://bottlepy.org/
Copyright (c) 2015, Marcel Hellkamp.
License: MIT (see LICENSE for details)
"""
import sys
__author__ = 'Marcel Hellkamp'
__version__ = '0.13-dev'
__license__ = 'MIT'
###############################################################################
# Command-line interface ######################################################
###############################################################################
# INFO: Some server adapters need to monkey-patch std-lib modules before they
# are imported. This is why some of the command-line handling is done here, but
# the actual call to _main() is at the end of the file.
def _cli_parse(args): # pragma: no coverage
from argparse import ArgumentParser
parser = ArgumentParser(usage="usage: %sprog [options] package.module:app")
opt = parser.add_argument
opt('app', help='WSGI app entry point.')
opt("--version", action="store_true", help="show version number.")
opt("-b", "--bind", metavar="ADDRESS", help="bind socket to ADDRESS.")
opt("-s", "--server", default='wsgiref', help="use SERVER as backend.")
opt("-p", "--plugin", action="append", help="install additional plugin/s.")
opt("-c", "--conf", action="append", metavar="FILE",
help="load config values from FILE.")
opt("-C", "--param", action="append", metavar="NAME=VALUE",
help="override config values.")
opt("--debug", action="store_true", help="start server in debug mode.")
opt("--reload", action="store_true", help="auto-reload on file changes.")
cli_args = parser.parse_args(args)
return cli_args, parser
def _cli_patch(cli_args): # pragma: no coverage
parsed_args, _ = _cli_parse(cli_args)
opts = parsed_args
if opts.server:
if opts.server.startswith('gevent'):
import gevent.monkey
gevent.monkey.patch_all()
elif opts.server.startswith('eventlet'):
import eventlet
eventlet.monkey_patch()
if __name__ == '__main__':
_cli_patch(sys.argv)
###############################################################################
# Imports and Python 2/3 unification ##########################################
###############################################################################
import base64, cgi, email.utils, functools, hmac, imp, itertools, mimetypes,\
os, re, tempfile, threading, time, warnings, hashlib
from types import FunctionType
from datetime import date as datedate, datetime, timedelta
from tempfile import TemporaryFile
from traceback import format_exc, print_exc
from unicodedata import normalize
# inspect.getargspec was removed in Python 3.6, use
# Signature-based version where we can (Python 3.3+)
try:
from inspect import signature
def getargspec(func):
params = signature(func).parameters
args, varargs, keywords, defaults = [], None, None, []
for name, param in params.items():
if param.kind == param.VAR_POSITIONAL:
varargs = name
elif param.kind == param.VAR_KEYWORD:
keywords = name
else:
args.append(name)
if param.default is not param.empty:
defaults.append(param.default)
return (args, varargs, keywords, tuple(defaults) or None)
except ImportError:
try:
from inspect import getfullargspec
def getargspec(func):
spec = getfullargspec(func)
kwargs = makelist(spec[0]) + makelist(spec.kwonlyargs)
return kwargs, spec[1], spec[2], spec[3]
except ImportError:
from inspect import getargspec
try:
from simplejson import dumps as json_dumps, loads as json_lds
except ImportError: # pragma: no cover
try:
from json import dumps as json_dumps, loads as json_lds
except ImportError:
try:
from django.utils.simplejson import dumps as json_dumps, loads as json_lds
except ImportError:
def json_dumps(data):
raise ImportError(
"JSON support requires Python 2.6 or simplejson.")
json_lds = json_dumps
py3k = sys.version_info.major > 2
# Workaround for the "print is a keyword/function" Python 2/3 dilemma
# and a fallback for mod_wsgi (restricts stdout/err attribute access)
try:
_stdout, _stderr = sys.stdout.write, sys.stderr.write
except IOError:
_stdout = lambda x: sys.stdout.write(x)
_stderr = lambda x: sys.stderr.write(x)
# Lots of stdlib and builtin differences.
if py3k:
import http.client as httplib
import _thread as thread
from urllib.parse import urljoin, SplitResult as UrlSplitResult
from urllib.parse import urlencode, quote as urlquote, unquote as urlunquote
urlunquote = functools.partial(urlunquote, encoding='latin1')
from http.cookies import SimpleCookie
from collections import MutableMapping as DictMixin
import pickle
from io import BytesIO
import configparser
basestring = str
unicode = str
json_loads = lambda s: json_lds(touni(s))
callable = lambda x: hasattr(x, '__call__')
imap = map
def _raise(*a):
raise a[0](a[1]).with_traceback(a[2])
else: # 2.x
import httplib
import thread
from urlparse import urljoin, SplitResult as UrlSplitResult
from urllib import urlencode, quote as urlquote, unquote as urlunquote
from Cookie import SimpleCookie
from itertools import imap
import cPickle as pickle
from StringIO import StringIO as BytesIO
import ConfigParser as configparser
from collections import MutableMapping as DictMixin
unicode = unicode
json_loads = json_lds
eval(compile('def _raise(*a): raise a[0], a[1], a[2]', '<py3fix>', 'exec'))
# Some helpers for string/byte handling
def tob(s, enc='utf8'):
return s.encode(enc) if isinstance(s, unicode) else bytes(s)
def touni(s, enc='utf8', err='strict'):
if isinstance(s, bytes):
return s.decode(enc, err)
else:
return unicode(s or ("" if s is None else s))
tonat = touni if py3k else tob
# 3.2 fixes cgi.FieldStorage to accept bytes (which makes a lot of sense).
# A bug in functools causes it to break if the wrapper is an instance method
def update_wrapper(wrapper, wrapped, *a, **ka):
try:
functools.update_wrapper(wrapper, wrapped, *a, **ka)
except AttributeError:
pass
# These helpers are used at module level and need to be defined first.
# And yes, I know PEP-8, but sometimes a lower-case classname makes more sense.
def depr(major, minor, cause, fix):
text = "Warning: Use of deprecated feature or API. (Deprecated in Bottle-%d.%d)\n"\
"Cause: %s\n"\
"Fix: %s\n" % (major, minor, cause, fix)
if DEBUG == 'strict':
raise DeprecationWarning(text)
warnings.warn(text, DeprecationWarning, stacklevel=3)
return DeprecationWarning(text)
def makelist(data): # This is just too handy
if isinstance(data, (tuple, list, set, dict)):
return list(data)
elif data:
return [data]
else:
return []
class DictProperty(object):
""" Property that maps to a key in a local dict-like attribute. """
def __init__(self, attr, key=None, read_only=False):
self.attr, self.key, self.read_only = attr, key, read_only
def __call__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter, self.key = func, self.key or func.__name__
return self
def __get__(self, obj, cls):
if obj is None: return self
key, storage = self.key, getattr(obj, self.attr)
if key not in storage: storage[key] = self.getter(obj)
return storage[key]
def __set__(self, obj, value):
if self.read_only: raise AttributeError("Read-Only property.")
getattr(obj, self.attr)[self.key] = value
def __delete__(self, obj):
if self.read_only: raise AttributeError("Read-Only property.")
del getattr(obj, self.attr)[self.key]
class cached_property(object):
""" A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property. """
def __init__(self, func):
update_wrapper(self, func)
self.func = func
def __get__(self, obj, cls):
if obj is None: return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
class lazy_attribute(object):
""" A property that caches itself to the class object. """
def __init__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter = func
def __get__(self, obj, cls):
value = self.getter(cls)
setattr(cls, self.__name__, value)
return value
###############################################################################
# Exceptions and Events #######################################################
###############################################################################
class BottleException(Exception):
""" A base class for exceptions used by bottle. """
pass
###############################################################################
# Routing ######################################################################
###############################################################################
class RouteError(BottleException):
""" This is a base class for all routing related exceptions """
class RouteReset(BottleException):
""" If raised by a plugin or request handler, the route is reset and all
plugins are re-applied. """
class RouterUnknownModeError(RouteError):
pass
class RouteSyntaxError(RouteError):
""" The route parser found something not supported by this router. """
class RouteBuildError(RouteError):
""" The route could not be built. """
def _re_flatten(p):
""" Turn all capturing groups in a regular expression pattern into
non-capturing groups. """
if '(' not in p:
return p
return re.sub(r'(\\*)(\(\?P<[^>]+>|\((?!\?))', lambda m: m.group(0) if
len(m.group(1)) % 2 else m.group(1) + '(?:', p)
class Router(object):
""" A Router is an ordered collection of route->target pairs. It is used to
efficiently match WSGI requests against a number of routes and return
the first target that satisfies the request. The target may be anything,
usually a string, ID or callable object. A route consists of a path-rule
and a HTTP method.
The path-rule is either a static path (e.g. `/contact`) or a dynamic
path that contains wildcards (e.g. `/wiki/<page>`). The wildcard syntax
and details on the matching order are described in docs:`routing`.
"""
default_pattern = '[^/]+'
default_filter = 're'
#: The current CPython regexp implementation does not allow more
#: than 99 matching groups per regular expression.
_MAX_GROUPS_PER_PATTERN = 99
def __init__(self, strict=False):
self.rules = [] # All rules in order
self._groups = {} # index of regexes to find them in dyna_routes
self.builder = {} # Data structure for the url builder
self.static = {} # Search structure for static routes
self.dyna_routes = {}
self.dyna_regexes = {} # Search structure for dynamic routes
#: If true, static routes are no longer checked first.
self.strict_order = strict
self.filters = {
're': lambda conf: (_re_flatten(conf or self.default_pattern),
None, None),
'int': lambda conf: (r'-?\d+', int, lambda x: str(int(x))),
'float': lambda conf: (r'-?[\d.]+', float, lambda x: str(float(x))),
'path': lambda conf: (r'.+?', None, None)
}
def add_filter(self, name, func):
""" Add a filter. The provided function is called with the configuration
string as parameter and must return a (regexp, to_python, to_url) tuple.
The first element is a string, the last two are callables or None. """
self.filters[name] = func
rule_syntax = re.compile('(\\\\*)'
'(?:(?::([a-zA-Z_][a-zA-Z_0-9]*)?()(?:#(.*?)#)?)'
'|(?:<([a-zA-Z_][a-zA-Z_0-9]*)?(?::([a-zA-Z_]*)'
'(?::((?:\\\\.|[^\\\\>]+)+)?)?)?>))')
def _itertokens(self, rule):
offset, prefix = 0, ''
for match in self.rule_syntax.finditer(rule):
prefix += rule[offset:match.start()]
g = match.groups()
if g[2] is not None:
depr(0, 13, "Use of old route syntax.",
"Use <name> instead of :name in routes.")
if len(g[0]) % 2: # Escaped wildcard
prefix += match.group(0)[len(g[0]):]
offset = match.end()
continue
if prefix:
yield prefix, None, None
name, filtr, conf = g[4:7] if g[2] is None else g[1:4]
yield name, filtr or 'default', conf or None
offset, prefix = match.end(), ''
if offset <= len(rule) or prefix:
yield prefix + rule[offset:], None, None
def add(self, rule, method, target, name=None):
""" Add a new rule or replace the target for an existing rule. """
anons = 0 # Number of anonymous wildcards found
keys = [] # Names of keys
pattern = '' # Regular expression pattern with named groups
filters = [] # Lists of wildcard input filters
builder = [] # Data structure for the URL builder
is_static = True
for key, mode, conf in self._itertokens(rule):
if mode:
is_static = False
if mode == 'default': mode = self.default_filter
mask, in_filter, out_filter = self.filters[mode](conf)
if not key:
pattern += '(?:%s)' % mask
key = 'anon%d' % anons
anons += 1
else:
pattern += '(?P<%s>%s)' % (key, mask)
keys.append(key)
if in_filter: filters.append((key, in_filter))
builder.append((key, out_filter or str))
elif key:
pattern += re.escape(key)
builder.append((None, key))
self.builder[rule] = builder
if name: self.builder[name] = builder
if is_static and not self.strict_order:
self.static.setdefault(method, {})
self.static[method][self.build(rule)] = (target, None)
return
try:
re_pattern = re.compile('^(%s)$' % pattern)
re_match = re_pattern.match
except re.error as e:
raise RouteSyntaxError("Could not add Route: %s (%s)" % (rule, e))
if filters:
def getargs(path):
url_args = re_match(path).groupdict()
for name, wildcard_filter in filters:
try:
url_args[name] = wildcard_filter(url_args[name])
except ValueError:
raise HTTPError(400, 'Path has wrong format.')
return url_args
elif re_pattern.groupindex:
def getargs(path):
return re_match(path).groupdict()
else:
getargs = None
flatpat = _re_flatten(pattern)
whole_rule = (rule, flatpat, target, getargs)
if (flatpat, method) in self._groups:
if DEBUG:
msg = 'Route <%s %s> overwrites a previously defined route'
warnings.warn(msg % (method, rule), RuntimeWarning)
self.dyna_routes[method][
self._groups[flatpat, method]] = whole_rule
else:
self.dyna_routes.setdefault(method, []).append(whole_rule)
self._groups[flatpat, method] = len(self.dyna_routes[method]) - 1
self._compile(method)
def _compile(self, method):
all_rules = self.dyna_routes[method]
comborules = self.dyna_regexes[method] = []
maxgroups = self._MAX_GROUPS_PER_PATTERN
for x in range(0, len(all_rules), maxgroups):
some = all_rules[x:x + maxgroups]
combined = (flatpat for (_, flatpat, _, _) in some)
combined = '|'.join('(^%s$)' % flatpat for flatpat in combined)
combined = re.compile(combined).match
rules = [(target, getargs) for (_, _, target, getargs) in some]
comborules.append((combined, rules))
def build(self, _name, *anons, **query):
""" Build an URL by filling the wildcards in a rule. """
builder = self.builder.get(_name)
if not builder:
raise RouteBuildError("No route with that name.", _name)
try:
for i, value in enumerate(anons):
query['anon%d' % i] = value
url = ''.join([f(query.pop(n)) if n else f for (n, f) in builder])
return url if not query else url + '?' + urlencode(query)
except KeyError as E:
raise RouteBuildError('Missing URL argument: %r' % E.args[0])
def match(self, environ):
""" Return a (target, url_args) tuple or raise HTTPError(400/404/405). """
verb = environ['REQUEST_METHOD'].upper()
path = environ['PATH_INFO'] or '/'
if verb == 'HEAD':
methods = ['PROXY', verb, 'GET', 'ANY']
else:
methods = ['PROXY', verb, 'ANY']
for method in methods:
if method in self.static and path in self.static[method]:
target, getargs = self.static[method][path]
return target, getargs(path) if getargs else {}
elif method in self.dyna_regexes:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
target, getargs = rules[match.lastindex - 1]
return target, getargs(path) if getargs else {}
# No matching route found. Collect alternative methods for 405 response
allowed = set([])
nocheck = set(methods)
for method in set(self.static) - nocheck:
if path in self.static[method]:
allowed.add(verb)
for method in set(self.dyna_regexes) - allowed - nocheck:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
allowed.add(method)
if allowed:
allow_header = ",".join(sorted(allowed))
raise HTTPError(405, "Method not allowed.", Allow=allow_header)
# No matching route and no alternative method found. We give up
raise HTTPError(404, "Not found: " + repr(path))
class Route(object):
""" This class wraps a route callback along with route specific metadata and
configuration and applies Plugins on demand. It is also responsible for
turing an URL path rule into a regular expression usable by the Router.
"""
def __init__(self, app, rule, method, callback,
name=None,
plugins=None,
skiplist=None, **config):
#: The application this route is installed to.
self.app = app
#: The path-rule string (e.g. ``/wiki/<page>``).
self.rule = rule
#: The HTTP method as a string (e.g. ``GET``).
self.method = method
#: The original callback with no plugins applied. Useful for introspection.
self.callback = callback
#: The name of the route (if specified) or ``None``.
self.name = name or None
#: A list of route-specific plugins (see :meth:`Bottle.route`).
self.plugins = plugins or []
#: A list of plugins to not apply to this route (see :meth:`Bottle.route`).
self.skiplist = skiplist or []
#: Additional keyword arguments passed to the :meth:`Bottle.route`
#: decorator are stored in this dictionary. Used for route-specific
#: plugin configuration and meta-data.
self.config = ConfigDict().load_dict(config)
@cached_property
def call(self):
""" The route callback with all plugins applied. This property is
created on demand and then cached to speed up subsequent requests."""
return self._make_callback()
def reset(self):
""" Forget any cached values. The next time :attr:`call` is accessed,
all plugins are re-applied. """
self.__dict__.pop('call', None)
def prepare(self):
""" Do all on-demand work immediately (useful for debugging)."""
self.call
def all_plugins(self):
""" Yield all Plugins affecting this route. """
unique = set()
for p in reversed(self.app.plugins + self.plugins):
if True in self.skiplist: break
name = getattr(p, 'name', False)
if name and (name in self.skiplist or name in unique): continue
if p in self.skiplist or type(p) in self.skiplist: continue
if name: unique.add(name)
yield p
def _make_callback(self):
callback = self.callback
for plugin in self.all_plugins():
try:
if hasattr(plugin, 'apply'):
callback = plugin.apply(callback, self)
else:
callback = plugin(callback)
except RouteReset: # Try again with changed configuration.
return self._make_callback()
if callback is not self.callback:
update_wrapper(callback, self.callback)
return callback
def get_undecorated_callback(self):
""" Return the callback. If the callback is a decorated function, try to
recover the original function. """
func = self.callback
func = getattr(func, '__func__' if py3k else 'im_func', func)
closure_attr = '__closure__' if py3k else 'func_closure'
while hasattr(func, closure_attr) and getattr(func, closure_attr):
attributes = getattr(func, closure_attr)
func = attributes[0].cell_contents
# in case of decorators with multiple arguments
if not isinstance(func, FunctionType):
# pick first FunctionType instance from multiple arguments
func = filter(lambda x: isinstance(x, FunctionType),
map(lambda x: x.cell_contents, attributes))
func = list(func)[0] # py3 support
return func
def get_callback_args(self):
""" Return a list of argument names the callback (most likely) accepts
as keyword arguments. If the callback is a decorated function, try
to recover the original function before inspection. """
return getargspec(self.get_undecorated_callback())[0]
def get_config(self, key, default=None):
""" Lookup a config field and return its value, first checking the
route.config, then route.app.config."""
for conf in (self.config, self.app.config):
if key in conf: return conf[key]
return default
def __repr__(self):
cb = self.get_undecorated_callback()
return '<%s %r %r>' % (self.method, self.rule, cb)
###############################################################################
# Application Object ###########################################################
###############################################################################
class Bottle(object):
""" Each Bottle object represents a single, distinct web application and
consists of routes, callbacks, plugins, resources and configuration.
Instances are callable WSGI applications.
:param catchall: If true (default), handle all exceptions. Turn off to
let debugging middleware handle exceptions.
"""
def __init__(self, catchall=True, autojson=True):
#: A :class:`ConfigDict` for app specific configuration.
self.config = ConfigDict()
self.config._add_change_listener(functools.partial(self.trigger_hook, 'config'))
self.config.meta_set('autojson', 'validate', bool)
self.config.meta_set('catchall', 'validate', bool)
self.config['catchall'] = catchall
self.config['autojson'] = autojson
self._mounts = []
#: A :class:`ResourceManager` for application files
self.resources = ResourceManager()
self.routes = [] # List of installed :class:`Route` instances.
self.router = Router() # Maps requests to :class:`Route` instances.
self.error_handler = {}
# Core plugins
self.plugins = [] # List of installed plugins.
if self.config['autojson']:
self.install(JSONPlugin())
self.install(TemplatePlugin())
#: If true, most exceptions are caught and returned as :exc:`HTTPError`
catchall = DictProperty('config', 'catchall')
__hook_names = 'before_request', 'after_request', 'app_reset', 'config'
__hook_reversed = 'after_request'
@cached_property
def _hooks(self):
return dict((name, []) for name in self.__hook_names)
def add_hook(self, name, func):
""" Attach a callback to a hook. Three hooks are currently implemented:
before_request
Executed once before each request. The request context is
available, but no routing has happened yet.
after_request
Executed once after each request regardless of its outcome.
app_reset
Called whenever :meth:`Bottle.reset` is called.
"""
if name in self.__hook_reversed:
self._hooks[name].insert(0, func)
else:
self._hooks[name].append(func)
def remove_hook(self, name, func):
""" Remove a callback from a hook. """
if name in self._hooks and func in self._hooks[name]:
self._hooks[name].remove(func)
return True
def trigger_hook(self, __name, *args, **kwargs):
""" Trigger a hook and return a list of results. """
return [hook(*args, **kwargs) for hook in self._hooks[__name][:]]
def hook(self, name):
""" Return a decorator that attaches a callback to a hook. See
:meth:`add_hook` for details."""
def decorator(func):
self.add_hook(name, func)
return func
return decorator
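# Illustrative sketch: registering a 'before_request' hook with the decorator
# API. The app instance is assumed for this example only.
#
#     app = Bottle()
#
#     @app.hook('before_request')
#     def log_request():
#         print('Handling %s %s' % (request.method, request.path))
#
# 'after_request' hooks are listed in __hook_reversed and therefore run in
# reverse registration order.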
def _mount_wsgi(self, prefix, app, **options):
segments = [p for p in prefix.split('/') if p]
if not segments:
raise ValueError('WSGI applications cannot be mounted to "/".')
path_depth = len(segments)
def mountpoint_wrapper():
try:
request.path_shift(path_depth)
rs = HTTPResponse([])
def start_response(status, headerlist, exc_info=None):
if exc_info:
_raise(*exc_info)
rs.status = status
for name, value in headerlist:
rs.add_header(name, value)
return rs.body.append
body = app(request.environ, start_response)
rs.body = itertools.chain(rs.body, body) if rs.body else body
return rs
finally:
request.path_shift(-path_depth)
options.setdefault('skip', True)
options.setdefault('method', 'PROXY')
options.setdefault('mountpoint', {'prefix': prefix, 'target': app})
options['callback'] = mountpoint_wrapper
self.route('/%s/<:re:.*>' % '/'.join(segments), **options)
if not prefix.endswith('/'):
self.route('/' + '/'.join(segments), **options)
def _mount_app(self, prefix, app, **options):
if app in self._mounts or '_mount.app' in app.config:
depr(0, 13, "Application mounted multiple times. Falling back to WSGI mount.",
"Clone application before mounting to a different location.")
return self._mount_wsgi(prefix, app, **options)
if options:
depr(0, 13, "Unsupported mount options. Falling back to WSGI mount.",
"Do not specify any route options when mounting bottle application.")
return self._mount_wsgi(prefix, app, **options)
if not prefix.endswith("/"):
depr(0, 13, "Prefix must end in '/'. Falling back to WSGI mount.",
"Consider adding an explicit redirect from '/prefix' to '/prefix/' in the parent application.")
return self._mount_wsgi(prefix, app, **options)
self._mounts.append(app)
app.config['_mount.prefix'] = prefix
app.config['_mount.app'] = self
for route in app.routes:
route.rule = prefix + route.rule.lstrip('/')
self.add_route(route)
def mount(self, prefix, app, **options):
""" Mount an application (:class:`Bottle` or plain WSGI) to a specific
URL prefix. Example::
parent_app.mount('/prefix/', child_app)
:param prefix: path prefix or `mount-point`.
:param app: an instance of :class:`Bottle` or a WSGI application.
Plugins from the parent application are not applied to the routes
of the mounted child application. If you need plugins in the child
application, install them separately.
While it is possible to use path wildcards within the prefix path
(:class:`Bottle` children only), it is highly discouraged.
The prefix path must end with a slash. If you want to access the
root of the child application via `/prefix` in addition to
`/prefix/`, consider adding a route with a 307 redirect to the
parent application.
"""
if not prefix.startswith('/'):
raise ValueError("Prefix must start with '/'")
if isinstance(app, Bottle):
return self._mount_app(prefix, app, **options)
else:
return self._mount_wsgi(prefix, app, **options)
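# Illustrative sketch: mounting a child application below a URL prefix. Both
# app instances are assumed for this example only.
#
#     parent = Bottle()
#     api = Bottle()
#
#     @api.route('/status')
#     def status():
#         return {'ok': True}
#
#     parent.mount('/api/', api)    # GET /api/status is now handled by `api`
#
# Because `api` is a Bottle instance and the prefix ends with '/', the native
# mount is used; otherwise the call falls back to the WSGI mount above.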
def merge(self, routes):
""" Merge the routes of another :class:`Bottle` application or a list of
:class:`Route` objects into this application. The routes keep their
'owner', meaning that the :data:`Route.app` attribute is not
changed. """
if isinstance(routes, Bottle):
routes = routes.routes
for route in routes:
self.add_route(route)
def install(self, plugin):
""" Add a plugin to the list of plugins and prepare it for being
applied to all routes of this application. A plugin may be a simple
decorator or an object that implements the :class:`Plugin` API.
"""
if hasattr(plugin, 'setup'): plugin.setup(self)
if not callable(plugin) and not hasattr(plugin, 'apply'):
raise TypeError("Plugins must be callable or implement .apply()")
self.plugins.append(plugin)
self.reset()
return plugin
def uninstall(self, plugin):
""" Uninstall plugins. Pass an instance to remove a specific plugin, a type
object to remove all plugins that match that type, a string to remove
all plugins with a matching ``name`` attribute or ``True`` to remove all
plugins. Return the list of removed plugins. """
removed, remove = [], plugin
for i, plugin in list(enumerate(self.plugins))[::-1]:
if remove is True or remove is plugin or remove is type(plugin) \
or getattr(plugin, 'name', True) == remove:
removed.append(plugin)
del self.plugins[i]
if hasattr(plugin, 'close'): plugin.close()
if removed: self.reset()
return removed
def reset(self, route=None):
""" Reset all routes (force plugins to be re-applied) and clear all
caches. If an ID or route object is given, only that specific route
is affected. """
if route is None: routes = self.routes
elif isinstance(route, Route): routes = [route]
else: routes = [self.routes[route]]
for route in routes:
route.reset()
if DEBUG:
for route in routes:
route.prepare()
self.trigger_hook('app_reset')
def close(self):
""" Close the application and all installed plugins. """
for plugin in self.plugins:
if hasattr(plugin, 'close'): plugin.close()
def run(self, **kwargs):
""" Calls :func:`run` with the same parameters. """
run(self, **kwargs)
def match(self, environ):
""" Search for a matching route and return a (:class:`Route` , urlargs)
tuple. The second value is a dictionary with parameters extracted
from the URL. Raise :exc:`HTTPError` (404/405) on a non-match."""
return self.router.match(environ)
def get_url(self, routename, **kargs):
""" Return a string that matches a named route """
scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/'
location = self.router.build(routename, **kargs).lstrip('/')
return urljoin(urljoin('/', scriptname), location)
def add_route(self, route):
""" Add a route object, but do not change the :data:`Route.app`
attribute."""
self.routes.append(route)
self.router.add(route.rule, route.method, route, name=route.name)
if DEBUG: route.prepare()
def route(self,
path=None,
method='GET',
callback=None,
name=None,
apply=None,
skip=None, **config):
""" A decorator to bind a function to a request URL. Example::
@app.route('/hello/<name>')
def hello(name):
return 'Hello %s' % name
The ``<name>`` part is a wildcard. See :class:`Router` for syntax
details.
:param path: Request path or a list of paths to listen to. If no
path is specified, it is automatically generated from the
signature of the function.
:param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of
methods to listen to. (default: `GET`)
:param callback: An optional shortcut to avoid the decorator
syntax. ``route(..., callback=func)`` equals ``route(...)(func)``
:param name: The name for this route. (default: None)
:param apply: A decorator or plugin or a list of plugins. These are
applied to the route callback in addition to installed plugins.
:param skip: A list of plugins, plugin classes or names. Matching
plugins are not installed to this route. ``True`` skips all.
Any additional keyword arguments are stored as route-specific
configuration and passed to plugins (see :meth:`Plugin.apply`).
"""
if callable(path): path, callback = None, path
plugins = makelist(apply)
skiplist = makelist(skip)
def decorator(callback):
if isinstance(callback, basestring): callback = load(callback)
for rule in makelist(path) or yieldroutes(callback):
for verb in makelist(method):
verb = verb.upper()
route = Route(self, rule, verb, callback,
name=name,
plugins=plugins,
skiplist=skiplist, **config)
self.add_route(route)
return callback
return decorator(callback) if callback else decorator
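# Illustrative sketch of the less common route() parameters. The names
# `app` and `checked` are assumed for this example only.
#
#     app = Bottle()
#
#     def checked(callback):              # a plugin may be a plain decorator
#         def wrapper(*args, **kwargs):
#             return callback(*args, **kwargs)
#         return wrapper
#
#     @app.route('/admin', method=['GET', 'POST'], apply=[checked], skip=['json'])
#     def admin():
#         return 'admin'
#
# `apply` adds route-specific plugins; `skip` filters installed plugins by
# instance, class, name or `True` (skip all).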
def get(self, path=None, method='GET', **options):
""" Equals :meth:`route`. """
return self.route(path, method, **options)
def post(self, path=None, method='POST', **options):
""" Equals :meth:`route` with a ``POST`` method parameter. """
return self.route(path, method, **options)
def put(self, path=None, method='PUT', **options):
""" Equals :meth:`route` with a ``PUT`` method parameter. """
return self.route(path, method, **options)
def delete(self, path=None, method='DELETE', **options):
""" Equals :meth:`route` with a ``DELETE`` method parameter. """
return self.route(path, method, **options)
def patch(self, path=None, method='PATCH', **options):
""" Equals :meth:`route` with a ``PATCH`` method parameter. """
return self.route(path, method, **options)
def error(self, code=500):
""" Decorator: Register an output handler for a HTTP error code"""
def wrapper(handler):
self.error_handler[int(code)] = handler
return handler
return wrapper
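# Illustrative sketch: registering a custom 404 page. The app instance is
# assumed for this example only; the handler receives the HTTPError instance.
#
#     app = Bottle()
#
#     @app.error(404)
#     def not_found(err):
#         return 'Nothing here: %s' % err.status_line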
def default_error_handler(self, res):
return tob(template(ERROR_PAGE_TEMPLATE, e=res))
def _handle(self, environ):
path = environ['bottle.raw_path'] = environ['PATH_INFO']
if py3k:
environ['PATH_INFO'] = path.encode('latin1').decode('utf8', 'ignore')
def _inner_handle():
# Maybe pass variables as locals for better performance?
try:
route, args = self.router.match(environ)
environ['route.handle'] = route
environ['bottle.route'] = route
environ['route.url_args'] = args
return route.call(**args)
except HTTPResponse as E:
return E
except RouteReset:
route.reset()
return _inner_handle()
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception as E:
if not self.catchall: raise
stacktrace = format_exc()
environ['wsgi.errors'].write(stacktrace)
return HTTPError(500, "Internal Server Error", E, stacktrace)
try:
out = None
environ['bottle.app'] = self
request.bind(environ)
response.bind()
try:
self.trigger_hook('before_request')
except HTTPResponse as E:
return E
out = _inner_handle()
return out
finally:
if isinstance(out, HTTPResponse):
out.apply(response)
self.trigger_hook('after_request')
def _cast(self, out, peek=None):
""" Try to convert the parameter into something WSGI compatible and set
correct HTTP headers when possible.
Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like,
iterable of strings and iterable of unicodes
"""
# Empty output is done here
if not out:
if 'Content-Length' not in response:
response['Content-Length'] = 0
return []
# Join lists of byte or unicode strings. Mixed lists are NOT supported
if isinstance(out, (tuple, list))\
and isinstance(out[0], (bytes, unicode)):
out = out[0][0:0].join(out) # b'abc'[0:0] -> b''
# Encode unicode strings
if isinstance(out, unicode):
out = out.encode(response.charset)
# Byte Strings are just returned
if isinstance(out, bytes):
if 'Content-Length' not in response:
response['Content-Length'] = len(out)
return [out]
# HTTPError or HTTPException (recursive, because they may wrap anything)
# TODO: Handle these explicitly in handle() or make them iterable.
if isinstance(out, HTTPError):
out.apply(response)
out = self.error_handler.get(out.status_code,
self.default_error_handler)(out)
return self._cast(out)
if isinstance(out, HTTPResponse):
out.apply(response)
return self._cast(out.body)
# File-like objects.
if hasattr(out, 'read'):
if 'wsgi.file_wrapper' in request.environ:
return request.environ['wsgi.file_wrapper'](out)
elif hasattr(out, 'close') or not hasattr(out, '__iter__'):
return WSGIFileWrapper(out)
# Handle Iterables. We peek into them to detect their inner type.
try:
iout = iter(out)
first = next(iout)
while not first:
first = next(iout)
except StopIteration:
return self._cast('')
except HTTPResponse as E:
first = E
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception as error:
if not self.catchall: raise
first = HTTPError(500, 'Unhandled exception', error, format_exc())
# These are the inner types allowed in iterator or generator objects.
if isinstance(first, HTTPResponse):
return self._cast(first)
elif isinstance(first, bytes):
new_iter = itertools.chain([first], iout)
elif isinstance(first, unicode):
encoder = lambda x: x.encode(response.charset)
new_iter = imap(encoder, itertools.chain([first], iout))
else:
msg = 'Unsupported response type: %s' % type(first)
return self._cast(HTTPError(500, msg))
if hasattr(out, 'close'):
new_iter = _closeiter(new_iter, out.close)
return new_iter
def wsgi(self, environ, start_response):
""" The bottle WSGI-interface. """
try:
out = self._cast(self._handle(environ))
# rfc2616 section 4.3
if response._status_code in (100, 101, 204, 304)\
or environ['REQUEST_METHOD'] == 'HEAD':
if hasattr(out, 'close'): out.close()
out = []
start_response(response._status_line, response.headerlist)
return out
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception as E:
if not self.catchall: raise
err = '<h1>Critical error while processing request: %s</h1>' \
% html_escape(environ.get('PATH_INFO', '/'))
if DEBUG:
err += '<h2>Error:</h2>\n<pre>\n%s\n</pre>\n' \
'<h2>Traceback:</h2>\n<pre>\n%s\n</pre>\n' \
% (html_escape(repr(E)), html_escape(format_exc()))
environ['wsgi.errors'].write(err)
headers = [('Content-Type', 'text/html; charset=UTF-8')]
start_response('500 INTERNAL SERVER ERROR', headers, sys.exc_info())
return [tob(err)]
def __call__(self, environ, start_response):
""" Each instance of :class:'Bottle' is a WSGI application. """
return self.wsgi(environ, start_response)
def __enter__(self):
""" Use this application as default for all module-level shortcuts. """
default_app.push(self)
return self
def __exit__(self, exc_type, exc_value, traceback):
default_app.pop()
def __setattr__(self, name, value):
if name in self.__dict__:
raise AttributeError("Attribute %s already defined. Plugin conflict?" % name)
self.__dict__[name] = value
###############################################################################
# HTTP and WSGI Tools ##########################################################
###############################################################################
class BaseRequest(object):
""" A wrapper for WSGI environment dictionaries that adds a lot of
convenient access methods and properties. Most of them are read-only.
Adding new attributes to a request actually adds them to the environ
dictionary (as 'bottle.request.ext.<name>'). This is the recommended
way to store and access request-specific data.
"""
__slots__ = ('environ', )
#: Maximum size of memory buffer for :attr:`body` in bytes.
MEMFILE_MAX = 102400
def __init__(self, environ=None):
""" Wrap a WSGI environ dictionary. """
#: The wrapped WSGI environ dictionary. This is the only real attribute.
#: All other attributes actually are read-only properties.
self.environ = {} if environ is None else environ
self.environ['bottle.request'] = self
@DictProperty('environ', 'bottle.app', read_only=True)
def app(self):
""" Bottle application handling this request. """
raise RuntimeError('This request is not connected to an application.')
@DictProperty('environ', 'bottle.route', read_only=True)
def route(self):
""" The bottle :class:`Route` object that matches this request. """
raise RuntimeError('This request is not connected to a route.')
@DictProperty('environ', 'route.url_args', read_only=True)
def url_args(self):
""" The arguments extracted from the URL. """
raise RuntimeError('This request is not connected to a route.')
@property
def path(self):
""" The value of ``PATH_INFO`` with exactly one prefixed slash (to fix
broken clients and avoid the "empty path" edge case). """
return '/' + self.environ.get('PATH_INFO', '').lstrip('/')
@property
def method(self):
""" The ``REQUEST_METHOD`` value as an uppercase string. """
return self.environ.get('REQUEST_METHOD', 'GET').upper()
@DictProperty('environ', 'bottle.request.headers', read_only=True)
def headers(self):
""" A :class:`WSGIHeaderDict` that provides case-insensitive access to
HTTP request headers. """
return WSGIHeaderDict(self.environ)
def get_header(self, name, default=None):
""" Return the value of a request header, or a given default value. """
return self.headers.get(name, default)
@DictProperty('environ', 'bottle.request.cookies', read_only=True)
def cookies(self):
""" Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT
decoded. Use :meth:`get_cookie` if you expect signed cookies. """
cookies = SimpleCookie(self.environ.get('HTTP_COOKIE', '')).values()
return FormsDict((c.key, c.value) for c in cookies)
def get_cookie(self, key, default=None, secret=None):
""" Return the content of a cookie. To read a `Signed Cookie`, the
`secret` must match the one used to create the cookie (see
:meth:`BaseResponse.set_cookie`). If anything goes wrong (missing
cookie or wrong signature), return a default value. """
value = self.cookies.get(key)
if secret and value:
dec = cookie_decode(value, secret) # (key, value) tuple or None
return dec[1] if dec and dec[0] == key else default
return value or default
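# Illustrative sketch: reading a signed cookie. The secret and cookie name
# are assumed for this example only; see BaseResponse.set_cookie for the
# matching write side.
#
#     @app.route('/restricted')
#     def restricted():
#         user = request.get_cookie('account', secret='change-me')
#         if not user:
#             raise HTTPError(401, 'Login required')
#         return 'Hello %s!' % user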
@DictProperty('environ', 'bottle.request.query', read_only=True)
def query(self):
""" The :attr:`query_string` parsed into a :class:`FormsDict`. These
values are sometimes called "URL arguments" or "GET parameters", but
should not be confused with the "URL wildcards" provided by the
:class:`Router`. """
get = self.environ['bottle.get'] = FormsDict()
pairs = _parse_qsl(self.environ.get('QUERY_STRING', ''))
for key, value in pairs:
get[key] = value
return get
@DictProperty('environ', 'bottle.request.forms', read_only=True)
def forms(self):
""" Form values parsed from an `url-encoded` or `multipart/form-data`
encoded POST or PUT request body. The result is returned as a
:class:`FormsDict`. All keys and values are strings. File uploads
are stored separately in :attr:`files`. """
forms = FormsDict()
for name, item in self.POST.allitems():
if not isinstance(item, FileUpload):
forms[name] = item
return forms
@DictProperty('environ', 'bottle.request.params', read_only=True)
def params(self):
""" A :class:`FormsDict` with the combined values of :attr:`query` and
:attr:`forms`. File uploads are stored in :attr:`files`. """
params = FormsDict()
for key, value in self.query.allitems():
params[key] = value
for key, value in self.forms.allitems():
params[key] = value
return params
@DictProperty('environ', 'bottle.request.files', read_only=True)
def files(self):
""" File uploads parsed from `multipart/form-data` encoded POST or PUT
request body. The values are instances of :class:`FileUpload`.
"""
files = FormsDict()
for name, item in self.POST.allitems():
if isinstance(item, FileUpload):
files[name] = item
return files
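# Illustrative sketch: accessing form fields and file uploads in a POST
# handler. The route and field names are assumed for this example only.
#
#     @app.route('/upload', method='POST')
#     def do_upload():
#         title = request.forms.get('title', '')    # plain form field
#         upload = request.files.get('data')         # FileUpload instance or None
#         combined = request.params.get('title')     # query and form values merged
#         return 'Got %r (file attached: %s)' % (title, upload is not None)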
@DictProperty('environ', 'bottle.request.json', read_only=True)
def json(self):
""" If the ``Content-Type`` header is ``application/json`` or
``application/json-rpc``, this property holds the parsed content
of the request body. Only requests smaller than :attr:`MEMFILE_MAX`
are processed to avoid memory exhaustion.
Invalid JSON raises a 400 error response.
"""
ctype = self.environ.get('CONTENT_TYPE', '').lower().split(';')[0]
if ctype in ('application/json', 'application/json-rpc'):
b = self._get_body_string()
if not b:
return None
try:
return json_loads(b)
except (ValueError, TypeError):
raise HTTPError(400, 'Invalid JSON')
return None
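# Illustrative sketch: consuming a JSON request body. The route and payload
# shape are assumed for this example only.
#
#     @app.route('/items', method='POST')
#     def create_item():
#         data = request.json            # None unless the Content-Type is JSON
#         if data is None:
#             raise HTTPError(400, 'JSON body expected')
#         return {'received': data.get('name')}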
def _iter_body(self, read, bufsize):
maxread = max(0, self.content_length)
while maxread:
part = read(min(maxread, bufsize))
if not part: break
yield part
maxread -= len(part)
@staticmethod
def _iter_chunked(read, bufsize):
err = HTTPError(400, 'Error while parsing chunked transfer body.')
rn, sem, bs = tob('\r\n'), tob(';'), tob('')
while True:
header = read(1)
while header[-2:] != rn:
c = read(1)
header += c
if not c: raise err
if len(header) > bufsize: raise err
size, _, _ = header.partition(sem)
try:
maxread = int(tonat(size.strip()), 16)
except ValueError:
raise err
if maxread == 0: break
buff = bs
while maxread > 0:
if not buff:
buff = read(min(maxread, bufsize))
part, buff = buff[:maxread], buff[maxread:]
if not part: raise err
yield part
maxread -= len(part)
if read(2) != rn:
raise err
@DictProperty('environ', 'bottle.request.body', read_only=True)
def _body(self):
try:
read_func = self.environ['wsgi.input'].read
except KeyError:
self.environ['wsgi.input'] = BytesIO()
return self.environ['wsgi.input']
body_iter = self._iter_chunked if self.chunked else self._iter_body
body, body_size, is_temp_file = BytesIO(), 0, False
for part in body_iter(read_func, self.MEMFILE_MAX):
body.write(part)
body_size += len(part)
if not is_temp_file and body_size > self.MEMFILE_MAX:
body, tmp = TemporaryFile(mode='w+b'), body
body.write(tmp.getvalue())
del tmp
is_temp_file = True
self.environ['wsgi.input'] = body
body.seek(0)
return body
def _get_body_string(self):
""" read body until content-length or MEMFILE_MAX into a string. Raise
HTTPError(413) on requests that are to large. """
clen = self.content_length
if clen > self.MEMFILE_MAX:
raise HTTPError(413, 'Request entity too large')
if clen < 0: clen = self.MEMFILE_MAX + 1
data = self.body.read(clen)
if len(data) > self.MEMFILE_MAX: # Fail fast
raise HTTPError(413, 'Request entity too large')
return data
@property
def body(self):
""" The HTTP request body as a seek-able file-like object. Depending on
:attr:`MEMFILE_MAX`, this is either a temporary file or a
:class:`io.BytesIO` instance. Accessing this property for the first
time reads and replaces the ``wsgi.input`` environ variable.
Subsequent accesses just do a `seek(0)` on the file object. """
self._body.seek(0)
return self._body
@property
def chunked(self):
""" True if Chunked transfer encoding was. """
return 'chunked' in self.environ.get(
'HTTP_TRANSFER_ENCODING', '').lower()
#: An alias for :attr:`query`.
GET = query
@DictProperty('environ', 'bottle.request.post', read_only=True)
def POST(self):
""" The values of :attr:`forms` and :attr:`files` combined into a single
:class:`FormsDict`. Values are either strings (form values) or
instances of :class:`cgi.FieldStorage` (file uploads).
"""
post = FormsDict()
# We default to application/x-www-form-urlencoded for everything that
# is not multipart and take the fast path (also: 3.1 workaround)
if not self.content_type.startswith('multipart/'):
pairs = _parse_qsl(tonat(self._get_body_string(), 'latin1'))
for key, value in pairs:
post[key] = value
return post
safe_env = {'QUERY_STRING': ''} # Build a safe environment for cgi
for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
if key in self.environ: safe_env[key] = self.environ[key]
args = dict(fp=self.body, environ=safe_env, keep_blank_values=True)
if py3k:
args['encoding'] = 'utf8'
data = cgi.FieldStorage(**args)
self['_cgi.FieldStorage'] = data #http://bugs.python.org/issue18394
data = data.list or []
for item in data:
if item.filename:
post[item.name] = FileUpload(item.file, item.name,
item.filename, item.headers)
else:
post[item.name] = item.value
return post
@property
def url(self):
""" The full request URI including hostname and scheme. If your app
lives behind a reverse proxy or load balancer and you get confusing
results, make sure that the ``X-Forwarded-Host`` header is set
correctly. """
return self.urlparts.geturl()
@DictProperty('environ', 'bottle.request.urlparts', read_only=True)
def urlparts(self):
""" The :attr:`url` string as an :class:`urlparse.SplitResult` tuple.
The tuple contains (scheme, host, path, query_string and fragment),
but the fragment is always empty because it is not visible to the
server. """
env = self.environ
http = env.get('HTTP_X_FORWARDED_PROTO') \
or env.get('wsgi.url_scheme', 'http')
host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST')
if not host:
# HTTP 1.1 requires a Host-header. This is for HTTP/1.0 clients.
host = env.get('SERVER_NAME', '127.0.0.1')
port = env.get('SERVER_PORT')
if port and port != ('80' if http == 'http' else '443'):
host += ':' + port
path = urlquote(self.fullpath)
return UrlSplitResult(http, host, path, env.get('QUERY_STRING'), '')
@property
def fullpath(self):
""" Request path including :attr:`script_name` (if present). """
return urljoin(self.script_name, self.path.lstrip('/'))
@property
def query_string(self):
""" The raw :attr:`query` part of the URL (everything in between ``?``
and ``#``) as a string. """
return self.environ.get('QUERY_STRING', '')
@property
def script_name(self):
""" The initial portion of the URL's `path` that was removed by a higher
level (server or routing middleware) before the application was
called. This script path is returned with leading and trailing
slashes. """
script_name = self.environ.get('SCRIPT_NAME', '').strip('/')
return '/' + script_name + '/' if script_name else '/'
def path_shift(self, shift=1):
""" Shift path segments from :attr:`path` to :attr:`script_name` and
vice versa.
:param shift: The number of path segments to shift. May be negative
to change the shift direction. (default: 1)
"""
script, path = path_shift(self.environ.get('SCRIPT_NAME', '/'), self.path, shift)
self['SCRIPT_NAME'], self['PATH_INFO'] = script, path
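# Illustrative sketch: shifting one path segment from PATH_INFO into
# SCRIPT_NAME (the same mechanism the WSGI mount wrapper uses). Values are
# assumed for this example only.
#
#     # before: SCRIPT_NAME == '/',    PATH_INFO == '/api/users'
#     request.path_shift(1)
#     # after:  SCRIPT_NAME == '/api', PATH_INFO == '/users'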
@property
def content_length(self):
""" The request body length as an integer. The client is responsible to
set this header. Otherwise, the real length of the body is unknown
and -1 is returned. In this case, :attr:`body` will be empty. """
return int(self.environ.get('CONTENT_LENGTH') or -1)
@property
def content_type(self):
""" The Content-Type header as a lowercase-string (default: empty). """
return self.environ.get('CONTENT_TYPE', '').lower()
@property
def is_xhr(self):
""" True if the request was triggered by a XMLHttpRequest. This only
works with JavaScript libraries that support the `X-Requested-With`
header (most of the popular libraries do). """
requested_with = self.environ.get('HTTP_X_REQUESTED_WITH', '')
return requested_with.lower() == 'xmlhttprequest'
@property
def is_ajax(self):
""" Alias for :attr:`is_xhr`. "Ajax" is not the right term. """
return self.is_xhr
@property
def auth(self):
""" HTTP authentication data as a (user, password) tuple. This
implementation currently supports basic (not digest) authentication
only. If the authentication happened at a higher level (e.g. in the
front web-server or a middleware), the password field is None, but
the user field is looked up from the ``REMOTE_USER`` environ
variable. On any errors, None is returned. """
basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION', ''))
if basic: return basic
ruser = self.environ.get('REMOTE_USER')
if ruser: return (ruser, None)
return None
@property
def remote_route(self):
""" A list of all IPs that were involved in this request, starting with
the client IP and followed by zero or more proxies. This only works
if all proxies support the ``X-Forwarded-For`` header. Note
that this information can be forged by malicious clients. """
proxy = self.environ.get('HTTP_X_FORWARDED_FOR')
if proxy: return [ip.strip() for ip in proxy.split(',')]
remote = self.environ.get('REMOTE_ADDR')
return [remote] if remote else []
@property
def remote_addr(self):
""" The client IP as a string. Note that this information can be forged
by malicious clients. """
route = self.remote_route
return route[0] if route else None
def copy(self):
""" Return a new :class:`Request` with a shallow :attr:`environ` copy. """
return Request(self.environ.copy())
def get(self, value, default=None):
return self.environ.get(value, default)
def __getitem__(self, key):
return self.environ[key]
def __delitem__(self, key):
self[key] = ""
del (self.environ[key])
def __iter__(self):
return iter(self.environ)
def __len__(self):
return len(self.environ)
def keys(self):
return self.environ.keys()
def __setitem__(self, key, value):
""" Change an environ value and clear all caches that depend on it. """
if self.environ.get('bottle.request.readonly'):
raise KeyError('The environ dictionary is read-only.')
self.environ[key] = value
todelete = ()
if key == 'wsgi.input':
todelete = ('body', 'forms', 'files', 'params', 'post', 'json')
elif key == 'QUERY_STRING':
todelete = ('query', 'params')
elif key.startswith('HTTP_'):
todelete = ('headers', 'cookies')
for key in todelete:
self.environ.pop('bottle.request.' + key, None)
def __repr__(self):
return '<%s: %s %s>' % (self.__class__.__name__, self.method, self.url)
def __getattr__(self, name):
""" Search in self.environ for additional user defined attributes. """
try:
var = self.environ['bottle.request.ext.%s' % name]
return var.__get__(self) if hasattr(var, '__get__') else var
except KeyError:
raise AttributeError('Attribute %r not defined.' % name)
def __setattr__(self, name, value):
if name == 'environ': return object.__setattr__(self, name, value)
key = 'bottle.request.ext.%s' % name
if key in self.environ:
raise AttributeError("Attribute already defined: %s" % name)
self.environ[key] = value
def __delattr__(self, name):
try:
del self.environ['bottle.request.ext.%s' % name]
except KeyError:
raise AttributeError("Attribute not defined: %s" % name)
def _hkey(s):
return s.title().replace('_', '-')
class HeaderProperty(object):
def __init__(self, name, reader=None, writer=str, default=''):
self.name, self.default = name, default
self.reader, self.writer = reader, writer
self.__doc__ = 'Current value of the %r header.' % name.title()
def __get__(self, obj, _):
if obj is None: return self
value = obj.headers.get(self.name, self.default)
return self.reader(value) if self.reader else value
def __set__(self, obj, value):
obj.headers[self.name] = self.writer(value)
def __delete__(self, obj):
del obj.headers[self.name]
class BaseResponse(object):
""" Storage class for a response body as well as headers and cookies.
This class does support dict-like case-insensitive item-access to
headers, but is NOT a dict. Most notably, iterating over a response
yields parts of the body and not the headers.
:param body: The response body as one of the supported types.
:param status: Either an HTTP status code (e.g. 200) or a status line
including the reason phrase (e.g. '200 OK').
:param headers: A dictionary or a list of name-value pairs.
Additional keyword arguments are added to the list of headers.
Underscores in the header name are replaced with dashes.
"""
default_status = 200
default_content_type = 'text/html; charset=UTF-8'
# Header blacklist for specific response codes
# (rfc2616 section 10.2.3 and 10.3.5)
bad_headers = {
204: set(('Content-Type', 'Content-Length')),
304: set(('Allow', 'Content-Encoding', 'Content-Language',
'Content-Length', 'Content-Range', 'Content-Type',
'Content-Md5', 'Last-Modified'))
}
def __init__(self, body='', status=None, headers=None, **more_headers):
self._cookies = None
self._headers = {}
self.body = body
self.status = status or self.default_status
if headers:
if isinstance(headers, dict):
headers = headers.items()
for name, value in headers:
self.add_header(name, value)
if more_headers:
for name, value in more_headers.items():
self.add_header(name, value)
def copy(self, cls=None):
""" Returns a copy of self. """
cls = cls or BaseResponse
assert issubclass(cls, BaseResponse)
copy = cls()
copy.status = self.status
copy._headers = dict((k, v[:]) for (k, v) in self._headers.items())
if self._cookies:
copy._cookies = SimpleCookie()
copy._cookies.load(self._cookies.output(header=''))
return copy
def __iter__(self):
return iter(self.body)
def close(self):
if hasattr(self.body, 'close'):
self.body.close()
@property
def status_line(self):
""" The HTTP status line as a string (e.g. ``404 Not Found``)."""
return self._status_line
@property
def status_code(self):
""" The HTTP status code as an integer (e.g. 404)."""
return self._status_code
def _set_status(self, status):
if isinstance(status, int):
code, status = status, _HTTP_STATUS_LINES.get(status)
elif ' ' in status:
status = status.strip()
code = int(status.split()[0])
else:
raise ValueError('String status line without a reason phrase.')
if not 100 <= code <= 999:
raise ValueError('Status code out of range.')
self._status_code = code
self._status_line = str(status or ('%d Unknown' % code))
def _get_status(self):
return self._status_line
status = property(
_get_status, _set_status, None,
''' A writeable property to change the HTTP response status. It accepts
either a numeric code (100-999) or a string with a custom reason
phrase (e.g. "404 Brain not found"). Both :data:`status_line` and
:data:`status_code` are updated accordingly. The return value is
always a status string. ''')
del _get_status, _set_status
@property
def headers(self):
""" An instance of :class:`HeaderDict`, a case-insensitive dict-like
view on the response headers. """
hdict = HeaderDict()
hdict.dict = self._headers
return hdict
def __contains__(self, name):
return _hkey(name) in self._headers
def __delitem__(self, name):
del self._headers[_hkey(name)]
def __getitem__(self, name):
return self._headers[_hkey(name)][-1]
def __setitem__(self, name, value):
self._headers[_hkey(name)] = [value if isinstance(value, unicode) else
str(value)]
def get_header(self, name, default=None):
""" Return the value of a previously defined header. If there is no
header with that name, return a default value. """
return self._headers.get(_hkey(name), [default])[-1]
def set_header(self, name, value):
""" Create a new response header, replacing any previously defined
headers with the same name. """
self._headers[_hkey(name)] = [value if isinstance(value, unicode)
else str(value)]
def add_header(self, name, value):
""" Add an additional response header, not removing duplicates. """
self._headers.setdefault(_hkey(name), []).append(
value if isinstance(value, unicode) else str(value))
def iter_headers(self):
""" Yield (header, value) tuples, skipping headers that are not
allowed with the current response status code. """
return self.headerlist
@property
def headerlist(self):
""" WSGI conform list of (header, value) tuples. """
out = []
headers = list(self._headers.items())
if 'Content-Type' not in self._headers:
headers.append(('Content-Type', [self.default_content_type]))
if self._status_code in self.bad_headers:
bad_headers = self.bad_headers[self._status_code]
headers = [h for h in headers if h[0] not in bad_headers]
out += [(name, val) for (name, vals) in headers for val in vals]
if self._cookies:
for c in self._cookies.values():
out.append(('Set-Cookie', c.OutputString()))
if py3k:
return [(k, v.encode('utf8').decode('latin1')) for (k, v) in out]
else:
return [(k, v.encode('utf8') if isinstance(v, unicode) else v)
for (k, v) in out]
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int)
expires = HeaderProperty(
'Expires',
reader=lambda x: datetime.utcfromtimestamp(parse_date(x)),
writer=lambda x: http_date(x))
@property
def charset(self, default='UTF-8'):
""" Return the charset specified in the content-type header (default: utf8). """
if 'charset=' in self.content_type:
return self.content_type.split('charset=')[-1].split(';')[0].strip()
return default
def set_cookie(self, name, value, secret=None, **options):
""" Create a new cookie or replace an old one. If the `secret` parameter is
set, create a `Signed Cookie` (described below).
:param name: the name of the cookie.
:param value: the value of the cookie.
:param secret: a signature key required for signed cookies.
Additionally, this method accepts all RFC 2109 attributes that are
supported by :class:`cookie.Morsel`, including:
:param max_age: maximum age in seconds. (default: None)
:param expires: a datetime object or UNIX timestamp. (default: None)
:param domain: the domain that is allowed to read the cookie.
(default: current domain)
:param path: limits the cookie to a given path (default: current path)
:param secure: limit the cookie to HTTPS connections (default: off).
:param httponly: prevents client-side JavaScript from reading this cookie
(default: off, requires Python 2.6 or newer).
If neither `expires` nor `max_age` is set (default), the cookie will
expire at the end of the browser session (as soon as the browser
window is closed).
Signed cookies may store any pickle-able object and are
cryptographically signed to prevent manipulation. Keep in mind that
cookies are limited to 4kb in most browsers.
Warning: Signed cookies are not encrypted (the client can still see
the content) and not copy-protected (the client can restore an old
cookie). The main intention is to make pickling and unpickling
safe, not to store secret information on the client side.
"""
if not self._cookies:
self._cookies = SimpleCookie()
if secret:
value = touni(cookie_encode((name, value), secret))
elif not isinstance(value, basestring):
raise TypeError('Secret key missing for non-string Cookie.')
# Cookie size plus options must not exceed 4kb.
if len(name) + len(value) > 3800:
raise ValueError('Content does not fit into a cookie.')
self._cookies[name] = value
for key, value in options.items():
if key == 'max_age':
if isinstance(value, timedelta):
value = value.seconds + value.days * 24 * 3600
if key == 'expires':
if isinstance(value, (datedate, datetime)):
value = value.timetuple()
elif isinstance(value, (int, float)):
value = time.gmtime(value)
value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
if key in ('secure', 'httponly') and not value:
continue
self._cookies[name][key.replace('_', '-')] = value
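# Illustrative sketch: setting a signed, HTTP-only cookie that expires after
# one hour. The secret value is assumed for this example only.
#
#     response.set_cookie('account', 'alice', secret='change-me',
#                         max_age=3600, httponly=True, path='/')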
def delete_cookie(self, key, **kwargs):
""" Delete a cookie. Be sure to use the same `domain` and `path`
settings as used to create the cookie. """
kwargs['max_age'] = -1
kwargs['expires'] = 0
self.set_cookie(key, '', **kwargs)
def __repr__(self):
out = ''
for name, value in self.headerlist:
out += '%s: %s\n' % (name.title(), value.strip())
return out
def _local_property():
ls = threading.local()
def fget(_):
try:
return ls.var
except AttributeError:
raise RuntimeError("Request context not initialized.")
def fset(_, value):
ls.var = value
def fdel(_):
del ls.var
return property(fget, fset, fdel, 'Thread-local property')
class LocalRequest(BaseRequest):
""" A thread-local subclass of :class:`BaseRequest` with a different
set of attributes for each thread. There is usually only one global
instance of this class (:data:`request`). If accessed during a
request/response cycle, this instance always refers to the *current*
request (even on a multithreaded server). """
bind = BaseRequest.__init__
environ = _local_property()
class LocalResponse(BaseResponse):
""" A thread-local subclass of :class:`BaseResponse` with a different
set of attributes for each thread. There is usually only one global
instance of this class (:data:`response`). Its attributes are used
to build the HTTP response at the end of the request/response cycle.
"""
bind = BaseResponse.__init__
_status_line = _local_property()
_status_code = _local_property()
_cookies = _local_property()
_headers = _local_property()
body = _local_property()
Request = BaseRequest
Response = BaseResponse
class HTTPResponse(Response, BottleException):
def __init__(self, body='', status=None, headers=None, **more_headers):
super(HTTPResponse, self).__init__(body, status, headers, **more_headers)
def apply(self, other):
other._status_code = self._status_code
other._status_line = self._status_line
other._headers = self._headers
other._cookies = self._cookies
other.body = self.body
class HTTPError(HTTPResponse):
default_status = 500
def __init__(self,
status=None,
body=None,
exception=None,
traceback=None, **more_headers):
self.exception = exception
self.traceback = traceback
super(HTTPError, self).__init__(body, status, **more_headers)
###############################################################################
# Plugins ######################################################################
###############################################################################
class PluginError(BottleException):
pass
class JSONPlugin(object):
name = 'json'
api = 2
def __init__(self, json_dumps=json_dumps):
self.json_dumps = json_dumps
def apply(self, callback, _):
dumps = self.json_dumps
if not dumps: return callback
def wrapper(*a, **ka):
try:
rv = callback(*a, **ka)
except HTTPError as error:
rv = error
if isinstance(rv, dict):
#Attempt to serialize, raises exception on failure
json_response = dumps(rv)
#Set content type only if serialization successful
response.content_type = 'application/json'
return json_response
elif isinstance(rv, HTTPResponse) and isinstance(rv.body, dict):
rv.body = dumps(rv.body)
rv.content_type = 'application/json'
return rv
return wrapper
class TemplatePlugin(object):
""" This plugin applies the :func:`view` decorator to all routes with a
`template` config parameter. If the parameter is a tuple, the second
element must be a dict with additional options (e.g. `template_engine`)
or default variables for the template. """
name = 'template'
api = 2
def setup(self, app):
app.tpl = self
def apply(self, callback, route):
conf = route.config.get('template')
if isinstance(conf, (tuple, list)) and len(conf) == 2:
return view(conf[0], **conf[1])(callback)
elif isinstance(conf, str):
return view(conf)(callback)
else:
return callback
#: Not a plugin, but part of the plugin API. TODO: Find a better place.
class _ImportRedirect(object):
def __init__(self, name, impmask):
""" Create a virtual package that redirects imports (see PEP 302). """
self.name = name
self.impmask = impmask
self.module = sys.modules.setdefault(name, imp.new_module(name))
self.module.__dict__.update({
'__file__': __file__,
'__path__': [],
'__all__': [],
'__loader__': self
})
sys.meta_path.append(self)
def find_module(self, fullname, path=None):
if '.' not in fullname: return
packname = fullname.rsplit('.', 1)[0]
if packname != self.name: return
return self
def load_module(self, fullname):
if fullname in sys.modules: return sys.modules[fullname]
modname = fullname.rsplit('.', 1)[1]
realname = self.impmask % modname
__import__(realname)
module = sys.modules[fullname] = sys.modules[realname]
setattr(self.module, modname, module)
module.__loader__ = self
return module
###############################################################################
# Common Utilities #############################################################
###############################################################################
class MultiDict(DictMixin):
""" This dict stores multiple values per key, but behaves exactly like a
normal dict in that it returns only the newest value for any given key.
There are special methods available to access the full list of values.
"""
def __init__(self, *a, **k):
self.dict = dict((k, [v]) for (k, v) in dict(*a, **k).items())
def __len__(self):
return len(self.dict)
def __iter__(self):
return iter(self.dict)
def __contains__(self, key):
return key in self.dict
def __delitem__(self, key):
del self.dict[key]
def __getitem__(self, key):
return self.dict[key][-1]
def __setitem__(self, key, value):
self.append(key, value)
def keys(self):
return self.dict.keys()
if py3k:
def values(self):
return (v[-1] for v in self.dict.values())
def items(self):
return ((k, v[-1]) for k, v in self.dict.items())
def allitems(self):
return ((k, v) for k, vl in self.dict.items() for v in vl)
iterkeys = keys
itervalues = values
iteritems = items
iterallitems = allitems
else:
def values(self):
return [v[-1] for v in self.dict.values()]
def items(self):
return [(k, v[-1]) for k, v in self.dict.items()]
def iterkeys(self):
return self.dict.iterkeys()
def itervalues(self):
return (v[-1] for v in self.dict.itervalues())
def iteritems(self):
return ((k, v[-1]) for k, v in self.dict.iteritems())
def iterallitems(self):
return ((k, v) for k, vl in self.dict.iteritems() for v in vl)
def allitems(self):
return [(k, v) for k, vl in self.dict.iteritems() for v in vl]
def get(self, key, default=None, index=-1, type=None):
""" Return the most recent value for a key.
:param default: The default value to be returned if the key is not
present or the type conversion fails.
:param index: An index for the list of available values.
:param type: If defined, this callable is used to cast the value
into a specific type. Exceptions are suppressed and result in
the default value being returned.
"""
try:
val = self.dict[key][index]
return type(val) if type else val
except Exception:
pass
return default
def append(self, key, value):
""" Add a new value to the list of values for this key. """
self.dict.setdefault(key, []).append(value)
def replace(self, key, value):
""" Replace the list of values with a single value. """
self.dict[key] = [value]
def getall(self, key):
""" Return a (possibly empty) list of values for a key. """
return self.dict.get(key) or []
#: Aliases for WTForms to mimic other multi-dict APIs (Django)
getone = get
getlist = getall
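# Illustrative sketch of MultiDict semantics; keys and values are assumed for
# this example only.
#
#     md = MultiDict()
#     md['tag'] = 'a'
#     md['tag'] = 'b'                         # appends instead of overwriting
#     md['tag']                               # -> 'b' (newest value wins)
#     md.getall('tag')                        # -> ['a', 'b']
#     md.get('missing', default=0, type=int)  # -> 0 (errors fall back to default)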
class FormsDict(MultiDict):
""" This :class:`MultiDict` subclass is used to store request form data.
In addition to the normal dict-like item access methods (which return
unmodified data as native strings), this container also supports
attribute-like access to its values. Attributes are automatically de-
or recoded to match :attr:`input_encoding` (default: 'utf8'). Missing
attributes default to an empty string. """
#: Encoding used for attribute values.
input_encoding = 'utf8'
#: If true (default), unicode strings are first encoded with `latin1`
#: and then decoded to match :attr:`input_encoding`.
recode_unicode = True
def _fix(self, s, encoding=None):
if isinstance(s, unicode) and self.recode_unicode: # Python 3 WSGI
return s.encode('latin1').decode(encoding or self.input_encoding)
elif isinstance(s, bytes): # Python 2 WSGI
return s.decode(encoding or self.input_encoding)
else:
return s
def decode(self, encoding=None):
""" Returns a copy with all keys and values de- or recoded to match
:attr:`input_encoding`. Some libraries (e.g. WTForms) want a
unicode dictionary. """
copy = FormsDict()
enc = copy.input_encoding = encoding or self.input_encoding
copy.recode_unicode = False
for key, value in self.allitems():
copy.append(self._fix(key, enc), self._fix(value, enc))
return copy
def getunicode(self, name, default=None, encoding=None):
""" Return the value as a unicode string, or the default. """
try:
return self._fix(self[name], encoding)
except (UnicodeError, KeyError):
return default
def __getattr__(self, name, default=unicode()):
# Without this guard, pickle generates a cryptic TypeError:
if name.startswith('__') and name.endswith('__'):
return super(FormsDict, self).__getattr__(name)
return self.getunicode(name, default=default)
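# Illustrative sketch of FormsDict attribute access; the field names are
# assumed for this example only.
#
#     name = request.forms.name                                # '' if missing
#     name = request.forms.getunicode('name', default='anon')  # explicit default
#
# Item access (request.forms['name']) raises KeyError for missing fields and
# returns the raw native string, while attribute access re-decodes the value
# to `input_encoding` and falls back to an empty string.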
class HeaderDict(MultiDict):
""" A case-insensitive version of :class:`MultiDict` that defaults to
replace the old value instead of appending it. """
def __init__(self, *a, **ka):
self.dict = {}
if a or ka: self.update(*a, **ka)
def __contains__(self, key):
return _hkey(key) in self.dict
def __delitem__(self, key):
del self.dict[_hkey(key)]
def __getitem__(self, key):
return self.dict[_hkey(key)][-1]
def __setitem__(self, key, value):
self.dict[_hkey(key)] = [value if isinstance(value, unicode) else
str(value)]
def append(self, key, value):
self.dict.setdefault(_hkey(key), []).append(
value if isinstance(value, unicode) else str(value))
def replace(self, key, value):
self.dict[_hkey(key)] = [value if isinstance(value, unicode) else
str(value)]
def getall(self, key):
return self.dict.get(_hkey(key)) or []
def get(self, key, default=None, index=-1):
return MultiDict.get(self, _hkey(key), default, index)
def filter(self, names):
for name in [_hkey(n) for n in names]:
if name in self.dict:
del self.dict[name]
class WSGIHeaderDict(DictMixin):
""" This dict-like class wraps a WSGI environ dict and provides convenient
access to HTTP_* fields. Keys and values are native strings
(2.x bytes or 3.x unicode) and keys are case-insensitive. If the WSGI
environment contains non-native string values, these are de- or encoded
using a lossless 'latin1' character set.
The API will remain stable even on changes to the relevant PEPs.
Currently PEP 333, 444 and 3333 are supported. (PEP 444 is the only one
that uses non-native strings.)
"""
#: List of keys that do not have a ``HTTP_`` prefix.
cgikeys = ('CONTENT_TYPE', 'CONTENT_LENGTH')
def __init__(self, environ):
self.environ = environ
def _ekey(self, key):
""" Translate header field name to CGI/WSGI environ key. """
key = key.replace('-', '_').upper()
if key in self.cgikeys:
return key
return 'HTTP_' + key
def raw(self, key, default=None):
""" Return the header value as is (may be bytes or unicode). """
return self.environ.get(self._ekey(key), default)
def __getitem__(self, key):
val = self.environ[self._ekey(key)]
if py3k:
if isinstance(val, unicode):
val = val.encode('latin1').decode('utf8')
else:
val = val.decode('utf8')
return val
def __setitem__(self, key, value):
raise TypeError("%s is read-only." % self.__class__)
def __delitem__(self, key):
raise TypeError("%s is read-only." % self.__class__)
def __iter__(self):
for key in self.environ:
if key[:5] == 'HTTP_':
yield _hkey(key[5:])
elif key in self.cgikeys:
yield _hkey(key)
def keys(self):
return [x for x in self]
def __len__(self):
return len(self.keys())
def __contains__(self, key):
return self._ekey(key) in self.environ
class ConfigDict(dict):
""" A dict-like configuration storage with additional support for
namespaces, validators, meta-data, on_change listeners and more.
"""
__slots__ = ('_meta', '_change_listener', '_fallbacks')
def __init__(self):
self._meta = {}
self._change_listener = []
self._fallbacks = []
def load_module(self, path, squash=True):
"""Load values from a Python module.
Example module ``config.py``::
DEBUG = True
SQLITE = {
"db": ":memory:"
}
>>> c = ConfigDict()
>>> c.load_module('config')
{'DEBUG': True, 'SQLITE.db': ':memory:'}
>>> c.load_module("config", False)
{'DEBUG': True, 'SQLITE': {'db': ':memory:'}}
:param squash: If true (default), dictionary values are assumed to
represent namespaces (see :meth:`load_dict`).
"""
config_obj = load(path)
obj = {key: getattr(config_obj, key) for key in dir(config_obj)
if key.isupper()}
if squash:
self.load_dict(obj)
else:
self.update(obj)
return self
def load_config(self, filename, **options):
""" Load values from an ``*.ini`` style config file.
A configuration file consists of sections, each led by a
``[section]`` header, followed by key/value entries separated by
either ``=`` or ``:``. Section names and keys are case-insensitive.
Leading and trailing whitespace is removed from keys and values.
Values can be omitted, in which case the key/value delimiter may
also be left out. Values can also span multiple lines, as long as
they are indented deeper than the first line of the value. Comments
are prefixed by ``#`` or ``;`` and may only appear on their own on
an otherwise empty line.
Both section and key names may contain dots (``.``) as namespace
separators. The actual configuration parameter name is constructed
by joining section name and key name together and converting to
lower case.
The special sections ``bottle`` and ``ROOT`` refer to the root
namespace and the ``DEFAULT`` section defines default values for all
other sections.
With Python 3, extended string interpolation is enabled.
:param filename: The path of a config file, or a list of paths.
:param options: All keyword parameters are passed to the underlying
:class:`python:configparser.ConfigParser` constructor call.
"""
options.setdefault('allow_no_value', True)
if py3k:
options.setdefault('interpolation',
configparser.ExtendedInterpolation())
conf = configparser.ConfigParser(**options)
conf.read(filename)
for section in conf.sections():
for key in conf.options(section):
value = conf.get(section, key)
if section not in ['bottle', 'ROOT']:
key = section + '.' + key
self[key.lower()] = value
return self
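# Illustrative sketch: loading an INI-style file. The file name and contents
# are assumed for this example only.
#
#     # app.ini
#     #   [bottle]
#     #   debug = true
#     #   [sqlite]
#     #   db = /tmp/test.db
#
#     c = ConfigDict()
#     c.load_config('app.ini')
#     c['debug']       # -> 'true'          (root namespace; values stay strings)
#     c['sqlite.db']   # -> '/tmp/test.db'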
def load_dict(self, source, namespace=''):
""" Load values from a dictionary structure. Nesting can be used to
represent namespaces.
>>> c = ConfigDict()
>>> c.load_dict({'some': {'namespace': {'key': 'value'} } })
{'some.namespace.key': 'value'}
"""
for key, value in source.items():
if isinstance(key, basestring):
nskey = (namespace + '.' + key).strip('.')
if isinstance(value, dict):
self.load_dict(value, namespace=nskey)
else:
self[nskey] = value
else:
raise TypeError('Key has type %r (not a string)' % type(key))
return self
def update(self, *a, **ka):
""" If the first parameter is a string, all keys are prefixed with this
namespace. Apart from that it works just as the usual dict.update().
>>> c = ConfigDict()
>>> c.update('some.namespace', key='value')
"""
prefix = ''
if a and isinstance(a[0], basestring):
prefix = a[0].strip('.') + '.'
a = a[1:]
for key, value in dict(*a, **ka).items():
self[prefix + key] = value
def setdefault(self, key, value):
if key not in self:
self[key] = value
return self[key]
def __setitem__(self, key, value):
if not isinstance(key, basestring):
raise TypeError('Key has type %r (not a string)' % type(key))
value = self.meta_get(key, 'filter', lambda x: x)(value)
if key in self and self[key] is value:
return
self._on_change(key, value)
dict.__setitem__(self, key, value)
def __delitem__(self, key):
self._on_change(key, None)
dict.__delitem__(self, key)
def __missing__(self, key):
for fallback in self._fallbacks:
if key in fallback:
value = self[key] = fallback[key]
self.meta_set(key, 'fallback', fallback)
return value
raise KeyError(key)
def _on_change(self, key, value):
for cb in self._change_listener:
if cb(self, key, value):
return True
def _add_change_listener(self, func):
self._change_listener.append(func)
return func
def _set_fallback(self, fallback):
self._fallbacks.append(fallback)
@fallback._add_change_listener
def fallback_update(conf, key, value):
if self.meta_get(key, 'fallback') is conf:
self.meta_set(key, 'fallback', None)
dict.__delitem__(self, key)
@self._add_change_listener
def self_update(conf, key, value):
if conf.meta_get(key, 'fallback'):
conf.meta_set(key, 'fallback', None)
def meta_get(self, key, metafield, default=None):
""" Return the value of a meta field for a key. """
return self._meta.get(key, {}).get(metafield, default)
def meta_set(self, key, metafield, value):
""" Set the meta field for a key to a new value. """
self._meta.setdefault(key, {})[metafield] = value
def meta_list(self, key):
""" Return an iterable of meta field names defined for a key. """
return self._meta.get(key, {}).keys()
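# Illustrative sketch of ConfigDict meta fields (not part of the original
# source): a 'filter' meta value, if set, is applied by __setitem__ before a
# value is stored.
#   c = ConfigDict()
#   c.meta_set('port', 'filter', int)
#   c['port'] = '8080'   # stored as the integer 8080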
class AppStack(list):
""" A stack-like list. Calling it returns the head of the stack. """
def __call__(self):
""" Return the current default application. """
return self.default
def push(self, value=None):
""" Add a new :class:`Bottle` instance to the stack """
if not isinstance(value, Bottle):
value = Bottle()
self.append(value)
return value
new_app = push
@property
def default(self):
try:
return self[-1]
except IndexError:
return self.push()
class WSGIFileWrapper(object):
def __init__(self, fp, buffer_size=1024 * 64):
self.fp, self.buffer_size = fp, buffer_size
for attr in ('fileno', 'close', 'read', 'readlines', 'tell', 'seek'):
if hasattr(fp, attr): setattr(self, attr, getattr(fp, attr))
def __iter__(self):
buff, read = self.buffer_size, self.read
while True:
part = read(buff)
if not part: return
yield part
class _closeiter(object):
""" This only exists to be able to attach a .close method to iterators that
do not support attribute assignment (most of itertools). """
def __init__(self, iterator, close=None):
self.iterator = iterator
self.close_callbacks = makelist(close)
def __iter__(self):
return iter(self.iterator)
def close(self):
for func in self.close_callbacks:
func()
class ResourceManager(object):
""" This class manages a list of search paths and helps to find and open
application-bound resources (files).
:param base: default value for :meth:`add_path` calls.
:param opener: callable used to open resources.
:param cachemode: controls which lookups are cached. One of 'all',
'found' or 'none'.
"""
def __init__(self, base='./', opener=open, cachemode='all'):
self.opener = opener
self.base = base
self.cachemode = cachemode
#: A list of search paths. See :meth:`add_path` for details.
self.path = []
#: A cache for resolved paths. ``res.cache.clear()`` clears the cache.
self.cache = {}
def add_path(self, path, base=None, index=None, create=False):
""" Add a new path to the list of search paths. Return False if the
path does not exist.
:param path: The new search path. Relative paths are turned into
an absolute and normalized form. If the path looks like a file
(not ending in `/`), the filename is stripped off.
:param base: Path used to absolutize relative search paths.
Defaults to :attr:`base` which defaults to ``os.getcwd()``.
:param index: Position within the list of search paths. Defaults
to last index (appends to the list).
The `base` parameter makes it easy to reference files installed
along with a python module or package::
res.add_path('./resources/', __file__)
"""
base = os.path.abspath(os.path.dirname(base or self.base))
path = os.path.abspath(os.path.join(base, os.path.dirname(path)))
path += os.sep
if path in self.path:
self.path.remove(path)
if create and not os.path.isdir(path):
os.makedirs(path)
if index is None:
self.path.append(path)
else:
self.path.insert(index, path)
self.cache.clear()
return os.path.exists(path)
def __iter__(self):
""" Iterate over all existing files in all registered paths. """
search = self.path[:]
while search:
path = search.pop()
if not os.path.isdir(path): continue
for name in os.listdir(path):
full = os.path.join(path, name)
if os.path.isdir(full): search.append(full)
else: yield full
def lookup(self, name):
""" Search for a resource and return an absolute file path, or `None`.
The :attr:`path` list is searched in order. The first match is
returned. Symlinks are followed. The result is cached to speed up
future lookups. """
if name not in self.cache or DEBUG:
for path in self.path:
fpath = os.path.join(path, name)
if os.path.isfile(fpath):
if self.cachemode in ('all', 'found'):
self.cache[name] = fpath
return fpath
if self.cachemode == 'all':
self.cache[name] = None
return self.cache[name]
def open(self, name, mode='r', *args, **kwargs):
""" Find a resource and return a file object, or raise IOError. """
fname = self.lookup(name)
if not fname: raise IOError("Resource %r not found." % name)
return self.opener(fname, mode=mode, *args, **kwargs)
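# Minimal ResourceManager usage sketch (paths and file names are assumptions,
# not from the original source):
#   res = ResourceManager()
#   res.add_path('./resources/', base=__file__)
#   fname = res.lookup('defaults.json')      # absolute path or None
#   with res.open('defaults.json') as fp:    # raises IOError if missing
#       data = fp.read()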
class FileUpload(object):
def __init__(self, fileobj, name, filename, headers=None):
""" Wrapper for file uploads. """
#: Open file(-like) object (BytesIO buffer or temporary file)
self.file = fileobj
#: Name of the upload form field
self.name = name
#: Raw filename as sent by the client (may contain unsafe characters)
self.raw_filename = filename
#: A :class:`HeaderDict` with additional headers (e.g. content-type)
self.headers = HeaderDict(headers) if headers else HeaderDict()
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int, default=-1)
@cached_property
def filename(self):
""" Name of the file on the client file system, but normalized to ensure
file system compatibility. An empty filename is returned as 'empty'.
Only ASCII letters, digits, dashes, underscores and dots are
allowed in the final filename. Accents are removed, if possible.
Whitespace is replaced by a single dash. Leading or trailing dots
or dashes are removed. The filename is limited to 255 characters.
"""
fname = self.raw_filename
if not isinstance(fname, unicode):
fname = fname.decode('utf8', 'ignore')
fname = normalize('NFKD', fname)
fname = fname.encode('ASCII', 'ignore').decode('ASCII')
fname = os.path.basename(fname.replace('\\', os.path.sep))
fname = re.sub(r'[^a-zA-Z0-9-_.\s]', '', fname).strip()
fname = re.sub(r'[-\s]+', '-', fname).strip('.-')
return fname[:255] or 'empty'
def _copy_file(self, fp, chunk_size=2 ** 16):
read, write, offset = self.file.read, fp.write, self.file.tell()
while 1:
buf = read(chunk_size)
if not buf: break
write(buf)
self.file.seek(offset)
def save(self, destination, overwrite=False, chunk_size=2 ** 16):
""" Save file to disk or copy its content to an open file(-like) object.
If *destination* is a directory, :attr:`filename` is added to the
path. Existing files are not overwritten by default (IOError).
:param destination: File path, directory or file(-like) object.
:param overwrite: If True, replace existing files. (default: False)
:param chunk_size: Bytes to read at a time. (default: 64kb)
"""
if isinstance(destination, basestring): # Except file-likes here
if os.path.isdir(destination):
destination = os.path.join(destination, self.filename)
if not overwrite and os.path.exists(destination):
raise IOError('File exists.')
with open(destination, 'wb') as fp:
self._copy_file(fp, chunk_size)
else:
self._copy_file(destination, chunk_size)
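# Sketch of typical FileUpload use inside a request handler (the form field
# name and target directory are assumptions, not from the original source):
#   upload = request.files.get('upload')
#   upload.save('/tmp/uploads')              # directory: the sanitized filename is appended
#   upload.save(open('copy.bin', 'wb'))      # or copy into an open file object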
###############################################################################
# Application Helper ###########################################################
###############################################################################
def abort(code=500, text='Unknown Error.'):
""" Aborts execution and causes a HTTP error. """
raise HTTPError(code, text)
def redirect(url, code=None):
""" Aborts execution and causes a 303 or 302 redirect, depending on
the HTTP protocol version. """
if not code:
code = 303 if request.get('SERVER_PROTOCOL') == "HTTP/1.1" else 302
res = response.copy(cls=HTTPResponse)
res.status = code
res.body = ""
res.set_header('Location', urljoin(request.url, url))
raise res
def _file_iter_range(fp, offset, bytes, maxread=1024 * 1024):
""" Yield chunks from a range in a file. No chunk is bigger than maxread."""
fp.seek(offset)
while bytes > 0:
part = fp.read(min(bytes, maxread))
if not part: break
bytes -= len(part)
yield part
def static_file(filename, root,
mimetype=True,
download=False,
charset='UTF-8',
etag=None):
""" Open a file in a safe way and return an instance of :exc:`HTTPResponse`
that can be sent back to the client.
:param filename: Name or path of the file to send, relative to ``root``.
:param root: Root path for file lookups. Should be an absolute directory
path.
:param mimetype: Provide the content-type header (default: guess from
file extension)
:param download: If True, ask the browser to open a `Save as...` dialog
instead of opening the file with the associated program. You can
specify a custom filename as a string. If not specified, the
original filename is used (default: False).
:param charset: The charset for files with a ``text/*`` mime-type.
(default: UTF-8)
:param etag: Provide a pre-computed ETag header. If set to ``False``,
ETag handling is disabled. (default: auto-generate ETag header)
While checking user input is always a good idea, this function provides
additional protection: it prevents malicious ``filename`` parameters from
breaking out of the ``root`` directory and leaking sensitive information
to an attacker.
Read-protected files or files outside of the ``root`` directory are
answered with ``403 Access Denied``. Missing files result in a
``404 Not Found`` response. Conditional requests (``If-Modified-Since``,
``If-None-Match``) are answered with ``304 Not Modified`` whenever
possible. ``HEAD`` and ``Range`` requests (used by download managers to
check or continue partial downloads) are also handled automatically.
"""
root = os.path.join(os.path.abspath(root), '')
filename = os.path.abspath(os.path.join(root, filename.strip('/\\')))
headers = dict()
if not filename.startswith(root):
return HTTPError(403, "Access denied.")
if not os.path.exists(filename) or not os.path.isfile(filename):
return HTTPError(404, "File does not exist.")
if not os.access(filename, os.R_OK):
return HTTPError(403, "You do not have permission to access this file.")
if mimetype is True:
if download and download is not True:
mimetype, encoding = mimetypes.guess_type(download)
else:
mimetype, encoding = mimetypes.guess_type(filename)
if encoding: headers['Content-Encoding'] = encoding
if mimetype:
if (mimetype[:5] == 'text/' or mimetype == 'application/javascript')\
and charset and 'charset' not in mimetype:
mimetype += '; charset=%s' % charset
headers['Content-Type'] = mimetype
if download:
download = os.path.basename(filename if download is True else download)
headers['Content-Disposition'] = 'attachment; filename="%s"' % download
stats = os.stat(filename)
headers['Content-Length'] = clen = stats.st_size
lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime))
headers['Last-Modified'] = lm
headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
getenv = request.environ.get
if etag is None:
etag = '%d:%d:%d:%d:%s' % (stats.st_dev, stats.st_ino, stats.st_mtime,
clen, filename)
etag = hashlib.sha1(tob(etag)).hexdigest()
if etag:
headers['ETag'] = etag
check = getenv('HTTP_IF_NONE_MATCH')
if check and check == etag:
return HTTPResponse(status=304, **headers)
ims = getenv('HTTP_IF_MODIFIED_SINCE')
if ims:
ims = parse_date(ims.split(";")[0].strip())
if ims is not None and ims >= int(stats.st_mtime):
return HTTPResponse(status=304, **headers)
body = '' if request.method == 'HEAD' else open(filename, 'rb')
headers["Accept-Ranges"] = "bytes"
range_header = getenv('HTTP_RANGE')
if range_header:
ranges = list(parse_range_header(range_header, clen))
if not ranges:
return HTTPError(416, "Requested Range Not Satisfiable")
offset, end = ranges[0]
headers["Content-Range"] = "bytes %d-%d/%d" % (offset, end - 1, clen)
headers["Content-Length"] = str(end - offset)
if body: body = _file_iter_range(body, offset, end - offset)
return HTTPResponse(body, status=206, **headers)
return HTTPResponse(body, **headers)
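# Typical static_file usage sketch (route and root directory are assumptions,
# not part of the original source):
#   @route('/static/<filepath:path>')
#   def serve_static(filepath):
#       return static_file(filepath, root='/var/www/static')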
###############################################################################
# HTTP Utilities and MISC (TODO) ###############################################
###############################################################################
def debug(mode=True):
""" Change the debug level.
There is only one debug level supported at the moment."""
global DEBUG
if mode: warnings.simplefilter('default')
DEBUG = bool(mode)
def http_date(value):
if isinstance(value, (datedate, datetime)):
value = value.utctimetuple()
elif isinstance(value, (int, float)):
value = time.gmtime(value)
if not isinstance(value, basestring):
value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
return value
def parse_date(ims):
""" Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch. """
try:
ts = email.utils.parsedate_tz(ims)
return time.mktime(ts[:8] + (0, )) - (ts[9] or 0) - time.timezone
except (TypeError, ValueError, IndexError, OverflowError):
return None
def parse_auth(header):
""" Parse rfc2617 HTTP authentication header string (basic) and return (user,pass) tuple or None"""
try:
method, data = header.split(None, 1)
if method.lower() == 'basic':
user, pwd = touni(base64.b64decode(tob(data))).split(':', 1)
return user, pwd
except (KeyError, ValueError):
return None
def parse_range_header(header, maxlen=0):
""" Yield (start, end) ranges parsed from a HTTP Range header. Skip
unsatisfiable ranges. The end index is non-inclusive."""
if not header or header[:6] != 'bytes=': return
ranges = [r.split('-', 1) for r in header[6:].split(',') if '-' in r]
for start, end in ranges:
try:
if not start: # bytes=-100 -> last 100 bytes
start, end = max(0, maxlen - int(end)), maxlen
elif not end: # bytes=100- -> all but the first 100 bytes
start, end = int(start), maxlen
else: # bytes=100-200 -> bytes 100-200 (inclusive)
start, end = int(start), min(int(end) + 1, maxlen)
if 0 <= start < end <= maxlen:
yield start, end
except ValueError:
pass
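# Illustrative parse_range_header example (sketch, not part of the original
# source): for a 1000-byte resource,
#   list(parse_range_header('bytes=0-499,500-', 1000))
# yields [(0, 500), (500, 1000)]; end indices are non-inclusive.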
#: Header tokenizer used by _parse_http_header()
_hsplit = re.compile('(?:(?:"((?:[^"\\\\]+|\\\\.)*)")|([^;,=]+))([;,=]?)').findall
def _parse_http_header(h):
""" Parses a typical multi-valued and parametrised HTTP header (e.g. Accept headers) and returns a list of values
and parameters. For non-standard or broken input, this implementation may return partial results.
:param h: A header string (e.g. ``text/html,text/plain;q=0.9,*/*;q=0.8``)
:return: List of (value, params) tuples. The second element is a (possibly empty) dict.
"""
values = []
if '"' not in h: # INFO: Fast path without regexp (~2x faster)
for value in h.split(','):
parts = value.split(';')
values.append((parts[0].strip(), {}))
for attr in parts[1:]:
name, value = attr.split('=', 1)
values[-1][1][name.strip()] = value.strip()
else:
lop, key, attrs = ',', None, {}
for quoted, plain, tok in _hsplit(h):
value = plain.strip() if plain else quoted.replace('\\"', '"')
if lop == ',':
attrs = {}
values.append((value, attrs))
elif lop == ';':
if tok == '=':
key = value
else:
attrs[value] = ''
elif lop == '=' and key:
attrs[key] = value
key = None
lop = tok
return values
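# Illustrative _parse_http_header example (sketch, not part of the original
# source):
#   _parse_http_header('text/html,text/plain;q=0.9')
#   -> [('text/html', {}), ('text/plain', {'q': '0.9'})]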
def _parse_qsl(qs):
r = []
for pair in qs.replace(';', '&').split('&'):
if not pair: continue
nv = pair.split('=', 1)
if len(nv) != 2: nv.append('')
key = urlunquote(nv[0].replace('+', ' '))
value = urlunquote(nv[1].replace('+', ' '))
r.append((key, value))
return r
def _lscmp(a, b):
""" Compares two strings in a cryptographically safe way:
Runtime is not affected by length of common prefix. """
return not sum(0 if x == y else 1
for x, y in zip(a, b)) and len(a) == len(b)
def cookie_encode(data, key, digestmod=None):
""" Encode and sign a pickle-able object. Return a (byte) string """
digestmod = digestmod or hashlib.sha256
msg = base64.b64encode(pickle.dumps(data, -1))
sig = base64.b64encode(hmac.new(tob(key), msg, digestmod=digestmod).digest())
return tob('!') + sig + tob('?') + msg
def cookie_decode(data, key, digestmod=None):
""" Verify and decode an encoded string. Return an object or None."""
data = tob(data)
if cookie_is_encoded(data):
sig, msg = data.split(tob('?'), 1)
digestmod = digestmod or hashlib.sha256
hashed = hmac.new(tob(key), msg, digestmod=digestmod).digest()
if _lscmp(sig[1:], base64.b64encode(hashed)):
return pickle.loads(base64.b64decode(msg))
return None
def cookie_is_encoded(data):
""" Return True if the argument looks like a encoded cookie."""
return bool(data.startswith(tob('!')) and tob('?') in data)
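# Cookie signing roundtrip sketch (key and payload are assumptions, not from
# the original source):
#   token = cookie_encode({'user': 'alice'}, key='s3cret')
#   cookie_decode(token, key='s3cret')   # -> {'user': 'alice'}
#   cookie_decode(token, key='wrong')    # -> None (signature mismatch)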
def html_escape(string):
""" Escape HTML special characters ``&<>`` and quotes ``'"``. """
return string.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')\
.replace('"', '&quot;').replace("'", '&#039;')
def html_quote(string):
""" Escape and quote a string to be used as an HTTP attribute."""
return '"%s"' % html_escape(string).replace('\n', ' ')\
.replace('\r', ' ').replace('\t', '	')
def yieldroutes(func):
""" Return a generator for routes that match the signature (name, args)
of the func parameter. This may yield more than one route if the function
takes optional keyword arguments. The output is best described by example::
a() -> '/a'
b(x, y) -> '/b/<x>/<y>'
c(x, y=5) -> '/c/<x>' and '/c/<x>/<y>'
d(x=5, y=6) -> '/d' and '/d/<x>' and '/d/<x>/<y>'
"""
path = '/' + func.__name__.replace('__', '/').lstrip('/')
spec = getargspec(func)
argc = len(spec[0]) - len(spec[3] or [])
path += ('/<%s>' * argc) % tuple(spec[0][:argc])
yield path
for arg in spec[0][argc:]:
path += '/<%s>' % arg
yield path
def path_shift(script_name, path_info, shift=1):
""" Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.
:return: The modified paths.
:param script_name: The SCRIPT_NAME path.
:param path_info: The PATH_INFO path.
:param shift: The number of path fragments to shift. May be negative to
change the shift direction. (default: 1)
"""
if shift == 0: return script_name, path_info
pathlist = path_info.strip('/').split('/')
scriptlist = script_name.strip('/').split('/')
if pathlist and pathlist[0] == '': pathlist = []
if scriptlist and scriptlist[0] == '': scriptlist = []
if 0 < shift <= len(pathlist):
moved = pathlist[:shift]
scriptlist = scriptlist + moved
pathlist = pathlist[shift:]
elif 0 > shift >= -len(scriptlist):
moved = scriptlist[shift:]
pathlist = moved + pathlist
scriptlist = scriptlist[:shift]
else:
empty = 'SCRIPT_NAME' if shift < 0 else 'PATH_INFO'
raise AssertionError("Cannot shift. Nothing left from %s" % empty)
new_script_name = '/' + '/'.join(scriptlist)
new_path_info = '/' + '/'.join(pathlist)
if path_info.endswith('/') and pathlist: new_path_info += '/'
return new_script_name, new_path_info
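# Illustrative path_shift examples (sketch, not part of the original source):
#   path_shift('/a', '/b/c', shift=1)    # -> ('/a/b', '/c')
#   path_shift('/a/b', '/c', shift=-1)   # -> ('/a', '/b/c')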
def auth_basic(check, realm="private", text="Access denied"):
""" Callback decorator to require HTTP auth (basic).
TODO: Add route(check_auth=...) parameter. """
def decorator(func):
@functools.wraps(func)
def wrapper(*a, **ka):
user, password = request.auth or (None, None)
if user is None or not check(user, password):
err = HTTPError(401, text)
err.add_header('WWW-Authenticate', 'Basic realm="%s"' % realm)
return err
return func(*a, **ka)
return wrapper
return decorator
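# Hypothetical auth_basic usage sketch (the check function and credentials are
# made up for illustration):
#   def check_credentials(user, password):
#       return user == 'admin' and password == 's3cret'
#   @route('/admin')
#   @auth_basic(check_credentials)
#   def admin_page():
#       return 'hello, admin'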
# Shortcuts for common Bottle methods.
# They all refer to the current default application.
def make_default_app_wrapper(name):
""" Return a callable that relays calls to the current default app. """
@functools.wraps(getattr(Bottle, name))
def wrapper(*a, **ka):
return getattr(app(), name)(*a, **ka)
return wrapper
route = make_default_app_wrapper('route')
get = make_default_app_wrapper('get')
post = make_default_app_wrapper('post')
put = make_default_app_wrapper('put')
delete = make_default_app_wrapper('delete')
patch = make_default_app_wrapper('patch')
error = make_default_app_wrapper('error')
mount = make_default_app_wrapper('mount')
hook = make_default_app_wrapper('hook')
install = make_default_app_wrapper('install')
uninstall = make_default_app_wrapper('uninstall')
url = make_default_app_wrapper('get_url')
###############################################################################
# Server Adapter ###############################################################
###############################################################################
# Before you edit or add a server adapter, please read:
# - https://github.com/bottlepy/bottle/pull/647#issuecomment-60152870
# - https://github.com/bottlepy/bottle/pull/865#issuecomment-242795341
class ServerAdapter(object):
quiet = False
def __init__(self, host='127.0.0.1', port=8080, **options):
self.options = options
self.host = host
self.port = int(port)
def run(self, handler): # pragma: no cover
pass
def __repr__(self):
args = ', '.join(['%s=%s' % (k, repr(v))
for k, v in self.options.items()])
return "%s(%s)" % (self.__class__.__name__, args)
class CGIServer(ServerAdapter):
quiet = True
def run(self, handler): # pragma: no cover
from wsgiref.handlers import CGIHandler
def fixed_environ(environ, start_response):
environ.setdefault('PATH_INFO', '')
return handler(environ, start_response)
CGIHandler().run(fixed_environ)
class FlupFCGIServer(ServerAdapter):
def run(self, handler): # pragma: no cover
import flup.server.fcgi
self.options.setdefault('bindAddress', (self.host, self.port))
flup.server.fcgi.WSGIServer(handler, **self.options).run()
class WSGIRefServer(ServerAdapter):
def run(self, app): # pragma: no cover
from wsgiref.simple_server import make_server
from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
import socket
class FixedHandler(WSGIRequestHandler):
def address_string(self): # Prevent reverse DNS lookups please.
return self.client_address[0]
def log_request(*args, **kw):
if not self.quiet:
return WSGIRequestHandler.log_request(*args, **kw)
handler_cls = self.options.get('handler_class', FixedHandler)
server_cls = self.options.get('server_class', WSGIServer)
if ':' in self.host: # Fix wsgiref for IPv6 addresses.
if getattr(server_cls, 'address_family') == socket.AF_INET:
class server_cls(server_cls):
address_family = socket.AF_INET6
self.srv = make_server(self.host, self.port, app, server_cls,
handler_cls)
self.port = self.srv.server_port # update to the actual port (0 means a random free port)
try:
self.srv.serve_forever()
except KeyboardInterrupt:
self.srv.server_close() # Prevent ResourceWarning: unclosed socket
raise
class CherryPyServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from cherrypy import wsgiserver
self.options['bind_addr'] = (self.host, self.port)
self.options['wsgi_app'] = handler
certfile = self.options.get('certfile')
if certfile:
del self.options['certfile']
keyfile = self.options.get('keyfile')
if keyfile:
del self.options['keyfile']
server = wsgiserver.CherryPyWSGIServer(**self.options)
if certfile:
server.ssl_certificate = certfile
if keyfile:
server.ssl_private_key = keyfile
try:
server.start()
finally:
server.stop()
class WaitressServer(ServerAdapter):
def run(self, handler):
from waitress import serve
serve(handler, host=self.host, port=self.port, _quiet=self.quiet, **self.options)
class PasteServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from paste import httpserver
from paste.translogger import TransLogger
handler = TransLogger(handler, setup_console_handler=(not self.quiet))
httpserver.serve(handler,
host=self.host,
port=str(self.port), **self.options)
class MeinheldServer(ServerAdapter):
def run(self, handler):
from meinheld import server
server.listen((self.host, self.port))
server.run(handler)
class FapwsServer(ServerAdapter):
""" Extremely fast webserver using libev. See http://www.fapws.org/ """
def run(self, handler): # pragma: no cover
import fapws._evwsgi as evwsgi
from fapws import base, config
port = self.port
if float(config.SERVER_IDENT[-2:]) > 0.4:
# fapws3 silently changed its API in 0.5
port = str(port)
evwsgi.start(self.host, port)
# fapws3 never releases the GIL. Complain upstream. I tried. No luck.
if 'BOTTLE_CHILD' in os.environ and not self.quiet:
_stderr("WARNING: Auto-reloading does not work with Fapws3.\n")
_stderr(" (Fapws3 breaks python thread support)\n")
evwsgi.set_base_module(base)
def app(environ, start_response):
environ['wsgi.multiprocess'] = False
return handler(environ, start_response)
evwsgi.wsgi_cb(('', app))
evwsgi.run()
class TornadoServer(ServerAdapter):
""" The super hyped asynchronous server by facebook. Untested. """
def run(self, handler): # pragma: no cover
import tornado.wsgi, tornado.httpserver, tornado.ioloop
container = tornado.wsgi.WSGIContainer(handler)
server = tornado.httpserver.HTTPServer(container)
server.listen(port=self.port, address=self.host)
tornado.ioloop.IOLoop.instance().start()
class AppEngineServer(ServerAdapter):
""" Adapter for Google App Engine. """
quiet = True
def run(self, handler):
depr(0, 13, "AppEngineServer no longer required",
"Configure your application directly in your app.yaml")
from google.appengine.ext.webapp import util
# A main() function in the handler script enables 'App Caching'.
# Let's make sure it is there. This _really_ improves performance.
module = sys.modules.get('__main__')
if module and not hasattr(module, 'main'):
module.main = lambda: util.run_wsgi_app(handler)
util.run_wsgi_app(handler)
class TwistedServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from twisted.web import server, wsgi
from twisted.python.threadpool import ThreadPool
from twisted.internet import reactor
thread_pool = ThreadPool()
thread_pool.start()
reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, handler))
reactor.listenTCP(self.port, factory, interface=self.host)
if not reactor.running:
reactor.run()
class DieselServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from diesel.protocols.wsgi import WSGIApplication
app = WSGIApplication(handler, port=self.port)
app.run()
class GeventServer(ServerAdapter):
""" Untested. Options:
* See gevent.wsgi.WSGIServer() documentation for more options.
"""
def run(self, handler):
from gevent import pywsgi, local
if not isinstance(threading.local(), local.local):
msg = "Bottle requires gevent.monkey.patch_all() (before import)"
raise RuntimeError(msg)
if self.quiet:
self.options['log'] = None
address = (self.host, self.port)
server = pywsgi.WSGIServer(address, handler, **self.options)
if 'BOTTLE_CHILD' in os.environ:
import signal
signal.signal(signal.SIGINT, lambda s, f: server.stop())
server.serve_forever()
class GunicornServer(ServerAdapter):
""" Untested. See http://gunicorn.org/configure.html for options. """
def run(self, handler):
from gunicorn.app.base import Application
config = {'bind': "%s:%d" % (self.host, int(self.port))}
config.update(self.options)
class GunicornApplication(Application):
def init(self, parser, opts, args):
return config
def load(self):
return handler
GunicornApplication().run()
class EventletServer(ServerAdapter):
""" Untested. Options:
* `backlog` adjust the eventlet backlog parameter which is the maximum
number of queued connections. Should be at least 1; the maximum
value is system-dependent.
* `family`: (default is 2) socket family, optional. See socket
documentation for available families.
"""
def run(self, handler):
from eventlet import wsgi, listen, patcher
if not patcher.is_monkey_patched(os):
msg = "Bottle requires eventlet.monkey_patch() (before import)"
raise RuntimeError(msg)
socket_args = {}
for arg in ('backlog', 'family'):
try:
socket_args[arg] = self.options.pop(arg)
except KeyError:
pass
address = (self.host, self.port)
try:
wsgi.server(listen(address, **socket_args), handler,
log_output=(not self.quiet))
except TypeError:
# Fallback, if we have old version of eventlet
wsgi.server(listen(address), handler)
class RocketServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from rocket import Rocket
server = Rocket((self.host, self.port), 'wsgi', {'wsgi_app': handler})
server.start()
class BjoernServer(ServerAdapter):
""" Fast server written in C: https://github.com/jonashaag/bjoern """
def run(self, handler):
from bjoern import run
run(handler, self.host, self.port)
class AsyncioServerAdapter(ServerAdapter):
""" Extend ServerAdapter for adding custom event loop """
def get_event_loop(self):
pass
class AiohttpServer(AsyncioServerAdapter):
""" Untested.
aiohttp
https://pypi.python.org/pypi/aiohttp/
"""
def get_event_loop(self):
import asyncio
return asyncio.new_event_loop()
def run(self, handler):
import asyncio
from aiohttp.wsgi import WSGIServerHttpProtocol
self.loop = self.get_event_loop()
asyncio.set_event_loop(self.loop)
protocol_factory = lambda: WSGIServerHttpProtocol(
handler,
readpayload=True,
debug=(not self.quiet))
self.loop.run_until_complete(self.loop.create_server(protocol_factory,
self.host,
self.port))
if 'BOTTLE_CHILD' in os.environ:
import signal
signal.signal(signal.SIGINT, lambda s, f: self.loop.stop())
try:
self.loop.run_forever()
except KeyboardInterrupt:
self.loop.stop()
class AiohttpUVLoopServer(AiohttpServer):
"""uvloop
https://github.com/MagicStack/uvloop
"""
def get_event_loop(self):
import uvloop
return uvloop.new_event_loop()
class AutoServer(ServerAdapter):
""" Untested. """
adapters = [WaitressServer, PasteServer, TwistedServer, CherryPyServer,
WSGIRefServer]
def run(self, handler):
for sa in self.adapters:
try:
return sa(self.host, self.port, **self.options).run(handler)
except ImportError:
pass
server_names = {
'cgi': CGIServer,
'flup': FlupFCGIServer,
'wsgiref': WSGIRefServer,
'waitress': WaitressServer,
'cherrypy': CherryPyServer,
'paste': PasteServer,
'fapws3': FapwsServer,
'tornado': TornadoServer,
'gae': AppEngineServer,
'twisted': TwistedServer,
'diesel': DieselServer,
'meinheld': MeinheldServer,
'gunicorn': GunicornServer,
'eventlet': EventletServer,
'gevent': GeventServer,
'rocket': RocketServer,
'bjoern': BjoernServer,
'aiohttp': AiohttpServer,
'uvloop': AiohttpUVLoopServer,
'auto': AutoServer,
}
###############################################################################
# Application Control ##########################################################
###############################################################################
def load(target, **namespace):
""" Import a module or fetch an object from a module.
* ``package.module`` returns `module` as a module object.
* ``pack.mod:name`` returns the module variable `name` from `pack.mod`.
* ``pack.mod:func()`` calls `pack.mod.func()` and returns the result.
The last form accepts not only function calls, but any type of
expression. Keyword arguments passed to this function are available as
local variables. Example: ``load('re:compile(x)', x='[a-z]')``
"""
module, target = target.split(":", 1) if ':' in target else (target, None)
if module not in sys.modules: __import__(module)
if not target: return sys.modules[module]
if target.isalnum(): return getattr(sys.modules[module], target)
package_name = module.split('.')[0]
namespace[package_name] = sys.modules[package_name]
return eval('%s.%s' % (module, target), namespace)
def load_app(target):
""" Load a bottle application from a module and make sure that the import
does not affect the current default application, but returns a separate
application object. See :func:`load` for the target parameter. """
global NORUN
NORUN, nr_old = True, NORUN
tmp = default_app.push() # Create a new "default application"
try:
rv = load(target) # Import the target module
return rv if callable(rv) else tmp
finally:
default_app.remove(tmp) # Remove the temporarily added default application
NORUN = nr_old
_debug = debug
def run(app=None,
server='wsgiref',
host='127.0.0.1',
port=8080,
interval=1,
reloader=False,
quiet=False,
plugins=None,
debug=None,
config=None, **kargs):
""" Start a server instance. This method blocks until the server terminates.
:param app: WSGI application or target string supported by
:func:`load_app`. (default: :func:`default_app`)
:param server: Server adapter to use. See :data:`server_names` keys
for valid names or pass a :class:`ServerAdapter` subclass.
(default: `wsgiref`)
:param host: Server address to bind to. Pass ``0.0.0.0`` to listen on
all interfaces including the external one. (default: 127.0.0.1)
:param port: Server port to bind to. Values below 1024 require root
privileges. (default: 8080)
:param reloader: Start auto-reloading server? (default: False)
:param interval: Auto-reloader interval in seconds (default: 1)
:param quiet: Suppress output to stdout and stderr? (default: False)
:param options: Options passed to the server adapter.
"""
if NORUN: return
if reloader and not os.environ.get('BOTTLE_CHILD'):
import subprocess
lockfile = None
try:
fd, lockfile = tempfile.mkstemp(prefix='bottle.', suffix='.lock')
os.close(fd) # We only need this file to exist. We never write to it
while os.path.exists(lockfile):
args = [sys.executable] + sys.argv
environ = os.environ.copy()
environ['BOTTLE_CHILD'] = 'true'
environ['BOTTLE_LOCKFILE'] = lockfile
p = subprocess.Popen(args, env=environ)
while p.poll() is None: # Busy wait...
os.utime(lockfile, None) # I am alive!
time.sleep(interval)
if p.poll() != 3:
if os.path.exists(lockfile): os.unlink(lockfile)
sys.exit(p.poll())
except KeyboardInterrupt:
pass
finally:
if os.path.exists(lockfile):
os.unlink(lockfile)
return
try:
if debug is not None: _debug(debug)
app = app or default_app()
if isinstance(app, basestring):
app = load_app(app)
if not callable(app):
raise ValueError("Application is not callable: %r" % app)
for plugin in plugins or []:
if isinstance(plugin, basestring):
plugin = load(plugin)
app.install(plugin)
if config:
app.config.update(config)
if server in server_names:
server = server_names.get(server)
if isinstance(server, basestring):
server = load(server)
if isinstance(server, type):
server = server(host=host, port=port, **kargs)
if not isinstance(server, ServerAdapter):
raise ValueError("Unknown or unsupported server: %r" % server)
server.quiet = server.quiet or quiet
if not server.quiet:
_stderr("Bottle v%s server starting up (using %s)...\n" %
(__version__, repr(server)))
_stderr("Listening on http://%s:%d/\n" %
(server.host, server.port))
_stderr("Hit Ctrl-C to quit.\n\n")
if reloader:
lockfile = os.environ.get('BOTTLE_LOCKFILE')
bgcheck = FileCheckerThread(lockfile, interval)
with bgcheck:
server.run(app)
if bgcheck.status == 'reload':
sys.exit(3)
else:
server.run(app)
except KeyboardInterrupt:
pass
except (SystemExit, MemoryError):
raise
except:
if not reloader: raise
if not getattr(server, 'quiet', quiet):
print_exc()
time.sleep(interval)
sys.exit(3)
class FileCheckerThread(threading.Thread):
""" Interrupt main-thread as soon as a changed module file is detected,
the lockfile gets deleted or gets too old. """
def __init__(self, lockfile, interval):
threading.Thread.__init__(self)
self.daemon = True
self.lockfile, self.interval = lockfile, interval
#: Is one of 'reload', 'error' or 'exit'
self.status = None
def run(self):
exists = os.path.exists
mtime = lambda p: os.stat(p).st_mtime
files = dict()
for module in list(sys.modules.values()):
path = getattr(module, '__file__', '')
if path[-4:] in ('.pyo', '.pyc'): path = path[:-1]
if path and exists(path): files[path] = mtime(path)
while not self.status:
if not exists(self.lockfile)\
or mtime(self.lockfile) < time.time() - self.interval - 5:
self.status = 'error'
thread.interrupt_main()
for path, lmtime in list(files.items()):
if not exists(path) or mtime(path) > lmtime:
self.status = 'reload'
thread.interrupt_main()
break
time.sleep(self.interval)
def __enter__(self):
self.start()
def __exit__(self, exc_type, *_):
if not self.status: self.status = 'exit' # silent exit
self.join()
return exc_type is not None and issubclass(exc_type, KeyboardInterrupt)
###############################################################################
# Template Adapters ############################################################
###############################################################################
class TemplateError(HTTPError):
def __init__(self, message):
HTTPError.__init__(self, 500, message)
class BaseTemplate(object):
""" Base class and minimal API for template adapters """
extensions = ['tpl', 'html', 'thtml', 'stpl']
settings = {} #used in prepare()
defaults = {} #used in render()
def __init__(self,
source=None,
name=None,
lookup=None,
encoding='utf8', **settings):
""" Create a new template.
If the source parameter (str or buffer) is missing, the name argument
is used to guess a template filename. Subclasses can assume that
self.source and/or self.filename are set. Both are strings.
The lookup, encoding and settings parameters are stored as instance
variables.
The lookup parameter stores a list containing directory paths.
The encoding parameter should be used to decode byte strings or files.
The settings parameter contains a dict for engine-specific settings.
"""
self.name = name
self.source = source.read() if hasattr(source, 'read') else source
self.filename = source.filename if hasattr(source, 'filename') else None
self.lookup = [os.path.abspath(x) for x in lookup] if lookup else []
self.encoding = encoding
self.settings = self.settings.copy() # Copy from class variable
self.settings.update(settings) # Apply
if not self.source and self.name:
self.filename = self.search(self.name, self.lookup)
if not self.filename:
raise TemplateError('Template %s not found.' % repr(name))
if not self.source and not self.filename:
raise TemplateError('No template specified.')
self.prepare(**self.settings)
@classmethod
def search(cls, name, lookup=None):
""" Search name in all directories specified in lookup.
First without, then with common extensions. Return first hit. """
if not lookup:
raise depr(0, 12, "Empty template lookup path.", "Configure a template lookup path.")
if os.path.isabs(name):
raise depr(0, 12, "Use of absolute path for template name.",
"Refer to templates with names or paths relative to the lookup path.")
for spath in lookup:
spath = os.path.abspath(spath) + os.sep
fname = os.path.abspath(os.path.join(spath, name))
if not fname.startswith(spath): continue
if os.path.isfile(fname): return fname
for ext in cls.extensions:
if os.path.isfile('%s.%s' % (fname, ext)):
return '%s.%s' % (fname, ext)
@classmethod
def global_config(cls, key, *args):
""" This reads or sets the global settings stored in class.settings. """
if args:
cls.settings = cls.settings.copy() # Make settings local to class
cls.settings[key] = args[0]
else:
return cls.settings[key]
def prepare(self, **options):
""" Run preparations (parsing, caching, ...).
It should be possible to call this again to refresh a template or to
update settings.
"""
raise NotImplementedError
def render(self, *args, **kwargs):
""" Render the template with the specified local variables and return
a single byte or unicode string. If it is a byte string, the encoding
must match self.encoding. This method must be thread-safe!
Local variables may be provided in dictionaries (args)
or directly, as keywords (kwargs).
"""
raise NotImplementedError
class MakoTemplate(BaseTemplate):
def prepare(self, **options):
from mako.template import Template
from mako.lookup import TemplateLookup
options.update({'input_encoding': self.encoding})
options.setdefault('format_exceptions', bool(DEBUG))
lookup = TemplateLookup(directories=self.lookup, **options)
if self.source:
self.tpl = Template(self.source, lookup=lookup, **options)
else:
self.tpl = Template(uri=self.name,
filename=self.filename,
lookup=lookup, **options)
def render(self, *args, **kwargs):
for dictarg in args:
kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
class CheetahTemplate(BaseTemplate):
def prepare(self, **options):
from Cheetah.Template import Template
self.context = threading.local()
self.context.vars = {}
options['searchList'] = [self.context.vars]
if self.source:
self.tpl = Template(source=self.source, **options)
else:
self.tpl = Template(file=self.filename, **options)
def render(self, *args, **kwargs):
for dictarg in args:
kwargs.update(dictarg)
self.context.vars.update(self.defaults)
self.context.vars.update(kwargs)
out = str(self.tpl)
self.context.vars.clear()
return out
class Jinja2Template(BaseTemplate):
def prepare(self, filters=None, tests=None, globals={}, **kwargs):
from jinja2 import Environment, FunctionLoader
self.env = Environment(loader=FunctionLoader(self.loader), **kwargs)
if filters: self.env.filters.update(filters)
if tests: self.env.tests.update(tests)
if globals: self.env.globals.update(globals)
if self.source:
self.tpl = self.env.from_string(self.source)
else:
self.tpl = self.env.get_template(self.filename)
def render(self, *args, **kwargs):
for dictarg in args:
kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
def loader(self, name):
if name == self.filename:
fname = name
else:
fname = self.search(name, self.lookup)
if not fname: return
with open(fname, "rb") as f:
return (f.read().decode(self.encoding), fname, lambda: False)
class SimpleTemplate(BaseTemplate):
def prepare(self,
escape_func=html_escape,
noescape=False,
syntax=None, **ka):
self.cache = {}
enc = self.encoding
self._str = lambda x: touni(x, enc)
self._escape = lambda x: escape_func(touni(x, enc))
self.syntax = syntax
if noescape:
self._str, self._escape = self._escape, self._str
@cached_property
def co(self):
return compile(self.code, self.filename or '<string>', 'exec')
@cached_property
def code(self):
source = self.source
if not source:
with open(self.filename, 'rb') as f:
source = f.read()
try:
source, encoding = touni(source), 'utf8'
except UnicodeError:
raise depr(0, 11, 'Unsupported template encodings.', 'Use utf-8 for templates.')
parser = StplParser(source, encoding=encoding, syntax=self.syntax)
code = parser.translate()
self.encoding = parser.encoding
return code
def _rebase(self, _env, _name=None, **kwargs):
_env['_rebase'] = (_name, kwargs)
def _include(self, _env, _name=None, **kwargs):
env = _env.copy()
env.update(kwargs)
if _name not in self.cache:
self.cache[_name] = self.__class__(name=_name, lookup=self.lookup, syntax=self.syntax)
return self.cache[_name].execute(env['_stdout'], env)
def execute(self, _stdout, kwargs):
env = self.defaults.copy()
env.update(kwargs)
env.update({
'_stdout': _stdout,
'_printlist': _stdout.extend,
'include': functools.partial(self._include, env),
'rebase': functools.partial(self._rebase, env),
'_rebase': None,
'_str': self._str,
'_escape': self._escape,
'get': env.get,
'setdefault': env.setdefault,
'defined': env.__contains__
})
eval(self.co, env)
if env.get('_rebase'):
subtpl, rargs = env.pop('_rebase')
rargs['base'] = ''.join(_stdout) #copy stdout
del _stdout[:] # clear stdout
return self._include(env, subtpl, **rargs)
return env
def render(self, *args, **kwargs):
""" Render the template using keyword arguments as local variables. """
env = {}
stdout = []
for dictarg in args:
env.update(dictarg)
env.update(kwargs)
self.execute(stdout, env)
return ''.join(stdout)
class StplSyntaxError(TemplateError):
pass
class StplParser(object):
""" Parser for stpl templates. """
_re_cache = {} #: Cache for compiled re patterns
# This huge pile of voodoo magic splits python code into 8 different tokens.
# We use the verbose (?x) regex mode to make this more manageable
_re_tok = _re_inl = r'''((?mx) # verbose and dot-matches-newline mode
[urbURB]*
(?: ''(?!')
|""(?!")
|'{6}
|"{6}
|'(?:[^\\']|\\.)+?'
|"(?:[^\\"]|\\.)+?"
|'{3}(?:[^\\]|\\.|\n)+?'{3}
|"{3}(?:[^\\]|\\.|\n)+?"{3}
)
)'''
_re_inl = _re_tok.replace(r'|\n', '') # We re-use this string pattern later
_re_tok += r'''
# 2: Comments (until end of line, but not the newline itself)
|(\#.*)
# 3: Open and close (4) grouping tokens
|([\[\{\(])
|([\]\}\)])
# 5,6: Keywords that start or continue a python block (only start of line)
|^([\ \t]*(?:if|for|while|with|try|def|class)\b)
|^([\ \t]*(?:elif|else|except|finally)\b)
# 7: Our special 'end' keyword (but only if it stands alone)
|((?:^|;)[\ \t]*end[\ \t]*(?=(?:%(block_close)s[\ \t]*)?\r?$|;|\#))
# 8: A customizable end-of-code-block template token (only end of line)
|(%(block_close)s[\ \t]*(?=\r?$))
# 9: And finally, a single newline. The 10th token is 'everything else'
|(\r?\n)
'''
# Match the start tokens of code areas in a template
_re_split = r'''(?m)^[ \t]*(\\?)((%(line_start)s)|(%(block_start)s))'''
# Match inline statements (may contain python strings)
_re_inl = r'''%%(inline_start)s((?:%s|[^'"\n]+?)*?)%%(inline_end)s''' % _re_inl
default_syntax = '<% %> % {{ }}'
def __init__(self, source, syntax=None, encoding='utf8'):
self.source, self.encoding = touni(source, encoding), encoding
self.set_syntax(syntax or self.default_syntax)
self.code_buffer, self.text_buffer = [], []
self.lineno, self.offset = 1, 0
self.indent, self.indent_mod = 0, 0
self.paren_depth = 0
def get_syntax(self):
""" Tokens as a space separated string (default: <% %> % {{ }}) """
return self._syntax
def set_syntax(self, syntax):
self._syntax = syntax
self._tokens = syntax.split()
if syntax not in self._re_cache:
names = 'block_start block_close line_start inline_start inline_end'
etokens = map(re.escape, self._tokens)
pattern_vars = dict(zip(names.split(), etokens))
patterns = (self._re_split, self._re_tok, self._re_inl)
patterns = [re.compile(p % pattern_vars) for p in patterns]
self._re_cache[syntax] = patterns
self.re_split, self.re_tok, self.re_inl = self._re_cache[syntax]
syntax = property(get_syntax, set_syntax)
def translate(self):
if self.offset: raise RuntimeError('Parser is a one time instance.')
while True:
m = self.re_split.search(self.source, pos=self.offset)
if m:
text = self.source[self.offset:m.start()]
self.text_buffer.append(text)
self.offset = m.end()
if m.group(1): # Escape syntax
line, sep, _ = self.source[self.offset:].partition('\n')
self.text_buffer.append(self.source[m.start():m.start(1)] +
m.group(2) + line + sep)
self.offset += len(line + sep)
continue
self.flush_text()
self.offset += self.read_code(self.source[self.offset:],
multiline=bool(m.group(4)))
else:
break
self.text_buffer.append(self.source[self.offset:])
self.flush_text()
return ''.join(self.code_buffer)
def read_code(self, pysource, multiline):
code_line, comment = '', ''
offset = 0
while True:
m = self.re_tok.search(pysource, pos=offset)
if not m:
code_line += pysource[offset:]
offset = len(pysource)
self.write_code(code_line.strip(), comment)
break
code_line += pysource[offset:m.start()]
offset = m.end()
_str, _com, _po, _pc, _blk1, _blk2, _end, _cend, _nl = m.groups()
if self.paren_depth > 0 and (_blk1 or _blk2): # a if b else c
code_line += _blk1 or _blk2
continue
if _str: # Python string
code_line += _str
elif _com: # Python comment (up to EOL)
comment = _com
if multiline and _com.strip().endswith(self._tokens[1]):
multiline = False # Allow end-of-block in comments
elif _po: # open parenthesis
self.paren_depth += 1
code_line += _po
elif _pc: # close parenthesis
if self.paren_depth > 0:
# we could check for matching parentheses here, but it's
# easier to leave that to python - just check counts
self.paren_depth -= 1
code_line += _pc
elif _blk1: # Start-block keyword (if/for/while/def/try/...)
code_line, self.indent_mod = _blk1, -1
self.indent += 1
elif _blk2: # Continue-block keyword (else/elif/except/...)
code_line, self.indent_mod = _blk2, -1
elif _end: # The non-standard 'end'-keyword (ends a block)
self.indent -= 1
elif _cend: # The end-code-block template token (usually '%>')
if multiline: multiline = False
else: code_line += _cend
else: # \n
self.write_code(code_line.strip(), comment)
self.lineno += 1
code_line, comment, self.indent_mod = '', '', 0
if not multiline:
break
return offset
def flush_text(self):
text = ''.join(self.text_buffer)
del self.text_buffer[:]
if not text: return
parts, pos, nl = [], 0, '\\\n' + ' ' * self.indent
for m in self.re_inl.finditer(text):
prefix, pos = text[pos:m.start()], m.end()
if prefix:
parts.append(nl.join(map(repr, prefix.splitlines(True))))
if prefix.endswith('\n'): parts[-1] += nl
parts.append(self.process_inline(m.group(1).strip()))
if pos < len(text):
prefix = text[pos:]
lines = prefix.splitlines(True)
if lines[-1].endswith('\\\\\n'): lines[-1] = lines[-1][:-3]
elif lines[-1].endswith('\\\\\r\n'): lines[-1] = lines[-1][:-4]
parts.append(nl.join(map(repr, lines)))
code = '_printlist((%s,))' % ', '.join(parts)
self.lineno += code.count('\n') + 1
self.write_code(code)
@staticmethod
def process_inline(chunk):
if chunk[0] == '!': return '_str(%s)' % chunk[1:]
return '_escape(%s)' % chunk
def write_code(self, line, comment=''):
code = ' ' * (self.indent + self.indent_mod)
code += line.lstrip() + comment + '\n'
self.code_buffer.append(code)
def template(*args, **kwargs):
"""
Get a rendered template as a string.
You can use a name, a filename or a template string as first parameter.
Template rendering arguments can be passed as dictionaries
or directly (as keyword arguments).
"""
tpl = args[0] if args else None
for dictarg in args[1:]:
kwargs.update(dictarg)
adapter = kwargs.pop('template_adapter', SimpleTemplate)
lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
tplid = (id(lookup), tpl)
if tplid not in TEMPLATES or DEBUG:
settings = kwargs.pop('template_settings', {})
if isinstance(tpl, adapter):
TEMPLATES[tplid] = tpl
if settings: TEMPLATES[tplid].prepare(**settings)
elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
TEMPLATES[tplid] = adapter(source=tpl, lookup=lookup, **settings)
else:
TEMPLATES[tplid] = adapter(name=tpl, lookup=lookup, **settings)
if not TEMPLATES[tplid]:
abort(500, 'Template (%s) not found' % tpl)
return TEMPLATES[tplid].render(kwargs)
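# Usage sketch (template names and strings are assumptions, not from this
# file):
#   template('hello_template', name='World')     # looked up via TEMPLATE_PATH
#   template('Hello {{name}}!', name='World')    # inline template string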
mako_template = functools.partial(template, template_adapter=MakoTemplate)
cheetah_template = functools.partial(template,
template_adapter=CheetahTemplate)
jinja2_template = functools.partial(template, template_adapter=Jinja2Template)
def view(tpl_name, **defaults):
""" Decorator: renders a template for a handler.
The handler can control its behavior as follows:
- return a dict of template vars to fill out the template
- return something other than a dict and the view decorator will not
process the template, but return the handler result as is.
This includes returning a HTTPResponse(dict) to get,
for instance, JSON with autojson or other castfilters.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if isinstance(result, (dict, DictMixin)):
tplvars = defaults.copy()
tplvars.update(result)
return template(tpl_name, **tplvars)
elif result is None:
return template(tpl_name, defaults)
return result
return wrapper
return decorator
mako_view = functools.partial(view, template_adapter=MakoTemplate)
cheetah_view = functools.partial(view, template_adapter=CheetahTemplate)
jinja2_view = functools.partial(view, template_adapter=Jinja2Template)
###############################################################################
# Constants and Globals ########################################################
###############################################################################
TEMPLATE_PATH = ['./', './views/']
TEMPLATES = {}
DEBUG = False
NORUN = False # If set, run() does nothing. Used by load_app()
#: A dict to map HTTP status codes (e.g. 404) to phrases (e.g. 'Not Found')
HTTP_CODES = httplib.responses.copy()
HTTP_CODES[418] = "I'm a teapot" # RFC 2324
HTTP_CODES[428] = "Precondition Required"
HTTP_CODES[429] = "Too Many Requests"
HTTP_CODES[431] = "Request Header Fields Too Large"
HTTP_CODES[511] = "Network Authentication Required"
_HTTP_STATUS_LINES = dict((k, '%d %s' % (k, v))
for (k, v) in HTTP_CODES.items())
#: The default template used for error pages. Override with @error()
ERROR_PAGE_TEMPLATE = """
%%try:
%%from %s import DEBUG, request
<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html>
<head>
<title>Error: {{e.status}}</title>
<style type="text/css">
html {background-color: #eee; font-family: sans-serif;}
body {background-color: #fff; border: 1px solid #ddd;
padding: 15px; margin: 15px;}
pre {background-color: #eee; border: 1px solid #ddd; padding: 5px;}
</style>
</head>
<body>
<h1>Error: {{e.status}}</h1>
<p>Sorry, the requested URL <tt>{{repr(request.url)}}</tt>
caused an error:</p>
<pre>{{e.body}}</pre>
%%if DEBUG and e.exception:
<h2>Exception:</h2>
<pre>{{repr(e.exception)}}</pre>
%%end
%%if DEBUG and e.traceback:
<h2>Traceback:</h2>
<pre>{{e.traceback}}</pre>
%%end
</body>
</html>
%%except ImportError:
<b>ImportError:</b> Could not generate the error page. Please add bottle to
the import path.
%%end
""" % __name__
#: A thread-safe instance of :class:`LocalRequest`. If accessed from within a
#: request callback, this instance always refers to the *current* request
#: (even on a multi-threaded server).
request = LocalRequest()
#: A thread-safe instance of :class:`LocalResponse`. It is used to change the
#: HTTP response for the *current* request.
response = LocalResponse()
#: A thread-safe namespace. Not used by Bottle.
local = threading.local()
# Initialize the app stack (creation of the first empty Bottle app is deferred until needed)
# BC: 0.6.4 and needed for run()
apps = app = default_app = AppStack()
#: A virtual package that redirects import statements.
#: Example: ``import bottle.ext.sqlite`` actually imports `bottle_sqlite`.
ext = _ImportRedirect('bottle.ext' if __name__ == '__main__' else
__name__ + ".ext", 'bottle_%s').module
def _main(argv): # pragma: no coverage
args, parser = _cli_parse(argv)
def _cli_error(cli_msg):
parser.print_help()
_stderr('\nError: %s\n' % cli_msg)
sys.exit(1)
if args.version:
_stdout('Bottle %s\n' % __version__)
sys.exit(0)
if not args.app:
_cli_error("No application entry point specified.")
sys.path.insert(0, '.')
sys.modules.setdefault('bottle', sys.modules['__main__'])
host, port = (args.bind or 'localhost'), 8080
if ':' in host and host.rfind(']') < host.rfind(':'):
host, port = host.rsplit(':', 1)
host = host.strip('[]')
config = ConfigDict()
for cfile in args.conf or []:
try:
if cfile.endswith('.json'):
with open(cfile, 'rb') as fp:
config.load_dict(json_loads(fp.read()))
else:
config.load_config(cfile)
except configparser.Error as parse_error:
_cli_error(parse_error)
except IOError:
_cli_error("Unable to read config file %r" % cfile)
except (UnicodeError, TypeError, ValueError) as error:
_cli_error("Unable to parse config file %r: %s" % (cfile, error))
for cval in args.param or []:
if '=' in cval:
config.update((cval.split('=', 1),))
else:
config[cval] = True
run(args.app,
host=host,
port=int(port),
server=args.server,
reloader=args.reload,
plugins=args.plugin,
debug=args.debug,
config=config)
if __name__ == '__main__': # pragma: no coverage
_main(sys.argv)
| []
| []
| [
"BOTTLE_LOCKFILE",
"BOTTLE_CHILD"
]
| [] | ["BOTTLE_LOCKFILE", "BOTTLE_CHILD"] | python | 2 | 0 | |
main.go | package main
import (
"encoding/json"
"errors"
"fmt"
"log"
"math/rand"
"net/http"
"os"
"strconv"
"strings"
"time"
"github.com/gocolly/colly/v2"
)
//Film struct for http response
type film struct {
Slug string `json:"slug"` //url of film
Image string `json:"image_url"` //url of image
Year string `json:"release_year"`
Name string `json:"film_name"`
}
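// Illustrative JSON shape produced for a film (values are made up):
//   {"slug":"https://letterboxd.com/film/...","image_url":"https://...jpg","release_year":"2019","film_name":"..."}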
//struct for channel to send a film and whether a user has finished
type filmSend struct {
film film //film to be sent over channel
done bool //if user is done
}
type nothingReason int
const (
INTERSECT = iota
UNION
)
type nothingError struct {
reason nothingReason
}
func (e *nothingError) ToString() string {
switch e.reason {
case INTERSECT:
return "empty intersect"
case UNION:
return "empty union"
default:
return "big error"
}
}
func (e *nothingError) Error() string {
return e.ToString()
}
const url = "https://letterboxd.com/ajax/poster" //first part of url for getting full info on film
const urlEnd = "menu/linked/125x187/" // second part of url for getting full info on film
const site = "https://letterboxd.com"
func main() {
getFilmHandler := http.HandlerFunc(getFilm)
http.Handle("/film", getFilmHandler)
log.Println("serving at :8080")
port := os.Getenv("PORT")
if port == "" {
port = "8080"
log.Printf("Defaulting to port %s", port)
}
log.Printf("Listening on port %s", port)
http.ListenAndServe(":"+port, nil)
}
var year int
func init() {
year = time.Now().Year()
}
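// getFilm handles GET /film?users=<name>[&users=<name>...] with the optional
// flags "intersect" and "ignore_unreleased"; for example (hypothetical
// request) /film?users=alice&users=bob&intersect. It responds with a single
// film encoded as JSON.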
func getFilm(w http.ResponseWriter, r *http.Request) {
enableCors(&w)
log.Println(year)
query := r.URL.Query() //Get URL Params(type map)
users, ok := query["users"]
log.Println(len(users))
if !ok || len(users) == 0 {
http.Error(w, "no users", 400)
return
}
_, inter := query["intersect"]
_, ignore := query["ignore_unreleased"]
var userFilm film
var err error
if ignore {
if inter {
if len(users) == 1 {
userFilm, err = scrapeUser(users, false, true)
} else {
userFilm, err = scrapeUser(users, true, true)
}
} else {
userFilm, err = scrapeUser(users, false, true)
}
} else {
if inter {
if len(users) == 1 {
userFilm, err = scrapeUser(users, false, false)
} else {
userFilm, err = scrapeUser(users, true, false)
}
} else {
userFilm, err = scrapeUser(users, false, false)
}
}
if err != nil {
var e *nothingError
if errors.As(err, &e) {
switch e.reason {
case INTERSECT:
http.Error(w, "Intersect error", 406)
return
case UNION:
http.Error(w, "Union error", 404)
return
}
}
}
js, err := json.Marshal(userFilm)
if err != nil {
http.Error(w, "internal error", 500)
return
}
w.Write(js)
}
//main scraping function
func scrapeUser(users []string, intersect bool, ignore bool) (film, error) {
var user int = 0 //counter for the number of users: increases by one when a user's page starts being scraped and decreases when that user has finished, roughly like a semaphore
var totalFilms []film //final list to hold all films
ch := make(chan filmSend) //channel to send films over
// start go routine to scrape each user
for _, a := range users {
log.Println(a)
user++
if strings.Contains(a, "/") {
go scrapeList(a, ch)
} else {
go scrape(a, ch)
}
}
for {
userFilm := <-ch
if userFilm.done { //if done was received then the scrape for that user has finished, so decrease the user count
user--
if user == 0 {
break
}
} else {
totalFilms = append(totalFilms, userFilm.film) //append the film received over the channel to the list
}
}
//choose a random film from the list
if len(totalFilms) == 0 {
return film{}, &nothingError{reason: UNION}
}
log.Print("results")
var finalFilm film
if intersect {
intersectList := getintersect(totalFilms,len(users))
length := len(intersectList)
if length == 0 {
return film{}, &nothingError{reason: INTERSECT}
}
if ignore {
fmt.Println("ignore")
intersectList = removeCurrentYear(intersectList)
length = len(intersectList)
// guard: filtering out unreleased films can empty the list, which would make rand.Intn panic
if length == 0 {
return film{}, &nothingError{reason: INTERSECT}
}
}
rand.Seed(time.Now().UTC().UnixNano())
n := rand.Intn(length)
log.Println(length)
log.Println(n)
log.Println(intersectList[n])
finalFilm = intersectList[n]
} else {
rand.Seed(time.Now().UTC().UnixNano())
if ignore {
fmt.Println("ignore")
totalFilms = removeCurrentYear(totalFilms)
// guard: filtering out unreleased films can empty the list, which would make rand.Intn panic
if len(totalFilms) == 0 {
return film{}, &nothingError{reason: UNION}
}
}
n := rand.Intn(len(totalFilms))
log.Println(len(totalFilms))
log.Println(n)
log.Println(totalFilms[n])
finalFilm = totalFilms[n]
}
return finalFilm, nil
}
//function to scrape a single user
func scrape(userName string, ch chan filmSend) {
siteToVisit := site + "/" + userName + "/watchlist"
ajc := colly.NewCollector(
colly.Async(true),
)
ajc.OnHTML("div.film-poster", func(e *colly.HTMLElement) { //secondary collector to get the main data for each film
name := e.Attr("data-film-name")
slug := e.Attr("data-target-link")
img := e.ChildAttr("img", "src")
year := e.Attr("data-film-release-year")
tempfilm := film{
Slug: (site + slug),
Image: makeBigger(img),
Year: year,
Name: name,
}
ch <- ok(tempfilm)
})
c := colly.NewCollector(
colly.Async(true),
)
c.Limit(&colly.LimitRule{DomainGlob: "*", Parallelism: 100})
c.OnHTML(".poster-container", func(e *colly.HTMLElement) { //primary scraper to get the URL of each film page that contains the full information
e.ForEach("div.film-poster", func(i int, ein *colly.HTMLElement) {
slug := ein.Attr("data-film-slug")
ajc.Visit(url + slug + urlEnd) //queue an async request to collect the full film data
})
})
c.OnHTML("a[href]", func(e *colly.HTMLElement) {
link := e.Attr("href")
if strings.Contains(link, "watchlist/page") {
e.Request.Visit(e.Request.AbsoluteURL(link))
}
})
c.Visit(siteToVisit)
c.Wait()
ajc.Wait()
ch <- done() // user has finished, so send done through the channel
}
func scrapeList(listnameIn string, ch chan filmSend) {
siteToVisit := ""
listname := strings.ToLower(listnameIn)
if strings.Contains(listname, "/list/") {
siteToVisit = site + "/" + listname
} else {
strslice := strings.Split(listname, "/") //strslice[0] is user name strslice[1] is listname
siteToVisit = site + "/" + strslice[0] + "/list/" + strslice[1]
}
log.Println(siteToVisit)
ajc := colly.NewCollector(
colly.Async(true),
)
ajc.OnHTML("div.film-poster", func(e *colly.HTMLElement) {
name := e.Attr("data-film-name")
slug := e.Attr("data-target-link")
img := e.ChildAttr("img", "src")
year := e.Attr("data-film-release-year")
tempfilm := film{
Slug: (site + slug),
Image: makeBigger(img),
Year: year,
Name: name,
}
ch <- ok(tempfilm)
})
c := colly.NewCollector(
colly.Async(true),
)
c.Limit(&colly.LimitRule{DomainGlob: "*", Parallelism: 100})
c.OnHTML(".poster-container", func(e *colly.HTMLElement) {
e.ForEach("div.film-poster", func(i int, ein *colly.HTMLElement) {
slug := ein.Attr("data-film-slug")
ajc.Visit(url + slug + urlEnd)
})
})
c.OnHTML("a[href]", func(e *colly.HTMLElement) {
link := e.Attr("href")
if strings.Contains(link, "/page") {
e.Request.Visit(e.Request.AbsoluteURL(link))
}
})
c.Visit(siteToVisit)
c.Wait()
ajc.Wait()
ch <- done()
}
func ok(f film) filmSend {
return filmSend{
film: f,
done: false,
}
}
func done() filmSend {
return filmSend{
film: film{},
done: true,
}
}
func getintersect(filmSlice []film, numOfUsers int) []film {
keys := make(map[film]int)
list := []film{}
for _, entry := range filmSlice {
if keys[entry] < (numOfUsers - 1) {
keys[entry]++
} else {
list = append(list, entry)
}
}
return list
}
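// Illustrative note (not part of the original source): with numOfUsers == 2 and an
// input of [A, B, A], the first A and B only bump their counters, while the second A
// is appended, so only films that appear in at least numOfUsers watchlists survive.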
func enableCors(w *http.ResponseWriter) {
(*w).Header().Set("Access-Control-Allow-Origin", "*")
}
func makeBigger(url string) string {
return strings.ReplaceAll(url, "-0-125-0-187-", "-0-230-0-345-")
}
func removeCurrentYear(filmSlice []film) []film {
list := []film{}
for _, entry := range filmSlice {
if entry.Year == "" {
continue
}
filmYear, _ := strconv.Atoi(entry.Year)
if filmYear < year {
list = append(list, entry)
}
}
return list
}
| [
"\"PORT\""
]
| []
| [
"PORT"
]
| [] | ["PORT"] | go | 1 | 0 | |
cmd/box/main.go | package main
import (
"bytes"
"context"
"fmt"
"os"
"strconv"
"strings"
"github.com/3uxi/steam-box/pkg/steambox"
"github.com/google/go-github/github"
)
func main() {
steamAPIKey := os.Getenv("STEAM_API_KEY")
steamID, _ := strconv.ParseUint(os.Getenv("STEAM_ID"), 10, 64)
appIDs := os.Getenv("APP_ID")
appIDList := make([]uint32, 0)
for _, appID := range strings.Split(appIDs, ",") {
appid, err := strconv.ParseUint(appID, 10, 32)
if err != nil {
continue
}
appIDList = append(appIDList, uint32(appid))
}
ghToken := os.Getenv("GH_TOKEN")
ghUsername := os.Getenv("GH_USER")
allTimeGistID := os.Getenv("ALL_TIME_GIST_ID")
recentTimeGistID := os.Getenv("RECENT_TIME_GIST_ID")
updateOption := os.Getenv("UPDATE_OPTION") // options for update: GIST,MARKDOWN,GIST_AND_MARKDOWN
markdownFile := os.Getenv("MARKDOWN_FILE") // the markdown filename
var updateGist, updateMarkdown bool
if updateOption == "MARKDOWN" {
updateMarkdown = true
} else if updateOption == "GIST_AND_MARKDOWN" {
updateGist = true
updateMarkdown = true
} else {
updateGist = true
}
box := steambox.NewBox(steamAPIKey, ghUsername, ghToken)
ctx := context.Background()
allTimeLines, err := box.GetPlayTime(ctx, steamID, appIDList...)
if err != nil {
panic("GetPlayTime err:" + err.Error())
}
recentTimeLines, err := box.GetRecentPlayGanesWithTime(ctx, steamID, 5)
if err != nil {
panic("GetRecentTime err:" + err.Error())
}
type info struct {
gistID string
lines []string
filename string
}
tasks := []info{
{allTimeGistID, allTimeLines, "🎮 Steam playtime leaderboard"},
{recentTimeGistID, recentTimeLines, "🎮 Steam recent games leaderboard"},
}
for _, v := range tasks {
if updateGist {
gist, err := box.GetGist(ctx, v.gistID)
if err != nil {
panic("GetGist err:" + err.Error())
}
f := gist.Files[github.GistFilename(v.filename)]
f.Content = github.String(strings.Join(v.lines, "\n"))
gist.Files[github.GistFilename(v.filename)] = f
err = box.UpdateGist(ctx, v.gistID, gist)
if err != nil {
panic("UpdateGist err:" + err.Error())
}
}
if updateMarkdown && markdownFile != "" {
title := v.filename
if updateGist {
title = fmt.Sprintf(`#### <a href="https://gist.github.com/%s" target="_blank">%s</a>`, v.gistID, title)
}
content := bytes.NewBuffer(nil)
content.WriteString(strings.Join(v.lines, "\n"))
err = box.UpdateMarkdown(ctx, title, markdownFile, content.Bytes())
if err != nil {
fmt.Println(err)
} else {
fmt.Println("updated markdown successfully on", markdownFile)
}
}
}
}
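// Example environment for a local run (illustrative placeholder values only):
//
//	STEAM_API_KEY=xxxxxxxx STEAM_ID=7656119xxxxxxxxxx APP_ID=570,730 \
//	GH_TOKEN=ghp_xxx GH_USER=alice ALL_TIME_GIST_ID=abc123 \
//	RECENT_TIME_GIST_ID=def456 UPDATE_OPTION=GIST ./box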
| [
"\"STEAM_API_KEY\"",
"\"STEAM_ID\"",
"\"APP_ID\"",
"\"GH_TOKEN\"",
"\"GH_USER\"",
"\"ALL_TIME_GIST_ID\"",
"\"RECENT_TIME_GIST_ID\"",
"\"UPDATE_OPTION\"",
"\"MARKDOWN_FILE\""
]
| []
| [
"RECENT_TIME_GIST_ID",
"GH_TOKEN",
"GH_USER",
"ALL_TIME_GIST_ID",
"MARKDOWN_FILE",
"APP_ID",
"STEAM_API_KEY",
"STEAM_ID",
"UPDATE_OPTION"
]
| [] | ["RECENT_TIME_GIST_ID", "GH_TOKEN", "GH_USER", "ALL_TIME_GIST_ID", "MARKDOWN_FILE", "APP_ID", "STEAM_API_KEY", "STEAM_ID", "UPDATE_OPTION"] | go | 9 | 0 | |
pkg/gtc/mock.go | package gtc
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"time"
)
type Mock struct {
// Local Client
C Client
// Remote repository client
RC *Client
}
type MockCommit struct {
Message string
Files map[string][]byte
}
type MockOpt struct {
OriginURL string
CurrentBranch string
Branches []string
Commits []MockCommit
StagedFile map[string][]byte
UnstagedFile map[string][]byte
Remote *MockOpt
}
type SubmoduleOpt struct {
Path string
}
func NewMock(o MockOpt) (Mock, error) {
mock := Mock{}
if o.Remote != nil {
rm, err := NewMock(*o.Remote)
if err != nil {
return Mock{}, err
}
mock.RC = &rm.C
}
dir, _ := ioutil.TempDir("/tmp", "gtc-")
if mock.RC != nil {
opt := ClientOpt{
DirPath: dir,
OriginURL: mock.RC.opt.DirPath,
CreateBranch: false,
Revision: o.CurrentBranch,
AuthorName: "bob",
AuthorEmail: "[email protected]",
}
c, err := Clone(opt)
if err != nil {
return Mock{}, err
}
mock.C = c
} else {
opt := ClientOpt{
DirPath: dir,
OriginURL: "",
CreateBranch: false,
Revision: o.CurrentBranch,
AuthorName: "bob",
AuthorEmail: "[email protected]",
}
c, err := Init(opt)
if err != nil {
return Mock{}, err
}
mock.C = c
}
if err := mock.compose(o); err != nil {
return Mock{}, err
}
return mock, nil
}
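// Minimal usage sketch (illustrative only; the field values are made up):
//
//	m, err := NewMock(MockOpt{
//		CurrentBranch: "master",
//		Commits: []MockCommit{
//			{Message: "init", Files: map[string][]byte{"file": {0, 0}}},
//		},
//	})
//	// m.DirPath() then points at a throwaway repository under /tmp.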
func (m *Mock) compose(o MockOpt) error {
if m.RC != nil {
if err := m.C.Pull(o.CurrentBranch); err != nil {
return err
}
}
// create commit
for _, commit := range o.Commits {
for name, blob := range commit.Files {
os.MkdirAll(filepath.Dir(fmt.Sprintf("%s/%s", m.C.opt.DirPath, name)), 0755)
if err := os.WriteFile(fmt.Sprintf("%s/%s", m.C.opt.DirPath, name), blob, 0644); err != nil {
return err
}
if err := m.C.Add(name); err != nil {
return err
}
}
if err := m.C.Commit(commit.Message); err != nil {
return err
}
}
for _, b := range o.Branches {
m.C.CreateBranch(b, false)
m.C.Checkout(o.CurrentBranch, false)
}
// create staged file
for name, blob := range o.StagedFile {
os.MkdirAll(filepath.Dir(fmt.Sprintf("%s/%s", m.C.opt.DirPath, name)), 0755)
if err := os.WriteFile(fmt.Sprintf("%s/%s", m.C.opt.DirPath, name), blob, 0644); err != nil {
return err
}
if err := m.C.Add(name); err != nil {
return err
}
}
// create unstaged file
for name, blob := range o.UnstagedFile {
os.MkdirAll(filepath.Dir(fmt.Sprintf("%s/%s", m.C.opt.DirPath, name)), 0755)
if err := os.WriteFile(fmt.Sprintf("%s/%s", m.C.opt.DirPath, name), blob, 0644); err != nil {
return err
}
}
return nil
}
func (m *Mock) DirPath() string {
return m.C.opt.DirPath
}
func (m *Mock) ClientOpt() ClientOpt {
return m.C.opt
}
func (m *Mock) RemoteClientOpt() ClientOpt {
return m.RC.opt
}
func mockOpt() ClientOpt {
dir, _ := ioutil.TempDir("/tmp", "gtc-")
return ClientOpt{
DirPath: dir,
CreateBranch: false,
OriginURL: "https://github.com/takutakahashi/gtc.git",
Revision: "master",
AuthorName: "bob",
AuthorEmail: "[email protected]",
}
}
func mockBranchOpt() ClientOpt {
dir, _ := ioutil.TempDir("/tmp", "gtc-")
return ClientOpt{
DirPath: dir,
OriginURL: "https://github.com/takutakahashi/gtc.git",
Revision: "test",
AuthorName: "bob",
AuthorEmail: "[email protected]",
}
}
func mockNoExistsBranchOpt() ClientOpt {
dir, _ := ioutil.TempDir("/tmp", "gtc-")
return ClientOpt{
DirPath: dir,
CreateBranch: true,
OriginURL: "https://github.com/takutakahashi/gtc.git",
Revision: "new-branch",
AuthorName: "bob",
AuthorEmail: "[email protected]",
}
}
func mockOptBasicAuth() ClientOpt {
o := mockOpt()
o.Revision = "main"
o.OriginURL = "https://github.com/takutakahashi/gtc.git"
auth, _ := GetAuth(os.Getenv("TEST_BASIC_AUTH_USERNAME"), os.Getenv("TEST_BASIC_AUTH_PASSWORD"), "")
o.Auth = auth
return o
}
func mockOptSSHAuth() ClientOpt {
o := mockOpt()
o.Revision = "main"
o.OriginURL = "[email protected]:takutakahashi/gtc.git"
auth, _ := GetAuth("git", "", os.Getenv("TEST_SSH_PRIVATE_KEY_PATH"))
o.Auth = auth
return o
}
func mockInit() Client {
m, err := NewMock(MockOpt{
CurrentBranch: "master",
Commits: []MockCommit{
{
Message: "init",
Files: map[string][]byte{
"file": {0, 0},
"dir/dir_file": {0, 0},
},
},
},
})
if err != nil {
panic(err)
}
return m.C
}
func mockWithRemote() Client {
m, err := NewMock(MockOpt{
Remote: &MockOpt{
Branches: []string{
"master", "test",
},
CurrentBranch: "master",
Commits: []MockCommit{
{
Message: "init",
Files: map[string][]byte{
"file": {0, 0},
"dir/dir_file": {0, 0},
},
},
},
},
CurrentBranch: "master",
})
if err != nil {
panic(err)
}
return m.C
}
func mockWithUnstagedFile() Client {
m, err := NewMock(MockOpt{
CurrentBranch: "master",
UnstagedFile: map[string][]byte{
"file_mockwithunstagedfile": {0, 0},
},
})
if err != nil {
panic(err)
}
return m.C
}
func mockGtc() Client {
opt := mockOpt()
opt.Revision = "main"
c, err := Clone(opt)
if err != nil {
panic(err)
}
return c
}
func mockWithTags(tagNames []string) Client {
c := mockInit()
for i, name := range tagNames {
os.WriteFile(fmt.Sprintf("%s/%s", c.opt.DirPath, name), []byte{0, 0, 0}, 0644)
c.Add(name)
c.commit(name, time.Now().AddDate(0, 0, i))
c.gitExec([]string{"tag", name})
}
return c
}
func mockWithRemoteTags(tagNames []string) Client {
rc := mockInit()
opt := mockOpt()
opt.OriginURL = rc.opt.DirPath
c, err := Clone(opt)
if err != nil {
panic(err)
}
for i, name := range tagNames {
os.WriteFile(fmt.Sprintf("%s/%s", rc.opt.DirPath, name), []byte{0, 0, 0}, 0644)
rc.Add(name)
rc.commit(name, time.Now().AddDate(0, 0, i))
rc.gitExec([]string{"tag", name})
}
return c
}
func mockWithBehindFromRemote() Client {
rc := mockInit()
opt := mockOpt()
opt.OriginURL = rc.opt.DirPath
c, err := Clone(opt)
if err != nil {
panic(err)
}
os.WriteFile(fmt.Sprintf("%s/%s", rc.opt.DirPath, "file2"), []byte{0, 0}, 0644)
rc.Add("file2")
rc.Commit("commit")
return c
}
func mockWithRemoteAndDirty() Client {
c := mockWithRemote()
os.WriteFile(fmt.Sprintf("%s/%s", c.opt.DirPath, "file2"), []byte{0, 0}, 0644)
c.Add("file2")
c.Commit("add")
return c
}
func mockWithSubmodule() Client {
c1 := mockWithRemote()
m2, _ := NewMock(MockOpt{
CurrentBranch: "master",
Commits: []MockCommit{
{
Message: "test2",
Files: map[string][]byte{
"test2": {0, 1, 2, 3, 4},
},
},
},
Remote: &MockOpt{
CurrentBranch: "master",
Commits: []MockCommit{
{
Message: "test2",
Files: map[string][]byte{
"test2": {0, 1, 2, 3, 4},
},
},
},
},
})
c2 := m2.C
c2.AddClientAsSubmodule("test", c1)
return c2
}
| [
"\"TEST_BASIC_AUTH_USERNAME\"",
"\"TEST_BASIC_AUTH_PASSWORD\"",
"\"TEST_SSH_PRIVATE_KEY_PATH\""
]
| []
| [
"TEST_BASIC_AUTH_USERNAME",
"TEST_SSH_PRIVATE_KEY_PATH",
"TEST_BASIC_AUTH_PASSWORD"
]
| [] | ["TEST_BASIC_AUTH_USERNAME", "TEST_SSH_PRIVATE_KEY_PATH", "TEST_BASIC_AUTH_PASSWORD"] | go | 3 | 0 | |
tools/tensorpack/examples/Inception/inception-bn.py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# File: inception-bn.py
# Author: Yuxin Wu <[email protected]>
import cv2
import argparse
import numpy as np
import os
import tensorflow as tf
from tensorpack import *
from tensorpack.tfutils.symbolic_functions import *
from tensorpack.tfutils.summary import *
TOTAL_BATCH_SIZE = 64 * 6
NR_GPU = 6
BATCH_SIZE = TOTAL_BATCH_SIZE // NR_GPU
INPUT_SHAPE = 224
"""
Inception-BN model on ILSVRC12.
See "Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift", arxiv:1502.03167
This config reaches 71% single-crop validation accuracy after 150k steps with 6 TitanX.
Learning rate may need a different schedule for different number of GPUs (because batch size will be different).
"""
class Model(ModelDesc):
def _get_input_vars(self):
return [InputVar(tf.float32, [None, INPUT_SHAPE, INPUT_SHAPE, 3], 'input'),
InputVar(tf.int32, [None], 'label') ]
def _build_graph(self, input_vars):
image, label = input_vars
image = image / 128.0
def inception(name, x, nr1x1, nr3x3r, nr3x3, nr233r, nr233, nrpool, pooltype):
stride = 2 if nr1x1 == 0 else 1
with tf.variable_scope(name) as scope:
outs = []
if nr1x1 != 0:
outs.append(Conv2D('conv1x1', x, nr1x1, 1))
x2 = Conv2D('conv3x3r', x, nr3x3r, 1)
outs.append(Conv2D('conv3x3', x2, nr3x3, 3, stride=stride))
x3 = Conv2D('conv233r', x, nr233r, 1)
x3 = Conv2D('conv233a', x3, nr233, 3)
outs.append(Conv2D('conv233b', x3, nr233, 3, stride=stride))
if pooltype == 'max':
x4 = MaxPooling('mpool', x, 3, stride, padding='SAME')
else:
assert pooltype == 'avg'
x4 = AvgPooling('apool', x, 3, stride, padding='SAME')
if nrpool != 0: # pool + passthrough if nrpool == 0
x4 = Conv2D('poolproj', x4, nrpool, 1)
outs.append(x4)
return tf.concat(3, outs, name='concat')
with argscope(Conv2D, nl=BNReLU, use_bias=False):
l = Conv2D('conv0', image, 64, 7, stride=2)
l = MaxPooling('pool0', l, 3, 2, padding='SAME')
l = Conv2D('conv1', l, 64, 1)
l = Conv2D('conv2', l, 192, 3)
l = MaxPooling('pool2', l, 3, 2, padding='SAME')
# 28
l = inception('incep3a', l, 64, 64, 64, 64, 96, 32, 'avg')
l = inception('incep3b', l, 64, 64, 96, 64, 96, 64, 'avg')
l = inception('incep3c', l, 0, 128, 160, 64, 96, 0, 'max')
br1 = Conv2D('loss1conv', l, 128, 1)
br1 = FullyConnected('loss1fc', br1, 1024, nl=tf.nn.relu)
br1 = FullyConnected('loss1logit', br1, 1000, nl=tf.identity)
loss1 = tf.nn.sparse_softmax_cross_entropy_with_logits(br1, label)
loss1 = tf.reduce_mean(loss1, name='loss1')
# 14
l = inception('incep4a', l, 224, 64, 96, 96, 128, 128, 'avg')
l = inception('incep4b', l, 192, 96, 128, 96, 128, 128, 'avg')
l = inception('incep4c', l, 160, 128, 160, 128, 160, 128, 'avg')
l = inception('incep4d', l, 96, 128, 192, 160, 192, 128, 'avg')
l = inception('incep4e', l, 0, 128, 192, 192, 256, 0, 'max')
br2 = Conv2D('loss2conv', l, 128, 1)
br2 = FullyConnected('loss2fc', br2, 1024, nl=tf.nn.relu)
br2 = FullyConnected('loss2logit', br2, 1000, nl=tf.identity)
loss2 = tf.nn.sparse_softmax_cross_entropy_with_logits(br2, label)
loss2 = tf.reduce_mean(loss2, name='loss2')
# 7
l = inception('incep5a', l, 352, 192, 320, 160, 224, 128, 'avg')
l = inception('incep5b', l, 352, 192, 320, 192, 224, 128, 'max')
l = GlobalAvgPooling('gap', l)
logits = FullyConnected('linear', l, out_dim=1000, nl=tf.identity)
prob = tf.nn.softmax(logits, name='output')
loss3 = tf.nn.sparse_softmax_cross_entropy_with_logits(logits, label)
loss3 = tf.reduce_mean(loss3, name='loss3')
cost = tf.add_n([loss3, 0.3 * loss2, 0.3 * loss1], name='weighted_cost')
add_moving_summary([cost, loss1, loss2, loss3])
wrong = prediction_incorrect(logits, label, 1, name='wrong-top1')
add_moving_summary(tf.reduce_mean(wrong, name='train_error_top1'))
wrong = prediction_incorrect(logits, label, 5, name='wrong-top5')
add_moving_summary(tf.reduce_mean(wrong, name='train_error_top5'))
# weight decay on all W of fc layers
wd_w = tf.train.exponential_decay(0.0002, get_global_step_var(),
80000, 0.7, True)
wd_cost = tf.mul(wd_w, regularize_cost('.*/W', tf.nn.l2_loss), name='l2_regularize_loss')
add_moving_summary(wd_cost)
add_param_summary([('.*/W', ['histogram'])]) # monitor W
self.cost = tf.add_n([cost, wd_cost], name='cost')
def get_data(train_or_test):
isTrain = train_or_test == 'train'
ds = dataset.ILSVRC12(args.data, train_or_test, shuffle=True if isTrain else False)
meta = dataset.ILSVRCMeta()
pp_mean = meta.get_per_pixel_mean()
if isTrain:
# TODO use the augmentor in GoogleNet
augmentors = [
imgaug.Resize((256, 256)),
imgaug.Brightness(30, False),
imgaug.Contrast((0.8,1.2), True),
imgaug.MapImage(lambda x: x - pp_mean),
imgaug.RandomCrop((224, 224)),
imgaug.Flip(horiz=True),
]
else:
augmentors = [
imgaug.Resize((256, 256)),
imgaug.MapImage(lambda x: x - pp_mean),
imgaug.CenterCrop((224, 224)),
]
ds = AugmentImageComponent(ds, augmentors)
ds = BatchData(ds, BATCH_SIZE, remainder=not isTrain)
if isTrain:
ds = PrefetchDataZMQ(ds, 6)
return ds
def get_config():
logger.auto_set_dir()
# prepare dataset
dataset_train = get_data('train')
step_per_epoch = 5000
dataset_val = get_data('val')
lr = get_scalar_var('learning_rate', 0.045, summary=True)
return TrainConfig(
dataset=dataset_train,
optimizer=tf.train.MomentumOptimizer(lr, 0.9),
callbacks=Callbacks([
StatPrinter(), ModelSaver(),
InferenceRunner(dataset_val, [
ClassificationError('wrong-top1', 'val-top1-error'),
ClassificationError('wrong-top5', 'val-top5-error')]),
#HumanHyperParamSetter('learning_rate', 'hyper-googlenet.txt')
ScheduledHyperParamSetter('learning_rate',
[(8, 0.03), (14, 0.02), (17, 5e-3),
(19, 3e-3), (24, 1e-3), (26, 2e-4),
(30, 5e-5) ])
]),
session_config=get_default_sess_config(0.99),
model=Model(),
step_per_epoch=step_per_epoch,
max_epoch=80,
)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
parser.add_argument('--load', help='load model')
parser.add_argument('--data', help='ImageNet data root directory', required=True)
args = parser.parse_args()
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
config = get_config()
if args.load:
config.session_init = SaverRestore(args.load)
if args.gpu:
config.nr_tower = len(args.gpu.split(','))
SyncMultiGPUTrainer(config).train()
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
examples/pwr_run/checkpointing/final/no_threshold/job63.py | """
#Trains a ResNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.resnet import ResNet50, ResNet101, ResNet152
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 256
args_lr = 0.01
args_model = 'resnet50'
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_no_threshold/' + job_name + '*'
total_epochs = 11
starting_epoch = 0
# first step is to update the PID
pid = os.getpid()
message = job_name + ' pid ' + str(pid) # 'job50 pid 3333'
send_signal.send(args.node, 10002, message)
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
message = job_name + ' b_end'
send_signal.send(args.node, 10002, message)
model = keras.models.load_model(save_file)
message = job_name + ' c_end'
send_signal.send(args.node, 10002, message)
else:
print('train from start')
model = models.Sequential()
if '50' in args_model:
base_model = ResNet50(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '101' in args_model:
base_model = ResNet101(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '152' in args_model:
base_model = ResNet152(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
#base_model.summary()
#pdb.set_trace()
#model.add(layers.UpSampling2D((2,2)))
#model.add(layers.UpSampling2D((2,2)))
#model.add(layers.UpSampling2D((2,2)))
model.add(base_model)
model.add(layers.Flatten())
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(128, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(64, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
# first record the wasted epoch time
global epoch_begin_time
if epoch_begin_time == 0:
epoch_waste_time = 0
else:
epoch_waste_time = int(time.time() - epoch_begin_time)
message = job_name + ' waste ' + str(epoch_waste_time) # 'job50 waste 100'
if epoch_waste_time > 0:
send_signal.send(args.node, 10002, message)
print('checkpointing the model triggered by kill -15 signal')
# delete whatever checkpoint that already exists
for f in glob.glob(save_files):
os.remove(f)
model.save('/scratch/li.baol/checkpoint_no_threshold/' + job_name + '_' + str(current_epoch) + '.h5')
print ('(SIGTERM) terminating the process')
message = job_name + ' checkpoint'
send_signal.send(args.node, 10002, message)
sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
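# Note (added for clarity, not in the original): the scheduler is expected to stop this
# job with SIGTERM (e.g. `kill -15 <pid>`), which runs terminateProcess above and saves
# the model as '<job_name>_<current_epoch>.h5' before exiting.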
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
first_epoch_start = 0
class PrintEpoch(keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
global current_epoch, first_epoch_start
#remaining_epochs = epochs - epoch
current_epoch = epoch
print('current epoch ' + str(current_epoch))
global epoch_begin_time
epoch_begin_time = time.time()
if epoch == starting_epoch and args.resume:
first_epoch_start = time.time()
message = job_name + ' d_end'
send_signal.send(args.node, 10002, message)
elif epoch == starting_epoch:
first_epoch_start = time.time()
if epoch == starting_epoch:
# send signal to indicate checkpoint is qualified
message = job_name + ' ckpt_qual'
send_signal.send(args.node, 10002, message)
def on_epoch_end(self, epoch, logs=None):
if epoch == starting_epoch:
first_epoch_time = int(time.time() - first_epoch_start)
message = job_name + ' 1st_epoch ' + str(first_epoch_time)
send_signal.send(args.node, 10002, message)
progress = round((epoch+1) / round(total_epochs/2), 2)
message = job_name + ' completion ' + str(progress)
send_signal.send(args.node, 10002, message)
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
| []
| []
| [
"CUDA_DEVICE_ORDER",
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"] | python | 2 | 0 | |
pkg/util/utils.go | package util
import (
"encoding/json"
"fmt"
"os"
"os/user"
"path/filepath"
"strconv"
"strings"
"sync"
"syscall"
"time"
"github.com/BurntSushi/toml"
"github.com/containers/common/pkg/config"
"github.com/containers/image/v5/types"
"github.com/containers/libpod/pkg/errorhandling"
"github.com/containers/libpod/pkg/namespaces"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/libpod/pkg/signal"
"github.com/containers/storage"
"github.com/containers/storage/pkg/idtools"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/crypto/ssh/terminal"
)
var containerConfig *config.Config
func init() {
var err error
containerConfig, err = config.Default()
if err != nil {
logrus.Error(err)
os.Exit(1)
}
}
// Helper function to determine the username/password passed
// in the creds string. It could be either or both.
func parseCreds(creds string) (string, string) {
if creds == "" {
return "", ""
}
up := strings.SplitN(creds, ":", 2)
if len(up) == 1 {
return up[0], ""
}
return up[0], up[1]
}
// ParseRegistryCreds takes a credentials string in the form USERNAME:PASSWORD
// and returns a DockerAuthConfig
func ParseRegistryCreds(creds string) (*types.DockerAuthConfig, error) {
username, password := parseCreds(creds)
if username == "" {
fmt.Print("Username: ")
fmt.Scanln(&username)
}
if password == "" {
fmt.Print("Password: ")
termPassword, err := terminal.ReadPassword(0)
if err != nil {
return nil, errors.Wrapf(err, "could not read password from terminal")
}
password = string(termPassword)
}
return &types.DockerAuthConfig{
Username: username,
Password: password,
}, nil
}
// StringInSlice determines if a string is in a string slice, returns bool
func StringInSlice(s string, sl []string) bool {
for _, i := range sl {
if i == s {
return true
}
}
return false
}
// ImageConfig is a wrapper around the OCIv1 Image Configuration struct exported
// by containers/image, but containing additional fields that are not supported
// by OCIv1 (but are by Docker v2) - notably OnBuild.
type ImageConfig struct {
v1.ImageConfig
OnBuild []string
}
// GetImageConfig produces a v1.ImageConfig from the --change flag that is
// accepted by several Podman commands. It accepts a (limited subset) of
// Dockerfile instructions.
func GetImageConfig(changes []string) (ImageConfig, error) {
// Valid changes:
// USER
// EXPOSE
// ENV
// ENTRYPOINT
// CMD
// VOLUME
// WORKDIR
// LABEL
// STOPSIGNAL
// ONBUILD
config := ImageConfig{}
for _, change := range changes {
// First, let's assume proper Dockerfile format - space
// separator between instruction and value
split := strings.SplitN(change, " ", 2)
if len(split) != 2 {
split = strings.SplitN(change, "=", 2)
if len(split) != 2 {
return ImageConfig{}, errors.Errorf("invalid change %q - must be formatted as KEY VALUE", change)
}
}
outerKey := strings.ToUpper(strings.TrimSpace(split[0]))
value := strings.TrimSpace(split[1])
switch outerKey {
case "USER":
// Assume literal contents are the user.
if value == "" {
return ImageConfig{}, errors.Errorf("invalid change %q - must provide a value to USER", change)
}
config.User = value
case "EXPOSE":
// EXPOSE is either [portnum] or
// [portnum]/[proto]
// Protocol must be "tcp" or "udp"
splitPort := strings.Split(value, "/")
if len(splitPort) > 2 {
return ImageConfig{}, errors.Errorf("invalid change %q - EXPOSE port must be formatted as PORT[/PROTO]", change)
}
portNum, err := strconv.Atoi(splitPort[0])
if err != nil {
return ImageConfig{}, errors.Wrapf(err, "invalid change %q - EXPOSE port must be an integer", change)
}
if portNum > 65535 || portNum <= 0 {
return ImageConfig{}, errors.Errorf("invalid change %q - EXPOSE port must be a valid port number", change)
}
proto := "tcp"
if len(splitPort) > 1 {
testProto := strings.ToLower(splitPort[1])
switch testProto {
case "tcp", "udp":
proto = testProto
default:
return ImageConfig{}, errors.Errorf("invalid change %q - EXPOSE protocol must be TCP or UDP", change)
}
}
if config.ExposedPorts == nil {
config.ExposedPorts = make(map[string]struct{})
}
config.ExposedPorts[fmt.Sprintf("%d/%s", portNum, proto)] = struct{}{}
case "ENV":
// Format is either:
// ENV key=value
// ENV key=value key=value ...
// ENV key value
// Both keys and values can be surrounded by quotes to group them.
// For now: we only support key=value
// We will attempt to strip quotation marks if present.
var (
key, val string
)
splitEnv := strings.SplitN(value, "=", 2)
key = splitEnv[0]
// We do need a key
if key == "" {
return ImageConfig{}, errors.Errorf("invalid change %q - ENV must have at least one argument", change)
}
// Perfectly valid to not have a value
if len(splitEnv) == 2 {
val = splitEnv[1]
}
if strings.HasPrefix(key, `"`) && strings.HasSuffix(key, `"`) {
key = strings.TrimPrefix(strings.TrimSuffix(key, `"`), `"`)
}
if strings.HasPrefix(val, `"`) && strings.HasSuffix(val, `"`) {
val = strings.TrimPrefix(strings.TrimSuffix(val, `"`), `"`)
}
config.Env = append(config.Env, fmt.Sprintf("%s=%s", key, val))
case "ENTRYPOINT":
// Two valid forms.
// First, JSON array.
// Second, not a JSON array - we interpret this as an
// argument to `sh -c`, unless empty, in which case we
// just use a blank entrypoint.
testUnmarshal := []string{}
if err := json.Unmarshal([]byte(value), &testUnmarshal); err != nil {
// It ain't valid JSON, so assume it's an
// argument to sh -c if not empty.
if value != "" {
config.Entrypoint = []string{"/bin/sh", "-c", value}
} else {
config.Entrypoint = []string{}
}
} else {
// Valid JSON
config.Entrypoint = testUnmarshal
}
case "CMD":
// Same valid forms as entrypoint.
// However, where ENTRYPOINT assumes that 'ENTRYPOINT '
// means no entrypoint, CMD assumes it is 'sh -c' with
// no third argument.
testUnmarshal := []string{}
if err := json.Unmarshal([]byte(value), &testUnmarshal); err != nil {
// It ain't valid JSON, so assume it's an
// argument to sh -c.
// Only include volume if it's not ""
config.Cmd = []string{"/bin/sh", "-c"}
if value != "" {
config.Cmd = append(config.Cmd, value)
}
} else {
// Valid JSON
config.Cmd = testUnmarshal
}
case "VOLUME":
// Either a JSON array or a set of space-separated
// paths.
// Acts rather similar to ENTRYPOINT and CMD, but always
// appends rather than replacing, and no sh -c prepend.
testUnmarshal := []string{}
if err := json.Unmarshal([]byte(value), &testUnmarshal); err != nil {
// Not valid JSON, so split on spaces
testUnmarshal = strings.Split(value, " ")
}
if len(testUnmarshal) == 0 {
return ImageConfig{}, errors.Errorf("invalid change %q - must provide at least one argument to VOLUME", change)
}
for _, vol := range testUnmarshal {
if vol == "" {
return ImageConfig{}, errors.Errorf("invalid change %q - VOLUME paths must not be empty", change)
}
if config.Volumes == nil {
config.Volumes = make(map[string]struct{})
}
config.Volumes[vol] = struct{}{}
}
case "WORKDIR":
// This can be passed multiple times.
// Each successive invocation is treated as relative to
// the previous one - so WORKDIR /A, WORKDIR b,
// WORKDIR c results in /A/b/c
// Just need to check it's not empty...
if value == "" {
return ImageConfig{}, errors.Errorf("invalid change %q - must provide a non-empty WORKDIR", change)
}
config.WorkingDir = filepath.Join(config.WorkingDir, value)
case "LABEL":
// Same general idea as ENV, but we no longer allow " "
// as a separator.
// We didn't do that for ENV either, so nice and easy.
// Potentially problematic: LABEL might theoretically
// allow an = in the key? If people really do this, we
// may need to investigate more advanced parsing.
var (
key, val string
)
splitLabel := strings.SplitN(value, "=", 2)
// Unlike ENV, LABEL must have a value
if len(splitLabel) != 2 {
return ImageConfig{}, errors.Errorf("invalid change %q - LABEL must be formatted key=value", change)
}
key = splitLabel[0]
val = splitLabel[1]
if strings.HasPrefix(key, `"`) && strings.HasSuffix(key, `"`) {
key = strings.TrimPrefix(strings.TrimSuffix(key, `"`), `"`)
}
if strings.HasPrefix(val, `"`) && strings.HasSuffix(val, `"`) {
val = strings.TrimPrefix(strings.TrimSuffix(val, `"`), `"`)
}
// Check key after we strip quotations
if key == "" {
return ImageConfig{}, errors.Errorf("invalid change %q - LABEL must have a non-empty key", change)
}
if config.Labels == nil {
config.Labels = make(map[string]string)
}
config.Labels[key] = val
case "STOPSIGNAL":
// Check the provided signal for validity.
killSignal, err := ParseSignal(value)
if err != nil {
return ImageConfig{}, errors.Wrapf(err, "invalid change %q - KILLSIGNAL must be given a valid signal", change)
}
config.StopSignal = fmt.Sprintf("%d", killSignal)
case "ONBUILD":
// Onbuild always appends.
if value == "" {
return ImageConfig{}, errors.Errorf("invalid change %q - ONBUILD must be given an argument", change)
}
config.OnBuild = append(config.OnBuild, value)
default:
return ImageConfig{}, errors.Errorf("invalid change %q - invalid instruction %s", change, outerKey)
}
}
return config, nil
}
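// Illustrative sketch (not part of the original file); the change strings below are
// hypothetical values showing the accepted formats:
//
//	cfg, err := GetImageConfig([]string{
//		"ENV PATH=/usr/bin",
//		"EXPOSE 8080/tcp",
//		"LABEL maintainer=somebody",
//	})
//	// cfg.Env gains "PATH=/usr/bin", cfg.ExposedPorts gains "8080/tcp",
//	// and cfg.Labels gains "maintainer" -> "somebody".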
// ParseSignal parses and validates a signal name or number.
func ParseSignal(rawSignal string) (syscall.Signal, error) {
// Strip off leading dash, to allow -1 or -HUP
basename := strings.TrimPrefix(rawSignal, "-")
sig, err := signal.ParseSignal(basename)
if err != nil {
return -1, err
}
// 64 is SIGRTMAX; wish we could get this from a standard Go library
if sig < 1 || sig > 64 {
return -1, errors.Errorf("valid signals are 1 through 64")
}
return sig, nil
}
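// For illustration: ParseSignal("KILL"), ParseSignal("9") and ParseSignal("-9") should
// all resolve to signal 9 (SIGKILL), assuming signal.ParseSignal accepts both names
// and numbers.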
// GetKeepIDMapping returns the mappings and the user to use when keep-id is used
func GetKeepIDMapping() (*storage.IDMappingOptions, int, int, error) {
options := storage.IDMappingOptions{
HostUIDMapping: true,
HostGIDMapping: true,
}
uid, gid := 0, 0
if rootless.IsRootless() {
min := func(a, b int) int {
if a < b {
return a
}
return b
}
uid = rootless.GetRootlessUID()
gid = rootless.GetRootlessGID()
uids, gids, err := rootless.GetConfiguredMappings()
if err != nil {
return nil, -1, -1, errors.Wrapf(err, "cannot read mappings")
}
maxUID, maxGID := 0, 0
for _, u := range uids {
maxUID += u.Size
}
for _, g := range gids {
maxGID += g.Size
}
options.UIDMap, options.GIDMap = nil, nil
options.UIDMap = append(options.UIDMap, idtools.IDMap{ContainerID: 0, HostID: 1, Size: min(uid, maxUID)})
options.UIDMap = append(options.UIDMap, idtools.IDMap{ContainerID: uid, HostID: 0, Size: 1})
if maxUID > uid {
options.UIDMap = append(options.UIDMap, idtools.IDMap{ContainerID: uid + 1, HostID: uid + 1, Size: maxUID - uid})
}
options.GIDMap = append(options.GIDMap, idtools.IDMap{ContainerID: 0, HostID: 1, Size: min(gid, maxGID)})
options.GIDMap = append(options.GIDMap, idtools.IDMap{ContainerID: gid, HostID: 0, Size: 1})
if maxGID > gid {
options.GIDMap = append(options.GIDMap, idtools.IDMap{ContainerID: gid + 1, HostID: gid + 1, Size: maxGID - gid})
}
options.HostUIDMapping = false
options.HostGIDMapping = false
}
// Simply ignore the setting and do not setup an inner namespace for root as it is a no-op
return &options, uid, gid, nil
}
// ParseIDMapping takes idmappings and subuid and subgid maps and returns a storage mapping
func ParseIDMapping(mode namespaces.UsernsMode, uidMapSlice, gidMapSlice []string, subUIDMap, subGIDMap string) (*storage.IDMappingOptions, error) {
options := storage.IDMappingOptions{
HostUIDMapping: true,
HostGIDMapping: true,
}
if mode.IsAuto() {
var err error
options.HostUIDMapping = false
options.HostGIDMapping = false
options.AutoUserNs = true
opts, err := mode.GetAutoOptions()
if err != nil {
return nil, err
}
options.AutoUserNsOpts = *opts
return &options, nil
}
if mode.IsKeepID() {
options.HostUIDMapping = false
options.HostGIDMapping = false
return &options, nil
}
if subGIDMap == "" && subUIDMap != "" {
subGIDMap = subUIDMap
}
if subUIDMap == "" && subGIDMap != "" {
subUIDMap = subGIDMap
}
if len(gidMapSlice) == 0 && len(uidMapSlice) != 0 {
gidMapSlice = uidMapSlice
}
if len(uidMapSlice) == 0 && len(gidMapSlice) != 0 {
uidMapSlice = gidMapSlice
}
if len(uidMapSlice) == 0 && subUIDMap == "" && os.Getuid() != 0 {
uidMapSlice = []string{fmt.Sprintf("0:%d:1", os.Getuid())}
}
if len(gidMapSlice) == 0 && subGIDMap == "" && os.Getuid() != 0 {
gidMapSlice = []string{fmt.Sprintf("0:%d:1", os.Getgid())}
}
if subUIDMap != "" && subGIDMap != "" {
mappings, err := idtools.NewIDMappings(subUIDMap, subGIDMap)
if err != nil {
return nil, err
}
options.UIDMap = mappings.UIDs()
options.GIDMap = mappings.GIDs()
}
parsedUIDMap, err := idtools.ParseIDMap(uidMapSlice, "UID")
if err != nil {
return nil, err
}
parsedGIDMap, err := idtools.ParseIDMap(gidMapSlice, "GID")
if err != nil {
return nil, err
}
options.UIDMap = append(options.UIDMap, parsedUIDMap...)
options.GIDMap = append(options.GIDMap, parsedGIDMap...)
if len(options.UIDMap) > 0 {
options.HostUIDMapping = false
}
if len(options.GIDMap) > 0 {
options.HostGIDMapping = false
}
return &options, nil
}
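// Illustrative example (hypothetical values): a --uidmap entry of "0:100000:65536"
// maps container UIDs 0-65535 onto host UIDs 100000-165535; ParseIDMapping converts
// such strings into the storage.IDMappingOptions consumed by the storage layer.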
var (
rootlessConfigHomeDirOnce sync.Once
rootlessConfigHomeDir string
rootlessRuntimeDirOnce sync.Once
rootlessRuntimeDir string
)
type tomlOptionsConfig struct {
MountProgram string `toml:"mount_program"`
}
type tomlConfig struct {
Storage struct {
Driver string `toml:"driver"`
RunRoot string `toml:"runroot"`
GraphRoot string `toml:"graphroot"`
Options struct{ tomlOptionsConfig } `toml:"options"`
} `toml:"storage"`
}
func getTomlStorage(storeOptions *storage.StoreOptions) *tomlConfig {
config := new(tomlConfig)
config.Storage.Driver = storeOptions.GraphDriverName
config.Storage.RunRoot = storeOptions.RunRoot
config.Storage.GraphRoot = storeOptions.GraphRoot
for _, i := range storeOptions.GraphDriverOptions {
s := strings.Split(i, "=")
if s[0] == "overlay.mount_program" {
config.Storage.Options.MountProgram = s[1]
}
}
return config
}
// WriteStorageConfigFile writes the configuration to a file
func WriteStorageConfigFile(storageOpts *storage.StoreOptions, storageConf string) error {
if err := os.MkdirAll(filepath.Dir(storageConf), 0755); err != nil {
return err
}
storageFile, err := os.OpenFile(storageConf, os.O_RDWR|os.O_TRUNC, 0600)
if err != nil {
return errors.Wrapf(err, "cannot open %s", storageConf)
}
tomlConfiguration := getTomlStorage(storageOpts)
defer errorhandling.CloseQuiet(storageFile)
enc := toml.NewEncoder(storageFile)
if err := enc.Encode(tomlConfiguration); err != nil {
if err := os.Remove(storageConf); err != nil {
logrus.Errorf("unable to remove file %s", storageConf)
}
return err
}
return nil
}
// ParseInputTime takes the user's input, determines whether it is valid, and
// returns the parsed time and an error. The input is compared against known
// time formats; otherwise it is treated as a duration subtracted from now.
func ParseInputTime(inputTime string) (time.Time, error) {
timeFormats := []string{time.RFC3339Nano, time.RFC3339, "2006-01-02T15:04:05", "2006-01-02T15:04:05.999999999",
"2006-01-02Z07:00", "2006-01-02"}
// iterate the supported time formats
for _, tf := range timeFormats {
t, err := time.Parse(tf, inputTime)
if err == nil {
return t, nil
}
}
// input might be a duration
duration, err := time.ParseDuration(inputTime)
if err != nil {
return time.Time{}, errors.Errorf("unable to interpret time value")
}
return time.Now().Add(-duration), nil
}
// OpenExclusiveFile opens a file for writing and ensures it doesn't already exist
func OpenExclusiveFile(path string) (*os.File, error) {
baseDir := filepath.Dir(path)
if baseDir != "" {
if _, err := os.Stat(baseDir); err != nil {
return nil, err
}
}
return os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
}
// PullType whether to pull new image
type PullType int
const (
// PullImageAlways always try to pull new image when create or run
PullImageAlways PullType = iota
// PullImageMissing pulls the image if it is not present locally
PullImageMissing
// PullImageNever will never pull new image
PullImageNever
)
// ValidatePullType checks if the pullType from the CLI is valid and returns the valid enum type;
// if the value from the CLI is invalid it returns an error.
func ValidatePullType(pullType string) (PullType, error) {
switch pullType {
case "always":
return PullImageAlways, nil
case "missing":
return PullImageMissing, nil
case "never":
return PullImageNever, nil
case "":
return PullImageMissing, nil
default:
return PullImageMissing, errors.Errorf("invalid pull type %q", pullType)
}
}
// ExitCode reads the error message produced when a container process fails to
// execute and returns 0 if there is no error, 127 if the command does not
// exist, or 126 for all other errors.
func ExitCode(err error) int {
if err == nil {
return 0
}
e := strings.ToLower(err.Error())
if strings.Contains(e, "file not found") ||
strings.Contains(e, "no such file or directory") {
return 127
}
return 126
}
// HomeDir returns the home directory for the current user.
func HomeDir() (string, error) {
home := os.Getenv("HOME")
if home == "" {
usr, err := user.LookupId(fmt.Sprintf("%d", rootless.GetRootlessUID()))
if err != nil {
return "", errors.Wrapf(err, "unable to resolve HOME directory")
}
home = usr.HomeDir
}
return home, nil
}
func Tmpdir() string {
tmpdir := os.Getenv("TMPDIR")
if tmpdir == "" {
tmpdir = "/var/tmp"
}
return tmpdir
}
// ValidateSysctls validates a list of sysctl and returns it.
func ValidateSysctls(strSlice []string) (map[string]string, error) {
sysctl := make(map[string]string)
validSysctlMap := map[string]bool{
"kernel.msgmax": true,
"kernel.msgmnb": true,
"kernel.msgmni": true,
"kernel.sem": true,
"kernel.shmall": true,
"kernel.shmmax": true,
"kernel.shmmni": true,
"kernel.shm_rmid_forced": true,
}
validSysctlPrefixes := []string{
"net.",
"fs.mqueue.",
}
for _, val := range strSlice {
foundMatch := false
arr := strings.Split(val, "=")
if len(arr) < 2 {
return nil, errors.Errorf("%s is invalid, sysctl values must be in the form of KEY=VALUE", val)
}
if validSysctlMap[arr[0]] {
sysctl[arr[0]] = arr[1]
continue
}
for _, prefix := range validSysctlPrefixes {
if strings.HasPrefix(arr[0], prefix) {
sysctl[arr[0]] = arr[1]
foundMatch = true
break
}
}
if !foundMatch {
return nil, errors.Errorf("sysctl '%s' is not whitelisted", arr[0])
}
}
return sysctl, nil
}
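// For example (illustrative values), ValidateSysctls([]string{"net.ipv4.ip_forward=1"})
// is accepted because of the "net." prefix, while "vm.swappiness=10" would be rejected
// as not whitelisted.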
func DefaultContainerConfig() *config.Config {
return containerConfig
}
| [
"\"HOME\"",
"\"TMPDIR\""
]
| []
| [
"HOME",
"TMPDIR"
]
| [] | ["HOME", "TMPDIR"] | go | 2 | 0 | |
examples/get_entries/main.go | package main
import (
"context"
"fmt"
"log"
"os"
"strings"
"github.com/grokify/go-simplekpi/simplekpiutil"
"github.com/grokify/mogo/config"
"github.com/grokify/mogo/fmt/fmtutil"
"github.com/jessevdk/go-flags"
)
type Options struct {
EnvPath string `short:"e" long:"envpath" description:".env Filepath" required:"false"`
Site string `short:"s" long:"site" description:"Your site" required:"false"`
Username string `short:"u" long:"username" description:"Your username" required:"false"`
Password string `short:"p" long:"password" description:"Your password" required:"false"`
}
func main() {
opts := Options{}
_, err := flags.Parse(&opts)
if err != nil {
log.Fatal(err)
}
if len(opts.Site) == 0 {
err := config.LoadDotEnvSkipEmpty(opts.EnvPath, ".env", os.Getenv("ENV_PATH"))
if err != nil {
log.Fatal(err)
}
opts.Site = os.Getenv("SIMPLEKPI_SITE")
opts.Username = os.Getenv("SIMPLEKPI_USERNAME")
opts.Password = os.Getenv("SIMPLEKPI_TOKEN")
}
client, err := simplekpiutil.NewApiClient(opts.Site, opts.Username, opts.Password)
if err != nil {
log.Fatal(err)
}
info, resp, err := client.UsersApi.GetAllUsers(context.Background())
if err != nil {
log.Fatal(err)
} else if resp.StatusCode > 299 {
log.Fatal(resp.StatusCode)
}
fmtutil.PrintJSON(info)
if 1 == 0 {
kpis, resp, err := client.KPIsApi.GetAllKPIs(context.Background())
if err != nil {
log.Fatal(err)
} else if resp.StatusCode > 299 {
log.Fatal(resp.StatusCode)
}
fmtutil.PrintJSON(kpis)
for _, kpi := range kpis {
if strings.Contains(kpi.Name, "MAU") {
fmtutil.PrintJSON(kpi)
}
}
}
fmt.Println("DONE")
}
| [
"\"ENV_PATH\"",
"\"SIMPLEKPI_SITE\"",
"\"SIMPLEKPI_USERNAME\"",
"\"SIMPLEKPI_TOKEN\""
]
| []
| [
"SIMPLEKPI_TOKEN",
"ENV_PATH",
"SIMPLEKPI_SITE",
"SIMPLEKPI_USERNAME"
]
| [] | ["SIMPLEKPI_TOKEN", "ENV_PATH", "SIMPLEKPI_SITE", "SIMPLEKPI_USERNAME"] | go | 4 | 0 | |
utilities/data_collection.py | from weatherbit.api import Api
from datetime import datetime, timedelta
import requests
import psycopg2
import json
import os
import pandas as pd
import re
import pycountry_convert as pc
def store_to_db(lat, lon, start_date, end_date, db_name, db_password, db_user, db_host):
"""
Queries the Weatherbit history API (via requests) to retrieve daily data from <start_date> to <end_date>
and stores it in the weather table.
:param lat: latitude coordinate
:param lon: longitude coordinate
:param start_date: date string in 'YYYY-MM-DD' format
:param end_date: date string in 'YYYY-MM-DD' format
:return void:
"""
api_key = os.environ.get('API_KEY')
api = Api(api_key)
try:
start_date = datetime.strptime(start_date, '%Y-%m-%d')
end_date = datetime.strptime(end_date, '%Y-%m-%d')
conn = psycopg2.connect(dbname=db_name, password=db_password, user=db_user, host=db_host)
cur = conn.cursor()
if start_date < end_date:
for n in range((end_date - start_date).days):
sdate = start_date + timedelta(n)
edate = start_date + timedelta(n+1)
# Using an API Wrapper
# api.set_granularity('daily')
# history = api.get_history(lat=lat, lon=lon, start_date=str(sdate.date()), end_date=str(edate.date()))
# print(history.get_series(['temp','min_temp','max_temp', 'min_temp_ts', 'max_temp_ts']))
## Using the API directly
response = requests.get("https://api.weatherbit.io/v2.0/history/daily?lat="+str(lat)+"&lon="+str(lon)+"&start_date="+str(sdate.date())+"&end_date="+str(edate.date())+"&key="+api_key)
if response.status_code == 200:
query = """ INSERT INTO weather (lat, lon, mean_temp, min_temp, min_temp_time, max_temp, max_temp_time) VALUES (%s,%s,%s,%s,%s,%s,%s) """
# query = """ INSERT INTO test_weather (lat, lon, mean_temp, min_temp, min_temp_time, max_temp, max_temp_time) VALUES (%s,%s,%s,%s,%s,%s,%s) """
record = (lat,
lon,
response.json()["data"][0]["temp"],
response.json()["data"][0]["min_temp"],
datetime.fromtimestamp(
int(response.json()["data"][0]["min_temp_ts"])
).strftime('%Y-%m-%d %H:%M:%S'),
response.json()["data"][0]["max_temp"],
datetime.fromtimestamp(
int(response.json()["data"][0]["max_temp_ts"])
).strftime('%Y-%m-%d %H:%M:%S'))
cur.execute(query, record)
conn.commit()
conn.close()
except Exception as e:
print("Exiting store_to_db due to exception: ", e.__class__)
finally:
if conn:
cur.close()
conn.close()
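# Example call (illustrative placeholder values for coordinates and DB credentials):
#   store_to_db(42.36, -71.06, '2020-01-01', '2020-01-10',
#               'postgres', 'postgres', 'postgres', 'localhost')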
def collect_data(filename, start_date, end_date):
"""
For each city in json file <filename>, store_to_db() method is invoked to store history information to database.
:param filename: name of the cities JSON file (looked up under the data/ directory)
:param start_date: date string in 'YYYY-MM-DD' format
:param end_date: date string in 'YYYY-MM-DD' format
:return void:
"""
# DB connection settings (assumed to mirror load_master_data; adjust for your environment)
db_name = "postgres"
db_password = "postgres"
db_user = "postgres"
db_host = "localhost"
root_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
filepath = os.path.join(root_dir, "data")
with open(os.path.join(filepath, filename)) as json_data:
data = json.load(json_data)
for continent in data:
for city in data[continent]:
store_to_db(city["lat"], city["lon"], start_date, end_date, db_name, db_password, db_user, db_host)
def load_master_data():
"""
Continent details for each city from weatherbit metadata is pulled using pycountry_convert API and stored in
city_country_mdata table.
:return void:
"""
db_name = "postgres"
db_password = "postgres"
db_user = "postgres"
db_host = "localhost"
root_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
filepath = os.path.join(root_dir, "data")
data = pd.read_csv(os.path.join(filepath, "cities_all.csv"))
data["city_country"] = data["city_name"].str.cat(data["country_full"],sep=", ")
data = data.dropna()
try:
conn = psycopg2.connect(dbname=db_name, password=db_password, user=db_user, host=db_host)
cur = conn.cursor()
for item in data.itertuples():
continent_flag = 0
continent_name = ""
if re.sub("[^a-zA-Z0-9, ]+", "", item[8]):
try:
country_code = pc.country_name_to_country_alpha2(item[5], cn_name_format="default")
continent_name = pc.country_alpha2_to_continent_code(country_code)
except:
continent_flag = 1
if continent_flag == 0:
# query = """ INSERT INTO city_country_mdata (lat, lon, citycountry, continent) VALUES (%s,%s,%s,%s) """
record = (item[6], item[7], re.sub("[^a-zA-Z0-9, ]+", "",item[8]), continent_name)
cur.execute(query, record)
conn.commit()
conn.close()
except Exception as e:
print("Exiting load_master_data due to exception: ", e.__class__)
finally:
if conn:
cur.close()
conn.close()
def get_cities_for_continent(continent, db_name, db_password, db_user, db_host):
"""
Given the <continent>, its corresponding cities are queried from city_country_mdata table.
:param continent: string with 2 characters
:return result: list of cities
"""
try:
conn = psycopg2.connect(dbname=db_name, password=db_password, user=db_user, host=db_host)
cur = conn.cursor()
query = """ SELECT citycountry from city_country_mdata WHERE continent = %s """
record = (continent,)
cur.execute(query, record)
result = cur.fetchall()
result = [tup[0] for tup in result]
conn.commit()
conn.close()
return result
except Exception as e:
print("Exiting get_cities_for_continent due to exception: ", e.__class__)
finally:
if conn:
cur.close()
conn.close() | []
| []
| [
"API_KEY"
]
| [] | ["API_KEY"] | python | 1 | 0 | |
python/manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "googleSheet.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
ChangePop/utils.py | import random
import re
import string
from mailjet_rest import Client
import os
from ChangePop.models import Notifications
def api_resp(code, mtype, msg):
# Build a standard API response dict with stringified code, type and message.
r = {
"code": str(code),
"type": str(mtype),
"message": str(msg)}
return r
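# Example (illustrative): api_resp(400, "error", "Bad request") returns
# {"code": "400", "type": "error", "message": "Bad request"}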
def random_string(string_length=20):
"""Generate a random string of fixed length """
letters = "abcdefghijklmnopqrstuvwxyz0123456789"
return ''.join(random.choice(letters) for i in range(string_length))
def push_notify(user_id, text, product=None, category=None):
Notifications.push(user_id, text, product=product, category=category)
def fix_str(string):
string = re.sub('[\'(),]', '', string)
return string
def send_mail(mail,name,subject,textPart,htmlPart): # pragma: no cover
api_key = os.environ.get('MAIL_API')
api_secret = os.environ.get('MAIL_KEY')
mailjet = Client(auth=(api_key, api_secret), version='v3.1')
data = {
'Messages': [
{
"From": {
"Email": "[email protected]",
"Name": "Kalepa Info"
},
"To": [
{
"Email": mail,
"Name": name
}
],
"Subject": subject,
"TextPart": textPart,
"HTMLPart": htmlPart
}
]
}
result = mailjet.send.create(data=data)
return result.json()
| []
| []
| [
"MAIL_KEY",
"MAIL_API"
]
| [] | ["MAIL_KEY", "MAIL_API"] | python | 2 | 0 | |
share/qt/extract_strings_qt.py | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt stringdefs so that
they can be picked up by Qt linguist.
'''
from __future__ import division,print_function,unicode_literals
from subprocess import Popen, PIPE
import glob
import operator
import os
import sys
OUT_CPP="qt/theucoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
if not XGETTEXT:
print('Cannot extract strings: xgettext utility is not installed or not configured.',file=sys.stderr)
print('Please install package "gettext" and re-run \'./configure\'.',file=sys.stderr)
exit(1)
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out.decode('utf-8'))
f = open(OUT_CPP, 'w')
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *theucoin_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("theucoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
| []
| []
| [
"XGETTEXT"
]
| [] | ["XGETTEXT"] | python | 1 | 0 | |
project2/sct/flags.py | import os
import tensorflow as tf
flags = tf.app.flags
FLAGS = flags.FLAGS
def define_flags() -> None:
# Directories
flags.DEFINE_string('logdir', 'outputs', 'Logdir name.')
flags.DEFINE_string('expname', 'RoemmeleSentences-gru1000-epochs10-lr1e-3-bs100-es4800', 'Experiment name.')
# Checkpoint
flags.DEFINE_string('load_checkpoint', None, 'Full path to the checkpoint to load.')
# Data files
flags.DEFINE_string('train_file', 'data/stories.train.csv', 'Train data file.')
flags.DEFINE_string('eval_file', 'data/stories.eval.csv', 'Evaluation data file.')
flags.DEFINE_list('test_files', ['data/stories.test.csv', 'data/stories.spring2016.csv'], 'Test data files.')
flags.DEFINE_string('skip_thought_folder', os.environ['SCRATCH'] + '/st', 'Skip-thought embeddings folder.')
# Model choice
flags.DEFINE_string(
'model', 'RoemmeleSentences',
'Model class name. Models that have "sentences" in their name have different data preprocessing steps.')
flags.DEFINE_integer('roemmele_multiplicative_factor', 6, 'How many negative endings to sample. Need 1 for '
'`add` not None.')
flags.DEFINE_string('add', None, 'Whether and which constant add to use for negative labels.')
flags.DEFINE_bool('eval_train', False, 'Train on first 80% of eval dataset, eval on rest.')
flags.DEFINE_bool('balanced_batches', False, 'Train with label-balanced batches.')
flags.DEFINE_string('attention', None, 'Attention type (add ~ Bahdanau, mult ~ Luong, None). Only for Roemmele '
'models.')
flags.DEFINE_integer('attention_size', 1000, 'Attention size.')
# TF parameters
flags.DEFINE_boolean("no_gpu", False, 'Disables GPU usage even if a GPU is available')
flags.DEFINE_integer('threads', 8, 'Maximum number of threads to use.')
flags.DEFINE_integer('seed', 42, 'Random seed')
# Optimization parameters
flags.DEFINE_integer('epochs', 10, 'Training epoch count')
flags.DEFINE_integer('batch_size', 100, 'Training batch size')
flags.DEFINE_float('learning_rate', 1e-3, 'learning rate')
flags.DEFINE_float('grad_clip', 10.0, 'Gradient clipped to L2 norm smaller than or equal to.')
flags.DEFINE_integer('evaluate_every_steps', 2000, 'Evaluate every N steps.')
# Jupyter notebook params
# Only to avoid raising UnrecognizedFlagError
flags.DEFINE_string('f', 'kernel', 'Kernel')
# Other
flags.DEFINE_string('rnn_cell', "GRU", 'RNN cell type. If None, attention-only model.')
flags.DEFINE_integer('rnn_cell_dim', 1000, 'RNN cell dimension.')
flags.DEFINE_integer('word_embedding', 620, 'word_embedding')
flags.DEFINE_integer('char_embedding', 200, 'char_embedding')
flags.DEFINE_integer('sentence_embedding', 4800, 'sentence_embedding')
flags.DEFINE_float('keep_prob', 0.5, 'dropout probability')
| []
| []
| [
"SCRATCH"
]
| [] | ["SCRATCH"] | python | 1 | 0 | |
umbrella.py | """Helpers for configuring API backends on API Umbrella."""
import os
import requests
headers = {
'X-Api-Key': os.environ.get('AUTOAPI_UMBRELLA_KEY'),
'X-Admin-Auth-Token': os.environ.get('AUTOAPI_UMBRELLA_TOKEN'),
}
base = 'https://api.data.gov/api-umbrella/v1'
endpoints = {
'apis': os.path.join(base, 'apis'),
'publish': os.path.join(base, 'config', 'publish'),
}
def make_backend(name, host):
"""Create or update API backend."""
backend = get_backend(name, host)
payload = get_payload(name, host)
if backend:
method = 'PUT'
url = os.path.join(endpoints['apis'], backend['id'])
version = backend['version'] + 1
else:
method = 'POST'
url = endpoints['apis']
version = 1
response = requests.request(method, url, json=payload, headers=headers)
publish_backend(backend if backend else response.json()['api'], version)
def get_backend(name, host):
"""Get existing API backend matching name and host."""
response = requests.get(endpoints['apis'], headers=headers)
backends = response.json()['data']
predicate = lambda backend: backend['name'] == name and backend['backend_host'] == host
return next(filter(predicate, backends), None)
def get_payload(name, host):
"""Build payload to create or update API backend."""
route = os.path.join('/api-program', name)
return {
'api': {
'name': name,
'frontend_host': 'api.data.gov',
'backend_host': host,
'backend_protocol': 'https',
'balance_algorithm': 'least_conn',
'servers': [
{
'host': host,
'port': 443,
}
],
'url_matches': [
{
'frontend_prefix': route,
'backend_prefix': route,
}
],
}
}
def publish_backend(payload, version):
id = payload['id']
form = {
'config[apis][{0}][pending_version]'.format(id): version,
'config[apis][{0}][publish]'.format(id): version,
}
return requests.post(endpoints['publish'], headers=headers, data=form)
| []
| []
| [
"AUTOAPI_UMBRELLA_KEY",
"AUTOAPI_UMBRELLA_TOKEN"
]
| [] | ["AUTOAPI_UMBRELLA_KEY", "AUTOAPI_UMBRELLA_TOKEN"] | python | 2 | 0 | |
exchanges/exchanges_test.go | // Copyright (c) 2019, The Decred developers
// See LICENSE for details.
package exchanges
import (
"context"
"encoding/json"
"fmt"
"os"
"os/signal"
"path/filepath"
"sync"
"testing"
"time"
"github.com/carterjones/signalr"
"github.com/carterjones/signalr/hubs"
"github.com/decred/dcrdata/dcrrates"
"github.com/decred/slog"
)
func enableTestLog() {
if log == slog.Disabled {
UseLogger(slog.NewBackend(os.Stdout).Logger("EXE"))
log.SetLevel(slog.LevelTrace)
}
}
func makeKillSwitch() chan os.Signal {
killSwitch := make(chan os.Signal, 1)
signal.Notify(killSwitch, os.Interrupt)
return killSwitch
}
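// testExchanges runs a live ExchangeBot (optionally as a slave of a local
// master), listens for exchange and index updates for up to seven minutes,
// and verifies that every known exchange token is accounted for. In quick
// mode it shuts down early once each exchange has reported at least once.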
func testExchanges(asSlave, quickTest bool, t *testing.T) {
enableTestLog()
// Skip this test during automated testing.
if os.Getenv("GORACE") != "" {
t.Skip("Skipping exchange test")
}
ctx, shutdown := context.WithCancel(context.Background())
killSwitch := makeKillSwitch()
wg := new(sync.WaitGroup)
wg.Add(1)
go func() {
select {
case <-killSwitch:
shutdown()
case <-ctx.Done():
}
wg.Done()
}()
config := new(ExchangeBotConfig)
config.Disabled = make([]string, 0)
config.Indent = true
if asSlave {
config.MasterBot = ":7778"
config.MasterCertFile = filepath.Join(dcrrates.DefaultAppDirectory, dcrrates.DefaultCertName)
} else {
config.DataExpiry = "2m"
config.RequestExpiry = "4m"
}
bot, err := NewExchangeBot(config)
if err != nil {
shutdown()
t.Fatalf("Error creating bot. Shutting down: %v", err)
}
updateCounts := make(map[string]int)
for token := range bot.Exchanges {
updateCounts[token] = 0
}
logUpdate := func(token string) {
if !quickTest {
return
}
updateCounts[token]++
lowest := updateCounts[token]
for _, v := range updateCounts {
if v < lowest {
lowest = v
}
}
if lowest > 0 {
log.Infof("Quick test conditions met. Shutting down early")
shutdown()
}
}
wg.Add(1)
go bot.Start(ctx, wg)
quitTimer := time.NewTimer(time.Minute * 7)
ch := bot.UpdateChannels()
out:
for {
select {
case update := <-ch.Exchange:
logUpdate(update.Token)
log.Infof("Update received from exchange %s", update.Token)
case update := <-ch.Index:
logUpdate(update.Token)
log.Infof("Update received from index %s", update.Token)
case <-ch.Quit:
t.Errorf("Exchange bot has quit.")
break out
case <-quitTimer.C:
break out
case <-ctx.Done():
break out
}
}
if bot.IsFailed() {
log.Infof("ExchangeBot is in failed state")
}
logMissing := func(token string) {
for xc := range updateCounts {
if xc == token {
return
}
}
t.Errorf("No update received for %s", token)
}
for _, token := range Tokens() {
logMissing(token)
}
depth, err := bot.QuickDepth(aggregatedOrderbookKey)
if err != nil {
t.Errorf("Failed to create aggregated orderbook")
}
log.Infof("aggregated orderbook size: %d kiB", len(depth)/1024)
log.Infof("%d Bitcoin indices available", len(bot.AvailableIndices()))
log.Infof("final state is %d kiB", len(bot.StateBytes())/1024)
shutdown()
wg.Wait()
}
func TestExchanges(t *testing.T) {
testExchanges(false, false, t)
}
func TestSlaveBot(t *testing.T) {
// Points to DCRData on local machine port 7778.
// Start server with --exchange-refresh=1m --exchange-expiry=2m
testExchanges(true, false, t)
}
func TestQuickExchanges(t *testing.T) {
testExchanges(false, true, t)
}
var initialPoloniexOrderbook = []byte(`[
14,
8767,
[
[
"i",
{
"currencyPair": "BTC_BTS",
"orderBook": [
{
"0.00011358": "127734.81648491",
"0.00011359": "667.14834444",
"0.00011360": "3651.66059723",
"0.00011361": "200.14590282",
"0.00011362": "4816.12553510",
"0.00011363": "37.08390161",
"0.00011365": "3419.78939376",
"0.00011366": "8.05270863",
"0.00011367": "73239.96650974",
"0.00011368": "7958.06486028",
"0.00011369": "142.68135365",
"0.00011370": "24411.40000000",
"0.00011372": "244147.92356157"
},
{
"0.00001358": "27734.81648491",
"0.00001359": "67.14834444",
"0.00001360": "651.66059723",
"0.00001361": "20.14590282",
"0.00001362": "816.12553510",
"0.00001363": "7.08390161",
"0.00001365": "419.78939376",
"0.00001366": ".05270863",
"0.00001367": "3239.96650974",
"0.00001368": "958.06486028",
"0.00001369": "42.68135365",
"0.00001370": "4411.40000000",
"0.00001371": "44147.92356157"
}
]
}
]
]
]`)
var poloniexEmptyUpdate = []byte(`[
1010
]`)
var poloniexOrderbookUpdate = []byte(`[
14,
8768,
[
[
"o",
0,
"0.00011358",
"0.00000000"
],
[
"o",
1,
"0.00001372",
"1.00000000"
]
]
]`)
var poloniexTrade = []byte(`[
14,
8769,
[
[
"t",
"10115654",
1,
"0.00011359",
"667.14834444",
1554856977
]
]
]`)
// Satisfies the websocketFeed interface
type fakePoloniexWebsocket struct{}
var poloniexDoneChannel = make(chan struct{})
var poloniexReadCount int
// Done() chan struct{}
// Read() ([]byte, error)
// Write(interface{}) error
// Close()
func (p *fakePoloniexWebsocket) Done() chan struct{} {
return poloniexDoneChannel
}
func (p *fakePoloniexWebsocket) Read() ([]byte, error) {
poloniexReadCount++
switch poloniexReadCount {
case 1:
return initialPoloniexOrderbook, nil
case 2:
time.Sleep(100 * time.Millisecond)
return poloniexEmptyUpdate, nil
case 3:
time.Sleep(100 * time.Millisecond)
return poloniexOrderbookUpdate, nil
}
<-poloniexDoneChannel
return nil, fmt.Errorf("closed (expected)")
}
func (p *fakePoloniexWebsocket) Write(interface{}) error {
return nil
}
func (p *fakePoloniexWebsocket) Close() {
close(poloniexDoneChannel)
}
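// newTestPoloniexExchange builds a PoloniexExchange with empty in-memory order
// books and a buffered update channel, suitable for exercising the websocket
// message handling in isolation.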
func newTestPoloniexExchange() *PoloniexExchange {
return &PoloniexExchange{
CommonExchange: &CommonExchange{
token: Poloniex,
currentState: &ExchangeState{
Price: 1,
},
channels: &BotChannels{
exchange: make(chan *ExchangeUpdate, 2),
},
asks: make(wsOrders),
buys: make(wsOrders),
},
}
}
func TestPoloniexWebsocket(t *testing.T) {
enableTestLog()
poloniex := newTestPoloniexExchange()
poloniex.ws = &fakePoloniexWebsocket{}
checkLengths := func(askLen, buyLen int) {
if len(poloniex.asks) != askLen || len(poloniex.buys) != buyLen {
t.Errorf("unexpected order book lengths (%d, %d). expected (%d, %d)",
len(poloniex.asks), len(poloniex.buys), askLen, buyLen)
}
}
poloniex.processWsMessage(initialPoloniexOrderbook)
checkLengths(13, 13)
poloniex.processWsMessage(poloniexEmptyUpdate)
checkLengths(13, 13)
// The update includes a deletion in the asks and a new bin in the buys.
poloniex.processWsMessage(poloniexOrderbookUpdate)
checkLengths(12, 14)
depth := poloniex.wsDepths()
poloniex.processWsMessage(poloniexTrade)
if len(depth.Asks) != 12 || len(depth.Bids) != 14 {
t.Errorf("unexpected depth data lengths (%d, %d). expected (12, 14)", len(depth.Asks), len(depth.Bids))
}
poloniex.wsProcessor = poloniex.processWsMessage
poloniex.buys = make(wsOrders)
poloniex.asks = make(wsOrders)
poloniex.currentState = &ExchangeState{Price: 1}
poloniex.startWebsocket()
time.Sleep(300 * time.Millisecond)
poloniex.ws.Close()
time.Sleep(100 * time.Millisecond)
depth = poloniex.wsDepths()
if len(depth.Asks) != 12 || len(depth.Bids) != 14 {
t.Errorf("unexpected depth data lengths (%d, %d). expected (12, 14)", len(depth.Asks), len(depth.Bids))
}
if poloniex.wsListening() {
t.Errorf("poloniex websocket unexpectedly listening")
}
if !poloniex.wsFailed() {
t.Errorf("poloniex should be in failed state, but isn't")
}
if poloniex.wsErrorCount() != 1 {
t.Errorf("unexpected poloniex websocket error count: %d", poloniex.wsErrorCount())
}
}
func TestPoloniexLiveWebsocket(t *testing.T) {
enableTestLog()
// Skip this test during automated testing.
if os.Getenv("GORACE") != "" {
t.Skip("Skipping Poloniex websocket test")
}
killSwitch := makeKillSwitch()
poloniex := newTestPoloniexExchange()
var msgs int
processor := func(b []byte) {
msgs++
var s string
if len(b) >= 128 {
s = string(b[:128]) + "..."
} else {
s = string(b)
}
if s == "[1010]" {
log.Infof("heartbeat")
} else {
log.Infof("message received: %s", s)
}
}
testConnectWs := func() {
poloniexDoneChannel = make(chan struct{})
poloniex.connectWebsocket(processor, &socketConfig{
address: PoloniexURLs.Websocket,
})
poloniex.wsSend(poloniexOrderbookSubscription)
}
testConnectWs()
select {
case <-time.NewTimer(30 * time.Second).C:
case <-killSwitch:
t.Errorf("ctrl+c detected")
return
}
// Test reconnection
poloniex.ws.Close()
testConnectWs()
select {
case <-time.NewTimer(30 * time.Second).C:
case <-killSwitch:
t.Errorf("ctrl+c detected")
return
}
log.Infof("%d messages received", msgs)
}
var (
bittrexSignalrTemplate = signalr.Message{}
bittrexMsgTemplate = hubs.ClientMsg{}
bittrexTestUpdateChan = make(chan *BittrexOrderbookUpdate)
)
type testBittrexConnection struct {
xc *BittrexExchange
}
func (conn testBittrexConnection) Close() {}
func (conn testBittrexConnection) IsOpen() bool {
// Doesn't matter right now.
return false
}
func (conn testBittrexConnection) Send(subscription hubs.ClientMsg) error {
if subscription.M == "SubscribeToExchangeDeltas" {
go func() {
for update := range bittrexTestUpdateChan {
if update == nil {
return
}
conn.xc.msgHandler(signalr.Message{
M: []hubs.ClientMsg{
hubs.ClientMsg{
M: updateMsgKey,
A: []interface{}{update},
},
},
})
}
}()
}
if subscription.M == "QueryExchangeState" {
go func() {
book := BittrexOrderbookUpdate{
Nonce: 2,
MarketName: "BTC-DCR",
Buys: []*BittrexWsOrder{
&BittrexWsOrder{
Quantity: 5.,
Rate: 5.,
Type: BittrexOrderAdd,
},
&BittrexWsOrder{
Quantity: 5.,
Rate: 6.,
Type: BittrexOrderAdd,
},
},
Sells: []*BittrexWsOrder{
&BittrexWsOrder{
Quantity: 5.,
Rate: 105.,
Type: BittrexOrderAdd,
},
&BittrexWsOrder{
Quantity: 5.,
Rate: 106.,
Type: BittrexOrderAdd,
},
},
Fills: []*BittrexWsFill{},
}
msgBytes, _ := json.Marshal(book)
conn.xc.msgHandler(signalr.Message{
I: "1",
R: json.RawMessage(msgBytes),
})
}()
}
return nil
}
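// newTestBittrexExchange builds a BittrexExchange whose signalr connection is
// the fake testBittrexConnection above, so order book updates can be injected
// through bittrexTestUpdateChan.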
func newTestBittrexExchange() *BittrexExchange {
bittrex := &BittrexExchange{
CommonExchange: &CommonExchange{
token: Bittrex,
currentState: &ExchangeState{
Price: 1,
},
channels: &BotChannels{
exchange: make(chan *ExchangeUpdate, 2),
},
asks: make(wsOrders),
buys: make(wsOrders),
},
queue: make([]*BittrexOrderbookUpdate, 0),
}
bittrex.sr = testBittrexConnection{xc: bittrex}
return bittrex
}
func TestBittrexWebsocket(t *testing.T) {
defer close(bittrexTestUpdateChan)
bittrex := newTestBittrexExchange()
template := func() BittrexOrderbookUpdate {
return BittrexOrderbookUpdate{
Buys: []*BittrexWsOrder{},
Sells: []*BittrexWsOrder{},
Fills: []*BittrexWsFill{},
}
}
bittrex.sr.Send(bittrexWsOrderUpdateRequest)
checkUpdate := func(test string, update *BittrexOrderbookUpdate, askLen, buyLen int) {
bittrexTestUpdateChan <- update
// That should trigger the order book to be requested
<-time.NewTimer(time.Millisecond * 100).C
// Check that the initial orderbook was processed.
bittrex.orderMtx.RLock()
defer bittrex.orderMtx.RUnlock()
if len(bittrex.asks) != askLen {
			t.Fatalf("bittrex asks slice has unexpected length %d for test '%s'", len(bittrex.asks), test)
}
if len(bittrex.buys) != buyLen {
			t.Fatalf("bittrex buys slice has unexpected length %d for test '%s'", len(bittrex.buys), test)
}
}
	// Set up a buy order that should be ignored because its nonce is lower than
	// the initial nonce. This update should be queued and eventually discarded.
update := template()
update.Buys = []*BittrexWsOrder{
&BittrexWsOrder{
Quantity: 5.,
Rate: 4.,
Type: BittrexOrderAdd,
},
}
update.Nonce = 2
checkUpdate("add early nonce", &update, 2, 2)
// Remove a buy order
update = template()
update.Nonce = 3
update.Buys = []*BittrexWsOrder{
&BittrexWsOrder{
Quantity: 0.,
Rate: 5.,
Type: BittrexOrderRemove,
},
}
checkUpdate("remove buy", &update, 2, 1)
// Add a sell order
update = template()
update.Nonce = 4
update.Sells = []*BittrexWsOrder{
&BittrexWsOrder{
Quantity: 0.,
Rate: 107.,
Type: BittrexOrderAdd,
},
}
checkUpdate("add sell", &update, 3, 1)
// Update a sell order
update = template()
update.Nonce = 5
update.Sells = []*BittrexWsOrder{
&BittrexWsOrder{
Quantity: 0.,
Rate: 107.,
Type: BittrexOrderUpdate,
},
}
checkUpdate("update sell", &update, 3, 1)
if bittrex.wsFailed() {
t.Fatalf("bittrex websocket unexpectedly failed")
}
// Add too many out of order updates. Should trigger a failed state.
for i := 0; i < maxBittrexQueueSize+1; i++ {
update := template()
update.Nonce = 1000
bittrexTestUpdateChan <- &update
}
<-time.NewTimer(time.Millisecond * 100).C
if !bittrex.wsFailed() {
t.Fatalf("bittrex not in failed state as expected")
}
}
func TestBittrexLiveWebsocket(t *testing.T) {
enableTestLog()
// Skip this test during automated testing.
if os.Getenv("GORACE") != "" {
t.Skip("Skipping Bittrex websocket test")
}
killSwitch := makeKillSwitch()
bittrex := newTestBittrexExchange()
bittrex.connectWs()
defer bittrex.sr.Close()
testDuration := 450
log.Infof("listening for %d seconds", testDuration)
select {
case <-time.NewTimer(time.Second * time.Duration(testDuration)).C:
case <-killSwitch:
t.Errorf("ctrl+c detected")
return
}
if bittrex.wsFailed() {
bittrex.sr.Close()
t.Fatalf("bittrex connection in failed state")
}
depths := bittrex.wsDepths()
log.Infof("%d asks", len(depths.Asks))
log.Infof("%d bids", len(depths.Bids))
}
| [
"\"GORACE\"",
"\"GORACE\"",
"\"GORACE\""
]
| []
| [
"GORACE"
]
| [] | ["GORACE"] | go | 1 | 0 | |
utils/utils.go | package utils
import (
"bytes"
"crypto/sha1"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"index/suffixarray"
"io"
"io/ioutil"
"net/http"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"sync"
"time"
)
var (
IAMSTATIC bool // whether or not Docker itself was compiled statically via ./hack/make.sh binary
INITSHA1 string // sha1sum of separate static dockerinit, if Docker itself was compiled dynamically via ./hack/make.sh dynbinary
INITPATH string // custom location to search for a valid dockerinit binary (available for packagers as a last resort escape hatch)
)
// A common interface to access the Fatal method of
// both testing.B and testing.T.
type Fataler interface {
Fatal(args ...interface{})
}
// Go is a basic promise implementation: it wraps a function call in a goroutine
// and returns a channel which will later yield the function's return value.
func Go(f func() error) chan error {
ch := make(chan error)
go func() {
ch <- f()
}()
return ch
}
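// A minimal usage sketch (hypothetical caller; doWork is assumed to be any
// func() error):
//
//	errc := Go(func() error { return doWork() })
//	// ... do other work concurrently ...
//	if err := <-errc; err != nil {
//		// handle the error returned by doWork
//	}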
// Request a given URL and return the resulting *http.Response
func Download(url string) (*http.Response, error) {
var resp *http.Response
var err error
if resp, err = http.Get(url); err != nil {
return nil, err
}
if resp.StatusCode >= 400 {
return nil, errors.New("Got HTTP status code >= 400: " + resp.Status)
}
return resp, nil
}
func logf(level string, format string, a ...interface{}) {
// Retrieve the stack infos
_, file, line, ok := runtime.Caller(2)
if !ok {
file = "<unknown>"
line = -1
} else {
file = file[strings.LastIndex(file, "/")+1:]
}
fmt.Fprintf(os.Stderr, fmt.Sprintf("[%s] %s:%d %s\n", level, file, line, format), a...)
}
// Debug function: if the debug flag is set, then display. Do nothing otherwise.
// If Docker is in daemon mode, also send the debug info on the socket.
func Debugf(format string, a ...interface{}) {
if os.Getenv("DEBUG") != "" {
logf("debug", format, a...)
}
}
func Errorf(format string, a ...interface{}) {
logf("error", format, a...)
}
// HumanDuration returns a human-readable approximation of a duration
// (eg. "About a minute", "4 hours ago", etc.)
func HumanDuration(d time.Duration) string {
if seconds := int(d.Seconds()); seconds < 1 {
return "Less than a second"
} else if seconds < 60 {
return fmt.Sprintf("%d seconds", seconds)
} else if minutes := int(d.Minutes()); minutes == 1 {
return "About a minute"
} else if minutes < 60 {
return fmt.Sprintf("%d minutes", minutes)
} else if hours := int(d.Hours()); hours == 1 {
return "About an hour"
} else if hours < 48 {
return fmt.Sprintf("%d hours", hours)
} else if hours < 24*7*2 {
return fmt.Sprintf("%d days", hours/24)
} else if hours < 24*30*3 {
return fmt.Sprintf("%d weeks", hours/24/7)
} else if hours < 24*365*2 {
return fmt.Sprintf("%d months", hours/24/30)
}
return fmt.Sprintf("%f years", d.Hours()/24/365)
}
// HumanSize returns a human-readable approximation of a size
// using SI standard (eg. "44kB", "17MB")
func HumanSize(size int64) string {
i := 0
var sizef float64
sizef = float64(size)
units := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
for sizef >= 1000.0 {
sizef = sizef / 1000.0
i++
}
return fmt.Sprintf("%.4g %s", sizef, units[i])
}
// Parses a human-readable string representing an amount of RAM
// in bytes, kibibytes, mebibytes or gibibytes, and returns the
// number of bytes, or -1 if the string is unparseable.
// Units are case-insensitive, and the 'b' suffix is optional.
func RAMInBytes(size string) (bytes int64, err error) {
re, error := regexp.Compile("^(\\d+)([kKmMgG])?[bB]?$")
if error != nil {
return -1, error
}
matches := re.FindStringSubmatch(size)
if len(matches) != 3 {
return -1, fmt.Errorf("Invalid size: '%s'", size)
}
memLimit, error := strconv.ParseInt(matches[1], 10, 0)
if error != nil {
return -1, error
}
unit := strings.ToLower(matches[2])
if unit == "k" {
memLimit *= 1024
} else if unit == "m" {
memLimit *= 1024 * 1024
} else if unit == "g" {
memLimit *= 1024 * 1024 * 1024
}
return memLimit, nil
}
func Trunc(s string, maxlen int) string {
if len(s) <= maxlen {
return s
}
return s[:maxlen]
}
// Figure out the absolute path of our own binary (if it's still around).
func SelfPath() string {
path, err := exec.LookPath(os.Args[0])
if err != nil {
if os.IsNotExist(err) {
return ""
}
if execErr, ok := err.(*exec.Error); ok && os.IsNotExist(execErr.Err) {
return ""
}
panic(err)
}
path, err = filepath.Abs(path)
if err != nil {
if os.IsNotExist(err) {
return ""
}
panic(err)
}
return path
}
func dockerInitSha1(target string) string {
f, err := os.Open(target)
if err != nil {
return ""
}
defer f.Close()
h := sha1.New()
_, err = io.Copy(h, f)
if err != nil {
return ""
}
return hex.EncodeToString(h.Sum(nil))
}
func isValidDockerInitPath(target string, selfPath string) bool { // target and selfPath should be absolute (InitPath and SelfPath already do this)
if target == "" {
return false
}
if IAMSTATIC {
if selfPath == "" {
return false
}
if target == selfPath {
return true
}
targetFileInfo, err := os.Lstat(target)
if err != nil {
return false
}
selfPathFileInfo, err := os.Lstat(selfPath)
if err != nil {
return false
}
return os.SameFile(targetFileInfo, selfPathFileInfo)
}
return INITSHA1 != "" && dockerInitSha1(target) == INITSHA1
}
// Figure out the path of our dockerinit (which may be SelfPath())
func DockerInitPath(localCopy string) string {
selfPath := SelfPath()
if isValidDockerInitPath(selfPath, selfPath) {
// if we're valid, don't bother checking anything else
return selfPath
}
var possibleInits = []string{
localCopy,
INITPATH,
filepath.Join(filepath.Dir(selfPath), "dockerinit"),
// FHS 3.0 Draft: "/usr/libexec includes internal binaries that are not intended to be executed directly by users or shell scripts. Applications may use a single subdirectory under /usr/libexec."
// http://www.linuxbase.org/betaspecs/fhs/fhs.html#usrlibexec
"/usr/libexec/docker/dockerinit",
"/usr/local/libexec/docker/dockerinit",
// FHS 2.3: "/usr/lib includes object files, libraries, and internal binaries that are not intended to be executed directly by users or shell scripts."
// http://refspecs.linuxfoundation.org/FHS_2.3/fhs-2.3.html#USRLIBLIBRARIESFORPROGRAMMINGANDPA
"/usr/lib/docker/dockerinit",
"/usr/local/lib/docker/dockerinit",
}
for _, dockerInit := range possibleInits {
if dockerInit == "" {
continue
}
path, err := exec.LookPath(dockerInit)
if err == nil {
path, err = filepath.Abs(path)
if err != nil {
// LookPath already validated that this file exists and is executable (following symlinks), so how could Abs fail?
panic(err)
}
if isValidDockerInitPath(path, selfPath) {
return path
}
}
}
return ""
}
type NopWriter struct{}
func (*NopWriter) Write(buf []byte) (int, error) {
return len(buf), nil
}
type nopWriteCloser struct {
io.Writer
}
func (w *nopWriteCloser) Close() error { return nil }
func NopWriteCloser(w io.Writer) io.WriteCloser {
return &nopWriteCloser{w}
}
type bufReader struct {
sync.Mutex
buf *bytes.Buffer
reader io.Reader
err error
wait sync.Cond
}
func NewBufReader(r io.Reader) *bufReader {
reader := &bufReader{
buf: &bytes.Buffer{},
reader: r,
}
reader.wait.L = &reader.Mutex
go reader.drain()
return reader
}
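// drain continuously reads from the underlying reader into the buffer,
// signalling any blocked Read caller after each chunk and recording the
// first error encountered.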
func (r *bufReader) drain() {
buf := make([]byte, 1024)
for {
n, err := r.reader.Read(buf)
r.Lock()
if err != nil {
r.err = err
} else {
r.buf.Write(buf[0:n])
}
r.wait.Signal()
r.Unlock()
if err != nil {
break
}
}
}
func (r *bufReader) Read(p []byte) (n int, err error) {
r.Lock()
defer r.Unlock()
for {
n, err = r.buf.Read(p)
if n > 0 {
return n, err
}
if r.err != nil {
return 0, r.err
}
r.wait.Wait()
}
}
func (r *bufReader) Close() error {
closer, ok := r.reader.(io.ReadCloser)
if !ok {
return nil
}
return closer.Close()
}
type WriteBroadcaster struct {
sync.Mutex
buf *bytes.Buffer
writers map[StreamWriter]bool
}
type StreamWriter struct {
wc io.WriteCloser
stream string
}
func (w *WriteBroadcaster) AddWriter(writer io.WriteCloser, stream string) {
w.Lock()
sw := StreamWriter{wc: writer, stream: stream}
w.writers[sw] = true
w.Unlock()
}
type JSONLog struct {
Log string `json:"log,omitempty"`
Stream string `json:"stream,omitempty"`
Created time.Time `json:"time"`
}
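// Write appends p to the internal buffer and fans it out to every registered
// writer. Writers registered with a stream name receive complete lines wrapped
// as JSONLog records; writers that fail are evicted.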
func (w *WriteBroadcaster) Write(p []byte) (n int, err error) {
w.Lock()
defer w.Unlock()
w.buf.Write(p)
for sw := range w.writers {
lp := p
if sw.stream != "" {
lp = nil
for {
line, err := w.buf.ReadString('\n')
if err != nil {
w.buf.Write([]byte(line))
break
}
b, err := json.Marshal(&JSONLog{Log: line, Stream: sw.stream, Created: time.Now().UTC()})
if err != nil {
// On error, evict the writer
delete(w.writers, sw)
continue
}
lp = append(lp, b...)
lp = append(lp, '\n')
}
}
if n, err := sw.wc.Write(lp); err != nil || n != len(lp) {
// On error, evict the writer
delete(w.writers, sw)
}
}
return len(p), nil
}
func (w *WriteBroadcaster) CloseWriters() error {
w.Lock()
defer w.Unlock()
for sw := range w.writers {
sw.wc.Close()
}
w.writers = make(map[StreamWriter]bool)
return nil
}
func NewWriteBroadcaster() *WriteBroadcaster {
return &WriteBroadcaster{writers: make(map[StreamWriter]bool), buf: bytes.NewBuffer(nil)}
}
func GetTotalUsedFds() int {
if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
} else {
return len(fds)
}
return -1
}
// TruncIndex allows the retrieval of string identifiers by any of their unique prefixes.
// This is used to retrieve image and container IDs by more convenient shorthand prefixes.
type TruncIndex struct {
index *suffixarray.Index
ids map[string]bool
bytes []byte
}
func NewTruncIndex() *TruncIndex {
return &TruncIndex{
index: suffixarray.New([]byte{' '}),
ids: make(map[string]bool),
bytes: []byte{' '},
}
}
func (idx *TruncIndex) Add(id string) error {
if strings.Contains(id, " ") {
return fmt.Errorf("Illegal character: ' '")
}
if _, exists := idx.ids[id]; exists {
return fmt.Errorf("Id already exists: %s", id)
}
idx.ids[id] = true
idx.bytes = append(idx.bytes, []byte(id+" ")...)
idx.index = suffixarray.New(idx.bytes)
return nil
}
func (idx *TruncIndex) Delete(id string) error {
if _, exists := idx.ids[id]; !exists {
return fmt.Errorf("No such id: %s", id)
}
before, after, err := idx.lookup(id)
if err != nil {
return err
}
delete(idx.ids, id)
idx.bytes = append(idx.bytes[:before], idx.bytes[after:]...)
idx.index = suffixarray.New(idx.bytes)
return nil
}
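// lookup returns the start and end byte offsets of the single stored id
// matching prefix s, or an error if the prefix is unknown or ambiguous.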
func (idx *TruncIndex) lookup(s string) (int, int, error) {
offsets := idx.index.Lookup([]byte(" "+s), -1)
//log.Printf("lookup(%s): %v (index bytes: '%s')\n", s, offsets, idx.index.Bytes())
if offsets == nil || len(offsets) == 0 || len(offsets) > 1 {
return -1, -1, fmt.Errorf("No such id: %s", s)
}
offsetBefore := offsets[0] + 1
offsetAfter := offsetBefore + strings.Index(string(idx.bytes[offsetBefore:]), " ")
return offsetBefore, offsetAfter, nil
}
func (idx *TruncIndex) Get(s string) (string, error) {
before, after, err := idx.lookup(s)
//log.Printf("Get(%s) bytes=|%s| before=|%d| after=|%d|\n", s, idx.bytes, before, after)
if err != nil {
return "", err
}
return string(idx.bytes[before:after]), err
}
// TruncateID returns a shorthand version of a string identifier for convenience.
// A collision with other shorthands is very unlikely, but possible.
// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller
// will need to use a longer prefix, or the full-length Id.
func TruncateID(id string) string {
shortLen := 12
if len(id) < shortLen {
shortLen = len(id)
}
return id[:shortLen]
}
// Code c/c from io.Copy() modified to handle escape sequence
func CopyEscapable(dst io.Writer, src io.ReadCloser) (written int64, err error) {
buf := make([]byte, 32*1024)
for {
nr, er := src.Read(buf)
if nr > 0 {
// ---- Docker addition
// char 16 is C-p
if nr == 1 && buf[0] == 16 {
nr, er = src.Read(buf)
// char 17 is C-q
if nr == 1 && buf[0] == 17 {
if err := src.Close(); err != nil {
return 0, err
}
return 0, nil
}
}
// ---- End of docker
nw, ew := dst.Write(buf[0:nr])
if nw > 0 {
written += int64(nw)
}
if ew != nil {
err = ew
break
}
if nr != nw {
err = io.ErrShortWrite
break
}
}
if er == io.EOF {
break
}
if er != nil {
err = er
break
}
}
return written, err
}
func HashData(src io.Reader) (string, error) {
h := sha256.New()
if _, err := io.Copy(h, src); err != nil {
return "", err
}
return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
}
type KernelVersionInfo struct {
Kernel int
Major int
Minor int
Flavor string
}
func (k *KernelVersionInfo) String() string {
flavor := ""
if len(k.Flavor) > 0 {
flavor = fmt.Sprintf("-%s", k.Flavor)
}
return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, flavor)
}
// CompareKernelVersion compares two KernelVersionInfo structs.
// Returns -1 if a < b, 0 if a == b, 1 if a > b
func CompareKernelVersion(a, b *KernelVersionInfo) int {
if a.Kernel < b.Kernel {
return -1
} else if a.Kernel > b.Kernel {
return 1
}
if a.Major < b.Major {
return -1
} else if a.Major > b.Major {
return 1
}
if a.Minor < b.Minor {
return -1
} else if a.Minor > b.Minor {
return 1
}
return 0
}
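// FindCgroupMountpoint parses /proc/mounts and returns the mountpoint of the
// cgroup hierarchy whose mount options include cgroupType.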
func FindCgroupMountpoint(cgroupType string) (string, error) {
output, err := ioutil.ReadFile("/proc/mounts")
if err != nil {
return "", err
}
// /proc/mounts has 6 fields per line, one mount per line, e.g.
// cgroup /sys/fs/cgroup/devices cgroup rw,relatime,devices 0 0
for _, line := range strings.Split(string(output), "\n") {
parts := strings.Split(line, " ")
if len(parts) == 6 && parts[2] == "cgroup" {
for _, opt := range strings.Split(parts[3], ",") {
if opt == cgroupType {
return parts[1], nil
}
}
}
}
return "", fmt.Errorf("cgroup mountpoint not found for %s", cgroupType)
}
func GetKernelVersion() (*KernelVersionInfo, error) {
var (
err error
)
uts, err := uname()
if err != nil {
return nil, err
}
release := make([]byte, len(uts.Release))
i := 0
for _, c := range uts.Release {
release[i] = byte(c)
i++
}
// Remove the \x00 from the release for Atoi to parse correctly
release = release[:bytes.IndexByte(release, 0)]
return ParseRelease(string(release))
}
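// ParseRelease parses a kernel release string such as "3.8.0-19-generic" into
// its kernel, major, minor and flavor components.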
func ParseRelease(release string) (*KernelVersionInfo, error) {
var (
flavor string
kernel, major, minor int
err error
)
tmp := strings.SplitN(release, "-", 2)
tmp2 := strings.Split(tmp[0], ".")
if len(tmp2) > 0 {
kernel, err = strconv.Atoi(tmp2[0])
if err != nil {
return nil, err
}
}
if len(tmp2) > 1 {
major, err = strconv.Atoi(tmp2[1])
if err != nil {
return nil, err
}
}
if len(tmp2) > 2 {
// Removes "+" because git kernels might set it
minorUnparsed := strings.Trim(tmp2[2], "+")
minor, err = strconv.Atoi(minorUnparsed)
if err != nil {
return nil, err
}
}
if len(tmp) == 2 {
flavor = tmp[1]
} else {
flavor = ""
}
return &KernelVersionInfo{
Kernel: kernel,
Major: major,
Minor: minor,
Flavor: flavor,
}, nil
}
// FIXME: this is deprecated by CopyWithTar in archive.go
func CopyDirectory(source, dest string) error {
if output, err := exec.Command("cp", "-ra", source, dest).CombinedOutput(); err != nil {
return fmt.Errorf("Error copy: %s (%s)", err, output)
}
return nil
}
type NopFlusher struct{}
func (f *NopFlusher) Flush() {}
type WriteFlusher struct {
sync.Mutex
w io.Writer
flusher http.Flusher
}
func (wf *WriteFlusher) Write(b []byte) (n int, err error) {
wf.Lock()
defer wf.Unlock()
n, err = wf.w.Write(b)
wf.flusher.Flush()
return n, err
}
// Flush the stream immediately.
func (wf *WriteFlusher) Flush() {
wf.Lock()
defer wf.Unlock()
wf.flusher.Flush()
}
func NewWriteFlusher(w io.Writer) *WriteFlusher {
var flusher http.Flusher
if f, ok := w.(http.Flusher); ok {
flusher = f
} else {
flusher = &NopFlusher{}
}
return &WriteFlusher{w: w, flusher: flusher}
}
func NewHTTPRequestError(msg string, res *http.Response) error {
return &JSONError{
Message: msg,
Code: res.StatusCode,
}
}
func IsURL(str string) bool {
return strings.HasPrefix(str, "http://") || strings.HasPrefix(str, "https://")
}
func IsGIT(str string) bool {
return strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "github.com/")
}
// GetResolvConf opens and read the content of /etc/resolv.conf.
// It returns it as byte slice.
func GetResolvConf() ([]byte, error) {
resolv, err := ioutil.ReadFile("/etc/resolv.conf")
if err != nil {
		Errorf("Error opening resolv.conf: %s", err)
return nil, err
}
return resolv, nil
}
// CheckLocalDns looks into the /etc/resolv.conf,
// it returns true if there is a local nameserver or if there is no nameserver.
func CheckLocalDns(resolvConf []byte) bool {
var parsedResolvConf = StripComments(resolvConf, []byte("#"))
if !bytes.Contains(parsedResolvConf, []byte("nameserver")) {
return true
}
for _, ip := range [][]byte{
[]byte("127.0.0.1"),
[]byte("127.0.1.1"),
} {
if bytes.Contains(parsedResolvConf, ip) {
return true
}
}
return false
}
// StripComments parses input into lines and strips away comments.
func StripComments(input []byte, commentMarker []byte) []byte {
lines := bytes.Split(input, []byte("\n"))
var output []byte
for _, currentLine := range lines {
var commentIndex = bytes.Index(currentLine, commentMarker)
if commentIndex == -1 {
output = append(output, currentLine...)
} else {
output = append(output, currentLine[:commentIndex]...)
}
output = append(output, []byte("\n")...)
}
return output
}
// GetNameserversAsCIDR returns nameservers (if any) listed in
// /etc/resolv.conf as CIDR blocks (e.g., "1.2.3.4/32")
// This function's output is intended for net.ParseCIDR
func GetNameserversAsCIDR(resolvConf []byte) []string {
var parsedResolvConf = StripComments(resolvConf, []byte("#"))
nameservers := []string{}
re := regexp.MustCompile(`^\s*nameserver\s*(([0-9]+\.){3}([0-9]+))\s*$`)
for _, line := range bytes.Split(parsedResolvConf, []byte("\n")) {
var ns = re.FindSubmatch(line)
if len(ns) > 0 {
nameservers = append(nameservers, string(ns[1])+"/32")
}
}
return nameservers
}
// FIXME: Change this not to receive default value as parameter
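// ParseHost normalizes a Docker host address (unix:// or tcp://) into a
// canonical "proto://host[:port]" string, substituting the supplied defaults
// for any missing pieces.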
func ParseHost(defaultHost string, defaultPort int, defaultUnix, addr string) (string, error) {
var (
proto string
host string
port int
)
switch {
case strings.HasPrefix(addr, "unix://"):
proto = "unix"
addr = strings.TrimPrefix(addr, "unix://")
if addr == "" {
addr = defaultUnix
}
case strings.HasPrefix(addr, "tcp://"):
proto = "tcp"
addr = strings.TrimPrefix(addr, "tcp://")
default:
if strings.Contains(addr, "://") {
return "", fmt.Errorf("Invalid bind address protocol: %s", addr)
}
proto = "tcp"
}
if proto != "unix" && strings.Contains(addr, ":") {
hostParts := strings.Split(addr, ":")
if len(hostParts) != 2 {
return "", fmt.Errorf("Invalid bind address format: %s", addr)
}
if hostParts[0] != "" {
host = hostParts[0]
} else {
host = defaultHost
}
if p, err := strconv.Atoi(hostParts[1]); err == nil && p != 0 {
port = p
} else {
port = defaultPort
}
} else {
host = addr
port = defaultPort
}
if proto == "unix" {
return fmt.Sprintf("%s://%s", proto, host), nil
}
return fmt.Sprintf("%s://%s:%d", proto, host, port), nil
}
func GetReleaseVersion() string {
resp, err := http.Get("http://get.docker.io/latest")
if err != nil {
return ""
}
defer resp.Body.Close()
if resp.ContentLength > 24 || resp.StatusCode != 200 {
return ""
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return ""
}
return strings.TrimSpace(string(body))
}
// Takes a repos name and returns the right reposName + tag
// The tag can be confusing because of a port in a repository name.
// Ex: localhost.localdomain:5000/samalba/hipache:latest
func ParseRepositoryTag(repos string) (string, string) {
n := strings.LastIndex(repos, ":")
if n < 0 {
return repos, ""
}
if tag := repos[n+1:]; !strings.Contains(tag, "/") {
return repos[:n], tag
}
return repos, ""
}
type User struct {
Uid string // user id
Gid string // primary group id
Username string
Name string
HomeDir string
}
// UserLookup checks if the given username or uid is present in /etc/passwd
// and returns the user struct.
// If the username is not found, an error is returned.
func UserLookup(uid string) (*User, error) {
file, err := ioutil.ReadFile("/etc/passwd")
if err != nil {
return nil, err
}
for _, line := range strings.Split(string(file), "\n") {
data := strings.Split(line, ":")
if len(data) > 5 && (data[0] == uid || data[2] == uid) {
return &User{
Uid: data[2],
Gid: data[3],
Username: data[0],
Name: data[4],
HomeDir: data[5],
}, nil
}
}
return nil, fmt.Errorf("User not found in /etc/passwd")
}
type DependencyGraph struct {
nodes map[string]*DependencyNode
}
type DependencyNode struct {
id string
deps map[*DependencyNode]bool
}
func NewDependencyGraph() DependencyGraph {
return DependencyGraph{
nodes: map[string]*DependencyNode{},
}
}
func (graph *DependencyGraph) addNode(node *DependencyNode) string {
if graph.nodes[node.id] == nil {
graph.nodes[node.id] = node
}
return node.id
}
func (graph *DependencyGraph) NewNode(id string) string {
if graph.nodes[id] != nil {
return id
}
nd := &DependencyNode{
id: id,
deps: map[*DependencyNode]bool{},
}
graph.addNode(nd)
return id
}
func (graph *DependencyGraph) AddDependency(node, to string) error {
if graph.nodes[node] == nil {
return fmt.Errorf("Node %s does not belong to this graph", node)
}
if graph.nodes[to] == nil {
return fmt.Errorf("Node %s does not belong to this graph", to)
}
if node == to {
return fmt.Errorf("Dependency loops are forbidden!")
}
graph.nodes[node].addDependency(graph.nodes[to])
return nil
}
func (node *DependencyNode) addDependency(to *DependencyNode) bool {
node.deps[to] = true
return node.deps[to]
}
func (node *DependencyNode) Degree() int {
return len(node.deps)
}
// The magic happens here ::
func (graph *DependencyGraph) GenerateTraversalMap() ([][]string, error) {
Debugf("Generating traversal map. Nodes: %d", len(graph.nodes))
result := [][]string{}
processed := map[*DependencyNode]bool{}
// As long as we haven't processed all nodes...
for len(processed) < len(graph.nodes) {
// Use a temporary buffer for processed nodes, otherwise
// nodes that depend on each other could end up in the same round.
tmpProcessed := []*DependencyNode{}
for _, node := range graph.nodes {
// If the node has more dependencies than what we have cleared,
// it won't be valid for this round.
if node.Degree() > len(processed) {
continue
}
// If it's already processed, get to the next one
if processed[node] {
continue
}
// It's not been processed yet and has 0 deps. Add it!
// (this is a shortcut for what we're doing below)
if node.Degree() == 0 {
tmpProcessed = append(tmpProcessed, node)
continue
}
// If at least one dep hasn't been processed yet, we can't
// add it.
ok := true
for dep := range node.deps {
if !processed[dep] {
ok = false
break
}
}
// All deps have already been processed. Add it!
if ok {
tmpProcessed = append(tmpProcessed, node)
}
}
Debugf("Round %d: found %d available nodes", len(result), len(tmpProcessed))
// If no progress has been made this round,
// that means we have circular dependencies.
if len(tmpProcessed) == 0 {
return nil, fmt.Errorf("Could not find a solution to this dependency graph")
}
round := []string{}
for _, nd := range tmpProcessed {
round = append(round, nd.id)
processed[nd] = true
}
result = append(result, round)
}
return result, nil
}
// A StatusError reports an unsuccessful exit by a command.
type StatusError struct {
Status string
StatusCode int
}
func (e *StatusError) Error() string {
return fmt.Sprintf("Status: %s, Code: %d", e.Status, e.StatusCode)
}
func quote(word string, buf *bytes.Buffer) {
// Bail out early for "simple" strings
if word != "" && !strings.ContainsAny(word, "\\'\"`${[|&;<>()~*?! \t\n") {
buf.WriteString(word)
return
}
buf.WriteString("'")
for i := 0; i < len(word); i++ {
b := word[i]
if b == '\'' {
// Replace literal ' with a close ', a \', and a open '
buf.WriteString("'\\''")
} else {
buf.WriteByte(b)
}
}
buf.WriteString("'")
}
// Take a list of strings and escape them so they will be handled right
// when passed as arguments to a program via a shell
func ShellQuoteArguments(args []string) string {
var buf bytes.Buffer
for i, arg := range args {
if i != 0 {
buf.WriteByte(' ')
}
quote(arg, &buf)
}
return buf.String()
}
func IsClosedError(err error) bool {
/* This comparison is ugly, but unfortunately, net.go doesn't export errClosing.
* See:
* http://golang.org/src/pkg/net/net.go
* https://code.google.com/p/go/issues/detail?id=4337
* https://groups.google.com/forum/#!msg/golang-nuts/0_aaCvBmOcM/SptmDyX1XJMJ
*/
return strings.HasSuffix(err.Error(), "use of closed network connection")
}
func PartParser(template, data string) (map[string]string, error) {
// ip:public:private
var (
templateParts = strings.Split(template, ":")
parts = strings.Split(data, ":")
out = make(map[string]string, len(templateParts))
)
if len(parts) != len(templateParts) {
return nil, fmt.Errorf("Invalid format to parse. %s should match template %s", data, template)
}
for i, t := range templateParts {
value := ""
if len(parts) > i {
value = parts[i]
}
out[t] = value
}
return out, nil
}
var globalTestID string
// TestDirectory creates a new temporary directory and returns its path.
// The contents of directory at path `templateDir` is copied into the
// new directory.
func TestDirectory(templateDir string) (dir string, err error) {
if globalTestID == "" {
globalTestID = RandomString()[:4]
}
prefix := fmt.Sprintf("docker-test%s-%s-", globalTestID, GetCallerName(2))
if prefix == "" {
prefix = "docker-test-"
}
dir, err = ioutil.TempDir("", prefix)
if err = os.Remove(dir); err != nil {
return
}
if templateDir != "" {
if err = CopyDirectory(templateDir, dir); err != nil {
return
}
}
return
}
// GetCallerName introspects the call stack and returns the name of the
// function `depth` levels down in the stack.
func GetCallerName(depth int) string {
// Use the caller function name as a prefix.
// This helps trace temp directories back to their test.
pc, _, _, _ := runtime.Caller(depth + 1)
callerLongName := runtime.FuncForPC(pc).Name()
parts := strings.Split(callerLongName, ".")
callerShortName := parts[len(parts)-1]
return callerShortName
}
func CopyFile(src, dst string) (int64, error) {
if src == dst {
return 0, nil
}
sf, err := os.Open(src)
if err != nil {
return 0, err
}
defer sf.Close()
if err := os.Remove(dst); err != nil && !os.IsNotExist(err) {
return 0, err
}
df, err := os.Create(dst)
if err != nil {
return 0, err
}
defer df.Close()
return io.Copy(df, sf)
}
// Returns the relative path to the cgroup docker is running in.
func GetThisCgroup(cgroupType string) (string, error) {
output, err := ioutil.ReadFile("/proc/self/cgroup")
if err != nil {
return "", err
}
for _, line := range strings.Split(string(output), "\n") {
parts := strings.Split(line, ":")
// any type used by docker should work
if parts[1] == cgroupType {
return parts[2], nil
}
}
return "", fmt.Errorf("cgroup '%s' not found in /proc/self/cgroup", cgroupType)
}
// Returns a list of pids for the given container.
func GetPidsForContainer(id string) ([]int, error) {
pids := []int{}
// memory is chosen randomly, any cgroup used by docker works
cgroupType := "memory"
cgroupRoot, err := FindCgroupMountpoint(cgroupType)
if err != nil {
return pids, err
}
cgroupThis, err := GetThisCgroup(cgroupType)
if err != nil {
return pids, err
}
filename := filepath.Join(cgroupRoot, cgroupThis, id, "tasks")
if _, err := os.Stat(filename); os.IsNotExist(err) {
		// With more recent lxc versions, the cgroup will be in lxc/
filename = filepath.Join(cgroupRoot, cgroupThis, "lxc", id, "tasks")
}
output, err := ioutil.ReadFile(filename)
if err != nil {
return pids, err
}
for _, p := range strings.Split(string(output), "\n") {
if len(p) == 0 {
continue
}
pid, err := strconv.Atoi(p)
if err != nil {
return pids, fmt.Errorf("Invalid pid '%s': %s", p, err)
}
pids = append(pids, pid)
}
return pids, nil
}
| [
"\"DEBUG\""
]
| []
| [
"DEBUG"
]
| [] | ["DEBUG"] | go | 1 | 0 | |
HackerRank/Algorithms/Implementation/3d-surface-area/Solution.java | import java.io.*;
import java.math.*;
import java.security.*;
import java.text.*;
import java.util.*;
import java.util.concurrent.*;
import java.util.regex.*;
class Result {
/*
* Complete the 'surfaceArea' function below.
*
* The function is expected to return an INTEGER.
* The function accepts 2D_INTEGER_ARRAY A as parameter.
*/
public static int surfaceArea(List<List<Integer>> list) {
int n = list.size(), m = list.get(0).size(), surface;
int[][] mat = new int[n + 2][m + 2];
for (int i = 0; i < n; i++) {
for (int j = 0; j < m; j++) {
mat[i + 1][j + 1] = list.get(i).get(j);
}
}
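        // Top and bottom faces contribute 2 per cell; the exposed side faces are
        // the absolute height differences between horizontally and vertically
        // adjacent cells, with the zero-padded border accounting for the outer walls.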
surface = 2 * n * m;
for (int i = 1; i < n + 2; i++) {
for (int j = 1; j < m + 2; j++) {
surface += Math.abs(mat[i][j] - mat[i - 1][j]);
surface += Math.abs(mat[i][j] - mat[i][j - 1]);
}
}
return surface;
}
}
// https://www.hackerrank.com/challenges/3d-surface-area/problem
public class Solution {
public static void main(String[] args) throws IOException {
BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(System.in));
BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(System.getenv("OUTPUT_PATH")));
String[] firstMultipleInput = bufferedReader.readLine().replaceAll("\\s+$", "").split(" ");
int H = Integer.parseInt(firstMultipleInput[0]);
int W = Integer.parseInt(firstMultipleInput[1]);
List<List<Integer>> A = new ArrayList<>();
for (int i = 0; i < H; i++) {
String[] ARowTempItems = bufferedReader.readLine().replaceAll("\\s+$", "").split(" ");
List<Integer> ARowItems = new ArrayList<>();
for (int j = 0; j < W; j++) {
int AItem = Integer.parseInt(ARowTempItems[j]);
ARowItems.add(AItem);
}
A.add(ARowItems);
}
int result = Result.surfaceArea(A);
bufferedWriter.write(String.valueOf(result));
bufferedWriter.newLine();
bufferedReader.close();
bufferedWriter.close();
}
}
| [
"\"OUTPUT_PATH\""
]
| []
| [
"OUTPUT_PATH"
]
| [] | ["OUTPUT_PATH"] | java | 1 | 0 | |
cmd/vulndb/flags.go | // Copyright (c) Facebook, Inc. and its affiliates.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"fmt"
"os"
"strconv"
"strings"
"time"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/facebookincubator/nvdtools/vulndb"
"github.com/facebookincubator/nvdtools/vulndb/debug"
)
var (
// See https://github.com/go-sql-driver/mysql#dsn-data-source-name for DSN.
// And libfb/go/fbmysql for fbmysql DSN.
gFlagMySQL = os.Getenv("MYSQL")
// General purpose flags.
gFlagOwner = os.Getenv("USER")
gFlagCollector = ""
gFlagProvider = ""
gFlagMetadata = ""
gFlagFormat = "csv"
gFlagDeadline deadlineFlag
gFlagDeleteAll bool
gFlagCSVNoHeader = false
)
func init() {
fs := RootCmd.PersistentFlags()
fs.VarP(&debug.Level, "debug", "v", "set verbosity level")
}
func addRequiredFlags(cmd *cobra.Command, names ...string) {
addFlags(cmd, true, names...)
}
func addOptionalFlags(cmd *cobra.Command, names ...string) {
addFlags(cmd, false, names...)
}
func addFlags(cmd *cobra.Command, required bool, names ...string) {
for _, name := range names {
f, exists := supportedFlags[name]
if !exists {
panic("unsupported flag: " + name)
}
f(cmd.Flags())
if required {
// This requires calling fs.Set for flags that
// have a default value.
cmd.MarkFlagRequired(name)
}
}
}
var supportedFlags = map[string]func(*pflag.FlagSet){
"mysql": func(fs *pflag.FlagSet) {
fs.StringVar(&gFlagMySQL, "mysql", gFlagMySQL, "set mysql dsn (or use $MYSQL)")
if gFlagMySQL != "" {
fs.Set("mysql", gFlagMySQL)
}
},
"owner": func(fs *pflag.FlagSet) {
fs.StringVar(&gFlagOwner, "owner", gFlagOwner, "set owner of the records")
fs.Set("owner", gFlagOwner)
},
"collector": func(fs *pflag.FlagSet) {
fs.StringVar(&gFlagCollector, "collector", gFlagCollector, "set unique name of the data collector")
},
"provider": func(fs *pflag.FlagSet) {
fs.StringVar(&gFlagProvider, "provider", gFlagProvider, "set short name of the data provider")
},
"metadata": func(fs *pflag.FlagSet) {
fs.StringVar(&gFlagMetadata, "metadata", gFlagMetadata, "set metadata")
},
"format": func(fs *pflag.FlagSet) {
fs.StringVar(&gFlagFormat, "format", gFlagFormat, "set output format (csv or nvdcvejson)")
fs.Set("format", gFlagFormat)
},
"deadline": func(fs *pflag.FlagSet) {
fs.Var(&gFlagDeadline, "deadline", fmt.Sprintf("set deadline in absolute time or duration (e.g. %s or 24h, 30d)", vulndb.TimeLayout))
},
"delete_all": func(fs *pflag.FlagSet) {
fs.BoolVarP(&gFlagDeleteAll, "all", "a", gFlagDeleteAll, "delete all records from database")
},
"csv_noheader": func(fs *pflag.FlagSet) {
fs.BoolVarP(&gFlagCSVNoHeader, "csvnoheader", "n", gFlagCSVNoHeader, "omit csv header in output")
},
}
// deadlineFlag implements the pflag.Value interface.
type deadlineFlag struct {
Time time.Time
}
func (d *deadlineFlag) Type() string {
return "string"
}
func (d *deadlineFlag) String() string {
if d.Time.IsZero() {
return ""
}
return d.Time.String()
}
// Set sets v as the deadline's time. Takes same input as time.ParseDuration
// but supports using 'd' (e.g. 30d) for representing days as d*24h.
func (d *deadlineFlag) Set(v string) error {
t, err := vulndb.ParseTime(v)
if err == nil {
d.Time = t
return nil
}
dd, err := time.ParseDuration(v)
if err == nil {
d.Time = time.Now().Add(dd)
return nil
}
idx := strings.Index(v, "d")
if idx < 1 {
return fmt.Errorf("invalid deadline: %q", v)
}
n, err := strconv.Atoi(v[0:idx])
if err != nil {
return fmt.Errorf("invalid deadline: %q", v)
}
dd, _ = time.ParseDuration(strconv.Itoa(n*24) + "h")
d.Time = time.Now().Add(dd)
return nil
}
| [
"\"MYSQL\"",
"\"USER\""
]
| []
| [
"MYSQL",
"USER"
]
| [] | ["MYSQL", "USER"] | go | 2 | 0 | |
src/rdpg-backup-tests/service/service_test.go | package service_test
import (
"crypto/tls"
"encoding/json"
"fmt"
"io/ioutil"
"math/rand"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"time"
"github.com/pborman/uuid"
"github.com/cloudfoundry-incubator/cf-test-helpers/cf"
"github.com/cloudfoundry-incubator/cf-test-helpers/runner"
"github.com/cloudfoundry-incubator/cf-test-helpers/services"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/onsi/gomega/gbytes"
. "github.com/onsi/gomega/gexec"
)
var config = loadConfig()
var httpClient = initHttpClient()
// services.Config Structure required for use of the CF-CLI Go API.
// ServiceName Name of the service to be tested.
// PlanNames Names of the plans to be tested.
// APIPort The port that the RDPG admin API listens on.
// APIUsername The username used for admin API HTTP authentication.
// APIPassword The password used for admin API HTTP authentication.
type backupTestConfig struct {
services.Config
ServiceName string `json:"service_name"`
PlanNames []string `json:"plan_names"`
RestorePlanNames []string `json:"restore_plan_names"`
APIPort int `json:"rdpg_api_port"`
APIUsername string `json:"rdpg_api_username"`
APIPassword string `json:"rdpg_api_password"`
TestQueueBackup bool `json:"test_queue_backup"`
WorkerWaitPeriod int `json:"worker_wait_period"`
BackupWaitPeriod int `json:"backup_wait_period"`
S3Enabled bool `json:"s3_enabled"`
}
// Takes config file from environment variable CONFIG_PATH and parses it as
// JSON into the backupTestConfig structure, which is returned.
func loadConfig() (testConfig backupTestConfig) {
path := os.Getenv("CONFIG_PATH")
if path == "" {
panic("No Config Path was Set!")
}
configFile, err := os.Open(path)
if err != nil {
panic(err)
}
decoder := json.NewDecoder(configFile)
err = decoder.Decode(&testConfig)
if err != nil {
panic(err)
}
return testConfig
}
func initHttpClient() *http.Client {
trans := &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}
return &http.Client{Transport: trans}
}
// Returns the base location string of the launched app in the network.
func appUri(appName string) string {
return "http://" + appName + "." + config.AppsDomain
}
func rdpgUri(clusterLocation string) string {
return fmt.Sprintf("http://%s:%s@%s:%d", config.APIUsername, config.APIPassword,
clusterLocation, config.APIPort)
}
//Returns true if the string is in the given slice. False otherwise.
func sliceContainsString(slice []string, target string) bool {
for _, v := range slice {
if v == target {
return true
}
}
return false
}
//Beginning of the test function.
var _ = Describe("RDPG Service Broker", func() {
var timeout = time.Second * 60
var retryInterval = time.Second / 2
var appPath = "../assets/postgres-test-app"
var appName string
	validChars := []string{"a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9"}
randomNameStartingWithLetter := func() string {
ret := validChars[rand.Int31n(52)]
for i := 0; i < 9; i++ {
ret = ret + validChars[rand.Int31n(62)]
}
return ret
}
randomServiceName := func() string {
return uuid.NewRandom().String()
}
assertAppIsRunning := func(appName string) {
pingUri := appUri(appName) + "/ping"
fmt.Println("Checking that the app is responding at url: ", pingUri)
Eventually(runner.Curl(pingUri, "-k"), config.ScaledTimeout(timeout), retryInterval).Should(Say("SUCCESS"))
fmt.Println("\n")
}
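	// getBackups hits the given RDPG admin API endpoint for dbname and returns
	// that database's backups as a slice of string maps (empty if none exist).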
getBackups := func(uri, dbname string) []map[string]string {
request, err := http.NewRequest("GET", uri+"?dbname="+dbname, nil)
Ω(err).ShouldNot(HaveOccurred())
resp, err := httpClient.Do(request)
Ω(err).ShouldNot(HaveOccurred())
Ω(resp.StatusCode).Should(Equal(http.StatusOK))
backupsMap := make(map[string]interface{})
backupsJSON, err := ioutil.ReadAll(resp.Body)
fmt.Printf("backupsJSON:\n%s", backupsJSON)
Ω(err).ShouldNot(HaveOccurred())
fmt.Println("\n--Unmarshaling JSON")
err = json.Unmarshal(backupsJSON, &backupsMap)
Ω(err).ShouldNot(HaveOccurred())
//If there isn't a backup for this database.....
if backupsMap == nil || len(backupsMap) == 0 || backupsMap[dbname] == nil {
//Then, hand back an empty array
return []map[string]string{}
}
//Otherwise, hand back this database's array of backups.
// Go is annoying and makes me make a new map to return, basically.
retMaps := make([]map[string]string, 0)
for i, m := range backupsMap[dbname].([]interface{}) {
thisMap := m.(map[string]interface{})
retMaps = append(retMaps, map[string]string{})
for k, v := range thisMap {
retMaps[i][k] = v.(string)
}
}
return retMaps
}
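	// For orientation, the backup-list endpoints queried by getBackups are assumed to return
	// JSON shaped roughly like the sketch below (values invented); only the "Name" and
	// "Bytes" keys are relied on elsewhere in this test:
	//
	//   {"mydb": [{"Name": "20160101120000", "Bytes": "4096"}]}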
getLocalBackups := func(location, dbname string) []map[string]string {
uri := rdpgUri(location) + "/backup/list/local"
return getBackups(uri, dbname)
}
getRemoteBackups := func(location, dbname string) []map[string]string {
uri := rdpgUri(location) + "/backup/list/remote"
return getBackups(uri, dbname)
}
backupInList := func(target string, list []map[string]string) bool {
for _, v := range list {
if v["Name"] == target {
return true
}
}
return false
}
assertNewBackup := func(oldList, newList []map[string]string) {
//Check that the newest backup is... newer... than the old-newest backup.
//Note that the name of the backup is a timestamp
//First condition: If there are no backups... there is no new backup.
cond := len(newList) != 0
Ω(cond).Should(BeTrue())
		// If there were no old backups, the existence of a backup now means that one was made.
// Otherwise, check their names, which are timestamps, and assert that the most recent one
// in the newList is newer than that of the oldList.
cond = len(oldList) == 0 ||
newList[len(newList)-1]["Name"] > oldList[len(oldList)-1]["Name"]
Ω(cond).Should(BeTrue())
//...And that the new backup file isn't empty
numBytes, err := strconv.Atoi(newList[len(newList)-1]["Bytes"])
Ω(err).ShouldNot(HaveOccurred())
cond = numBytes > 0
Ω(cond).Should(BeTrue())
}
BeforeSuite(func() {
config.TimeoutScale = 3
services.NewContext(config.Config, "rdpg-postgres-smoke-test").Setup()
})
AssertBackupBehavior := func(planName string) {
serviceInstanceName := randomServiceName()
serviceCreated := false
serviceBound := false
appName = randomServiceName()
It("Can create a service and bind an app", func() {
Eventually(cf.Cf("push", appName, "-m", "256M", "-p", appPath, "-s", "cflinuxfs2", "--no-start"), config.ScaledTimeout(timeout)).Should(Exit(0))
Eventually(cf.Cf("create-service", config.ServiceName, planName, serviceInstanceName), config.ScaledTimeout(timeout)).Should(Exit(0))
serviceCreated = true
Eventually(cf.Cf("bind-service", appName, serviceInstanceName), config.ScaledTimeout(timeout)).Should(Exit(0))
serviceBound = true
Eventually(cf.Cf("start", appName), config.ScaledTimeout(5*time.Minute)).Should(Exit(0))
assertAppIsRunning(appName)
})
It(fmt.Sprintf("successfully creates backups on service cluster for plan %s", planName), func() {
//Successful endpoint calls respond 200 and their first line is "SUCCESS"
//Let's first confirm that the application was able to get the uri of the database.
uri := appUri(appName) + "/uri"
fmt.Println("\n--Checking if the application received a database uri")
Eventually(runner.Curl(uri, "-k", "-X", "GET"), config.ScaledTimeout(timeout), retryInterval).Should(Say("SUCCESS"))
//If we can get a timestamp, we are connected to the database
uri = appUri(appName) + "/timestamp"
fmt.Println("\n--Checking that the a connection to the database can be made")
Eventually(runner.Curl(uri, "-k", "-X", "GET"), config.ScaledTimeout(timeout), retryInterval).Should(Say("SUCCESS"))
//Inserting a table into the database.
//This is less of a test and more of setting up a later restore test.
//But it doesn't hurt as a test... basic functionality
firstTableName := randomNameStartingWithLetter()
fmt.Printf("--Creating new table: public.%s\n", firstTableName)
sql := fmt.Sprintf("CREATE TABLE public.%s (key varchar(255) PRIMARY KEY, value int);", firstTableName)
uri = appUri(appName) + "/exec"
Eventually(runner.Curl(uri, "-k", "-X", "POST", "-d", "sql="+sql), config.ScaledTimeout(timeout), retryInterval).Should(Say("SUCCESS"))
fmt.Println("--Verifying that newly created table is present.")
sql = fmt.Sprintf("SELECT * FROM public.%s;", firstTableName)
Eventually(runner.Curl(uri, "-k", "-X", "POST", "-d", "sql="+sql), config.ScaledTimeout(timeout), retryInterval).Should(Say("SUCCESS"))
uri = appUri(appName) + "/uri/location"
fmt.Println("\n--Getting the location of the targeted database")
locationBuffer := runner.Curl(uri, "-k", "-X", "GET").Buffer()
Eventually(locationBuffer, config.ScaledTimeout(timeout), retryInterval).Should(Say("SUCCESS"))
location := strings.TrimPrefix(string(locationBuffer.Contents()), "SUCCESS\n")
uri = appUri(appName) + "/uri/dbname"
fmt.Println("\n--Getting the name of the targeted database")
nameBuffer := runner.Curl(uri, "-k", "-X", "GET").Buffer()
Eventually(nameBuffer, config.ScaledTimeout(timeout), retryInterval).Should(Say("SUCCESS"))
dbname := strings.TrimPrefix(string(nameBuffer.Contents()), "SUCCESS\n")
fmt.Println("\n--Getting the list of preexisting backups")
oldBackups := getLocalBackups(location, dbname)
uri = rdpgUri(location) + "/backup/now"
fmt.Println("\n--Waiting before directly initiating a backup")
time.Sleep(time.Duration(config.BackupWaitPeriod) * time.Second)
fmt.Println("\n--Directly initiating a backup")
resp, err := httpClient.PostForm(uri, url.Values{"dbname": {dbname}})
Ω(err).ShouldNot(HaveOccurred())
Ω(resp.StatusCode).Should(Equal(http.StatusOK))
fmt.Println("\n--Checking the list of backups again")
newBackups := getLocalBackups(location, dbname)
assertNewBackup(oldBackups, newBackups)
if config.TestQueueBackup {
uri = rdpgUri(location) + "/backup/enqueue"
fmt.Println("\n--Enqueuing a backup with RDPG's task system")
resp, err = httpClient.PostForm(uri, url.Values{"dbname": {dbname}})
Ω(err).ShouldNot(HaveOccurred())
Ω(resp.StatusCode).Should(Equal(http.StatusOK))
fmt.Printf("\n--Waiting for %d seconds before checking to see if backup was completed.\n", config.WorkerWaitPeriod)
time.Sleep(time.Duration(config.WorkerWaitPeriod) * time.Second)
fmt.Println("\n--Checking if backup is present in local backups")
oldBackups = newBackups
newBackups := getLocalBackups(location, dbname)
assertNewBackup(oldBackups, newBackups)
} else {
fmt.Println("\n--SKIPPING QUEUE PORTION OF BACKUP TEST")
}
if sliceContainsString(config.RestorePlanNames, planName) {
fmt.Printf("--Testing in-place restores for plan %s\n", planName)
currentBackup := newBackups[len(newBackups)-1]["Name"]
//Make a change.
secondTableName := ""
for secondTableName == "" || secondTableName == firstTableName {
secondTableName = randomNameStartingWithLetter()
}
fmt.Printf("--Creating new table: public.%s\n", secondTableName)
sql = fmt.Sprintf("CREATE TABLE public.%s (key varchar(255) PRIMARY KEY, value int);", secondTableName)
uri = appUri(appName) + "/exec"
Eventually(runner.Curl(uri, "-k", "-X", "POST", "-d", "sql="+sql), config.ScaledTimeout(timeout), retryInterval).Should(Say("SUCCESS"))
fmt.Println("--Verifying that newly created table is present.")
sql = fmt.Sprintf("SELECT * FROM public.%s;", secondTableName)
Eventually(runner.Curl(uri, "-k", "-X", "POST", "-d", "sql="+sql), config.ScaledTimeout(timeout), retryInterval).Should(Say("SUCCESS"))
//Let's do a restore to revert that change.
uri = rdpgUri(location) + "/restore/inplace"
fmt.Println("\n--Initiating a restore.")
resp, err = httpClient.PostForm(uri, url.Values{"dbname": {dbname}, "filename": {currentBackup}})
Ω(err).ShouldNot(HaveOccurred())
Ω(resp.StatusCode).Should(Equal(http.StatusOK))
uri = appUri(appName) + "/exec"
//Is the database actually restored? Or just deleted?
fmt.Println("\n--Verifying that the table that was made before the backup can still be accessed")
sql = fmt.Sprintf("SELECT * FROM public.%s;", firstTableName)
Eventually(runner.Curl(uri, "-k", "-X", "POST", "-d", "sql="+sql), config.ScaledTimeout(timeout), retryInterval).Should(Say("SUCCESS"))
//Let's make sure the newer changes were reverted.
fmt.Println("\n--Verifying that previously created table is no longer present. THE CURL SHOULD RETURN FAILURE!")
sql = fmt.Sprintf("SELECT * FROM public.%s;", secondTableName)
Eventually(runner.Curl(uri, "-k", "-X", "POST", "-d", "sql="+sql), config.ScaledTimeout(timeout), retryInterval).Should(Say("FAILURE"))
} else {
fmt.Printf("SKIPPING RESTORE TESTS FOR PLAN %s. NOT ENABLED IN CONFIG\n", planName)
}
//Test S3 Storage
if config.S3Enabled {
currentBackup := newBackups[len(newBackups)-1]["Name"]
//Move the backup up to the remote storage.
uri := rdpgUri(location) + "/backup/remote/copyto"
fmt.Println("\n--Moving backups up to remote storage.")
request, err := http.NewRequest("PUT", uri, nil)
Ω(err).ShouldNot(HaveOccurred())
request.Form = url.Values{"dbname": {dbname}, "filename": {currentBackup}}
resp, err = httpClient.Do(request)
Ω(err).ShouldNot(HaveOccurred())
Ω(resp.StatusCode).Should(Equal(http.StatusOK))
//Now check to see that the backup is actually up on remote storage.
uri = rdpgUri(location) + "/backup/list/remote"
fmt.Println("\n--Checking to see that the backup is in the cloud.")
remoteBackups := getRemoteBackups(location, dbname)
Ω(backupInList(currentBackup, remoteBackups)).Should(BeTrue())
}
})
It("Can unbind and delete the service", func() {
if serviceBound {
Eventually(cf.Cf("unbind-service", appName, serviceInstanceName), config.ScaledTimeout(timeout)).Should(Exit(0))
}
if serviceCreated {
Eventually(cf.Cf("delete-service", "-f", serviceInstanceName), config.ScaledTimeout(timeout)).Should(Exit(0))
}
})
}
Context("for each plan", func() {
for _, planName := range config.PlanNames {
AssertBackupBehavior(planName)
}
})
})
| [
"\"CONFIG_PATH\""
]
| []
| [
"CONFIG_PATH"
]
| [] | ["CONFIG_PATH"] | go | 1 | 0 | |
QQP.py | #! -*- coding:utf-8 -*-
# https://github.com/nishiwen1214/GLUE-bert4keras
# Sentence-pair classification task on the QQP dataset
# val_acc: 88.7071, test_acc: 87.0320
from bert4keras.backend import keras, set_gelu, K
from bert4keras.tokenizers import Tokenizer
from bert4keras.models import build_transformer_model
from bert4keras.optimizers import Adam
from bert4keras.snippets import sequence_padding, DataGenerator
from bert4keras.snippets import open
from keras.layers import Dropout, Dense
from sklearn import metrics
import numpy as np
import os
from tqdm import tqdm
import csv
# Select which GPU card to use; '0' is the first card
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
set_gelu('tanh')  # switch the gelu version
maxlen = 128
batch_size = 32
config_path = './uncased_L-12_H-768_A-12/bert_config.json'
checkpoint_path = './uncased_L-12_H-768_A-12/bert_model.ckpt'
dict_path = './uncased_L-12_H-768_A-12/vocab.txt'
def load_data(filename):
"""加载数据
单条格式:(文本1, 文本2, 标签id)
"""
D = []
i = 1
with open(filename, encoding='utf-8') as f:
for l in f:
            if i == 1:  # skip the first (header) line of the data
i = 2
else:
_,_,_, text1, text2, label = l.strip().split('\t')
D.append((text1, text2, int(label)))
return D
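# For orientation: the GLUE QQP train/dev files are tab-separated with a header row, and
# the unpacking above assumes the usual column order (id, qid1, qid2, question1,
# question2, is_duplicate). A made-up example row would look like:
#   133273\t213221\t213222\tHow do I read a TSV file?\tHow can TSV files be read?\t1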
def load_data_test(filename):
"""加载数据
单条格式:(文本1, 文本2, 标签id)
"""
D = []
i = 1
with open(filename, encoding='utf-8') as f:
for l in f:
            if i == 1:  # skip the first (header) line of the data
i = 2
else:
_, text1, text2 = l.strip().split('\t')
D.append((text1, text2, 0))
return D
# Load the datasets
train_data = load_data(
'./datasets/QQP/train.tsv'
)
valid_data = load_data(
'./datasets/QQP/dev.tsv'
)
# Build the tokenizer
tokenizer = Tokenizer(dict_path, do_lower_case=True)
class data_generator(DataGenerator):
"""数据生成器
"""
def __iter__(self, random=False):
batch_token_ids, batch_segment_ids, batch_labels = [], [], []
for is_end, (text1, text2, label) in self.sample(random):
token_ids, segment_ids = tokenizer.encode(
text1, text2, maxlen=maxlen
)
batch_token_ids.append(token_ids)
batch_segment_ids.append(segment_ids)
batch_labels.append([label])
if len(batch_token_ids) == self.batch_size or is_end:
batch_token_ids = sequence_padding(batch_token_ids)
batch_segment_ids = sequence_padding(batch_segment_ids)
batch_labels = sequence_padding(batch_labels)
yield [batch_token_ids, batch_segment_ids], batch_labels
batch_token_ids, batch_segment_ids, batch_labels = [], [], []
# Load the pre-trained model
bert = build_transformer_model(
config_path=config_path,
checkpoint_path=checkpoint_path,
with_pool=True,
return_keras_model=False,
)
output = Dropout(rate=0.1)(bert.model.output)
output = Dense(
units=2, activation='softmax', kernel_initializer=bert.initializer
)(output)
model = keras.models.Model(bert.model.input, output)
model.summary()
model.compile(
loss='sparse_categorical_crossentropy',
    optimizer=Adam(2e-5),  # use a sufficiently small learning rate
metrics=['accuracy'],
)
# Convert the datasets into generators
train_generator = data_generator(train_data, batch_size)
valid_generator = data_generator(valid_data, batch_size)
def evaluate(data):
total, right = 0., 0.
y_true_all = np.array([], dtype=int)
y_pred_all = np.array([], dtype=int)
for x_true, y_true in data:
y_pred = model.predict(x_true).argmax(axis=1)
y_true = y_true[:, 0]
y_pred_all = np.append(y_pred_all, y_pred)
y_true_all = np.append(y_true_all, y_true)
total += len(y_true)
right += (y_true == y_pred).sum()
f1 = metrics.f1_score(y_true_all,y_pred_all)
return right / total, f1
class Evaluator(keras.callbacks.Callback):
"""评估与保存
"""
def __init__(self):
self.best_val_acc = 0.
def on_epoch_end(self, epoch, logs=None):
val_acc, f1 = evaluate(valid_generator)
if val_acc > self.best_val_acc:
self.best_val_acc = val_acc
model.save_weights('best_model_QQP.weights')
print(
u'val_acc: %.5f, best_val_acc: %.5f, F1: %.5f\n' %
(val_acc, self.best_val_acc, f1)
)
def test_predict(in_file, out_file):
"""输出测试结果到文件
结果文件可以提交到 https://gluebenchmark.com 评测。
"""
test_data = load_data_test(in_file)
test_generator = data_generator(test_data, batch_size)
results = []
for x_true, _ in tqdm(test_generator, ncols=0):
y_pred = model.predict(x_true).argmax(axis=1)
results.extend(y_pred)
with open(out_file,'w',encoding='utf-8') as f:
csv_writer = csv.writer(f, delimiter='\t')
csv_writer.writerow(["index","prediction"])
        # write the tsv file contents
for i, pred in enumerate(results):
csv_writer.writerow([i,pred])
        # close the file
f.close()
if __name__ == '__main__':
evaluator = Evaluator()
model.fit(
train_generator.forfit(),
steps_per_epoch=len(train_generator),
epochs=10,
callbacks=[evaluator]
)
model.load_weights('best_model_QQP.weights')
    # predict on the test set and write the results to a file
test_predict(
in_file = './datasets/QQP/test.tsv',
out_file = './results/QQP.tsv'
)
else:
model.load_weights('best_model_QQP.weights')
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
tema1/gym-master/gym/envs/tests/spec_list.py | from gym import envs, logger
import os
def should_skip_env_spec_for_tests(spec):
# We skip tests for envs that require dependencies or are otherwise
# troublesome to run frequently
ep = spec._entry_point
# Skip mujoco tests for pull request CI
skip_mujoco = not (os.environ.get('MUJOCO_KEY_BUNDLE') or os.path.exists(os.path.expanduser('~/.mujoco/mjkey.txt')))
if skip_mujoco and (ep.startswith('gym.envs.mujoco:') or ep.startswith('gym.envs.robotics:')):
return True
if ( 'GoEnv' in ep or
'HexEnv' in ep or
(ep.startswith("gym.envs.atari") and not spec.id.startswith("Pong") and not spec.id.startswith("Seaquest"))
):
logger.warn("Skipping tests for env {}".format(ep))
return True
return False
spec_list = [spec for spec in sorted(envs.registry.all(), key=lambda x: x.id) if spec._entry_point is not None and not should_skip_env_spec_for_tests(spec)]
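# In short: mujoco/robotics specs are only kept when MUJOCO_KEY_BUNDLE is set or
# ~/.mujoco/mjkey.txt exists, GoEnv/HexEnv specs are always skipped, and Atari specs
# other than Pong* and Seaquest* are skipped as well.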
| []
| []
| [
"MUJOCO_KEY_BUNDLE"
]
| [] | ["MUJOCO_KEY_BUNDLE"] | python | 1 | 0 | |
config/wsgi.py | """
WSGI config for Diamat project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Diamat.settings')
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
contrib/spendfrom/spendfrom.py | #!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the bitcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Bitcoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Bitcoin")
return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
"""Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
return dict(config_parser.items("all"))
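# For reference, a minimal bitcoin.conf that this parser handles might look like the
# following (placeholder credentials; rpcport and testnet are optional and are filled
# in / interpreted by connect_JSON below):
#   rpcuser=spendfrom
#   rpcpassword=change-me
#   testnet=0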
def connect_JSON(config):
"""Connect to a bitcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 118823 if testnet else 18823
connect = "http://%s:%[email protected]:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the bitcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(bitcoind):
info = bitcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bitcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = bitcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
address_summary = dict()
address_to_account = dict()
for info in bitcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bitcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = bitcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bitcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to bitcoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bitcoind.createrawtransaction(inputs, outputs)
signed_rawtx = bitcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(bitcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bitcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = bitcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(bitcoind, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
        fee = total_in - total_out  # the fee implied by this transaction
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bitcoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send bitcoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of bitcoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
bitcoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(bitcoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(bitcoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = bitcoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
| []
| []
| [
"APPDATA"
]
| [] | ["APPDATA"] | python | 1 | 0 | |
util/progress/progressui/printer.go | package progressui
import (
"container/ring"
"context"
"fmt"
"io"
"os"
"sort"
"strings"
"time"
digest "github.com/opencontainers/go-digest"
"github.com/tonistiigi/units"
)
const antiFlicker = 5 * time.Second
const maxDelay = 10 * time.Second
const minTimeDelta = 5 * time.Second
const minProgressDelta = 0.05 // %
const logsBufferSize = 10
type lastStatus struct {
Current int64
Timestamp time.Time
}
type textMux struct {
w io.Writer
current digest.Digest
last map[string]lastStatus
notFirst bool
nextIndex int
}
func (p *textMux) printVtx(t *trace, dgst digest.Digest) {
if p.last == nil {
p.last = make(map[string]lastStatus)
}
v, ok := t.byDigest[dgst]
if !ok {
return
}
if v.index == 0 {
p.nextIndex++
v.index = p.nextIndex
}
if dgst != p.current {
if p.current != "" {
old := t.byDigest[p.current]
if old.logsPartial {
fmt.Fprintln(p.w, "")
}
old.logsOffset = 0
old.count = 0
fmt.Fprintf(p.w, "#%d ...\n", old.index)
}
if p.notFirst {
fmt.Fprintln(p.w, "")
} else {
p.notFirst = true
}
if os.Getenv("PROGRESS_NO_TRUNC") == "0" {
fmt.Fprintf(p.w, "#%d %s\n", v.index, limitString(v.Name, 72))
} else {
fmt.Fprintf(p.w, "#%d %s\n", v.index, v.Name)
}
}
if len(v.events) != 0 {
v.logsOffset = 0
}
for _, ev := range v.events {
fmt.Fprintf(p.w, "#%d %s\n", v.index, ev)
}
v.events = v.events[:0]
isOpenStatus := false // remote cache loading can currently produce status updates without active vertex
for _, s := range v.statuses {
if _, ok := v.statusUpdates[s.ID]; ok {
doPrint := true
if last, ok := p.last[s.ID]; ok && s.Completed == nil {
var progressDelta float64
if s.Total > 0 {
progressDelta = float64(s.Current-last.Current) / float64(s.Total)
}
timeDelta := s.Timestamp.Sub(last.Timestamp)
if progressDelta < minProgressDelta && timeDelta < minTimeDelta {
doPrint = false
}
}
if !doPrint {
continue
}
p.last[s.ID] = lastStatus{
Timestamp: s.Timestamp,
Current: s.Current,
}
var bytes string
if s.Total != 0 {
bytes = fmt.Sprintf(" %.2f / %.2f", units.Bytes(s.Current), units.Bytes(s.Total))
} else if s.Current != 0 {
bytes = fmt.Sprintf(" %.2f", units.Bytes(s.Current))
}
var tm string
endTime := s.Timestamp
if s.Completed != nil {
endTime = *s.Completed
}
if s.Started != nil {
diff := endTime.Sub(*s.Started).Seconds()
if diff > 0.01 {
tm = fmt.Sprintf(" %.1fs", diff)
}
}
if s.Completed != nil {
tm += " done"
} else {
isOpenStatus = true
}
fmt.Fprintf(p.w, "#%d %s%s%s\n", v.index, s.ID, bytes, tm)
}
}
v.statusUpdates = map[string]struct{}{}
for _, w := range v.warnings[v.warningIdx:] {
fmt.Fprintf(p.w, "#%d WARN: %s\n", v.index, w.Short)
v.warningIdx++
}
for i, l := range v.logs {
if i == 0 {
l = l[v.logsOffset:]
}
fmt.Fprintf(p.w, "%s", []byte(l))
if i != len(v.logs)-1 || !v.logsPartial {
fmt.Fprintln(p.w, "")
}
if v.logsBuffer == nil {
v.logsBuffer = ring.New(logsBufferSize)
}
v.logsBuffer.Value = l
if !v.logsPartial {
v.logsBuffer = v.logsBuffer.Next()
}
}
if len(v.logs) > 0 {
if v.logsPartial {
v.logs = v.logs[len(v.logs)-1:]
v.logsOffset = len(v.logs[0])
} else {
v.logs = nil
v.logsOffset = 0
}
}
p.current = dgst
if v.Completed != nil && !isOpenStatus {
p.current = ""
v.count = 0
if v.Error != "" {
if v.logsPartial {
fmt.Fprintln(p.w, "")
}
if strings.HasSuffix(v.Error, context.Canceled.Error()) {
fmt.Fprintf(p.w, "#%d CANCELED\n", v.index)
} else {
fmt.Fprintf(p.w, "#%d ERROR: %s\n", v.index, v.Error)
}
} else if v.Cached {
fmt.Fprintf(p.w, "#%d CACHED\n", v.index)
} else {
tm := ""
if v.Started != nil {
tm = fmt.Sprintf(" %.1fs", v.Completed.Sub(*v.Started).Seconds())
}
fmt.Fprintf(p.w, "#%d DONE%s\n", v.index, tm)
}
}
delete(t.updates, dgst)
}
func sortCompleted(t *trace, m map[digest.Digest]struct{}) []digest.Digest {
out := make([]digest.Digest, 0, len(m))
for k := range m {
out = append(out, k)
}
sort.Slice(out, func(i, j int) bool {
return t.byDigest[out[i]].Completed.Before(*t.byDigest[out[j]].Completed)
})
return out
}
func (p *textMux) print(t *trace) {
completed := map[digest.Digest]struct{}{}
rest := map[digest.Digest]struct{}{}
for dgst := range t.updates {
v, ok := t.byDigest[dgst]
if !ok {
continue
}
if v.Vertex.Completed != nil {
completed[dgst] = struct{}{}
} else {
rest[dgst] = struct{}{}
}
}
current := p.current
// items that have completed need to be printed first
if _, ok := completed[current]; ok {
p.printVtx(t, current)
}
for _, dgst := range sortCompleted(t, completed) {
if dgst != current {
p.printVtx(t, dgst)
}
}
if len(rest) == 0 {
if current != "" {
if v := t.byDigest[current]; v.Started != nil && v.Completed == nil {
return
}
}
// make any open vertex active
for dgst, v := range t.byDigest {
if v.Started != nil && v.Completed == nil {
p.printVtx(t, dgst)
return
}
}
return
}
// now print the active one
if _, ok := rest[current]; ok {
p.printVtx(t, current)
}
stats := map[digest.Digest]*vtxStat{}
now := time.Now()
sum := 0.0
var max digest.Digest
if current != "" {
rest[current] = struct{}{}
}
for dgst := range rest {
v, ok := t.byDigest[dgst]
if !ok {
continue
}
tm := now.Sub(*v.lastBlockTime)
speed := float64(v.count) / tm.Seconds()
overLimit := tm > maxDelay && dgst != current
stats[dgst] = &vtxStat{blockTime: tm, speed: speed, overLimit: overLimit}
sum += speed
if overLimit || max == "" || stats[max].speed < speed {
max = dgst
}
}
for dgst := range stats {
stats[dgst].share = stats[dgst].speed / sum
}
if _, ok := completed[current]; ok || current == "" {
p.printVtx(t, max)
return
}
// show items that were hidden
for dgst := range rest {
if stats[dgst].overLimit {
p.printVtx(t, dgst)
return
}
}
// fair split between vertexes
if 1.0/(1.0-stats[current].share)*antiFlicker.Seconds() < stats[current].blockTime.Seconds() {
p.printVtx(t, max)
return
}
}
type vtxStat struct {
blockTime time.Duration
speed float64
share float64
overLimit bool
}
func limitString(s string, l int) string {
if len(s) > l {
return s[:l] + "..."
}
return s
}
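// Usage note (derived from printVtx above): vertex names are truncated to 72 characters
// only when the PROGRESS_NO_TRUNC environment variable is explicitly set to "0"; any
// other value, including leaving it unset, prints the full name.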
| [
"\"PROGRESS_NO_TRUNC\""
]
| []
| [
"PROGRESS_NO_TRUNC"
]
| [] | ["PROGRESS_NO_TRUNC"] | go | 1 | 0 | |
cmd/server/main.go | // Copyright 2018 The Moov Authors
// Use of this source code is governed by an Apache License
// license that can be found in the LICENSE file.
package main
import (
"context"
"crypto/tls"
"flag"
"fmt"
"net/http"
"os"
"os/signal"
"path/filepath"
"strings"
"syscall"
"time"
"github.com/moov-io/base/admin"
moovhttp "github.com/moov-io/base/http"
"github.com/moov-io/base/http/bind"
"github.com/moov-io/ofac"
"github.com/moov-io/ofac/internal/database"
"github.com/go-kit/kit/log"
"github.com/gorilla/mux"
)
var (
httpAddr = flag.String("http.addr", bind.HTTP("ofac"), "HTTP listen address")
adminAddr = flag.String("admin.addr", bind.Admin("ofac"), "Admin HTTP listen address")
flagLogFormat = flag.String("log.format", "", "Format for log lines (Options: json, plain")
ofacDataRefreshInterval = 12 * time.Hour
)
func main() {
flag.Parse()
var logger log.Logger
if v := os.Getenv("LOG_FORMAT"); v != "" {
*flagLogFormat = v
}
if strings.ToLower(*flagLogFormat) == "json" {
logger = log.NewJSONLogger(os.Stderr)
} else {
logger = log.NewLogfmtLogger(os.Stderr)
}
logger = log.With(logger, "ts", log.DefaultTimestampUTC)
logger = log.With(logger, "caller", log.DefaultCaller)
logger.Log("startup", fmt.Sprintf("Starting ofac server version %s", ofac.Version))
// Channel for errors
errs := make(chan error)
go func() {
c := make(chan os.Signal, 1)
signal.Notify(c, syscall.SIGINT, syscall.SIGTERM)
errs <- fmt.Errorf("%s", <-c)
}()
// Setup database connection
db, err := database.New(logger, os.Getenv("DATABASE_TYPE"))
if err != nil {
logger.Log("main", err)
os.Exit(1)
}
defer func() {
if err := db.Close(); err != nil {
logger.Log("main", err)
}
}()
// Setup business HTTP routes
router := mux.NewRouter()
moovhttp.AddCORSHandler(router)
addPingRoute(router)
// Start business HTTP server
readTimeout, _ := time.ParseDuration("30s")
writTimeout, _ := time.ParseDuration("30s")
idleTimeout, _ := time.ParseDuration("60s")
// Check to see if our -http.addr flag has been overridden
if v := os.Getenv("HTTP_BIND_ADDRESS"); v != "" {
*httpAddr = v
}
serve := &http.Server{
Addr: *httpAddr,
Handler: router,
TLSConfig: &tls.Config{
InsecureSkipVerify: false,
PreferServerCipherSuites: true,
MinVersion: tls.VersionTLS12,
},
ReadTimeout: readTimeout,
WriteTimeout: writTimeout,
IdleTimeout: idleTimeout,
}
shutdownServer := func() {
if err := serve.Shutdown(context.TODO()); err != nil {
logger.Log("shutdown", err)
}
}
// Check to see if our -admin.addr flag has been overridden
if v := os.Getenv("HTTP_ADMIN_BIND_ADDRESS"); v != "" {
*adminAddr = v
}
// Start Admin server (with Prometheus metrics)
adminServer := admin.NewServer(*adminAddr)
go func() {
logger.Log("admin", fmt.Sprintf("listening on %s", adminServer.BindAddr()))
if err := adminServer.Listen(); err != nil {
err = fmt.Errorf("problem starting admin http: %v", err)
logger.Log("admin", err)
errs <- err
}
}()
defer adminServer.Shutdown()
// Setup download repository
downloadRepo := &sqliteDownloadRepository{db, logger}
defer downloadRepo.close()
searcher := &searcher{logger: logger}
// Add manual OFAC data refresh endpoint
adminServer.AddHandler(manualRefreshPath, manualRefreshHandler(logger, searcher, downloadRepo))
// Initial download of OFAC data
if stats, err := searcher.refreshData(os.Getenv("INITIAL_DATA_DIRECTORY")); err != nil {
logger.Log("main", fmt.Sprintf("ERROR: failed to download/parse initial OFAC data: %v", err))
os.Exit(1)
} else {
downloadRepo.recordStats(stats)
logger.Log("main", fmt.Sprintf("OFAC data refreshed - Addresses=%d AltNames=%d SDNs=%d DeniedPersons=%d", stats.Addresses, stats.Alts, stats.SDNs, stats.DeniedPersons))
}
// Setup Watch and Webhook database wrapper
watchRepo := &sqliteWatchRepository{db, logger}
defer watchRepo.close()
webhookRepo := &sqliteWebhookRepository{db}
defer webhookRepo.close()
// Setup company / customer repositories
companyRepo := &sqliteCompanyRepository{db, logger}
defer companyRepo.close()
custRepo := &sqliteCustomerRepository{db, logger}
defer custRepo.close()
// Setup periodic download and re-search
updates := make(chan *downloadStats)
ofacDataRefreshInterval = getOFACRefreshInterval(logger, os.Getenv("OFAC_DATA_REFRESH"))
go searcher.periodicDataRefresh(ofacDataRefreshInterval, downloadRepo, updates)
go searcher.spawnResearching(logger, companyRepo, custRepo, watchRepo, webhookRepo, updates)
// Add searcher for HTTP routes
addCompanyRoutes(logger, router, searcher, companyRepo, watchRepo)
addCustomerRoutes(logger, router, searcher, custRepo, watchRepo)
addSDNRoutes(logger, router, searcher)
addSearchRoutes(logger, router, searcher)
addDownloadRoutes(logger, router, downloadRepo)
addValuesRoutes(logger, router, searcher)
// Setup our web UI to be served as well
setupWebui(logger, router)
// Start business logic HTTP server
go func() {
if certFile, keyFile := os.Getenv("HTTPS_CERT_FILE"), os.Getenv("HTTPS_KEY_FILE"); certFile != "" && keyFile != "" {
logger.Log("startup", fmt.Sprintf("binding to %s for secure HTTP server", *httpAddr))
if err := serve.ListenAndServeTLS(certFile, keyFile); err != nil {
logger.Log("exit", err)
}
} else {
logger.Log("startup", fmt.Sprintf("binding to %s for HTTP server", *httpAddr))
if err := serve.ListenAndServe(); err != nil {
logger.Log("exit", err)
}
}
}()
// Block/Wait for an error
if err := <-errs; err != nil {
shutdownServer()
logger.Log("exit", err)
}
}
func addPingRoute(r *mux.Router) {
r.Methods("GET").Path("/ping").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
moovhttp.SetAccessControlAllowHeaders(w, r.Header.Get("Origin"))
w.Header().Set("Content-Type", "text/plain")
w.WriteHeader(http.StatusOK)
w.Write([]byte("PONG"))
})
}
// getOFACRefreshInterval returns a time.Duration for how often OFAC should refresh data
//
// env is the value from an environmental variable
func getOFACRefreshInterval(logger log.Logger, env string) time.Duration {
if env != "" {
if strings.EqualFold(env, "off") {
return 0 * time.Second
}
if dur, _ := time.ParseDuration(env); dur > 0 {
logger.Log("main", fmt.Sprintf("Setting OFAC data refresh interval to %v", dur))
return dur
}
}
logger.Log("main", fmt.Sprintf("Setting OFAC data refresh interval to %v (default)", ofacDataRefreshInterval))
return ofacDataRefreshInterval
}
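// For example (illustrative values only): OFAC_DATA_REFRESH=6h yields a six-hour refresh
// interval, OFAC_DATA_REFRESH=off yields a zero interval (used to switch the periodic
// refresh off), and an unset or unparseable value keeps the 12h default declared above.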
func setupWebui(logger log.Logger, r *mux.Router) {
dir := os.Getenv("WEB_ROOT")
if dir == "" {
dir = filepath.Join("webui", "build")
}
if _, err := os.Stat(dir); err != nil {
logger.Log("main", fmt.Sprintf("problem with webui=%s: %v", dir, err))
os.Exit(1)
}
r.PathPrefix("/").Handler(http.FileServer(http.Dir(dir)))
}
| [
"\"LOG_FORMAT\"",
"\"DATABASE_TYPE\"",
"\"HTTP_BIND_ADDRESS\"",
"\"HTTP_ADMIN_BIND_ADDRESS\"",
"\"INITIAL_DATA_DIRECTORY\"",
"\"OFAC_DATA_REFRESH\"",
"\"HTTPS_CERT_FILE\"",
"\"HTTPS_KEY_FILE\"",
"\"WEB_ROOT\""
]
| []
| [
"INITIAL_DATA_DIRECTORY",
"HTTP_BIND_ADDRESS",
"HTTP_ADMIN_BIND_ADDRESS",
"HTTPS_KEY_FILE",
"OFAC_DATA_REFRESH",
"WEB_ROOT",
"LOG_FORMAT",
"DATABASE_TYPE",
"HTTPS_CERT_FILE"
]
| [] | ["INITIAL_DATA_DIRECTORY", "HTTP_BIND_ADDRESS", "HTTP_ADMIN_BIND_ADDRESS", "HTTPS_KEY_FILE", "OFAC_DATA_REFRESH", "WEB_ROOT", "LOG_FORMAT", "DATABASE_TYPE", "HTTPS_CERT_FILE"] | go | 9 | 0 | |
src/build.go | package main
import (
"archive/tar"
"archive/zip"
"bufio"
"bytes"
"compress/gzip"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
)
func main() {
if err := start(); err != nil {
fmt.Printf("Error: %v\n", err)
os.Exit(2)
}
}
var debugEnabled = false
func start() error {
if runtime.GOOS != "windows" && runtime.GOOS != "linux" {
return fmt.Errorf("Unsupported OS '%v'", runtime.GOOS)
}
if runtime.GOARCH != "amd64" {
return fmt.Errorf("Unsupported OS '%v' or arch '%v'", runtime.GOOS, runtime.GOARCH)
}
if len(os.Args) < 2 {
return fmt.Errorf("No command provided")
}
switch os.Args[1] {
case "rerun":
err := clean()
if err == nil {
err = run()
}
return err
case "run":
return run()
case "clean":
return clean()
case "rebuild":
err := clean()
if err == nil {
err = build()
}
return err
case "build":
return build()
case "package":
return pkg()
case "build-cef":
return buildCef()
case "lint":
return lint()
case "unit-test":
return unitTest()
case "benchmark":
return benchmark()
default:
return fmt.Errorf("Unrecognized command '%v'", os.Args[1])
}
}
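// Sketch of typical invocations, assuming the script is run from the repository root
// with "go run" (the exact entry point may differ in the project's docs):
//
//   go run src/build.go build release
//   go run src/build.go run debug --some-doogie-flag
//
// The optional second argument selects the target ("debug" when omitted); any remaining
// arguments are forwarded to the launched binary by extraArgs().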
func run(extraQmakeArgs ...string) error {
if err := build(extraQmakeArgs...); err != nil {
return err
}
target, err := target()
if err != nil {
return err
}
return execCmd(filepath.Join(target, exeExt("doogie")), extraArgs()...)
}
func clean() error {
err := os.RemoveAll("debug")
if err == nil {
err = os.RemoveAll("release")
}
return err
}
func build(extraQmakeArgs ...string) error {
target, err := target()
if err != nil {
return err
}
// Get qmake path
qmakePath, err := exec.LookPath(exeExt("qmake"))
if err != nil {
return err
}
// Make the dir for the target
if err := os.MkdirAll(target, 0755); err != nil {
return err
}
// Run qmake TODO: put behind flag
qmakeArgs := extraQmakeArgs
if target == "debug" {
qmakeArgs = append(qmakeArgs, "CONFIG+=debug")
} else {
qmakeArgs = append(qmakeArgs, "CONFIG+=release", "CONFIG-=debug")
}
qmakeArgs = append(qmakeArgs, "doogie.pro")
if err := execCmd(qmakePath, qmakeArgs...); err != nil {
return fmt.Errorf("QMake failed: %v", err)
}
// Run nmake if windows, make if linux
makeExe := "make"
makeArgs := []string{}
if runtime.GOOS == "windows" {
makeExe = "nmake.exe"
// Use jom instead if it's on the path
if _, err = exec.LookPath("jom.exe"); err == nil {
makeExe = "jom.exe"
}
// This version takes the target name unlike the Linux one
makeArgs = []string{target, "/NOLOGO"}
}
if err := execCmd(makeExe, makeArgs...); err != nil {
return fmt.Errorf("NMake failed: %v", err)
}
// Chmod on linux
if runtime.GOOS == "linux" {
if err = os.Chmod(filepath.Join(target, "doogie"), 0755); err != nil {
return err
}
}
// Copy over resources
if err := copyResources(qmakePath, target); err != nil {
return err
}
return nil
}
func pkg() error {
target, err := target()
if err != nil {
return err
}
// Just move over the files that matter to a new deploy dir and zip em up
deployDir := filepath.Join(target, "package", "doogie")
if err = os.MkdirAll(deployDir, 0755); err != nil {
return err
}
// Get all base-dir items to copy, excluding only some
filesToCopy := []string{}
dirFiles, err := ioutil.ReadDir(target)
if err != nil {
return err
}
for _, file := range dirFiles {
if !file.IsDir() {
switch filepath.Ext(file.Name()) {
case ".cpp", ".h", ".obj", ".res", ".manifest", ".log", ".o":
// No-op
default:
filesToCopy = append(filesToCopy, file.Name())
}
}
}
if err = copyEachToDirIfNotPresent(target, deployDir, filesToCopy...); err != nil {
return err
}
// And other dirs if present in folder
subDirs := []string{"imageformats", "locales", "platforms", "sqldrivers", "styles"}
for _, subDir := range subDirs {
srcDir := filepath.Join(target, subDir)
if _, err = os.Stat(srcDir); err == nil {
if err = copyDirIfNotPresent(srcDir, filepath.Join(deployDir, subDir)); err != nil {
return fmt.Errorf("Unable to copy %v: %v", subDir, err)
}
}
}
// Now create a zip or tar file with all the goods
if runtime.GOOS == "windows" {
err = createSingleDirZipFile(deployDir, filepath.Join(target, "package", "doogie.zip"))
} else {
err = createSingleDirTarGzFile(deployDir, filepath.Join(target, "package", "doogie.tar.gz"))
}
if err != nil {
return err
}
return os.RemoveAll(deployDir)
}
func buildCef() error {
if runtime.GOOS == "windows" {
return buildCefWindows()
}
return buildCefLinux()
}
func buildCefLinux() error {
cefDir := os.Getenv("CEF_DIR")
if cefDir == "" {
return fmt.Errorf("Unable to find CEF_DIR env var")
}
// We have to run separate make runs for different target types
makeLib := func(target string) error {
if err := execCmdInDir(cefDir, "cmake", "-DCMAKE_BUILD_TYPE="+target, "."); err != nil {
return fmt.Errorf("CMake failed: %v", err)
}
wrapperDir := filepath.Join(cefDir, "libcef_dll_wrapper")
if err := execCmdInDir(wrapperDir, "make"); err != nil {
return fmt.Errorf("Make failed: %v", err)
}
if err := os.Rename(filepath.Join(wrapperDir, "libcef_dll_wrapper.a"),
filepath.Join(wrapperDir, "libcef_dll_wrapper_"+target+".a")); err != nil {
return fmt.Errorf("Unable to rename .a file: %v", err)
}
// We also need to run strip on the Release libcef.so per:
// https://bitbucket.org/chromiumembedded/cef/issues/1979
if target == "Release" {
// Back it up first
err := copyIfNotPresent(filepath.Join(cefDir, "Release/libcef.so"),
filepath.Join(cefDir, "Release/libcef.fullsym.so"))
if err != nil {
return fmt.Errorf("Release libcef backup failed: %v", err)
}
if err = execCmdInDir(cefDir, "strip", "--strip-all", "Release/libcef.so"); err != nil {
return fmt.Errorf("Failed stripping symbols: %v", err)
}
}
return nil
}
if err := makeLib("Debug"); err != nil {
return err
}
return makeLib("Release")
}
func buildCefWindows() error {
cefDir := os.Getenv("CEF_DIR")
if cefDir == "" {
return fmt.Errorf("Unable to find CEF_DIR env var")
}
// Build the make files
if err := execCmdInDir(cefDir, "cmake", "-G", "Visual Studio 14 Win64", "."); err != nil {
return fmt.Errorf("CMake failed: %v", err)
}
// Replace a couple of strings in VC proj file on windows
dllWrapperDir := filepath.Join(cefDir, "libcef_dll_wrapper")
vcProjFile := filepath.Join(dllWrapperDir, "libcef_dll_wrapper.vcxproj")
projXml, err := ioutil.ReadFile(vcProjFile)
if err != nil {
return fmt.Errorf("Unable to read VC proj file: %v", err)
}
// First one is debug, second is release
projXml = bytes.Replace(projXml, []byte("<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>"),
[]byte("<RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>"), 1)
projXml = bytes.Replace(projXml, []byte("<RuntimeLibrary>MultiThreaded</RuntimeLibrary>"),
[]byte("<RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>"), 1)
if err = ioutil.WriteFile(vcProjFile, projXml, os.ModePerm); err != nil {
return fmt.Errorf("Unable to write VC proj file: %v", err)
}
// Build debug and then build release
if err = execCmdInDir(dllWrapperDir, "msbuild", "libcef_dll_wrapper.vcxproj", "/p:Configuration=Debug"); err != nil {
return fmt.Errorf("Unable to build debug wrapper: %v", err)
}
if err = execCmdInDir(dllWrapperDir, "msbuild", "libcef_dll_wrapper.vcxproj", "/p:Configuration=Release"); err != nil {
return fmt.Errorf("Unable to build release wrapper: %v", err)
}
return nil
}
func lint() error {
toIgnore := []string{
"No copyright message found.",
"#ifndef header guard has wrong style, please use: SRC_",
"#endif line should be \"#endif // SRC_",
"Include the directory when naming .h files",
"Done processing",
"Total errors found",
}
// Run lint on all cc and h files, and trim off any of the toIgnore stuff
depotToolsDir := os.Getenv("DEPOT_TOOLS_DIR")
if depotToolsDir == "" {
return fmt.Errorf("Unable to find DEPOT_TOOLS_DIR env var")
}
args := []string{
filepath.Join(depotToolsDir, "cpplint.py"),
// Can't use, ref: https://github.com/google/styleguide/issues/22
// "--root=doogie\\",
}
integrationTestDir := filepath.Join("tests", "integration")
err := filepath.Walk(".", func(path string, info os.FileInfo, err error) error {
if !info.IsDir() && !strings.HasPrefix(info.Name(), "moc_") &&
!strings.HasPrefix(path, integrationTestDir) &&
(strings.HasSuffix(path, ".cc") || strings.HasSuffix(path, ".h")) {
args = append(args, path)
}
return nil
})
if err != nil {
return err
}
pycmd := "python"
if runtime.GOOS == "linux" {
// python by itself may refer to python3 or python2 depending on the distro,
// so invoke python2 explicitly.
pycmd = "python2"
}
cmd := exec.Command(pycmd, args...)
out, err := cmd.CombinedOutput()
if err != nil && len(out) == 0 {
return fmt.Errorf("Unable to run cpplint: %v", err)
}
scanner := bufio.NewScanner(bytes.NewReader(out))
foundAny := false
for scanner.Scan() {
// If after the trimmed string after the second colon starts w/ any toIgnore, we ignore it
ignore := false
origLine := scanner.Text()
checkLine := origLine
if firstColon := strings.Index(origLine, ":"); firstColon != -1 {
if secondColon := strings.Index(origLine[firstColon+1:], ":"); secondColon != -1 {
checkLine = strings.TrimSpace(origLine[firstColon+secondColon+2:])
}
}
for _, toCheck := range toIgnore {
if strings.HasPrefix(checkLine, toCheck) {
ignore = true
break
}
}
if !ignore {
fmt.Println(origLine)
foundAny = true
}
}
if foundAny {
return fmt.Errorf("Lint check returned one or more errors")
}
return nil
}
func unitTest() error {
if err := build("CONFIG+=test"); err != nil {
return err
}
target, err := target()
if err != nil {
return err
}
return execCmd(filepath.Join(target, exeExt("doogie-test")))
}
func benchmark() error {
if err := build("CONFIG+=benchmark"); err != nil {
return err
}
target, err := target()
if err != nil {
return err
}
return execCmd(filepath.Join(target, exeExt("doogie-benchmark")))
}
func target() (string, error) {
target := "debug"
if len(os.Args) >= 3 && !strings.HasPrefix(os.Args[2], "--") {
if os.Args[2] != "release" && os.Args[2] != "debug" {
return "", fmt.Errorf("Unknown target '%v'", os.Args[2])
}
target = os.Args[2]
}
return target, nil
}
func extraArgs() []string {
argStartIndex := 1
if len(os.Args) >= 2 {
argStartIndex = 2
if len(os.Args) > 2 && (os.Args[2] == "release" || os.Args[2] == "debug") {
argStartIndex = 3
}
}
return os.Args[argStartIndex:]
}
func exeExt(baseName string) string {
if runtime.GOOS == "windows" {
return baseName + ".exe"
}
return baseName
}
func execCmd(name string, args ...string) error {
return execCmdInDir("", name, args...)
}
func execCmdInDir(dir string, name string, args ...string) error {
cmd := exec.Command(name, args...)
cmd.Dir = dir
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
return cmd.Run()
}
func copyResources(qmakePath string, target string) error {
if runtime.GOOS == "windows" {
return copyResourcesWindows(qmakePath, target)
}
return copyResourcesLinux(qmakePath, target)
}
func copyResourcesLinux(qmakePath string, target string) error {
if _, err := exec.LookPath("chrpath"); err != nil {
return fmt.Errorf("Unable to find chrpath on the PATH: %v", err)
}
cefDir := os.Getenv("CEF_DIR")
if cefDir == "" {
return fmt.Errorf("Unable to find CEF_DIR env var")
}
// Everything read only except by owner
// Copy over crash reporter cfg
err := copyAndChmodEachToDirIfNotPresent(0644, ".", target, "crash_reporter.cfg")
if err != nil {
return err
}
// Copy over some Qt DLLs
err = copyAndChmodEachToDirIfNotPresent(0644, filepath.Join(filepath.Dir(qmakePath), "../lib"), target,
"libQt5Core.so.5",
"libQt5Gui.so.5",
"libQt5Sql.so.5",
"libQt5Widgets.so.5",
// TODO: See https://bugreports.qt.io/browse/QTBUG-53865
"libicui18n.so.56",
"libicuuc.so.56",
"libicudata.so.56",
// Needed for libqxcb platform
"libQt5XcbQpa.so.5",
"libQt5DBus.so.5",
)
if err != nil {
return err
}
// Some DLLs are needed in debug only
if target == "debug" {
err := copyAndChmodEachToDirIfNotPresent(0644, filepath.Join(filepath.Dir(qmakePath), "../lib"), target,
"libQt5Network.so.5",
"libQt5Test.so.5",
"libQt5WebSockets.so.5",
)
if err != nil {
return err
}
}
// Need some plugins
// Before that, record whether the xcb plugin is there yet
hadXcbPlugin := true
xcbPluginPath := filepath.Join(target, "platforms", "libqxcb.so")
if _, err = os.Stat(xcbPluginPath); os.IsNotExist(err) {
hadXcbPlugin = false
}
copyPlugins(qmakePath, target, "imageformats", "qgif")
copyPlugins(qmakePath, target, "platforms", "qxcb")
copyPlugins(qmakePath, target, "sqldrivers", "qsqlite")
// If the xcb plugin wasn't there (but is now), change the rpath
if !hadXcbPlugin {
if err = execCmd("chrpath", "-r", "$ORIGIN/..", xcbPluginPath); err != nil {
return fmt.Errorf("Unable to run chrpath: %v", err)
}
}
// Copy over CEF libs
err = copyAndChmodEachToDirIfNotPresent(0644, filepath.Join(cefDir, strings.Title(target)), target,
"libcef.so",
"natives_blob.bin",
"snapshot_blob.bin",
"v8_context_snapshot.bin",
)
if err != nil {
return err
}
// Copy over CEF resources
cefResDir := filepath.Join(cefDir, "Resources")
err = copyAndChmodEachToDirIfNotPresent(0644, cefResDir, target,
"icudtl.dat",
"cef.pak",
"cef_100_percent.pak",
"cef_200_percent.pak",
"cef_extensions.pak",
"devtools_resources.pak",
)
if err != nil {
return err
}
// And CEF locales
targetLocaleDir := filepath.Join(target, "locales")
if err = os.MkdirAll(targetLocaleDir, 0744); err != nil {
return err
}
err = copyAndChmodEachToDirIfNotPresent(0644, filepath.Join(cefResDir, "locales"), targetLocaleDir, "en-US.pak")
return err
}
func copyResourcesWindows(qmakePath string, target string) error {
cefDir := os.Getenv("CEF_DIR")
if cefDir == "" {
return fmt.Errorf("Unable to find CEF_DIR env var")
}
// Copy over crash reporter cfg
err := copyEachToDirIfNotPresent(".", target, "crash_reporter.cfg")
if err != nil {
return err
}
// Copy over some Qt DLLs
qtDlls := []string{
"Qt5Core.dll",
"Qt5Gui.dll",
"Qt5Sql.dll",
"Qt5Widgets.dll",
}
// Debug libs are d.dll
if target == "debug" {
// Only need web sockets during debug
qtDlls = append(qtDlls, "Qt5WebSockets.dll", "Qt5Network.dll", "Qt5Test.dll")
for i := range qtDlls {
qtDlls[i] = strings.Replace(qtDlls[i], ".dll", "d.dll", -1)
}
// Also want the PDB files if they are there
for _, dll := range qtDlls {
qtDlls = append(qtDlls, strings.Replace(dll, ".dll", ".pdb", -1))
}
}
err = copyEachToDirIfNotPresent(filepath.Dir(qmakePath), target, qtDlls...)
if err != nil {
return err
}
// Need special ucrtbased.dll for debug builds
if target == "debug" {
err = copyEachToDirIfNotPresent("C:\\Program Files (x86)\\Windows Kits\\10\\bin\\x64\\ucrt",
target, "ucrtbased.dll")
if err != nil {
return err
}
}
// TODO: statically compile this, ref: https://github.com/cretz/doogie/issues/46
// Need some plugins
copyPlugins(qmakePath, target, "imageformats", "qgif")
copyPlugins(qmakePath, target, "platforms", "qwindows")
copyPlugins(qmakePath, target, "sqldrivers", "qsqlite")
copyPlugins(qmakePath, target, "styles", "qwindowsvistastyle")
// Copy over CEF libs
err = copyEachToDirIfNotPresent(filepath.Join(cefDir, strings.Title(target)), target,
"libcef.dll",
"chrome_elf.dll",
"natives_blob.bin",
"snapshot_blob.bin",
"v8_context_snapshot.bin",
"d3dcompiler_43.dll",
"d3dcompiler_47.dll",
"libEGL.dll",
"libGLESv2.dll",
)
if err != nil {
return err
}
// Copy over CEF resources
cefResDir := filepath.Join(cefDir, "Resources")
err = copyEachToDirIfNotPresent(cefResDir, target,
"icudtl.dat",
"cef.pak",
"cef_100_percent.pak",
"cef_200_percent.pak",
"cef_extensions.pak",
"devtools_resources.pak",
)
if err != nil {
return err
}
// And CEF locales
targetLocaleDir := filepath.Join(target, "locales")
if err = os.MkdirAll(targetLocaleDir, 0755); err != nil {
return err
}
err = copyEachToDirIfNotPresent(filepath.Join(cefResDir, "locales"), targetLocaleDir, "en-US.pak")
return err
}
func chmodEachInDir(mode os.FileMode, dir string, filenames ...string) error {
for _, filename := range filenames {
if err := os.Chmod(filepath.Join(dir, filename), mode); err != nil {
return err
}
}
return nil
}
func copyPlugins(qmakePath string, target string, dir string, plugins ...string) error {
srcDir := filepath.Join(qmakePath, "../../plugins", dir)
	if _, err := os.Stat(srcDir); os.IsNotExist(err) {
return fmt.Errorf("Unable to find Qt plugins dir %v: %v", dir, err)
}
destDir := filepath.Join(target, dir)
if err := os.MkdirAll(destDir, 0755); err != nil {
return fmt.Errorf("Unable to create dir: %v", err)
}
for _, plugin := range plugins {
var fileName string
if runtime.GOOS == "linux" {
fileName = "lib" + plugin + ".so"
} else if target == "debug" {
fileName = plugin + "d.dll"
} else {
fileName = plugin + ".dll"
}
if err := copyAndChmodEachToDirIfNotPresent(0644, srcDir, destDir, fileName); err != nil {
return err
}
}
return nil
}
func copyDirIfNotPresent(srcDir string, destDir string) error {
// Note, this is not recursive, but it does preserve permissions
srcFi, err := os.Stat(srcDir)
if err != nil {
return fmt.Errorf("Unable to find src dir: %v", err)
}
if err = os.MkdirAll(destDir, srcFi.Mode()); err != nil {
return fmt.Errorf("Unable to create dest dir: %v", err)
}
files, err := ioutil.ReadDir(srcDir)
if err != nil {
return fmt.Errorf("Unable to read src dir: %v", err)
}
for _, file := range files {
srcFile := filepath.Join(srcDir, file.Name())
if err = copyToDirIfNotPresent(srcFile, destDir); err != nil {
return fmt.Errorf("Error copying file: %v", err)
}
if err = os.Chmod(srcFile, file.Mode()); err != nil {
return fmt.Errorf("Unable to chmod file: %v", err)
}
}
return nil
}
func copyAndChmodEachToDirIfNotPresent(mode os.FileMode, srcDir string, destDir string, srcFilenames ...string) error {
if err := copyEachToDirIfNotPresent(srcDir, destDir, srcFilenames...); err != nil {
return err
}
return chmodEachInDir(mode, destDir, srcFilenames...)
}
func copyEachToDirIfNotPresent(srcDir string, destDir string, srcFilenames ...string) error {
for _, srcFilename := range srcFilenames {
if err := copyToDirIfNotPresent(filepath.Join(srcDir, srcFilename), destDir); err != nil {
return err
}
}
return nil
}
func copyToDirIfNotPresent(src string, destDir string) error {
return copyIfNotPresent(src, filepath.Join(destDir, filepath.Base(src)))
}
func copyIfNotPresent(src string, dest string) error {
	if _, err := os.Stat(dest); err == nil {
		debugLogf("Skipping copying '%v' to '%v' because it already exists", src, dest)
		return nil
	}
debugLogf("Copying %v to %v\n", src, dest)
in, err := os.Open(src)
if err != nil {
return err
}
defer in.Close()
inStat, err := in.Stat()
if err != nil {
return err
}
out, err := os.OpenFile(dest, os.O_RDWR|os.O_CREATE|os.O_TRUNC, inStat.Mode())
if err != nil {
return err
}
defer out.Close()
_, err = io.Copy(out, in)
cerr := out.Close()
if err != nil {
return err
}
return cerr
}
func debugLogf(format string, v ...interface{}) {
if debugEnabled {
log.Printf(format, v...)
}
}
func createSingleDirTarGzFile(dir string, tarFilename string) error {
tarFile, err := os.Create(tarFilename)
if err != nil {
return err
}
defer tarFile.Close()
gw := gzip.NewWriter(tarFile)
defer gw.Close()
w := tar.NewWriter(gw)
defer w.Close()
return filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if info.IsDir() {
return nil
}
rel, err := filepath.Rel(dir, path)
if err != nil {
return err
}
tarPath := filepath.ToSlash(filepath.Join(filepath.Base(dir), rel))
srcPath := filepath.Join(dir, rel)
header, err := tar.FileInfoHeader(info, "")
if err != nil {
return err
}
header.Name = tarPath
// Remove owner info
header.Uname = ""
header.Gname = ""
header.Uid = 0
header.Gid = 0
if err := w.WriteHeader(header); err != nil {
return err
}
src, err := os.Open(srcPath)
if err != nil {
return err
}
defer src.Close()
_, err = io.Copy(w, src)
return err
})
}
func createSingleDirZipFile(dir string, zipFilename string) error {
zipFile, err := os.Create(zipFilename)
if err != nil {
return err
}
defer zipFile.Close()
w := zip.NewWriter(zipFile)
defer w.Close()
return filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if info.IsDir() {
return nil
}
rel, err := filepath.Rel(dir, path)
if err != nil {
return err
}
zipPath := filepath.ToSlash(filepath.Join(filepath.Base(dir), rel))
srcPath := filepath.Join(dir, rel)
dest, err := w.Create(zipPath)
if err != nil {
return err
}
src, err := os.Open(srcPath)
if err != nil {
return err
}
defer src.Close()
_, err = io.Copy(dest, src)
return err
})
}
| [
"\"CEF_DIR\"",
"\"CEF_DIR\"",
"\"DEPOT_TOOLS_DIR\"",
"\"CEF_DIR\"",
"\"CEF_DIR\""
]
| []
| [
"CEF_DIR",
"DEPOT_TOOLS_DIR"
]
| [] | ["CEF_DIR", "DEPOT_TOOLS_DIR"] | go | 2 | 0 | |
fhirclient/models/composition_tests.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b on 2019-05-07.
# 2019, SMART Health IT.
import os
import io
import unittest
import json
from . import composition
from .fhirdate import FHIRDate
class CompositionTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("Composition", js["resourceType"])
return composition.Composition(js)
def testComposition1(self):
inst = self.instantiate_from("composition-example-mixed.json")
self.assertIsNotNone(inst, "Must have instantiated a Composition instance")
self.implComposition1(inst)
js = inst.as_json()
self.assertEqual("Composition", js["resourceType"])
inst2 = composition.Composition(js)
self.implComposition1(inst2)
def implComposition1(self, inst):
self.assertEqual(inst.attester[0].mode, "legal")
self.assertEqual(inst.attester[0].time.date, FHIRDate("2012-01-04T09:10:14Z").date)
self.assertEqual(inst.attester[0].time.as_json(), "2012-01-04T09:10:14Z")
self.assertEqual(inst.category[0].coding[0].code, "LP173421-1")
self.assertEqual(inst.category[0].coding[0].display, "Report")
self.assertEqual(inst.category[0].coding[0].system, "http://loinc.org")
self.assertEqual(inst.confidentiality, "N")
self.assertEqual(inst.date.date, FHIRDate("2018-10-30T16:56:04+11:00").date)
self.assertEqual(inst.date.as_json(), "2018-10-30T16:56:04+11:00")
self.assertEqual(inst.id, "example-mixed")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.section[0].code.coding[0].code, "newborn")
self.assertEqual(inst.section[0].code.coding[0].display, "New Born Details")
self.assertEqual(inst.section[0].code.coding[0].system, "http://acme.org/codes/SectionType")
self.assertEqual(inst.section[0].text.status, "generated")
self.assertEqual(inst.section[0].title, "Child's Details")
self.assertEqual(inst.section[1].code.coding[0].code, "mother")
self.assertEqual(inst.section[1].code.coding[0].display, "Mother's Details")
self.assertEqual(inst.section[1].code.coding[0].system, "http://acme.org/codes/SectionType")
self.assertEqual(inst.section[1].text.status, "generated")
self.assertEqual(inst.section[1].title, "Mpther's Details")
self.assertEqual(inst.status, "final")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.title, "Discharge Summary (Neonatal Service)")
self.assertEqual(inst.type.coding[0].code, "78418-1")
self.assertEqual(inst.type.coding[0].display, "Neonatal perinatal medicine Discharge summary")
self.assertEqual(inst.type.coding[0].system, "http://loinc.org")
def testComposition2(self):
inst = self.instantiate_from("composition-example.json")
self.assertIsNotNone(inst, "Must have instantiated a Composition instance")
self.implComposition2(inst)
js = inst.as_json()
self.assertEqual("Composition", js["resourceType"])
inst2 = composition.Composition(js)
self.implComposition2(inst2)
def implComposition2(self, inst):
self.assertEqual(inst.attester[0].mode, "legal")
self.assertEqual(inst.attester[0].time.date, FHIRDate("2012-01-04T09:10:14Z").date)
self.assertEqual(inst.attester[0].time.as_json(), "2012-01-04T09:10:14Z")
self.assertEqual(inst.category[0].coding[0].code, "LP173421-1")
self.assertEqual(inst.category[0].coding[0].display, "Report")
self.assertEqual(inst.category[0].coding[0].system, "http://loinc.org")
self.assertEqual(inst.confidentiality, "N")
self.assertEqual(inst.date.date, FHIRDate("2012-01-04T09:10:14Z").date)
self.assertEqual(inst.date.as_json(), "2012-01-04T09:10:14Z")
self.assertEqual(inst.event[0].code[0].coding[0].code, "HEALTHREC")
self.assertEqual(inst.event[0].code[0].coding[0].display, "health record")
self.assertEqual(inst.event[0].code[0].coding[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActCode")
self.assertEqual(inst.event[0].period.end.date, FHIRDate("2012-11-12").date)
self.assertEqual(inst.event[0].period.end.as_json(), "2012-11-12")
self.assertEqual(inst.event[0].period.start.date, FHIRDate("2010-07-18").date)
self.assertEqual(inst.event[0].period.start.as_json(), "2010-07-18")
self.assertEqual(inst.id, "example")
self.assertEqual(inst.identifier.system, "http://healthintersections.com.au/test")
self.assertEqual(inst.identifier.value, "1")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.relatesTo[0].code, "replaces")
self.assertEqual(inst.relatesTo[1].code, "appends")
self.assertEqual(inst.relatesTo[1].targetIdentifier.system, "http://example.org/fhir/NamingSystem/document-ids")
self.assertEqual(inst.relatesTo[1].targetIdentifier.value, "ABC123")
self.assertEqual(inst.section[0].code.coding[0].code, "11348-0")
self.assertEqual(inst.section[0].code.coding[0].display, "History of past illness Narrative")
self.assertEqual(inst.section[0].code.coding[0].system, "http://loinc.org")
self.assertEqual(inst.section[0].mode, "snapshot")
self.assertEqual(inst.section[0].orderedBy.coding[0].code, "event-date")
self.assertEqual(inst.section[0].orderedBy.coding[0].display, "Sorted by Event Date")
self.assertEqual(inst.section[0].orderedBy.coding[0].system, "http://terminology.hl7.org/CodeSystem/list-order")
self.assertEqual(inst.section[0].text.status, "generated")
self.assertEqual(inst.section[0].title, "History of present illness")
self.assertEqual(inst.section[1].code.coding[0].code, "10157-6")
self.assertEqual(inst.section[1].code.coding[0].display, "History of family member diseases Narrative")
self.assertEqual(inst.section[1].code.coding[0].system, "http://loinc.org")
self.assertEqual(inst.section[1].emptyReason.coding[0].code, "withheld")
self.assertEqual(inst.section[1].emptyReason.coding[0].display, "Information Withheld")
self.assertEqual(inst.section[1].emptyReason.coding[0].system, "http://terminology.hl7.org/CodeSystem/list-empty-reason")
self.assertEqual(inst.section[1].mode, "snapshot")
self.assertEqual(inst.section[1].text.status, "generated")
self.assertEqual(inst.section[1].title, "History of family member diseases")
self.assertEqual(inst.status, "final")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.title, "Consultation Note")
self.assertEqual(inst.type.coding[0].code, "11488-4")
self.assertEqual(inst.type.coding[0].display, "Consult note")
self.assertEqual(inst.type.coding[0].system, "http://loinc.org")
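# Note (hypothetical path shown): the fixture files above are resolved relative to the
# FHIR_UNITTEST_DATADIR environment variable, falling back to the current directory, e.g.
#   FHIR_UNITTEST_DATADIR=/path/to/fhir-examples python -m unittest fhirclient.models.composition_tests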
| []
| []
| [
"FHIR_UNITTEST_DATADIR"
]
| [] | ["FHIR_UNITTEST_DATADIR"] | python | 1 | 0 | |
backports/shutil_get_terminal_size/get_terminal_size.py | """This is a backport of shutil.get_terminal_size from Python 3.3.
The original implementation is in C, but here we use the ctypes and
fcntl modules to create a pure Python version of os.get_terminal_size.
"""
import os
import struct
import sys
from collections import namedtuple
__all__ = ["get_terminal_size"]
terminal_size = namedtuple("terminal_size", "columns lines")
try:
from ctypes import windll, create_string_buffer, WinError
_handle_ids = {
0: -10,
1: -11,
2: -12,
}
def _get_terminal_size(fd):
handle = windll.kernel32.GetStdHandle(_handle_ids[fd])
if handle == 0:
raise OSError('handle cannot be retrieved')
if handle == -1:
raise WinError()
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(handle, csbi)
if res:
res = struct.unpack("hhhhHhhhhhh", csbi.raw)
left, top, right, bottom = res[5:9]
columns = right - left + 1
lines = bottom - top + 1
return terminal_size(columns, lines)
else:
raise WinError()
except ImportError:
import fcntl
import termios
def _get_terminal_size(fd):
try:
res = fcntl.ioctl(fd, termios.TIOCGWINSZ, b"\x00" * 4)
except IOError as e:
raise OSError(e)
lines, columns = struct.unpack("hh", res)
return terminal_size(columns, lines)
def get_terminal_size(fallback=(80, 24)):
"""Get the size of the terminal window.
For each of the two dimensions, the environment variable, COLUMNS
and LINES respectively, is checked. If the variable is defined and
the value is a positive integer, it is used.
When COLUMNS or LINES is not defined, which is the common case,
the terminal connected to sys.__stdout__ is queried
by invoking os.get_terminal_size.
If the terminal size cannot be successfully queried, either because
the system doesn't support querying, or because we are not
connected to a terminal, the value given in fallback parameter
is used. Fallback defaults to (80, 24) which is the default
size used by many terminal emulators.
The value returned is a named tuple of type os.terminal_size.
"""
# Try the environment first
try:
columns = int(os.environ["COLUMNS"])
except (KeyError, ValueError):
columns = 0
try:
lines = int(os.environ["LINES"])
except (KeyError, ValueError):
lines = 0
# Only query if necessary
if columns <= 0 or lines <= 0:
try:
size = _get_terminal_size(sys.__stdout__.fileno())
except (NameError, OSError):
size = terminal_size(*fallback)
if columns <= 0:
columns = size.columns
if lines <= 0:
lines = size.lines
return terminal_size(columns, lines)
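# Illustrative usage (not part of the original backport): when stdout is not a
# terminal and COLUMNS/LINES are unset, the (80, 24) fallback is returned.
if __name__ == "__main__":
    size = get_terminal_size()
    print("columns=%d lines=%d" % (size.columns, size.lines))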
| []
| []
| [
"LINES",
"COLUMNS"
]
| [] | ["LINES", "COLUMNS"] | python | 2 | 0 | |
internal/state/indexer/sink/psql/psql_test.go | package psql
import (
"context"
"database/sql"
"flag"
"fmt"
"log"
"os"
"os/signal"
"testing"
"time"
"github.com/adlio/schema"
"github.com/gogo/protobuf/proto"
"github.com/ory/dockertest"
"github.com/ory/dockertest/docker"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/internal/state/indexer"
"github.com/tendermint/tendermint/types"
// Register the Postgres database driver.
_ "github.com/lib/pq"
)
// Verify that the type satisfies the EventSink interface.
var _ indexer.EventSink = (*EventSink)(nil)
var (
doPauseAtExit = flag.Bool("pause-at-exit", false,
"If true, pause the test until interrupted at shutdown, to allow debugging")
// A hook that test cases can call to obtain the shared database instance
// used for testing the sink. This is initialized in TestMain (see below).
testDB func() *sql.DB
)
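// When debugging schema changes it can help to keep the PostgreSQL container
// alive after the run; the -pause-at-exit flag above does exactly that until a
// SIGINT is received. Illustrative invocation (package path assumed):
//
//	go test ./internal/state/indexer/sink/psql -args -pause-at-exit
//
// The docker endpoint defaults to the local daemon and can be overridden via DOCKER_URL.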
const (
user = "postgres"
password = "secret"
port = "5432"
dsn = "postgres://%s:%s@localhost:%s/%s?sslmode=disable"
dbName = "postgres"
chainID = "test-chainID"
viewBlockEvents = "block_events"
viewTxEvents = "tx_events"
)
func TestMain(m *testing.M) {
flag.Parse()
// Set up docker and start a container running PostgreSQL.
pool, err := dockertest.NewPool(os.Getenv("DOCKER_URL"))
if err != nil {
log.Fatalf("Creating docker pool: %v", err)
}
resource, err := pool.RunWithOptions(&dockertest.RunOptions{
Repository: "postgres",
Tag: "13",
Env: []string{
"POSTGRES_USER=" + user,
"POSTGRES_PASSWORD=" + password,
"POSTGRES_DB=" + dbName,
"listen_addresses = '*'",
},
ExposedPorts: []string{port},
}, func(config *docker.HostConfig) {
// set AutoRemove to true so that stopped container goes away by itself
config.AutoRemove = true
config.RestartPolicy = docker.RestartPolicy{
Name: "no",
}
})
if err != nil {
log.Fatalf("Starting docker pool: %v", err)
}
if *doPauseAtExit {
log.Print("Pause at exit is enabled, containers will not expire")
} else {
const expireSeconds = 60
_ = resource.Expire(expireSeconds)
log.Printf("Container expiration set to %d seconds", expireSeconds)
}
// Connect to the database, clear any leftover data, and install the
// indexing schema.
conn := fmt.Sprintf(dsn, user, password, resource.GetPort(port+"/tcp"), dbName)
var db *sql.DB
if err := pool.Retry(func() error {
sink, err := NewEventSink(conn, chainID)
if err != nil {
return err
}
db = sink.DB() // set global for test use
return db.Ping()
}); err != nil {
log.Fatalf("Connecting to database: %v", err)
}
if err := resetDatabase(db); err != nil {
log.Fatalf("Flushing database: %v", err)
}
sm, err := readSchema()
if err != nil {
log.Fatalf("Reading schema: %v", err)
}
migrator := schema.NewMigrator()
if err := migrator.Apply(db, sm); err != nil {
log.Fatalf("Applying schema: %v", err)
}
// Set up the hook for tests to get the shared database handle.
testDB = func() *sql.DB { return db }
// Run the selected test cases.
code := m.Run()
// Clean up and shut down the database container.
if *doPauseAtExit {
log.Print("Testing complete, pausing for inspection. Send SIGINT to resume teardown")
waitForInterrupt()
log.Print("(resuming)")
}
log.Print("Shutting down database")
if err := pool.Purge(resource); err != nil {
log.Printf("WARNING: Purging pool failed: %v", err)
}
if err := db.Close(); err != nil {
log.Printf("WARNING: Closing database failed: %v", err)
}
os.Exit(code)
}
func TestType(t *testing.T) {
psqlSink := &EventSink{store: testDB(), chainID: chainID}
assert.Equal(t, indexer.PSQL, psqlSink.Type())
}
func TestIndexing(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
t.Run("IndexBlockEvents", func(t *testing.T) {
indexer := &EventSink{store: testDB(), chainID: chainID}
require.NoError(t, indexer.IndexBlockEvents(newTestBlockHeader()))
verifyBlock(t, 1)
verifyBlock(t, 2)
verifyNotImplemented(t, "hasBlock", func() (bool, error) { return indexer.HasBlock(1) })
verifyNotImplemented(t, "hasBlock", func() (bool, error) { return indexer.HasBlock(2) })
verifyNotImplemented(t, "block search", func() (bool, error) {
v, err := indexer.SearchBlockEvents(ctx, nil)
return v != nil, err
})
require.NoError(t, verifyTimeStamp(tableBlocks))
// Attempting to reindex the same events should gracefully succeed.
require.NoError(t, indexer.IndexBlockEvents(newTestBlockHeader()))
})
t.Run("IndexTxEvents", func(t *testing.T) {
indexer := &EventSink{store: testDB(), chainID: chainID}
txResult := txResultWithEvents([]abci.Event{
makeIndexedEvent("account.number", "1"),
makeIndexedEvent("account.owner", "Ivan"),
makeIndexedEvent("account.owner", "Yulieta"),
{Type: "", Attributes: []abci.EventAttribute{{Key: "not_allowed", Value: "Vlad", Index: true}}},
})
require.NoError(t, indexer.IndexTxEvents([]*abci.TxResult{txResult}))
txr, err := loadTxResult(types.Tx(txResult.Tx).Hash())
require.NoError(t, err)
assert.Equal(t, txResult, txr)
require.NoError(t, verifyTimeStamp(tableTxResults))
require.NoError(t, verifyTimeStamp(viewTxEvents))
verifyNotImplemented(t, "getTxByHash", func() (bool, error) {
txr, err := indexer.GetTxByHash(types.Tx(txResult.Tx).Hash())
return txr != nil, err
})
verifyNotImplemented(t, "tx search", func() (bool, error) {
txr, err := indexer.SearchTxEvents(ctx, nil)
return txr != nil, err
})
// try to insert the duplicate tx events.
err = indexer.IndexTxEvents([]*abci.TxResult{txResult})
require.NoError(t, err)
})
}
func TestStop(t *testing.T) {
indexer := &EventSink{store: testDB()}
require.NoError(t, indexer.Stop())
}
// newTestBlockHeader constructs a fresh copy of a block header containing
// known test values to exercise the indexer.
func newTestBlockHeader() types.EventDataNewBlockHeader {
return types.EventDataNewBlockHeader{
Header: types.Header{Height: 1},
ResultBeginBlock: abci.ResponseBeginBlock{
Events: []abci.Event{
makeIndexedEvent("begin_event.proposer", "FCAA001"),
makeIndexedEvent("thingy.whatzit", "O.O"),
},
},
ResultEndBlock: abci.ResponseEndBlock{
Events: []abci.Event{
makeIndexedEvent("end_event.foo", "100"),
makeIndexedEvent("thingy.whatzit", "-.O"),
},
},
}
}
// readSchema loads the indexing database schema file
func readSchema() ([]*schema.Migration, error) {
const filename = "schema.sql"
contents, err := os.ReadFile(filename)
if err != nil {
return nil, fmt.Errorf("failed to read sql file from '%s': %w", filename, err)
}
return []*schema.Migration{{
ID: time.Now().Local().String() + " db schema",
Script: string(contents),
}}, nil
}
// resetDatabase drops all the data from the test database.
func resetDatabase(db *sql.DB) error {
_, err := db.Exec(`DROP TABLE IF EXISTS blocks,tx_results,events,attributes CASCADE;`)
if err != nil {
return fmt.Errorf("dropping tables: %w", err)
}
_, err = db.Exec(`DROP VIEW IF EXISTS event_attributes,block_events,tx_events CASCADE;`)
if err != nil {
return fmt.Errorf("dropping views: %w", err)
}
return nil
}
// txResultWithEvents constructs a fresh transaction result with fixed values
// for testing, that includes the specified events.
func txResultWithEvents(events []abci.Event) *abci.TxResult {
return &abci.TxResult{
Height: 1,
Index: 0,
Tx: types.Tx("HELLO WORLD"),
Result: abci.ResponseDeliverTx{
Data: []byte{0},
Code: abci.CodeTypeOK,
Log: "",
Events: events,
},
}
}
func loadTxResult(hash []byte) (*abci.TxResult, error) {
hashString := fmt.Sprintf("%X", hash)
var resultData []byte
if err := testDB().QueryRow(`
SELECT tx_result FROM `+tableTxResults+` WHERE tx_hash = $1;
`, hashString).Scan(&resultData); err != nil {
return nil, fmt.Errorf("lookup transaction for hash %q failed: %v", hashString, err)
}
txr := new(abci.TxResult)
if err := proto.Unmarshal(resultData, txr); err != nil {
return nil, fmt.Errorf("unmarshaling txr: %w", err)
}
return txr, nil
}
func verifyTimeStamp(tableName string) error {
return testDB().QueryRow(fmt.Sprintf(`
SELECT DISTINCT %[1]s.created_at
FROM %[1]s
WHERE %[1]s.created_at >= $1;
`, tableName), time.Now().Add(-2*time.Second)).Err()
}
func verifyBlock(t *testing.T, height int64) {
// Check that the blocks table contains an entry for this height.
if err := testDB().QueryRow(`
SELECT height FROM `+tableBlocks+` WHERE height = $1;
`, height).Err(); err == sql.ErrNoRows {
t.Errorf("No block found for height=%d", height)
} else if err != nil {
t.Fatalf("Database query failed: %v", err)
}
// Verify the presence of begin_block and end_block events.
if err := testDB().QueryRow(`
SELECT type, height, chain_id FROM `+viewBlockEvents+`
WHERE height = $1 AND type = $2 AND chain_id = $3;
`, height, types.EventTypeBeginBlock, chainID).Err(); err == sql.ErrNoRows {
t.Errorf("No %q event found for height=%d", types.EventTypeBeginBlock, height)
} else if err != nil {
t.Fatalf("Database query failed: %v", err)
}
if err := testDB().QueryRow(`
SELECT type, height, chain_id FROM `+viewBlockEvents+`
WHERE height = $1 AND type = $2 AND chain_id = $3;
`, height, types.EventTypeEndBlock, chainID).Err(); err == sql.ErrNoRows {
t.Errorf("No %q event found for height=%d", types.EventTypeEndBlock, height)
} else if err != nil {
t.Fatalf("Database query failed: %v", err)
}
}
// verifyNotImplemented calls f and verifies that it returns both a
// false-valued flag and a non-nil error whose string matches the expected
// "not supported" message prefixed with the label.
func verifyNotImplemented(t *testing.T, label string, f func() (bool, error)) {
t.Helper()
t.Logf("Verifying that %q reports it is not implemented", label)
want := label + " is not supported via the postgres event sink"
ok, err := f()
assert.False(t, ok)
require.Error(t, err)
assert.Equal(t, want, err.Error())
}
// waitForInterrupt blocks until a SIGINT is received by the process.
func waitForInterrupt() {
ch := make(chan os.Signal, 1)
signal.Notify(ch, os.Interrupt)
<-ch
}
| [
"\"DOCKER_URL\""
]
| []
| [
"DOCKER_URL"
]
| [] | ["DOCKER_URL"] | go | 1 | 0 | |
sdk/cdn/azure-mgmt-cdn/tests/test_cli_mgmt_cdn.py | # coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
# TEST SCENARIO COVERAGE
# ----------------------
# Methods Total : 41
# Methods Covered : 41
# Examples Total : 42
# Examples Tested : 42
# Coverage % : 100
# ----------------------
import os
import unittest
import azure.mgmt.cdn
from devtools_testutils import AzureMgmtTestCase, ResourceGroupPreparer
AZURE_LOCATION = 'eastus'
class MgmtCdnTest(AzureMgmtTestCase):
def setUp(self):
super(MgmtCdnTest, self).setUp()
self.mgmt_client = self.create_mgmt_client(
azure.mgmt.cdn.CdnManagementClient
)
@ResourceGroupPreparer(location=AZURE_LOCATION)
def test_cdn(self, resource_group):
SUBSCRIPTION_ID = None
if self.is_live:
SUBSCRIPTION_ID = os.environ.get("AZURE_SUBSCRIPTION_ID", None)
if not SUBSCRIPTION_ID:
SUBSCRIPTION_ID = self.settings.SUBSCRIPTION_ID
RESOURCE_GROUP = resource_group.name
PROFILE_NAME = "profilename"
CDN_WEB_APPLICATION_FIREWALL_POLICY_NAME = "policyname"
ENDPOINT_NAME = "endpoint9527x"
CUSTOM_DOMAIN_NAME = "someDomain"
ORIGIN_NAME = "origin1"
# Profiles_Create[put]
BODY = {
"location": "WestUs",
"sku": {
"name": "Standard_Verizon"
}
}
result = self.mgmt_client.profiles.begin_create(resource_group.name, PROFILE_NAME, BODY)
result = result.result()
"""
# Creates specific policy[put]
BODY = {
"location": "global",
"sku": {
"name": "Standard_Microsoft"
},
"policy_settings": {
"default_redirect_url": "http://www.bing.com",
"default_custom_block_response_status_code": "499",
"default_custom_block_response_body": "PGh0bWw+CjxoZWFkZXI+PHRpdGxlPkhlbGxvPC90aXRsZT48L2hlYWRlcj4KPGJvZHk+CkhlbGxvIHdvcmxkCjwvYm9keT4KPC9odG1sPg=="
},
"rate_limit_rules": {
"rules": [
{
"name": "RateLimitRule1",
"priority": "1",
"enabled_state": "Enabled",
"rate_limit_duration_in_minutes": "0",
"rate_limit_threshold": "1000",
"match_conditions": [
{
"match_variable": "RemoteAddr",
"operator": "IPMatch",
"negate_condition": False,
"transforms": [],
"match_value": [
"192.168.1.0/24",
"10.0.0.0/24"
]
}
],
"action": "Block"
}
]
},
"custom_rules": {
"rules": [
{
"name": "CustomRule1",
"priority": "2",
"enabled_state": "Enabled",
"match_conditions": [
{
"match_variable": "RemoteAddr",
"operator": "GeoMatch",
"negate_condition": False,
"transforms": [],
"match_value": [
"CH"
]
},
{
"match_variable": "RequestHeader",
"selector": "UserAgent",
"operator": "Contains",
"negate_condition": False,
"transforms": [],
"match_value": [
"windows"
]
},
{
"match_variable": "QueryString",
"selector": "search",
"operator": "Contains",
"negate_condition": False,
"transforms": [
"UrlDecode",
"Lowercase"
],
"match_value": [
"<?php",
"?>"
]
}
],
"action": "Block"
}
]
},
"managed_rules": {
"managed_rule_sets": [
{
"rule_set_type": "DefaultRuleSet",
"rule_set_version": "preview-1.0",
"rule_group_overrides": [
{
"rule_group_name": "Group1",
"rules": [
{
"rule_id": "GROUP1-0001",
"enabled_state": "Enabled",
"action": "Redirect"
},
{
"rule_id": "GROUP1-0002",
"enabled_state": "Disabled"
}
]
}
]
}
]
}
}
result = self.mgmt_client.policies.create_or_update(resource_group.name, CDN_WEB_APPLICATION_FIREWALL_POLICY_NAME, BODY)
result = result.result()
"""
# Endpoints_Create[put]
BODY = {
"origin_host_header": "www.bing.com",
"origin_path": "/image",
"content_types_to_compress": [
"text/html",
"application/octet-stream"
],
"is_compression_enabled": True,
"is_http_allowed": True,
"is_https_allowed": True,
"query_string_caching_behavior": "BypassCaching",
# "delivery_policy": {
# "description": "Test description for a policy.",
# "rules": [
# {
# "name": "rule1",
# "order": "1",
# "conditions": [
# {
# "name": "RemoteAddress",
# "parameters": {
# "operator": "IPMatch",
# "negate_condition": True,
# "match_values": [
# "192.168.1.0/24",
# "10.0.0.0/24"
# ],
# "@odata.type": "#Microsoft.Azure.Cdn.Models.DeliveryRuleRemoteAddressConditionParameters"
# }
# }
# ],
# "actions": [
# {
# "name": "CacheExpiration",
# "parameters": {
# "cache_behavior": "Override",
# "cache_duration": "10:10:09",
# "@odata.type": "#Microsoft.Azure.Cdn.Models.DeliveryRuleCacheExpirationActionParameters",
# "cache_type": "All"
# }
# },
# {
# "name": "ModifyResponseHeader",
# "parameters": {
# "header_action": "Overwrite",
# "header_name": "Access-Control-Allow-Origin",
# "value": "*",
# "@odata.type": "#Microsoft.Azure.Cdn.Models.DeliveryRuleHeaderActionParameters"
# }
# },
# {
# "name": "ModifyRequestHeader",
# "parameters": {
# "header_action": "Overwrite",
# "header_name": "Accept-Encoding",
# "value": "gzip",
# "@odata.type": "#Microsoft.Azure.Cdn.Models.DeliveryRuleHeaderActionParameters"
# }
# }
# ]
# }
# ]
# },
"origins": [
{
"name": "origin1",
"host_name": "host1.hello.com"
}
],
# "web_application_firewall_policy_link": {
# "id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Cdn/CdnWebApplicationFirewallPolicies/" + CDN_WEB_APPLICATION_FIREWALL_POLICY_NAME + ""
# },
"location": "WestUs",
"tags": {
"kay1": "value1"
}
}
result = self.mgmt_client.endpoints.begin_create(resource_group.name, PROFILE_NAME, ENDPOINT_NAME, BODY)
result = result.result()
"""
# CustomDomains_Create[put]
# BODY = {
# "host_name": "www.someDomain.net"
# }
HOST_NAME = "www.someDomain.net"
result = self.mgmt_client.custom_domains.create(resource_group.name, PROFILE_NAME, ENDPOINT_NAME, CUSTOM_DOMAIN_NAME, HOST_NAME)
result = result.result()
# CustomDomains_Get[get]
result = self.mgmt_client.custom_domains.get(resource_group.name, PROFILE_NAME, ENDPOINT_NAME, CUSTOM_DOMAIN_NAME)
"""
# Origins_Get[get]
result = self.mgmt_client.origins.get(resource_group.name, PROFILE_NAME, ENDPOINT_NAME, ORIGIN_NAME)
"""
# Get Policy[get]
result = self.mgmt_client.policies.get(resource_group.name, CDN_WEB_APPLICATION_FIREWALL_POLICY_NAME)
"""
# CustomDomains_ListByEndpoint[get]
result = self.mgmt_client.custom_domains.list_by_endpoint(resource_group.name, PROFILE_NAME, ENDPOINT_NAME)
# Origins_ListByEndpoint[get]
result = self.mgmt_client.origins.list_by_endpoint(resource_group.name, PROFILE_NAME, ENDPOINT_NAME)
# Endpoints_Get[get]
result = self.mgmt_client.endpoints.get(resource_group.name, PROFILE_NAME, ENDPOINT_NAME)
# Endpoints_ListByProfile[get]
result = self.mgmt_client.endpoints.list_by_profile(resource_group.name, PROFILE_NAME)
# List Policies in a Resource Group[get]
result = self.mgmt_client.policies.list(resource_group.name)
# Profiles_Get[get]
result = self.mgmt_client.profiles.get(resource_group.name, PROFILE_NAME)
# Profiles_ListByResourceGroup[get]
result = self.mgmt_client.profiles.list_by_resource_group(resource_group.name)
# List Policies in a Resource Group[get]
result = self.mgmt_client.policies.list(resource_group.name)
# Profiles_List[get]
result = self.mgmt_client.profiles.list()
# Operations_List[get]
result = self.mgmt_client.operations.list()
# EdgeNodes_List[get]
result = self.mgmt_client.edge_nodes.list()
"""
# CustomDomains_DisableCustomHttps[post]
result = self.mgmt_client.custom_domains.disable_custom_https(resource_group.name, PROFILE_NAME, ENDPOINT_NAME, CUSTOM_DOMAIN_NAME)
# CustomDomains_EnableCustomHttpsUsingYourOwnCertificate[post]
BODY = {
"certificate_source": "AzureKeyVault",
"protocol_type": "ServerNameIndication",
"certificate_source_parameters": {
"odata.type": "#Microsoft.Azure.Cdn.Models.KeyVaultCertificateSourceParameters",
"subscription_id": "subid",
"resource_group_name": "RG",
"vault_name": "kv",
"secret_name": "secret1",
"secret_version": "00000000-0000-0000-0000-000000000000",
"update_rule": "NoAction",
"delete_rule": "NoAction"
}
}
result = self.mgmt_client.custom_domains.enable_custom_https(resource_group.name, PROFILE_NAME, ENDPOINT_NAME, CUSTOM_DOMAIN_NAME, BODY)
# CustomDomains_EnableCustomHttpsUsingCDNManagedCertificate[post]
BODY = {
"certificate_source": "Cdn",
"protocol_type": "ServerNameIndication",
"certificate_source_parameters": {
"odata.type": "#Microsoft.Azure.Cdn.Models.CdnCertificateSourceParameters",
"certificate_type": "Shared"
}
}
result = self.mgmt_client.custom_domains.enable_custom_https(resource_group.name, PROFILE_NAME, ENDPOINT_NAME, CUSTOM_DOMAIN_NAME, BODY)
"""
# Origins_Update[patch]
BODY = {
"http_port": "42",
"https_port": "43"
}
result = self.mgmt_client.origins.begin_update(resource_group.name, PROFILE_NAME, ENDPOINT_NAME, ORIGIN_NAME, BODY)
result = result.result()
"""
# Creates specific policy[put]
BODY = {
"location": "WestUs",
"sku": {
"name": "Standard_Microsoft"
},
"policy_settings": {
"default_redirect_url": "http://www.bing.com",
"default_custom_block_response_status_code": "499",
"default_custom_block_response_body": "PGh0bWw+CjxoZWFkZXI+PHRpdGxlPkhlbGxvPC90aXRsZT48L2hlYWRlcj4KPGJvZHk+CkhlbGxvIHdvcmxkCjwvYm9keT4KPC9odG1sPg=="
},
"rate_limit_rules": {
"rules": [
{
"name": "RateLimitRule1",
"priority": "1",
"enabled_state": "Enabled",
"rate_limit_duration_in_minutes": "0",
"rate_limit_threshold": "1000",
"match_conditions": [
{
"match_variable": "RemoteAddr",
"operator": "IPMatch",
"negate_condition": False,
"transforms": [],
"match_value": [
"192.168.1.0/24",
"10.0.0.0/24"
]
}
],
"action": "Block"
}
]
},
"custom_rules": {
"rules": [
{
"name": "CustomRule1",
"priority": "2",
"enabled_state": "Enabled",
"match_conditions": [
{
"match_variable": "RemoteAddr",
"operator": "GeoMatch",
"negate_condition": False,
"transforms": [],
"match_value": [
"CH"
]
},
{
"match_variable": "RequestHeader",
"selector": "UserAgent",
"operator": "Contains",
"negate_condition": False,
"transforms": [],
"match_value": [
"windows"
]
},
{
"match_variable": "QueryString",
"selector": "search",
"operator": "Contains",
"negate_condition": False,
"transforms": [
"UrlDecode",
"Lowercase"
],
"match_value": [
"<?php",
"?>"
]
}
],
"action": "Block"
}
]
},
"managed_rules": {
"managed_rule_sets": [
{
"rule_set_type": "DefaultRuleSet",
"rule_set_version": "preview-1.0",
"rule_group_overrides": [
{
"rule_group_name": "Group1",
"rules": [
{
"rule_id": "GROUP1-0001",
"enabled_state": "Enabled",
"action": "Redirect"
},
{
"rule_id": "GROUP1-0002",
"enabled_state": "Disabled"
}
]
}
]
}
]
}
}
result = self.mgmt_client.policies.create_or_update(resource_group.name, CDN_WEB_APPLICATION_FIREWALL_POLICY_NAME, BODY)
result = result.result()
"""
# Endpoints_ValidateCustomDomain[post]
BODY = {
"host_name": "www.someDomain.com"
}
# HOST_NAME = "www.someDomain.com"
result = self.mgmt_client.endpoints.validate_custom_domain(resource_group.name, PROFILE_NAME, ENDPOINT_NAME, BODY)
# Endpoints_ListResourceUsage[post]
result = self.mgmt_client.endpoints.list_resource_usage(resource_group.name, PROFILE_NAME, ENDPOINT_NAME)
# Endpoints_PurgeContent[post]
BODY = {
"content_paths": [
"/folder1"
]
}
# CONTENT_PATHS = ["/folder1"]
result = self.mgmt_client.endpoints.begin_purge_content(resource_group.name, PROFILE_NAME, ENDPOINT_NAME, BODY)
result = result.result()
# Endpoints_Stop[post]
result = self.mgmt_client.endpoints.begin_stop(resource_group.name, PROFILE_NAME, ENDPOINT_NAME)
result = result.result()
# Endpoints_Start[post]
result = self.mgmt_client.endpoints.begin_start(resource_group.name, PROFILE_NAME, ENDPOINT_NAME)
result = result.result()
# Endpoints_LoadContent[post]
BODY = {
"content_paths": [
"/folder1"
]
}
# CONTENT_PATHS = ["/folder1"]
result = self.mgmt_client.endpoints.begin_load_content(resource_group.name, PROFILE_NAME, ENDPOINT_NAME, BODY)
result = result.result()
# Profiles_ListSupportedOptimizationTypes[post]
result = self.mgmt_client.profiles.list_supported_optimization_types(resource_group.name, PROFILE_NAME)
# Endpoints_Update[patch]
BODY = {
"tags": {
"additional_properties": "Tag1"
},
# "web_application_firewall_policy_link": {
# "id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Cdn/CdnWebApplicationFirewallPolicies/" + CDN_WEB_APPLICATION_FIREWALL_POLICY_NAME + ""
# }
}
result = self.mgmt_client.endpoints.begin_update(resource_group.name, PROFILE_NAME, ENDPOINT_NAME, BODY)
result = result.result()
# Profiles_ListResourceUsage[post]
result = self.mgmt_client.profiles.list_resource_usage(resource_group.name, PROFILE_NAME)
# Profiles_GenerateSsoUri[post]
result = self.mgmt_client.profiles.generate_sso_uri(resource_group.name, PROFILE_NAME)
# Profiles_Update[patch]
BODY = {
"tags": {
"additional_properties": "Tag1"
}
}
result = self.mgmt_client.profiles.begin_update(resource_group.name, PROFILE_NAME, BODY)
result = result.result()
# CheckNameAvailabilityWithSubscription[post]
BODY = {
"name": "sampleName",
"type": "Microsoft.Cdn/Profiles/Endpoints"
}
# CHECK_NAME = "sampleName"
result = self.mgmt_client.check_name_availability_with_subscription(BODY)
# ResourceUsage_List[post]
result = self.mgmt_client.resource_usage.list()
# ValidateProbe[post]
BODY = {
"probe_url": "https://www.bing.com/image"
}
# PROBEURL = "https://www.bing.com/image"
result = self.mgmt_client.validate_probe(BODY)
# CheckNameAvailability[post]
BODY = {
"name": "sampleName",
"type": "Microsoft.Cdn/Profiles/Endpoints"
}
# CHECKNAME = "sampleName"
result = self.mgmt_client.check_name_availability(BODY)
# CustomDomains_Delete[delete]
result = self.mgmt_client.custom_domains.begin_delete(resource_group.name, PROFILE_NAME, ENDPOINT_NAME, CUSTOM_DOMAIN_NAME)
result = result.result()
"""
# Delete protection policy[delete]
result = self.mgmt_client.policies.delete(resource_group.name, CDN_WEB_APPLICATION_FIREWALL_POLICY_NAME)
"""
# Endpoints_Delete[delete]
result = self.mgmt_client.endpoints.begin_delete(resource_group.name, PROFILE_NAME, ENDPOINT_NAME)
result = result.result()
# Profiles_Delete[delete]
result = self.mgmt_client.profiles.begin_delete(resource_group.name, PROFILE_NAME)
result = result.result()
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
| []
| []
| [
"AZURE_SUBSCRIPTION_ID"
]
| [] | ["AZURE_SUBSCRIPTION_ID"] | python | 1 | 0 | |
client/fuzzer.go | package client
import (
"fmt"
"io"
"log"
"os"
"os/exec"
"regexp"
"strconv"
"strings"
"syscall"
"github.com/richo/roving/types"
)
var invalidFuzzerNames *regexp.Regexp
func init() {
invalidFuzzerNames = regexp.MustCompile("[^a-zA-Z0-9_-]")
}
// Fuzzer runs an AFL fuzzer by shelling out to `afl-fuzz`.
// It keeps track of the fuzzer's process, and of its
// progress using an AflFileManager.
type Fuzzer struct {
Id string
fileManager *types.AflFileManager
started bool
cmd *exec.Cmd
}
// run starts the fuzzer and sets up its output pipes.
// Once the fuzz command has started, run should never return
// unless something goes wrong with the command.
func (f *Fuzzer) run() error {
var err error
cwd, err := os.Getwd()
if err != nil {
log.Fatalf("Couldn't get cwd: %s", err)
}
log.Printf("Starting fuzzer in %s", cwd)
cmd := f.cmd
log.Printf("%s %s", cmd.Path, strings.Join(cmd.Args, " "))
stdout, err := cmd.StdoutPipe()
if err != nil {
log.Fatalf("Couldn't get stdout handle: %s", err)
}
stderr, err := cmd.StderrPipe()
if err != nil {
log.Fatalf("Couldn't get stderr handle: %s", err)
}
if err := cmd.Start(); err != nil {
log.Fatalf("Couldn't start fuzzer: %s", err)
}
go func() {
io.Copy(os.Stdout, stdout)
}()
go func() {
io.Copy(os.Stderr, stderr)
}()
log.Printf("Started fuzzer")
f.started = true
return cmd.Wait()
}
// stop pauses the fuzz process by sending it a SIGSTOP.
func (f *Fuzzer) stop() {
log.Printf("Stopping the fuzzer")
f.cmd.Process.Signal(syscall.SIGSTOP)
}
// start restarts the fuzz process after it has been stopped
// by sending it a SIGCONT.
func (f *Fuzzer) start() {
log.Printf("Starting the fuzzer")
f.cmd.Process.Signal(syscall.SIGCONT)
}
// hasBegunFuzzing returns whether the fuzz command process has
// started fuzzing. This is distinct from "running". hasBegunFuzzing
// does not test for process liveness, but instead reports whether the
// fuzz process has made it past the initialization phase and has
// begun the actual task of fuzzing.
func (f *Fuzzer) hasBegunFuzzing() bool {
_, err := os.Stat(f.fileManager.FuzzerStatsPath())
return !os.IsNotExist(err)
}
// ReadState returns the State of the Fuzzer.
func (f *Fuzzer) ReadState() (types.State, error) {
aflOutput, err := f.fileManager.ReadOutput()
if err != nil {
return types.State{}, err
}
stats, err := f.fileManager.ReadFuzzerStats()
if err != nil {
return types.State{}, err
}
return types.State{
Id: f.Id,
Stats: *stats,
AflOutput: aflOutput,
}, nil
}
// newAFLFuzzer returns a new fuzzer.
func newAFLFuzzer(targetCommand []string, workdir string, dictPath string, timeoutMs int, memLimitMb int) Fuzzer {
name, err := os.Hostname()
if err != nil {
log.Fatal("Couldn't get hostname", err)
}
id := mkFuzzerId(name)
fileManager := types.NewAflFileManagerWithFuzzerId(workdir, id)
fuzzCmd := aflFuzzCmd(
id,
targetCommand,
fileManager.OutputDirToPassIntoAfl(),
fileManager.InputDir(),
dictPath,
aflFuzzPath(),
timeoutMs,
memLimitMb,
)
return Fuzzer{
Id: id,
fileManager: fileManager,
started: false,
cmd: fuzzCmd,
}
}
// aflFuzzPath returns the path to afl-fuzz. It first looks for an env var
// called `AFL`, which should be the path to the dir that afl-fuzz is in.
// If it does not find this var then it defaults to `afl-fuzz` and hopes
// that this is in PATH.
func aflFuzzPath() string {
root := os.Getenv("AFL")
if root == "" {
return "afl-fuzz"
}
return fmt.Sprintf("%s/afl-fuzz", root)
}
// aflFuzzCmd constructs an afl-fuzz Cmd out of the given options.
func aflFuzzCmd(fuzzerId string, targetCommand []string, outputPath string, inputPath string, dictPath string, aflFuzzPath string, timeoutMs int, memLimitMb int) *exec.Cmd {
cmdFlags := []string{
"-S", fuzzerId,
"-o", outputPath,
"-i", inputPath,
}
if timeoutMs != 0 {
cmdFlags = append(cmdFlags, "-t", strconv.Itoa(timeoutMs))
}
if memLimitMb != 0 {
cmdFlags = append(cmdFlags, "-m", strconv.Itoa(memLimitMb))
}
if dictPath != "" {
cmdFlags = append(cmdFlags, "-x", dictPath)
}
cmdFullArgs := append(cmdFlags, targetCommand...)
c := exec.Command(aflFuzzPath, cmdFullArgs...)
return c
}
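// For example (hypothetical values), aflFuzzCmd("host-1a2b", []string{"./target", "@@"},
// "work/output", "work/input", "", "afl-fuzz", 1000, 50) builds roughly:
//
//	afl-fuzz -S host-1a2b -o work/output -i work/input -t 1000 -m 50 ./target @@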
// mkFuzzerId builds a fuzzerId out of a hostname and a random 4 char hexstring.
// It replaces non-alphanumeric chars in the hostname with underscores, and
// truncates it to 27 chars.
func mkFuzzerId(hostname string) string {
validHostname := invalidFuzzerNames.ReplaceAllString(hostname, "_")
// Max AFL fuzzer ID length is 32:
// https://github.com/mirrorer/afl/blob/2fb5a3482ec27b593c57258baae7089ebdc89043/afl-fuzz.c#L7456
//
// Our fuzzer ID is ${hostname}-xxxx, so the hostname portion can
// be max 32 - 5 = 27 chars.
maxHostnameLen := 27
if len(validHostname) > maxHostnameLen {
validHostname = validHostname[0:maxHostnameLen]
}
number := types.RandInt() & 0xffff
return fmt.Sprintf("%s-%x", validHostname, number)
}
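// For example (hypothetical hostname), mkFuzzerId("fuzz box.example.com") returns
// something like "fuzz_box_example_com-1a2b": invalid characters become underscores,
// the hostname part is capped at 27 chars, and a random 16-bit hex suffix is appended.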
| [
"\"AFL\""
]
| []
| [
"AFL"
]
| [] | ["AFL"] | go | 1 | 0 | |
satflow/run.py | import os
os.environ["HYDRA_FULL_ERROR"] = "1"
import dotenv
import hydra
from omegaconf import DictConfig
# load environment variables from `.env` file if it exists
# recursively searches for `.env` in all folders starting from work dir
dotenv.load_dotenv(override=True)
@hydra.main(config_path="configs/", config_name="config.yaml")
def main(config: DictConfig):
# Imports should be nested inside @hydra.main to optimize tab completion
# Read more here: https://github.com/facebookresearch/hydra/issues/934
from satflow.core import utils
from satflow.experiments.pl_train import train
# A couple of optional utilities:
# - disabling python warnings
# - easier access to debug mode
# - forcing debug friendly configuration
# - forcing multi-gpu friendly configuration
# You can safely get rid of this line if you don't want those
utils.extras(config)
#
# Pretty print config using Rich library
if config.get("print_config"):
utils.print_config(config, resolve=True)
# Train model
return train(config)
if __name__ == "__main__":
main()
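# Hydra resolves overrides from the command line, e.g. (hypothetical override keys):
#   python satflow/run.py print_config=false trainer.max_epochs=5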
| []
| []
| [
"HYDRA_FULL_ERROR"
]
| [] | ["HYDRA_FULL_ERROR"] | python | 1 | 0 | |
exps/NAS-Bench-201/statistics.py | ##################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
##################################################
import os, sys, time, argparse, collections
from copy import deepcopy
import torch
import torch.nn as nn
from pathlib import Path
from collections import defaultdict
lib_dir = (Path(__file__).parent / '..' / '..' / 'lib').resolve()
if str(lib_dir) not in sys.path: sys.path.insert(0, str(lib_dir))
from log_utils import AverageMeter, time_string, convert_secs2time
from config_utils import load_config, dict2config
from datasets import get_datasets
# NAS-Bench-201 related module or function
from models import CellStructure, get_cell_based_tiny_net
from nas_201_api import ArchResults, ResultsCount
from functions import pure_evaluate
def create_result_count(used_seed, dataset, arch_config, results, dataloader_dict):
xresult = ResultsCount(dataset, results['net_state_dict'], results['train_acc1es'], results['train_losses'], \
results['param'], results['flop'], arch_config, used_seed, results['total_epoch'], None)
net_config = dict2config({'name': 'infer.tiny', 'C': arch_config['channel'], 'N': arch_config['num_cells'], 'genotype': CellStructure.str2structure(arch_config['arch_str']), 'num_classes':arch_config['class_num']}, None)
network = get_cell_based_tiny_net(net_config)
network.load_state_dict(xresult.get_net_param())
if 'train_times' in results: # new version
xresult.update_train_info(results['train_acc1es'], results['train_acc5es'], results['train_losses'], results['train_times'])
xresult.update_eval(results['valid_acc1es'], results['valid_losses'], results['valid_times'])
else:
if dataset == 'cifar10-valid':
xresult.update_OLD_eval('x-valid' , results['valid_acc1es'], results['valid_losses'])
loss, top1, top5, latencies = pure_evaluate(dataloader_dict['{:}@{:}'.format('cifar10', 'test')], network.cuda())
xresult.update_OLD_eval('ori-test', {results['total_epoch']-1: top1}, {results['total_epoch']-1: loss})
xresult.update_latency(latencies)
elif dataset == 'cifar10':
xresult.update_OLD_eval('ori-test', results['valid_acc1es'], results['valid_losses'])
loss, top1, top5, latencies = pure_evaluate(dataloader_dict['{:}@{:}'.format(dataset, 'test')], network.cuda())
xresult.update_latency(latencies)
elif dataset == 'cifar100' or dataset == 'ImageNet16-120':
xresult.update_OLD_eval('ori-test', results['valid_acc1es'], results['valid_losses'])
loss, top1, top5, latencies = pure_evaluate(dataloader_dict['{:}@{:}'.format(dataset, 'valid')], network.cuda())
xresult.update_OLD_eval('x-valid', {results['total_epoch']-1: top1}, {results['total_epoch']-1: loss})
loss, top1, top5, latencies = pure_evaluate(dataloader_dict['{:}@{:}'.format(dataset, 'test')], network.cuda())
xresult.update_OLD_eval('x-test' , {results['total_epoch']-1: top1}, {results['total_epoch']-1: loss})
xresult.update_latency(latencies)
else:
raise ValueError('invalid dataset name : {:}'.format(dataset))
return xresult
def account_one_arch(arch_index, arch_str, checkpoints, datasets, dataloader_dict):
information = ArchResults(arch_index, arch_str)
for checkpoint_path in checkpoints:
checkpoint = torch.load(checkpoint_path, map_location='cpu')
used_seed = checkpoint_path.name.split('-')[-1].split('.')[0]
for dataset in datasets:
assert dataset in checkpoint, 'Can not find {:} in arch-{:} from {:}'.format(dataset, arch_index, checkpoint_path)
results = checkpoint[dataset]
assert results['finish-train'], 'This {:} arch seed={:} does not finish train on {:} ::: {:}'.format(arch_index, used_seed, dataset, checkpoint_path)
arch_config = {'channel': results['channel'], 'num_cells': results['num_cells'], 'arch_str': arch_str, 'class_num': results['config']['class_num']}
xresult = create_result_count(used_seed, dataset, arch_config, results, dataloader_dict)
information.update(dataset, int(used_seed), xresult)
return information
def GET_DataLoaders(workers):
torch.set_num_threads(workers)
root_dir = (Path(__file__).parent / '..' / '..').resolve()
torch_dir = Path(os.environ['TORCH_HOME'])
# cifar
cifar_config_path = root_dir / 'configs' / 'nas-benchmark' / 'CIFAR.config'
cifar_config = load_config(cifar_config_path, None, None)
print ('{:} Create data-loader for all datasets'.format(time_string()))
print ('-'*200)
TRAIN_CIFAR10, VALID_CIFAR10, xshape, class_num = get_datasets('cifar10', str(torch_dir/'cifar.python'), -1)
print ('original CIFAR-10 : {:} training images and {:} test images : {:} input shape : {:} number of classes'.format(len(TRAIN_CIFAR10), len(VALID_CIFAR10), xshape, class_num))
cifar10_splits = load_config(root_dir / 'configs' / 'nas-benchmark' / 'cifar-split.txt', None, None)
assert cifar10_splits.train[:10] == [0, 5, 7, 11, 13, 15, 16, 17, 20, 24] and cifar10_splits.valid[:10] == [1, 2, 3, 4, 6, 8, 9, 10, 12, 14]
temp_dataset = deepcopy(TRAIN_CIFAR10)
temp_dataset.transform = VALID_CIFAR10.transform
# data loader
trainval_cifar10_loader = torch.utils.data.DataLoader(TRAIN_CIFAR10, batch_size=cifar_config.batch_size, shuffle=True , num_workers=workers, pin_memory=True)
train_cifar10_loader = torch.utils.data.DataLoader(TRAIN_CIFAR10, batch_size=cifar_config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(cifar10_splits.train), num_workers=workers, pin_memory=True)
valid_cifar10_loader = torch.utils.data.DataLoader(temp_dataset , batch_size=cifar_config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(cifar10_splits.valid), num_workers=workers, pin_memory=True)
test__cifar10_loader = torch.utils.data.DataLoader(VALID_CIFAR10, batch_size=cifar_config.batch_size, shuffle=False, num_workers=workers, pin_memory=True)
print ('CIFAR-10 : trval-loader has {:3d} batch with {:} per batch'.format(len(trainval_cifar10_loader), cifar_config.batch_size))
print ('CIFAR-10 : train-loader has {:3d} batch with {:} per batch'.format(len(train_cifar10_loader), cifar_config.batch_size))
print ('CIFAR-10 : valid-loader has {:3d} batch with {:} per batch'.format(len(valid_cifar10_loader), cifar_config.batch_size))
print ('CIFAR-10 : test--loader has {:3d} batch with {:} per batch'.format(len(test__cifar10_loader), cifar_config.batch_size))
print ('-'*200)
# CIFAR-100
TRAIN_CIFAR100, VALID_CIFAR100, xshape, class_num = get_datasets('cifar100', str(torch_dir/'cifar.python'), -1)
print ('original CIFAR-100: {:} training images and {:} test images : {:} input shape : {:} number of classes'.format(len(TRAIN_CIFAR100), len(VALID_CIFAR100), xshape, class_num))
cifar100_splits = load_config(root_dir / 'configs' / 'nas-benchmark' / 'cifar100-test-split.txt', None, None)
assert cifar100_splits.xvalid[:10] == [1, 3, 4, 5, 8, 10, 13, 14, 15, 16] and cifar100_splits.xtest[:10] == [0, 2, 6, 7, 9, 11, 12, 17, 20, 24]
train_cifar100_loader = torch.utils.data.DataLoader(TRAIN_CIFAR100, batch_size=cifar_config.batch_size, shuffle=True, num_workers=workers, pin_memory=True)
valid_cifar100_loader = torch.utils.data.DataLoader(VALID_CIFAR100, batch_size=cifar_config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(cifar100_splits.xvalid), num_workers=workers, pin_memory=True)
test__cifar100_loader = torch.utils.data.DataLoader(VALID_CIFAR100, batch_size=cifar_config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(cifar100_splits.xtest) , num_workers=workers, pin_memory=True)
print ('CIFAR-100 : train-loader has {:3d} batch'.format(len(train_cifar100_loader)))
print ('CIFAR-100 : valid-loader has {:3d} batch'.format(len(valid_cifar100_loader)))
print ('CIFAR-100 : test--loader has {:3d} batch'.format(len(test__cifar100_loader)))
print ('-'*200)
imagenet16_config_path = 'configs/nas-benchmark/ImageNet-16.config'
imagenet16_config = load_config(imagenet16_config_path, None, None)
TRAIN_ImageNet16_120, VALID_ImageNet16_120, xshape, class_num = get_datasets('ImageNet16-120', str(torch_dir/'cifar.python'/'ImageNet16'), -1)
print ('original TRAIN_ImageNet16_120: {:} training images and {:} test images : {:} input shape : {:} number of classes'.format(len(TRAIN_ImageNet16_120), len(VALID_ImageNet16_120), xshape, class_num))
imagenet_splits = load_config(root_dir / 'configs' / 'nas-benchmark' / 'imagenet-16-120-test-split.txt', None, None)
assert imagenet_splits.xvalid[:10] == [1, 2, 3, 6, 7, 8, 9, 12, 16, 18] and imagenet_splits.xtest[:10] == [0, 4, 5, 10, 11, 13, 14, 15, 17, 20]
train_imagenet_loader = torch.utils.data.DataLoader(TRAIN_ImageNet16_120, batch_size=imagenet16_config.batch_size, shuffle=True, num_workers=workers, pin_memory=True)
valid_imagenet_loader = torch.utils.data.DataLoader(VALID_ImageNet16_120, batch_size=imagenet16_config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(imagenet_splits.xvalid), num_workers=workers, pin_memory=True)
test__imagenet_loader = torch.utils.data.DataLoader(VALID_ImageNet16_120, batch_size=imagenet16_config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(imagenet_splits.xtest) , num_workers=workers, pin_memory=True)
print ('ImageNet-16-120 : train-loader has {:3d} batch with {:} per batch'.format(len(train_imagenet_loader), imagenet16_config.batch_size))
print ('ImageNet-16-120 : valid-loader has {:3d} batch with {:} per batch'.format(len(valid_imagenet_loader), imagenet16_config.batch_size))
print ('ImageNet-16-120 : test--loader has {:3d} batch with {:} per batch'.format(len(test__imagenet_loader), imagenet16_config.batch_size))
# 'cifar10', 'cifar100', 'ImageNet16-120'
loaders = {'cifar10@trainval': trainval_cifar10_loader,
'cifar10@train' : train_cifar10_loader,
'cifar10@valid' : valid_cifar10_loader,
'cifar10@test' : test__cifar10_loader,
'cifar100@train' : train_cifar100_loader,
'cifar100@valid' : valid_cifar100_loader,
'cifar100@test' : test__cifar100_loader,
'ImageNet16-120@train': train_imagenet_loader,
'ImageNet16-120@valid': valid_imagenet_loader,
'ImageNet16-120@test' : test__imagenet_loader}
return loaders
def simplify(save_dir, meta_file, basestr, target_dir):
meta_infos = torch.load(meta_file, map_location='cpu')
meta_archs = meta_infos['archs'] # a list of architecture strings
meta_num_archs = meta_infos['total']
meta_max_node = meta_infos['max_node']
assert meta_num_archs == len(meta_archs), 'invalid number of archs : {:} vs {:}'.format(meta_num_archs, len(meta_archs))
sub_model_dirs = sorted(list(save_dir.glob('*-*-{:}'.format(basestr))))
print ('{:} find {:} directories used to save checkpoints'.format(time_string(), len(sub_model_dirs)))
subdir2archs, num_evaluated_arch = collections.OrderedDict(), 0
num_seeds = defaultdict(lambda: 0)
for index, sub_dir in enumerate(sub_model_dirs):
xcheckpoints = list(sub_dir.glob('arch-*-seed-*.pth'))
arch_indexes = set()
for checkpoint in xcheckpoints:
temp_names = checkpoint.name.split('-')
assert len(temp_names) == 4 and temp_names[0] == 'arch' and temp_names[2] == 'seed', 'invalid checkpoint name : {:}'.format(checkpoint.name)
arch_indexes.add( temp_names[1] )
subdir2archs[sub_dir] = sorted(list(arch_indexes))
num_evaluated_arch += len(arch_indexes)
# count number of seeds for each architecture
for arch_index in arch_indexes:
num_seeds[ len(list(sub_dir.glob('arch-{:}-seed-*.pth'.format(arch_index)))) ] += 1
print('{:} There are {:5d} architectures that have been evaluated ({:} in total).'.format(time_string(), num_evaluated_arch, meta_num_archs))
for key in sorted( list( num_seeds.keys() ) ): print ('{:} There are {:5d} architectures that are evaluated {:} times.'.format(time_string(), num_seeds[key], key))
dataloader_dict = GET_DataLoaders( 6 )
to_save_simply = save_dir / 'simplifies'
to_save_allarc = save_dir / 'simplifies' / 'architectures'
if not to_save_simply.exists(): to_save_simply.mkdir(parents=True, exist_ok=True)
if not to_save_allarc.exists(): to_save_allarc.mkdir(parents=True, exist_ok=True)
assert (save_dir / target_dir) in subdir2archs, 'can not find {:}'.format(target_dir)
arch2infos, datasets = {}, ('cifar10-valid', 'cifar10', 'cifar100', 'ImageNet16-120')
evaluated_indexes = set()
target_directory = save_dir / target_dir
target_less_dir = save_dir / '{:}-LESS'.format(target_dir)
arch_indexes = subdir2archs[ target_directory ]
num_seeds = defaultdict(lambda: 0)
end_time = time.time()
arch_time = AverageMeter()
for idx, arch_index in enumerate(arch_indexes):
checkpoints = list(target_directory.glob('arch-{:}-seed-*.pth'.format(arch_index)))
ckps_less = list(target_less_dir.glob('arch-{:}-seed-*.pth'.format(arch_index)))
# create the arch info for each architecture
try:
arch_info_full = account_one_arch(arch_index, meta_archs[int(arch_index)], checkpoints, datasets, dataloader_dict)
arch_info_less = account_one_arch(arch_index, meta_archs[int(arch_index)], ckps_less, ['cifar10-valid'], dataloader_dict)
num_seeds[ len(checkpoints) ] += 1
except:
print('Loading {:} failed, : {:}'.format(arch_index, checkpoints))
continue
assert int(arch_index) not in evaluated_indexes, 'conflict arch-index : {:}'.format(arch_index)
assert 0 <= int(arch_index) < len(meta_archs), 'invalid arch-index {:} (not found in meta_archs)'.format(arch_index)
arch_info = {'full': arch_info_full, 'less': arch_info_less}
evaluated_indexes.add( int(arch_index) )
arch2infos[int(arch_index)] = arch_info
torch.save({'full': arch_info_full.state_dict(),
'less': arch_info_less.state_dict()}, to_save_allarc / '{:}-FULL.pth'.format(arch_index))
arch_info['full'].clear_params()
arch_info['less'].clear_params()
torch.save({'full': arch_info_full.state_dict(),
'less': arch_info_less.state_dict()}, to_save_allarc / '{:}-SIMPLE.pth'.format(arch_index))
# measure elapsed time
arch_time.update(time.time() - end_time)
end_time = time.time()
need_time = '{:}'.format( convert_secs2time(arch_time.avg * (len(arch_indexes)-idx-1), True) )
print('{:} {:} [{:03d}/{:03d}] : {:} still need {:}'.format(time_string(), target_dir, idx, len(arch_indexes), arch_index, need_time))
# measure time
xstrs = ['{:}:{:03d}'.format(key, num_seeds[key]) for key in sorted( list( num_seeds.keys() ) ) ]
print('{:} {:} done : {:}'.format(time_string(), target_dir, xstrs))
final_infos = {'meta_archs' : meta_archs,
'total_archs': meta_num_archs,
'basestr' : basestr,
'arch2infos' : arch2infos,
'evaluated_indexes': evaluated_indexes}
save_file_name = to_save_simply / '{:}.pth'.format(target_dir)
torch.save(final_infos, save_file_name)
print ('Save {:} / {:} architecture results into {:}.'.format(len(evaluated_indexes), meta_num_archs, save_file_name))
def merge_all(save_dir, meta_file, basestr):
meta_infos = torch.load(meta_file, map_location='cpu')
meta_archs = meta_infos['archs']
meta_num_archs = meta_infos['total']
meta_max_node = meta_infos['max_node']
assert meta_num_archs == len(meta_archs), 'invalid number of archs : {:} vs {:}'.format(meta_num_archs, len(meta_archs))
sub_model_dirs = sorted(list(save_dir.glob('*-*-{:}'.format(basestr))))
print ('{:} find {:} directories used to save checkpoints'.format(time_string(), len(sub_model_dirs)))
for index, sub_dir in enumerate(sub_model_dirs):
arch_info_files = sorted( list(sub_dir.glob('arch-*-seed-*.pth') ) )
print ('The {:02d}/{:02d}-th directory : {:} : {:} runs.'.format(index, len(sub_model_dirs), sub_dir, len(arch_info_files)))
arch2infos, evaluated_indexes = dict(), set()
for IDX, sub_dir in enumerate(sub_model_dirs):
ckp_path = sub_dir.parent / 'simplifies' / '{:}.pth'.format(sub_dir.name)
if ckp_path.exists():
sub_ckps = torch.load(ckp_path, map_location='cpu')
assert sub_ckps['total_archs'] == meta_num_archs and sub_ckps['basestr'] == basestr
xarch2infos = sub_ckps['arch2infos']
xevalindexs = sub_ckps['evaluated_indexes']
for eval_index in xevalindexs:
assert eval_index not in evaluated_indexes and eval_index not in arch2infos
#arch2infos[eval_index] = xarch2infos[eval_index].state_dict()
arch2infos[eval_index] = {'full': xarch2infos[eval_index]['full'].state_dict(),
'less': xarch2infos[eval_index]['less'].state_dict()}
evaluated_indexes.add( eval_index )
print ('{:} [{:03d}/{:03d}] merge data from {:} with {:} models.'.format(time_string(), IDX, len(sub_model_dirs), ckp_path, len(xevalindexs)))
else:
raise ValueError('Can not find {:}'.format(ckp_path))
#print ('{:} [{:03d}/{:03d}] can not find {:}, skip.'.format(time_string(), IDX, len(subdir2archs), ckp_path))
evaluated_indexes = sorted( list( evaluated_indexes ) )
print ('Finally, there are {:} architectures that have been trained and evaluated.'.format(len(evaluated_indexes)))
to_save_simply = save_dir / 'simplifies'
if not to_save_simply.exists(): to_save_simply.mkdir(parents=True, exist_ok=True)
final_infos = {'meta_archs' : meta_archs,
'total_archs': meta_num_archs,
'arch2infos' : arch2infos,
'evaluated_indexes': evaluated_indexes}
save_file_name = to_save_simply / '{:}-final-infos.pth'.format(basestr)
torch.save(final_infos, save_file_name)
print ('Save {:} / {:} architecture results into {:}.'.format(len(evaluated_indexes), meta_num_archs, save_file_name))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='NAS-BENCH-201', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--mode' , type=str, choices=['cal', 'merge'], help='The running mode for this script.')
parser.add_argument('--base_save_dir', type=str, default='./output/NAS-BENCH-201-4', help='The base-name of folder to save checkpoints and log.')
parser.add_argument('--target_dir' , type=str, help='The target directory.')
parser.add_argument('--max_node' , type=int, default=4, help='The maximum node in a cell.')
parser.add_argument('--channel' , type=int, default=16, help='The number of channels.')
parser.add_argument('--num_cells' , type=int, default=5, help='The number of cells in one stage.')
args = parser.parse_args()
save_dir = Path( args.base_save_dir )
meta_path = save_dir / 'meta-node-{:}.pth'.format(args.max_node)
assert save_dir.exists(), 'invalid save dir path : {:}'.format(save_dir)
assert meta_path.exists(), 'invalid saved meta path : {:}'.format(meta_path)
print ('start the statistics of our nas-benchmark from {:} using {:}.'.format(save_dir, args.target_dir))
basestr = 'C{:}-N{:}'.format(args.channel, args.num_cells)
if args.mode == 'cal':
simplify(save_dir, meta_path, basestr, args.target_dir)
elif args.mode == 'merge':
merge_all(save_dir, meta_path, basestr)
else:
raise ValueError('invalid mode : {:}'.format(args.mode))
| [] | [] | ["TORCH_HOME"] | [] | ["TORCH_HOME"] | python | 1 | 0 |
src/main.go | package main
import (
"encoding/json"
"io/ioutil"
"math/rand"
"os"
"os/signal"
"reflect"
"strconv"
"syscall"
"time"
"gameServer-demo/src/base"
"gameServer-demo/src/common"
pb "gameServer-demo/src/grpc"
"gameServer-demo/src/logic"
)
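// main reads the service layout configuration, loads the components configured
// for this process via reflection, wires up the shared component references,
// and then blocks until a termination signal is received.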
func main() {
rand.Seed(time.Now().UnixNano())
var configBytes []byte
	// Read the service layout configuration
common.IsDev = false
configFileName := "./layout_dev.json"
if len(os.Args) > 3 && os.Args[3] == "dev" {
configFileName = "./layout_dev.json"
}
common.IsDev = true
file, err := os.Open(configFileName)
if err != nil {
common.LogError("open layout_dev.json has err", err)
return
}
defer file.Close()
configBytes, err = ioutil.ReadAll(file)
file.Close()
err = json.Unmarshal(configBytes, &common.ServerConfig)
if err != nil {
common.LogError(" json.Unmarshal has err", err)
return
}
common.LogInfo("common.ServerConfig", common.ServerConfig)
	// Read the server environment variables and load components
serverName := os.Getenv("SERVER_NAME")
if len(os.Args) > 1 {
serverName = os.Args[1]
}
common.ServerName = serverName
common.LogInfo("common.ServerName:", common.ServerName)
common.ServerIndex = "0"
serverIndex := os.Getenv("SERVER_INDEX")
if len(os.Args) > 2 {
serverIndex = os.Args[2]
}
if serverIndex != "" {
common.ServerIndex = serverIndex
}
common.LogInfo("common.ServerIndex:", common.ServerIndex)
baseServerConfig := common.ServerConfig["base_config"]
if baseServerConfig == nil {
common.LogError("baseServerConfig == nil")
return
}
gameModeStr := baseServerConfig["GameMode"]["mode"]
gameModeInt, err := strconv.Atoi(gameModeStr)
if err != nil {
common.LogError("gameModeStr atoi has err", err)
return
}
gameMode := pb.GameMode(gameModeInt)
if gameMode == pb.GameMode_GameMode_None {
common.LogError("gameMode is none")
return
}
common.GameMode = gameMode
common.LogInfo("common.GameMode:", common.GameMode)
curServerConfig := common.ServerConfig[serverName]
if curServerConfig == nil {
common.LogError("curServerConfig == nil")
return
}
commonServerConfig := common.ServerConfig["common_config"]
if commonServerConfig == nil {
common.LogError("commonServerConfig == nil")
return
}
mustServerConfig := common.ServerConfig["must"]
if mustServerConfig == nil {
common.LogError("mustServerConfig == nil")
return
}
	// Initialize the base and logic component packages
base.Init()
logic.Init()
	// Load the required base components first
for componentName, componentConfig := range mustServerConfig {
oneComponentConfig := common.OneComponentConfig{}
		// Fill with values from the common configuration first
commonComponentConfig := commonServerConfig[componentName]
if commonComponentConfig != nil {
for oneFieldName, oneFieldValue := range commonComponentConfig {
oneComponentConfig[oneFieldName] = oneFieldValue
}
}
		// Then fill with the base component configuration
for oneFieldName, oneFieldValue := range componentConfig {
oneComponentConfig[oneFieldName] = oneFieldValue
}
		// Check whether this is a multi-instance service and determine the real component name
realComponentName := componentName
if oneComponentConfig["multi_line"] == "true" {
realComponentName = componentName + serverIndex
}
		// Skip components that have already been loaded
if common.ComponentMap[realComponentName] != nil {
continue
}
		// Check that the component has been registered
if common.AllComponentMap[componentName] == nil {
common.LogError("init component err, componentName == nil", componentName)
return
}
		// Finally fill with this process's own component configuration
curComponentConfig := curServerConfig[componentName]
if curComponentConfig != nil {
for oneFieldName, oneFieldValue := range curComponentConfig {
oneComponentConfig[oneFieldName] = oneFieldValue
}
}
		// Call the component's LoadComponent function
methodArgs := []reflect.Value{reflect.ValueOf(&oneComponentConfig), reflect.ValueOf(realComponentName)}
reflect.ValueOf(common.AllComponentMap[componentName]).MethodByName("LoadComponent").Call(methodArgs)
common.ComponentMap[realComponentName] = common.AllComponentMap[componentName]
}
	// Now load the components specific to this process
for componentName, componentConfig := range curServerConfig {
oneComponentConfig := common.OneComponentConfig{}
		// Fill with values from the common configuration first
commonComponentConfig := commonServerConfig[componentName]
if commonComponentConfig != nil {
for oneFieldName, oneFieldValue := range commonComponentConfig {
oneComponentConfig[oneFieldName] = oneFieldValue
}
}
		// Then fill with the component's own configuration
for oneFieldName, oneFieldValue := range componentConfig {
oneComponentConfig[oneFieldName] = oneFieldValue
}
realComponentName := componentName
if oneComponentConfig["multi_line"] == "true" {
realComponentName = componentName + serverIndex
}
		// Skip duplicates
if common.ComponentMap[realComponentName] != nil {
continue
}
		// Check that the component has been registered
if common.AllComponentMap[componentName] == nil {
common.LogError("init component err, componentName == nil", componentName)
return
}
		// Call the component's LoadComponent function
methodArgs := []reflect.Value{reflect.ValueOf(&oneComponentConfig), reflect.ValueOf(realComponentName)}
reflect.ValueOf(common.AllComponentMap[componentName]).MethodByName("LoadComponent").Call(methodArgs)
common.ComponentMap[realComponentName] = common.AllComponentMap[componentName]
}
	// Enable the distributed lock component
lockComponentInterface := common.ComponentMap["Lock"]
if lockComponentInterface != nil {
lockComponent, ok := lockComponentInterface.(*base.Lock)
if !ok {
common.LogError(" lockComponentInterface not lockComponent ")
return
}
common.Locker = lockComponent
}
	// Enable the message routing component
routeComponentInterface := common.ComponentMap["Route"]
if routeComponentInterface != nil {
routeComponent, ok := routeComponentInterface.(*base.Route)
if !ok {
common.LogError(" routeComponentInterface not routeComponent ")
return
}
common.Router = routeComponent
}
	// Enable the push component
pushComponentInterface := common.ComponentMap["Push"]
if pushComponentInterface != nil {
pushComponent, ok := pushComponentInterface.(*base.Push)
if !ok {
common.LogError(" pushComponentInterface not pushComponent ")
return
}
common.Pusher = pushComponent
}
	// Enable the MQ component
mqComponentInterface := common.ComponentMap["MQ"]
if mqComponentInterface != nil {
mqComponent, ok := mqComponentInterface.(*base.MQ)
if !ok {
common.LogError(" mqComponentInterface not mqComponent ")
return
}
common.MQer = mqComponent
}
	// Enable the time component
timeComponentInterface := common.ComponentMap["Time"]
if timeComponentInterface != nil {
timeComponent, ok := timeComponentInterface.(*base.Time)
if !ok {
common.LogError(" timeComponentInterface not timeComponent ")
return
}
common.Timer = timeComponent
}
//global config component
configComponentInterface := common.ComponentMap["Config"]
if configComponentInterface != nil {
configComponent, ok := configComponentInterface.(*base.Config)
if !ok {
common.LogError(" configComponentInterface not configComponent ")
return
}
common.Configer = configComponent
}
	//global authorization component
authComponentInterface := common.ComponentMap["Authorization"]
if authComponentInterface != nil {
authComponent, ok := authComponentInterface.(*base.Authorization)
if !ok {
common.LogError(" configComponentInterface not configComponent ")
return
}
common.Authorizationer = authComponent
}
	// Enable the logging service
logComponentInterface := common.ComponentMap["Log"]
if logComponentInterface != nil {
logComponent, ok := logComponentInterface.(*base.Log)
if !ok {
common.LogError(" logComponentInterface not logComponent ")
return
}
common.Logger = logComponent
}
redisComponentInterface := common.ComponentMap["Redis"]
if redisComponentInterface != nil {
redisComponent, ok := redisComponentInterface.(*base.Redis)
if !ok {
common.LogError(" redisComponentInterface not pushComponent ")
return
}
common.Rediser = redisComponent
}
tokenComponentInterface := common.ComponentMap["Token"]
if tokenComponentInterface != nil {
tokenComponent, ok := tokenComponentInterface.(*base.Token)
if !ok {
common.LogError(" tokenComponentInterface not pushComponent ")
return
}
common.Tokener = tokenComponent
}
	// Once all components are loaded and initialized, call their BeforeStart methods
	// Some components need to run before the others
for _, component := range common.ComponentMap {
reflect.ValueOf(component).MethodByName("BeforeStart").Call([]reflect.Value{})
}
	// Once all components are loaded and initialized, call their Start methods
for _, component := range common.ComponentMap {
reflect.ValueOf(component).MethodByName("Start").Call([]reflect.Value{})
}
	// Only after everything is ready, open up: start service discovery and register this service
findComponentInterface := common.ComponentMap["Find"]
if findComponentInterface != nil {
findComponent, ok := findComponentInterface.(*base.Find)
if !ok {
common.LogError(" findComponentInterface not findComponent ")
return
}
findComponent.RegisterComponent()
}
stime := time.Now().Format("2006-01-02 15:04:05")
common.LogInfo("server start ok", common.ServerName, common.ServerIndex, stime)
signalChan := make(chan os.Signal, 1)
signal.Notify(signalChan,
syscall.SIGINT,
syscall.SIGILL,
syscall.SIGFPE,
syscall.SIGSEGV,
syscall.SIGTERM,
syscall.SIGABRT)
<-signalChan
common.LogInfo("do some close operate")
	// Clear connection information
socketInterface := common.ComponentMap["SocketIO"+serverIndex]
if socketInterface != nil {
		// Type assertion check
socketComponent, ok := socketInterface.(*base.SocketIO)
if !ok {
common.LogError(" socketInterface not socketComponent ")
return
}
socketComponent.Clear()
}
common.LogInfo("server end")
}
| ["\"SERVER_NAME\"", "\"SERVER_INDEX\""] | [] | ["SERVER_INDEX", "SERVER_NAME"] | [] | ["SERVER_INDEX", "SERVER_NAME"] | go | 2 | 0 |
server.go | package main
import (
"github.com/brandon-julio-t/graph-gongular-backend/factories/chi-router"
"github.com/brandon-julio-t/graph-gongular-backend/factories/gorm-database"
"github.com/brandon-julio-t/graph-gongular-backend/factories/secret"
"github.com/joho/godotenv"
"log"
"net/http"
"os"
)
func main() {
if err := godotenv.Load(); err != nil {
log.Println("error loading .env file")
}
port := getPort()
appSecret := new(secret.Factory).Create()
db := new(gorm_database.Factory).Create()
router := new(chi_router.Factory).Create(appSecret, db)
log.Printf("connect to http://localhost:%s/ for GraphQL playground", port)
log.Fatal(http.ListenAndServe(":"+port, router))
}
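// getPort returns the PORT environment variable when it is set, falling back to "8080".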
func getPort() string {
if port := os.Getenv("PORT"); port != "" {
return port
}
return "8080"
}
| ["\"PORT\""] | [] | ["PORT"] | [] | ["PORT"] | go | 1 | 0 |
src/main/java/com/github/hypfvieh/common/SearchOrder.java | package com.github.hypfvieh.common;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.InputStream;
import org.slf4j.LoggerFactory;
/**
* Defines where to look for a library.
*
*/
public enum SearchOrder {
/** Look in any given external path */
CUSTOM_PATH,
/** Look in classpath, this includes directory and the jar(s) */
CLASS_PATH,
/** Look in system path (e.g. /usr/lib on linux/unix systems) */
SYSTEM_PATH;
/**
* Search for the given filename in given {@link SearchOrder}.
* @param _fileName filename
* @param _order order
* @return InputStream of first found matching file or null if no file found
*/
public static InputStream findFile(String _fileName, SearchOrder... _order) {
if (_fileName == null || _fileName.isEmpty() || _order == null) {
return null;
}
InputStream result = null;
for (SearchOrder so : _order) {
switch (so) {
case CLASS_PATH:
result = SearchOrder.class.getClassLoader().getResourceAsStream(_fileName);
if (result != null) {
return result;
}
break;
case CUSTOM_PATH:
File file = new File(_fileName);
if (!file.exists()) {
continue;
}
result = toStream(file);
if (result != null) {
return result;
}
break;
case SYSTEM_PATH:
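                    // Search every entry of the PATH environment variable,
                    // normalizing ';' separators to ':' before splitting.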
String getenv = System.getenv("PATH");
getenv = getenv.replace(";", ":");
for (String p : getenv.split(":")) {
File curFile = new File (p, _fileName);
if (!curFile.exists()) {
continue;
}
result = toStream(curFile);
if (result != null) {
return result;
}
}
break;
default:
break;
}
}
return result;
}
/**
* Convert file to input stream if possible.
* @param _file file
* @return InputStream or null if file could not be found
*/
private static InputStream toStream(File _file) {
try {
return new FileInputStream(_file);
} catch (FileNotFoundException _ex) {
LoggerFactory.getLogger(SearchOrder.class).debug("File {} not found", _file);
}
return null;
}
} | ["\"PATH\""] | [] | ["PATH"] | [] | ["PATH"] | java | 1 | 0 |
test/functional/rpc_signrawtransaction.py | #!/usr/bin/env python3
# Copyright (c) 2015-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test transaction signing using the signrawtransaction* RPCs."""
from test_framework.test_framework import DiazTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, hex_str_to_bytes
from test_framework.messages import sha256
from test_framework.script import CScript, OP_0
from decimal import Decimal
class SignRawTransactionsTest(DiazTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def successful_signing_test(self):
"""Create and sign a valid raw transaction with one input.
Expected results:
1) The transaction has a complete set of signatures
2) No script verification error occurred"""
privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N', 'cVKpPfVKSJxKqVpE9awvXNWuLHCa5j5tiE7K6zbUSptFpTEtiFrA']
inputs = [
# Valid pay-to-pubkey scripts
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'},
{'txid': '83a4f6a6b73660e13ee6cb3c6063fa3759c50c9b7521d0536022961898f4fb02', 'vout': 0,
'scriptPubKey': '76a914669b857c03a5ed269d5d85a1ffac9ed5d663072788ac'},
]
outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
rawTxSigned = self.nodes[0].signrawtransactionwithkey(rawTx, privKeys, inputs)
# 1) The transaction has a complete set of signatures
assert rawTxSigned['complete']
# 2) No script verification error occurred
assert 'errors' not in rawTxSigned
def test_with_lock_outputs(self):
"""Test correct error reporting when trying to sign a locked output"""
self.nodes[0].encryptwallet("password")
rawTx = '020000000156b958f78e3f24e0b2f4e4db1255426b0902027cb37e3ddadb52e37c3557dddb0000000000ffffffff01c0a6b929010000001600149a2ee8c77140a053f36018ac8124a6ececc1668a00000000'
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].signrawtransactionwithwallet, rawTx)
def script_verification_error_test(self):
"""Create and sign a raw transaction with valid (vin 0), invalid (vin 1) and one missing (vin 2) input script.
Expected results:
3) The transaction has no complete set of signatures
4) Two script verification errors occurred
5) Script verification errors have certain properties ("txid", "vout", "scriptSig", "sequence", "error")
6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)"""
privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N']
inputs = [
# Valid pay-to-pubkey script
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0},
# Invalid script
{'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7},
# Missing scriptPubKey
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 1},
]
scripts = [
# Valid pay-to-pubkey script
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'},
# Invalid script
{'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7,
'scriptPubKey': 'badbadbadbad'}
]
outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
# Make sure decoderawtransaction is at least marginally sane
decodedRawTx = self.nodes[0].decoderawtransaction(rawTx)
for i, inp in enumerate(inputs):
assert_equal(decodedRawTx["vin"][i]["txid"], inp["txid"])
assert_equal(decodedRawTx["vin"][i]["vout"], inp["vout"])
# Make sure decoderawtransaction throws if there is extra data
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, rawTx + "00")
rawTxSigned = self.nodes[0].signrawtransactionwithkey(rawTx, privKeys, scripts)
# 3) The transaction has no complete set of signatures
assert not rawTxSigned['complete']
# 4) Two script verification errors occurred
assert 'errors' in rawTxSigned
assert_equal(len(rawTxSigned['errors']), 2)
# 5) Script verification errors have certain properties
assert 'txid' in rawTxSigned['errors'][0]
assert 'vout' in rawTxSigned['errors'][0]
assert 'witness' in rawTxSigned['errors'][0]
assert 'scriptSig' in rawTxSigned['errors'][0]
assert 'sequence' in rawTxSigned['errors'][0]
assert 'error' in rawTxSigned['errors'][0]
# 6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)
assert_equal(rawTxSigned['errors'][0]['txid'], inputs[1]['txid'])
assert_equal(rawTxSigned['errors'][0]['vout'], inputs[1]['vout'])
assert_equal(rawTxSigned['errors'][1]['txid'], inputs[2]['txid'])
assert_equal(rawTxSigned['errors'][1]['vout'], inputs[2]['vout'])
assert not rawTxSigned['errors'][0]['witness']
# Now test signing failure for transaction with input witnesses
p2wpkh_raw_tx = "01000000000102fff7f7881a8099afa6940d42d1e7f6362bec38171ea3edf433541db4e4ad969f00000000494830450221008b9d1dc26ba6a9cb62127b02742fa9d754cd3bebf337f7a55d114c8e5cdd30be022040529b194ba3f9281a99f2b1c0a19c0489bc22ede944ccf4ecbab4cc618ef3ed01eeffffffef51e1b804cc89d182d279655c3aa89e815b1b309fe287d9b2b55d57b90ec68a0100000000ffffffff02202cb206000000001976a9148280b37df378db99f66f85c95a783a76ac7a6d5988ac9093510d000000001976a9143bde42dbee7e4dbe6a21b2d50ce2f0167faa815988ac000247304402203609e17b84f6a7d30c80bfa610b5b4542f32a8a0d5447a12fb1366d7f01cc44a0220573a954c4518331561406f90300e8f3358f51928d43c212a8caed02de67eebee0121025476c2e83188368da1ff3e292e7acafcdb3566bb0ad253f62fc70f07aeee635711000000"
rawTxSigned = self.nodes[0].signrawtransactionwithwallet(p2wpkh_raw_tx)
# 7) The transaction has no complete set of signatures
assert not rawTxSigned['complete']
# 8) Two script verification errors occurred
assert 'errors' in rawTxSigned
assert_equal(len(rawTxSigned['errors']), 2)
# 9) Script verification errors have certain properties
assert 'txid' in rawTxSigned['errors'][0]
assert 'vout' in rawTxSigned['errors'][0]
assert 'witness' in rawTxSigned['errors'][0]
assert 'scriptSig' in rawTxSigned['errors'][0]
assert 'sequence' in rawTxSigned['errors'][0]
assert 'error' in rawTxSigned['errors'][0]
# Non-empty witness checked here
assert_equal(rawTxSigned['errors'][1]['witness'], ["304402203609e17b84f6a7d30c80bfa610b5b4542f32a8a0d5447a12fb1366d7f01cc44a0220573a954c4518331561406f90300e8f3358f51928d43c212a8caed02de67eebee01", "025476c2e83188368da1ff3e292e7acafcdb3566bb0ad253f62fc70f07aeee6357"])
assert not rawTxSigned['errors'][0]['witness']
def witness_script_test(self):
# Now test signing transaction to P2SH-P2WSH addresses without wallet
# Create a new P2SH-P2WSH 1-of-1 multisig address:
embedded_address = self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress())
embedded_privkey = self.nodes[1].dumpprivkey(embedded_address["address"])
p2sh_p2wsh_address = self.nodes[1].addmultisigaddress(1, [embedded_address["pubkey"]], "", "p2sh-segwit")
# send transaction to P2SH-P2WSH 1-of-1 multisig address
self.nodes[0].generate(101)
self.nodes[0].sendtoaddress(p2sh_p2wsh_address["address"], 49.999)
self.nodes[0].generate(1)
self.sync_all()
# Find the UTXO for the transaction node[1] should have received, check witnessScript matches
unspent_output = self.nodes[1].listunspent(0, 999999, [p2sh_p2wsh_address["address"]])[0]
assert_equal(unspent_output["witnessScript"], p2sh_p2wsh_address["redeemScript"])
p2sh_redeemScript = CScript([OP_0, sha256(hex_str_to_bytes(p2sh_p2wsh_address["redeemScript"]))])
assert_equal(unspent_output["redeemScript"], p2sh_redeemScript.hex())
# Now create and sign a transaction spending that output on node[0], which doesn't know the scripts or keys
spending_tx = self.nodes[0].createrawtransaction([unspent_output], {self.nodes[1].getnewaddress(): Decimal("49.998")})
spending_tx_signed = self.nodes[0].signrawtransactionwithkey(spending_tx, [embedded_privkey], [unspent_output])
# Check the signing completed successfully
assert 'complete' in spending_tx_signed
assert_equal(spending_tx_signed['complete'], True)
def run_test(self):
self.successful_signing_test()
self.script_verification_error_test()
self.witness_script_test()
self.test_with_lock_outputs()
if __name__ == '__main__':
SignRawTransactionsTest().main()
| [] | [] | [] | [] | [] | python | null | null | null
pkg/character/display.go | package character
import (
"bytes"
"fmt"
"strings"
"text/template"
"github.com/ironarachne/world/pkg/words"
)
// SimplifiedCharacter is a simplified version of a character
type SimplifiedCharacter struct {
Name string `json:"name"`
Titles string `json:"titles"`
Blazon string `json:"blazon"`
Device string `json:"device"`
Description string `json:"description"`
}
// Simplify returns a simplified version of a character
func (character Character) Simplify() (SimplifiedCharacter, error) {
description, err := character.Describe()
if err != nil {
err = fmt.Errorf("Could not generate face shape: %w", err)
return SimplifiedCharacter{}, err
}
titles := words.CombinePhrases(character.Titles)
simplified := SimplifiedCharacter{
Name: character.FirstName + " " + character.LastName,
Titles: titles,
Blazon: character.Heraldry.Blazon,
Description: description,
Device: character.Heraldry.ImageURL,
}
if character.Title != "" {
simplified.Name = strings.Title(character.Title) + " " + simplified.Name
}
return simplified, nil
}
// RandomSimplified returns a random simplified character
func RandomSimplified() (SimplifiedCharacter, error) {
character, err := Random()
if err != nil {
err = fmt.Errorf("Could not generate simplified character: %w", err)
return SimplifiedCharacter{}, err
}
simplified, err := character.Simplify()
if err != nil {
err = fmt.Errorf("Could not generate simplified character: %w", err)
return SimplifiedCharacter{}, err
}
return simplified, nil
}
// Describe returns a prose description of a character based on his or her traits and attributes
func (character Character) Describe() (string, error) {
descriptionObject, err := character.compileDescription()
if err != nil {
err = fmt.Errorf("Could not generate character description: %w", err)
return "", err
}
descriptionTemplate, err := randomDescriptionTemplate()
if err != nil {
err = fmt.Errorf("Could not generate character description: %w", err)
return "", err
}
var tplOutput bytes.Buffer
tmpl, err := template.New(descriptionObject.FullName).Funcs(template.FuncMap{
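		// Template helpers: caseStart title-cases a word, pronoun prefixes a word with its matching pronoun.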
"caseStart": func(word string) string {
return strings.Title(word)
},
"pronoun": func(word string) string {
phrase := words.Pronoun(word) + " " + word
return phrase
},
}).Parse(descriptionTemplate)
if err != nil {
panic(err)
}
err = tmpl.Execute(&tplOutput, descriptionObject)
if err != nil {
panic(err)
}
description := tplOutput.String()
return description, nil
}
| [] | [] | [] | [] | [] | go | null | null | null
jira/examples/issue-fields-configurations/assign/assign.go | package main
import (
"context"
"github.com/ctreminiom/go-atlassian/jira"
"log"
"os"
)
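// This example reads the Jira host and basic-auth credentials from the HOST,
// MAIL and TOKEN environment variables and calls the issue field configuration
// Assign endpoint.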
func main() {
var (
host = os.Getenv("HOST")
mail = os.Getenv("MAIL")
token = os.Getenv("TOKEN")
)
atlassian, err := jira.New(nil, host)
if err != nil {
return
}
atlassian.Auth.SetBasicAuth(mail, token)
response, err := atlassian.Issue.Field.Configuration.Assign(context.Background(), "10001", "10001")
if err != nil {
log.Fatal(err)
}
log.Println("HTTP Endpoint Used", response.Endpoint)
}
| ["\"HOST\"", "\"MAIL\"", "\"TOKEN\""] | [] | ["MAIL", "HOST", "TOKEN"] | [] | ["MAIL", "HOST", "TOKEN"] | go | 3 | 0 |
onnxruntime/python/tools/gen_doc.py | #!/usr/bin/env python
# This file is copied and adapted from https://github.com/onnx/onnx repository.
# There was no copyright statement on the file at the time of copying.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import defaultdict
import io
import os
import sys
import argparse
import numpy as np # type: ignore
import onnxruntime as rt
import onnxruntime.capi.onnxruntime_pybind11_state as rtpy
from onnxruntime.capi.onnxruntime_pybind11_state import schemadef
from onnxruntime.capi.onnxruntime_pybind11_state.schemadef import OpSchema #, ONNX_DOMAIN, ONNX_ML_DOMAIN
from typing import Any, Text, Sequence, Dict, List, Type, Set, Tuple
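# ONNX_ML is treated as enabled unless the ONNX_ML environment variable is explicitly set to '0'.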
ONNX_ML = not bool(os.getenv('ONNX_ML') == '0')
ONNX_DOMAIN = "onnx"
ONNX_ML_DOMAIN = "onnx-ml"
if ONNX_ML:
ext = '-ml.md'
else:
ext = '.md'
def display_number(v): # type: (int) -> Text
if OpSchema.is_infinite(v):
return '∞'
return Text(v)
def should_render_domain(domain): # type: (Text) -> bool
if domain == ONNX_DOMAIN or domain == '' or domain == ONNX_ML_DOMAIN or domain == 'ai.onnx.ml':
return False
return True
def format_name_with_domain(domain, schema_name): # type: (Text, Text) -> Text
if domain:
return '{}.{}'.format(domain, schema_name)
else:
return schema_name
def display_attr_type(v): # type: (OpSchema.AttrType) -> Text
assert isinstance(v, OpSchema.AttrType)
s = Text(v)
s = s[s.rfind('.') + 1:].lower()
if s[-1] == 's':
s = 'list of ' + s
return s
def display_domain(domain): # type: (Text) -> Text
if domain:
return "the '{}' operator set".format(domain)
else:
return "the default ONNX operator set"
def display_domain_short(domain): # type: (Text) -> Text
if domain:
return domain
else:
return 'ai.onnx (default)'
def display_version_link(name, version): # type: (Text, int) -> Text
changelog_md = 'Changelog' + ext
name_with_ver = '{}-{}'.format(name, version)
return '<a href="{}#{}">{}</a>'.format(changelog_md, name_with_ver, name_with_ver)
def display_function_version_link(name, version): # type: (Text, int) -> Text
changelog_md = 'FunctionsChangelog' + ext
name_with_ver = '{}-{}'.format(name, version)
return '<a href="{}#{}">{}</a>'.format(changelog_md, name_with_ver, name_with_ver)
def get_attribute_value(attr): # type: (AttributeProto) -> Any
if attr.HasField('f'):
return attr.f
elif attr.HasField('i'):
return attr.i
elif attr.HasField('s'):
return attr.s
elif attr.HasField('t'):
return attr.t
elif attr.HasField('g'):
return attr.g
elif len(attr.floats):
return list(attr.floats)
elif len(attr.ints):
return list(attr.ints)
elif len(attr.strings):
return list(attr.strings)
elif len(attr.tensors):
return list(attr.tensors)
elif len(attr.graphs):
return list(attr.graphs)
else:
raise ValueError("Unsupported ONNX attribute: {}".format(attr))
def display_schema(schema, versions): # type: (OpSchema, Sequence[OpSchema]) -> Text
s = ''
# doc
if schema.doc:
s += '\n'
s += '\n'.join(' ' + line
for line in schema.doc.lstrip().splitlines())
s += '\n'
# since version
s += '\n#### Version\n'
if schema.support_level == OpSchema.SupportType.EXPERIMENTAL:
s += '\nNo versioning maintained for experimental ops.'
else:
s += '\nThis version of the operator has been ' + ('deprecated' if schema.deprecated else 'available') + ' since version {}'.format(schema.since_version)
s += ' of {}.\n'.format(display_domain(schema.domain))
if len(versions) > 1:
# TODO: link to the Changelog.md
s += '\nOther versions of this operator: {}\n'.format(
', '.join(display_version_link(format_name_with_domain(v.domain, v.name),
v.since_version) for v in versions[:-1]))
# If this schema is deprecated, don't display any of the following sections
if schema.deprecated:
return s
# attributes
if schema.attributes:
s += '\n#### Attributes\n\n'
s += '<dl>\n'
for _, attr in sorted(schema.attributes.items()):
# option holds either required or default value
opt = ''
if attr.required:
opt = 'required'
elif hasattr(attr, 'default_value') and attr.default_value.name:
default_value = get_attribute_value(attr.default_value)
def format_value(value): # type: (Any) -> Text
if isinstance(value, float):
value = np.round(value, 5)
if isinstance(value, (bytes, bytearray)) and sys.version_info[0] == 3:
value = value.decode('utf-8')
return str(value)
if isinstance(default_value, list):
default_value = [format_value(val) for val in default_value]
else:
default_value = format_value(default_value)
opt = 'default is {}'.format(default_value)
s += '<dt><tt>{}</tt> : {}{}</dt>\n'.format(
attr.name,
display_attr_type(attr.type),
' ({})'.format(opt) if opt else '')
s += '<dd>{}</dd>\n'.format(attr.description)
s += '</dl>\n'
# inputs
s += '\n#### Inputs'
if schema.min_input != schema.max_input:
s += ' ({} - {})'.format(display_number(schema.min_input),
display_number(schema.max_input))
s += '\n\n'
if schema.inputs:
s += '<dl>\n'
for input in schema.inputs:
option_str = ""
if OpSchema.FormalParameterOption.Optional == input.option:
option_str = " (optional)"
elif OpSchema.FormalParameterOption.Variadic == input.option:
if input.isHomogeneous:
option_str = " (variadic)"
else:
option_str = " (variadic, heterogeneous)"
s += '<dt><tt>{}</tt>{} : {}</dt>\n'.format(input.name, option_str, input.typeStr)
s += '<dd>{}</dd>\n'.format(input.description)
s += '</dl>\n'
# outputs
s += '\n#### Outputs'
if schema.min_output != schema.max_output:
s += ' ({} - {})'.format(display_number(schema.min_output),
display_number(schema.max_output))
s += '\n\n'
if schema.outputs:
s += '<dl>\n'
for output in schema.outputs:
option_str = ""
if OpSchema.FormalParameterOption.Optional == output.option:
option_str = " (optional)"
elif OpSchema.FormalParameterOption.Variadic == output.option:
if output.isHomogeneous:
option_str = " (variadic)"
else:
option_str = " (variadic, heterogeneous)"
s += '<dt><tt>{}</tt>{} : {}</dt>\n'.format(output.name, option_str, output.typeStr)
s += '<dd>{}</dd>\n'.format(output.description)
s += '</dl>\n'
# type constraints
s += '\n#### Type Constraints'
s += '\n\n'
if schema.type_constraints:
s += '<dl>\n'
for type_constraint in schema.type_constraints:
allowedTypes = type_constraint.allowed_type_strs
allowedTypeStr = ''
if (len(allowedTypes) > 0):
allowedTypeStr = allowedTypes[0]
for allowedType in allowedTypes[1:]:
allowedTypeStr += ', ' + allowedType
s += '<dt><tt>{}</tt> : {}</dt>\n'.format(
type_constraint.type_param_str, allowedTypeStr)
s += '<dd>{}</dd>\n'.format(type_constraint.description)
s += '</dl>\n'
return s
def display_function(function, versions, domain=ONNX_DOMAIN): # type: (FunctionProto, List[int], Text) -> Text
s = ''
if domain:
domain_prefix = '{}.'.format(ONNX_ML_DOMAIN)
else:
domain_prefix = ''
# doc
if function.doc_string:
s += '\n'
s += '\n'.join(' ' + line
for line in function.doc_string.lstrip().splitlines())
s += '\n'
# since version
s += '\n#### Version\n'
s += '\nThis version of the function has been available since version {}'.format(function.since_version)
s += ' of {}.\n'.format(display_domain(domain_prefix))
if len(versions) > 1:
s += '\nOther versions of this function: {}\n'.format(
', '.join(display_function_version_link(domain_prefix + function.name, v) for v in versions if v != function.since_version))
# inputs
s += '\n#### Inputs'
s += '\n\n'
if function.input:
s += '<dl>\n'
for input in function.input:
s += '<dt>{}; </dt>\n'.format(input)
s += '<br/></dl>\n'
# outputs
s += '\n#### Outputs'
s += '\n\n'
if function.output:
s += '<dl>\n'
for output in function.output:
s += '<dt>{}; </dt>\n'.format(output)
s += '<br/></dl>\n'
# attributes
if function.attribute:
s += '\n#### Attributes\n\n'
s += '<dl>\n'
for attr in function.attribute:
s += '<dt>{};<br/></dt>\n'.format(attr)
s += '</dl>\n'
return s
def support_level_str(level): # type: (OpSchema.SupportType) -> Text
return \
"<sub>experimental</sub> " if level == OpSchema.SupportType.EXPERIMENTAL else ""
# def function_status_str(status=OperatorStatus.Value("EXPERIMENTAL")): # type: ignore
# return \
# "<sub>experimental</sub> " if status == OperatorStatus.Value('EXPERIMENTAL') else "" # type: ignore
def main(args): # type: (Type[Args]) -> None
with io.open(args.output, 'w', newline='', encoding="utf-8") as fout:
fout.write('## Contrib Operator Schemas\n')
fout.write(
"*This file is automatically generated from the\n"
" [def files](/onnxruntime/core/graph/contrib_ops/contrib_defs.cc) via [this script](/onnxruntime/python/tools/gen_doc.py).\n"
" Do not modify directly and instead edit operator definitions.*\n")
# domain -> support level -> name -> [schema]
index = defaultdict(lambda: defaultdict(lambda: defaultdict(list))) # type: Dict[Text, Dict[int, Dict[Text, List[OpSchema]]]]
for schema in rtpy.get_all_operator_schema():
index[schema.domain][int(schema.support_level)][schema.name].append(schema)
fout.write('\n')
# Preprocess the Operator Schemas
# [(domain, [(support_level, [(schema name, current schema, all versions schemas)])])]
operator_schemas = list() # type: List[Tuple[Text, List[Tuple[int, List[Tuple[Text, OpSchema, List[OpSchema]]]]]]]
exsting_ops = set() # type: Set[Text]
for domain, _supportmap in sorted(index.items()):
if not should_render_domain(domain):
continue
processed_supportmap = list()
for _support, _namemap in sorted(_supportmap.items()):
processed_namemap = list()
for n, unsorted_versions in sorted(_namemap.items()):
versions = sorted(unsorted_versions, key=lambda s: s.since_version)
schema = versions[-1]
if schema.name in exsting_ops:
continue
exsting_ops.add(schema.name)
processed_namemap.append((n, schema, versions))
processed_supportmap.append((_support, processed_namemap))
operator_schemas.append((domain, processed_supportmap))
# Table of contents
for domain, supportmap in operator_schemas:
s = '* {}\n'.format(display_domain_short(domain))
fout.write(s)
for _, namemap in supportmap:
for n, schema, versions in namemap:
s = ' * {}<a href="#{}">{}</a>\n'.format(
support_level_str(schema.support_level),
format_name_with_domain(domain, n),
format_name_with_domain(domain, n))
fout.write(s)
fout.write('\n')
for domain, supportmap in operator_schemas:
s = '## {}\n'.format(display_domain_short(domain))
fout.write(s)
for _, namemap in supportmap:
for op_type, schema, versions in namemap:
# op_type
s = ('### {}<a name="{}"></a><a name="{}">**{}**' + (' (deprecated)' if schema.deprecated else '') + '</a>\n').format(
support_level_str(schema.support_level),
format_name_with_domain(domain, op_type),
format_name_with_domain(domain, op_type.lower()),
format_name_with_domain(domain, op_type))
s += display_schema(schema, versions)
s += '\n\n'
fout.write(s)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='ONNX Runtime Operator Documentation Generator')
parser.add_argument('--output_path', help='output markdown file path',
default=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ContribOperators.md')
)
args = parser.parse_args()
class Args(object):
output = args.output_path
main(Args)
| [] | [] | ["ONNX_ML"] | [] | ["ONNX_ML"] | python | 1 | 0 |
service/test/integration2_test.go | package service_test
import (
"flag"
"fmt"
"math/rand"
"net"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"testing"
"time"
protest "github.com/derekparker/delve/pkg/proc/test"
"github.com/derekparker/delve/pkg/goversion"
"github.com/derekparker/delve/service"
"github.com/derekparker/delve/service/api"
"github.com/derekparker/delve/service/rpc2"
"github.com/derekparker/delve/service/rpccommon"
)
var normalLoadConfig = api.LoadConfig{true, 1, 64, 64, -1}
var testBackend string
func TestMain(m *testing.M) {
flag.StringVar(&testBackend, "backend", "", "selects backend")
flag.Parse()
if testBackend == "" {
testBackend = os.Getenv("PROCTEST")
if testBackend == "" {
testBackend = "native"
}
}
os.Exit(protest.RunTestsWithFixtures(m))
}
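// withTestClient2 builds the named fixture, starts a debug server for it on a
// random local port, runs fn with a connected RPC2 client, and detaches and
// removes any trace directory afterwards.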
func withTestClient2(name string, t *testing.T, fn func(c service.Client)) {
if testBackend == "rr" {
protest.MustHaveRecordingAllowed(t)
}
listener, err := net.Listen("tcp", "localhost:0")
if err != nil {
t.Fatalf("couldn't start listener: %s\n", err)
}
defer listener.Close()
server := rpccommon.NewServer(&service.Config{
Listener: listener,
ProcessArgs: []string{protest.BuildFixture(name, 0).Path},
Backend: testBackend,
}, false)
if err := server.Run(); err != nil {
t.Fatal(err)
}
client := rpc2.NewClient(listener.Addr().String())
defer func() {
dir, _ := client.TraceDirectory()
client.Detach(true)
if dir != "" {
protest.SafeRemoveAll(dir)
}
}()
fn(client)
}
func TestRunWithInvalidPath(t *testing.T) {
if testBackend == "rr" {
// This test won't work because rr returns an error, after recording, when
// the recording failed but also when the recording succeeded but the
// inferior returned an error. Therefore we have to ignore errors from rr.
return
}
listener, err := net.Listen("tcp", "localhost:0")
if err != nil {
t.Fatalf("couldn't start listener: %s\n", err)
}
defer listener.Close()
server := rpccommon.NewServer(&service.Config{
Listener: listener,
ProcessArgs: []string{"invalid_path"},
APIVersion: 2,
Backend: testBackend,
}, false)
if err := server.Run(); err == nil {
t.Fatal("Expected Run to return error for invalid program path")
}
}
func TestRestart_afterExit(t *testing.T) {
withTestClient2("continuetestprog", t, func(c service.Client) {
origPid := c.ProcessPid()
state := <-c.Continue()
if !state.Exited {
t.Fatal("expected initial process to have exited")
}
if _, err := c.Restart(); err != nil {
t.Fatal(err)
}
if c.ProcessPid() == origPid {
t.Fatal("did not spawn new process, has same PID")
}
state = <-c.Continue()
if !state.Exited {
t.Fatalf("expected restarted process to have exited %v", state)
}
})
}
func TestRestart_breakpointPreservation(t *testing.T) {
protest.AllowRecording(t)
withTestClient2("continuetestprog", t, func(c service.Client) {
_, err := c.CreateBreakpoint(&api.Breakpoint{FunctionName: "main.main", Line: 1, Name: "firstbreakpoint", Tracepoint: true})
assertNoError(err, t, "CreateBreakpoint()")
stateCh := c.Continue()
state := <-stateCh
if state.CurrentThread.Breakpoint.Name != "firstbreakpoint" || !state.CurrentThread.Breakpoint.Tracepoint {
t.Fatalf("Wrong breakpoint: %#v\n", state.CurrentThread.Breakpoint)
}
state = <-stateCh
if !state.Exited {
t.Fatal("Did not exit after first tracepoint")
}
t.Log("Restart")
c.Restart()
stateCh = c.Continue()
state = <-stateCh
if state.CurrentThread.Breakpoint.Name != "firstbreakpoint" || !state.CurrentThread.Breakpoint.Tracepoint {
t.Fatalf("Wrong breakpoint (after restart): %#v\n", state.CurrentThread.Breakpoint)
}
state = <-stateCh
if !state.Exited {
t.Fatal("Did not exit after first tracepoint (after restart)")
}
})
}
func TestRestart_duringStop(t *testing.T) {
withTestClient2("continuetestprog", t, func(c service.Client) {
origPid := c.ProcessPid()
_, err := c.CreateBreakpoint(&api.Breakpoint{FunctionName: "main.main", Line: 1})
if err != nil {
t.Fatal(err)
}
state := <-c.Continue()
if state.CurrentThread.Breakpoint == nil {
t.Fatal("did not hit breakpoint")
}
if _, err := c.Restart(); err != nil {
t.Fatal(err)
}
if c.ProcessPid() == origPid {
t.Fatal("did not spawn new process, has same PID")
}
bps, err := c.ListBreakpoints()
if err != nil {
t.Fatal(err)
}
if len(bps) == 0 {
t.Fatal("breakpoints not preserved")
}
})
}
func TestRestart_attachPid(t *testing.T) {
// Assert it does not work and returns error.
// We cannot restart a process we did not spawn.
server := rpccommon.NewServer(&service.Config{
Listener: nil,
AttachPid: 999,
APIVersion: 2,
Backend: testBackend,
}, false)
if err := server.Restart(); err == nil {
t.Fatal("expected error on restart after attaching to pid but got none")
}
}
func TestClientServer_exit(t *testing.T) {
protest.AllowRecording(t)
withTestClient2("continuetestprog", t, func(c service.Client) {
state, err := c.GetState()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if e, a := false, state.Exited; e != a {
t.Fatalf("Expected exited %v, got %v", e, a)
}
state = <-c.Continue()
if state.Err == nil {
t.Fatalf("Error expected after continue")
}
if !state.Exited {
t.Fatalf("Expected exit after continue: %v", state)
}
_, err = c.GetState()
if err == nil {
t.Fatal("Expected error on querying state from exited process")
}
})
}
func TestClientServer_step(t *testing.T) {
protest.AllowRecording(t)
withTestClient2("testprog", t, func(c service.Client) {
_, err := c.CreateBreakpoint(&api.Breakpoint{FunctionName: "main.helloworld", Line: -1})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
stateBefore := <-c.Continue()
if stateBefore.Err != nil {
t.Fatalf("Unexpected error: %v", stateBefore.Err)
}
stateAfter, err := c.Step()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if before, after := stateBefore.CurrentThread.PC, stateAfter.CurrentThread.PC; before >= after {
t.Errorf("Expected %#v to be greater than %#v", before, after)
}
})
}
func TestClientServer_stepout(t *testing.T) {
protest.AllowRecording(t)
withTestClient2("testnextprog", t, func(c service.Client) {
_, err := c.CreateBreakpoint(&api.Breakpoint{FunctionName: "main.helloworld", Line: -1})
assertNoError(err, t, "CreateBreakpoint()")
stateBefore := <-c.Continue()
assertNoError(stateBefore.Err, t, "Continue()")
if stateBefore.CurrentThread.Line != 13 {
t.Fatalf("wrong line number %s:%d, expected %d", stateBefore.CurrentThread.File, stateBefore.CurrentThread.Line, 13)
}
stateAfter, err := c.StepOut()
assertNoError(err, t, "StepOut()")
if stateAfter.CurrentThread.Line != 35 {
t.Fatalf("wrong line number %s:%d, expected %d", stateAfter.CurrentThread.File, stateAfter.CurrentThread.Line, 13)
}
})
}
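// testnext2 continues the target to initialLocation, clears the breakpoint and
// then issues Next repeatedly, checking that each step moves from tc.begin to tc.end.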
func testnext2(testcases []nextTest, initialLocation string, t *testing.T) {
protest.AllowRecording(t)
withTestClient2("testnextprog", t, func(c service.Client) {
bp, err := c.CreateBreakpoint(&api.Breakpoint{FunctionName: initialLocation, Line: -1})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
state := <-c.Continue()
if state.Err != nil {
t.Fatalf("Unexpected error: %v", state.Err)
}
_, err = c.ClearBreakpoint(bp.ID)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
for _, tc := range testcases {
if state.CurrentThread.Line != tc.begin {
t.Fatalf("Program not stopped at correct spot expected %d was %d", tc.begin, state.CurrentThread.Line)
}
t.Logf("Next for scenario %#v", tc)
state, err = c.Next()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if state.CurrentThread.Line != tc.end {
t.Fatalf("Program did not continue to correct next location expected %d was %d", tc.end, state.CurrentThread.Line)
}
}
})
}
func TestNextGeneral(t *testing.T) {
var testcases []nextTest
ver, _ := goversion.Parse(runtime.Version())
if ver.Major < 0 || ver.AfterOrEqual(goversion.GoVersion{1, 7, -1, 0, 0, ""}) {
testcases = []nextTest{
{17, 19},
{19, 20},
{20, 23},
{23, 24},
{24, 26},
{26, 31},
{31, 23},
{23, 24},
{24, 26},
{26, 31},
{31, 23},
{23, 24},
{24, 26},
{26, 27},
{27, 28},
{28, 34},
}
} else {
testcases = []nextTest{
{17, 19},
{19, 20},
{20, 23},
{23, 24},
{24, 26},
{26, 31},
{31, 23},
{23, 24},
{24, 26},
{26, 31},
{31, 23},
{23, 24},
{24, 26},
{26, 27},
{27, 34},
}
}
testnext2(testcases, "main.testnext", t)
}
func TestNextFunctionReturn(t *testing.T) {
testcases := []nextTest{
{13, 14},
{14, 15},
{15, 35},
}
testnext2(testcases, "main.helloworld", t)
}
func TestClientServer_breakpointInMainThread(t *testing.T) {
protest.AllowRecording(t)
withTestClient2("testprog", t, func(c service.Client) {
bp, err := c.CreateBreakpoint(&api.Breakpoint{FunctionName: "main.helloworld", Line: 1})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
state := <-c.Continue()
if err != nil {
t.Fatalf("Unexpected error: %v, state: %#v", err, state)
}
pc := state.CurrentThread.PC
if pc-1 != bp.Addr && pc != bp.Addr {
f, l := state.CurrentThread.File, state.CurrentThread.Line
t.Fatalf("Break not respected:\nPC:%#v %s:%d\nFN:%#v \n", pc, f, l, bp.Addr)
}
})
}
func TestClientServer_breakpointInSeparateGoroutine(t *testing.T) {
protest.AllowRecording(t)
withTestClient2("testthreads", t, func(c service.Client) {
_, err := c.CreateBreakpoint(&api.Breakpoint{FunctionName: "main.anotherthread", Line: 1})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
state := <-c.Continue()
if state.Err != nil {
t.Fatalf("Unexpected error: %v, state: %#v", state.Err, state)
}
f, l := state.CurrentThread.File, state.CurrentThread.Line
if f != "testthreads.go" && l != 9 {
t.Fatal("Program did not hit breakpoint")
}
})
}
func TestClientServer_breakAtNonexistentPoint(t *testing.T) {
withTestClient2("testprog", t, func(c service.Client) {
_, err := c.CreateBreakpoint(&api.Breakpoint{FunctionName: "nowhere", Line: 1})
if err == nil {
t.Fatal("Should not be able to break at non existent function")
}
})
}
func TestClientServer_clearBreakpoint(t *testing.T) {
withTestClient2("testprog", t, func(c service.Client) {
bp, err := c.CreateBreakpoint(&api.Breakpoint{FunctionName: "main.sleepytime", Line: 1})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if e, a := 1, countBreakpoints(t, c); e != a {
t.Fatalf("Expected breakpoint count %d, got %d", e, a)
}
deleted, err := c.ClearBreakpoint(bp.ID)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if deleted.ID != bp.ID {
t.Fatalf("Expected deleted breakpoint ID %v, got %v", bp.ID, deleted.ID)
}
if e, a := 0, countBreakpoints(t, c); e != a {
t.Fatalf("Expected breakpoint count %d, got %d", e, a)
}
})
}
func TestClientServer_switchThread(t *testing.T) {
protest.AllowRecording(t)
withTestClient2("testnextprog", t, func(c service.Client) {
// With invalid thread id
_, err := c.SwitchThread(-1)
if err == nil {
t.Fatal("Expected error for invalid thread id")
}
_, err = c.CreateBreakpoint(&api.Breakpoint{FunctionName: "main.main", Line: 1})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
state := <-c.Continue()
if state.Err != nil {
t.Fatalf("Unexpected error: %v, state: %#v", state.Err, state)
}
var nt int
ct := state.CurrentThread.ID
threads, err := c.ListThreads()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
for _, th := range threads {
if th.ID != ct {
nt = th.ID
break
}
}
if nt == 0 {
t.Fatal("could not find thread to switch to")
}
// With valid thread id
state, err = c.SwitchThread(nt)
if err != nil {
t.Fatal(err)
}
if state.CurrentThread.ID != nt {
t.Fatal("Did not switch threads")
}
})
}
func TestClientServer_infoLocals(t *testing.T) {
protest.AllowRecording(t)
withTestClient2("testnextprog", t, func(c service.Client) {
fp := testProgPath(t, "testnextprog")
_, err := c.CreateBreakpoint(&api.Breakpoint{File: fp, Line: 23})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
state := <-c.Continue()
if state.Err != nil {
t.Fatalf("Unexpected error: %v, state: %#v", state.Err, state)
}
locals, err := c.ListLocalVariables(api.EvalScope{-1, 0}, normalLoadConfig)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if len(locals) != 3 {
t.Fatalf("Expected 3 locals, got %d %#v", len(locals), locals)
}
})
}
func TestClientServer_infoArgs(t *testing.T) {
protest.AllowRecording(t)
withTestClient2("testnextprog", t, func(c service.Client) {
fp := testProgPath(t, "testnextprog")
_, err := c.CreateBreakpoint(&api.Breakpoint{File: fp, Line: 47})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
state := <-c.Continue()
if state.Err != nil {
t.Fatalf("Unexpected error: %v, state: %#v", state.Err, state)
}
regs, err := c.ListRegisters(0, false)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if len(regs) == 0 {
t.Fatal("Expected string showing registers values, got empty string")
}
locals, err := c.ListFunctionArgs(api.EvalScope{-1, 0}, normalLoadConfig)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if len(locals) != 2 {
t.Fatalf("Expected 2 function args, got %d %#v", len(locals), locals)
}
})
}
func TestClientServer_traceContinue(t *testing.T) {
protest.AllowRecording(t)
withTestClient2("integrationprog", t, func(c service.Client) {
fp := testProgPath(t, "integrationprog")
_, err := c.CreateBreakpoint(&api.Breakpoint{File: fp, Line: 15, Tracepoint: true, Goroutine: true, Stacktrace: 5, Variables: []string{"i"}})
if err != nil {
t.Fatalf("Unexpected error: %v\n", err)
}
count := 0
contChan := c.Continue()
for state := range contChan {
if state.CurrentThread != nil && state.CurrentThread.Breakpoint != nil {
count++
t.Logf("%v", state)
bpi := state.CurrentThread.BreakpointInfo
if bpi.Goroutine == nil {
t.Fatalf("No goroutine information")
}
if len(bpi.Stacktrace) <= 0 {
t.Fatalf("No stacktrace\n")
}
if len(bpi.Variables) != 1 {
t.Fatalf("Wrong number of variables returned: %d", len(bpi.Variables))
}
if bpi.Variables[0].Name != "i" {
t.Fatalf("Wrong variable returned %s", bpi.Variables[0].Name)
}
t.Logf("Variable i is %v", bpi.Variables[0])
n, err := strconv.Atoi(bpi.Variables[0].Value)
if err != nil || n != count-1 {
t.Fatalf("Wrong variable value %q (%v %d)", bpi.Variables[0].Value, err, count)
}
}
if state.Exited {
continue
}
t.Logf("%v", state)
if state.Err != nil {
t.Fatalf("Unexpected error during continue: %v\n", state.Err)
}
}
if count != 3 {
t.Fatalf("Wrong number of continues hit: %d\n", count)
}
})
}
func TestClientServer_traceContinue2(t *testing.T) {
protest.AllowRecording(t)
withTestClient2("integrationprog", t, func(c service.Client) {
bp1, err := c.CreateBreakpoint(&api.Breakpoint{FunctionName: "main.main", Line: 1, Tracepoint: true})
if err != nil {
t.Fatalf("Unexpected error: %v\n", err)
}
bp2, err := c.CreateBreakpoint(&api.Breakpoint{FunctionName: "main.sayhi", Line: 1, Tracepoint: true})
if err != nil {
t.Fatalf("Unexpected error: %v\n", err)
}
countMain := 0
countSayhi := 0
contChan := c.Continue()
for state := range contChan {
if state.CurrentThread != nil && state.CurrentThread.Breakpoint != nil {
switch state.CurrentThread.Breakpoint.ID {
case bp1.ID:
countMain++
case bp2.ID:
countSayhi++
}
t.Logf("%v", state)
}
if state.Exited {
continue
}
if state.Err != nil {
t.Fatalf("Unexpected error during continue: %v\n", state.Err)
}
}
if countMain != 1 {
t.Fatalf("Wrong number of continues (main.main) hit: %d\n", countMain)
}
if countSayhi != 3 {
t.Fatalf("Wrong number of continues (main.sayhi) hit: %d\n", countSayhi)
}
})
}
func TestClientServer_FindLocations(t *testing.T) {
withTestClient2("locationsprog", t, func(c service.Client) {
someFunctionCallAddr := findLocationHelper(t, c, "locationsprog.go:26", false, 1, 0)[0]
someFunctionLine1 := findLocationHelper(t, c, "locationsprog.go:27", false, 1, 0)[0]
findLocationHelper(t, c, "anotherFunction:1", false, 1, someFunctionLine1)
findLocationHelper(t, c, "main.anotherFunction:1", false, 1, someFunctionLine1)
findLocationHelper(t, c, "anotherFunction", false, 1, someFunctionCallAddr)
findLocationHelper(t, c, "main.anotherFunction", false, 1, someFunctionCallAddr)
findLocationHelper(t, c, fmt.Sprintf("*0x%x", someFunctionCallAddr), false, 1, someFunctionCallAddr)
findLocationHelper(t, c, "sprog.go:26", true, 0, 0)
findLocationHelper(t, c, "String", true, 0, 0)
findLocationHelper(t, c, "main.String", true, 0, 0)
someTypeStringFuncAddr := findLocationHelper(t, c, "locationsprog.go:14", false, 1, 0)[0]
otherTypeStringFuncAddr := findLocationHelper(t, c, "locationsprog.go:18", false, 1, 0)[0]
findLocationHelper(t, c, "SomeType.String", false, 1, someTypeStringFuncAddr)
findLocationHelper(t, c, "(*SomeType).String", false, 1, someTypeStringFuncAddr)
findLocationHelper(t, c, "main.SomeType.String", false, 1, someTypeStringFuncAddr)
findLocationHelper(t, c, "main.(*SomeType).String", false, 1, someTypeStringFuncAddr)
// Issue #275
readfile := findLocationHelper(t, c, "io/ioutil.ReadFile", false, 1, 0)[0]
// Issue #296
findLocationHelper(t, c, "/io/ioutil.ReadFile", false, 1, readfile)
findLocationHelper(t, c, "ioutil.ReadFile", false, 1, readfile)
stringAddrs := findLocationHelper(t, c, "/^main.*Type.*String$/", false, 2, 0)
if otherTypeStringFuncAddr != stringAddrs[0] && otherTypeStringFuncAddr != stringAddrs[1] {
t.Fatalf("Wrong locations returned for \"/.*Type.*String/\", got: %v expected: %v and %v\n", stringAddrs, someTypeStringFuncAddr, otherTypeStringFuncAddr)
}
_, err := c.CreateBreakpoint(&api.Breakpoint{FunctionName: "main.main", Line: 4, Tracepoint: false})
if err != nil {
t.Fatalf("CreateBreakpoint(): %v\n", err)
}
<-c.Continue()
locationsprog35Addr := findLocationHelper(t, c, "locationsprog.go:35", false, 1, 0)[0]
findLocationHelper(t, c, fmt.Sprintf("%s:35", testProgPath(t, "locationsprog")), false, 1, locationsprog35Addr)
findLocationHelper(t, c, "+1", false, 1, locationsprog35Addr)
findLocationHelper(t, c, "35", false, 1, locationsprog35Addr)
findLocationHelper(t, c, "-1", false, 1, findLocationHelper(t, c, "locationsprog.go:33", false, 1, 0)[0])
findLocationHelper(t, c, `*amap["k"]`, false, 1, findLocationHelper(t, c, `amap["k"]`, false, 1, 0)[0])
})
withTestClient2("testnextdefer", t, func(c service.Client) {
firstMainLine := findLocationHelper(t, c, "testnextdefer.go:5", false, 1, 0)[0]
findLocationHelper(t, c, "main.main", false, 1, firstMainLine)
})
withTestClient2("stacktraceprog", t, func(c service.Client) {
stacktracemeAddr := findLocationHelper(t, c, "stacktraceprog.go:4", false, 1, 0)[0]
findLocationHelper(t, c, "main.stacktraceme", false, 1, stacktracemeAddr)
})
withTestClient2("locationsUpperCase", t, func(c service.Client) {
// Upper case
findLocationHelper(t, c, "locationsUpperCase.go:6", false, 1, 0)
// Fully qualified path
path := protest.Fixtures["locationsUpperCase"].Source
findLocationHelper(t, c, path+":6", false, 1, 0)
bp, err := c.CreateBreakpoint(&api.Breakpoint{File: path, Line: 6})
if err != nil {
t.Fatalf("Could not set breakpoint in %s: %v\n", path, err)
}
c.ClearBreakpoint(bp.ID)
// Allow `/` or `\` on Windows
if runtime.GOOS == "windows" {
findLocationHelper(t, c, filepath.FromSlash(path)+":6", false, 1, 0)
bp, err = c.CreateBreakpoint(&api.Breakpoint{File: filepath.FromSlash(path), Line: 6})
if err != nil {
t.Fatalf("Could not set breakpoint in %s: %v\n", filepath.FromSlash(path), err)
}
c.ClearBreakpoint(bp.ID)
}
// Case-insensitive on Windows, case-sensitive otherwise
shouldWrongCaseBeError := true
numExpectedMatches := 0
if runtime.GOOS == "windows" {
shouldWrongCaseBeError = false
numExpectedMatches = 1
}
findLocationHelper(t, c, strings.ToLower(path)+":6", shouldWrongCaseBeError, numExpectedMatches, 0)
bp, err = c.CreateBreakpoint(&api.Breakpoint{File: strings.ToLower(path), Line: 6})
if (err == nil) == shouldWrongCaseBeError {
t.Fatalf("Could not set breakpoint in %s: %v\n", strings.ToLower(path), err)
}
c.ClearBreakpoint(bp.ID)
})
}
func TestClientServer_FindLocationsAddr(t *testing.T) {
withTestClient2("locationsprog2", t, func(c service.Client) {
<-c.Continue()
afunction := findLocationHelper(t, c, "main.afunction", false, 1, 0)[0]
anonfunc := findLocationHelper(t, c, "main.main.func1", false, 1, 0)[0]
findLocationHelper(t, c, "*fn1", false, 1, afunction)
findLocationHelper(t, c, "*fn3", false, 1, anonfunc)
})
}
func TestClientServer_FindLocationsExactMatch(t *testing.T) {
// if an expression matches multiple functions but one of them is an exact
// match it should be used anyway.
// In this example "math/rand.Intn" would normally match "math/rand.Intn"
// and "math/rand.(*Rand).Intn" but since the first match is exact it
// should be prioritized.
withTestClient2("locationsprog3", t, func(c service.Client) {
<-c.Continue()
findLocationHelper(t, c, "math/rand.Intn", false, 1, 0)
})
}
func TestClientServer_EvalVariable(t *testing.T) {
withTestClient2("testvariables", t, func(c service.Client) {
state := <-c.Continue()
if state.Err != nil {
t.Fatalf("Continue(): %v\n", state.Err)
}
var1, err := c.EvalVariable(api.EvalScope{-1, 0}, "a1", normalLoadConfig)
assertNoError(err, t, "EvalVariable")
t.Logf("var1: %s", var1.SinglelineString())
if var1.Value != "foofoofoofoofoofoo" {
t.Fatalf("Wrong variable value: %s", var1.Value)
}
})
}
func TestClientServer_SetVariable(t *testing.T) {
withTestClient2("testvariables", t, func(c service.Client) {
state := <-c.Continue()
if state.Err != nil {
t.Fatalf("Continue(): %v\n", state.Err)
}
assertNoError(c.SetVariable(api.EvalScope{-1, 0}, "a2", "8"), t, "SetVariable()")
a2, err := c.EvalVariable(api.EvalScope{-1, 0}, "a2", normalLoadConfig)
if err != nil {
t.Fatalf("Could not evaluate variable: %v", err)
}
t.Logf("a2: %v", a2)
n, err := strconv.Atoi(a2.Value)
if err != nil || n != 8 {
t.Fatalf("Wrong variable value: %v", a2)
}
})
}
func TestClientServer_FullStacktrace(t *testing.T) {
protest.AllowRecording(t)
withTestClient2("goroutinestackprog", t, func(c service.Client) {
_, err := c.CreateBreakpoint(&api.Breakpoint{FunctionName: "main.stacktraceme", Line: -1})
assertNoError(err, t, "CreateBreakpoint()")
state := <-c.Continue()
if state.Err != nil {
t.Fatalf("Continue(): %v\n", state.Err)
}
gs, err := c.ListGoroutines()
assertNoError(err, t, "GoroutinesInfo()")
found := make([]bool, 10)
for _, g := range gs {
frames, err := c.Stacktrace(g.ID, 10, &normalLoadConfig)
assertNoError(err, t, fmt.Sprintf("Stacktrace(%d)", g.ID))
for i, frame := range frames {
if frame.Function == nil {
continue
}
if frame.Function.Name != "main.agoroutine" {
continue
}
t.Logf("frame %d: %v", i, frame)
for _, arg := range frame.Arguments {
if arg.Name != "i" {
continue
}
t.Logf("frame %v, variable i is %v\n", frame, arg)
argn, err := strconv.Atoi(arg.Value)
if err == nil {
found[argn] = true
}
}
}
}
for i := range found {
if !found[i] {
t.Fatalf("Goroutine %d not found", i)
}
}
state = <-c.Continue()
if state.Err != nil {
t.Fatalf("Continue(): %v\n", state.Err)
}
frames, err := c.Stacktrace(-1, 10, &normalLoadConfig)
assertNoError(err, t, "Stacktrace")
cur := 3
for i, frame := range frames {
if i == 0 {
continue
}
t.Logf("frame %d: %v", i, frame)
v := frame.Var("n")
if v == nil {
t.Fatalf("Could not find value of variable n in frame %d", i)
}
vn, err := strconv.Atoi(v.Value)
if err != nil || vn != cur {
t.Fatalf("Expected value %d got %d (error: %v)", cur, vn, err)
}
cur--
if cur < 0 {
break
}
}
})
}
func TestIssue355(t *testing.T) {
// After the target process has terminated, requests should return an error but not crash
protest.AllowRecording(t)
withTestClient2("continuetestprog", t, func(c service.Client) {
bp, err := c.CreateBreakpoint(&api.Breakpoint{FunctionName: "main.sayhi", Line: -1})
assertNoError(err, t, "CreateBreakpoint()")
ch := c.Continue()
state := <-ch
tid := state.CurrentThread.ID
gid := state.SelectedGoroutine.ID
assertNoError(state.Err, t, "First Continue()")
ch = c.Continue()
state = <-ch
if !state.Exited {
t.Fatalf("Target did not terminate after second continue")
}
ch = c.Continue()
state = <-ch
assertError(state.Err, t, "Continue()")
_, err = c.Next()
assertError(err, t, "Next()")
_, err = c.Step()
assertError(err, t, "Step()")
_, err = c.StepInstruction()
assertError(err, t, "StepInstruction()")
_, err = c.SwitchThread(tid)
assertError(err, t, "SwitchThread()")
_, err = c.SwitchGoroutine(gid)
assertError(err, t, "SwitchGoroutine()")
_, err = c.Halt()
assertError(err, t, "Halt()")
_, err = c.CreateBreakpoint(&api.Breakpoint{FunctionName: "main.main", Line: -1})
if testBackend != "rr" {
assertError(err, t, "CreateBreakpoint()")
}
_, err = c.ClearBreakpoint(bp.ID)
if testBackend != "rr" {
assertError(err, t, "ClearBreakpoint()")
}
_, err = c.ListThreads()
assertError(err, t, "ListThreads()")
_, err = c.GetThread(tid)
assertError(err, t, "GetThread()")
assertError(c.SetVariable(api.EvalScope{gid, 0}, "a", "10"), t, "SetVariable()")
_, err = c.ListLocalVariables(api.EvalScope{gid, 0}, normalLoadConfig)
assertError(err, t, "ListLocalVariables()")
_, err = c.ListFunctionArgs(api.EvalScope{gid, 0}, normalLoadConfig)
assertError(err, t, "ListFunctionArgs()")
_, err = c.ListRegisters(0, false)
assertError(err, t, "ListRegisters()")
_, err = c.ListGoroutines()
assertError(err, t, "ListGoroutines()")
_, err = c.Stacktrace(gid, 10, &normalLoadConfig)
assertError(err, t, "Stacktrace()")
_, err = c.FindLocation(api.EvalScope{gid, 0}, "+1")
assertError(err, t, "FindLocation()")
_, err = c.DisassemblePC(api.EvalScope{-1, 0}, 0x40100, api.IntelFlavour)
assertError(err, t, "DisassemblePC()")
})
}
func TestDisasm(t *testing.T) {
// Tests that disassembling by PC, range, and current PC all yield similar results
// Tests that disassembly by current PC will return a disassembly containing the instruction at PC
// Tests that stepping on a calculated CALL instruction will yield a disassembly that contains the
// effective destination of the CALL instruction
withTestClient2("locationsprog2", t, func(c service.Client) {
ch := c.Continue()
state := <-ch
assertNoError(state.Err, t, "Continue()")
locs, err := c.FindLocation(api.EvalScope{-1, 0}, "main.main")
assertNoError(err, t, "FindLocation()")
if len(locs) != 1 {
t.Fatalf("wrong number of locations for main.main: %d", len(locs))
}
d1, err := c.DisassemblePC(api.EvalScope{-1, 0}, locs[0].PC, api.IntelFlavour)
assertNoError(err, t, "DisassemblePC()")
if len(d1) < 2 {
t.Fatalf("wrong size of disassembly: %d", len(d1))
}
pcstart := d1[0].Loc.PC
pcend := d1[len(d1)-1].Loc.PC + uint64(len(d1[len(d1)-1].Bytes))
d2, err := c.DisassembleRange(api.EvalScope{-1, 0}, pcstart, pcend, api.IntelFlavour)
assertNoError(err, t, "DisassembleRange()")
if len(d1) != len(d2) {
t.Logf("d1: %v", d1)
t.Logf("d2: %v", d2)
t.Fatal("mismatched length between disassemble pc and disassemble range")
}
d3, err := c.DisassemblePC(api.EvalScope{-1, 0}, state.CurrentThread.PC, api.IntelFlavour)
assertNoError(err, t, "DisassemblePC() - second call")
if len(d1) != len(d3) {
t.Logf("d1: %v", d1)
t.Logf("d3: %v", d3)
t.Fatal("mismatched length between the two calls of disassemble pc")
}
// look for static call to afunction() on line 29
found := false
for i := range d3 {
if d3[i].Loc.Line == 29 && strings.HasPrefix(d3[i].Text, "call") && d3[i].DestLoc != nil && d3[i].DestLoc.Function != nil && d3[i].DestLoc.Function.Name == "main.afunction" {
found = true
break
}
}
if !found {
t.Fatal("Could not find call to main.afunction on line 29")
}
haspc := false
for i := range d3 {
if d3[i].AtPC {
haspc = true
break
}
}
if !haspc {
t.Logf("d3: %v", d3)
t.Fatal("PC instruction not found")
}
startinstr := getCurinstr(d3)
count := 0
for {
if count > 20 {
t.Fatal("too many step instructions executed without finding a call instruction")
}
state, err := c.StepInstruction()
assertNoError(err, t, fmt.Sprintf("StepInstruction() %d", count))
d3, err = c.DisassemblePC(api.EvalScope{-1, 0}, state.CurrentThread.PC, api.IntelFlavour)
assertNoError(err, t, fmt.Sprintf("StepInstruction() %d", count))
curinstr := getCurinstr(d3)
if curinstr == nil {
t.Fatalf("Could not find current instruction %d", count)
}
if curinstr.Loc.Line != startinstr.Loc.Line {
t.Fatal("Calling StepInstruction() repeatedly did not find the call instruction")
}
if strings.HasPrefix(curinstr.Text, "call") {
t.Logf("call: %v", curinstr)
if curinstr.DestLoc == nil || curinstr.DestLoc.Function == nil {
t.Fatalf("Call instruction does not have destination: %v", curinstr)
}
if curinstr.DestLoc.Function.Name != "main.afunction" {
t.Fatalf("Call instruction destination not main.afunction: %v", curinstr)
}
break
}
count++
}
})
}
func TestNegativeStackDepthBug(t *testing.T) {
// Requesting a stacktrace with a negative depth should return an error but not crash
protest.AllowRecording(t)
withTestClient2("continuetestprog", t, func(c service.Client) {
_, err := c.CreateBreakpoint(&api.Breakpoint{FunctionName: "main.sayhi", Line: -1})
assertNoError(err, t, "CreateBreakpoint()")
ch := c.Continue()
state := <-ch
assertNoError(state.Err, t, "Continue()")
_, err = c.Stacktrace(-1, -2, &normalLoadConfig)
assertError(err, t, "Stacktrace()")
})
}
func TestClientServer_CondBreakpoint(t *testing.T) {
protest.AllowRecording(t)
withTestClient2("parallel_next", t, func(c service.Client) {
bp, err := c.CreateBreakpoint(&api.Breakpoint{FunctionName: "main.sayhi", Line: 1})
assertNoError(err, t, "CreateBreakpoint()")
bp.Cond = "n == 7"
assertNoError(c.AmendBreakpoint(bp), t, "AmendBreakpoint() 1")
bp, err = c.GetBreakpoint(bp.ID)
assertNoError(err, t, "GetBreakpoint() 1")
bp.Variables = append(bp.Variables, "n")
assertNoError(c.AmendBreakpoint(bp), t, "AmendBreakpoint() 2")
bp, err = c.GetBreakpoint(bp.ID)
assertNoError(err, t, "GetBreakpoint() 2")
if bp.Cond == "" {
t.Fatalf("No condition set on breakpoint %#v", bp)
}
if len(bp.Variables) != 1 {
t.Fatalf("Wrong number of expressions to evaluate on breakpoint %#v", bp)
}
state := <-c.Continue()
assertNoError(state.Err, t, "Continue()")
nvar, err := c.EvalVariable(api.EvalScope{-1, 0}, "n", normalLoadConfig)
assertNoError(err, t, "EvalVariable()")
if nvar.SinglelineString() != "7" {
t.Fatalf("Stopped on wrong goroutine %s\n", nvar.Value)
}
})
}
func TestSkipPrologue(t *testing.T) {
withTestClient2("locationsprog2", t, func(c service.Client) {
<-c.Continue()
afunction := findLocationHelper(t, c, "main.afunction", false, 1, 0)[0]
findLocationHelper(t, c, "*fn1", false, 1, afunction)
findLocationHelper(t, c, "locationsprog2.go:8", false, 1, afunction)
afunction0 := findLocationHelper(t, c, "main.afunction:0", false, 1, 0)[0]
if afunction == afunction0 {
t.Fatal("Skip prologue failed")
}
})
}
func TestSkipPrologue2(t *testing.T) {
withTestClient2("callme", t, func(c service.Client) {
callme := findLocationHelper(t, c, "main.callme", false, 1, 0)[0]
callmeZ := findLocationHelper(t, c, "main.callme:0", false, 1, 0)[0]
findLocationHelper(t, c, "callme.go:5", false, 1, callme)
if callme == callmeZ {
t.Fatal("Skip prologue failed")
}
callme2 := findLocationHelper(t, c, "main.callme2", false, 1, 0)[0]
callme2Z := findLocationHelper(t, c, "main.callme2:0", false, 1, 0)[0]
findLocationHelper(t, c, "callme.go:12", false, 1, callme2)
if callme2 == callme2Z {
t.Fatal("Skip prologue failed")
}
callme3 := findLocationHelper(t, c, "main.callme3", false, 1, 0)[0]
callme3Z := findLocationHelper(t, c, "main.callme3:0", false, 1, 0)[0]
ver, _ := goversion.Parse(runtime.Version())
if ver.Major < 0 || ver.AfterOrEqual(goversion.GoVer18Beta) {
findLocationHelper(t, c, "callme.go:19", false, 1, callme3)
} else {
// callme3 does not have local variables, therefore the first line of the
// function is immediately after the prologue.
// This is only true before Go 1.8: frame pointer chaining, introduced in 1.8,
// adds a bit of prologue even for functions without local variables.
findLocationHelper(t, c, "callme.go:19", false, 1, callme3Z)
}
if callme3 == callme3Z {
t.Fatal("Skip prologue failed")
}
})
}
func TestIssue419(t *testing.T) {
// Calling service/rpc.(*Client).Halt could cause a crash because both Halt and Continue simultaneously
// try to read 'runtime.g' and debug/dwarf.Data.Type is not thread safe
withTestClient2("issue419", t, func(c service.Client) {
go func() {
rand.Seed(time.Now().Unix())
d := time.Duration(rand.Intn(4) + 1)
time.Sleep(d * time.Second)
_, err := c.Halt()
assertNoError(err, t, "RequestManualStop()")
}()
statech := c.Continue()
state := <-statech
assertNoError(state.Err, t, "Continue()")
})
}
func TestTypesCommand(t *testing.T) {
protest.AllowRecording(t)
withTestClient2("testvariables2", t, func(c service.Client) {
state := <-c.Continue()
assertNoError(state.Err, t, "Continue()")
types, err := c.ListTypes("")
assertNoError(err, t, "ListTypes()")
found := false
for i := range types {
if types[i] == "main.astruct" {
found = true
break
}
}
if !found {
t.Fatal("Type astruct not found in ListTypes output")
}
types, err = c.ListTypes("^main.astruct$")
assertNoError(err, t, "ListTypes(\"main.astruct\")")
if len(types) != 1 {
t.Fatalf("ListTypes(\"^main.astruct$\") did not filter properly, expected 1 got %d: %v", len(types), types)
}
})
}
func TestIssue406(t *testing.T) {
protest.AllowRecording(t)
withTestClient2("issue406", t, func(c service.Client) {
locs, err := c.FindLocation(api.EvalScope{-1, 0}, "issue406.go:146")
assertNoError(err, t, "FindLocation()")
_, err = c.CreateBreakpoint(&api.Breakpoint{Addr: locs[0].PC})
assertNoError(err, t, "CreateBreakpoint()")
ch := c.Continue()
state := <-ch
assertNoError(state.Err, t, "Continue()")
v, err := c.EvalVariable(api.EvalScope{-1, 0}, "cfgtree", normalLoadConfig)
assertNoError(err, t, "EvalVariable()")
vs := v.MultilineString("")
t.Logf("cfgtree formats to: %s\n", vs)
})
}
func TestEvalExprName(t *testing.T) {
withTestClient2("testvariables2", t, func(c service.Client) {
state := <-c.Continue()
assertNoError(state.Err, t, "Continue()")
var1, err := c.EvalVariable(api.EvalScope{-1, 0}, "i1+1", normalLoadConfig)
assertNoError(err, t, "EvalVariable")
const name = "i1+1"
t.Logf("i1+1 → %#v", var1)
if var1.Name != name {
t.Fatalf("Wrong variable name %q, expected %q", var1.Name, name)
}
})
}
func TestClientServer_Issue528(t *testing.T) {
// FindLocation with Receiver.MethodName syntax does not work
// on remote package names due to a bug in debug/gosym that
// was fixed in go 1.7.
// Commit that fixes the issue in go: f744717d1924340b8f5e5a385e99078693ad9097
ver, _ := goversion.Parse(runtime.Version())
if ver.Major > 0 && !ver.AfterOrEqual(goversion.GoVersion{1, 7, -1, 0, 0, ""}) {
t.Log("Test skipped")
return
}
withTestClient2("issue528", t, func(c service.Client) {
findLocationHelper(t, c, "State.Close", false, 1, 0)
})
}
func TestClientServer_FpRegisters(t *testing.T) {
regtests := []struct{ name, value string }{
{"ST(0)", "0x3fffe666660000000000"},
{"ST(1)", "0x3fffd9999a0000000000"},
{"ST(2)", "0x3fffcccccd0000000000"},
{"ST(3)", "0x3fffc000000000000000"},
{"ST(4)", "0x3fffb333333333333000"},
{"ST(5)", "0x3fffa666666666666800"},
{"ST(6)", "0x3fff9999999999999800"},
{"ST(7)", "0x3fff8cccccccccccd000"},
{"XMM0", "0x3ff33333333333333ff199999999999a v2_int={ 3ff199999999999a 3ff3333333333333 } v4_int={ 9999999a 3ff19999 33333333 3ff33333 } v8_int={ 999a 9999 9999 3ff1 3333 3333 3333 3ff3 } v16_int={ 9a 99 99 99 99 99 f1 3f 33 33 33 33 33 33 f3 3f }"},
{"XMM1", "0x3ff66666666666663ff4cccccccccccd"},
{"XMM2", "0x3fe666663fd9999a3fcccccd3fc00000"},
{"XMM3", "0x3ff199999999999a3ff3333333333333"},
{"XMM4", "0x3ff4cccccccccccd3ff6666666666666"},
{"XMM5", "0x3fcccccd3fc000003fe666663fd9999a"},
{"XMM6", "0x4004cccccccccccc4003333333333334"},
{"XMM7", "0x40026666666666664002666666666666"},
{"XMM8", "0x4059999a404ccccd4059999a404ccccd"},
}
protest.AllowRecording(t)
withTestClient2("fputest/", t, func(c service.Client) {
<-c.Continue()
regs, err := c.ListRegisters(0, true)
assertNoError(err, t, "ListRegisters()")
t.Logf("%s", regs.String())
for _, regtest := range regtests {
found := false
for _, reg := range regs {
if reg.Name == regtest.name {
found = true
if !strings.HasPrefix(reg.Value, regtest.value) {
t.Fatalf("register %s expected %q got %q", reg.Name, regtest.value, reg.Value)
}
}
}
if !found {
t.Fatalf("register %s not found: %v", regtest.name, regs)
}
}
})
}
func TestClientServer_RestartBreakpointPosition(t *testing.T) {
protest.AllowRecording(t)
withTestClient2("locationsprog2", t, func(c service.Client) {
bpBefore, err := c.CreateBreakpoint(&api.Breakpoint{FunctionName: "main.afunction", Line: -1, Tracepoint: true, Name: "this"})
addrBefore := bpBefore.Addr
t.Logf("%x\n", bpBefore.Addr)
assertNoError(err, t, "CreateBreakpoint")
stateCh := c.Continue()
for range stateCh {
}
_, err = c.Halt()
assertNoError(err, t, "Halt")
_, err = c.Restart()
assertNoError(err, t, "Restart")
bps, err := c.ListBreakpoints()
assertNoError(err, t, "ListBreakpoints")
for _, bp := range bps {
if bp.Name == bpBefore.Name {
if bp.Addr != addrBefore {
t.Fatalf("Address changed after restart: %x %x", bp.Addr, addrBefore)
}
t.Logf("%x %x\n", bp.Addr, addrBefore)
}
}
})
}
func TestClientServer_SelectedGoroutineLoc(t *testing.T) {
// CurrentLocation of SelectedGoroutine should reflect what's happening on
// the thread running the goroutine, not the position the goroutine was in
// the last time it was parked.
protest.AllowRecording(t)
withTestClient2("testprog", t, func(c service.Client) {
_, err := c.CreateBreakpoint(&api.Breakpoint{FunctionName: "main.main", Line: -11})
assertNoError(err, t, "CreateBreakpoint")
s := <-c.Continue()
assertNoError(s.Err, t, "Continue")
gloc := s.SelectedGoroutine.CurrentLoc
if gloc.PC != s.CurrentThread.PC {
t.Errorf("mismatched PC %#x %#x", gloc.PC, s.CurrentThread.PC)
}
if gloc.File != s.CurrentThread.File || gloc.Line != s.CurrentThread.Line {
t.Errorf("mismatched file:lineno: %s:%d %s:%d", gloc.File, gloc.Line, s.CurrentThread.File, s.CurrentThread.Line)
}
})
}
func TestClientServer_ReverseContinue(t *testing.T) {
protest.AllowRecording(t)
if testBackend != "rr" {
t.Skip("backend is not rr")
}
withTestClient2("continuetestprog", t, func(c service.Client) {
_, err := c.CreateBreakpoint(&api.Breakpoint{FunctionName: "main.main", Line: -1})
assertNoError(err, t, "CreateBreakpoint(main.main)")
_, err = c.CreateBreakpoint(&api.Breakpoint{FunctionName: "main.sayhi", Line: -1})
assertNoError(err, t, "CreateBreakpoint(main.sayhi)")
state := <-c.Continue()
assertNoError(state.Err, t, "first continue")
mainPC := state.CurrentThread.PC
t.Logf("after first continue %#x", mainPC)
state = <-c.Continue()
assertNoError(state.Err, t, "second continue")
sayhiPC := state.CurrentThread.PC
t.Logf("after second continue %#x", sayhiPC)
if mainPC == sayhiPC {
t.Fatalf("expected different PC after second PC (%#x)", mainPC)
}
state = <-c.Rewind()
assertNoError(state.Err, t, "rewind")
if mainPC != state.CurrentThread.PC {
t.Fatalf("Expected rewind to go back to the first breakpoint: %#x", state.CurrentThread.PC)
}
})
}
func TestClientServer_collectBreakpointInfoOnNext(t *testing.T) {
protest.AllowRecording(t)
withTestClient2("testnextprog", t, func(c service.Client) {
_, err := c.CreateBreakpoint(&api.Breakpoint{
Addr: findLocationHelper(t, c, "testnextprog.go:23", false, 1, 0)[0],
Variables: []string{"j"},
LoadLocals: &normalLoadConfig})
assertNoError(err, t, "CreateBreakpoint()")
_, err = c.CreateBreakpoint(&api.Breakpoint{
Addr: findLocationHelper(t, c, "testnextprog.go:24", false, 1, 0)[0],
Variables: []string{"j"},
LoadLocals: &normalLoadConfig})
assertNoError(err, t, "CreateBreakpoint()")
stateBefore := <-c.Continue()
assertNoError(stateBefore.Err, t, "Continue()")
if stateBefore.CurrentThread.Line != 23 {
t.Fatalf("wrong line number %s:%d, expected %d", stateBefore.CurrentThread.File, stateBefore.CurrentThread.Line, 23)
}
if bi := stateBefore.CurrentThread.BreakpointInfo; bi == nil || len(bi.Variables) != 1 {
t.Fatalf("bad breakpoint info %v", bi)
}
stateAfter, err := c.Next()
assertNoError(err, t, "Next()")
if stateAfter.CurrentThread.Line != 24 {
t.Fatalf("wrong line number %s:%d, expected %d", stateAfter.CurrentThread.File, stateAfter.CurrentThread.Line, 24)
}
if bi := stateAfter.CurrentThread.BreakpointInfo; bi == nil || len(bi.Variables) != 1 {
t.Fatalf("bad breakpoint info %v", bi)
}
})
}
func TestClientServer_collectBreakpointInfoError(t *testing.T) {
protest.AllowRecording(t)
withTestClient2("testnextprog", t, func(c service.Client) {
_, err := c.CreateBreakpoint(&api.Breakpoint{
Addr: findLocationHelper(t, c, "testnextprog.go:23", false, 1, 0)[0],
Variables: []string{"nonexistentvariable", "j"},
LoadLocals: &normalLoadConfig})
assertNoError(err, t, "CreateBreakpoint()")
state := <-c.Continue()
assertNoError(state.Err, t, "Continue()")
})
}
| [
"\"PROCTEST\""
]
| []
| [
"PROCTEST"
]
| [] | ["PROCTEST"] | go | 1 | 0 | |
molecule/test/scenarios/driver/podman/molecule/multi-node/tests/test_bar.py | """Testinfra tests."""
import os
import re
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ["MOLECULE_INVENTORY_FILE"]
).get_hosts("bar")
def test_hostname(host):
"""Validate hostname."""
assert re.search(r"instance-1.*", host.check_output("hostname -s"))
def test_etc_molecule_directory(host):
"""Validate molecule directory."""
f = host.file("/etc/molecule")
assert f.is_directory
assert f.user == "root"
assert f.group == "root"
assert f.mode == 0o755
def test_etc_molecule_ansible_hostname_file(host):
"""Validate molecule file."""
f = host.file("/etc/molecule/{}".format(host.check_output("hostname -s")))
assert f.is_file
assert f.user == "root"
assert f.group == "root"
assert f.mode == 0o644
| []
| []
| [
"MOLECULE_INVENTORY_FILE"
]
| [] | ["MOLECULE_INVENTORY_FILE"] | python | 1 | 0 | |
.github/scripts/get_release_version.py | # ------------------------------------------------------------
# Copyright (c) Microsoft Corporation and Dapr Contributors.
# Licensed under the MIT License.
# ------------------------------------------------------------
# This script parses the release version from the Git tag and sets the parsed
# version in the REL_VERSION environment variable.
import os
import sys
gitRef = os.getenv("GITHUB_REF")
tagRefPrefix = "refs/tags/v"
with open(os.getenv("GITHUB_ENV"), "a") as githubEnv:
if gitRef is None or not gitRef.startswith(tagRefPrefix):
githubEnv.write("REL_VERSION=edge\n")
print ("This is daily build from {}...".format(gitRef))
sys.exit(0)
releaseVersion = gitRef[len(tagRefPrefix):]
releaseNotePath="docs/release_notes/v{}.md".format(releaseVersion)
if gitRef.find("-rc.") > 0:
print ("Release Candidate build from {}...".format(gitRef))
else:
# Set LATEST_RELEASE to true
githubEnv.write("LATEST_RELEASE=true\n")
print ("Release build from {}...".format(gitRef))
githubEnv.write("REL_VERSION={}\n".format(releaseVersion))
| []
| []
| [
"GITHUB_REF",
"GITHUB_ENV"
]
| [] | ["GITHUB_REF", "GITHUB_ENV"] | python | 2 | 0 | |
tests/common/impala_test_suite.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# The base class that should be used for almost all Impala tests
import glob
import grp
import json
import logging
import os
import pprint
import pwd
import pytest
import re
import requests
import socket
import subprocess
import time
from functools import wraps
from getpass import getuser
from random import choice
from subprocess import check_call
from tests.common.base_test_suite import BaseTestSuite
from tests.common.environ import (
HIVE_MAJOR_VERSION,
MANAGED_WAREHOUSE_DIR,
EXTERNAL_WAREHOUSE_DIR)
from tests.common.errors import Timeout
from tests.common.impala_connection import create_connection
from tests.common.impala_service import ImpaladService
from tests.common.test_dimensions import (
ALL_BATCH_SIZES,
ALL_CLUSTER_SIZES,
ALL_DISABLE_CODEGEN_OPTIONS,
ALL_NODES_ONLY,
TableFormatInfo,
create_exec_option_dimension,
get_dataset_from_workload,
load_table_info_dimension)
from tests.common.test_result_verifier import (
try_compile_regex,
verify_lineage,
verify_raw_results,
verify_runtime_profile)
from tests.common.test_vector import ImpalaTestDimension
from tests.performance.query import Query
from tests.performance.query_exec_functions import execute_using_jdbc
from tests.performance.query_executor import JdbcQueryExecConfig
from tests.util.filesystem_utils import (
IS_S3,
IS_ABFS,
IS_ADLS,
IS_GCS,
IS_HDFS,
S3_BUCKET_NAME,
S3GUARD_ENABLED,
ADLS_STORE_NAME,
FILESYSTEM_PREFIX,
FILESYSTEM_NAME)
from tests.util.hdfs_util import (
HdfsConfig,
get_webhdfs_client,
get_webhdfs_client_from_conf,
NAMENODE,
DelegatingHdfsClient,
HadoopFsCommandLineClient)
from tests.util.test_file_parser import (
QueryTestSectionReader,
parse_query_test_file,
write_test_file)
from tests.util.thrift_util import create_transport
# Imports required for Hive Metastore Client
from hive_metastore import ThriftHiveMetastore
from thrift.protocol import TBinaryProtocol
# Initializing the logger before conditional imports, since we will need it
# for them.
LOG = logging.getLogger('impala_test_suite')
# The ADLS python client isn't downloaded when ADLS isn't the target FS, so do a
# conditional import.
if IS_ADLS:
try:
from tests.util.adls_util import ADLSClient
except ImportError:
LOG.error("Need the ADLSClient for testing with ADLS")
IMPALAD_HOST_PORT_LIST = pytest.config.option.impalad.split(',')
assert len(IMPALAD_HOST_PORT_LIST) > 0, 'Must specify at least 1 impalad to target'
IMPALAD = IMPALAD_HOST_PORT_LIST[0]
IMPALAD_HOSTNAME = IMPALAD.split(':')[0]
IMPALAD_BEESWAX_HOST_PORT = IMPALAD_HOST_PORT_LIST[0]
IMPALAD_HS2_HOST_PORT =\
IMPALAD_HOSTNAME + ":" + pytest.config.option.impalad_hs2_port
IMPALAD_HS2_HTTP_HOST_PORT =\
IMPALAD_HOSTNAME + ":" + pytest.config.option.impalad_hs2_http_port
HIVE_HS2_HOST_PORT = pytest.config.option.hive_server2
WORKLOAD_DIR = os.environ['IMPALA_WORKLOAD_DIR']
HDFS_CONF = HdfsConfig(pytest.config.option.minicluster_xml_conf)
TARGET_FILESYSTEM = os.getenv("TARGET_FILESYSTEM") or "hdfs"
IMPALA_HOME = os.getenv("IMPALA_HOME")
INTERNAL_LISTEN_HOST = os.getenv("INTERNAL_LISTEN_HOST")
# Some tests use the IP instead of the host.
INTERNAL_LISTEN_IP = socket.gethostbyname_ex(INTERNAL_LISTEN_HOST)[2][0]
EE_TEST_LOGS_DIR = os.getenv("IMPALA_EE_TEST_LOGS_DIR")
# Match any SET statement. Assume that query options' names
# only contain alphabets, underscores and digits after position 1.
# The statement may include SQL line comments starting with --, which we need to
# strip out. The test file parser already strips out comments starting with #.
COMMENT_LINES_REGEX = r'(?:\s*--.*\n)*'
SET_PATTERN = re.compile(
COMMENT_LINES_REGEX + r'\s*set\s*([a-zA-Z_][a-zA-Z0-9_]*)\s*=*', re.I)
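# For illustration, SET_PATTERN matches a leading SET statement and captures the
# option name, skipping SQL line comments (the query strings below are hypothetical):
#   SET_PATTERN.match('set mem_limit=1g').groups()[0]           -> 'mem_limit'
#   SET_PATTERN.match('-- pick a level\nset explain_level = 2') -> captures 'explain_level'
#   SET_PATTERN.match('select 1')                               -> None (not a SET statement)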
METRICS_URL = 'http://{0}:25000/metrics?json'.format(IMPALAD_HOSTNAME)
VARZ_URL = 'http://{0}:25000/varz?json'.format(IMPALAD_HOSTNAME)
GROUP_NAME = grp.getgrgid(pwd.getpwnam(getuser()).pw_gid).gr_name
# Base class for Impala tests. All impala test cases should inherit from this class
class ImpalaTestSuite(BaseTestSuite):
@classmethod
def add_test_dimensions(cls):
"""
A hook for adding additional dimensions.
By default, load the table_info and exec_option dimensions; a test that wants to
add more dimensions or different dimensions can override this function.
"""
super(ImpalaTestSuite, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_dimension(
cls.create_table_info_dimension(cls.exploration_strategy()))
cls.ImpalaTestMatrix.add_dimension(cls.__create_exec_option_dimension())
# Execute tests through Beeswax by default. Individual tests that have been converted
# to work with the HS2 client can add HS2 in addition to or instead of beeswax.
cls.ImpalaTestMatrix.add_dimension(ImpalaTestDimension('protocol', 'beeswax'))
@classmethod
def setup_class(cls):
"""Setup section that runs before each test suite"""
cls.client = None
cls.hive_client = None
cls.hs2_client = None
cls.hs2_http_client = None
# Create a Hive Metastore Client (used for executing some test SETUP steps)
metastore_host, metastore_port = pytest.config.option.metastore_server.split(':')
trans_type = 'buffered'
if pytest.config.option.use_kerberos:
trans_type = 'kerberos'
cls.hive_transport = create_transport(
host=metastore_host,
port=metastore_port,
service=pytest.config.option.hive_service_name,
transport_type=trans_type)
protocol = TBinaryProtocol.TBinaryProtocol(cls.hive_transport)
cls.hive_client = ThriftHiveMetastore.Client(protocol)
cls.hive_transport.open()
cls.create_impala_clients()
# Default query options are populated on demand.
cls.default_query_options = {}
cls.impalad_test_service = cls.create_impala_service()
# There are multiple clients for interacting with the underlying storage service.
#
# There are two main types of clients: filesystem-specific clients and CLI clients.
# CLI clients all use the 'hdfs dfs' CLI to execute operations against a target
# filesystem.
#
# 'filesystem_client' is a generic interface for doing filesystem operations that
# works across all the filesystems that Impala supports. 'filesystem_client' uses
# either the HDFS command line (e.g. 'hdfs dfs'), a filesystem-specific library, or
# a wrapper around both, to implement common HDFS operations.
#
# *Test writers should always use 'filesystem_client' unless they are using filesystem
# specific functionality (e.g. HDFS ACLs).*
#
# The implementation of 'filesystem_client' for each filesystem is:
# HDFS: uses a mixture of pywebhdfs (which is faster than the HDFS CLI) and the
# HDFS CLI
# S3: uses the HDFS CLI
# ABFS: uses the HDFS CLI
# ADLS: uses a mixture of azure-data-lake-store-python and the HDFS CLI (TODO:
# this should completely switch to the HDFS CLI once we test it)
# GCS: uses the HDFS CLI
#
# 'hdfs_client' is a HDFS-specific client library, and it only works when running on
# HDFS. When using 'hdfs_client', the test must be skipped on everything other than
# HDFS. This is only really useful for tests that do HDFS ACL operations. The
# 'hdfs_client' supports all the methods and functionality of the 'filesystem_client',
# with additional support for ACL operations such as chmod, chown, getacl, and setacl.
# 'hdfs_client' is set to None on non-HDFS systems.
if IS_HDFS:
cls.hdfs_client = cls.create_hdfs_client()
cls.filesystem_client = cls.hdfs_client
elif IS_S3:
# S3Guard needs filesystem operations to go through the s3 connector. Use the
# HDFS command line client.
cls.filesystem_client = HadoopFsCommandLineClient("S3")
elif IS_ABFS:
# ABFS is implemented via HDFS command line client
cls.filesystem_client = HadoopFsCommandLineClient("ABFS")
elif IS_ADLS:
cls.filesystem_client = ADLSClient(ADLS_STORE_NAME)
elif IS_GCS:
# GCS is implemented via HDFS command line client
cls.filesystem_client = HadoopFsCommandLineClient("GCS")
# Override the shell history path so that commands run by any tests
# don't write any history into the developer's file.
os.environ['IMPALA_HISTFILE'] = '/dev/null'
@classmethod
def teardown_class(cls):
"""Setup section that runs after each test suite"""
# Cleanup the Impala and Hive Metastore client connections
if cls.hive_transport:
cls.hive_transport.close()
cls.close_impala_clients()
@classmethod
def create_impala_client(cls, host_port=None, protocol='beeswax',
is_hive=False):
if host_port is None:
host_port = cls.__get_default_host_port(protocol)
client = create_connection(host_port=host_port,
use_kerberos=pytest.config.option.use_kerberos, protocol=protocol,
is_hive=is_hive)
client.connect()
return client
@classmethod
def get_impalad_cluster_size(cls):
return len(cls.__get_cluster_host_ports('beeswax'))
@classmethod
def create_client_for_nth_impalad(cls, nth=0):
# TODO Extended it to other protocols
protocol = 'beeswax'
host_port = cls.__get_cluster_host_ports(protocol)[nth]
return ImpalaTestSuite.create_impala_client(host_port, protocol=protocol)
@classmethod
def create_impala_clients(cls):
"""Creates Impala clients for all supported protocols."""
# The default connection (self.client) is Beeswax so that existing tests, which
# assume Beeswax, do not need modification (yet).
cls.client = cls.create_impala_client(protocol='beeswax')
cls.hs2_client = None
try:
cls.hs2_client = cls.create_impala_client(protocol='hs2')
except Exception, e:
# HS2 connection can fail for benign reasons, e.g. running with unsupported auth.
LOG.info("HS2 connection setup failed, continuing...: {0}".format(e))
cls.hs2_http_client = None
try:
cls.hs2_http_client = cls.create_impala_client(protocol='hs2-http')
except Exception, e:
# HS2 HTTP connection can fail for benign reasons, e.g. running with unsupported
# auth.
LOG.info("HS2 HTTP connection setup failed, continuing...: {0}".format(e))
@classmethod
def close_impala_clients(cls):
"""Closes Impala clients created by create_impala_clients()."""
if cls.client:
cls.client.close()
cls.client = None
if cls.hs2_client:
cls.hs2_client.close()
cls.hs2_client = None
if cls.hs2_http_client:
cls.hs2_http_client.close()
cls.hs2_http_client = None
@classmethod
def __get_default_host_port(cls, protocol):
if protocol == 'beeswax':
return IMPALAD
elif protocol == 'hs2-http':
return IMPALAD_HS2_HTTP_HOST_PORT
else:
assert protocol == 'hs2'
return IMPALAD_HS2_HOST_PORT
@classmethod
def __get_cluster_host_ports(cls, protocol):
"""Return a list of host/port combinations for all impalads in the cluster."""
if protocol == 'beeswax':
return IMPALAD_HOST_PORT_LIST
else:
assert protocol in ('hs2', 'hs2-http')
# TODO: support running tests against multiple coordinators for HS2. It should work,
# we just need to update all test runners to pass in all host/port combinations for
# the cluster and then handle it here.
raise NotImplementedError(
"Not yet implemented: only one HS2 host/port can be configured")
@classmethod
def create_impala_service(
cls, host_port=IMPALAD, webserver_interface="", webserver_port=25000):
host, port = host_port.split(':')
if webserver_interface == "":
webserver_interface = host
return ImpaladService(host, beeswax_port=port,
webserver_interface=webserver_interface, webserver_port=webserver_port)
@classmethod
def create_hdfs_client(cls):
if pytest.config.option.namenode_http_address is None:
webhdfs_client = get_webhdfs_client_from_conf(HDFS_CONF)
else:
host, port = pytest.config.option.namenode_http_address.split(":")
webhdfs_client = get_webhdfs_client(host, port)
return DelegatingHdfsClient(webhdfs_client, HadoopFsCommandLineClient())
@classmethod
def all_db_names(cls):
results = cls.client.execute("show databases").data
# Extract first column - database name
return [row.split("\t")[0] for row in results]
@classmethod
def cleanup_db(cls, db_name, sync_ddl=1):
cls.client.execute("use default")
cls.client.set_configuration({'sync_ddl': sync_ddl})
cls.client.execute("drop database if exists `" + db_name + "` cascade")
def __restore_query_options(self, query_options_changed, impalad_client):
"""
Restore the list of modified query options to their default values.
"""
# Populate the default query option if it's empty.
if not self.default_query_options:
query_options = impalad_client.get_default_configuration()
for key, value in query_options.iteritems():
self.default_query_options[key.upper()] = value
# Restore all the changed query options.
for query_option in query_options_changed:
query_option = query_option.upper()
if not query_option in self.default_query_options:
continue
default_val = self.default_query_options[query_option]
query_str = 'SET ' + query_option + '="' + default_val + '"'
try:
impalad_client.execute(query_str)
except Exception as e:
LOG.info('Unexpected exception when executing ' + query_str + ' : ' + str(e))
def get_impala_partition_info(self, table_name, *include_fields):
"""
Find information about partitions of a table, as returned by a SHOW PARTITIONS
statement. Return a list that contains one tuple for each partition.
If 'include_fields' is not specified, the tuples will contain all the fields returned
by SHOW PARTITION. Otherwise, return only those fields whose names are listed in
'include_fields'. Field names are compared case-insensitively.
"""
exec_result = self.client.execute('show partitions %s' % table_name)
fieldSchemas = exec_result.schema.fieldSchemas
fields_dict = {}
for idx, fs in enumerate(fieldSchemas):
fields_dict[fs.name.lower()] = idx
rows = exec_result.get_data().split('\n')
rows.pop()
fields_idx = []
for fn in include_fields:
fn = fn.lower()
assert fn in fields_dict, 'Invalid field: %s' % fn
fields_idx.append(fields_dict[fn])
result = []
for row in rows:
fields = row.split('\t')
if not fields_idx:
result_fields = fields
else:
result_fields = []
for i in fields_idx:
result_fields.append(fields[i])
result.append(tuple(result_fields))
return result
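# Sketch of typical usage (the table name and returned values are hypothetical):
#   self.get_impala_partition_info('functional.alltypes', 'year', 'month')
#   -> [('2009', '1'), ('2009', '2'), ...], one tuple per partition.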
def get_debug_page(self, page_url):
"""Returns the content of the debug page 'page_url' as json."""
response = requests.get(page_url)
assert response.status_code == requests.codes.ok
return json.loads(response.text)
def get_var_current_val(self, var):
"""Returns the current value of a given Impalad flag variable."""
# Parse the /varz endpoint to get the flag information.
varz = self.get_debug_page(VARZ_URL)
assert 'flags' in varz.keys()
filtered_varz = filter(lambda flag: flag['name'] == var, varz['flags'])
assert len(filtered_varz) == 1
assert 'current' in filtered_varz[0].keys()
return filtered_varz[0]['current'].strip()
def get_metric(self, name):
"""Finds the metric with name 'name' and returns its value as an int."""
def iter_metrics(group):
for m in group['metrics']:
yield m
for c in group['child_groups']:
for m in iter_metrics(c):
yield m
metrics = self.get_debug_page(METRICS_URL)['metric_group']
for m in iter_metrics(metrics):
if m['name'] == name:
return int(m['value'])
assert False, "Could not find metric: %s" % name
def __do_replacements(self, s, use_db=None, extra=None):
globs = globals()
repl = dict(('$' + k, globs[k]) for k in [
"FILESYSTEM_PREFIX",
"FILESYSTEM_NAME",
"GROUP_NAME",
"NAMENODE",
"IMPALA_HOME",
"INTERNAL_LISTEN_HOST",
"INTERNAL_LISTEN_IP",
"MANAGED_WAREHOUSE_DIR",
"EXTERNAL_WAREHOUSE_DIR"])
repl.update({
'$SECONDARY_FILESYSTEM': os.environ.get("SECONDARY_FILESYSTEM", ""),
'$USER': getuser()})
if use_db:
repl['$DATABASE'] = use_db
elif '$DATABASE' in s:
raise AssertionError("Query contains $DATABASE but no use_db specified")
if extra:
for k, v in extra.iteritems():
if k in repl:
raise RuntimeError("Key {0} is reserved".format(k))
repl[k] = v
for k, v in repl.iteritems():
s = s.replace(k, v)
return s
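# Minimal sketch of the substitution (the values below are hypothetical): with
# use_db='functional' and extra={'$UNIQUE_DB': 'mydb'},
#   self.__do_replacements('select * from $DATABASE.t join $UNIQUE_DB.t2',
#                          'functional', {'$UNIQUE_DB': 'mydb'})
# returns 'select * from functional.t join mydb.t2'; keys such as $NAMENODE and
# $USER are always taken from the module-level values above.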
def __verify_exceptions(self, expected_strs, actual_str, use_db):
"""
Verifies that at least one of the strings in 'expected_str' is either:
* A row_regex: line that matches the actual exception string 'actual_str'
* A substring of the actual exception string 'actual_str'.
"""
actual_str = actual_str.replace('\n', '')
for expected_str in expected_strs:
# In error messages, some paths are always qualified and some are not.
# So, allow both $NAMENODE and $FILESYSTEM_PREFIX to be used in CATCH.
expected_str = self.__do_replacements(expected_str.strip(), use_db=use_db)
# Remove comments
expected_str = re.sub(COMMENT_LINES_REGEX, '', expected_str)
# Strip newlines so we can split error message into multiple lines
expected_str = expected_str.replace('\n', '')
expected_regex = try_compile_regex(expected_str)
if expected_regex:
if expected_regex.match(actual_str): return
else:
# Not a regex - check if expected substring is present in actual.
if expected_str in actual_str: return
assert False, 'Unexpected exception string. Expected: %s\nNot found in actual: %s' % \
(expected_str, actual_str)
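# Sketch of the two forms a CATCH entry can take (the messages are hypothetical):
# an entry such as 'row_regex:.*AnalysisException: Could not resolve table.*' is
# compiled by try_compile_regex and matched as a regex against the flattened
# exception text, while any other entry is checked as a plain substring.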
def __verify_results_and_errors(self, vector, test_section, result, use_db):
"""Verifies that both results and error sections are as expected. Rewrites both
by replacing $NAMENODE, $DATABASE and $IMPALA_HOME with their actual values, and
optionally rewriting filenames with __HDFS_FILENAME__, to ensure that expected and
actual values are easily compared.
"""
replace_filenames_with_placeholder = True
for section_name in ('RESULTS', 'ERRORS'):
if section_name in test_section:
if "$NAMENODE" in test_section[section_name]:
replace_filenames_with_placeholder = False
test_section[section_name] = test_section[section_name] \
.replace('$NAMENODE', NAMENODE) \
.replace('$IMPALA_HOME', IMPALA_HOME) \
.replace('$USER', getuser()) \
.replace('$FILESYSTEM_NAME', FILESYSTEM_NAME) \
.replace('$INTERNAL_LISTEN_HOST',
INTERNAL_LISTEN_HOST) \
.replace('$INTERNAL_LISTEN_IP', INTERNAL_LISTEN_IP) \
.replace('$MANAGED_WAREHOUSE_DIR',
MANAGED_WAREHOUSE_DIR) \
.replace('$EXTERNAL_WAREHOUSE_DIR',
EXTERNAL_WAREHOUSE_DIR)
if use_db:
test_section[section_name] = test_section[section_name].replace('$DATABASE', use_db)
result_section, type_section = 'RESULTS', 'TYPES'
if vector.get_value('protocol').startswith('hs2'):
# hs2 or hs2-http
if 'HS2_TYPES' in test_section:
assert 'TYPES' in test_section,\
"Base TYPES section must always be included alongside HS2_TYPES"
# In some cases HS2 types are expected differ from Beeswax types (e.g. see
# IMPALA-914), so use the HS2-specific section if present.
type_section = 'HS2_TYPES'
verify_raw_results(test_section, result, vector.get_value('table_format').file_format,
result_section, type_section, pytest.config.option.update_results,
replace_filenames_with_placeholder)
def run_test_case(self, test_file_name, vector, use_db=None, multiple_impalad=False,
encoding=None, test_file_vars=None):
"""
Runs the queries in the specified test based on the vector values
Runs the query using targeting the file format/compression specified in the test
vector and the exec options specified in the test vector. If multiple_impalad=True
a connection to a random impalad will be chosen to execute each test section.
Otherwise, the default impalad client will be used. If 'protocol' (either 'hs2' or
'beeswax') is set in the vector, a client for that protocol is used. Otherwise we
use the default: beeswax.
Additionally, the encoding for all test data can be specified using the 'encoding'
parameter. This is useful when data is ingested in a different encoding (ex.
latin). If not set, the default system encoding will be used.
If a dict 'test_file_vars' is provided, then all keys will be replaced with their
values in queries before they are executed. Callers need to avoid using reserved key
names, see 'reserved_keywords' below.
"""
table_format_info = vector.get_value('table_format')
exec_options = vector.get_value('exec_option')
protocol = vector.get_value('protocol')
target_impalad_clients = list()
if multiple_impalad:
target_impalad_clients =\
[ImpalaTestSuite.create_impala_client(host_port, protocol=protocol)
for host_port in self.__get_cluster_host_ports(protocol)]
else:
if protocol == 'beeswax':
target_impalad_clients = [self.client]
elif protocol == 'hs2-http':
target_impalad_clients = [self.hs2_http_client]
else:
assert protocol == 'hs2'
target_impalad_clients = [self.hs2_client]
# Change the database to reflect the file_format, compression codec etc, or the
# user specified database for all targeted impalad.
for impalad_client in target_impalad_clients:
ImpalaTestSuite.change_database(impalad_client,
table_format_info, use_db, pytest.config.option.scale_factor)
impalad_client.set_configuration(exec_options)
def __exec_in_impala(query, user=None):
"""
Helper to execute a query block in Impala, restoring any custom
query options after the completion of the set of queries.
"""
# Support running multiple queries within the same test section, only verifying the
# result of the final query. The main use case is to allow for 'USE database'
# statements before a query executes, but it is not limited to that.
# TODO: consider supporting result verification of all queries in the future
result = None
target_impalad_client = choice(target_impalad_clients)
if user:
# Create a new client so the session will use the new username.
target_impalad_client = self.create_impala_client(protocol=protocol)
query_options_changed = []
try:
for query in query.split(';'):
set_pattern_match = SET_PATTERN.match(query)
if set_pattern_match:
query_options_changed.append(set_pattern_match.groups()[0])
assert set_pattern_match.groups()[0] not in vector.get_value("exec_option"), \
"%s cannot be set in the '.test' file since it is in the test vector. " \
"Consider deepcopy()-ing the vector and removing this option in the " \
"python test." % set_pattern_match.groups()[0]
result = self.__execute_query(target_impalad_client, query, user=user)
finally:
if len(query_options_changed) > 0:
self.__restore_query_options(query_options_changed, target_impalad_client)
return result
def __exec_in_hive(query, user=None):
"""
Helper to execute a query block in Hive. No special handling of query
options is done, since we use a separate session for each block.
"""
h = ImpalaTestSuite.create_impala_client(HIVE_HS2_HOST_PORT, protocol='hs2',
is_hive=True)
try:
result = None
for query in query.split(';'):
result = h.execute(query, user=user)
return result
finally:
h.close()
sections = self.load_query_test_file(self.get_workload(), test_file_name,
encoding=encoding)
# Assumes that it is same across all the coordinators.
lineage_log_dir = self.get_var_current_val('lineage_event_log_dir')
for test_section in sections:
if 'HIVE_MAJOR_VERSION' in test_section:
needed_hive_major_version = int(test_section['HIVE_MAJOR_VERSION'])
assert needed_hive_major_version in [2, 3]
assert HIVE_MAJOR_VERSION in [2, 3]
if needed_hive_major_version != HIVE_MAJOR_VERSION:
continue
if 'SHELL' in test_section:
assert len(test_section) == 1, \
"SHELL test sections can't contain other sections"
cmd = self.__do_replacements(test_section['SHELL'], use_db=use_db,
extra=test_file_vars)
LOG.info("Shell command: " + cmd)
check_call(cmd, shell=True)
continue
if 'QUERY' in test_section:
query_section = test_section['QUERY']
exec_fn = __exec_in_impala
elif 'HIVE_QUERY' in test_section:
query_section = test_section['HIVE_QUERY']
exec_fn = __exec_in_hive
else:
assert 0, ('Error in test file %s. Test cases require a ' +
'-- QUERY or HIVE_QUERY section.\n%s') %\
(test_file_name, pprint.pformat(test_section))
# TODO: support running query tests against different scale factors
query = QueryTestSectionReader.build_query(
self.__do_replacements(query_section, use_db=use_db, extra=test_file_vars))
if 'QUERY_NAME' in test_section:
LOG.info('Query Name: \n%s\n' % test_section['QUERY_NAME'])
result = None
try:
result = exec_fn(query, user=test_section.get('USER', '').strip() or None)
user = None
if 'USER' in test_section:
user = test_section['USER'].strip()
except Exception as e:
if 'CATCH' in test_section:
self.__verify_exceptions(test_section['CATCH'], str(e), use_db)
continue
raise
if 'CATCH' in test_section and '__NO_ERROR__' not in test_section['CATCH']:
expected_str = self.__do_replacements(" or ".join(test_section['CATCH']).strip(),
use_db=use_db,
extra=test_file_vars)
assert False, "Expected exception: {0}\n\nwhen running:\n\n{1}".format(
expected_str, query)
assert result is not None
assert result.success, "Query failed: {0}".format(result.data)
# Decode the results read back if the data is stored with a specific encoding.
if encoding: result.data = [row.decode(encoding) for row in result.data]
# Replace $NAMENODE in the expected results with the actual namenode URI.
if 'RESULTS' in test_section:
# Combining 'RESULTS' with 'DML_RESULTS" is currently unsupported because
# __verify_results_and_errors calls verify_raw_results which always checks
# ERRORS, TYPES, LABELS, etc. which doesn't make sense if there are two
# different result sets to consider (IMPALA-4471).
assert 'DML_RESULTS' not in test_section
test_section['RESULTS'] = self.__do_replacements(
test_section['RESULTS'], use_db=use_db, extra=test_file_vars)
self.__verify_results_and_errors(vector, test_section, result, use_db)
else:
# TODO: Can't validate errors without expected results for now.
assert 'ERRORS' not in test_section,\
"'ERRORS' sections must have accompanying 'RESULTS' sections"
# If --update_results, then replace references to the namenode URI with $NAMENODE.
# TODO(todd) consider running do_replacements in reverse, though that may cause
# some false replacements for things like username.
if pytest.config.option.update_results and 'RESULTS' in test_section:
test_section['RESULTS'] = test_section['RESULTS'] \
.replace(NAMENODE, '$NAMENODE') \
.replace(IMPALA_HOME, '$IMPALA_HOME') \
.replace(INTERNAL_LISTEN_HOST, '$INTERNAL_LISTEN_HOST') \
.replace(INTERNAL_LISTEN_IP, '$INTERNAL_LISTEN_IP')
rt_profile_info = None
if 'RUNTIME_PROFILE_%s' % table_format_info.file_format in test_section:
# If this table format has a RUNTIME_PROFILE section specifically for it, evaluate
# that section and ignore any general RUNTIME_PROFILE sections.
rt_profile_info = 'RUNTIME_PROFILE_%s' % table_format_info.file_format
elif 'RUNTIME_PROFILE' in test_section:
rt_profile_info = 'RUNTIME_PROFILE'
if rt_profile_info is not None:
rt_profile = verify_runtime_profile(test_section[rt_profile_info],
result.runtime_profile,
update_section=pytest.config.option.update_results)
if pytest.config.option.update_results:
test_section[rt_profile_info] = "".join(rt_profile)
if 'LINEAGE' in test_section:
# Lineage flusher thread runs every 5s by default and is not configurable. Wait
# for that period. (TODO) Get rid of this for faster test execution.
time.sleep(5)
current_query_lineage = self.get_query_lineage(result.query_id, lineage_log_dir)
assert current_query_lineage != "",\
"No lineage found for query %s in dir %s" %\
(result.query_id, lineage_log_dir)
if pytest.config.option.update_results:
test_section['LINEAGE'] = json.dumps(current_query_lineage, indent=2,
separators=(',', ': '))
else:
verify_lineage(json.loads(test_section['LINEAGE']), current_query_lineage)
if 'DML_RESULTS' in test_section:
assert 'ERRORS' not in test_section
# The limit is specified to ensure the queries aren't unbounded. We shouldn't have
# test files that are checking the contents of tables larger than that anyways.
dml_results_query = "select * from %s limit 1000" % \
test_section['DML_RESULTS_TABLE']
dml_result = exec_fn(dml_results_query)
verify_raw_results(test_section, dml_result,
vector.get_value('table_format').file_format, result_section='DML_RESULTS',
update_section=pytest.config.option.update_results)
if pytest.config.option.update_results:
output_file = os.path.join(EE_TEST_LOGS_DIR,
test_file_name.replace('/','_') + ".test")
write_test_file(output_file, sections, encoding=encoding)
def get_query_lineage(self, query_id, lineage_dir):
"""Walks through the lineage files in lineage_dir to look for a given query_id.
This is an expensive operation if lineage_dir is large, so use it carefully."""
assert lineage_dir and os.path.isdir(lineage_dir),\
"Invalid lineage dir %s" % (lineage_dir)
lineage_files = glob.glob(os.path.join(lineage_dir, 'impala_lineage_log_1.0*'))
assert len(lineage_files) > 0, "Directory %s is empty" % (lineage_dir)
# Sort by mtime. Optimized for most recently written lineages.
lineage_files.sort(key=lambda f: os.path.getmtime(f), reverse=True)
for f in lineage_files:
with open(f) as fd:
# A single file can contain a maximum of 5000 entries by default.
lines = fd.readlines()
for line in reversed(lines):
line = line.strip()
if len(line) == 0: continue
lineage = json.loads(line)
assert 'queryId' in lineage.keys()
if lineage['queryId'] == query_id:
return lineage
return ""
@staticmethod
def get_db_name_from_format(table_format, scale_factor=''):
return QueryTestSectionReader.get_db_name(table_format, scale_factor)
@classmethod
def change_database(cls, impala_client, table_format=None,
db_name=None, scale_factor=None):
if db_name == None:
assert table_format != None
db_name = QueryTestSectionReader.get_db_name(table_format,
scale_factor if scale_factor else '')
query = 'use %s' % db_name
# Clear the exec_options before executing a USE statement.
# The USE statement should not fail for negative exec_option tests.
impala_client.clear_configuration()
impala_client.execute(query)
def execute_wrapper(function):
"""
Issues a use database query before executing queries.
Database names depend on the table's input format, while the table names remain
the same, so the database name must be built before execution. This method wraps
the different execute methods and provides a common interface that issues the
proper USE statement before running the query.
"""
@wraps(function)
def wrapper(*args, **kwargs):
table_format = None
if kwargs.get('table_format'):
table_format = kwargs.get('table_format')
del kwargs['table_format']
if kwargs.get('vector'):
table_format = kwargs.get('vector').get_value('table_format')
del kwargs['vector']
# self is the implicit first argument
if table_format is not None:
args[0].change_database(args[0].client, table_format)
return function(*args, **kwargs)
return wrapper
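# Sketch of how the wrapper is used (the query and table are illustrative):
#   self.execute_query("select count(*) from alltypes", vector=vector)
# first issues "use <db-for-table-format>" on self.client and then runs the query.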
@classmethod
@execute_wrapper
def execute_query_expect_success(cls, impalad_client, query, query_options=None,
user=None):
"""Executes a query and asserts if the query fails"""
result = cls.__execute_query(impalad_client, query, query_options, user)
assert result.success
return result
@classmethod
@execute_wrapper
def execute_query_expect_failure(cls, impalad_client, query, query_options=None,
user=None):
"""Executes a query and asserts if the query succeeds"""
result = None
try:
result = cls.__execute_query(impalad_client, query, query_options, user)
except Exception, e:
return e
assert not result.success, "No failure encountered for query %s" % query
return result
@execute_wrapper
def execute_query_unchecked(self, impalad_client, query, query_options=None, user=None):
return self.__execute_query(impalad_client, query, query_options, user)
@execute_wrapper
def execute_query(self, query, query_options=None):
return self.__execute_query(self.client, query, query_options)
def exec_and_time(self, query, query_options=None, impalad=0):
"""Executes a given query on the given impalad and returns the time taken in
milliseconds as seen by the client."""
client = self.create_client_for_nth_impalad(impalad)
if query_options is not None:
client.set_configuration(query_options)
start_time = int(round(time.time() * 1000))
client.execute(query)
end_time = int(round(time.time() * 1000))
return end_time - start_time
def execute_query_using_client(self, client, query, vector):
self.change_database(client, vector.get_value('table_format'))
query_options = vector.get_value('exec_option')
if query_options is not None: client.set_configuration(query_options)
return client.execute(query)
def execute_query_async_using_client(self, client, query, vector):
self.change_database(client, vector.get_value('table_format'))
query_options = vector.get_value('exec_option')
if query_options is not None: client.set_configuration(query_options)
return client.execute_async(query)
def close_query_using_client(self, client, query):
return client.close_query(query)
@execute_wrapper
def execute_query_async(self, query, query_options=None):
if query_options is not None: self.client.set_configuration(query_options)
return self.client.execute_async(query)
@execute_wrapper
def close_query(self, query):
return self.client.close_query(query)
@execute_wrapper
def execute_scalar(self, query, query_options=None):
result = self.__execute_query(self.client, query, query_options)
assert len(result.data) <= 1, 'Multiple values returned from scalar'
return result.data[0] if len(result.data) == 1 else None
def exec_and_compare_hive_and_impala_hs2(self, stmt, compare = lambda x, y: x == y):
"""Compare Hive and Impala results when executing the same statment over HS2"""
# execute_using_jdbc expects a Query object. Convert the query string into a Query
# object
query = Query()
query.query_str = stmt
# Run the statement targeting Hive
exec_opts = JdbcQueryExecConfig(impalad=HIVE_HS2_HOST_PORT, transport='SASL')
hive_results = execute_using_jdbc(query, exec_opts).data
# Run the statement targeting Impala
exec_opts = JdbcQueryExecConfig(impalad=IMPALAD_HS2_HOST_PORT, transport='NOSASL')
impala_results = execute_using_jdbc(query, exec_opts).data
# Compare the results
assert (impala_results is not None) and (hive_results is not None)
assert compare(impala_results, hive_results)
def load_query_test_file(self, workload, file_name, valid_section_names=None,
encoding=None):
"""
Loads/Reads the specified query test file. Accepts the given section names as valid.
Uses a default list of valid section names if valid_section_names is None.
"""
test_file_path = os.path.join(WORKLOAD_DIR, workload, 'queries', file_name + '.test')
LOG.info("Loading query test file: %s", test_file_path)
if not os.path.isfile(test_file_path):
assert False, 'Test file not found: %s' % file_name
return parse_query_test_file(test_file_path, valid_section_names, encoding=encoding)
@classmethod
def __execute_query(cls, impalad_client, query, query_options=None, user=None):
"""Executes the given query against the specified Impalad"""
if query_options is not None: impalad_client.set_configuration(query_options)
return impalad_client.execute(query, user=user)
def clone_table(self, src_tbl, dst_tbl, recover_partitions, vector):
src_loc = self._get_table_location(src_tbl, vector)
self.client.execute("create external table {0} like {1} location '{2}'"\
.format(dst_tbl, src_tbl, src_loc))
if recover_partitions:
self.client.execute("alter table {0} recover partitions".format(dst_tbl))
def appx_equals(self, a, b, diff_perc):
"""Returns True if 'a' and 'b' are within 'diff_perc' percent of each other,
False otherwise. 'diff_perc' must be a float in [0,1]."""
if a == b: return True # Avoid division by 0
assert abs(a - b) / float(max(abs(a), abs(b))) <= diff_perc
def _get_table_location(self, table_name, vector):
""" Returns the HDFS location of the table """
result = self.execute_query_using_client(self.client,
"describe formatted %s" % table_name, vector)
for row in result.data:
if 'Location:' in row:
return row.split('\t')[1]
# This should never happen.
assert 0, 'Unable to get location for table: ' + table_name
# TODO(todd) make this use Thrift to connect to HS2 instead of shelling
# out to beeline for better performance
def run_stmt_in_hive(self, stmt, username=None):
"""
Run a statement in Hive, returning stdout if successful and throwing
RuntimeError(stderr) if not.
"""
# Remove HADOOP_CLASSPATH from environment. Beeline doesn't need it,
# and doing so avoids Hadoop 3's classpath de-duplication code from
# placing $HADOOP_CONF_DIR too late in the classpath to get the right
# log4j configuration file picked up. Some log4j configuration files
# in Hadoop's jars send logging to stdout, confusing Impala's test
# framework.
env = os.environ.copy()
env.pop("HADOOP_CLASSPATH", None)
call = subprocess.Popen(
['beeline',
'--outputformat=csv2',
'-u', 'jdbc:hive2://' + pytest.config.option.hive_server2,
'-n', username or getuser(),
'-e', stmt],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
# Beeline in Hive 2.1 will read from stdin even when "-e"
# is specified; explicitly make sure there's nothing to
# read to avoid hanging, especially when running interactively
# with py.test.
stdin=file("/dev/null"),
env=env)
(stdout, stderr) = call.communicate()
call.wait()
if call.returncode != 0:
raise RuntimeError(stderr)
return stdout
def hive_partition_names(self, table_name):
"""Find the names of the partitions of a table, as Hive sees them.
The return format is a list of strings. Each string represents a partition
value of a given column in a format like 'column1=7/column2=8'.
"""
return self.run_stmt_in_hive(
'show partitions %s' % table_name).split('\n')[1:-1]
@classmethod
def create_table_info_dimension(cls, exploration_strategy):
# If the user has specified a specific set of table formats to run against, then
# use those. Otherwise, load from the workload test vectors.
if pytest.config.option.table_formats:
table_formats = list()
for tf in pytest.config.option.table_formats.split(','):
dataset = get_dataset_from_workload(cls.get_workload())
table_formats.append(TableFormatInfo.create_from_string(dataset, tf))
tf_dimensions = ImpalaTestDimension('table_format', *table_formats)
else:
tf_dimensions = load_table_info_dimension(cls.get_workload(), exploration_strategy)
# If 'skip_hbase' is specified or the filesystem is isilon, s3, GCS(gs) or local,
# we don't need the hbase dimension.
if pytest.config.option.skip_hbase or TARGET_FILESYSTEM.lower() \
in ['s3', 'isilon', 'local', 'abfs', 'adls', 'gs']:
for tf_dimension in tf_dimensions:
if tf_dimension.value.file_format == "hbase":
tf_dimensions.remove(tf_dimension)
break
return tf_dimensions
@classmethod
def __create_exec_option_dimension(cls):
cluster_sizes = ALL_CLUSTER_SIZES
disable_codegen_options = ALL_DISABLE_CODEGEN_OPTIONS
batch_sizes = ALL_BATCH_SIZES
exec_single_node_option = [0]
if cls.exploration_strategy() == 'core':
disable_codegen_options = [False]
cluster_sizes = ALL_NODES_ONLY
return create_exec_option_dimension(cluster_sizes, disable_codegen_options,
batch_sizes,
exec_single_node_option=exec_single_node_option,
disable_codegen_rows_threshold_options=[0])
@classmethod
def exploration_strategy(cls):
default_strategy = pytest.config.option.exploration_strategy
if pytest.config.option.workload_exploration_strategy:
workload_strategies = pytest.config.option.workload_exploration_strategy.split(',')
for workload_strategy in workload_strategies:
workload_strategy = workload_strategy.split(':')
if len(workload_strategy) != 2:
raise ValueError, 'Invalid workload:strategy format: %s' % workload_strategy
if cls.get_workload() == workload_strategy[0]:
return workload_strategy[1]
return default_strategy
def wait_for_state(self, handle, expected_state, timeout, client=None):
"""Waits for the given 'query_handle' to reach the 'expected_state' using 'client', or
with the default connection if 'client' is None. If it does not reach the given state
within 'timeout' seconds, the method throws an AssertionError.
"""
self.wait_for_any_state(handle, [expected_state], timeout, client)
def wait_for_any_state(self, handle, expected_states, timeout, client=None):
"""Waits for the given 'query_handle' to reach one of 'expected_states' using 'client'
or with the default connection if 'client' is None. If it does not reach one of the
given states within 'timeout' seconds, the method throws an AssertionError. Returns
the final state.
"""
if client is None: client = self.client
start_time = time.time()
actual_state = client.get_state(handle)
while actual_state not in expected_states and time.time() - start_time < timeout:
actual_state = client.get_state(handle)
time.sleep(0.5)
if actual_state not in expected_states:
raise Timeout("query {0} did not reach one of the expected states {1}, "
"last known state {2}".format(handle.get_handle().id, expected_states,
actual_state))
return actual_state
def wait_for_progress(self, handle, expected_progress, timeout, client=None):
"""Waits for the given query handle to reach expected progress rate"""
if client is None: client = self.client
start_time = time.time()
summary = client.get_exec_summary(handle)
while time.time() - start_time < timeout and \
self.__get_query_progress_rate(summary.progress) <= expected_progress:
summary = client.get_exec_summary(handle)
time.sleep(0.5)
actual_progress = self.__get_query_progress_rate(summary.progress)
if actual_progress <= expected_progress:
raise Timeout("query {0} did not reach the expected progress {1}, "
"current progress {2}".format(handle.get_handle().id,
expected_progress, actual_progress))
return actual_progress
def __get_query_progress_rate(self, progress):
if progress is None:
return 0
return float(progress.num_completed_scan_ranges) / progress.total_scan_ranges
def wait_for_db_to_appear(self, db_name, timeout_s):
"""Wait until the database with 'db_name' is present in the impalad's local catalog.
Fail after timeout_s if the database doesn't appear."""
start_time = time.time()
while time.time() - start_time < timeout_s:
try:
# This will throw an exception if the database is not present.
self.client.execute("describe database `{db_name}`".format(db_name=db_name))
return
except Exception:
time.sleep(0.2)
continue
raise Exception("DB {0} didn't show up after {1}s", db_name, timeout_s)
def wait_for_table_to_appear(self, db_name, table_name, timeout_s):
"""Wait until the table with 'table_name' in 'db_name' is present in the
impalad's local catalog. Fail after timeout_s if the table doesn't appear."""
start_time = time.time()
while time.time() - start_time < timeout_s:
try:
# This will throw an exception if the table is not present.
self.client.execute("describe `{db_name}`.`{table_name}`".format(
db_name=db_name, table_name=table_name))
return
except Exception, ex:
print str(ex)
time.sleep(0.2)
continue
raise Exception("Table {0}.{1} didn't show up after {2}s", db_name, table_name,
timeout_s)
def assert_eventually(self, timeout_s, period_s, condition, error_msg=None):
"""Assert that the condition (a function with no parameters) returns True within the
given timeout. The condition is executed every period_s seconds. The check assumes
that once the condition returns True, it continues to return True. Throws a Timeout
if the condition does not return true within timeout_s seconds. 'error_msg' is an
optional function that must return a string. If set, the result of the function will
be included in the Timeout error message."""
count = 0
start_time = time.time()
while not condition() and time.time() - start_time < timeout_s:
time.sleep(period_s)
count += 1
if not condition():
error_msg_str = " error message: " + error_msg() if error_msg else ""
raise Timeout(
"Check failed to return True after {0} tries and {1} seconds{2}".format(
count, timeout_s, error_msg_str))
def assert_impalad_log_contains(self, level, line_regex, expected_count=1, timeout_s=6):
"""
Convenience wrapper around assert_log_contains for impalad logs.
"""
self.assert_log_contains("impalad", level, line_regex, expected_count, timeout_s)
def assert_catalogd_log_contains(self, level, line_regex, expected_count=1,
timeout_s=6):
"""
Convenience wrapper around assert_log_contains for catalogd logs.
"""
self.assert_log_contains("catalogd", level, line_regex, expected_count, timeout_s)
def assert_log_contains(self, daemon, level, line_regex, expected_count=1, timeout_s=6):
"""
Assert that the daemon log with specified level (e.g. ERROR, WARNING, INFO) contains
expected_count lines with a substring matching the regex. When expected_count is -1,
at least one match is expected.
Retries until 'timeout_s' has expired. The default timeout is the default minicluster
log buffering time (5 seconds) with a one second buffer.
When using this method to check log files of running processes, the caller should
make sure that log buffering has been disabled, for example by adding
'-logbuflevel=-1' to the daemon startup options or set timeout_s to a value higher
than the log flush interval.
"""
pattern = re.compile(line_regex)
start_time = time.time()
while True:
try:
found = 0
if hasattr(self, "impala_log_dir"):
log_dir = self.impala_log_dir
else:
log_dir = EE_TEST_LOGS_DIR
log_file_path = os.path.join(log_dir, daemon + "." + level)
# Resolve symlinks to make finding the file easier.
log_file_path = os.path.realpath(log_file_path)
with open(log_file_path) as log_file:
for line in log_file:
if pattern.search(line):
found += 1
if expected_count == -1:
assert found > 0, "Expected at least one line in file %s matching regex '%s'"\
", but found none." % (log_file_path, line_regex)
else:
assert found == expected_count, \
"Expected %d lines in file %s matching regex '%s', but found %d lines. "\
"Last line was: \n%s" %\
(expected_count, log_file_path, line_regex, found, line)
return
except AssertionError as e:
# Re-throw the exception to the caller only when the timeout is expired. Otherwise
# sleep before retrying.
if time.time() - start_time > timeout_s:
raise
LOG.info("Expected log lines could not be found, sleeping before retrying: %s",
str(e))
time.sleep(1)
| []
| []
| [
"IMPALA_HISTFILE",
"IMPALA_HOME",
"IMPALA_WORKLOAD_DIR",
"TARGET_FILESYSTEM",
"SECONDARY_FILESYSTEM",
"INTERNAL_LISTEN_HOST",
"IMPALA_EE_TEST_LOGS_DIR"
]
| [] | ["IMPALA_HISTFILE", "IMPALA_HOME", "IMPALA_WORKLOAD_DIR", "TARGET_FILESYSTEM", "SECONDARY_FILESYSTEM", "INTERNAL_LISTEN_HOST", "IMPALA_EE_TEST_LOGS_DIR"] | python | 7 | 0 | |
tests/conftest.py | # -*- coding:utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import os
import random
import string
import pytest
from selenium import webdriver
from selenium.common.exceptions import WebDriverException
browsers = {
# 'firefox': webdriver.Firefox,
# 'chrome': webdriver.Chrome,
'phantomjs': webdriver.PhantomJS,
}
def random_string(n):
return ''.join(
random.choice(string.ascii_uppercase + string.digits)
for _ in range(n)
)
@pytest.fixture(scope='session',
params=browsers.keys())
def driver(request):
if 'DISPLAY' not in os.environ:
pytest.skip('Test requires display server (export DISPLAY)')
try:
b = browsers[request.param]()
except WebDriverException as e:
pytest.skip(e)
else:
b.set_window_size(1200, 800)
request.addfinalizer(lambda *args: b.quit())
return b
@pytest.fixture
def testpages(db):
from tests.testapp.models import TestPage
return TestPage.objects.bulk_create(
[TestPage(pk=pk, content1=random_string(50), content2=random_string(50)) for pk in range(10)]
)
@pytest.fixture
def flatpages(db):
from django.contrib.flatpages.models import FlatPage
return FlatPage.objects.bulk_create(
[FlatPage(pk=pk, title=random_string(50), url=random_string(50)) for pk in range(1)]
)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
python_server/datatypes.py | from pydantic import BaseModel
from typing import List
class PolygonsInterpolateData(BaseModel):
polygons: List[list]
displacement_f: float = None
displacement: float = 10
min_area: float = 10
max_iter: int = 100
class RandomPointsInsidePolygonData(BaseModel):
polygon: List[list]
n: int = None
seed: int = -1
class ClippedVoronoiData(BaseModel):
polygon: List[list]
points: list
class RecursiveVoronoiData(BaseModel):
polygon: list
n: int
d: int = 0
seed: int = -1
round_corner: bool = False
erode: int = 0
| []
| []
| []
| [] | [] | python | null | null | null |
vendor/github.com/elastic/beats/metricbeat/tests/system/test_apache.py | import os
import metricbeat
from nose.plugins.attrib import attr
APACHE_FIELDS = metricbeat.COMMON_FIELDS + ["apache-status"]
APACHE_STATUS_FIELDS = ["hostname", "totalAccesses", "totalKBytes",
"reqPerSec", "bytesPerSec", "bytesPerReq",
"busyWorkers", "idleWorkers", "uptime", "cpu",
"connections", "load", "scoreboard"]
CPU_FIELDS = ["cpuLoad", "cpuUser", "cpuSystem", "cpuChildrenUser",
"cpuChildrenSystem"]
class ApacheStatusTest(metricbeat.BaseTest):
@attr('integration')
def test_output(self):
"""
Apache module outputs an event.
"""
self.render_config_template(modules=[{
"name": "apache",
"metricsets": ["status"],
"hosts": [os.getenv('APACHE_HOST')],
}])
proc = self.start_beat()
self.wait_until(
lambda: self.output_has(lines=1)
)
proc.check_kill_and_wait()
# Ensure no errors or warnings exist in the log.
log = self.get_log()
self.assertNotRegexpMatches(log, "ERR|WARN")
output = self.read_output_json()
self.assertEqual(len(output), 1)
evt = output[0]
# Verify the required fields are present.
self.assertItemsEqual(APACHE_FIELDS, evt.keys())
apache_status = evt["apache-status"]
self.assertItemsEqual(APACHE_STATUS_FIELDS, apache_status.keys())
self.assertItemsEqual(CPU_FIELDS, apache_status["cpu"].keys())
# There are more fields that could be checked.
# Verify all fields present are documented.
self.assert_fields_are_documented(evt)
| []
| []
| [
"APACHE_HOST"
]
| [] | ["APACHE_HOST"] | python | 1 | 0 | |
examples/service/chat/channel/fetch/channel_fetch_example.go | package main
import (
"log"
"os"
"github.com/RJPearson94/twilio-sdk-go"
v2 "github.com/RJPearson94/twilio-sdk-go/service/chat/v2"
"github.com/RJPearson94/twilio-sdk-go/session/credentials"
)
var chatClient *v2.Chat
func init() {
creds, err := credentials.New(credentials.Account{
Sid: os.Getenv("TWILIO_ACCOUNT_SID"),
AuthToken: os.Getenv("TWILIO_AUTH_TOKEN"),
})
if err != nil {
log.Panicf("%s", err.Error())
}
chatClient = twilio.NewWithCredentials(creds).Chat.V2
}
func main() {
resp, err := chatClient.
Service("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").
Channel("CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").
Fetch()
if err != nil {
log.Panicf("%s", err.Error())
}
log.Printf("SID: %s", resp.Sid)
}
| [
"\"TWILIO_ACCOUNT_SID\"",
"\"TWILIO_AUTH_TOKEN\""
]
| []
| [
"TWILIO_AUTH_TOKEN",
"TWILIO_ACCOUNT_SID"
]
| [] | ["TWILIO_AUTH_TOKEN", "TWILIO_ACCOUNT_SID"] | go | 2 | 0 | |
http/forkschedule_test.go | // Copyright © 2020, 2021 Attestant Limited.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package http_test
import (
"context"
"os"
"testing"
client "github.com/attestantio/go-eth2-client"
"github.com/attestantio/go-eth2-client/http"
"github.com/stretchr/testify/require"
)
func TestForkSchedule(t *testing.T) {
tests := []struct {
name string
}{
{
name: "Good",
},
}
service, err := http.New(context.Background(),
http.WithTimeout(timeout),
http.WithAddress(os.Getenv("HTTP_ADDRESS")),
)
require.NoError(t, err)
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
forkSchedule, err := service.(client.ForkScheduleProvider).ForkSchedule(context.Background())
require.NoError(t, err)
require.NotNil(t, forkSchedule)
require.NotEmpty(t, forkSchedule)
require.NotNil(t, forkSchedule[0].PreviousVersion)
require.NotNil(t, forkSchedule[0].CurrentVersion)
})
}
}
| [
"\"HTTP_ADDRESS\""
]
| []
| [
"HTTP_ADDRESS"
]
| [] | ["HTTP_ADDRESS"] | go | 1 | 0 | |
dumper/file/file.go | package file
import (
"bytes"
"crypto/md5"
"encoding/json"
"io"
"log"
"os"
"strings"
"github.com/fsnotify/fsnotify"
"github.com/ldez/traefik-certs-dumper/v2/dumper"
"github.com/ldez/traefik-certs-dumper/v2/hook"
)
// Dump dumps the "acme.json" file to certificates.
func Dump(acmeFile string, baseConfig *dumper.BaseConfig) error {
err := dump(acmeFile, baseConfig)
if err != nil {
return err
}
if baseConfig.Watch {
return watch(acmeFile, baseConfig)
}
return nil
}
func dump(acmeFile string, baseConfig *dumper.BaseConfig) error {
data, err := readFile(acmeFile)
if err != nil {
return err
}
return dumper.Dump(data, baseConfig)
}
func readFile(acmeFile string) (*dumper.StoredData, error) {
source, err := os.Open(acmeFile)
if err != nil {
return nil, err
}
data := &dumper.StoredData{}
if err = json.NewDecoder(source).Decode(data); err != nil {
return nil, err
}
return data, nil
}
func watch(acmeFile string, baseConfig *dumper.BaseConfig) error {
watcher, err := fsnotify.NewWatcher()
if err != nil {
return err
}
defer func() { _ = watcher.Close() }()
done := make(chan bool)
go func() {
var previousHash []byte
for {
select {
case event, ok := <-watcher.Events:
if !ok {
return
}
if isDebug() {
log.Println("event:", event)
}
hash, errW := manageEvent(watcher, event, acmeFile, previousHash, baseConfig)
if errW != nil {
log.Println("error:", errW)
done <- true
return
}
previousHash = hash
case errW, ok := <-watcher.Errors:
if !ok {
return
}
log.Println("error:", errW)
done <- true
return
}
}
}()
err = watcher.Add(acmeFile)
if err != nil {
return err
}
<-done
return nil
}
func manageEvent(watcher *fsnotify.Watcher, event fsnotify.Event, acmeFile string, previousHash []byte, baseConfig *dumper.BaseConfig) ([]byte, error) {
err := manageRename(watcher, event, acmeFile)
if err != nil {
return nil, err
}
hash, err := calculateHash(acmeFile)
if err != nil {
return nil, err
}
if !bytes.Equal(previousHash, hash) {
if isDebug() {
log.Println("detected changes on file:", event.Name)
}
if errD := dump(acmeFile, baseConfig); errD != nil {
return nil, errD
}
if isDebug() {
log.Println("Dumped new certificate data.")
}
hook.Exec(baseConfig.Hook)
}
return hash, nil
}
func manageRename(watcher *fsnotify.Watcher, event fsnotify.Event, acmeFile string) error {
if event.Op&fsnotify.Rename != fsnotify.Rename {
return nil
}
if err := watcher.Remove(acmeFile); err != nil {
return err
}
return watcher.Add(acmeFile)
}
func calculateHash(acmeFile string) ([]byte, error) {
file, err := os.Open(acmeFile)
if err != nil {
return nil, err
}
defer func() { _ = file.Close() }()
h := md5.New()
_, err = io.Copy(h, file)
if err != nil {
return nil, err
}
return h.Sum(nil), nil
}
func isDebug() bool {
return strings.EqualFold(os.Getenv("TCD_DEBUG"), "true")
}
| [
"\"TCD_DEBUG\""
]
| []
| [
"TCD_DEBUG"
]
| [] | ["TCD_DEBUG"] | go | 1 | 0 | |
zerver/management/commands/backup.py | import os
import re
import tempfile
from argparse import ArgumentParser, RawTextHelpFormatter
from typing import Any
from django.conf import settings
from django.db import connection
from django.utils.timezone import now as timezone_now
from scripts.lib.zulip_tools import parse_os_release, run, TIMESTAMP_FORMAT
from version import ZULIP_VERSION
from zerver.lib.management import ZulipBaseCommand
from zerver.logging_handlers import try_git_describe
class Command(ZulipBaseCommand):
# Fix support for multi-line usage strings
def create_parser(self, *args: Any, **kwargs: Any) -> ArgumentParser:
parser = super().create_parser(*args, **kwargs)
parser.formatter_class = RawTextHelpFormatter
return parser
def add_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument(
"--output", default=None, nargs="?", help="Filename of output tarball"
)
parser.add_argument("--skip-db", action='store_true', help="Skip database backup")
parser.add_argument("--skip-uploads", action='store_true', help="Skip uploads backup")
def handle(self, *args: Any, **options: Any) -> None:
timestamp = timezone_now().strftime(TIMESTAMP_FORMAT)
with tempfile.TemporaryDirectory(
prefix="zulip-backup-%s-" % (timestamp,)
) as tmp:
os.mkdir(os.path.join(tmp, "zulip-backup"))
members = []
paths = []
with open(os.path.join(tmp, "zulip-backup", "zulip-version"), "w") as f:
print(ZULIP_VERSION, file=f)
git = try_git_describe()
if git:
print(git, file=f)
members.append("zulip-backup/zulip-version")
with open(os.path.join(tmp, "zulip-backup", "os-version"), "w") as f:
print(
"{ID} {VERSION_ID}".format(**parse_os_release()),
file=f,
)
members.append("zulip-backup/os-version")
with open(os.path.join(tmp, "zulip-backup", "postgres-version"), "w") as f:
print(connection.pg_version, file=f)
members.append("zulip-backup/postgres-version")
if settings.DEVELOPMENT:
members.append(
os.path.join(settings.DEPLOY_ROOT, "zproject", "dev-secrets.conf")
)
paths.append(
("zproject", os.path.join(settings.DEPLOY_ROOT, "zproject"))
)
else:
members.append("/etc/zulip")
paths.append(("settings", "/etc/zulip"))
if not options['skip_db']:
pg_dump_command = [
"pg_dump",
"--format=directory",
"--file", os.path.join(tmp, "zulip-backup", "database"),
"--host", settings.DATABASES["default"]["HOST"],
"--port", settings.DATABASES["default"]["PORT"],
"--username", settings.DATABASES["default"]["USER"],
"--dbname", settings.DATABASES["default"]["NAME"],
"--no-password",
]
os.environ["PGPASSWORD"] = settings.DATABASES["default"]["PASSWORD"]
run(
pg_dump_command,
cwd=tmp,
)
members.append("zulip-backup/database")
if not options['skip_uploads'] and settings.LOCAL_UPLOADS_DIR is not None and os.path.exists(
os.path.join(settings.DEPLOY_ROOT, settings.LOCAL_UPLOADS_DIR)
):
members.append(
os.path.join(settings.DEPLOY_ROOT, settings.LOCAL_UPLOADS_DIR)
)
paths.append(
(
"uploads",
os.path.join(settings.DEPLOY_ROOT, settings.LOCAL_UPLOADS_DIR),
)
)
assert not any("|" in name or "|" in path for name, path in paths)
transform_args = [
r"--transform=s|^{}(/.*)?$|zulip-backup/{}\1|x".format(
re.escape(path), name.replace("\\", r"\\")
)
for name, path in paths
]
try:
if options["output"] is None:
tarball_path = tempfile.NamedTemporaryFile(
prefix="zulip-backup-%s-" % (timestamp,),
suffix=".tar.gz",
delete=False,
).name
else:
tarball_path = options["output"]
run(
["tar", "-C", tmp, "-cPzf", tarball_path]
+ transform_args
+ ["--"]
+ members
)
print("Backup tarball written to %s" % (tarball_path,))
except BaseException:
if options["output"] is None:
os.unlink(tarball_path)
raise
| []
| []
| [
"PGPASSWORD"
]
| [] | ["PGPASSWORD"] | python | 1 | 0 | |
compound_disease/compound_treats_disease/edge_prediction_experiment/edge_prediction_experiment.py | #!/usr/bin/env python
# coding: utf-8
# # Compound Treats Disease Edge Prediction
# This notebook takes the next step, moving from predicted sentences to edge predictions. After training the discriminator model, each sentence carries a confidence score for the likelihood of mentioning a relationship. A single relationship is usually supported by multiple sentences, which makes establishing an edge non-obvious: is taking the max score appropriate for determining the existence of an edge, or does taking the mean over each relationship's sentences make more sense? The answers to these questions are shown below.
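# A minimal sketch of the aggregation question (not the code used below): given a frame with one row per sentence and columns `drugbank_id`, `doid_id` and `pred`, the candidate edge scores per metric are simply
#
#     edge_scores = (sentence_df
#         .groupby(["drugbank_id", "doid_id"])["pred"]
#         .agg(["max", "mean", "median"]))
#
# Here `sentence_df` is a hypothetical stand-in; the real aggregation, which also carries along the Hetionet labels, is performed in cell In[8] below.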
# In[1]:
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().run_line_magic('matplotlib', 'inline')
import math
import os
import sys
import pandas as pd
from sklearn.metrics import precision_recall_curve, roc_curve, auc
import matplotlib.pyplot as plt
import plotnine as p9
import seaborn as sns
sys.path.append(os.path.abspath('../../../modules'))
from utils.notebook_utils.dataframe_helper import mark_sentence, tag_sentence
# In[2]:
#Set up the environment
username = "danich1"
password = "snorkel"
dbname = "pubmeddb"
#Path subject to change for different os
database_str = "postgresql+psycopg2://{}:{}@/{}?host=/var/run/postgresql".format(username, password, dbname)
os.environ['SNORKELDB'] = database_str
from snorkel import SnorkelSession
session = SnorkelSession()
# In[3]:
from snorkel.learning.pytorch.rnn.utils import candidate_to_tokens
from snorkel.models import Candidate, candidate_subclass
# In[4]:
CompoundDisease = candidate_subclass('CompoundDisease', ['Compound', 'Disease'])
# In[5]:
total_candidates_df = (
pd
.read_csv("input/all_ctd_candidates.tsv.xz", sep="\t")
.sort_values("candidate_id")
)
total_candidates_df.head(2)
# In[6]:
sentence_prediction_df = (
pd
.read_csv("input/all_predicted_ctd_sentences.tsv.xz", sep="\t")
.sort_values("candidate_id")
)
sentence_prediction_df.head(2)
# In[7]:
# DataFrame that combines likelihood scores with each candidate sentence
total_candidates_pred_df = (
total_candidates_df[[
"doid_id", "doid_name",
"drugbank_id", "drug_name",
"text", "hetionet",
"candidate_id", "split"
]]
.merge(sentence_prediction_df, on="candidate_id")
)
#total_candidates_pred_df.to_csv(
# "output/combined_predicted_ctd_sentences.tsv.xz",
# sep="\t", index=False, compression="xz"
#)
total_candidates_pred_df.head(2)
# In[8]:
# DataFrame that groups disease and compound mentions together and takes
# the max, median and mean of each group
grouped_candidates_pred_df=(
total_candidates_pred_df
.groupby(["doid_id", "drugbank_id"], as_index=False)
.agg({
"pred": ['max', 'mean', 'median'],
'hetionet': 'max',
"drug_name": 'first',
"doid_name": 'first',
"split": 'first'
})
)
grouped_candidates_pred_df.head(2)
# In[9]:
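# Flatten the MultiIndex columns produced by the aggregation above
# (e.g. ('pred', 'max') -> 'pred_max'), leaving single-level columns such as
# 'hetionet', 'drug_name', 'doid_name' and 'split' untouched.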
grouped_candidates_pred_df.columns = [
"_".join(col)
if col[1] != '' and col[0] not in ['hetionet', 'drug_name', 'doid_name', 'split'] else col[0]
for col in grouped_candidates_pred_df.columns.values
]
grouped_candidates_pred_df.head(2)
# In[10]:
grouped_candidates_pred_subset_df = (
grouped_candidates_pred_df
.query("split==11")
.drop("split", axis=1)
)
grouped_candidates_pred_subset_df.head(2)
# In[11]:
grouped_candidates_pred_subset_df.hetionet.value_counts()
# # Best Sentence Representation Metric
# This section aims to answer the question: What metric (Mean, Max, Median) best predicts Hetionet Edges?
# In[12]:
performance_map = {}
# In[13]:
precision, recall, pr_threshold = precision_recall_curve(
grouped_candidates_pred_subset_df.hetionet,
grouped_candidates_pred_subset_df.pred_max,
)
fpr, tpr, roc_threshold = roc_curve(
grouped_candidates_pred_subset_df.hetionet,
grouped_candidates_pred_subset_df.pred_max,
)
performance_map['max'] = {
"precision":precision, "recall":recall,
"pr_threshold":pr_threshold, "false_pos":fpr,
"true_pos":tpr, "roc_threshold":roc_threshold,
}
# In[14]:
precision, recall, pr_threshold = precision_recall_curve(
grouped_candidates_pred_subset_df.hetionet,
grouped_candidates_pred_subset_df.pred_mean,
)
fpr, tpr, roc_threshold = roc_curve(
grouped_candidates_pred_subset_df.hetionet,
grouped_candidates_pred_subset_df.pred_mean,
)
performance_map['mean'] = {
"precision":precision, "recall":recall,
"pr_threshold":pr_threshold, "false_pos":fpr,
"true_pos":tpr, "roc_threshold":roc_threshold,
}
# In[15]:
precision, recall, pr_threshold = precision_recall_curve(
grouped_candidates_pred_subset_df.hetionet,
grouped_candidates_pred_subset_df.pred_median,
)
fpr, tpr, roc_threshold = roc_curve(
grouped_candidates_pred_subset_df.hetionet,
grouped_candidates_pred_subset_df.pred_median,
)
performance_map['median'] = {
"precision":precision, "recall":recall,
"pr_threshold":pr_threshold, "false_pos":fpr,
"true_pos":tpr, "roc_threshold":roc_threshold,
}
# In[16]:
for key in performance_map:
plt.plot(
performance_map[key]['false_pos'],
performance_map[key]['true_pos'],
label=f"{key}:AUC ({auc(performance_map[key]['false_pos'], performance_map[key]['true_pos']):.3f})"
)
plt.plot([0,1], [0,1], linestyle='--', color='black')
plt.legend()
plt.show()
# In[17]:
for key in performance_map:
plt.plot(
performance_map[key]['recall'],
performance_map[key]['precision'],
label=f"{key}:AUC ({auc(performance_map[key]['recall'], performance_map[key]['precision']):.3f})"
)
plt.legend()
plt.show()
# # Optimal Cutoff Using PR-Curve
# In[18]:
threshold_df = (
pd.DataFrame(
list(
zip(
performance_map['max']['precision'],
performance_map['max']['recall'],
performance_map['max']['pr_threshold']
)
),
columns=["precision", "recall", "pr_threshold"]
)
.sort_values("precision", ascending=False)
)
threshold_df.head(2)
# In[19]:
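# For each (rounded, de-duplicated) precision level, find the lowest sentence-score
# cutoff that still achieves at least that precision, then count how many existing
# (Hetionet) and novel edges score at or above that cutoff.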
#precision_thresholds = pd.np.linspace(0,1,num=5)
precision_thresholds = threshold_df.round(2).drop_duplicates("precision").precision.values
# Replace a zero threshold with the lowest observed precision
# rather than keeping it at zero
precision_thresholds = (
pd.np.where(
precision_thresholds==0,
threshold_df.precision.min(),
precision_thresholds
)
)
performance_records = []
for precision_cutoff in precision_thresholds:
cutoff = (
threshold_df
.query("precision>=@precision_cutoff")
.pr_threshold
.min()
)
values_added = (
grouped_candidates_pred_subset_df
.query("pred_max >= @cutoff")
.hetionet
.value_counts()
)
series_keys = list(values_added.keys())
for key in series_keys:
performance_records.append(
{
"edges": values_added[key],
"in_hetionet": "Existing" if key == 1 else "Novel",
"precision": precision_cutoff,
"sen_cutoff": cutoff
}
)
edges_added_df = (
pd
.DataFrame
.from_records(performance_records)
)
edges_added_df.head(10)
# In[20]:
ax = sns.scatterplot(x="precision", y="edges", hue="in_hetionet", data=edges_added_df)
ax.set(yscale="log")
# In[21]:
edges_added_df.to_csv("output/precision_ctd_edges_added.tsv", index=False, sep="\t")
# # Total Recalled Edges
# How many edges of hetionet can we recall using an equal error rate cutoff score?
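# The equal error rate (EER) threshold is the score at which the false positive
# rate equals the false negative rate (fnr = 1 - tpr); cell In[25] below
# approximates it by taking the ROC threshold that minimizes |fnr - fpr|.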
# In[23]:
gen_pred_df = (
pd.read_csv("../label_sampling_experiment/results/CtD/marginals/train/22_sampled_train.tsv.xz", sep="\t")
.iloc[:, [0,-1]]
.append(
pd.read_csv("../label_sampling_experiment/results/CtD/marginals/tune/22_sampled_dev.tsv", sep="\t")
.iloc[:, [0,-1]]
)
.append(
pd.read_csv("../label_sampling_experiment/results/CtD/marginals/test/22_sampled_test.tsv", sep="\t")
.iloc[:, [0,-1]]
)
)
gen_pred_df.columns = ["gen_pred", "candidate_id"]
gen_pred_df.head(2)
# In[24]:
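# For every compound-disease pair keep its single highest-scoring sentence, join in
# the generative-model marginals, tag the compound/disease mentions in the text,
# and write the ten highest-confidence edge predictions out to a TSV file.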
(
total_candidates_pred_df.iloc[
total_candidates_pred_df
.groupby(["drugbank_id", "doid_id"], as_index=False)
.agg({
"pred": 'idxmax'
})
.pred
]
.merge(gen_pred_df, on=["candidate_id"])
.assign(edge_type="CtD")
.sort_values("pred", ascending=False)
.head(10)
.sort_values("candidate_id")
.assign(text=lambda x: tag_sentence(x, CompoundDisease))
.merge(total_candidates_df[["n_sentences", "candidate_id"]], on="candidate_id")
.sort_values("pred", ascending=False)
.drop_duplicates()
.assign(hetionet=lambda x: x.hetionet.apply(lambda x: "Existing" if x == 1 else "Novel"))
[["edge_type", "drug_name", "doid_name", "gen_pred", "pred", "n_sentences", "hetionet", "text"]]
.to_csv("output/top_ten_edge_predictions.tsv", sep="\t", index=False, float_format="%.3g")
)
# In[25]:
datarows = []
fpr, tpr, threshold = roc_curve(
grouped_candidates_pred_df.hetionet.values,
grouped_candidates_pred_df.pred_max.values
)
fnr = 1 - tpr
optimal_threshold = threshold[pd.np.nanargmin(pd.np.absolute((fnr - fpr)))]
datarows.append({
"recall":(
grouped_candidates_pred_df
.query("pred_max > @optimal_threshold")
.hetionet
.value_counts()[1] /
grouped_candidates_pred_df
.hetionet.
value_counts()[1]
),
"edges":(
grouped_candidates_pred_df
.query("pred_max > @optimal_threshold")
.hetionet
.value_counts()[1]
),
"in_hetionet": "Existing",
"total": int(grouped_candidates_pred_df.hetionet.value_counts()[1]),
"relation":"CtD"
})
datarows.append({
"edges":(
grouped_candidates_pred_df
.query("pred_max > @optimal_threshold")
.hetionet
.value_counts()[0]
),
"in_hetionet": "Novel",
"relation":"CtD"
})
edges_df = pd.DataFrame.from_records(datarows)
edges_df
# In[26]:
g = (
p9.ggplot(edges_df, p9.aes(x="relation", y="edges", fill="in_hetionet"))
+ p9.geom_col(position="dodge")
+ p9.geom_text(
p9.aes(
label=(
edges_df
.apply(
lambda x:
f"{x['edges']} ({x['recall']*100:.0f}%)"
if not math.isnan(x['recall']) else
f"{x['edges']}",
axis=1
)
)
),
position=p9.position_dodge(width=1),
size=9,
va="bottom"
)
+ p9.scale_y_log10()
+ p9.theme(
axis_text_y=p9.element_blank(),
axis_ticks_major = p9.element_blank(),
rect=p9.element_blank()
)
)
print(g)
| []
| []
| [
"SNORKELDB"
]
| [] | ["SNORKELDB"] | python | 1 | 0 | |
env/lib/python3.9/site-packages/treebeard/tests/test_treebeard.py | """Unit/Functional tests"""
import datetime
import os
from django.contrib.admin.sites import AdminSite
from django.contrib.admin.views.main import ChangeList
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.messages.storage.fallback import FallbackStorage
from django.db.models import Q
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.template import Template, Context
from django.test import TestCase
from django.test.client import RequestFactory
from django.templatetags.static import static
from django.contrib.admin.options import TO_FIELD_VAR
import pytest
from treebeard import numconv
from treebeard.admin import admin_factory
from treebeard.exceptions import (
InvalidPosition,
InvalidMoveToDescendant,
PathOverflow,
MissingNodeOrderBy,
NodeAlreadySaved,
)
from treebeard.forms import movenodeform_factory
from treebeard.tests import models
from treebeard.tests.admin import register_all as admin_register_all
admin_register_all()
BASE_DATA = [
{"data": {"desc": "1"}},
{
"data": {"desc": "2"},
"children": [
{"data": {"desc": "21"}},
{"data": {"desc": "22"}},
{
"data": {"desc": "23"},
"children": [
{"data": {"desc": "231"}},
],
},
{"data": {"desc": "24"}},
],
},
{"data": {"desc": "3"}},
{
"data": {"desc": "4"},
"children": [
{"data": {"desc": "41"}},
],
},
]
UNCHANGED = [
("1", 1, 0),
("2", 1, 4),
("21", 2, 0),
("22", 2, 0),
("23", 2, 1),
("231", 3, 0),
("24", 2, 0),
("3", 1, 0),
("4", 1, 1),
("41", 2, 0),
]
@pytest.fixture(scope="function", params=models.BASE_MODELS + models.PROXY_MODELS)
def model(request):
request.param.load_bulk(BASE_DATA)
return request.param
@pytest.fixture(scope="function", params=models.BASE_MODELS + models.PROXY_MODELS)
def model_without_data(request):
return request.param
@pytest.fixture(scope="function", params=models.BASE_MODELS)
def model_without_proxy(request):
request.param.load_bulk(BASE_DATA)
return request.param
@pytest.fixture(scope="function", params=models.UNICODE_MODELS)
def model_with_unicode(request):
return request.param
@pytest.fixture(scope="function", params=models.SORTED_MODELS)
def sorted_model(request):
return request.param
@pytest.fixture(scope="function", params=models.RELATED_MODELS)
def related_model(request):
return request.param
@pytest.fixture(scope="function", params=models.INHERITED_MODELS)
def inherited_model(request):
return request.param
@pytest.fixture(scope="function", params=models.MP_SHORTPATH_MODELS)
def mpshort_model(request):
return request.param
@pytest.fixture(scope="function", params=[models.MP_TestNodeShortPath])
def mpshortnotsorted_model(request):
return request.param
@pytest.fixture(scope="function", params=[models.MP_TestNodeAlphabet])
def mpalphabet_model(request):
return request.param
@pytest.fixture(scope="function", params=[models.MP_TestNodeSortedAutoNow])
def mpsortedautonow_model(request):
return request.param
@pytest.fixture(scope="function", params=[models.MP_TestNodeSmallStep])
def mpsmallstep_model(request):
return request.param
@pytest.fixture(scope="function", params=[models.MP_TestManyToManyWithUser])
def mpm2muser_model(request):
return request.param
class TestTreeBase(object):
def got(self, model):
if model in [models.NS_TestNode, models.NS_TestNode_Proxy]:
# this slows down nested sets tests quite a bit, but it has the
# advantage that we'll check the node edges are correct
d = {}
for tree_id, lft, rgt in model.objects.values_list("tree_id", "lft", "rgt"):
d.setdefault(tree_id, []).extend([lft, rgt])
for tree_id, got_edges in d.items():
assert len(got_edges) == max(got_edges)
good_edges = list(range(1, len(got_edges) + 1))
assert sorted(got_edges) == good_edges
return [
(o.desc, o.get_depth(), o.get_children_count()) for o in model.get_tree()
]
def _assert_get_annotated_list(self, model, expected, parent=None):
results = model.get_annotated_list(parent)
got = [
(obj[0].desc, obj[1]["open"], obj[1]["close"], obj[1]["level"])
for obj in results
]
assert expected == got
assert all([type(obj[0]) == model for obj in results])
@pytest.mark.django_db
class TestEmptyTree(TestTreeBase):
def test_load_bulk_empty(self, model_without_data):
ids = model_without_data.load_bulk(BASE_DATA)
got_descs = [obj.desc for obj in model_without_data.objects.filter(pk__in=ids)]
expected_descs = [x[0] for x in UNCHANGED]
assert sorted(got_descs) == sorted(expected_descs)
assert self.got(model_without_data) == UNCHANGED
def test_dump_bulk_empty(self, model_without_data):
assert model_without_data.dump_bulk() == []
def test_add_root_empty(self, model_without_data):
model_without_data.add_root(desc="1")
expected = [("1", 1, 0)]
assert self.got(model_without_data) == expected
def test_get_root_nodes_empty(self, model_without_data):
got = model_without_data.get_root_nodes()
expected = []
assert [node.desc for node in got] == expected
def test_get_first_root_node_empty(self, model_without_data):
got = model_without_data.get_first_root_node()
assert got is None
def test_get_last_root_node_empty(self, model_without_data):
got = model_without_data.get_last_root_node()
assert got is None
def test_get_tree(self, model_without_data):
got = list(model_without_data.get_tree())
assert got == []
def test_get_annotated_list(self, model_without_data):
expected = []
self._assert_get_annotated_list(model_without_data, expected)
def test_add_multiple_root_nodes_adds_sibling_leaves(self, model_without_data):
model_without_data.add_root(desc="1")
model_without_data.add_root(desc="2")
model_without_data.add_root(desc="3")
model_without_data.add_root(desc="4")
# these are all sibling root nodes (depth=1), and leaf nodes (children=0)
expected = [("1", 1, 0), ("2", 1, 0), ("3", 1, 0), ("4", 1, 0)]
assert self.got(model_without_data) == expected
class TestNonEmptyTree(TestTreeBase):
pass
@pytest.mark.django_db
class TestClassMethods(TestNonEmptyTree):
def test_load_bulk_existing(self, model):
# inserting on an existing node
node = model.objects.get(desc="231")
ids = model.load_bulk(BASE_DATA, node)
expected = [
("1", 1, 0),
("2", 1, 4),
("21", 2, 0),
("22", 2, 0),
("23", 2, 1),
("231", 3, 4),
("1", 4, 0),
("2", 4, 4),
("21", 5, 0),
("22", 5, 0),
("23", 5, 1),
("231", 6, 0),
("24", 5, 0),
("3", 4, 0),
("4", 4, 1),
("41", 5, 0),
("24", 2, 0),
("3", 1, 0),
("4", 1, 1),
("41", 2, 0),
]
expected_descs = ["1", "2", "21", "22", "23", "231", "24", "3", "4", "41"]
got_descs = [obj.desc for obj in model.objects.filter(pk__in=ids)]
assert sorted(got_descs) == sorted(expected_descs)
assert self.got(model) == expected
def test_get_tree_all(self, model):
nodes = model.get_tree()
got = [(o.desc, o.get_depth(), o.get_children_count()) for o in nodes]
assert got == UNCHANGED
assert all([type(o) == model for o in nodes])
def test_dump_bulk_all(self, model):
assert model.dump_bulk(keep_ids=False) == BASE_DATA
def test_get_tree_node(self, model):
node = model.objects.get(desc="231")
model.load_bulk(BASE_DATA, node)
# the tree was modified by load_bulk, so we reload our node object
node = model.objects.get(pk=node.pk)
nodes = model.get_tree(node)
got = [(o.desc, o.get_depth(), o.get_children_count()) for o in nodes]
expected = [
("231", 3, 4),
("1", 4, 0),
("2", 4, 4),
("21", 5, 0),
("22", 5, 0),
("23", 5, 1),
("231", 6, 0),
("24", 5, 0),
("3", 4, 0),
("4", 4, 1),
("41", 5, 0),
]
assert got == expected
assert all([type(o) == model for o in nodes])
def test_get_tree_leaf(self, model):
node = model.objects.get(desc="1")
assert 0 == node.get_children_count()
nodes = model.get_tree(node)
got = [(o.desc, o.get_depth(), o.get_children_count()) for o in nodes]
expected = [("1", 1, 0)]
assert got == expected
assert all([type(o) == model for o in nodes])
def test_get_annotated_list_all(self, model):
expected = [
("1", True, [], 0),
("2", False, [], 0),
("21", True, [], 1),
("22", False, [], 1),
("23", False, [], 1),
("231", True, [0], 2),
("24", False, [0], 1),
("3", False, [], 0),
("4", False, [], 0),
("41", True, [0, 1], 1),
]
self._assert_get_annotated_list(model, expected)
def test_get_annotated_list_node(self, model):
node = model.objects.get(desc="2")
expected = [
("2", True, [], 0),
("21", True, [], 1),
("22", False, [], 1),
("23", False, [], 1),
("231", True, [0], 2),
("24", False, [0, 1], 1),
]
self._assert_get_annotated_list(model, expected, node)
def test_get_annotated_list_leaf(self, model):
node = model.objects.get(desc="1")
expected = [("1", True, [0], 0)]
self._assert_get_annotated_list(model, expected, node)
def test_dump_bulk_node(self, model):
node = model.objects.get(desc="231")
model.load_bulk(BASE_DATA, node)
# the tree was modified by load_bulk, so we reload our node object
node = model.objects.get(pk=node.pk)
got = model.dump_bulk(node, False)
expected = [{"data": {"desc": "231"}, "children": BASE_DATA}]
assert got == expected
def test_load_and_dump_bulk_keeping_ids(self, model):
exp = model.dump_bulk(keep_ids=True)
model.objects.all().delete()
model.load_bulk(exp, None, True)
got = model.dump_bulk(keep_ids=True)
assert got == exp
# do we really have an unchanged tree after the dump/delete/load?
got = [
(o.desc, o.get_depth(), o.get_children_count()) for o in model.get_tree()
]
assert got == UNCHANGED
def test_load_and_dump_bulk_with_fk(self, related_model):
# https://bitbucket.org/tabo/django-treebeard/issue/48/
related_model.objects.all().delete()
related, created = models.RelatedModel.objects.get_or_create(
desc="Test %s" % related_model.__name__
)
related_data = [
{"data": {"desc": "1", "related": related.pk}},
{
"data": {"desc": "2", "related": related.pk},
"children": [
{"data": {"desc": "21", "related": related.pk}},
{"data": {"desc": "22", "related": related.pk}},
{
"data": {"desc": "23", "related": related.pk},
"children": [
{"data": {"desc": "231", "related": related.pk}},
],
},
{"data": {"desc": "24", "related": related.pk}},
],
},
{"data": {"desc": "3", "related": related.pk}},
{
"data": {"desc": "4", "related": related.pk},
"children": [
{"data": {"desc": "41", "related": related.pk}},
],
},
]
related_model.load_bulk(related_data)
got = related_model.dump_bulk(keep_ids=False)
assert got == related_data
def test_get_root_nodes(self, model):
got = model.get_root_nodes()
expected = ["1", "2", "3", "4"]
assert [node.desc for node in got] == expected
assert all([type(node) == model for node in got])
def test_get_first_root_node(self, model):
got = model.get_first_root_node()
assert got.desc == "1"
assert type(got) == model
def test_get_last_root_node(self, model):
got = model.get_last_root_node()
assert got.desc == "4"
assert type(got) == model
def test_add_root(self, model):
obj = model.add_root(desc="5")
assert obj.get_depth() == 1
got = model.get_last_root_node()
assert got.desc == "5"
assert type(got) == model
def test_add_root_with_passed_instance(self, model):
obj = model(desc="5")
result = model.add_root(instance=obj)
assert result == obj
got = model.get_last_root_node()
assert got.desc == "5"
assert type(got) == model
def test_add_root_with_already_saved_instance(self, model):
obj = model.objects.get(desc="4")
with pytest.raises(NodeAlreadySaved):
model.add_root(instance=obj)
@pytest.mark.django_db
class TestSimpleNodeMethods(TestNonEmptyTree):
def test_is_root(self, model):
data = [
("2", True),
("1", True),
("4", True),
("21", False),
("24", False),
("22", False),
("231", False),
]
for desc, expected in data:
got = model.objects.get(desc=desc).is_root()
assert got == expected
def test_is_leaf(self, model):
data = [
("2", False),
("23", False),
("231", True),
]
for desc, expected in data:
got = model.objects.get(desc=desc).is_leaf()
assert got == expected
def test_get_root(self, model):
data = [
("2", "2"),
("1", "1"),
("4", "4"),
("21", "2"),
("24", "2"),
("22", "2"),
("231", "2"),
]
for desc, expected in data:
node = model.objects.get(desc=desc).get_root()
assert node.desc == expected
assert type(node) == model
def test_get_parent(self, model):
data = [
("2", None),
("1", None),
("4", None),
("21", "2"),
("24", "2"),
("22", "2"),
("231", "23"),
]
data = dict(data)
objs = {}
for desc, expected in data.items():
node = model.objects.get(desc=desc)
parent = node.get_parent()
if expected:
assert parent.desc == expected
assert type(parent) == model
else:
assert parent is None
objs[desc] = node
# corrupt the objects' parent cache
node._parent_obj = "CORRUPTED!!!"
for desc, expected in data.items():
node = objs[desc]
# asking get_parent to not use the parent cache (since we
# corrupted it in the previous loop)
parent = node.get_parent(True)
if expected:
assert parent.desc == expected
assert type(parent) == model
else:
assert parent is None
def test_get_children(self, model):
data = [
("2", ["21", "22", "23", "24"]),
("23", ["231"]),
("231", []),
]
for desc, expected in data:
children = model.objects.get(desc=desc).get_children()
assert [node.desc for node in children] == expected
assert all([type(node) == model for node in children])
def test_get_children_count(self, model):
data = [
("2", 4),
("23", 1),
("231", 0),
]
for desc, expected in data:
got = model.objects.get(desc=desc).get_children_count()
assert got == expected
def test_get_siblings(self, model):
data = [
("2", ["1", "2", "3", "4"]),
("21", ["21", "22", "23", "24"]),
("231", ["231"]),
]
for desc, expected in data:
siblings = model.objects.get(desc=desc).get_siblings()
assert [node.desc for node in siblings] == expected
assert all([type(node) == model for node in siblings])
def test_get_first_sibling(self, model):
data = [
("2", "1"),
("1", "1"),
("4", "1"),
("21", "21"),
("24", "21"),
("22", "21"),
("231", "231"),
]
for desc, expected in data:
node = model.objects.get(desc=desc).get_first_sibling()
assert node.desc == expected
assert type(node) == model
def test_get_prev_sibling(self, model):
data = [
("2", "1"),
("1", None),
("4", "3"),
("21", None),
("24", "23"),
("22", "21"),
("231", None),
]
for desc, expected in data:
node = model.objects.get(desc=desc).get_prev_sibling()
if expected is None:
assert node is None
else:
assert node.desc == expected
assert type(node) == model
def test_get_next_sibling(self, model):
data = [
("2", "3"),
("1", "2"),
("4", None),
("21", "22"),
("24", None),
("22", "23"),
("231", None),
]
for desc, expected in data:
node = model.objects.get(desc=desc).get_next_sibling()
if expected is None:
assert node is None
else:
assert node.desc == expected
assert type(node) == model
def test_get_last_sibling(self, model):
data = [
("2", "4"),
("1", "4"),
("4", "4"),
("21", "24"),
("24", "24"),
("22", "24"),
("231", "231"),
]
for desc, expected in data:
node = model.objects.get(desc=desc).get_last_sibling()
assert node.desc == expected
assert type(node) == model
def test_get_first_child(self, model):
data = [
("2", "21"),
("21", None),
("23", "231"),
("231", None),
]
for desc, expected in data:
node = model.objects.get(desc=desc).get_first_child()
if expected is None:
assert node is None
else:
assert node.desc == expected
assert type(node) == model
def test_get_last_child(self, model):
data = [
("2", "24"),
("21", None),
("23", "231"),
("231", None),
]
for desc, expected in data:
node = model.objects.get(desc=desc).get_last_child()
if expected is None:
assert node is None
else:
assert node.desc == expected
assert type(node) == model
def test_get_ancestors(self, model):
data = [
("2", []),
("21", ["2"]),
("231", ["2", "23"]),
]
for desc, expected in data:
nodes = model.objects.get(desc=desc).get_ancestors()
assert [node.desc for node in nodes] == expected
assert all([type(node) == model for node in nodes])
def test_get_descendants(self, model):
data = [
("2", ["21", "22", "23", "231", "24"]),
("23", ["231"]),
("231", []),
("1", []),
("4", ["41"]),
]
for desc, expected in data:
nodes = model.objects.get(desc=desc).get_descendants()
assert [node.desc for node in nodes] == expected
assert all([type(node) == model for node in nodes])
def test_get_descendant_count(self, model):
data = [
("2", 5),
("23", 1),
("231", 0),
("1", 0),
("4", 1),
]
for desc, expected in data:
got = model.objects.get(desc=desc).get_descendant_count()
assert got == expected
def test_is_sibling_of(self, model):
data = [
("2", "2", True),
("2", "1", True),
("21", "2", False),
("231", "2", False),
("22", "23", True),
("231", "23", False),
("231", "231", True),
]
for desc1, desc2, expected in data:
node1 = model.objects.get(desc=desc1)
node2 = model.objects.get(desc=desc2)
assert node1.is_sibling_of(node2) == expected
def test_is_child_of(self, model):
data = [
("2", "2", False),
("2", "1", False),
("21", "2", True),
("231", "2", False),
("231", "23", True),
("231", "231", False),
]
for desc1, desc2, expected in data:
node1 = model.objects.get(desc=desc1)
node2 = model.objects.get(desc=desc2)
assert node1.is_child_of(node2) == expected
def test_is_descendant_of(self, model):
data = [
("2", "2", False),
("2", "1", False),
("21", "2", True),
("231", "2", True),
("231", "23", True),
("231", "231", False),
]
for desc1, desc2, expected in data:
node1 = model.objects.get(desc=desc1)
node2 = model.objects.get(desc=desc2)
assert node1.is_descendant_of(node2) == expected
@pytest.mark.django_db
class TestAddChild(TestNonEmptyTree):
def test_add_child_to_leaf(self, model):
model.objects.get(desc="231").add_child(desc="2311")
expected = [
("1", 1, 0),
("2", 1, 4),
("21", 2, 0),
("22", 2, 0),
("23", 2, 1),
("231", 3, 1),
("2311", 4, 0),
("24", 2, 0),
("3", 1, 0),
("4", 1, 1),
("41", 2, 0),
]
assert self.got(model) == expected
def test_add_child_to_node(self, model):
model.objects.get(desc="2").add_child(desc="25")
expected = [
("1", 1, 0),
("2", 1, 5),
("21", 2, 0),
("22", 2, 0),
("23", 2, 1),
("231", 3, 0),
("24", 2, 0),
("25", 2, 0),
("3", 1, 0),
("4", 1, 1),
("41", 2, 0),
]
assert self.got(model) == expected
def test_add_child_with_passed_instance(self, model):
child = model(desc="2311")
result = model.objects.get(desc="231").add_child(instance=child)
assert result == child
expected = [
("1", 1, 0),
("2", 1, 4),
("21", 2, 0),
("22", 2, 0),
("23", 2, 1),
("231", 3, 1),
("2311", 4, 0),
("24", 2, 0),
("3", 1, 0),
("4", 1, 1),
("41", 2, 0),
]
assert self.got(model) == expected
def test_add_child_with_already_saved_instance(self, model):
child = model.objects.get(desc="21")
with pytest.raises(NodeAlreadySaved):
model.objects.get(desc="2").add_child(instance=child)
def test_add_child_with_pk_set(self, model):
"""
If the model is using a natural primary key then it will be
already set when the instance is inserted.
"""
child = model(pk=999999, desc="natural key")
result = model.objects.get(desc="2").add_child(instance=child)
assert result == child
def test_add_child_post_save(self, model):
try:
@receiver(post_save, dispatch_uid="test_add_child_post_save")
def on_post_save(instance, **kwargs):
parent = instance.get_parent()
parent.refresh_from_db()
assert parent.get_descendant_count() == 1
# It's important that we're testing a leaf node
parent = model.objects.get(desc="231")
assert parent.is_leaf()
parent.add_child(desc="2311")
finally:
post_save.disconnect(dispatch_uid="test_add_child_post_save")
@pytest.mark.django_db
class TestAddSibling(TestNonEmptyTree):
def test_add_sibling_invalid_pos(self, model):
with pytest.raises(InvalidPosition):
model.objects.get(desc="231").add_sibling("invalid_pos")
def test_add_sibling_missing_nodeorderby(self, model):
node_wchildren = model.objects.get(desc="2")
with pytest.raises(MissingNodeOrderBy):
node_wchildren.add_sibling("sorted-sibling", desc="aaa")
def test_add_sibling_last_root(self, model):
node_wchildren = model.objects.get(desc="2")
obj = node_wchildren.add_sibling("last-sibling", desc="5")
assert obj.get_depth() == 1
assert node_wchildren.get_last_sibling().desc == "5"
def test_add_sibling_last(self, model):
node = model.objects.get(desc="231")
obj = node.add_sibling("last-sibling", desc="232")
assert obj.get_depth() == 3
assert node.get_last_sibling().desc == "232"
def test_add_sibling_first_root(self, model):
node_wchildren = model.objects.get(desc="2")
obj = node_wchildren.add_sibling("first-sibling", desc="new")
assert obj.get_depth() == 1
expected = [
("new", 1, 0),
("1", 1, 0),
("2", 1, 4),
("21", 2, 0),
("22", 2, 0),
("23", 2, 1),
("231", 3, 0),
("24", 2, 0),
("3", 1, 0),
("4", 1, 1),
("41", 2, 0),
]
assert self.got(model) == expected
def test_add_sibling_first(self, model):
node_wchildren = model.objects.get(desc="23")
obj = node_wchildren.add_sibling("first-sibling", desc="new")
assert obj.get_depth() == 2
expected = [
("1", 1, 0),
("2", 1, 5),
("new", 2, 0),
("21", 2, 0),
("22", 2, 0),
("23", 2, 1),
("231", 3, 0),
("24", 2, 0),
("3", 1, 0),
("4", 1, 1),
("41", 2, 0),
]
assert self.got(model) == expected
def test_add_sibling_left_root(self, model):
node_wchildren = model.objects.get(desc="2")
obj = node_wchildren.add_sibling("left", desc="new")
assert obj.get_depth() == 1
expected = [
("1", 1, 0),
("new", 1, 0),
("2", 1, 4),
("21", 2, 0),
("22", 2, 0),
("23", 2, 1),
("231", 3, 0),
("24", 2, 0),
("3", 1, 0),
("4", 1, 1),
("41", 2, 0),
]
assert self.got(model) == expected
def test_add_sibling_left(self, model):
node_wchildren = model.objects.get(desc="23")
obj = node_wchildren.add_sibling("left", desc="new")
assert obj.get_depth() == 2
expected = [
("1", 1, 0),
("2", 1, 5),
("21", 2, 0),
("22", 2, 0),
("new", 2, 0),
("23", 2, 1),
("231", 3, 0),
("24", 2, 0),
("3", 1, 0),
("4", 1, 1),
("41", 2, 0),
]
assert self.got(model) == expected
def test_add_sibling_left_noleft_root(self, model):
node = model.objects.get(desc="1")
obj = node.add_sibling("left", desc="new")
assert obj.get_depth() == 1
expected = [
("new", 1, 0),
("1", 1, 0),
("2", 1, 4),
("21", 2, 0),
("22", 2, 0),
("23", 2, 1),
("231", 3, 0),
("24", 2, 0),
("3", 1, 0),
("4", 1, 1),
("41", 2, 0),
]
assert self.got(model) == expected
def test_add_sibling_left_noleft(self, model):
node = model.objects.get(desc="231")
obj = node.add_sibling("left", desc="new")
assert obj.get_depth() == 3
expected = [
("1", 1, 0),
("2", 1, 4),
("21", 2, 0),
("22", 2, 0),
("23", 2, 2),
("new", 3, 0),
("231", 3, 0),
("24", 2, 0),
("3", 1, 0),
("4", 1, 1),
("41", 2, 0),
]
assert self.got(model) == expected
def test_add_sibling_right_root(self, model):
node_wchildren = model.objects.get(desc="2")
obj = node_wchildren.add_sibling("right", desc="new")
assert obj.get_depth() == 1
expected = [
("1", 1, 0),
("2", 1, 4),
("21", 2, 0),
("22", 2, 0),
("23", 2, 1),
("231", 3, 0),
("24", 2, 0),
("new", 1, 0),
("3", 1, 0),
("4", 1, 1),
("41", 2, 0),
]
assert self.got(model) == expected
def test_add_sibling_right(self, model):
node_wchildren = model.objects.get(desc="23")
obj = node_wchildren.add_sibling("right", desc="new")
assert obj.get_depth() == 2
expected = [
("1", 1, 0),
("2", 1, 5),
("21", 2, 0),
("22", 2, 0),
("23", 2, 1),
("231", 3, 0),
("new", 2, 0),
("24", 2, 0),
("3", 1, 0),
("4", 1, 1),
("41", 2, 0),
]
assert self.got(model) == expected
def test_add_sibling_right_noright_root(self, model):
node = model.objects.get(desc="4")
obj = node.add_sibling("right", desc="new")
assert obj.get_depth() == 1
expected = [
("1", 1, 0),
("2", 1, 4),
("21", 2, 0),
("22", 2, 0),
("23", 2, 1),
("231", 3, 0),
("24", 2, 0),
("3", 1, 0),
("4", 1, 1),
("41", 2, 0),
("new", 1, 0),
]
assert self.got(model) == expected
def test_add_sibling_right_noright(self, model):
node = model.objects.get(desc="231")
obj = node.add_sibling("right", desc="new")
assert obj.get_depth() == 3
expected = [
("1", 1, 0),
("2", 1, 4),
("21", 2, 0),
("22", 2, 0),
("23", 2, 2),
("231", 3, 0),
("new", 3, 0),
("24", 2, 0),
("3", 1, 0),
("4", 1, 1),
("41", 2, 0),
]
assert self.got(model) == expected
def test_add_sibling_with_passed_instance(self, model):
node_wchildren = model.objects.get(desc="2")
obj = model(desc="5")
result = node_wchildren.add_sibling("last-sibling", instance=obj)
assert result == obj
assert obj.get_depth() == 1
assert node_wchildren.get_last_sibling().desc == "5"
def test_add_sibling_already_saved_instance(self, model):
node_wchildren = model.objects.get(desc="2")
existing_node = model.objects.get(desc="4")
with pytest.raises(NodeAlreadySaved):
node_wchildren.add_sibling("last-sibling", instance=existing_node)
def test_add_child_with_pk_set(self, model):
"""
If the model is using a natural primary key then it will be
already set when the instance is inserted.
"""
child = model(pk=999999, desc="natural key")
result = model.objects.get(desc="2").add_child(instance=child)
assert result == child
@pytest.mark.django_db
class TestDelete(TestTreeBase):
@staticmethod
@pytest.fixture(
scope="function",
params=zip(models.BASE_MODELS, models.DEP_MODELS),
ids=lambda fv: f"base={fv[0].__name__} dep={fv[1].__name__}",
)
def delete_model(request):
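        # Build the standard BASE_DATA tree and attach one dependent (FK) object per
        # node, so deletions also exercise cascades to related rows.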
base_model, dep_model = request.param
base_model.load_bulk(BASE_DATA)
for node in base_model.objects.all():
dep_model(node=node).save()
return base_model
def test_delete_leaf(self, delete_model):
delete_model.objects.get(desc="231").delete()
expected = [
("1", 1, 0),
("2", 1, 4),
("21", 2, 0),
("22", 2, 0),
("23", 2, 0),
("24", 2, 0),
("3", 1, 0),
("4", 1, 1),
("41", 2, 0),
]
assert self.got(delete_model) == expected
def test_delete_node(self, delete_model):
delete_model.objects.get(desc="23").delete()
expected = [
("1", 1, 0),
("2", 1, 3),
("21", 2, 0),
("22", 2, 0),
("24", 2, 0),
("3", 1, 0),
("4", 1, 1),
("41", 2, 0),
]
assert self.got(delete_model) == expected
def test_delete_root(self, delete_model):
delete_model.objects.get(desc="2").delete()
expected = [("1", 1, 0), ("3", 1, 0), ("4", 1, 1), ("41", 2, 0)]
assert self.got(delete_model) == expected
def test_delete_filter_root_nodes(self, delete_model):
delete_model.objects.filter(desc__in=("2", "3")).delete()
expected = [("1", 1, 0), ("4", 1, 1), ("41", 2, 0)]
assert self.got(delete_model) == expected
def test_delete_filter_children(self, delete_model):
delete_model.objects.filter(desc__in=("2", "23", "231")).delete()
expected = [("1", 1, 0), ("3", 1, 0), ("4", 1, 1), ("41", 2, 0)]
assert self.got(delete_model) == expected
def test_delete_nonexistant_nodes(self, delete_model):
delete_model.objects.filter(desc__in=("ZZZ", "XXX")).delete()
assert self.got(delete_model) == UNCHANGED
def test_delete_same_node_twice(self, delete_model):
delete_model.objects.filter(desc__in=("2", "2")).delete()
expected = [("1", 1, 0), ("3", 1, 0), ("4", 1, 1), ("41", 2, 0)]
assert self.got(delete_model) == expected
def test_delete_all_root_nodes(self, delete_model):
delete_model.get_root_nodes().delete()
count = delete_model.objects.count()
assert count == 0
def test_delete_all_nodes(self, delete_model):
delete_model.objects.all().delete()
count = delete_model.objects.count()
assert count == 0
@pytest.mark.django_db
class TestMoveErrors(TestNonEmptyTree):
def test_move_invalid_pos(self, model):
node = model.objects.get(desc="231")
with pytest.raises(InvalidPosition):
node.move(node, "invalid_pos")
def test_move_to_descendant(self, model):
node = model.objects.get(desc="2")
target = model.objects.get(desc="231")
with pytest.raises(InvalidMoveToDescendant):
node.move(target, "first-sibling")
def test_move_missing_nodeorderby(self, model):
node = model.objects.get(desc="231")
with pytest.raises(MissingNodeOrderBy):
node.move(node, "sorted-child")
with pytest.raises(MissingNodeOrderBy):
node.move(node, "sorted-sibling")
@pytest.mark.django_db
class TestMoveSortedErrors(TestTreeBase):
def test_nonsorted_move_in_sorted(self, sorted_model):
node = sorted_model.add_root(val1=3, val2=3, desc="zxy")
with pytest.raises(InvalidPosition):
node.move(node, "left")
@pytest.mark.django_db
class TestMoveLeafRoot(TestNonEmptyTree):
def test_move_leaf_last_sibling_root(self, model):
target = model.objects.get(desc="2")
model.objects.get(desc="231").move(target, "last-sibling")
expected = [
("1", 1, 0),
("2", 1, 4),
("21", 2, 0),
("22", 2, 0),
("23", 2, 0),
("24", 2, 0),
("3", 1, 0),
("4", 1, 1),
("41", 2, 0),
("231", 1, 0),
]
assert self.got(model) == expected
def test_move_leaf_first_sibling_root(self, model):
target = model.objects.get(desc="2")
model.objects.get(desc="231").move(target, "first-sibling")
expected = [
("231", 1, 0),
("1", 1, 0),
("2", 1, 4),
("21", 2, 0),
("22", 2, 0),
("23", 2, 0),
("24", 2, 0),
("3", 1, 0),
("4", 1, 1),
("41", 2, 0),
]
assert self.got(model) == expected
def test_move_leaf_left_sibling_root(self, model):
target = model.objects.get(desc="2")
model.objects.get(desc="231").move(target, "left")
expected = [
("1", 1, 0),
("231", 1, 0),
("2", 1, 4),
("21", 2, 0),
("22", 2, 0),
("23", 2, 0),
("24", 2, 0),
("3", 1, 0),
("4", 1, 1),
("41", 2, 0),
]
assert self.got(model) == expected
def test_move_leaf_right_sibling_root(self, model):
target = model.objects.get(desc="2")
model.objects.get(desc="231").move(target, "right")
expected = [
("1", 1, 0),
("2", 1, 4),
("21", 2, 0),
("22", 2, 0),
("23", 2, 0),
("24", 2, 0),
("231", 1, 0),
("3", 1, 0),
("4", 1, 1),
("41", 2, 0),
]
assert self.got(model) == expected
def test_move_leaf_last_child_root(self, model):
target = model.objects.get(desc="2")
model.objects.get(desc="231").move(target, "last-child")
expected = [
("1", 1, 0),
("2", 1, 5),
("21", 2, 0),
("22", 2, 0),
("23", 2, 0),
("24", 2, 0),
("231", 2, 0),
("3", 1, 0),
("4", 1, 1),
("41", 2, 0),
]
assert self.got(model) == expected
def test_move_leaf_first_child_root(self, model):
target = model.objects.get(desc="2")
model.objects.get(desc="231").move(target, "first-child")
expected = [
("1", 1, 0),
("2", 1, 5),
("231", 2, 0),
("21", 2, 0),
("22", 2, 0),
("23", 2, 0),
("24", 2, 0),
("3", 1, 0),
("4", 1, 1),
("41", 2, 0),
]
assert self.got(model) == expected
@pytest.mark.django_db
class TestMoveLeaf(TestNonEmptyTree):
def test_move_leaf_last_sibling(self, model):
target = model.objects.get(desc="22")
model.objects.get(desc="231").move(target, "last-sibling")
expected = [
("1", 1, 0),
("2", 1, 5),
("21", 2, 0),
("22", 2, 0),
("23", 2, 0),
("24", 2, 0),
("231", 2, 0),
("3", 1, 0),
("4", 1, 1),
("41", 2, 0),
]
assert self.got(model) == expected
def test_move_leaf_first_sibling(self, model):
target = model.objects.get(desc="22")
model.objects.get(desc="231").move(target, "first-sibling")
expected = [
("1", 1, 0),
("2", 1, 5),
("231", 2, 0),
("21", 2, 0),
("22", 2, 0),
("23", 2, 0),
("24", 2, 0),
("3", 1, 0),
("4", 1, 1),
("41", 2, 0),
]
assert self.got(model) == expected
def test_move_leaf_left_sibling(self, model):
target = model.objects.get(desc="22")
model.objects.get(desc="231").move(target, "left")
expected = [
("1", 1, 0),
("2", 1, 5),
("21", 2, 0),
("231", 2, 0),
("22", 2, 0),
("23", 2, 0),
("24", 2, 0),
("3", 1, 0),
("4", 1, 1),
("41", 2, 0),
]
assert self.got(model) == expected
def test_move_leaf_right_sibling(self, model):
target = model.objects.get(desc="22")
model.objects.get(desc="231").move(target, "right")
expected = [
("1", 1, 0),
("2", 1, 5),
("21", 2, 0),
("22", 2, 0),
("231", 2, 0),
("23", 2, 0),
("24", 2, 0),
("3", 1, 0),
("4", 1, 1),
("41", 2, 0),
]
assert self.got(model) == expected
def test_move_leaf_left_sibling_itself(self, model):
target = model.objects.get(desc="231")
model.objects.get(desc="231").move(target, "left")
assert self.got(model) == UNCHANGED
def test_move_leaf_last_child(self, model):
target = model.objects.get(desc="22")
model.objects.get(desc="231").move(target, "last-child")
expected = [
("1", 1, 0),
("2", 1, 4),
("21", 2, 0),
("22", 2, 1),
("231", 3, 0),
("23", 2, 0),
("24", 2, 0),
("3", 1, 0),
("4", 1, 1),
("41", 2, 0),
]
assert self.got(model) == expected
def test_move_leaf_first_child(self, model):
target = model.objects.get(desc="22")
model.objects.get(desc="231").move(target, "first-child")
expected = [
("1", 1, 0),
("2", 1, 4),
("21", 2, 0),
("22", 2, 1),
("231", 3, 0),
("23", 2, 0),
("24", 2, 0),
("3", 1, 0),
("4", 1, 1),
("41", 2, 0),
]
assert self.got(model) == expected
@pytest.mark.django_db
class TestMoveBranchRoot(TestNonEmptyTree):
def test_move_branch_first_sibling_root(self, model):
target = model.objects.get(desc="2")
model.objects.get(desc="4").move(target, "first-sibling")
expected = [
("4", 1, 1),
("41", 2, 0),
("1", 1, 0),
("2", 1, 4),
("21", 2, 0),
("22", 2, 0),
("23", 2, 1),
("231", 3, 0),
("24", 2, 0),
("3", 1, 0),
]
assert self.got(model) == expected
def test_move_branch_last_sibling_root(self, model):
target = model.objects.get(desc="2")
model.objects.get(desc="4").move(target, "last-sibling")
expected = [
("1", 1, 0),
("2", 1, 4),
("21", 2, 0),
("22", 2, 0),
("23", 2, 1),
("231", 3, 0),
("24", 2, 0),
("3", 1, 0),
("4", 1, 1),
("41", 2, 0),
]
assert self.got(model) == expected
def test_move_branch_left_sibling_root(self, model):
target = model.objects.get(desc="2")
model.objects.get(desc="4").move(target, "left")
expected = [
("1", 1, 0),
("4", 1, 1),
("41", 2, 0),
("2", 1, 4),
("21", 2, 0),
("22", 2, 0),
("23", 2, 1),
("231", 3, 0),
("24", 2, 0),
("3", 1, 0),
]
assert self.got(model) == expected
def test_move_branch_right_sibling_root(self, model):
target = model.objects.get(desc="2")
model.objects.get(desc="4").move(target, "right")
expected = [
("1", 1, 0),
("2", 1, 4),
("21", 2, 0),
("22", 2, 0),
("23", 2, 1),
("231", 3, 0),
("24", 2, 0),
("4", 1, 1),
("41", 2, 0),
("3", 1, 0),
]
assert self.got(model) == expected
def test_move_branch_left_noleft_sibling_root(self, model):
target = model.objects.get(desc="2").get_first_sibling()
model.objects.get(desc="4").move(target, "left")
expected = [
("4", 1, 1),
("41", 2, 0),
("1", 1, 0),
("2", 1, 4),
("21", 2, 0),
("22", 2, 0),
("23", 2, 1),
("231", 3, 0),
("24", 2, 0),
("3", 1, 0),
]
assert self.got(model) == expected
def test_move_branch_right_noright_sibling_root(self, model):
target = model.objects.get(desc="2").get_last_sibling()
model.objects.get(desc="4").move(target, "right")
expected = [
("1", 1, 0),
("2", 1, 4),
("21", 2, 0),
("22", 2, 0),
("23", 2, 1),
("231", 3, 0),
("24", 2, 0),
("3", 1, 0),
("4", 1, 1),
("41", 2, 0),
]
assert self.got(model) == expected
def test_move_branch_first_child_root(self, model):
target = model.objects.get(desc="2")
model.objects.get(desc="4").move(target, "first-child")
expected = [
("1", 1, 0),
("2", 1, 5),
("4", 2, 1),
("41", 3, 0),
("21", 2, 0),
("22", 2, 0),
("23", 2, 1),
("231", 3, 0),
("24", 2, 0),
("3", 1, 0),
]
assert self.got(model) == expected
def test_move_branch_last_child_root(self, model):
target = model.objects.get(desc="2")
model.objects.get(desc="4").move(target, "last-child")
expected = [
("1", 1, 0),
("2", 1, 5),
("21", 2, 0),
("22", 2, 0),
("23", 2, 1),
("231", 3, 0),
("24", 2, 0),
("4", 2, 1),
("41", 3, 0),
("3", 1, 0),
]
assert self.got(model) == expected
@pytest.mark.django_db
class TestMoveBranch(TestNonEmptyTree):
def test_move_branch_first_sibling(self, model):
target = model.objects.get(desc="23")
model.objects.get(desc="4").move(target, "first-sibling")
expected = [
("1", 1, 0),
("2", 1, 5),
("4", 2, 1),
("41", 3, 0),
("21", 2, 0),
("22", 2, 0),
("23", 2, 1),
("231", 3, 0),
("24", 2, 0),
("3", 1, 0),
]
assert self.got(model) == expected
def test_move_branch_last_sibling(self, model):
target = model.objects.get(desc="23")
model.objects.get(desc="4").move(target, "last-sibling")
expected = [
("1", 1, 0),
("2", 1, 5),
("21", 2, 0),
("22", 2, 0),
("23", 2, 1),
("231", 3, 0),
("24", 2, 0),
("4", 2, 1),
("41", 3, 0),
("3", 1, 0),
]
assert self.got(model) == expected
def test_move_branch_left_sibling(self, model):
target = model.objects.get(desc="23")
model.objects.get(desc="4").move(target, "left")
expected = [
("1", 1, 0),
("2", 1, 5),
("21", 2, 0),
("22", 2, 0),
("4", 2, 1),
("41", 3, 0),
("23", 2, 1),
("231", 3, 0),
("24", 2, 0),
("3", 1, 0),
]
assert self.got(model) == expected
def test_move_branch_right_sibling(self, model):
target = model.objects.get(desc="23")
model.objects.get(desc="4").move(target, "right")
expected = [
("1", 1, 0),
("2", 1, 5),
("21", 2, 0),
("22", 2, 0),
("23", 2, 1),
("231", 3, 0),
("4", 2, 1),
("41", 3, 0),
("24", 2, 0),
("3", 1, 0),
]
assert self.got(model) == expected
def test_move_branch_left_noleft_sibling(self, model):
target = model.objects.get(desc="23").get_first_sibling()
model.objects.get(desc="4").move(target, "left")
expected = [
("1", 1, 0),
("2", 1, 5),
("4", 2, 1),
("41", 3, 0),
("21", 2, 0),
("22", 2, 0),
("23", 2, 1),
("231", 3, 0),
("24", 2, 0),
("3", 1, 0),
]
assert self.got(model) == expected
def test_move_branch_right_noright_sibling(self, model):
target = model.objects.get(desc="23").get_last_sibling()
model.objects.get(desc="4").move(target, "right")
expected = [
("1", 1, 0),
("2", 1, 5),
("21", 2, 0),
("22", 2, 0),
("23", 2, 1),
("231", 3, 0),
("24", 2, 0),
("4", 2, 1),
("41", 3, 0),
("3", 1, 0),
]
assert self.got(model) == expected
def test_move_branch_left_itself_sibling(self, model):
target = model.objects.get(desc="4")
model.objects.get(desc="4").move(target, "left")
assert self.got(model) == UNCHANGED
def test_move_branch_first_child(self, model):
target = model.objects.get(desc="23")
model.objects.get(desc="4").move(target, "first-child")
expected = [
("1", 1, 0),
("2", 1, 4),
("21", 2, 0),
("22", 2, 0),
("23", 2, 2),
("4", 3, 1),
("41", 4, 0),
("231", 3, 0),
("24", 2, 0),
("3", 1, 0),
]
assert self.got(model) == expected
def test_move_branch_last_child(self, model):
target = model.objects.get(desc="23")
model.objects.get(desc="4").move(target, "last-child")
expected = [
("1", 1, 0),
("2", 1, 4),
("21", 2, 0),
("22", 2, 0),
("23", 2, 2),
("231", 3, 0),
("4", 3, 1),
("41", 4, 0),
("24", 2, 0),
("3", 1, 0),
]
assert self.got(model) == expected
@pytest.mark.django_db
class TestTreeSorted(TestTreeBase):
def got(self, sorted_model):
return [
(o.val1, o.val2, o.desc, o.get_depth(), o.get_children_count())
for o in sorted_model.get_tree()
]
def test_add_root_sorted(self, sorted_model):
sorted_model.add_root(val1=3, val2=3, desc="zxy")
sorted_model.add_root(val1=1, val2=4, desc="bcd")
sorted_model.add_root(val1=2, val2=5, desc="zxy")
sorted_model.add_root(val1=3, val2=3, desc="abc")
sorted_model.add_root(val1=4, val2=1, desc="fgh")
sorted_model.add_root(val1=3, val2=3, desc="abc")
sorted_model.add_root(val1=2, val2=2, desc="qwe")
sorted_model.add_root(val1=3, val2=2, desc="vcx")
expected = [
(1, 4, "bcd", 1, 0),
(2, 2, "qwe", 1, 0),
(2, 5, "zxy", 1, 0),
(3, 2, "vcx", 1, 0),
(3, 3, "abc", 1, 0),
(3, 3, "abc", 1, 0),
(3, 3, "zxy", 1, 0),
(4, 1, "fgh", 1, 0),
]
assert self.got(sorted_model) == expected
def test_add_child_root_sorted(self, sorted_model):
root = sorted_model.add_root(val1=0, val2=0, desc="aaa")
root.add_child(val1=3, val2=3, desc="zxy")
root.add_child(val1=1, val2=4, desc="bcd")
root.add_child(val1=2, val2=5, desc="zxy")
root.add_child(val1=3, val2=3, desc="abc")
root.add_child(val1=4, val2=1, desc="fgh")
root.add_child(val1=3, val2=3, desc="abc")
root.add_child(val1=2, val2=2, desc="qwe")
root.add_child(val1=3, val2=2, desc="vcx")
expected = [
(0, 0, "aaa", 1, 8),
(1, 4, "bcd", 2, 0),
(2, 2, "qwe", 2, 0),
(2, 5, "zxy", 2, 0),
(3, 2, "vcx", 2, 0),
(3, 3, "abc", 2, 0),
(3, 3, "abc", 2, 0),
(3, 3, "zxy", 2, 0),
(4, 1, "fgh", 2, 0),
]
assert self.got(sorted_model) == expected
def test_add_child_nonroot_sorted(self, sorted_model):
get_node = lambda node_id: sorted_model.objects.get(pk=node_id)
root_id = sorted_model.add_root(val1=0, val2=0, desc="a").pk
node_id = get_node(root_id).add_child(val1=0, val2=0, desc="ac").pk
get_node(root_id).add_child(val1=0, val2=0, desc="aa")
get_node(root_id).add_child(val1=0, val2=0, desc="av")
get_node(node_id).add_child(val1=0, val2=0, desc="aca")
get_node(node_id).add_child(val1=0, val2=0, desc="acc")
get_node(node_id).add_child(val1=0, val2=0, desc="acb")
expected = [
(0, 0, "a", 1, 3),
(0, 0, "aa", 2, 0),
(0, 0, "ac", 2, 3),
(0, 0, "aca", 3, 0),
(0, 0, "acb", 3, 0),
(0, 0, "acc", 3, 0),
(0, 0, "av", 2, 0),
]
assert self.got(sorted_model) == expected
def test_move_sorted(self, sorted_model):
sorted_model.add_root(val1=3, val2=3, desc="zxy")
sorted_model.add_root(val1=1, val2=4, desc="bcd")
sorted_model.add_root(val1=2, val2=5, desc="zxy")
sorted_model.add_root(val1=3, val2=3, desc="abc")
sorted_model.add_root(val1=4, val2=1, desc="fgh")
sorted_model.add_root(val1=3, val2=3, desc="abc")
sorted_model.add_root(val1=2, val2=2, desc="qwe")
sorted_model.add_root(val1=3, val2=2, desc="vcx")
root_nodes = sorted_model.get_root_nodes()
target = root_nodes[0]
for node in root_nodes[1:]:
# because raw queries don't update django objects
node = sorted_model.objects.get(pk=node.pk)
target = sorted_model.objects.get(pk=target.pk)
node.move(target, "sorted-child")
expected = [
(1, 4, "bcd", 1, 7),
(2, 2, "qwe", 2, 0),
(2, 5, "zxy", 2, 0),
(3, 2, "vcx", 2, 0),
(3, 3, "abc", 2, 0),
(3, 3, "abc", 2, 0),
(3, 3, "zxy", 2, 0),
(4, 1, "fgh", 2, 0),
]
assert self.got(sorted_model) == expected
def test_move_sortedsibling(self, sorted_model):
# https://bitbucket.org/tabo/django-treebeard/issue/27
sorted_model.add_root(val1=3, val2=3, desc="zxy")
sorted_model.add_root(val1=1, val2=4, desc="bcd")
sorted_model.add_root(val1=2, val2=5, desc="zxy")
sorted_model.add_root(val1=3, val2=3, desc="abc")
sorted_model.add_root(val1=4, val2=1, desc="fgh")
sorted_model.add_root(val1=3, val2=3, desc="abc")
sorted_model.add_root(val1=2, val2=2, desc="qwe")
sorted_model.add_root(val1=3, val2=2, desc="vcx")
root_nodes = sorted_model.get_root_nodes()
target = root_nodes[0]
for node in root_nodes[1:]:
# because raw queries don't update django objects
node = sorted_model.objects.get(pk=node.pk)
target = sorted_model.objects.get(pk=target.pk)
node.val1 -= 2
node.save()
node.move(target, "sorted-sibling")
expected = [
(0, 2, "qwe", 1, 0),
(0, 5, "zxy", 1, 0),
(1, 2, "vcx", 1, 0),
(1, 3, "abc", 1, 0),
(1, 3, "abc", 1, 0),
(1, 3, "zxy", 1, 0),
(1, 4, "bcd", 1, 0),
(2, 1, "fgh", 1, 0),
]
assert self.got(sorted_model) == expected
@pytest.mark.django_db
class TestInheritedModels(TestTreeBase):
@staticmethod
@pytest.fixture(
scope="function",
params=zip(models.BASE_MODELS, models.INHERITED_MODELS),
ids=lambda fv: f"base={fv[0].__name__} inherited={fv[1].__name__}",
)
def inherited_model(request):
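        # Build a small tree that mixes base-model and inherited-model instances, so
        # the node API is exercised through the subclass.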
base_model, inherited_model = request.param
base_model.add_root(desc="1")
base_model.add_root(desc="2")
node21 = inherited_model(desc="21")
base_model.objects.get(desc="2").add_child(instance=node21)
base_model.objects.get(desc="21").add_child(desc="211")
base_model.objects.get(desc="21").add_child(desc="212")
base_model.objects.get(desc="2").add_child(desc="22")
node3 = inherited_model(desc="3")
base_model.add_root(instance=node3)
return inherited_model
def test_get_tree_all(self, inherited_model):
got = [
(o.desc, o.get_depth(), o.get_children_count())
for o in inherited_model.get_tree()
]
expected = [
("1", 1, 0),
("2", 1, 2),
("21", 2, 2),
("211", 3, 0),
("212", 3, 0),
("22", 2, 0),
("3", 1, 0),
]
assert got == expected
def test_get_tree_node(self, inherited_model):
node = inherited_model.objects.get(desc="21")
got = [
(o.desc, o.get_depth(), o.get_children_count())
for o in inherited_model.get_tree(node)
]
expected = [
("21", 2, 2),
("211", 3, 0),
("212", 3, 0),
]
assert got == expected
def test_get_root_nodes(self, inherited_model):
got = inherited_model.get_root_nodes()
expected = ["1", "2", "3"]
assert [node.desc for node in got] == expected
def test_get_first_root_node(self, inherited_model):
got = inherited_model.get_first_root_node()
assert got.desc == "1"
def test_get_last_root_node(self, inherited_model):
got = inherited_model.get_last_root_node()
assert got.desc == "3"
def test_is_root(self, inherited_model):
node21 = inherited_model.objects.get(desc="21")
node3 = inherited_model.objects.get(desc="3")
assert node21.is_root() is False
assert node3.is_root() is True
def test_is_leaf(self, inherited_model):
node21 = inherited_model.objects.get(desc="21")
node3 = inherited_model.objects.get(desc="3")
assert node21.is_leaf() is False
assert node3.is_leaf() is True
def test_get_root(self, inherited_model):
node21 = inherited_model.objects.get(desc="21")
node3 = inherited_model.objects.get(desc="3")
assert node21.get_root().desc == "2"
assert node3.get_root().desc == "3"
def test_get_parent(self, inherited_model):
node21 = inherited_model.objects.get(desc="21")
node3 = inherited_model.objects.get(desc="3")
assert node21.get_parent().desc == "2"
assert node3.get_parent() is None
def test_get_children(self, inherited_model):
node21 = inherited_model.objects.get(desc="21")
node3 = inherited_model.objects.get(desc="3")
assert [node.desc for node in node21.get_children()] == ["211", "212"]
assert [node.desc for node in node3.get_children()] == []
def test_get_children_count(self, inherited_model):
node21 = inherited_model.objects.get(desc="21")
node3 = inherited_model.objects.get(desc="3")
assert node21.get_children_count() == 2
assert node3.get_children_count() == 0
def test_get_siblings(self, inherited_model):
node21 = inherited_model.objects.get(desc="21")
node3 = inherited_model.objects.get(desc="3")
assert [node.desc for node in node21.get_siblings()] == ["21", "22"]
assert [node.desc for node in node3.get_siblings()] == ["1", "2", "3"]
def test_get_first_sibling(self, inherited_model):
node21 = inherited_model.objects.get(desc="21")
node3 = inherited_model.objects.get(desc="3")
assert node21.get_first_sibling().desc == "21"
assert node3.get_first_sibling().desc == "1"
def test_get_prev_sibling(self, inherited_model):
node21 = inherited_model.objects.get(desc="21")
node3 = inherited_model.objects.get(desc="3")
assert node21.get_prev_sibling() is None
assert node3.get_prev_sibling().desc == "2"
def test_get_next_sibling(self, inherited_model):
node21 = inherited_model.objects.get(desc="21")
node3 = inherited_model.objects.get(desc="3")
assert node21.get_next_sibling().desc == "22"
assert node3.get_next_sibling() is None
def test_get_last_sibling(self, inherited_model):
node21 = inherited_model.objects.get(desc="21")
node3 = inherited_model.objects.get(desc="3")
assert node21.get_last_sibling().desc == "22"
assert node3.get_last_sibling().desc == "3"
def test_get_first_child(self, inherited_model):
node21 = inherited_model.objects.get(desc="21")
node3 = inherited_model.objects.get(desc="3")
assert node21.get_first_child().desc == "211"
assert node3.get_first_child() is None
def test_get_last_child(self, inherited_model):
node21 = inherited_model.objects.get(desc="21")
node3 = inherited_model.objects.get(desc="3")
assert node21.get_last_child().desc == "212"
assert node3.get_last_child() is None
def test_get_ancestors(self, inherited_model):
node21 = inherited_model.objects.get(desc="21")
node3 = inherited_model.objects.get(desc="3")
assert [node.desc for node in node21.get_ancestors()] == ["2"]
assert [node.desc for node in node3.get_ancestors()] == []
def test_get_descendants(self, inherited_model):
node21 = inherited_model.objects.get(desc="21")
node3 = inherited_model.objects.get(desc="3")
assert [node.desc for node in node21.get_descendants()] == ["211", "212"]
assert [node.desc for node in node3.get_descendants()] == []
def test_get_descendant_count(self, inherited_model):
node21 = inherited_model.objects.get(desc="21")
node3 = inherited_model.objects.get(desc="3")
assert node21.get_descendant_count() == 2
assert node3.get_descendant_count() == 0
def test_cascading_deletion(self, inherited_model):
# Deleting a node by calling delete() on the inherited_model class
# should delete descendants, even if those descendants are not
# instances of inherited_model
base_model = inherited_model.__bases__[0]
node21 = inherited_model.objects.get(desc="21")
node21.delete()
node2 = base_model.objects.get(desc="2")
for desc in ["21", "211", "212"]:
assert not base_model.objects.filter(desc=desc).exists()
assert [node.desc for node in node2.get_descendants()] == ["22"]
class TestMP_TreeAlphabet(TestTreeBase):
@pytest.mark.skipif(
not os.getenv("TREEBEARD_TEST_ALPHABET", False),
reason="TREEBEARD_TEST_ALPHABET env variable not set.",
)
def test_alphabet(self, mpalphabet_model):
"""This isn't actually a test, it's an informational routine."""
basealpha = numconv.BASE85
got_err = False
last_good = None
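        # Try progressively longer prefixes of BASE85 as the path alphabet and
        # remember the longest one that still produces the expected paths.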
for alphabetlen in range(3, len(basealpha) + 1):
alphabet = basealpha[0:alphabetlen]
assert len(alphabet) >= 3
expected = [alphabet[0] + char for char in alphabet[1:]]
expected.extend([alphabet[1] + char for char in alphabet])
expected.append(alphabet[2] + alphabet[0])
# remove all nodes
mpalphabet_model.objects.all().delete()
# change the model's alphabet
mpalphabet_model.alphabet = alphabet
mpalphabet_model.numconv_obj_ = None
# insert root nodes
for pos in range(len(alphabet) * 2):
try:
mpalphabet_model.add_root(numval=pos)
except:
got_err = True
break
if got_err:
break
got = [obj.path for obj in mpalphabet_model.objects.all()]
if got != expected:
break
last_good = alphabet
assert False, "Best BASE85 based alphabet for your setup: {} (base {})".format(
last_good, len(last_good)
)
@pytest.mark.django_db
class TestHelpers(TestTreeBase):
@staticmethod
@pytest.fixture(scope="function", params=models.BASE_MODELS + models.PROXY_MODELS)
def helpers_model(request):
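        # Load BASE_DATA as root trees, copy it again under every root, then add a
        # childless root "5" so descendant counts differ across roots.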
model = request.param
model.load_bulk(BASE_DATA)
for node in model.get_root_nodes():
model.load_bulk(BASE_DATA, node)
model.add_root(desc="5")
return model
def test_descendants_group_count_root(self, helpers_model):
expected = [
(o.desc, o.get_descendant_count()) for o in helpers_model.get_root_nodes()
]
got = [
(o.desc, o.descendants_count)
for o in helpers_model.get_descendants_group_count()
]
assert got == expected
def test_descendants_group_count_node(self, helpers_model):
parent = helpers_model.get_root_nodes().get(desc="2")
expected = [(o.desc, o.get_descendant_count()) for o in parent.get_children()]
got = [
(o.desc, o.descendants_count)
for o in helpers_model.get_descendants_group_count(parent)
]
assert got == expected
@pytest.mark.django_db
class TestMP_TreeSortedAutoNow(TestTreeBase):
"""
The sorting mechanism used by treebeard when adding a node can fail if the
ordering is using an "auto_now" field
"""
def test_sorted_by_autonow_workaround(self, mpsortedautonow_model):
        # workaround: give the auto_now ordering field an explicit value so the
        # sorted insert has something to compare against.
for i in range(1, 5):
mpsortedautonow_model.add_root(
desc="node%d" % (i,), created=datetime.datetime.now()
)
def test_sorted_by_autonow_FAIL(self, mpsortedautonow_model):
"""
        This test asserts that the problem exists: without the workaround, adding a
        node whose "auto_now" ordering field is left unset raises a ValueError.
        Fix this, somehow.
"""
mpsortedautonow_model.add_root(desc="node1")
with pytest.raises(ValueError):
mpsortedautonow_model.add_root(desc="node2")
@pytest.mark.django_db
class TestMP_TreeStepOverflow(TestTreeBase):
def test_add_root(self, mpsmallstep_model):
method = mpsmallstep_model.add_root
for i in range(1, 10):
method()
with pytest.raises(PathOverflow):
method()
def test_add_child(self, mpsmallstep_model):
root = mpsmallstep_model.add_root()
method = root.add_child
for i in range(1, 10):
method()
with pytest.raises(PathOverflow):
method()
def test_add_sibling(self, mpsmallstep_model):
root = mpsmallstep_model.add_root()
for i in range(1, 10):
root.add_child()
positions = ("first-sibling", "left", "right", "last-sibling")
for pos in positions:
with pytest.raises(PathOverflow):
root.get_last_child().add_sibling(pos)
def test_move(self, mpsmallstep_model):
root = mpsmallstep_model.add_root()
for i in range(1, 10):
root.add_child()
newroot = mpsmallstep_model.add_root()
targets = [
(root, ["first-child", "last-child"]),
(
root.get_first_child(),
["first-sibling", "left", "right", "last-sibling"],
),
]
for target, positions in targets:
for pos in positions:
with pytest.raises(PathOverflow):
newroot.move(target, pos)
@pytest.mark.django_db
class TestMP_TreeShortPath(TestTreeBase):
"""Test a tree with a very small path field (max_length=4) and a
steplen of 1
"""
def test_short_path(self, mpshortnotsorted_model):
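        # With max_length=4 and steplen=1 the path column is full after four levels,
        # so adding a fifth level must raise PathOverflow.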
obj = mpshortnotsorted_model.add_root()
obj = obj.add_child().add_child().add_child()
with pytest.raises(PathOverflow):
obj.add_child()
@pytest.mark.django_db
class TestMP_TreeFindProblems(TestTreeBase):
def test_find_problems(self, mpalphabet_model):
mpalphabet_model.alphabet = "01234"
mpalphabet_model(path="01", depth=1, numchild=0, numval=0).save()
mpalphabet_model(path="1", depth=1, numchild=0, numval=0).save()
mpalphabet_model(path="111", depth=1, numchild=0, numval=0).save()
mpalphabet_model(path="abcd", depth=1, numchild=0, numval=0).save()
mpalphabet_model(path="qa#$%!", depth=1, numchild=0, numval=0).save()
mpalphabet_model(path="0201", depth=2, numchild=0, numval=0).save()
mpalphabet_model(path="020201", depth=3, numchild=0, numval=0).save()
mpalphabet_model(path="03", depth=1, numchild=2, numval=0).save()
mpalphabet_model(path="0301", depth=2, numchild=0, numval=0).save()
mpalphabet_model(path="030102", depth=3, numchild=10, numval=0).save()
mpalphabet_model(path="04", depth=10, numchild=1, numval=0).save()
mpalphabet_model(path="0401", depth=20, numchild=0, numval=0).save()
def got(ids):
return [o.path for o in mpalphabet_model.objects.filter(pk__in=ids)]
(
evil_chars,
bad_steplen,
orphans,
wrong_depth,
wrong_numchild,
) = mpalphabet_model.find_problems()
assert ["abcd", "qa#$%!"] == got(evil_chars)
assert ["1", "111"] == got(bad_steplen)
assert ["0201", "020201"] == got(orphans)
assert ["03", "0301", "030102"] == got(wrong_numchild)
assert ["04", "0401"] == got(wrong_depth)
@pytest.mark.django_db
class TestMP_TreeFix(TestTreeBase):
expected_no_holes = {
models.MP_TestNodeShortPath: [
("1", "b", 1, 2),
("11", "u", 2, 1),
("111", "i", 3, 1),
("1111", "e", 4, 0),
("12", "o", 2, 0),
("2", "d", 1, 0),
("3", "g", 1, 0),
("4", "a", 1, 4),
("41", "a", 2, 0),
("42", "a", 2, 0),
("43", "u", 2, 1),
("431", "i", 3, 1),
("4311", "e", 4, 0),
("44", "o", 2, 0),
],
models.MP_TestSortedNodeShortPath: [
("1", "a", 1, 4),
("11", "a", 2, 0),
("12", "a", 2, 0),
("13", "o", 2, 0),
("14", "u", 2, 1),
("141", "i", 3, 1),
("1411", "e", 4, 0),
("2", "b", 1, 2),
("21", "o", 2, 0),
("22", "u", 2, 1),
("221", "i", 3, 1),
("2211", "e", 4, 0),
("3", "d", 1, 0),
("4", "g", 1, 0),
],
}
expected_with_holes = {
models.MP_TestNodeShortPath: [
("1", "b", 1, 2),
("13", "u", 2, 1),
("134", "i", 3, 1),
("1343", "e", 4, 0),
("14", "o", 2, 0),
("2", "d", 1, 0),
("3", "g", 1, 0),
("4", "a", 1, 4),
("41", "a", 2, 0),
("42", "a", 2, 0),
("43", "u", 2, 1),
("434", "i", 3, 1),
("4343", "e", 4, 0),
("44", "o", 2, 0),
],
models.MP_TestSortedNodeShortPath: [
("1", "b", 1, 2),
("13", "u", 2, 1),
("134", "i", 3, 1),
("1343", "e", 4, 0),
("14", "o", 2, 0),
("2", "d", 1, 0),
("3", "g", 1, 0),
("4", "a", 1, 4),
("41", "a", 2, 0),
("42", "a", 2, 0),
("43", "u", 2, 1),
("434", "i", 3, 1),
("4343", "e", 4, 0),
("44", "o", 2, 0),
],
}
def got(self, model):
return [
(o.path, o.desc, o.get_depth(), o.get_children_count())
for o in model.get_tree()
]
def add_broken_test_data(self, model):
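        # Write rows directly (bypassing the tree API) with gappy paths and wrong
        # depth/numchild values so fix_tree() has something to repair.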
model(path="4", depth=2, numchild=2, desc="a").save()
model(path="13", depth=1000, numchild=0, desc="u").save()
model(path="14", depth=4, numchild=500, desc="o").save()
model(path="134", depth=321, numchild=543, desc="i").save()
model(path="1343", depth=321, numchild=543, desc="e").save()
model(path="42", depth=1, numchild=1, desc="a").save()
model(path="43", depth=1000, numchild=0, desc="u").save()
model(path="44", depth=4, numchild=500, desc="o").save()
model(path="434", depth=321, numchild=543, desc="i").save()
model(path="4343", depth=321, numchild=543, desc="e").save()
model(path="41", depth=1, numchild=1, desc="a").save()
model(path="3", depth=221, numchild=322, desc="g").save()
model(path="1", depth=10, numchild=3, desc="b").save()
model(path="2", depth=10, numchild=3, desc="d").save()
def test_fix_tree_non_destructive(self, mpshort_model):
self.add_broken_test_data(mpshort_model)
mpshort_model.fix_tree(destructive=False)
got = self.got(mpshort_model)
expected = self.expected_with_holes[mpshort_model]
assert got == expected
mpshort_model.find_problems()
def test_fix_tree_destructive(self, mpshort_model):
self.add_broken_test_data(mpshort_model)
mpshort_model.fix_tree(destructive=True)
got = self.got(mpshort_model)
expected = self.expected_no_holes[mpshort_model]
assert got == expected
mpshort_model.find_problems()
def test_fix_tree_with_fix_paths(self, mpshort_model):
self.add_broken_test_data(mpshort_model)
mpshort_model.fix_tree(fix_paths=True)
got = self.got(mpshort_model)
expected = self.expected_no_holes[mpshort_model]
assert got == expected
mpshort_model.find_problems()
@pytest.mark.django_db
class TestIssues(TestTreeBase):
# test for http://code.google.com/p/django-treebeard/issues/detail?id=14
def test_many_to_many_django_user_anonymous(self, mpm2muser_model):
# Using AnonymousUser() in the querysets will expose non-treebeard
# related problems in Django 1.0
#
# Postgres:
# ProgrammingError: can't adapt
# SQLite:
# InterfaceError: Error binding parameter 4 - probably unsupported
# type.
# MySQL compared a string to an integer field:
# `treebeard_mp_testissue14_users`.`user_id` = 'AnonymousUser'
#
# Using a None field instead works (will be translated to IS NULL).
#
# anonuserobj = AnonymousUser()
anonuserobj = None
def qs_check(qs, expected):
assert [o.name for o in qs] == expected
def qs_check_first_or_user(expected, root, user):
qs_check(
root.get_children().filter(Q(name="first") | Q(users=user)), expected
)
user = User.objects.create_user("test_user", "[email protected]", "testpasswd")
user.save()
root = mpm2muser_model.add_root(name="the root node")
root.add_child(name="first")
second = root.add_child(name="second")
qs_check(root.get_children(), ["first", "second"])
qs_check(root.get_children().filter(Q(name="first")), ["first"])
qs_check(root.get_children().filter(Q(users=user)), [])
qs_check_first_or_user(["first"], root, user)
qs_check_first_or_user(["first", "second"], root, anonuserobj)
user = User.objects.get(username="test_user")
second.users.add(user)
qs_check_first_or_user(["first", "second"], root, user)
qs_check_first_or_user(["first"], root, anonuserobj)
@pytest.mark.django_db
class TestMoveNodeForm(TestNonEmptyTree):
def _get_nodes_list(self, nodes):
return [
(pk, "%s%s" % (" " * 4 * (depth - 1), str)) for pk, str, depth in nodes
]
def _assert_nodes_in_choices(self, form, nodes):
choices = form.fields["_ref_node_id"].choices
assert choices.pop(0)[0] is None
assert nodes == [(choice[0], choice[1]) for choice in choices]
def _move_node_helper(self, node, safe_parent_nodes):
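        # Build a MoveNodeForm for the node and check that its _position and
        # _ref_node_id choices list exactly the expected safe reference nodes.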
form_class = movenodeform_factory(type(node))
form = form_class(instance=node)
assert ["desc", "_position", "_ref_node_id"] == list(form.base_fields.keys())
got = [choice[0] for choice in form.fields["_position"].choices]
assert ["first-child", "left", "right"] == got
nodes = self._get_nodes_list(safe_parent_nodes)
self._assert_nodes_in_choices(form, nodes)
def _get_node_ids_strs_and_depths(self, nodes):
return [(node.pk, str(node), node.get_depth()) for node in nodes]
def test_form_root_node(self, model):
nodes = list(model.get_tree())
node = nodes.pop(0)
safe_parent_nodes = self._get_node_ids_strs_and_depths(nodes)
self._move_node_helper(node, safe_parent_nodes)
def test_form_leaf_node(self, model):
nodes = list(model.get_tree())
safe_parent_nodes = self._get_node_ids_strs_and_depths(nodes)
node = nodes.pop()
self._move_node_helper(node, safe_parent_nodes)
def test_form_admin(self, model):
request = None
nodes = list(model.get_tree())
safe_parent_nodes = self._get_node_ids_strs_and_depths(nodes)
for node in model.objects.all():
site = AdminSite()
form_class = movenodeform_factory(model)
admin_class = admin_factory(form_class)
ma = admin_class(model, site)
got = list(ma.get_form(request).base_fields.keys())
desc_pos_refnodeid = ["desc", "_position", "_ref_node_id"]
assert desc_pos_refnodeid == got
got = ma.get_fieldsets(request)
expected = [(None, {"fields": desc_pos_refnodeid})]
assert got == expected
got = ma.get_fieldsets(request, node)
assert got == expected
form = ma.get_form(request)()
nodes = self._get_nodes_list(safe_parent_nodes)
self._assert_nodes_in_choices(form, nodes)
@pytest.mark.django_db
class TestModelAdmin(TestNonEmptyTree):
def test_default_fields(self, model):
site = AdminSite()
form_class = movenodeform_factory(model)
admin_class = admin_factory(form_class)
ma = admin_class(model, site)
assert list(ma.get_form(None).base_fields.keys()) == [
"desc",
"_position",
"_ref_node_id",
]
@pytest.mark.django_db
class TestSortedForm(TestTreeSorted):
def test_sorted_form(self, sorted_model):
sorted_model.add_root(val1=3, val2=3, desc="zxy")
sorted_model.add_root(val1=1, val2=4, desc="bcd")
sorted_model.add_root(val1=2, val2=5, desc="zxy")
sorted_model.add_root(val1=3, val2=3, desc="abc")
sorted_model.add_root(val1=4, val2=1, desc="fgh")
sorted_model.add_root(val1=3, val2=3, desc="abc")
sorted_model.add_root(val1=2, val2=2, desc="qwe")
sorted_model.add_root(val1=3, val2=2, desc="vcx")
form_class = movenodeform_factory(sorted_model)
form = form_class()
assert list(form.fields.keys()) == [
"val1",
"val2",
"desc",
"_position",
"_ref_node_id",
]
form = form_class(instance=sorted_model.objects.get(desc="bcd"))
assert list(form.fields.keys()) == [
"val1",
"val2",
"desc",
"_position",
"_ref_node_id",
]
assert "id__position" in str(form)
assert "id__ref_node_id" in str(form)
@pytest.mark.django_db
class TestForm(TestNonEmptyTree):
def test_form(self, model):
form_class = movenodeform_factory(model)
form = form_class()
assert list(form.fields.keys()) == ["desc", "_position", "_ref_node_id"]
form = form_class(instance=model.objects.get(desc="1"))
assert list(form.fields.keys()) == ["desc", "_position", "_ref_node_id"]
assert "id__position" in str(form)
assert "id__ref_node_id" in str(form)
def test_move_node_form(self, model):
form_class = movenodeform_factory(model)
bad_node = model.objects.get(desc="1").add_child(
desc='Benign<script>alert("Compromised");</script>'
)
form = form_class(instance=bad_node)
rendered_html = form.as_p()
assert "Benign" in rendered_html
assert "<script>" not in rendered_html
assert "<script>" in rendered_html
def test_get_position_ref_node(self, model):
form_class = movenodeform_factory(model)
instance_parent = model.objects.get(desc="1")
form = form_class(instance=instance_parent)
assert form._get_position_ref_node(instance_parent) == {
"_position": "first-child",
"_ref_node_id": "",
}
instance_child = model.objects.get(desc="21")
form = form_class(instance=instance_child)
assert form._get_position_ref_node(instance_child) == {
"_position": "first-child",
"_ref_node_id": model.objects.get(desc="2").pk,
}
instance_grandchild = model.objects.get(desc="22")
form = form_class(instance=instance_grandchild)
assert form._get_position_ref_node(instance_grandchild) == {
"_position": "right",
"_ref_node_id": model.objects.get(desc="21").pk,
}
instance_grandchild = model.objects.get(desc="231")
form = form_class(instance=instance_grandchild)
assert form._get_position_ref_node(instance_grandchild) == {
"_position": "first-child",
"_ref_node_id": model.objects.get(desc="23").pk,
}
def test_clean_cleaned_data(self, model):
instance_parent = model.objects.get(desc="1")
_position = "first-child"
_ref_node_id = ""
form_class = movenodeform_factory(model)
form = form_class(
instance=instance_parent,
data={
"_position": _position,
"_ref_node_id": _ref_node_id,
"desc": instance_parent.desc,
},
)
assert form.is_valid()
assert form._clean_cleaned_data() == (_position, _ref_node_id)
def test_save_edit(self, model):
instance_parent = model.objects.get(desc="1")
original_count = len(model.objects.all())
form_class = movenodeform_factory(model)
form = form_class(
instance=instance_parent,
data={
"_position": "first-child",
"_ref_node_id": model.objects.get(desc="2").pk,
"desc": instance_parent.desc,
},
)
assert form.is_valid()
saved_instance = form.save()
assert original_count == model.objects.all().count()
assert saved_instance.get_children_count() == 0
assert saved_instance.get_depth() == 2
assert not saved_instance.is_root()
assert saved_instance.is_leaf()
# Return to original state
form_class = movenodeform_factory(model)
form = form_class(
instance=saved_instance,
data={
"_position": "first-child",
"_ref_node_id": "",
"desc": saved_instance.desc,
},
)
assert form.is_valid()
restored_instance = form.save()
assert original_count == model.objects.all().count()
assert restored_instance.get_children_count() == 0
assert restored_instance.get_depth() == 1
assert restored_instance.is_root()
assert restored_instance.is_leaf()
def test_save_new(self, model):
original_count = model.objects.all().count()
assert original_count == 10
_position = "first-child"
form_class = movenodeform_factory(model)
form = form_class(data={"_position": _position, "desc": "New Form Test"})
assert form.is_valid()
assert form.save() is not None
assert original_count < model.objects.all().count()
def test_save_new_with_pk_set(self, model):
"""
If the model is using a natural primary key then it will be
already set when the instance is inserted.
"""
original_count = model.objects.all().count()
assert original_count == 10
_position = "first-child"
form_class = movenodeform_factory(model)
form = form_class(
data={"_position": _position, "id": 999999, "desc": "New Form Test"}
)
assert form.is_valid()
# Fake a natural key by updating the instance directly, because
# the model form will have removed the id from cleaned data because
# it thinks it is an AutoField.
form.instance.id = 999999
assert form.save() is not None
assert original_count < model.objects.all().count()
class TestAdminTreeTemplateTags(TestCase):
def test_treebeard_css(self):
template = Template("{% load admin_tree %}{% treebeard_css %}")
context = Context()
rendered = template.render(context)
expected = (
'<link rel="stylesheet" type="text/css" '
'href="' + static("treebeard/treebeard-admin.css") + '"/>'
)
assert expected == rendered
def test_treebeard_js(self):
template = Template("{% load admin_tree %}{% treebeard_js %}")
context = Context()
rendered = template.render(context)
expected = (
'<script type="text/javascript" src="jsi18n"></script>'
'<script type="text/javascript" '
'src="' + static("treebeard/treebeard-admin.js") + '"></script>'
"<script>(function($){"
"jQuery = $.noConflict(true);"
"})(django.jQuery);</script>"
'<script type="text/javascript" '
'src="' + static("treebeard/jquery-ui-1.8.5.custom.min.js") + '"></script>'
)
assert expected == rendered
@pytest.mark.django_db
class TestAdminTree(TestNonEmptyTree):
template = Template(
"{% load admin_tree %}{% spaceless %}"
"{% result_tree cl request %}{% endspaceless %}"
)
def test_result_tree(self, model_without_proxy):
"""
Verifies that inclusion tag result_list generates a table when with
default ModelAdmin settings.
"""
model = model_without_proxy
request = RequestFactory().get("/admin/tree/")
request.user = AnonymousUser()
site = AdminSite()
form_class = movenodeform_factory(model)
admin_class = admin_factory(form_class)
m = admin_class(model, site)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
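        # Build the ChangeList by hand from the ModelAdmin's own options, mirroring
        # what the admin changelist view does internally.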
cl = ChangeList(
request,
model,
list_display,
list_display_links,
m.list_filter,
m.date_hierarchy,
m.search_fields,
m.list_select_related,
m.list_per_page,
m.list_max_show_all,
m.list_editable,
m,
None,
)
cl.formset = None
context = Context({"cl": cl, "request": request})
table_output = self.template.render(context)
# We have the same amount of drag handlers as objects
        drag_handler = '<td class="drag-handler"><span>&nbsp;</span></td>'
assert table_output.count(drag_handler) == model.objects.count()
# All nodes are in the result tree
for object in model.objects.all():
url = cl.url_for_result(object)
node = '<a href="%s">%s</a>' % (url, str(object))
assert node in table_output
# Unfiltered
assert '<input type="hidden" id="has-filters" value="0"/>' in table_output
def test_unicode_result_tree(self, model_with_unicode):
"""
Verifies that inclusion tag result_list generates a table when with
default ModelAdmin settings.
"""
model = model_with_unicode
# Add a unicode description
model.add_root(desc="áéîøü")
request = RequestFactory().get("/admin/tree/")
request.user = AnonymousUser()
site = AdminSite()
form_class = movenodeform_factory(model)
ModelAdmin = admin_factory(form_class)
class UnicodeModelAdmin(ModelAdmin):
list_display = ("__str__", "desc")
m = UnicodeModelAdmin(model, site)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(
request,
model,
list_display,
list_display_links,
m.list_filter,
m.date_hierarchy,
m.search_fields,
m.list_select_related,
m.list_per_page,
m.list_max_show_all,
m.list_editable,
m,
None,
)
cl.formset = None
context = Context({"cl": cl, "request": request})
table_output = self.template.render(context)
# We have the same amount of drag handlers as objects
        drag_handler = '<td class="drag-handler"><span>&nbsp;</span></td>'
assert table_output.count(drag_handler) == model.objects.count()
# All nodes are in the result tree
for object in model.objects.all():
url = cl.url_for_result(object)
node = '<a href="%s">%s</a>' % (url, object.desc)
assert node in table_output
# Unfiltered
assert '<input type="hidden" id="has-filters" value="0"/>' in table_output
def test_result_filtered(self, model_without_proxy):
"""Test template changes with filters or pagination."""
model = model_without_proxy
# Filtered GET
request = RequestFactory().get("/admin/tree/?desc=1")
request.user = AnonymousUser()
site = AdminSite()
form_class = movenodeform_factory(model)
admin_class = admin_factory(form_class)
m = admin_class(model, site)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(
request,
model,
list_display,
list_display_links,
m.list_filter,
m.date_hierarchy,
m.search_fields,
m.list_select_related,
m.list_per_page,
m.list_max_show_all,
m.list_editable,
m,
None,
)
cl.formset = None
context = Context({"cl": cl, "request": request})
table_output = self.template.render(context)
# Filtered
assert '<input type="hidden" id="has-filters" value="1"/>' in table_output
# Not Filtered GET, it should ignore pagination
request = RequestFactory().get("/admin/tree/?p=1")
request.user = AnonymousUser()
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(
request,
model,
list_display,
list_display_links,
m.list_filter,
m.date_hierarchy,
m.search_fields,
m.list_select_related,
m.list_per_page,
m.list_max_show_all,
m.list_editable,
m,
None,
)
cl.formset = None
context = Context({"cl": cl, "request": request})
table_output = self.template.render(context)
# Not Filtered
assert '<input type="hidden" id="has-filters" value="0"/>' in table_output
# Not Filtered GET, it should ignore all
request = RequestFactory().get("/admin/tree/?all=1")
request.user = AnonymousUser()
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(
request,
model,
list_display,
list_display_links,
m.list_filter,
m.date_hierarchy,
m.search_fields,
m.list_select_related,
m.list_per_page,
m.list_max_show_all,
m.list_editable,
m,
None,
)
cl.formset = None
context = Context({"cl": cl, "request": request})
table_output = self.template.render(context)
# Not Filtered
assert '<input type="hidden" id="has-filters" value="0"/>' in table_output
@pytest.mark.django_db
class TestAdminTreeList(TestNonEmptyTree):
template = Template(
"{% load admin_tree_list %}{% spaceless %}"
"{% result_tree cl request %}{% endspaceless %}"
)
def test_result_tree_list(self, model_without_proxy):
"""
Verifies that inclusion tag result_list generates a table when with
default ModelAdmin settings.
"""
model = model_without_proxy
request = RequestFactory().get("/admin/tree/")
request.user = AnonymousUser()
site = AdminSite()
form_class = movenodeform_factory(model)
admin_class = admin_factory(form_class)
m = admin_class(model, site)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(
request,
model,
list_display,
list_display_links,
m.list_filter,
m.date_hierarchy,
m.search_fields,
m.list_select_related,
m.list_per_page,
m.list_max_show_all,
m.list_editable,
m,
None,
)
cl.formset = None
context = Context({"cl": cl, "request": request})
table_output = self.template.render(context)
output_template = '<li><a href="%s/" >%s</a>'
for object in model.objects.all():
expected_output = output_template % (object.pk, str(object))
assert expected_output in table_output
def test_result_tree_list_with_action(self, model_without_proxy):
model = model_without_proxy
request = RequestFactory().get("/admin/tree/")
request.user = AnonymousUser()
site = AdminSite()
form_class = movenodeform_factory(model)
admin_class = admin_factory(form_class)
m = admin_class(model, site)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(
request,
model,
list_display,
list_display_links,
m.list_filter,
m.date_hierarchy,
m.search_fields,
m.list_select_related,
m.list_per_page,
m.list_max_show_all,
m.list_editable,
m,
None,
)
cl.formset = None
context = Context({"cl": cl, "request": request, "action_form": True})
table_output = self.template.render(context)
output_template = (
'<input type="checkbox" class="action-select" '
'value="%s" name="_selected_action" />'
'<a href="%s/" >%s</a>'
)
for object in model.objects.all():
expected_output = output_template % (object.pk, object.pk, str(object))
assert expected_output in table_output
def test_result_tree_list_with_get(self, model_without_proxy):
model = model_without_proxy
pk_field = model._meta.pk.attname
        # Pass the TO_FIELD_VAR GET parameter with the pk field name as its value
request = RequestFactory().get(
"/admin/tree/?{0}={1}".format(TO_FIELD_VAR, pk_field)
)
request.user = AnonymousUser()
site = AdminSite()
admin_register_all(site)
form_class = movenodeform_factory(model)
admin_class = admin_factory(form_class)
m = admin_class(model, site)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(
request,
model,
list_display,
list_display_links,
m.list_filter,
m.date_hierarchy,
m.search_fields,
m.list_select_related,
m.list_per_page,
m.list_max_show_all,
m.list_editable,
m,
None,
)
cl.formset = None
context = Context({"cl": cl, "request": request})
table_output = self.template.render(context)
output_template = "opener.dismissRelatedLookupPopup(window, '%s');"
for object in model.objects.all():
expected_output = output_template % object.pk
assert expected_output in table_output
@pytest.mark.django_db
class TestTreeAdmin(TestNonEmptyTree):
site = AdminSite()
def _create_superuser(self, username):
return User.objects.create(username=username, is_superuser=True)
def _mocked_authenticated_request(self, url, user):
request_factory = RequestFactory()
request = request_factory.get(url)
request.user = user
return request
def _mocked_request(self, data):
request_factory = RequestFactory()
request = request_factory.post("/", data=data)
setattr(request, "session", "session")
messages = FallbackStorage(request)
setattr(request, "_messages", messages)
return request
def _get_admin_obj(self, model_class):
form_class = movenodeform_factory(model_class)
admin_class = admin_factory(form_class)
return admin_class(model_class, self.site)
def test_changelist_view(self):
tmp_user = self._create_superuser("changelist_tmp")
request = self._mocked_authenticated_request("/", tmp_user)
admin_obj = self._get_admin_obj(models.AL_TestNode)
admin_obj.changelist_view(request)
assert admin_obj.change_list_template == "admin/tree_list.html"
admin_obj = self._get_admin_obj(models.MP_TestNode)
admin_obj.changelist_view(request)
assert admin_obj.change_list_template != "admin/tree_list.html"
def test_get_node(self, model):
admin_obj = self._get_admin_obj(model)
target = model.objects.get(desc="2")
assert admin_obj.get_node(target.pk) == target
def test_move_node_validate_keyerror(self, model):
admin_obj = self._get_admin_obj(model)
request = self._mocked_request(data={})
response = admin_obj.move_node(request)
assert response.status_code == 400
request = self._mocked_request(data={"node_id": 1})
response = admin_obj.move_node(request)
assert response.status_code == 400
def test_move_node_validate_valueerror(self, model):
admin_obj = self._get_admin_obj(model)
request = self._mocked_request(
data={"node_id": 1, "sibling_id": 2, "as_child": "invalid"}
)
response = admin_obj.move_node(request)
assert response.status_code == 400
def test_move_validate_missing_nodeorderby(self, model):
node = model.objects.get(desc="231")
admin_obj = self._get_admin_obj(model)
request = self._mocked_request(data={})
response = admin_obj.try_to_move_node(
True, node, "sorted-child", request, target=node
)
assert response.status_code == 400
response = admin_obj.try_to_move_node(
True, node, "sorted-sibling", request, target=node
)
assert response.status_code == 400
def test_move_validate_invalid_pos(self, model):
node = model.objects.get(desc="231")
admin_obj = self._get_admin_obj(model)
request = self._mocked_request(data={})
response = admin_obj.try_to_move_node(
True, node, "invalid_pos", request, target=node
)
assert response.status_code == 400
def test_move_validate_to_descendant(self, model):
node = model.objects.get(desc="2")
target = model.objects.get(desc="231")
admin_obj = self._get_admin_obj(model)
request = self._mocked_request(data={})
response = admin_obj.try_to_move_node(
True, node, "first-sibling", request, target
)
assert response.status_code == 400
def test_move_left(self, model):
node = model.objects.get(desc="231")
target = model.objects.get(desc="2")
admin_obj = self._get_admin_obj(model)
request = self._mocked_request(
data={"node_id": node.pk, "sibling_id": target.pk, "as_child": 0}
)
response = admin_obj.move_node(request)
assert response.status_code == 200
expected = [
("1", 1, 0),
("231", 1, 0),
("2", 1, 4),
("21", 2, 0),
("22", 2, 0),
("23", 2, 0),
("24", 2, 0),
("3", 1, 0),
("4", 1, 1),
("41", 2, 0),
]
assert self.got(model) == expected
def test_move_last_child(self, model):
node = model.objects.get(desc="231")
target = model.objects.get(desc="2")
admin_obj = self._get_admin_obj(model)
request = self._mocked_request(
data={"node_id": node.pk, "sibling_id": target.pk, "as_child": 1}
)
response = admin_obj.move_node(request)
assert response.status_code == 200
expected = [
("1", 1, 0),
("2", 1, 5),
("21", 2, 0),
("22", 2, 0),
("23", 2, 0),
("24", 2, 0),
("231", 2, 0),
("3", 1, 0),
("4", 1, 1),
("41", 2, 0),
]
assert self.got(model) == expected
@pytest.mark.django_db
class TestMPFormPerformance(object):
def test_form_add_subtree_no_of_queries(self, django_assert_num_queries):
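        # Rendering the dropdown tree should cost one query per root subtree plus one
        # for the list of roots, as asserted below.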
model = models.MP_TestNode
model.load_bulk(BASE_DATA)
form_class = movenodeform_factory(model)
form = form_class()
with django_assert_num_queries(len(model.get_root_nodes()) + 1):
form.mk_dropdown_tree(model)
| [] | [] | ["TREEBEARD_TEST_ALPHABET"] | [] | ["TREEBEARD_TEST_ALPHABET"] | python | 1 | 0 | 
test/e2e/aws_ec2_test.go | package e2e_test
import (
"context"
"fmt"
"io/ioutil"
"net/http"
"os"
"strings"
"github.com/solo-io/gloo/test/helpers"
"github.com/solo-io/solo-kit/pkg/api/v1/resources"
"github.com/aws/aws-sdk-go/aws/credentials"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/rotisserie/eris"
gloov1 "github.com/solo-io/gloo/projects/gloo/pkg/api/v1"
glooec2 "github.com/solo-io/gloo/projects/gloo/pkg/api/v1/options/aws/ec2"
"github.com/solo-io/gloo/projects/gloo/pkg/defaults"
"github.com/solo-io/gloo/test/services"
"github.com/solo-io/solo-kit/pkg/api/v1/clients"
"github.com/solo-io/solo-kit/pkg/api/v1/resources/core"
)
/*
# Configure an EC2 instance for this test
- Do this if this test ever starts to fail because the EC2 instance that it tests against has become unavailable.
- Provision an EC2 instance
- Use an "amazon linux" image
- Configure the security group to allow http traffic on port 80
- Tag your instance with the following tags
- svc: worldwide-hello
- Set up your EC2 instance
- ssh into your instance
- download a demo app: an http response code echo app
- this app responds to requests with the corresponding response code
- ex: http://<my-instance-ip>/?code=404 produces a `404` response
- make the app executable
- run it in the background
```bash
wget https://mitch-solo-public.s3.amazonaws.com/echoapp2
chmod +x echoapp2
sudo ./echoapp2 --port 80 &
```
- Note: other dummy webservers will work fine - you may just need to update the path of the request
- Currently, we call the /metrics path during our tests
- Verify that you can reach the app
- `curl` the app, you should see a help menu for the app
```bash
curl http://<instance-public-ip>/
```
*/
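// Illustrative request examples (assuming the echo app described in the comment above is running on the instance):
//   curl http://<instance-public-ip>/?code=404   -> returns a 404 response
//   curl http://<instance-public-ip>/metrics     -> the path this test exercises through Gloo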
var _ = Describe("AWS EC2 Plugin utils test", func() {
const region = "us-east-1"
var (
ctx context.Context
cancel context.CancelFunc
testClients services.TestClients
envoyInstance *services.EnvoyInstance
secret *gloov1.Secret
upstream *gloov1.Upstream
roleArn string
)
addCredentials := func() {
localAwsCredentials := credentials.NewSharedCredentials("", "")
v, err := localAwsCredentials.Get()
if err != nil {
Skip("no AWS creds available")
}
// role arn format: "arn:aws:iam::[account_number]:role/[role_name]"
roleArn = os.Getenv("AWS_ARN_ROLE_1")
if roleArn == "" {
Skip("no AWS role ARN available")
}
var opts clients.WriteOpts
accessKey := v.AccessKeyID
secretKey := v.SecretAccessKey
secret = &gloov1.Secret{
Metadata: &core.Metadata{
Namespace: "default",
Name: region,
},
Kind: &gloov1.Secret_Aws{
Aws: &gloov1.AwsSecret{
AccessKey: accessKey,
SecretKey: secretKey,
},
},
}
_, err = testClients.SecretClient.Write(secret, opts)
Expect(err).NotTo(HaveOccurred())
}
addUpstream := func() {
secretRef := secret.Metadata.Ref()
upstream = &gloov1.Upstream{
Metadata: &core.Metadata{
Namespace: "default",
Name: region,
},
UpstreamType: &gloov1.Upstream_AwsEc2{
AwsEc2: &glooec2.UpstreamSpec{
Region: region,
SecretRef: secretRef,
RoleArn: roleArn,
Filters: []*glooec2.TagFilter{
{
Spec: &glooec2.TagFilter_KvPair_{
KvPair: &glooec2.TagFilter_KvPair{
Key: "svc",
Value: "worldwide-hello",
},
},
},
},
PublicIp: true,
Port: 80,
},
},
}
var opts clients.WriteOpts
_, err := testClients.UpstreamClient.Write(upstream, opts)
Expect(err).NotTo(HaveOccurred())
}
validateUrl := func(url, substring string) {
Eventually(func() (string, error) {
res, err := http.Get(url)
if err != nil {
return "", eris.Wrapf(err, "unable to call GET")
}
if res.StatusCode != http.StatusOK {
return "", eris.New(fmt.Sprintf("%v is not OK", res.StatusCode))
}
defer res.Body.Close()
body, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", eris.Wrapf(err, "unable to read body")
}
return string(body), nil
}, "10s", "1s").Should(ContainSubstring(substring))
}
validateEc2Endpoint := func(envoyPort uint32, substring string) {
// first make sure that the instance is ready (to avoid false negatives)
By("verifying instance is ready - if this failed, you may need to restart the EC2 instance")
// Stitch the url together to avoid bot spam
// The IP address corresponds to the public ip of an EC2 instance managed by Solo.io for the purpose of
// verifying that the EC2 upstream works as expected.
// The port is where the app listens for connections. The instance has been configured with an inbound traffic
// rule that allows port 80.
// TODO[test enhancement] - create an EC2 instance on demand (or auto-skip the test) if the expected instance is unavailable
// See notes in the header of this file for instructions on how to restore the instance
ec2Port := 80
// This is an Elastic IP in us-east-1 and can be reassigned if the instance ever goes down
ec2Url := fmt.Sprintf("http://%v:%v/metrics", strings.Join([]string{"100", "24", "224", "6"}, "."), ec2Port)
validateUrl(ec2Url, substring)
// do the actual verification
By("verifying Gloo has routed to the instance")
gatewayUrl := fmt.Sprintf("http://%v:%v/metrics", "localhost", envoyPort)
validateUrl(gatewayUrl, substring)
}
AfterEach(func() {
if envoyInstance != nil {
_ = envoyInstance.Clean()
}
cancel()
})
// NOTE: you need to configure EC2 instances before running this
It("be able to call upstream function", func() {
err := envoyInstance.RunWithRoleAndRestXds(services.DefaultProxyName, testClients.GlooPort, testClients.RestXdsPort)
Expect(err).NotTo(HaveOccurred())
proxy := &gloov1.Proxy{
Metadata: &core.Metadata{
Name: "proxy",
Namespace: "default",
},
Listeners: []*gloov1.Listener{{
Name: "listener",
BindAddress: "::",
BindPort: defaults.HttpPort,
ListenerType: &gloov1.Listener_HttpListener{
HttpListener: &gloov1.HttpListener{
VirtualHosts: []*gloov1.VirtualHost{{
Name: "virt1",
Domains: []string{"*"},
Routes: []*gloov1.Route{{
Action: &gloov1.Route_RouteAction{
RouteAction: &gloov1.RouteAction{
Destination: &gloov1.RouteAction_Single{
Single: &gloov1.Destination{
DestinationType: &gloov1.Destination_Upstream{
Upstream: upstream.Metadata.Ref(),
},
},
},
},
},
}},
}},
},
},
}},
}
var opts clients.WriteOpts
_, err = testClients.ProxyClient.Write(proxy, opts)
Expect(err).NotTo(HaveOccurred())
helpers.EventuallyResourceAccepted(func() (resources.InputResource, error) {
return testClients.ProxyClient.Read(proxy.Metadata.Namespace, proxy.Metadata.Name, clients.ReadOpts{})
})
validateEc2Endpoint(defaults.HttpPort, "Counts")
})
BeforeEach(func() {
ctx, cancel = context.WithCancel(context.Background())
defaults.HttpPort = services.NextBindPort()
defaults.HttpsPort = services.NextBindPort()
testClients = services.RunGateway(ctx, false)
var err error
envoyInstance, err = envoyFactory.NewEnvoyInstance()
Expect(err).NotTo(HaveOccurred())
addCredentials()
addUpstream()
})
})
| [
"\"AWS_ARN_ROLE_1\""
]
| []
| [
"AWS_ARN_ROLE_1"
]
| [] | ["AWS_ARN_ROLE_1"] | go | 1 | 0 | |
common/src/main/java/io/netty/util/internal/PlatformDependent.java | /*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.util.internal;
import io.netty.util.CharsetUtil;
import io.netty.util.internal.chmv8.ConcurrentHashMapV8;
import io.netty.util.internal.logging.InternalLogger;
import io.netty.util.internal.logging.InternalLoggerFactory;
import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
import java.util.concurrent.atomic.AtomicLongFieldUpdater;
import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* Utility that detects various properties specific to the current runtime
* environment, such as Java version and the availability of the
* {@code sun.misc.Unsafe} object.
* <p>
* You can disable the use of {@code sun.misc.Unsafe} if you specify
* the system property <strong>io.netty.noUnsafe</strong>.
*/
public final class PlatformDependent {
private static final InternalLogger logger = InternalLoggerFactory.getInstance(PlatformDependent.class);
private static final Pattern MAX_DIRECT_MEMORY_SIZE_ARG_PATTERN = Pattern.compile(
"\\s*-XX:MaxDirectMemorySize\\s*=\\s*([0-9]+)\\s*([kKmMgG]?)\\s*$");
private static final boolean IS_ANDROID = isAndroid0();
private static final boolean IS_WINDOWS = isWindows0();
private static volatile Boolean IS_ROOT;
private static final int JAVA_VERSION = javaVersion0();
private static final boolean CAN_ENABLE_TCP_NODELAY_BY_DEFAULT = !isAndroid();
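    // Illustrative: launching the JVM with -Dio.netty.noUnsafe=true makes hasUnsafe0() report
    // sun.misc.Unsafe as unavailable, as described in the class-level javadoc.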
private static final boolean HAS_UNSAFE = hasUnsafe0();
private static final boolean CAN_USE_CHM_V8 = HAS_UNSAFE && JAVA_VERSION < 8;
private static final boolean DIRECT_BUFFER_PREFERRED =
HAS_UNSAFE && !SystemPropertyUtil.getBoolean("io.netty.noPreferDirect", false);
private static final long MAX_DIRECT_MEMORY = maxDirectMemory0();
private static final long ARRAY_BASE_OFFSET = PlatformDependent0.arrayBaseOffset();
private static final boolean HAS_JAVASSIST = hasJavassist0();
private static final File TMPDIR = tmpdir0();
private static final int BIT_MODE = bitMode0();
private static final int ADDRESS_SIZE = addressSize0();
static {
if (logger.isDebugEnabled()) {
logger.debug("-Dio.netty.noPreferDirect: {}", !DIRECT_BUFFER_PREFERRED);
}
if (!hasUnsafe() && !isAndroid()) {
logger.info(
"Your platform does not provide complete low-level API for accessing direct buffers reliably. " +
"Unless explicitly requested, heap buffer will always be preferred to avoid potential system " +
"unstability.");
}
}
/**
* Returns {@code true} if and only if the current platform is Android
*/
public static boolean isAndroid() {
return IS_ANDROID;
}
/**
* Return {@code true} if the JVM is running on Windows
*/
public static boolean isWindows() {
return IS_WINDOWS;
}
/**
     * Return {@code true} if the current user is root. Note that this method always
     * returns {@code false} on Windows.
*/
public static boolean isRoot() {
if (IS_ROOT == null) {
synchronized (PlatformDependent.class) {
if (IS_ROOT == null) {
IS_ROOT = isRoot0();
}
}
}
return IS_ROOT;
}
/**
* Return the version of Java under which this library is used.
*/
public static int javaVersion() {
return JAVA_VERSION;
}
/**
* Returns {@code true} if and only if it is fine to enable TCP_NODELAY socket option by default.
*/
public static boolean canEnableTcpNoDelayByDefault() {
return CAN_ENABLE_TCP_NODELAY_BY_DEFAULT;
}
/**
     * Return {@code true} if {@code sun.misc.Unsafe} was found on the classpath and can be used for accelerated
     * direct memory access.
*/
public static boolean hasUnsafe() {
return HAS_UNSAFE;
}
/**
     * Returns {@code true} if the platform has a reliable low-level direct buffer access API and the user has not
     * disabled it via the {@code -Dio.netty.noPreferDirect} option.
*/
public static boolean directBufferPreferred() {
return DIRECT_BUFFER_PREFERRED;
}
/**
* Returns the maximum memory reserved for direct buffer allocation.
*/
public static long maxDirectMemory() {
return MAX_DIRECT_MEMORY;
}
/**
* Returns {@code true} if and only if Javassist is available.
*/
public static boolean hasJavassist() {
return HAS_JAVASSIST;
}
/**
* Returns the temporary directory.
*/
public static File tmpdir() {
return TMPDIR;
}
/**
     * Returns the bit mode of the current VM (usually 32 or 64).
*/
public static int bitMode() {
return BIT_MODE;
}
/**
     * Return the address size of the OS:
     * 4 for 32-bit systems and 8 for 64-bit systems.
*/
public static int addressSize() {
return ADDRESS_SIZE;
}
public static long allocateMemory(long size) {
return PlatformDependent0.allocateMemory(size);
}
public static void freeMemory(long address) {
PlatformDependent0.freeMemory(address);
}
/**
* Raises an exception bypassing compiler checks for checked exceptions.
*/
public static void throwException(Throwable t) {
if (hasUnsafe()) {
PlatformDependent0.throwException(t);
} else {
PlatformDependent.<RuntimeException>throwException0(t);
}
}
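    // Illustrative usage sketch (the try/catch and the IOException-throwing call are hypothetical):
    //   try { readSomething(); } catch (IOException e) { PlatformDependent.throwException(e); }
    // This lets a caller rethrow a checked exception without declaring it in its own signature.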
@SuppressWarnings("unchecked")
private static <E extends Throwable> void throwException0(Throwable t) throws E {
throw (E) t;
}
/**
     * Creates the fastest {@link ConcurrentMap} implementation available for the current platform.
*/
public static <K, V> ConcurrentMap<K, V> newConcurrentHashMap() {
if (CAN_USE_CHM_V8) {
return new ConcurrentHashMapV8<K, V>();
} else {
return new ConcurrentHashMap<K, V>();
}
}
/**
     * Creates the fastest {@link ConcurrentMap} implementation available for the current platform.
*/
public static <K, V> ConcurrentMap<K, V> newConcurrentHashMap(int initialCapacity) {
if (CAN_USE_CHM_V8) {
return new ConcurrentHashMapV8<K, V>(initialCapacity);
} else {
return new ConcurrentHashMap<K, V>(initialCapacity);
}
}
/**
     * Creates the fastest {@link ConcurrentMap} implementation available for the current platform.
*/
public static <K, V> ConcurrentMap<K, V> newConcurrentHashMap(int initialCapacity, float loadFactor) {
if (CAN_USE_CHM_V8) {
return new ConcurrentHashMapV8<K, V>(initialCapacity, loadFactor);
} else {
return new ConcurrentHashMap<K, V>(initialCapacity, loadFactor);
}
}
/**
     * Creates the fastest {@link ConcurrentMap} implementation available for the current platform.
*/
public static <K, V> ConcurrentMap<K, V> newConcurrentHashMap(
int initialCapacity, float loadFactor, int concurrencyLevel) {
if (CAN_USE_CHM_V8) {
return new ConcurrentHashMapV8<K, V>(initialCapacity, loadFactor, concurrencyLevel);
} else {
return new ConcurrentHashMap<K, V>(initialCapacity, loadFactor, concurrencyLevel);
}
}
/**
     * Creates the fastest {@link ConcurrentMap} implementation available for the current platform.
*/
public static <K, V> ConcurrentMap<K, V> newConcurrentHashMap(Map<? extends K, ? extends V> map) {
if (CAN_USE_CHM_V8) {
return new ConcurrentHashMapV8<K, V>(map);
} else {
return new ConcurrentHashMap<K, V>(map);
}
}
/**
* Try to deallocate the specified direct {@link ByteBuffer}. Please note this method does nothing if
* the current platform does not support this operation or the specified buffer is not a direct buffer.
*/
public static void freeDirectBuffer(ByteBuffer buffer) {
if (hasUnsafe() && !isAndroid()) {
            // Only delegate to PlatformDependent0 if we are not running on Android.
// See https://github.com/netty/netty/issues/2604
PlatformDependent0.freeDirectBuffer(buffer);
}
}
public static long directBufferAddress(ByteBuffer buffer) {
return PlatformDependent0.directBufferAddress(buffer);
}
public static Object getObject(Object object, long fieldOffset) {
return PlatformDependent0.getObject(object, fieldOffset);
}
public static Object getObjectVolatile(Object object, long fieldOffset) {
return PlatformDependent0.getObjectVolatile(object, fieldOffset);
}
public static int getInt(Object object, long fieldOffset) {
return PlatformDependent0.getInt(object, fieldOffset);
}
public static long objectFieldOffset(Field field) {
return PlatformDependent0.objectFieldOffset(field);
}
public static byte getByte(long address) {
return PlatformDependent0.getByte(address);
}
public static short getShort(long address) {
return PlatformDependent0.getShort(address);
}
public static int getInt(long address) {
return PlatformDependent0.getInt(address);
}
public static long getLong(long address) {
return PlatformDependent0.getLong(address);
}
public static void putOrderedObject(Object object, long address, Object value) {
PlatformDependent0.putOrderedObject(object, address, value);
}
public static void putByte(long address, byte value) {
PlatformDependent0.putByte(address, value);
}
public static void putShort(long address, short value) {
PlatformDependent0.putShort(address, value);
}
public static void putInt(long address, int value) {
PlatformDependent0.putInt(address, value);
}
public static void putLong(long address, long value) {
PlatformDependent0.putLong(address, value);
}
public static void copyMemory(long srcAddr, long dstAddr, long length) {
PlatformDependent0.copyMemory(srcAddr, dstAddr, length);
}
public static void copyMemory(byte[] src, int srcIndex, long dstAddr, long length) {
PlatformDependent0.copyMemory(src, ARRAY_BASE_OFFSET + srcIndex, null, dstAddr, length);
}
public static void copyMemory(long srcAddr, byte[] dst, int dstIndex, long length) {
PlatformDependent0.copyMemory(null, srcAddr, dst, ARRAY_BASE_OFFSET + dstIndex, length);
}
/**
* Compare two {@code byte} arrays for equality. For performance reasons no bounds checking on the
* parameters is performed.
*
* @param bytes1 the first byte array.
* @param startPos1 the position (inclusive) to start comparing in {@code bytes1}.
* @param endPos1 the position (exclusive) to stop comparing in {@code bytes1}.
* @param bytes2 the second byte array.
* @param startPos2 the position (inclusive) to start comparing in {@code bytes2}.
* @param endPos2 the position (exclusive) to stop comparing in {@code bytes2}.
*/
public static boolean equals(byte[] bytes1, int startPos1, int endPos1, byte[] bytes2, int startPos2, int endPos2) {
if (!hasUnsafe() || !PlatformDependent0.unalignedAccess()) {
return safeEquals(bytes1, startPos1, endPos1, bytes2, startPos2, endPos2);
}
return PlatformDependent0.equals(bytes1, startPos1, endPos1, bytes2, startPos2, endPos2);
}
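    // Illustrative usage sketch (the arrays a and b are hypothetical):
    //   boolean sameHeader = PlatformDependent.equals(a, 0, 4, b, 0, 4);
    // compares a[0..3] with b[0..3]; the caller is responsible for bounds checking.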
/**
* Create a new optimized {@link AtomicReferenceFieldUpdater} or {@code null} if it
     * could not be created. Because of this the caller needs to check for {@code null} and, if {@code null} is
     * returned, use {@link AtomicReferenceFieldUpdater#newUpdater(Class, Class, String)} as fallback.
*/
public static <U, W> AtomicReferenceFieldUpdater<U, W> newAtomicReferenceFieldUpdater(
Class<U> tclass, String fieldName) {
if (hasUnsafe()) {
try {
return PlatformDependent0.newAtomicReferenceFieldUpdater(tclass, fieldName);
} catch (Throwable ignore) {
// ignore
}
}
return null;
}
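    // Illustrative fallback pattern (Foo and its volatile field "bar" of type Bar are hypothetical):
    //   AtomicReferenceFieldUpdater<Foo, Bar> updater =
    //           PlatformDependent.newAtomicReferenceFieldUpdater(Foo.class, "bar");
    //   if (updater == null) {
    //       updater = AtomicReferenceFieldUpdater.newUpdater(Foo.class, Bar.class, "bar");
    //   }
    // The same null-check-and-fallback applies to the integer and long updater factories below.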
/**
* Create a new optimized {@link AtomicIntegerFieldUpdater} or {@code null} if it
     * could not be created. Because of this the caller needs to check for {@code null} and, if {@code null} is
     * returned, use {@link AtomicIntegerFieldUpdater#newUpdater(Class, String)} as fallback.
*/
public static <T> AtomicIntegerFieldUpdater<T> newAtomicIntegerFieldUpdater(
Class<?> tclass, String fieldName) {
if (hasUnsafe()) {
try {
return PlatformDependent0.newAtomicIntegerFieldUpdater(tclass, fieldName);
} catch (Throwable ignore) {
// ignore
}
}
return null;
}
/**
* Create a new optimized {@link AtomicLongFieldUpdater} or {@code null} if it
     * could not be created. Because of this the caller needs to check for {@code null} and, if {@code null} is
     * returned, use {@link AtomicLongFieldUpdater#newUpdater(Class, String)} as fallback.
*/
public static <T> AtomicLongFieldUpdater<T> newAtomicLongFieldUpdater(
Class<?> tclass, String fieldName) {
if (hasUnsafe()) {
try {
return PlatformDependent0.newAtomicLongFieldUpdater(tclass, fieldName);
} catch (Throwable ignore) {
// ignore
}
}
return null;
}
/**
* Create a new {@link Queue} which is safe to use for multiple producers (different threads) and a single
* consumer (one thread!).
*/
public static <T> Queue<T> newMpscQueue() {
return new MpscLinkedQueue<T>();
}
/**
* Return the {@link ClassLoader} for the given {@link Class}.
*/
public static ClassLoader getClassLoader(final Class<?> clazz) {
return PlatformDependent0.getClassLoader(clazz);
}
/**
* Return the context {@link ClassLoader} for the current {@link Thread}.
*/
public static ClassLoader getContextClassLoader() {
return PlatformDependent0.getContextClassLoader();
}
/**
* Return the system {@link ClassLoader}.
*/
public static ClassLoader getSystemClassLoader() {
return PlatformDependent0.getSystemClassLoader();
}
private static boolean isAndroid0() {
boolean android;
try {
Class.forName("android.app.Application", false, getSystemClassLoader());
android = true;
} catch (Exception e) {
// Failed to load the class uniquely available in Android.
android = false;
}
if (android) {
logger.debug("Platform: Android");
}
return android;
}
private static boolean isWindows0() {
boolean windows = SystemPropertyUtil.get("os.name", "").toLowerCase(Locale.US).contains("win");
if (windows) {
logger.debug("Platform: Windows");
}
return windows;
}
private static boolean isRoot0() {
if (isWindows()) {
return false;
}
String[] ID_COMMANDS = { "/usr/bin/id", "/bin/id", "/usr/xpg4/bin/id", "id"};
Pattern UID_PATTERN = Pattern.compile("^(?:0|[1-9][0-9]*)$");
for (String idCmd: ID_COMMANDS) {
Process p = null;
BufferedReader in = null;
String uid = null;
try {
p = Runtime.getRuntime().exec(new String[] { idCmd, "-u" });
in = new BufferedReader(new InputStreamReader(p.getInputStream(), CharsetUtil.US_ASCII));
uid = in.readLine();
in.close();
for (;;) {
try {
int exitCode = p.waitFor();
if (exitCode != 0) {
uid = null;
}
break;
} catch (InterruptedException e) {
// Ignore
}
}
} catch (Exception e) {
// Failed to run the command.
uid = null;
} finally {
if (in != null) {
try {
in.close();
} catch (IOException e) {
// Ignore
}
}
if (p != null) {
try {
p.destroy();
} catch (Exception e) {
// Android sometimes triggers an ErrnoException.
}
}
}
if (uid != null && UID_PATTERN.matcher(uid).matches()) {
logger.debug("UID: {}", uid);
return "0".equals(uid);
}
}
logger.debug("Could not determine the current UID using /usr/bin/id; attempting to bind at privileged ports.");
Pattern PERMISSION_DENIED = Pattern.compile(".*(?:denied|not.*permitted).*");
for (int i = 1023; i > 0; i --) {
ServerSocket ss = null;
try {
ss = new ServerSocket();
ss.setReuseAddress(true);
ss.bind(new InetSocketAddress(i));
if (logger.isDebugEnabled()) {
logger.debug("UID: 0 (succeded to bind at port {})", i);
}
return true;
} catch (Exception e) {
// Failed to bind.
// Check the error message so that we don't always need to bind 1023 times.
String message = e.getMessage();
if (message == null) {
message = "";
}
message = message.toLowerCase();
if (PERMISSION_DENIED.matcher(message).matches()) {
break;
}
} finally {
if (ss != null) {
try {
ss.close();
} catch (Exception e) {
// Ignore.
}
}
}
}
logger.debug("UID: non-root (failed to bind at any privileged ports)");
return false;
}
@SuppressWarnings("LoopStatementThatDoesntLoop")
private static int javaVersion0() {
int javaVersion;
// Not really a loop
for (;;) {
// Android
if (isAndroid()) {
javaVersion = 6;
break;
}
try {
Class.forName("java.time.Clock", false, getClassLoader(Object.class));
javaVersion = 8;
break;
} catch (Exception e) {
// Ignore
}
try {
Class.forName("java.util.concurrent.LinkedTransferQueue", false, getClassLoader(BlockingQueue.class));
javaVersion = 7;
break;
} catch (Exception e) {
// Ignore
}
javaVersion = 6;
break;
}
if (logger.isDebugEnabled()) {
logger.debug("Java version: {}", javaVersion);
}
return javaVersion;
}
private static boolean hasUnsafe0() {
boolean noUnsafe = SystemPropertyUtil.getBoolean("io.netty.noUnsafe", false);
logger.debug("-Dio.netty.noUnsafe: {}", noUnsafe);
if (isAndroid()) {
logger.debug("sun.misc.Unsafe: unavailable (Android)");
return false;
}
if (noUnsafe) {
logger.debug("sun.misc.Unsafe: unavailable (io.netty.noUnsafe)");
return false;
}
// Legacy properties
boolean tryUnsafe;
if (SystemPropertyUtil.contains("io.netty.tryUnsafe")) {
tryUnsafe = SystemPropertyUtil.getBoolean("io.netty.tryUnsafe", true);
} else {
tryUnsafe = SystemPropertyUtil.getBoolean("org.jboss.netty.tryUnsafe", true);
}
if (!tryUnsafe) {
logger.debug("sun.misc.Unsafe: unavailable (io.netty.tryUnsafe/org.jboss.netty.tryUnsafe)");
return false;
}
try {
boolean hasUnsafe = PlatformDependent0.hasUnsafe();
logger.debug("sun.misc.Unsafe: {}", hasUnsafe ? "available" : "unavailable");
return hasUnsafe;
} catch (Throwable t) {
// Probably failed to initialize PlatformDependent0.
return false;
}
}
private static long maxDirectMemory0() {
long maxDirectMemory = 0;
try {
// Try to get from sun.misc.VM.maxDirectMemory() which should be most accurate.
Class<?> vmClass = Class.forName("sun.misc.VM", true, getSystemClassLoader());
Method m = vmClass.getDeclaredMethod("maxDirectMemory");
maxDirectMemory = ((Number) m.invoke(null)).longValue();
} catch (Throwable t) {
// Ignore
}
if (maxDirectMemory > 0) {
return maxDirectMemory;
}
try {
// Now try to get the JVM option (-XX:MaxDirectMemorySize) and parse it.
// Note that we are using reflection because Android doesn't have these classes.
Class<?> mgmtFactoryClass = Class.forName(
"java.lang.management.ManagementFactory", true, getSystemClassLoader());
Class<?> runtimeClass = Class.forName(
"java.lang.management.RuntimeMXBean", true, getSystemClassLoader());
Object runtime = mgmtFactoryClass.getDeclaredMethod("getRuntimeMXBean").invoke(null);
@SuppressWarnings("unchecked")
List<String> vmArgs = (List<String>) runtimeClass.getDeclaredMethod("getInputArguments").invoke(runtime);
for (int i = vmArgs.size() - 1; i >= 0; i --) {
Matcher m = MAX_DIRECT_MEMORY_SIZE_ARG_PATTERN.matcher(vmArgs.get(i));
if (!m.matches()) {
continue;
}
maxDirectMemory = Long.parseLong(m.group(1));
switch (m.group(2).charAt(0)) {
case 'k': case 'K':
maxDirectMemory *= 1024;
break;
case 'm': case 'M':
maxDirectMemory *= 1024 * 1024;
break;
case 'g': case 'G':
maxDirectMemory *= 1024 * 1024 * 1024;
break;
}
break;
}
} catch (Throwable t) {
// Ignore
}
if (maxDirectMemory <= 0) {
maxDirectMemory = Runtime.getRuntime().maxMemory();
logger.debug("maxDirectMemory: {} bytes (maybe)", maxDirectMemory);
} else {
logger.debug("maxDirectMemory: {} bytes", maxDirectMemory);
}
return maxDirectMemory;
}
private static boolean hasJavassist0() {
if (isAndroid()) {
return false;
}
boolean noJavassist = SystemPropertyUtil.getBoolean("io.netty.noJavassist", false);
logger.debug("-Dio.netty.noJavassist: {}", noJavassist);
if (noJavassist) {
logger.debug("Javassist: unavailable (io.netty.noJavassist)");
return false;
}
try {
JavassistTypeParameterMatcherGenerator.generate(Object.class, getClassLoader(PlatformDependent.class));
logger.debug("Javassist: available");
return true;
} catch (Throwable t) {
// Failed to generate a Javassist-based matcher.
logger.debug("Javassist: unavailable");
logger.debug(
"You don't have Javassist in your class path or you don't have enough permission " +
"to load dynamically generated classes. Please check the configuration for better performance.");
return false;
}
}
private static File tmpdir0() {
File f;
try {
f = toDirectory(SystemPropertyUtil.get("io.netty.tmpdir"));
if (f != null) {
logger.debug("-Dio.netty.tmpdir: {}", f);
return f;
}
f = toDirectory(SystemPropertyUtil.get("java.io.tmpdir"));
if (f != null) {
logger.debug("-Dio.netty.tmpdir: {} (java.io.tmpdir)", f);
return f;
}
// This shouldn't happen, but just in case ..
if (isWindows()) {
f = toDirectory(System.getenv("TEMP"));
if (f != null) {
logger.debug("-Dio.netty.tmpdir: {} (%TEMP%)", f);
return f;
}
String userprofile = System.getenv("USERPROFILE");
if (userprofile != null) {
f = toDirectory(userprofile + "\\AppData\\Local\\Temp");
if (f != null) {
logger.debug("-Dio.netty.tmpdir: {} (%USERPROFILE%\\AppData\\Local\\Temp)", f);
return f;
}
f = toDirectory(userprofile + "\\Local Settings\\Temp");
if (f != null) {
logger.debug("-Dio.netty.tmpdir: {} (%USERPROFILE%\\Local Settings\\Temp)", f);
return f;
}
}
} else {
f = toDirectory(System.getenv("TMPDIR"));
if (f != null) {
logger.debug("-Dio.netty.tmpdir: {} ($TMPDIR)", f);
return f;
}
}
} catch (Exception ignored) {
// Environment variable inaccessible
}
// Last resort.
if (isWindows()) {
f = new File("C:\\Windows\\Temp");
} else {
f = new File("/tmp");
}
logger.warn("Failed to get the temporary directory; falling back to: {}", f);
return f;
}
@SuppressWarnings("ResultOfMethodCallIgnored")
private static File toDirectory(String path) {
if (path == null) {
return null;
}
File f = new File(path);
f.mkdirs();
if (!f.isDirectory()) {
return null;
}
try {
return f.getAbsoluteFile();
} catch (Exception ignored) {
return f;
}
}
private static int bitMode0() {
// Check user-specified bit mode first.
int bitMode = SystemPropertyUtil.getInt("io.netty.bitMode", 0);
if (bitMode > 0) {
logger.debug("-Dio.netty.bitMode: {}", bitMode);
return bitMode;
}
        // And then the vendor-specific ones, which are probably most reliable.
bitMode = SystemPropertyUtil.getInt("sun.arch.data.model", 0);
if (bitMode > 0) {
logger.debug("-Dio.netty.bitMode: {} (sun.arch.data.model)", bitMode);
return bitMode;
}
bitMode = SystemPropertyUtil.getInt("com.ibm.vm.bitmode", 0);
if (bitMode > 0) {
logger.debug("-Dio.netty.bitMode: {} (com.ibm.vm.bitmode)", bitMode);
return bitMode;
}
// os.arch also gives us a good hint.
String arch = SystemPropertyUtil.get("os.arch", "").toLowerCase(Locale.US).trim();
if ("amd64".equals(arch) || "x86_64".equals(arch)) {
bitMode = 64;
} else if ("i386".equals(arch) || "i486".equals(arch) || "i586".equals(arch) || "i686".equals(arch)) {
bitMode = 32;
}
if (bitMode > 0) {
logger.debug("-Dio.netty.bitMode: {} (os.arch: {})", bitMode, arch);
}
// Last resort: guess from VM name and then fall back to most common 64-bit mode.
String vm = SystemPropertyUtil.get("java.vm.name", "").toLowerCase(Locale.US);
Pattern BIT_PATTERN = Pattern.compile("([1-9][0-9]+)-?bit");
Matcher m = BIT_PATTERN.matcher(vm);
if (m.find()) {
return Integer.parseInt(m.group(1));
} else {
return 64;
}
}
private static int addressSize0() {
if (!hasUnsafe()) {
return -1;
}
return PlatformDependent0.addressSize();
}
private static boolean safeEquals(byte[] bytes1, int startPos1, int endPos1,
byte[] bytes2, int startPos2, int endPos2) {
final int len1 = endPos1 - startPos1;
final int len2 = endPos2 - startPos2;
if (len1 != len2) {
return false;
}
for (int i = 0; i < len1; i++) {
if (bytes1[startPos1 + i] != bytes2[startPos2 + i]) {
return false;
}
}
return true;
}
private PlatformDependent() {
        // only static methods supported
}
}
| [
"\"TEMP\"",
"\"USERPROFILE\"",
"\"TMPDIR\""
]
| []
| [
"TMPDIR",
"USERPROFILE",
"TEMP"
]
| [] | ["TMPDIR", "USERPROFILE", "TEMP"] | java | 3 | 0 | |
manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'news_site.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
vendor/code.cloudfoundry.org/cli/command/v2/bind_service_command.go | package v2
import (
"os"
"code.cloudfoundry.org/cli/cf/cmd"
"code.cloudfoundry.org/cli/command"
"code.cloudfoundry.org/cli/command/flag"
)
type BindServiceCommand struct {
RequiredArgs flag.BindServiceArgs `positional-args:"yes"`
ParametersAsJSON string `short:"c" description:"Valid JSON object containing service-specific configuration parameters, provided either in-line or in a file. For a list of supported configuration parameters, see documentation for the particular service offering."`
usage interface{} `usage:"CF_NAME bind-service APP_NAME SERVICE_INSTANCE [-c PARAMETERS_AS_JSON]\n\n Optionally provide service-specific configuration parameters in a valid JSON object in-line:\n\n CF_NAME bind-service APP_NAME SERVICE_INSTANCE -c '{\"name\":\"value\",\"name\":\"value\"}'\n\n Optionally provide a file containing service-specific configuration parameters in a valid JSON object. \n The path to the parameters file can be an absolute or relative path to a file.\n CF_NAME bind-service APP_NAME SERVICE_INSTANCE -c PATH_TO_FILE\n\n Example of valid JSON object:\n {\n \"permissions\": \"read-only\"\n }\n\nEXAMPLES:\n Linux/Mac:\n CF_NAME bind-service myapp mydb -c '{\"permissions\":\"read-only\"}'\n\n Windows Command Line:\n CF_NAME bind-service myapp mydb -c \"{\\\"permissions\\\":\\\"read-only\\\"}\"\n\n Windows PowerShell:\n CF_NAME bind-service myapp mydb -c '{\\\"permissions\\\":\\\"read-only\\\"}'\n\n CF_NAME bind-service myapp mydb -c ~/workspace/tmp/instance_config.json"`
relatedCommands interface{} `related_commands:"services"`
}
func (_ BindServiceCommand) Setup(config command.Config, ui command.UI) error {
return nil
}
func (_ BindServiceCommand) Execute(args []string) error {
cmd.Main(os.Getenv("CF_TRACE"), os.Args)
return nil
}
| [
"\"CF_TRACE\""
]
| []
| [
"CF_TRACE"
]
| [] | ["CF_TRACE"] | go | 1 | 0 | |
services/azblob/tests/utils_test.go | package tests
import (
"os"
"testing"
"github.com/google/uuid"
azblob "go.beyondstorage.io/services/azblob/v3"
ps "go.beyondstorage.io/v5/pairs"
"go.beyondstorage.io/v5/types"
)
func setupTest(t *testing.T) types.Storager {
t.Log("Setup test for azblob")
store, err := azblob.NewStorager(
ps.WithCredential(os.Getenv("STORAGE_AZBLOB_CREDENTIAL")),
ps.WithName(os.Getenv("STORAGE_AZBLOB_NAME")),
ps.WithEndpoint(os.Getenv("STORAGE_AZBLOB_ENDPOINT")),
ps.WithWorkDir("/"+uuid.New().String()+"/"),
azblob.WithStorageFeatures(azblob.StorageFeatures{
VirtualDir: true,
}),
)
if err != nil {
t.Errorf("new storager: %v", err)
}
return store
}
| [
"\"STORAGE_AZBLOB_CREDENTIAL\"",
"\"STORAGE_AZBLOB_NAME\"",
"\"STORAGE_AZBLOB_ENDPOINT\""
]
| []
| [
"STORAGE_AZBLOB_CREDENTIAL",
"STORAGE_AZBLOB_ENDPOINT",
"STORAGE_AZBLOB_NAME"
]
| [] | ["STORAGE_AZBLOB_CREDENTIAL", "STORAGE_AZBLOB_ENDPOINT", "STORAGE_AZBLOB_NAME"] | go | 3 | 0 | |
pkg/agent/runtime.go | package agent
import (
"bufio"
"context"
"crypto/tls"
"encoding/base64"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"net/url"
"os"
"path"
"strings"
"sync"
"syscall"
"text/tabwriter"
"time"
"github.com/aylei/kubectl-debug/pkg/nsenter"
term "github.com/aylei/kubectl-debug/pkg/util"
containerd "github.com/containerd/containerd"
"github.com/containerd/containerd/cio"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
glog "github.com/containerd/containerd/log"
"github.com/containerd/containerd/namespaces"
"github.com/containerd/containerd/oci"
"github.com/containerd/containerd/pkg/progress"
"github.com/containerd/containerd/remotes"
"github.com/containerd/containerd/remotes/docker"
"github.com/containerd/typeurl"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/strslice"
dockerclient "github.com/docker/docker/client"
"github.com/docker/docker/pkg/stdcopy"
"github.com/google/uuid"
"github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/opencontainers/runtime-spec/specs-go"
kubetype "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/remotecommand"
kubeletremote "k8s.io/kubernetes/pkg/kubelet/server/remotecommand"
)
type ContainerRuntimeScheme string
const (
DockerScheme ContainerRuntimeScheme = "docker"
ContainerdScheme ContainerRuntimeScheme = "containerd"
KubectlDebugNS string = "kctldbg"
K8NS string = "k8s.io"
)
type ContainerInfo struct {
Pid int64
MountDestinations []string
}
type RunConfig struct {
context context.Context
timeout time.Duration
idOfContainerToDebug string
image string
command []string
stdin io.Reader
stdout io.WriteCloser
stderr io.WriteCloser
tty bool
resize <-chan remotecommand.TerminalSize
clientHostName string
clientUserName string
verbosity int
audit bool
auditFifo string
auditShim []string
}
func (c *RunConfig) getContextWithTimeout() (context.Context, context.CancelFunc) {
return context.WithTimeout(c.context, c.timeout)
}
type ContainerRuntime interface {
PullImage(ctx context.Context, image string,
skipTLS bool, authStr string,
cfg RunConfig) error
ContainerInfo(ctx context.Context, cfg RunConfig) (ContainerInfo, error)
RunDebugContainer(cfg RunConfig) error
}
type DockerContainerRuntime struct {
client *dockerclient.Client
}
var DockerContainerRuntimeImplementsContainerRuntime ContainerRuntime = (*DockerContainerRuntime)(nil)
func (c *DockerContainerRuntime) PullImage(ctx context.Context,
image string, skipTLS bool, authStr string,
cfg RunConfig) error {
authBytes := base64.URLEncoding.EncodeToString([]byte(authStr))
out, err := c.client.ImagePull(ctx, image, types.ImagePullOptions{RegistryAuth: string(authBytes)})
if err != nil {
return err
}
defer out.Close()
// write pull progress to user
if cfg.verbosity > 0 {
term.DisplayJSONMessagesStream(out, cfg.stdout, 1, true, nil)
}
return nil
}
func (c *DockerContainerRuntime) ContainerInfo(ctx context.Context, cfg RunConfig) (ContainerInfo, error) {
var ret ContainerInfo
cntnr, err := c.client.ContainerInspect(ctx, cfg.idOfContainerToDebug)
if err != nil {
return ContainerInfo{}, err
}
ret.Pid = int64(cntnr.State.Pid)
for _, mount := range cntnr.Mounts {
ret.MountDestinations = append(ret.MountDestinations, mount.Destination)
}
return ret, nil
}
func (c *DockerContainerRuntime) RunDebugContainer(cfg RunConfig) error {
createdBody, err := c.CreateContainer(cfg)
if err != nil {
return err
}
if err := c.StartContainer(cfg, createdBody.ID); err != nil {
return err
}
defer c.CleanContainer(cfg, createdBody.ID)
cfg.stdout.Write([]byte("container created, open tty...\n\r"))
// from now on, should pipe stdin to the container and no long read stdin
// close(m.stopListenEOF)
return c.AttachToContainer(cfg, createdBody.ID)
}
func (c *DockerContainerRuntime) CreateContainer(cfg RunConfig) (*container.ContainerCreateCreatedBody, error) {
config := &container.Config{
Entrypoint: strslice.StrSlice(cfg.command),
Image: cfg.image,
Tty: true,
OpenStdin: true,
StdinOnce: true,
}
hostConfig := &container.HostConfig{
NetworkMode: container.NetworkMode(c.containerMode(cfg.idOfContainerToDebug)),
UsernsMode: container.UsernsMode(c.containerMode(cfg.idOfContainerToDebug)),
IpcMode: container.IpcMode(c.containerMode(cfg.idOfContainerToDebug)),
PidMode: container.PidMode(c.containerMode(cfg.idOfContainerToDebug)),
CapAdd: strslice.StrSlice([]string{"SYS_PTRACE", "SYS_ADMIN"}),
}
ctx, cancel := cfg.getContextWithTimeout()
defer cancel()
body, err := c.client.ContainerCreate(ctx, config, hostConfig, nil, "")
if err != nil {
return nil, err
}
return &body, nil
}
func (c *DockerContainerRuntime) containerMode(idOfCntnrToDbg string) string {
return fmt.Sprintf("container:%s", idOfCntnrToDbg)
}
// Run a new container, this container will join the network,
// mount, and pid namespace of the given container
func (c *DockerContainerRuntime) StartContainer(cfg RunConfig, id string) error {
ctx, cancel := cfg.getContextWithTimeout()
defer cancel()
err := c.client.ContainerStart(ctx, id, types.ContainerStartOptions{})
if err != nil {
return err
}
return nil
}
func (c *DockerContainerRuntime) CleanContainer(cfg RunConfig, id string) {
// cleanup procedure should use background context
ctx, cancel := context.WithTimeout(context.Background(), cfg.timeout)
defer cancel()
// wait the container gracefully exit
statusCh, errCh := c.client.ContainerWait(ctx, id, container.WaitConditionNotRunning)
var rmErr error
select {
case err := <-errCh:
if err != nil {
log.Println("error waiting container exit, kill with --force")
// timeout or error occurs, try force remove anywawy
rmErr = c.RmContainer(cfg, id, true)
}
case <-statusCh:
rmErr = c.RmContainer(cfg, id, false)
}
if rmErr != nil {
log.Printf("error remove container: %s \n", id)
} else if cfg.verbosity > 0 {
log.Printf("Debug session end, debug container %s removed", id)
}
}
func (c *DockerContainerRuntime) RmContainer(cfg RunConfig, id string, force bool) error {
// cleanup procedure should use background context
ctx, cancel := context.WithTimeout(context.Background(), cfg.timeout)
defer cancel()
return c.client.ContainerRemove(ctx, id,
types.ContainerRemoveOptions{
			Force: force,
})
}
// AttachToContainer do `docker attach`. Blocks until container I/O complete
func (c *DockerContainerRuntime) AttachToContainer(cfg RunConfig, container string) error {
HandleResizing(cfg.resize, func(size remotecommand.TerminalSize) {
c.resizeContainerTTY(cfg, container, uint(size.Height), uint(size.Width))
})
opts := types.ContainerAttachOptions{
Stream: true,
Stdin: cfg.stdin != nil,
Stdout: cfg.stdout != nil,
Stderr: cfg.stderr != nil,
}
ctx, cancel := cfg.getContextWithTimeout()
defer cancel()
resp, err := c.client.ContainerAttach(ctx, container, opts)
if err != nil {
return err
}
defer resp.Close()
return c.holdHijackedConnection(cfg, resp)
}
func (c *DockerContainerRuntime) resizeContainerTTY(cfg RunConfig, id string, height, width uint) error {
ctx, cancel := cfg.getContextWithTimeout()
defer cancel()
return c.client.ContainerResize(ctx, id, types.ResizeOptions{
Height: height,
Width: width,
})
}
// holdHijackedConnection hold the HijackedResponse, redirect the inputStream to the connection, and redirect the response
// stream to stdout and stderr. NOTE: If needed, we could also add context in this function.
func (c *DockerContainerRuntime) holdHijackedConnection(cfg RunConfig, resp types.HijackedResponse) error {
receiveStdout := make(chan error)
if cfg.stdout != nil || cfg.stderr != nil {
go func() {
receiveStdout <- c.redirectResponseToOutputStream(cfg, resp.Reader)
}()
}
stdinDone := make(chan struct{})
go func() {
if cfg.stdin != nil {
io.Copy(resp.Conn, cfg.stdin)
}
resp.CloseWrite()
close(stdinDone)
}()
select {
case err := <-receiveStdout:
return err
case <-stdinDone:
if cfg.stdout != nil || cfg.stderr != nil {
return <-receiveStdout
}
}
return nil
}
func (c *DockerContainerRuntime) redirectResponseToOutputStream(cfg RunConfig, resp io.Reader) error {
var stdout io.Writer = cfg.stdout
if stdout == nil {
stdout = ioutil.Discard
}
var stderr io.Writer = cfg.stderr
if stderr == nil {
stderr = ioutil.Discard
}
var err error
if cfg.tty {
_, err = io.Copy(stdout, resp)
} else {
_, err = stdcopy.StdCopy(stdout, stderr, resp)
}
return err
}
type ContainerdContainerRuntime struct {
client *containerd.Client
image containerd.Image
}
var ContainerdContainerRuntimeImplementsContainerRuntime ContainerRuntime = (*ContainerdContainerRuntime)(nil)
var PushTracker = docker.NewInMemoryTracker()
type jobs struct {
name string
added map[digest.Digest]struct{}
descs []ocispec.Descriptor
mu sync.Mutex
resolved bool
}
func (j *jobs) isResolved() bool {
j.mu.Lock()
defer j.mu.Unlock()
return j.resolved
}
func (j *jobs) jobs() []ocispec.Descriptor {
j.mu.Lock()
defer j.mu.Unlock()
var descs []ocispec.Descriptor
return append(descs, j.descs...)
}
func newJobs(name string) *jobs {
return &jobs{
name: name,
added: map[digest.Digest]struct{}{},
}
}
type StatusInfo struct {
Ref string
Status string
Offset int64
Total int64
StartedAt time.Time
UpdatedAt time.Time
}
func Display(w io.Writer, statuses []StatusInfo, start time.Time) {
var total int64
for _, status := range statuses {
total += status.Offset
switch status.Status {
case "downloading", "uploading":
var bar progress.Bar
if status.Total > 0.0 {
bar = progress.Bar(float64(status.Offset) / float64(status.Total))
}
fmt.Fprintf(w, "%s:\t%s\t%40r\t%8.8s/%s\t\r\n",
status.Ref,
status.Status,
bar,
progress.Bytes(status.Offset), progress.Bytes(status.Total))
case "resolving", "waiting":
bar := progress.Bar(0.0)
fmt.Fprintf(w, "%s:\t%s\t%40r\t\r\n",
status.Ref,
status.Status,
bar)
default:
bar := progress.Bar(1.0)
fmt.Fprintf(w, "%s:\t%s\t%40r\t\r\n",
status.Ref,
status.Status,
bar)
}
}
fmt.Fprintf(w, "elapsed: %-4.1fs\ttotal: %7.6v\t(%v)\t\r\n",
time.Since(start).Seconds(),
// TODO(stevvooe): These calculations are actually way off.
// Need to account for previously downloaded data. These
// will basically be right for a download the first time
// but will be skewed if restarting, as it includes the
// data into the start time before.
progress.Bytes(total),
progress.NewBytesPerSecond(total, time.Since(start)))
}
func showProgress(ctx context.Context, ongoing *jobs, cs content.Store, out io.Writer) {
var (
ticker = time.NewTicker(100 * time.Millisecond)
fw = progress.NewWriter(out)
start = time.Now()
statuses = map[string]StatusInfo{}
done bool
)
defer ticker.Stop()
outer:
for {
select {
case <-ticker.C:
fw.Flush()
tw := tabwriter.NewWriter(fw, 1, 8, 1, ' ', 0)
resolved := "resolved"
if !ongoing.isResolved() {
resolved = "resolving"
}
statuses[ongoing.name] = StatusInfo{
Ref: ongoing.name,
Status: resolved,
}
keys := []string{ongoing.name}
activeSeen := map[string]struct{}{}
if !done {
active, err := cs.ListStatuses(ctx, "")
if err != nil {
glog.G(ctx).WithError(err).Error("active check failed")
continue
}
// update status of active entries!
for _, active := range active {
statuses[active.Ref] = StatusInfo{
Ref: active.Ref,
Status: "downloading",
Offset: active.Offset,
Total: active.Total,
StartedAt: active.StartedAt,
UpdatedAt: active.UpdatedAt,
}
activeSeen[active.Ref] = struct{}{}
}
}
// now, update the items in jobs that are not in active
for _, j := range ongoing.jobs() {
key := remotes.MakeRefKey(ctx, j)
keys = append(keys, key)
if _, ok := activeSeen[key]; ok {
continue
}
status, ok := statuses[key]
if !done && (!ok || status.Status == "downloading") {
info, err := cs.Info(ctx, j.Digest)
if err != nil {
if !errdefs.IsNotFound(err) {
glog.G(ctx).WithError(err).Errorf("failed to get content info")
continue outer
} else {
statuses[key] = StatusInfo{
Ref: key,
Status: "waiting",
}
}
} else if info.CreatedAt.After(start) {
statuses[key] = StatusInfo{
Ref: key,
Status: "done",
Offset: info.Size,
Total: info.Size,
UpdatedAt: info.CreatedAt,
}
} else {
statuses[key] = StatusInfo{
Ref: key,
Status: "exists",
}
}
} else if done {
if ok {
if status.Status != "done" && status.Status != "exists" {
status.Status = "done"
statuses[key] = status
}
} else {
statuses[key] = StatusInfo{
Ref: key,
Status: "done",
}
}
}
}
var ordered []StatusInfo
for _, key := range keys {
ordered = append(ordered, statuses[key])
}
Display(tw, ordered, start)
tw.Flush()
if done {
fw.Flush()
return
}
case <-ctx.Done():
done = true // allow ui to update once more
}
}
}
func (c *ContainerdContainerRuntime) PullImage(
ctx context.Context, image string, skipTLS bool,
authStr string,
cfg RunConfig) error {
authStr, err := url.QueryUnescape(authStr)
if err != nil {
log.Printf("Failed to decode authStr: %v\r\n", err)
return err
}
ctx = namespaces.WithNamespace(ctx, KubectlDebugNS)
ongoing := newJobs(image)
pctx, stopProgress := context.WithCancel(ctx)
if cfg.verbosity > 0 {
progress := make(chan struct{})
go func() {
if cfg.stdout != nil {
// no progress bar, because it hides some debug logs
showProgress(pctx, ongoing, c.client.ContentStore(), cfg.stdout)
}
close(progress)
}()
}
rslvrOpts := docker.ResolverOptions{
Tracker: PushTracker,
}
rmtOpts := []containerd.RemoteOpt{
containerd.WithPullUnpack,
}
crds := strings.Split(authStr, ":")
if cfg.verbosity > 0 {
log.Printf("User name for pull : %v\r\n", crds[0])
}
var useCrds = len(crds) == 2
if useCrds || skipTLS {
tr := &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
DualStack: true,
}).DialContext,
MaxIdleConns: 10,
IdleConnTimeout: 30 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
TLSClientConfig: &tls.Config{
InsecureSkipVerify: skipTLS,
},
ExpectContinueTimeout: 5 * time.Second,
}
rslvrOpts.Client = &http.Client{
Transport: tr,
}
if useCrds {
if cfg.verbosity > 0 {
log.Println("Setting credentials call back")
}
crdsClbck := func(host string) (string, string, error) {
if cfg.verbosity > 0 {
log.Printf("crdsClbck returning username: %v\r\n", crds[0])
}
return crds[0], crds[1], nil
}
authOpts := []docker.AuthorizerOpt{
docker.WithAuthClient(rslvrOpts.Client), docker.WithAuthCreds(crdsClbck),
}
rslvrOpts.Authorizer = docker.NewDockerAuthorizer(authOpts...)
}
rmtOpts = append(rmtOpts, containerd.WithResolver(docker.NewResolver(rslvrOpts)))
}
c.image, err = c.client.Pull(ctx, image, rmtOpts...)
stopProgress()
if err != nil {
log.Printf("Failed to download image: %v\r\n", err)
return err
}
return err
}
func (c *ContainerdContainerRuntime) ContainerInfo(
ctx context.Context, cfg RunConfig) (ContainerInfo, error) {
var ret ContainerInfo
ctx = namespaces.WithNamespace(ctx, K8NS)
cntnr, err := c.client.LoadContainer(ctx, cfg.idOfContainerToDebug)
if err != nil {
log.Printf("Failed to access target container %s : %v\r\n",
cfg.idOfContainerToDebug, err)
return ContainerInfo{}, err
}
tsk, err := cntnr.Task(ctx, nil)
if err != nil {
log.Printf("Failed to get task of target container %s : %v\r\n",
cfg.idOfContainerToDebug, err)
return ContainerInfo{}, err
}
pids, err := tsk.Pids(ctx)
if err != nil {
log.Printf("Failed to get pids of target container %s : %v\r\n",
cfg.idOfContainerToDebug, err)
return ContainerInfo{}, err
}
info, err := cntnr.Info(ctx, containerd.WithoutRefreshedMetadata)
if err != nil {
log.Printf("Failed to load target container info %s : %v\r\n",
cfg.idOfContainerToDebug, err)
return ContainerInfo{}, err
}
if cfg.verbosity > 0 {
log.Printf("Pids from target container: %+v\r\n", pids)
}
ret.Pid = int64(pids[0].Pid)
if info.Spec != nil && info.Spec.Value != nil {
v, err := typeurl.UnmarshalAny(info.Spec)
if err != nil {
log.Printf("Error unmarshalling spec for container %s : %v\r\n",
cfg.idOfContainerToDebug, err)
}
for _, mnt := range v.(*specs.Spec).Mounts {
ret.MountDestinations = append(
ret.MountDestinations, mnt.Destination)
fmt.Printf("%+v\r\n", mnt)
}
}
return ret, nil
}
const (
// netNSFormat is the format of network namespace of a process.
netNSFormat = "/proc/%v/ns/net"
// ipcNSFormat is the format of ipc namespace of a process.
ipcNSFormat = "/proc/%v/ns/ipc"
// utsNSFormat is the format of uts namespace of a process.
userNSFormat = "/proc/%v/ns/user"
// pidNSFormat is the format of pid namespace of a process.
pidNSFormat = "/proc/%v/ns/pid"
)
func GetNetworkNamespace(pid int64) string {
return fmt.Sprintf(netNSFormat, pid)
}
func GetIPCNamespace(pid int64) string {
return fmt.Sprintf(ipcNSFormat, pid)
}
func GetUserNamespace(pid int64) string {
return fmt.Sprintf(userNSFormat, pid)
}
func GetPIDNamespace(pid int64) string {
return fmt.Sprintf(pidNSFormat, pid)
}
func (c *ContainerdContainerRuntime) RunDebugContainer(cfg RunConfig) error {
defer c.client.Close()
uuid := uuid.New().String()
fifoNm := ""
if cfg.audit {
fifoDir, _ := path.Split(cfg.auditFifo)
err := os.MkdirAll(fifoDir, 0777)
if err != nil {
fmt.Printf("Failed to create directory for audit fifos, %v : %v\r\n", fifoDir, err)
return err
}
fifoNm = strings.ReplaceAll(cfg.auditFifo, "KCTLDBG-CONTAINER-ID", uuid)
if cfg.verbosity > 0 {
log.Printf("Creating fifo %v for receiving audit data.\r\n", fifoNm)
}
err = syscall.Mkfifo(fifoNm, 0600)
if err != nil {
fmt.Printf("Failed to create audit fifo %v : %v\r\n", fifoNm, err)
return err
} else {
defer os.Remove(fifoNm)
}
go func() {
log.Println("Audit read thread started.")
fl, rdErr := os.Open(fifoNm)
if rdErr != nil {
log.Printf("Audit read thread aborting. Failed to open fifo : %v\r\n", rdErr)
return
}
log.Println("Audit read thread started.")
defer fl.Close()
rdr := bufio.NewReader(fl)
var ln []byte
for {
ln, _, rdErr = rdr.ReadLine()
if rdErr != nil {
break
}
log.Printf("audit - user: %v debugee: %v exec: %v\r\n", cfg.clientUserName,
cfg.idOfContainerToDebug, string(ln))
}
if rdErr != nil {
if rdErr == io.EOF {
log.Printf("EOF reached while reading from %v. Audit read thread exiting.\r\n", fifoNm)
} else {
log.Printf("Error %v while reading from %v. Audit read thread exiting.\r\n", rdErr, fifoNm)
}
}
}()
}
	// Audit setup is complete: the fifo reader goroutine above exits on EOF/error, and the fifo file is removed via defer.
ctx := namespaces.WithNamespace(cfg.context, KubectlDebugNS)
var spcOpts []oci.SpecOpts
spcOpts = append(spcOpts, oci.WithImageConfig(c.image))
spcOpts = append(spcOpts, oci.WithPrivileged)
	// If auditing, build the process args from the audit shim followed by cfg.command,
	// replacing the KCTLDBG-FIFO placeholder in the shim with the actual fifo path.
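	// Illustrative example (shim path and command are hypothetical): with
	//   cfg.auditShim = []string{"/usr/local/bin/audit-shim", "--fifo", "KCTLDBG-FIFO"}
	//   cfg.command   = []string{"bash"}
	// the resulting process args become
	//   []string{"/usr/local/bin/audit-shim", "--fifo", "<actual fifo path>", "bash"}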
if cfg.audit {
cmd := append([]string(nil), cfg.auditShim...)
for i, s := range cmd {
cmd[i] = strings.ReplaceAll(s, "KCTLDBG-FIFO", fifoNm)
}
cmd = append(cmd, cfg.command...)
spcOpts = append(spcOpts, oci.WithProcessArgs(cmd...))
} else {
spcOpts = append(spcOpts, oci.WithProcessArgs(cfg.command...))
}
spcOpts = append(spcOpts, oci.WithTTY)
// If fifo, make sure fifo is bind mounted
trgtInf, err := c.ContainerInfo(ctx, cfg)
if err != nil {
log.Printf("Failed to get a pid from target container %s : %v\r\n",
cfg.idOfContainerToDebug, err)
return err
}
spcOpts = append(spcOpts, oci.WithLinuxNamespace(specs.LinuxNamespace{
Type: specs.NetworkNamespace,
Path: GetNetworkNamespace(trgtInf.Pid),
}))
if fifoNm != "" {
kbctlDbgMnt := specs.Mount{
Destination: fifoNm,
Source: fifoNm,
Type: "bind",
Options: []string{"bind", "rw"},
}
spcOpts = append(spcOpts, oci.WithMounts([]specs.Mount{kbctlDbgMnt}))
}
// 2020-04-21 d :
// Tried setting the user namespace without success.
// - If I just use WithLinuxNamespace and don't use WithUserNamespace
// then I get the error "User namespaces enabled, but no uid mappings found.: unknown"
// - If I then add WithUserNamespace I instead get the error
// "getting the final child's pid from pipe caused \"EOF\"": unknown"
//
// I examined one of our environments, checked available kubernetes settings it seems
// really all containers are sharing the host user namespace. I then stumbled on this
// https://kubernetes.io/blog/2018/07/18/11-ways-not-to-get-hacked/
// article which claims that Kubernetes doesn't provide a way to set up separate user
// namespaces for containers.
// Consequently am just going to comment this out for now.
// spcOpts = append(spcOpts, oci.WithLinuxNamespace(specs.LinuxNamespace{
// Type: specs.UserNamespace,
// Path: GetUserNamespace(trgtInf.Pid),
// }))
// spcOpts = append(spcOpts, oci.WithUserNamespace(0, 0, 1024))
spcOpts = append(spcOpts, oci.WithLinuxNamespace(specs.LinuxNamespace{
Type: specs.IPCNamespace,
Path: GetIPCNamespace(trgtInf.Pid),
}))
spcOpts = append(spcOpts, oci.WithLinuxNamespace(specs.LinuxNamespace{
Type: specs.PIDNamespace,
Path: GetPIDNamespace(trgtInf.Pid),
}))
cntnr, err := c.client.NewContainer(
ctx,
// Was using "dbg-[idOfContainerToDebug]" but this meant that you couldn't use multiple debug containers for the same debugee
// e.g. You couldn't have 1 running tcpdump and another one generating traffic.
uuid,
containerd.WithImage(c.image),
containerd.WithNewSnapshot("netshoot-snapshot-"+uuid, c.image), // Had hoped this would fix 2020/04/17 17:04:31 runtime.go:672: Failed to create container for debugging 3d4059893a086fc7c59991fde9835ac7e35b754cd017a300292af9c721a4e6b9 : rootfs absolute path is required but it did not
containerd.WithNewSpec(spcOpts...),
)
if cntnr != nil {
// Label the container so we have some idea of who created it and why
lbls := make(map[string]string)
lbls["ClientHostName"] = cfg.clientHostName
lbls["ClientUserName"] = cfg.clientUserName
lbls["IdOfDebuggee"] = cfg.idOfContainerToDebug
cntnr.SetLabels(ctx, lbls)
defer func() {
cdctx, ccancel := context.WithTimeout(context.Background(), cfg.timeout)
defer ccancel()
cdctx = namespaces.WithNamespace(cdctx, KubectlDebugNS)
cderr := cntnr.Delete(cdctx, containerd.WithSnapshotCleanup)
if cderr != nil {
log.Printf("Failed to delete container for debugging %s : %v\r\n",
cfg.idOfContainerToDebug, cderr)
}
}()
}
if err != nil {
log.Printf("Failed to create container for debugging %s : %v\r\n",
cfg.idOfContainerToDebug, err)
return err
}
var stdIo cio.Opt
if cfg.stderr == nil {
// 2020-04-21 d : Otherwise create fails with
// E0421 14:16:36.797876 24356 attach.go:54] error attaching to container: failed to start io pipe copy: unable to copy pipes: containerd-shim: opening file "" failed: open : no such file or directory: unknown
stdIo = cio.WithStreams(cfg.stdin, cfg.stdout, cfg.stdout)
} else {
stdIo = cio.WithStreams(cfg.stdin, cfg.stdout, cfg.stderr)
}
tsk, err := cntnr.NewTask(ctx,
cio.NewCreator(
stdIo,
cio.WithTerminal,
))
if tsk != nil {
defer func() {
tdctx, tcancel := context.WithTimeout(context.Background(), cfg.timeout)
defer tcancel()
tdctx = namespaces.WithNamespace(tdctx, KubectlDebugNS)
_, tderr := tsk.Delete(tdctx, containerd.WithProcessKill)
if tderr != nil {
log.Printf("Failed to delete task for debugging %s : %v\r\n",
cfg.idOfContainerToDebug, tderr)
}
}()
}
if err != nil {
log.Printf("Failed to create task for debugging %s : %v\r\n",
cfg.idOfContainerToDebug, err)
return err
}
exitStatusC, err := tsk.Wait(ctx)
if err != nil {
log.Printf("Failed to get exit channel for task for debugging %s : %v\r\n",
cfg.idOfContainerToDebug, err)
return err
}
HandleResizing(cfg.resize, func(size remotecommand.TerminalSize) {
c.resizeContainerTTY(ctx, cfg.idOfContainerToDebug, tsk, size.Height,
size.Width)
})
if err := tsk.Start(ctx); err != nil {
return err
}
status := <-exitStatusC
_, _, err = status.Result()
if err != nil {
log.Printf("Failed to get exit status for task for debugging %s : %v\r\n",
cfg.idOfContainerToDebug, err)
return err
}
return nil
}
func (c *ContainerdContainerRuntime) resizeContainerTTY(ctx context.Context,
trgtId string, tsk containerd.Task, height, width uint16) error {
err := tsk.Resize(ctx, uint32(width), uint32(height))
if err != nil {
log.Printf("Failed to resize debugger task %+v for debuggee %+v : %+v\r\n",
tsk.Pid(), trgtId, err)
}
return nil
}
// DebugAttacher implements Attacher
// we use this struct in order to inject debug info (image, command) in the debug procedure
type DebugAttacher struct {
containerRuntime ContainerRuntime
image string
authStr string
registrySkipTLS bool
lxcfsEnabled bool
command []string
timeout time.Duration
idOfContainerToDebug string
verbosity int
clientHostName string
clientUserName string
// control the preparing of debug container
stopListenEOF chan struct{}
context context.Context
cancel context.CancelFunc
// audit options
audit bool
auditFifo string
auditShim []string
}
var DebugAttacherImplementsAttacher kubeletremote.Attacher = (*DebugAttacher)(nil)
// Implement kubeletremote.Attacher
func (a *DebugAttacher) AttachContainer(name string, uid kubetype.UID, container string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error {
if a.verbosity > 0 {
log.Println("Enter")
if resize == nil {
log.Println("Resize channel is nil")
}
}
return a.DebugContainer(RunConfig{
context: a.context,
timeout: a.timeout,
idOfContainerToDebug: a.idOfContainerToDebug,
image: a.image,
command: a.command,
stdin: in,
stdout: out,
stderr: err,
tty: tty,
resize: resize,
clientHostName: a.clientHostName,
clientUserName: a.clientUserName,
verbosity: a.verbosity,
audit: a.audit,
auditFifo: a.auditFifo,
auditShim: a.auditShim,
})
}
// DebugContainer executes the main debug flow
func (m *DebugAttacher) DebugContainer(cfg RunConfig) error {
if m.verbosity > 0 {
log.Printf("Accept new debug request:\n\t target container: %s \n\t image: %s \n\t command: %v \n", m.idOfContainerToDebug, m.image, m.command)
}
// the following steps may take a long time,
// so we listen for EOF from stdin,
// which lets the user terminate the procedure proactively
// FIXME: the following logic will 'eat' a character
//var buf bytes.Buffer
//tee := io.TeeReader(stdin, &buf)
//go func() {
// p := make([]byte, 4)
// OUTER:
// for {
// select {
// case <- m.stopListenEOF:
// break OUTER
// default:
// n, err := tee.Read(p)
// // 4 -> EOT
// if (n > 0 && binary.LittleEndian.Uint32(p) == 4) || err == io.EOF {
// log.Println("receive ctrl-d or EOF when preparing debug container, cancel session")
// m.cancel()
// break OUTER
// }
// }
// }
//} ()
// step 0: set container procfs correct by lxcfs
if cfg.verbosity > 0 {
cfg.stdout.Write([]byte(fmt.Sprintf("set container procfs correct %t .. \n\r", m.lxcfsEnabled)))
}
if m.lxcfsEnabled {
if err := CheckLxcfsMount(); err != nil {
return err
}
if err := m.SetContainerLxcfs(cfg); err != nil {
return err
}
}
// step 1: pull image
if cfg.verbosity > 0 {
cfg.stdout.Write([]byte(fmt.Sprintf("pulling image %s, skip TLS %v... \n\r", m.image, m.registrySkipTLS)))
}
err := m.containerRuntime.PullImage(m.context, m.image,
m.registrySkipTLS, m.authStr, cfg)
if err != nil {
return err
}
// step 2: run debug container (join the namespaces of target container)
if cfg.verbosity > 0 {
cfg.stdout.Write([]byte("starting debug container...\n\r"))
}
return m.containerRuntime.RunDebugContainer(cfg)
}
func (m *DebugAttacher) SetContainerLxcfs(cfg RunConfig) error {
ctx, cancel := cfg.getContextWithTimeout()
defer cancel()
cntnrInf, err := m.containerRuntime.ContainerInfo(ctx, cfg)
if err != nil {
return err
}
for _, mntDst := range cntnrInf.MountDestinations {
if mntDst == LxcfsRootDir {
if cfg.verbosity > 0 {
log.Printf("remount lxcfs when the rootdir of lxcfs of target container has been mounted. \n\t ")
}
for _, procfile := range LxcfsProcFiles {
nsenter := &nsenter.MountNSEnter{
Target: cntnrInf.Pid,
MountLxcfs: true,
}
_, stderr, err := nsenter.Execute("--", "mount", "-B", LxcfsHomeDir+procfile, procfile)
if err != nil {
log.Printf("bind mount lxcfs files failed. \n\t reason: %s", stderr)
return err
}
}
}
}
return nil
}
// RuntimeManager is responsible for docker and containerd operations
type RuntimeManager struct {
dockerClient *dockerclient.Client
containerdClient *containerd.Client
timeout time.Duration
verbosity int
idOfContainerToDebug string
containerScheme ContainerRuntimeScheme
clientHostName string
clientUserName string
audit bool
auditFifo string
auditShim []string
}
func NewRuntimeManager(srvCfg Config, containerUri string, verbosity int,
hstNm, usrNm string) (*RuntimeManager, error) {
if len(containerUri) < 1 {
return nil, errors.New("target container id must be provided")
}
containerUriParts := strings.SplitN(containerUri, "://", 2)
if len(containerUriParts) != 2 {
msg := fmt.Sprintf("target container id must have form scheme:id but was %v", containerUri)
if verbosity > 0 {
log.Println(msg)
}
return nil, errors.New(msg)
}
containerScheme := ContainerRuntimeScheme(containerUriParts[0])
idOfContainerToDebug := containerUriParts[1]
var dockerClient *dockerclient.Client
var containerdClient *containerd.Client
switch containerScheme {
case DockerScheme:
{
var err error
dockerClient, err = dockerclient.NewClient(srvCfg.DockerEndpoint, "", nil, nil)
if err != nil {
return nil, err
}
}
case ContainerdScheme:
{
var err error
var clntOpts []containerd.ClientOpt
if os.Getenv("KCTLDBG_CONTAINERDV1_SHIM") != "" {
if verbosity > 0 {
log.Println("Using containerd v1 runtime")
}
clntOpts = append(clntOpts,
containerd.WithDefaultRuntime("io.containerd.runc.v1"))
}
containerdClient, err = containerd.New(srvCfg.ContainerdEndpoint,
clntOpts...)
if err != nil {
return nil, err
}
}
default:
{
msg := "only docker and containerd container runtimes are suppored right now"
log.Println(msg)
return nil, errors.New(msg)
}
}
return &RuntimeManager{
dockerClient: dockerClient,
containerdClient: containerdClient,
timeout: srvCfg.RuntimeTimeout,
verbosity: verbosity,
idOfContainerToDebug: idOfContainerToDebug,
containerScheme: containerScheme,
clientHostName: hstNm,
clientUserName: usrNm,
audit: srvCfg.Audit,
auditFifo: srvCfg.AuditFifo,
auditShim: srvCfg.AuditShim,
}, nil
}
// GetAttacher returns an implementation of Attacher
func (m *RuntimeManager) GetAttacher(image, authStr string,
lxcfsEnabled, registrySkipTLS bool,
command []string, context context.Context,
cancel context.CancelFunc) kubeletremote.Attacher {
var containerRuntime ContainerRuntime
if m.dockerClient != nil {
containerRuntime = &DockerContainerRuntime{
client: m.dockerClient,
}
} else {
containerRuntime = &ContainerdContainerRuntime{
client: m.containerdClient,
}
}
return &DebugAttacher{
containerRuntime: containerRuntime,
image: image,
authStr: authStr,
lxcfsEnabled: lxcfsEnabled,
registrySkipTLS: registrySkipTLS,
command: command,
context: context,
idOfContainerToDebug: m.idOfContainerToDebug,
verbosity: m.verbosity,
timeout: m.timeout,
cancel: cancel,
stopListenEOF: make(chan struct{}),
clientHostName: m.clientHostName,
clientUserName: m.clientUserName,
audit: m.audit,
auditFifo: m.auditFifo,
auditShim: m.auditShim,
}
}
| [
"\"KCTLDBG_CONTAINERDV1_SHIM\""
]
| []
| [
"KCTLDBG_CONTAINERDV1_SHIM"
]
| [] | ["KCTLDBG_CONTAINERDV1_SHIM"] | go | 1 | 0 | |
train/train_wc.py | # Original work Copyright 2018 The Google AI Language Team Authors.
# Modified work Copyright 2019 Rowan Zellers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Training script! """
import tensorflow as tf
import sys
import os
sys.path.append('/data/home/share1/gpt2-ml')
from train.dataloader import input_fn_builder
from train.modeling import model_fn_builder, GroverConfig
flags = tf.flags
FLAGS = flags.FLAGS
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0'
## Required parameters
flags.DEFINE_string(
"config_file", 'configs/mega.json',
"The config json file corresponding to the pre-trained news model. "
"This specifies the model architecture.")
flags.DEFINE_string(
"input_file", None,
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained model).")
flags.DEFINE_integer(
"max_seq_length", 1024,
"The maximum total input sequence length after BPE tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded. Must match data generation.")
flags.DEFINE_integer("train_batch_size", 1, "Total batch size for training.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for adafactor.")
flags.DEFINE_integer("num_train_steps", 110000, "Number of training steps.")
flags.DEFINE_integer("num_warmup_steps", 500, "Number of warmup steps.")
flags.DEFINE_integer("save_checkpoints_steps", 1600,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1600,
"How many steps to make in each estimator call.")
flags.DEFINE_integer("max_eval_steps", 100, "Maximum number of eval steps.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
news_config = GroverConfig.from_json_file(FLAGS.config_file)
tf.gfile.MakeDirs(FLAGS.output_dir)
input_files = []
print(FLAGS.input_file.split(","))
for input_pattern in FLAGS.input_file.split(","):
input_files.extend(tf.gfile.Glob(input_pattern))
tf.logging.info("*** Input Files ***")
for input_file in input_files:
tf.logging.info(" %s" % input_file)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
keep_checkpoint_max=None,
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
model_fn = model_fn_builder(news_config, init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=FLAGS.num_train_steps,
num_warmup_steps=FLAGS.num_warmup_steps,
use_tpu=FLAGS.use_tpu,
)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.train_batch_size,
params={'model_dir': FLAGS.output_dir}
)
tf.logging.info("***** Running training *****")
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
train_input_fn = input_fn_builder(
input_files=input_files,
seq_length=FLAGS.max_seq_length,
is_training=True)
print("Start trainning.............................................")
estimator.train(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps)
if __name__ == "__main__":
flags.mark_flag_as_required("input_file")
flags.mark_flag_as_required("output_dir")
tf.app.run()
| []
| []
| [
"TF_CPP_MIN_LOG_LEVEL"
]
| [] | ["TF_CPP_MIN_LOG_LEVEL"] | python | 1 | 0 | |
tests/test_config_loader.py | # -*- coding: utf-8 -*-
# #!/usr/bin/env python
# """Tests for `lincolntools-config` package."""
import os
import pytest
import yaml
from lincolntools.config import ConfigLoader
FIXTURE_DIR = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'data',
)
@pytest.mark.filterwarnings("ignore:MarkInfo")
@pytest.mark.datafiles(
os.path.join(FIXTURE_DIR, 'configs', 'configs_sample', 'config.yaml')
)
def test_read_single_file(datafiles):
filenames = [str(f) for f in datafiles.listdir()]
os.environ['ENV_VALUE_TEST'] = 'test'
config = ConfigLoader.load_from_file(filenames[0])
assert config['mode'] == 'dev'
assert config['foo']['test_env'] == 'test'
assert type(config['data']['float']) == float
assert config['data']['float'] == 0.1
assert config['data']['bool'] is False
assert len(config['data']['list']) == 3
@pytest.mark.filterwarnings("ignore:MarkInfo")
@pytest.mark.datafiles(
os.path.join(FIXTURE_DIR, 'configs', 'configs_sample')
)
def test_read_folder(datafiles):
os.environ['ENV_VALUE_TEST'] = 'test'
config = ConfigLoader.load_from_folder(datafiles.strpath)
assert config['mode'] == 'dev'
assert type(config['data']) == dict
assert type(config['part1']) == dict
assert config['part1']['port'] == 12345
assert type(config['part2']) == dict
assert config['part2']['classic']['project_dir'] == '/path/to/project'
assert type(config['foo']) == dict
assert config['foo']['test_env'] == 'test'
assert type(config['data']['float']) == float
assert config['data']['float'] == 0.1
assert config['data']['bool'] is False
assert len(config['data']['list']) == 3
@pytest.mark.filterwarnings("ignore:MarkInfo")
@pytest.mark.datafiles(
os.path.join(FIXTURE_DIR, 'configs', 'configs_sample')
)
def test_anchor_references(datafiles):
config = ConfigLoader.load_from_folder(datafiles.strpath)
assert config['mode'] == 'dev' # simple test
assert config['part2']['classic']['project_dir'] == '/path/to/project'
# testing complex part
assert config['part2']['complex']['data_dir'] == '/path/to/project/data'
assert config['part2']['complex']['tests_dir'] == '/path/to/project/tests'
assert config['part2']['complex']['subproject_dir'] == '/path/to/project/subproject'
assert config['part2']['complex']['subproject_data_dir'] == '/path/to/project/subproject/data'
assert config['part2']['complex']['subproject_tests_dir'] == '/path/to/project/subproject/tests'
@pytest.mark.filterwarnings("ignore:MarkInfo")
@pytest.mark.datafiles(
os.path.join(FIXTURE_DIR, 'configs', 'configs_sample', 'with_difference')
)
def test_template_not_match(datafiles):
config_path = os.path.join(datafiles.strpath, 'diff.yaml')
with pytest.raises(ValueError):
ConfigLoader.check_template_match(config_path)
@pytest.mark.filterwarnings("ignore:MarkInfo")
@pytest.mark.datafiles(
os.path.join(FIXTURE_DIR, 'configs', 'configs_sample', 'with_difference')
)
def test_template_not_exists(datafiles):
config_path = os.path.join(datafiles.strpath, 'anything.yaml')
with pytest.raises(FileNotFoundError):
ConfigLoader.check_template_match(config_path)
@pytest.mark.filterwarnings("ignore:MarkInfo")
@pytest.mark.datafiles(
os.path.join(FIXTURE_DIR, 'configs', 'configs_sample', 'concat')
)
def test_concatenate_multiple_files(datafiles):
os.environ['ENV_VALUE_TEST'] = 'test'
config = ConfigLoader.load_from_folder(datafiles.strpath, concatenate=True)
assert config['concat2']['other_dir'] == "/path/to/project/other"
assert config['concat2']['subdir_concat'] == "/path/to/project/subproject/subdir"
with pytest.raises(yaml.composer.ComposerError):
config = ConfigLoader.load_from_folder(datafiles.strpath, concatenate=False)
| []
| []
| [
"ENV_VALUE_TEST"
]
| [] | ["ENV_VALUE_TEST"] | python | 1 | 0 | |
Big_Data_Platform/Docker/HMI/Plot_Stresstest/src/L1_C_Reporting.py | import os
from classes.CKafkaPC import KafkaPC
def plot_monitoring(msg):
"""
localhost:8003/plotData/
"""
msgdata = new_c.decode_msg(msg)
# plot tells whether the message is sent to the plotData or plotMultipleData topic
# x_label is the label of the x-axis
# x_data is the data for the x-axis
# x_int_to_date: set to True if x_data is an integer value that should be converted to datetime
# y - the y-axis data
new_data_point = {
"plot": "single",
"x_label": "id",
"source": "monitoring",
"x_data": msgdata["id"],
"x_int_to_date": False,
"y": {"x": msgdata["x"], "y": msgdata["y"]},
}
new_c.send_msg(new_data_point)
print("monitoring message sent")
def plot_model_evaluation_multi(msg):
"""
localhost:8003/plotMultipleData/
"""
msgdata = new_c.decode_msg(msg)
splitData = msgdata["algorithm"].split("(")
# plot tells whether the message is sent to the plotData or plotMultipleData topic
# x_label is the label of the x-axis
# x_data is the data for the x-axis
# x_int_to_date: set to True if x_data is an integer value that should be converted to datetime
# y - the y-axis data
new_data_point = {
"plot": "multi",
"multi_filter": "algorithm",
"source": "model_evaluation",
"x_label": "id",
"x_data": msgdata["id"],
"x_int_to_date": False,
"y": {"new_x": msgdata["new_x"], "algorithm": splitData[0]},
}
new_c.send_msg(new_data_point)
print("model evaluation message sent")
def plot_model_application(msg):
"""
localhost:8003/plotData/
"""
msgdata = new_c.decode_msg(msg)
new_data_point = {
"plot": "single",
"source": "model_application",
"x_label": "id",
"x_data": msgdata["id"],
"x_int_to_date": False,
"y": {
"pred_y": msgdata["pred_y"],
"rmse": msgdata["rmse"],
"CPU_ms": msgdata["CPU_ms"],
"RAM": msgdata["RAM"],
},
}
new_c.send_msg(new_data_point)
print("model application message sent")
def plot_model_application_multi(msg):
msgdata = new_c.decode_msg(msg)
new_data_point = {
"plot": "multi",
"multi_filter": "model_name",
"source": "model_application",
"x_label": "id",
"x_data": msgdata["id"],
"x_int_to_date": False,
"y": {
"model_name": msgdata["model_name"],
"pred_y": msgdata["pred_y"],
"rmse": msgdata["rmse"],
"CPU_ms": msgdata["CPU_ms"],
"RAM": msgdata["RAM"],
},
}
new_c.send_msg(new_data_point)
print("model application message sent")
env_vars = {
"config_path": os.getenv("config_path"),
"config_section": os.getenv("config_section"),
}
new_c = KafkaPC(**env_vars)
plot_dict = new_c.config["PLOT_TOPIC"]
try:
while True:
msg = new_c.consumer.poll(0.1)
if msg is None:
continue
elif msg.error() is not None:
print(f"Error occured: {str(msg.error())}")
else:
# tests if msg.topic is in plot_dict and calls function from dict
try:
if plot_dict.get(msg.topic()) is not None:
eval(plot_dict[msg.topic()])(msg)
except Exception as e:
print(
f"Processing Topic: {msg.topic()} with Function: {plot_dict[msg.topic()]}\n Error: {e}"
)
except KeyboardInterrupt:
pass
finally:
new_c.consumer.close()
| []
| []
| [
"config_path",
"config_section"
]
| [] | ["config_path", "config_section"] | python | 2 | 0 | |
s2search_score.py | from s2search.rank import S2Ranker
import os
import json
import numpy as np
from pathlib import Path
# data_dir = './s2search_data'
s2_dir = './s2search_data'
root_dir = '/Users/ayuee/Documents/GitHub/XAI_PROJECT/data_process/masking'
features = ['title', 'abstract', 'venue', 'authors', 'year', 'n_citations', 'full']
papers_example = [
{
'title': 'Jumping NLP Curves: A Review of Natural Language Processing Research',
'abstract': 'Natural language processing (NLP) is a theory-motivated range of computational techniques for '
'the automatic analysis and representation of human language. NLP research has evolved from the '
'era of punch cards and batch processing (in which the analysis of a sentence could take up to 7 '
'minutes) to the era of Google and the likes of it (in which millions of webpages can be '
'processed in less than a second). This review paper draws on recent developments in NLP research '
'to look at the past, present, and future of NLP technology in a new light. Borrowing the '
'paradigm of jumping curves from the field of business management and marketing prediction, '
'this survey article reinterprets the evolution of NLP research as the intersection of three '
'overlapping curves-namely Syntactics, Semantics, and Pragmatics Curveswhich will eventually lead '
'NLP research to evolve into natural language understanding.',
'venue': 'IEEE Computational intelligence ',
'authors': ['E Cambria', 'B White'],
'year': 2014,
'n_citations': 900,
}
]
def S2_Rank(related_keywords, paper_dict_list, file=s2_dir):
s2ranker = S2Ranker(file)
score = s2ranker.score(related_keywords, paper_dict_list)
return score
def S2_open_json(path):
data = []
with open(path) as f:
Lines = f.readlines()
for line in Lines:
line_strip = line.strip()
jso = json.loads(line_strip, strict=False)
data.append(jso)
return S2_Rank('machine learning', data, s2_dir)
def S2_save_score_as_np(s2score, feature):
base_dir = str(Path(__file__).resolve().parent)
data_dir = os.path.join(base_dir)
os.environ.setdefault("DATA_DIR", data_dir)
output_data_file_name = os.path.join(os.environ.get("DATA_DIR"), "score" + feature)
np.save(output_data_file_name, s2score)
def S2_get_score(root_dir):
score = []
for root, dirs, files in os.walk(root_dir):
for name in files:
if name.endswith((".json")):
for feature in features:
if feature in name:
full_path = os.path.join(root, name)
print(full_path)
score = S2_open_json(full_path)
score = np.array(score)
S2_save_score_as_np(score, feature)
S2_get_score(root_dir)
# print(S2_Rank('NLP', papers_example, s2_dir))
# score = np.load('/Users/ayuee/Documents/GitHub/XAI_PROJECT/data_process/masking/full_Score.npy')
# print(score, np.shape(score))
| []
| []
| [
"DATA_DIR"
]
| [] | ["DATA_DIR"] | python | 1 | 0 | |
increment.go | package main
import (
"fmt"
"io/ioutil"
"os"
"strconv"
"strings"
"github.com/hashicorp/consul/api"
)
func main() {
// init
fmt.Println("Incrementing...")
// key setting
key := ""
if os.Getenv("INCREMENT_KEY") != "" {
key = os.Getenv("INCREMENT_KEY")
} else {
panic("Error: no INCREMENT_KEY environment variable")
}
// add file
addFile := "/local/add"
if len(os.Args) > 1 {
addFile = os.Args[1]
}
// increment setting
add := 1
if fileExists(addFile) {
file, err := ioutil.ReadFile(addFile)
if err != nil {
panic("Error: cannot read file")
}
add, err = strconv.Atoi(strings.TrimSpace(string(file[:])))
if err != nil {
panic("Error: add file not a number")
}
}
// consul client
client, err := api.NewClient(api.DefaultConfig())
if err != nil {
panic(err)
}
// key value client
kv := client.KV()
// lookup value
pair, _, err := kv.Get(key, nil)
if err != nil {
panic(err)
}
// convert value to int
i, err := strconv.Atoi(string(pair.Value[:]))
if err != nil {
panic(err)
}
// increment int
i = i + add
// set incremented value
p := &api.KVPair{Key: key, Value: []byte(strconv.Itoa(i))}
_, err = kv.Put(p, nil)
if err != nil {
panic(err)
}
}
func fileExists(file string) bool {
if _, err := os.Stat(file); err != nil {
if os.IsNotExist(err) {
return false
}
}
return true
}
| [
"\"INCREMENT_KEY\"",
"\"INCREMENT_KEY\""
]
| []
| [
"INCREMENT_KEY"
]
| [] | ["INCREMENT_KEY"] | go | 1 | 0 | |
sirius/docker.py | from __future__ import print_function
import httplib
import json
import os
from distutils.version import LooseVersion
from itertools import chain
from itertools import imap
import requests
from fabric.api import local
from git import Repo
from negowl import ContainerNotFound
from negowl import factory
from sirius.utils import group_by_2
from sirius.utils import parse_list
DEIMOS = 'http://SCDFIS01:9200'
def docker_deploy(name, image, server=None, ports=None, volumes=None, env=None, cmd="", hostname="sirius"):
"""deploy a docker image on some server
will create the container if it does not exist, otherwise update the existing container
Example:
sirius docker_deploy:meerkat,meerkat:0.0.1,server=scdfis01,ports="3141;8080;8900;8081",env="DEBUG\=1;PATH\=2"
:param name: container name
:param image: image with tag, like: 'CentOS:7.0'
:param server: which server will deploy the container, default:localhost
:param ports: a single port is mapped as <port>:80; alternatively accepts a list of public/private pairs,
like: public1;private1;public2;private2
:param volumes: like: host_file1;container_file1;host_file2;container_file2
:param env: var=10;DEBUG=true
:param cmd: `class`:`str`
:param hostname:
:return:
"""
if server is None:
server = 'localhost'
client = factory.get(server)
try:
client.update_image_2(name, image)
except ContainerNotFound:
container_ports = []
if ports:
ports = parse_list(ports)
if len(ports) == 1:
container_ports = [dict(type='tcp', privateport=80, publicport=int(ports[0]))]
else:
container_ports = [dict(type='tcp', publicport=int(pub), privateport=int(pri)) for pub, pri in
group_by_2(ports)]
container_volumes = []
if volumes:
container_volumes = [dict(hostvolume=s, containervolume=t) for s, t in group_by_2(parse_list(volumes))]
if env:
env = parse_list(env)
code, result = client.create_container(name, image, hostname=hostname, ports=container_ports,
volumes=container_volumes, env=env,
command=cmd)
if httplib.OK != code:
raise Exception("create container failure, code {0}, message: {1}".format(code, result))
def load_settings(src):
full_path = os.path.join(src, 'matrix.json')
with open(full_path, 'rb') as f:
return json.load(f)
def docker_image_name(src='.', release=False):
"""get building docker image name
parse matrix.json, and get image:tag
Example:
IMAGE_NAME=$(sirius docker_image_name | head -n 1)
:param src: the dir which container matrix.json, default is current work directory
:param release: generate release image name
:return:
"""
settings = load_settings(src)
name = settings.get('name')
tag = settings.get('release_tag', 'release1') if release else settings.get('tag', 'build1')
print('{name}:{tag}'.format(name=name, tag=tag))
def docker_build_image(workspace=None, matrix_version=None):
"""build a new image
Example:
sirius docker_build_image[:workspace]
:param workspace: the source code directory; by default it is read from the WORKSPACE env variable.
:param matrix_version: the matrix builder image version, default is 0.0.4
:return:
"""
if not workspace:
workspace = os.environ.get('WORKSPACE', '.')
if not matrix_version:
matrix_version = '0.0.4'
docker_prepare_build(workspace)
cmd = ('docker run --rm -v {workspace}:/home/matrix -v /usr/bin/docker:/usr/bin/docker '
'-v /var/run/docker.sock:/var/run/docker.sock docker.neg/matrix:{matrix} /usr/local/bin/matrix.sh')
local(cmd.format(workspace=workspace, matrix=matrix_version))
def docker_new_build_no(project_slug=None):
"""get new build no
it's used to build docker image
Example:
sirius docker_new_build_no:meerkat | head -n 1
:param project_slug: project name
:return:
"""
url = '{0}/build/{1}'.format(DEIMOS, project_slug)
response = requests.post(url)
assert response.status_code == httplib.OK
obj = response.json()
print(obj['build_id'])
return obj['build_id']
def docker_prepare_build(workspace="."):
"""prepare build docker image
generate new docker image tag, and rewrite the matrix.json
:param workspace: the matrix.json location, default './matrix.json'
:return:
"""
workspace = workspace or '.'
matrix_json = os.path.join(workspace, 'matrix.json')
if not os.path.isfile(matrix_json):
raise ValueError('matrix file is not exists, matrix_json={0}'.format(matrix_json))
repo = Repo(workspace)
commit = str(repo.head.commit.hexsha[:5])
with open(matrix_json, 'rb') as f:
obj = json.load(f)
project_slug = obj['name']
build_no = docker_new_build_no(project_slug)
tag = obj['tag']
version = LooseVersion(tag)
obj['tag'] = '.'.join(imap(str, chain(version.version[:3], ['build{0}'.format(build_no)])))
obj['release_tag'] = '.'.join(
imap(str, chain(version.version[:3], ['release{0}.{1}'.format(build_no, commit)])))
with open(matrix_json, 'wb') as f:
json.dump(obj, f)
| []
| []
| [
"WORKSPACE"
]
| [] | ["WORKSPACE"] | python | 1 | 0 | |
src/main/java/org/nmap4j/core/nmap/NMapProperties.java | /*
* Copyright (c) 2010, nmap4j.org
*
* All rights reserved.
*
* This license covers only the Nmap4j library. To use this library with
* Nmap, you must also comply with Nmap's license. Including Nmap within
* commercial applications or appliances generally requires the purchase
* of a commercial Nmap license (see http://nmap.org/book/man-legal.html).
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the nmap4j.org nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package org.nmap4j.core.nmap;
import java.io.File ;
import org.nmap4j.core.flags.Flag;
/**
* This class is used to manage the path to nmap. This class attempts to look
* for the environmental variable NMAP_HOME. If that is not set it is expected
* that the user set this location via this class.
* <p>
* Essentially the path needs to be to the location where the "bin" dir that
* contains the nmap binary is. Usually, when dealing with most nmap installs,
 * this will also be the location where the "share" dir is located. It's ideal
 * to identify both of these dirs as this allows you to specify the right
* share dir for a binary. Otherwise there is a risk that nmap will pick up
* a default in the system and this may introduce inconsistencies.
* <p>
* If you are planning to use the system property NMAP_HOME to set your
* NMap path, use the no arg constructor. Otherwise use the constructor with
* the String. If you want flexibility, set the path, use the no arg
* constructor and then use the setPath(String) method.
*
* @author jsvede
*
*/
public class NMapProperties {
private String pathToNMap ;
private final String BIN = "bin" ;
private final String SHARE = "share" ;
private final String COMMAND = "nmap" ;
private String sudoUser ;
private String sudoUserPassword ;
/**
* Constructs an instance of NMapProperties and looks in the environment
* properties for NMAP_HOME. If it is not found, the path is initialized
* to null and the API assumes that you will set it manually. See setPath().
*/
public NMapProperties() {
String path = System.getenv().get( "NMAP_HOME" ) ;
if( path != null && path.length() > 0 ) {
pathToNMap = path ;
}
}
/**
 * Constructs an instance of NMapProperties using the path passed.
* @param path
*/
public NMapProperties( String path ) {
pathToNMap = path ;
}
/**
* Returns the current path.
* @return
*/
public String getPath() {
return pathToNMap ;
}
/**
 * Sets the path to the bin dir where nmap can be found. This is also the path
* to the share dir which contains important files for nmap.
* <p>
* For example, if the nmap bin dir is in /usr/local/share/bin/nmap the
* path you would set into this method is /usr/local/share .
* @param pathToBinDir - /the/path/to/nmapbindir.
*/
public void setPath( String pathToBinDir ) {
pathToNMap = pathToBinDir ;
}
/**
 * Returns the expected location of the bin dir relative to the path set
* or passed in at construction time. The value returned by this method is
* equivalent to that path variable + filesystem dependent separator + bin .
* @return
*/
public String getBinDir() {
return pathToNMap + File.separator + BIN ;
}
/**
* Returns the expected location of the share dir relative to the path set
* or passed in at construction time. The value returned by this method is
* equivalent to the path variable + filesystem dependent separator + share.
* @return
*/
public String getShareDir() {
return pathToNMap + File.separator + SHARE + File.separator + "nmap" ;
}
/**
* This returns the full path to the nmap version to be executed.
* @return
*/
public String getFullyFormattedCommand() {
StringBuffer command = new StringBuffer() ;
command.append( getBinDir() ) ;
command.append( File.separator ) ;
command.append( COMMAND ) ;
command.append( " " ) ;
command.append( Flag.DATADIR ) ;
command.append( " " ) ;
command.append( getShareDir() ) ;
return command.toString() ;
}
}
| []
| []
| []
| [] | [] | java | 0 | 0 | |
config/read.go | package config
import (
"context"
"crypto/tls"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"time"
"github.com/go-ini/ini"
"golang.org/x/net/http/httpproxy"
"github.com/pganalyze/collector/util"
"github.com/hashicorp/go-retryablehttp"
)
const DefaultAPIBaseURL = "https://api.pganalyze.com"
func getDefaultConfig() *ServerConfig {
config := &ServerConfig{
APIBaseURL: DefaultAPIBaseURL,
SectionName: "default",
QueryStatsInterval: 60,
MaxCollectorConnections: 10,
}
// The environment variables are the default way to configure when running inside a Docker container.
if apiKey := os.Getenv("PGA_API_KEY"); apiKey != "" {
config.APIKey = apiKey
}
if apiBaseURL := os.Getenv("PGA_API_BASEURL"); apiBaseURL != "" {
config.APIBaseURL = apiBaseURL
}
if systemID := os.Getenv("PGA_API_SYSTEM_ID"); systemID != "" {
config.SystemID = systemID
}
if systemType := os.Getenv("PGA_API_SYSTEM_TYPE"); systemType != "" {
config.SystemType = systemType
}
if systemScope := os.Getenv("PGA_API_SYSTEM_SCOPE"); systemScope != "" {
config.SystemScope = systemScope
}
if systemScopeFallback := os.Getenv("PGA_API_SYSTEM_SCOPE_FALLBACK"); systemScopeFallback != "" {
config.SystemScopeFallback = systemScopeFallback
}
if enableReports := os.Getenv("PGA_ENABLE_REPORTS"); enableReports != "" && enableReports != "0" {
config.EnableReports = true
}
if disableLogs := os.Getenv("PGA_DISABLE_LOGS"); disableLogs != "" && disableLogs != "0" {
config.DisableLogs = true
}
if disableActivity := os.Getenv("PGA_DISABLE_ACTIVITY"); disableActivity != "" && disableActivity != "0" {
config.DisableActivity = true
}
if enableLogExplain := os.Getenv("PGA_ENABLE_LOG_EXPLAIN"); enableLogExplain != "" && enableLogExplain != "0" {
config.EnableLogExplain = true
}
if dbURL := os.Getenv("DB_URL"); dbURL != "" {
config.DbURL = dbURL
}
if dbName := os.Getenv("DB_NAME"); dbName != "" {
config.DbName = dbName
}
if dbAllNames := os.Getenv("DB_ALL_NAMES"); dbAllNames == "1" {
config.DbAllNames = true
}
if dbUsername := os.Getenv("DB_USERNAME"); dbUsername != "" {
config.DbUsername = dbUsername
}
if dbPassword := os.Getenv("DB_PASSWORD"); dbPassword != "" {
config.DbPassword = dbPassword
}
if dbHost := os.Getenv("DB_HOST"); dbHost != "" {
config.DbHost = dbHost
}
if dbPort := os.Getenv("DB_PORT"); dbPort != "" {
config.DbPort, _ = strconv.Atoi(dbPort)
}
if dbSslMode := os.Getenv("DB_SSLMODE"); dbSslMode != "" {
config.DbSslMode = dbSslMode
}
if dbSslRootCert := os.Getenv("DB_SSLROOTCERT"); dbSslRootCert != "" {
config.DbSslRootCert = dbSslRootCert
}
if dbSslRootCertContents := os.Getenv("DB_SSLROOTCERT_CONTENTS"); dbSslRootCertContents != "" {
config.DbSslRootCertContents = dbSslRootCertContents
}
if dbSslCert := os.Getenv("DB_SSLCERT"); dbSslCert != "" {
config.DbSslCert = dbSslCert
}
if dbSslCertContents := os.Getenv("DB_SSLCERT_CONTENTS"); dbSslCertContents != "" {
config.DbSslCertContents = dbSslCertContents
}
if dbSslKey := os.Getenv("DB_SSLKEY"); dbSslKey != "" {
config.DbSslKey = dbSslKey
}
if dbSslKeyContents := os.Getenv("DB_SSLKEY_CONTENTS"); dbSslKeyContents != "" {
config.DbSslKeyContents = dbSslKeyContents
}
if awsRegion := os.Getenv("AWS_REGION"); awsRegion != "" {
config.AwsRegion = awsRegion
}
if awsAccountID := os.Getenv("AWS_ACCOUNT_ID"); awsAccountID != "" {
config.AwsAccountID = awsAccountID
}
if awsInstanceID := os.Getenv("AWS_INSTANCE_ID"); awsInstanceID != "" {
config.AwsDbInstanceID = awsInstanceID
}
if awsAccessKeyID := os.Getenv("AWS_ACCESS_KEY_ID"); awsAccessKeyID != "" {
config.AwsAccessKeyID = awsAccessKeyID
}
if awsSecretAccessKey := os.Getenv("AWS_SECRET_ACCESS_KEY"); awsSecretAccessKey != "" {
config.AwsSecretAccessKey = awsSecretAccessKey
}
if awsAssumeRole := os.Getenv("AWS_ASSUME_ROLE"); awsAssumeRole != "" {
config.AwsAssumeRole = awsAssumeRole
}
if awsWebIdentityTokenFile := os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE"); awsWebIdentityTokenFile != "" {
config.AwsWebIdentityTokenFile = awsWebIdentityTokenFile
}
if awsRoleArn := os.Getenv("AWS_ROLE_ARN"); awsRoleArn != "" {
config.AwsRoleArn = awsRoleArn
}
if awsEndpointSigningRegion := os.Getenv("AWS_ENDPOINT_SIGNING_REGION"); awsEndpointSigningRegion != "" {
config.AwsEndpointSigningRegion = awsEndpointSigningRegion
}
if awsEndpointRdsURL := os.Getenv("AWS_ENDPOINT_RDS_URL"); awsEndpointRdsURL != "" {
config.AwsEndpointRdsURL = awsEndpointRdsURL
}
if awsEndpointEc2URL := os.Getenv("AWS_ENDPOINT_EC2_URL"); awsEndpointEc2URL != "" {
config.AwsEndpointEc2URL = awsEndpointEc2URL
}
if awsEndpointCloudwatchURL := os.Getenv("AWS_ENDPOINT_CLOUDWATCH_URL"); awsEndpointCloudwatchURL != "" {
config.AwsEndpointCloudwatchURL = awsEndpointCloudwatchURL
}
if awsEndpointCloudwatchLogsURL := os.Getenv("AWS_ENDPOINT_CLOUDWATCH_LOGS_URL"); awsEndpointCloudwatchLogsURL != "" {
config.AwsEndpointCloudwatchLogsURL = awsEndpointCloudwatchLogsURL
}
if azureDbServerName := os.Getenv("AZURE_DB_SERVER_NAME"); azureDbServerName != "" {
config.AzureDbServerName = azureDbServerName
}
if azureEventhubNamespace := os.Getenv("AZURE_EVENTHUB_NAMESPACE"); azureEventhubNamespace != "" {
config.AzureEventhubNamespace = azureEventhubNamespace
}
if azureEventhubName := os.Getenv("AZURE_EVENTHUB_NAME"); azureEventhubName != "" {
config.AzureEventhubName = azureEventhubName
}
if azureADTenantID := os.Getenv("AZURE_AD_TENANT_ID"); azureADTenantID != "" {
config.AzureADTenantID = azureADTenantID
}
if azureADClientID := os.Getenv("AZURE_AD_CLIENT_ID"); azureADClientID != "" {
config.AzureADClientID = azureADClientID
}
if azureADClientSecret := os.Getenv("AZURE_AD_CLIENT_SECRET"); azureADClientSecret != "" {
config.AzureADClientSecret = azureADClientSecret
}
if azureADCertificatePath := os.Getenv("AZURE_AD_CERTIFICATE_PATH"); azureADCertificatePath != "" {
config.AzureADCertificatePath = azureADCertificatePath
}
if azureADCertificatePassword := os.Getenv("AZURE_AD_CERTIFICATE_PASSWORD"); azureADCertificatePassword != "" {
config.AzureADCertificatePassword = azureADCertificatePassword
}
if gcpCloudSQLInstanceID := os.Getenv("GCP_CLOUDSQL_INSTANCE_ID"); gcpCloudSQLInstanceID != "" {
config.GcpCloudSQLInstanceID = gcpCloudSQLInstanceID
}
if gcpPubsubSubscription := os.Getenv("GCP_PUBSUB_SUBSCRIPTION"); gcpPubsubSubscription != "" {
config.GcpPubsubSubscription = gcpPubsubSubscription
}
if gcpCredentialsFile := os.Getenv("GCP_CREDENTIALS_FILE"); gcpCredentialsFile != "" {
config.GcpCredentialsFile = gcpCredentialsFile
}
if gcpProjectID := os.Getenv("GCP_PROJECT_ID"); gcpProjectID != "" {
config.GcpProjectID = gcpProjectID
}
if logLocation := os.Getenv("LOG_LOCATION"); logLocation != "" {
config.LogLocation = logLocation
}
if logSyslogServer := os.Getenv("LOG_SYSLOG_SERVER"); logSyslogServer != "" {
config.LogSyslogServer = logSyslogServer
}
// Note: We don't support LogDockerTail here since it would require the "docker"
// binary inside the pganalyze container (as well as full Docker access), instead
// the approach for using pganalyze as a sidecar container alongside Postgres
// currently requires writing to a file and then mounting that as a volume
// inside the pganalyze container.
if ignoreTablePattern := os.Getenv("IGNORE_TABLE_PATTERN"); ignoreTablePattern != "" {
config.IgnoreTablePattern = ignoreTablePattern
}
if ignoreSchemaRegexp := os.Getenv("IGNORE_SCHEMA_REGEXP"); ignoreSchemaRegexp != "" {
config.IgnoreSchemaRegexp = ignoreSchemaRegexp
}
if queryStatsInterval := os.Getenv("QUERY_STATS_INTERVAL"); queryStatsInterval != "" {
config.QueryStatsInterval, _ = strconv.Atoi(queryStatsInterval)
}
if maxCollectorConnections := os.Getenv("MAX_COLLECTOR_CONNECTION"); maxCollectorConnections != "" {
config.MaxCollectorConnections, _ = strconv.Atoi(maxCollectorConnections)
}
if skipIfReplica := os.Getenv("SKIP_IF_REPLICA"); skipIfReplica != "" && skipIfReplica != "0" {
config.SkipIfReplica = true
}
if filterLogSecret := os.Getenv("FILTER_LOG_SECRET"); filterLogSecret != "" {
config.FilterLogSecret = filterLogSecret
}
if filterQuerySample := os.Getenv("FILTER_QUERY_SAMPLE"); filterQuerySample != "" {
config.FilterQuerySample = filterQuerySample
}
if filterQueryText := os.Getenv("FILTER_QUERY_TEXT"); filterQueryText != "" {
config.FilterQueryText = filterQueryText
}
if httpProxy := os.Getenv("HTTP_PROXY"); httpProxy != "" {
config.HTTPProxy = httpProxy
}
if httpProxy := os.Getenv("http_proxy"); httpProxy != "" {
config.HTTPProxy = httpProxy
}
if httpsProxy := os.Getenv("HTTPS_PROXY"); httpsProxy != "" {
config.HTTPSProxy = httpsProxy
}
if httpsProxy := os.Getenv("https_proxy"); httpsProxy != "" {
config.HTTPSProxy = httpsProxy
}
if noProxy := os.Getenv("NO_PROXY"); noProxy != "" {
config.NoProxy = noProxy
}
if noProxy := os.Getenv("no_proxy"); noProxy != "" {
config.NoProxy = noProxy
}
return config
}
func CreateHTTPClient(conf ServerConfig, logger *util.Logger, retry bool) *http.Client {
requireSSL := conf.APIBaseURL == DefaultAPIBaseURL
proxyConfig := httpproxy.Config{
HTTPProxy: conf.HTTPProxy,
HTTPSProxy: conf.HTTPSProxy,
NoProxy: conf.NoProxy,
}
transport := &http.Transport{
Proxy: func(req *http.Request) (*url.URL, error) {
return proxyConfig.ProxyFunc()(req.URL)
},
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
DualStack: true,
}).DialContext,
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
}
if requireSSL {
transport.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
matchesProxyURL := false
if proxyConfig.HTTPProxy != "" {
proxyURL, err := url.Parse(proxyConfig.HTTPProxy)
if err == nil && proxyURL.Host == addr {
matchesProxyURL = true
}
}
if proxyConfig.HTTPSProxy != "" {
proxyURL, err := url.Parse(proxyConfig.HTTPSProxy)
if err == nil && proxyURL.Host == addr {
matchesProxyURL = true
}
}
// Require a secure connection for everything except proxies
if !matchesProxyURL && !strings.HasSuffix(addr, ":443") {
return nil, fmt.Errorf("Unencrypted connection is not permitted by pganalyze configuration")
}
return (&net.Dialer{Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, DualStack: true}).DialContext(ctx, network, addr)
}
transport.TLSClientConfig = &tls.Config{MinVersion: tls.VersionTLS12}
}
if retry {
client := retryablehttp.NewClient()
client.RetryWaitMin = 1 * time.Second
client.RetryWaitMax = 30 * time.Second
client.RetryMax = 4
client.Logger = nil
client.HTTPClient.Timeout = 120 * time.Second
client.HTTPClient.Transport = transport
return client.StandardClient()
// Note: StandardClient() only acts as a passthrough, handing the request to
// retryablehttp.Client whose nested HTTP client ends up using our custom transport.
} else {
return &http.Client{
Timeout: 120 * time.Second,
Transport: transport,
}
}
}
// CreateEC2IMDSHTTPClient - Create HTTP client for EC2 instance meta data service (IMDS)
func CreateEC2IMDSHTTPClient(conf ServerConfig) *http.Client {
// Match https://github.com/aws/aws-sdk-go/pull/3066
return &http.Client{
Timeout: 1 * time.Second,
}
}
func writeValueToTempfile(value string) (string, error) {
file, err := ioutil.TempFile("", "")
if err != nil {
return "", err
}
_, err = file.WriteString(value)
if err != nil {
return "", err
}
err = file.Close()
if err != nil {
return "", err
}
return file.Name(), nil
}
func preprocessConfig(config *ServerConfig) (*ServerConfig, error) {
var err error
host := config.GetDbHost()
if strings.HasSuffix(host, ".rds.amazonaws.com") {
parts := strings.SplitN(host, ".", 4)
if len(parts) == 4 && parts[3] == "rds.amazonaws.com" { // Safety check for any escaping issues
if config.AwsDbInstanceID == "" {
config.AwsDbInstanceID = parts[0]
}
if config.AwsAccountID == "" {
config.AwsAccountID = parts[1]
}
if config.AwsRegion == "" {
config.AwsRegion = parts[2]
}
}
} else if strings.HasSuffix(host, ".postgres.database.azure.com") {
parts := strings.SplitN(host, ".", 2)
if len(parts) == 2 && parts[1] == "postgres.database.azure.com" { // Safety check for any escaping issues
if config.AzureDbServerName == "" {
config.AzureDbServerName = parts[0]
}
}
}
// This is primarily for backwards compatibility when using the IP address of an instance
// combined with only specifying its name, but not its region.
if config.AwsDbInstanceID != "" && config.AwsRegion == "" {
config.AwsRegion = "us-east-1"
}
if config.GcpCloudSQLInstanceID != "" && strings.Count(config.GcpCloudSQLInstanceID, ":") == 2 {
instanceParts := strings.SplitN(config.GcpCloudSQLInstanceID, ":", 3)
config.GcpProjectID = instanceParts[0]
config.GcpCloudSQLInstanceID = instanceParts[2]
}
dbNameParts := []string{}
for _, s := range strings.Split(config.DbName, ",") {
dbNameParts = append(dbNameParts, strings.TrimSpace(s))
}
config.DbName = dbNameParts[0]
if len(dbNameParts) == 2 && dbNameParts[1] == "*" {
config.DbAllNames = true
} else {
config.DbExtraNames = dbNameParts[1:]
}
if config.DbSslRootCertContents != "" {
config.DbSslRootCert, err = writeValueToTempfile(config.DbSslRootCertContents)
if err != nil {
return config, err
}
}
if config.DbSslCertContents != "" {
config.DbSslCert, err = writeValueToTempfile(config.DbSslCertContents)
}
if config.DbSslKeyContents != "" {
config.DbSslKey, err = writeValueToTempfile(config.DbSslKeyContents)
}
if config.AwsEndpointSigningRegionLegacy != "" && config.AwsEndpointSigningRegion == "" {
config.AwsEndpointSigningRegion = config.AwsEndpointSigningRegionLegacy
}
return config, nil
}
// Read - Reads the configuration from the specified filename, or fall back to the default config
func Read(logger *util.Logger, filename string) (Config, error) {
var conf Config
var err error
if _, err = os.Stat(filename); err == nil {
configFile, err := ini.LoadSources(ini.LoadOptions{SpaceBeforeInlineComment: true}, filename)
if err != nil {
return conf, err
}
defaultConfig := getDefaultConfig()
pgaSection, err := configFile.GetSection("pganalyze")
if err != nil {
return conf, fmt.Errorf("Failed to find [pganalyze] section in config: %s", err)
}
err = pgaSection.MapTo(defaultConfig)
if err != nil {
return conf, fmt.Errorf("Failed to map [pganalyze] section in config: %s", err)
}
sections := configFile.Sections()
for _, section := range sections {
config := &ServerConfig{}
*config = *defaultConfig
err = section.MapTo(config)
if err != nil {
return conf, err
}
config, err = preprocessConfig(config)
if err != nil {
return conf, err
}
config.SectionName = section.Name()
config.SystemType, config.SystemScope, config.SystemScopeFallback, config.SystemID = identifySystem(*config)
config.Identifier = ServerIdentifier{
APIKey: config.APIKey,
APIBaseURL: config.APIBaseURL,
SystemID: config.SystemID,
SystemType: config.SystemType,
SystemScope: config.SystemScope,
}
if config.GetDbName() != "" {
// Ensure we have no duplicate identifiers within one collector
skip := false
for _, server := range conf.Servers {
if config.Identifier == server.Identifier {
skip = true
}
}
if skip {
logger.PrintError("Skipping config section %s, detected as duplicate", config.SectionName)
} else {
conf.Servers = append(conf.Servers, *config)
}
}
if config.DbURL != "" {
_, err := url.Parse(config.DbURL)
if err != nil {
prefixedLogger := logger.WithPrefix(config.SectionName)
prefixedLogger.PrintError("Could not parse db_url; check URL format and note that any special characters must be percent-encoded")
}
}
}
if len(conf.Servers) == 0 {
return conf, fmt.Errorf("Configuration file is empty, please edit %s and reload the collector", filename)
}
} else {
if os.Getenv("DYNO") != "" && os.Getenv("PORT") != "" {
for _, kv := range os.Environ() {
parts := strings.Split(kv, "=")
if strings.HasSuffix(parts[0], "_URL") {
config := getDefaultConfig()
config, err = preprocessConfig(config)
if err != nil {
return conf, err
}
config.SectionName = parts[0]
config.SystemID = strings.Replace(parts[0], "_URL", "", 1)
config.SystemType = "heroku"
config.DbURL = parts[1]
conf.Servers = append(conf.Servers, *config)
}
}
} else if os.Getenv("PGA_API_KEY") != "" {
config := getDefaultConfig()
config, err = preprocessConfig(config)
if err != nil {
return conf, err
}
config.SystemType, config.SystemScope, config.SystemScopeFallback, config.SystemID = identifySystem(*config)
conf.Servers = append(conf.Servers, *config)
} else {
return conf, fmt.Errorf("No configuration file found at %s, and no environment variables set", filename)
}
}
var hasIgnoreTablePattern = false
for _, server := range conf.Servers {
if server.IgnoreTablePattern != "" {
hasIgnoreTablePattern = true
break
}
}
if hasIgnoreTablePattern {
if os.Getenv("IGNORE_TABLE_PATTERN") != "" {
logger.PrintVerbose("Deprecated: Setting IGNORE_TABLE_PATTERN is deprecated; please use IGNORE_SCHEMA_REGEXP instead")
} else {
logger.PrintVerbose("Deprecated: Setting ignore_table_pattern is deprecated; please use ignore_schema_regexp instead")
}
}
return conf, nil
}
| [
"\"PGA_API_KEY\"",
"\"PGA_API_BASEURL\"",
"\"PGA_API_SYSTEM_ID\"",
"\"PGA_API_SYSTEM_TYPE\"",
"\"PGA_API_SYSTEM_SCOPE\"",
"\"PGA_API_SYSTEM_SCOPE_FALLBACK\"",
"\"PGA_ENABLE_REPORTS\"",
"\"PGA_DISABLE_LOGS\"",
"\"PGA_DISABLE_ACTIVITY\"",
"\"PGA_ENABLE_LOG_EXPLAIN\"",
"\"DB_URL\"",
"\"DB_NAME\"",
"\"DB_ALL_NAMES\"",
"\"DB_USERNAME\"",
"\"DB_PASSWORD\"",
"\"DB_HOST\"",
"\"DB_PORT\"",
"\"DB_SSLMODE\"",
"\"DB_SSLROOTCERT\"",
"\"DB_SSLROOTCERT_CONTENTS\"",
"\"DB_SSLCERT\"",
"\"DB_SSLCERT_CONTENTS\"",
"\"DB_SSLKEY\"",
"\"DB_SSLKEY_CONTENTS\"",
"\"AWS_REGION\"",
"\"AWS_ACCOUNT_ID\"",
"\"AWS_INSTANCE_ID\"",
"\"AWS_ACCESS_KEY_ID\"",
"\"AWS_SECRET_ACCESS_KEY\"",
"\"AWS_ASSUME_ROLE\"",
"\"AWS_WEB_IDENTITY_TOKEN_FILE\"",
"\"AWS_ROLE_ARN\"",
"\"AWS_ENDPOINT_SIGNING_REGION\"",
"\"AWS_ENDPOINT_RDS_URL\"",
"\"AWS_ENDPOINT_EC2_URL\"",
"\"AWS_ENDPOINT_CLOUDWATCH_URL\"",
"\"AWS_ENDPOINT_CLOUDWATCH_LOGS_URL\"",
"\"AZURE_DB_SERVER_NAME\"",
"\"AZURE_EVENTHUB_NAMESPACE\"",
"\"AZURE_EVENTHUB_NAME\"",
"\"AZURE_AD_TENANT_ID\"",
"\"AZURE_AD_CLIENT_ID\"",
"\"AZURE_AD_CLIENT_SECRET\"",
"\"AZURE_AD_CERTIFICATE_PATH\"",
"\"AZURE_AD_CERTIFICATE_PASSWORD\"",
"\"GCP_CLOUDSQL_INSTANCE_ID\"",
"\"GCP_PUBSUB_SUBSCRIPTION\"",
"\"GCP_CREDENTIALS_FILE\"",
"\"GCP_PROJECT_ID\"",
"\"LOG_LOCATION\"",
"\"LOG_SYSLOG_SERVER\"",
"\"IGNORE_TABLE_PATTERN\"",
"\"IGNORE_SCHEMA_REGEXP\"",
"\"QUERY_STATS_INTERVAL\"",
"\"MAX_COLLECTOR_CONNECTION\"",
"\"SKIP_IF_REPLICA\"",
"\"FILTER_LOG_SECRET\"",
"\"FILTER_QUERY_SAMPLE\"",
"\"FILTER_QUERY_TEXT\"",
"\"HTTP_PROXY\"",
"\"http_proxy\"",
"\"HTTPS_PROXY\"",
"\"https_proxy\"",
"\"NO_PROXY\"",
"\"no_proxy\"",
"\"DYNO\"",
"\"PORT\"",
"\"PGA_API_KEY\"",
"\"IGNORE_TABLE_PATTERN\""
]
| []
| [
"NO_PROXY",
"AWS_ENDPOINT_EC2_URL",
"DB_HOST",
"PGA_API_SYSTEM_TYPE",
"DB_USERNAME",
"DB_PORT",
"AWS_ENDPOINT_CLOUDWATCH_URL",
"AZURE_DB_SERVER_NAME",
"IGNORE_SCHEMA_REGEXP",
"DB_NAME",
"DB_SSLMODE",
"https_proxy",
"HTTP_PROXY",
"FILTER_QUERY_TEXT",
"AWS_WEB_IDENTITY_TOKEN_FILE",
"AWS_ROLE_ARN",
"AWS_ENDPOINT_SIGNING_REGION",
"GCP_PROJECT_ID",
"HTTPS_PROXY",
"DB_URL",
"FILTER_LOG_SECRET",
"PORT",
"AWS_INSTANCE_ID",
"AWS_REGION",
"MAX_COLLECTOR_CONNECTION",
"AZURE_AD_CLIENT_SECRET",
"PGA_ENABLE_REPORTS",
"IGNORE_TABLE_PATTERN",
"DB_SSLCERT",
"LOG_LOCATION",
"DB_SSLKEY",
"DB_SSLKEY_CONTENTS",
"PGA_API_BASEURL",
"PGA_API_SYSTEM_SCOPE_FALLBACK",
"QUERY_STATS_INTERVAL",
"DB_SSLROOTCERT",
"DB_SSLCERT_CONTENTS",
"PGA_DISABLE_ACTIVITY",
"PGA_API_KEY",
"AWS_ENDPOINT_RDS_URL",
"AZURE_AD_CLIENT_ID",
"DYNO",
"AWS_ACCOUNT_ID",
"AZURE_AD_CERTIFICATE_PASSWORD",
"GCP_CREDENTIALS_FILE",
"GCP_PUBSUB_SUBSCRIPTION",
"AZURE_EVENTHUB_NAMESPACE",
"LOG_SYSLOG_SERVER",
"AZURE_AD_CERTIFICATE_PATH",
"AZURE_EVENTHUB_NAME",
"PGA_API_SYSTEM_ID",
"AZURE_AD_TENANT_ID",
"no_proxy",
"PGA_API_SYSTEM_SCOPE",
"DB_PASSWORD",
"AWS_ENDPOINT_CLOUDWATCH_LOGS_URL",
"AWS_SECRET_ACCESS_KEY",
"PGA_ENABLE_LOG_EXPLAIN",
"SKIP_IF_REPLICA",
"DB_SSLROOTCERT_CONTENTS",
"AWS_ACCESS_KEY_ID",
"AWS_ASSUME_ROLE",
"GCP_CLOUDSQL_INSTANCE_ID",
"PGA_DISABLE_LOGS",
"FILTER_QUERY_SAMPLE",
"DB_ALL_NAMES",
"http_proxy"
]
| [] | ["NO_PROXY", "AWS_ENDPOINT_EC2_URL", "DB_HOST", "PGA_API_SYSTEM_TYPE", "DB_USERNAME", "DB_PORT", "AWS_ENDPOINT_CLOUDWATCH_URL", "AZURE_DB_SERVER_NAME", "IGNORE_SCHEMA_REGEXP", "DB_NAME", "DB_SSLMODE", "https_proxy", "HTTP_PROXY", "FILTER_QUERY_TEXT", "AWS_WEB_IDENTITY_TOKEN_FILE", "AWS_ROLE_ARN", "AWS_ENDPOINT_SIGNING_REGION", "GCP_PROJECT_ID", "HTTPS_PROXY", "DB_URL", "FILTER_LOG_SECRET", "PORT", "AWS_INSTANCE_ID", "AWS_REGION", "MAX_COLLECTOR_CONNECTION", "AZURE_AD_CLIENT_SECRET", "PGA_ENABLE_REPORTS", "IGNORE_TABLE_PATTERN", "DB_SSLCERT", "LOG_LOCATION", "DB_SSLKEY", "DB_SSLKEY_CONTENTS", "PGA_API_BASEURL", "PGA_API_SYSTEM_SCOPE_FALLBACK", "QUERY_STATS_INTERVAL", "DB_SSLROOTCERT", "DB_SSLCERT_CONTENTS", "PGA_DISABLE_ACTIVITY", "PGA_API_KEY", "AWS_ENDPOINT_RDS_URL", "AZURE_AD_CLIENT_ID", "DYNO", "AWS_ACCOUNT_ID", "AZURE_AD_CERTIFICATE_PASSWORD", "GCP_CREDENTIALS_FILE", "GCP_PUBSUB_SUBSCRIPTION", "AZURE_EVENTHUB_NAMESPACE", "LOG_SYSLOG_SERVER", "AZURE_AD_CERTIFICATE_PATH", "AZURE_EVENTHUB_NAME", "PGA_API_SYSTEM_ID", "AZURE_AD_TENANT_ID", "no_proxy", "PGA_API_SYSTEM_SCOPE", "DB_PASSWORD", "AWS_ENDPOINT_CLOUDWATCH_LOGS_URL", "AWS_SECRET_ACCESS_KEY", "PGA_ENABLE_LOG_EXPLAIN", "SKIP_IF_REPLICA", "DB_SSLROOTCERT_CONTENTS", "AWS_ACCESS_KEY_ID", "AWS_ASSUME_ROLE", "GCP_CLOUDSQL_INSTANCE_ID", "PGA_DISABLE_LOGS", "FILTER_QUERY_SAMPLE", "DB_ALL_NAMES", "http_proxy"] | go | 67 | 0 | |
types/play.go | package types
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/hashicorp/terraform/helper/schema"
)
// Play represents an Ansible item to play.
type Play struct {
defaults *Defaults
enabled bool
entity interface{}
hosts []string
groups []string
become bool
becomeMethod string
becomeUser string
diff bool
extraVars map[string]interface{}
forks int
inventoryFile string
limit string
vaultID []string
vaultPasswordFile string
verbose bool
overrideInventoryFile string
overrideVaultID []string
overrideVaultPasswordFile string
}
var (
defaultRolesPath = []string{"~/.ansible/roles", "/usr/share/ansible/roles", "/etc/ansible/roles"}
)
const (
// default values:
playDefaultBecomeMethod = "sudo"
playDefaultBecomeUser = "root"
playDefaultForks = 5
// environment variable names:
ansibleEnvVarForceColor = "ANSIBLE_FORCE_COLOR"
ansibleEnvVarRolesPath = "ANSIBLE_ROLES_PATH"
ansibleEnvVarDefaultRolesPath = "DEFAULT_ROLES_PATH"
// attribute names:
playAttributeEnabled = "enabled"
playAttributePlaybook = "playbook"
playAttributeModule = "module"
playAttributeHosts = "hosts"
playAttributeGroups = "groups"
playAttributeBecome = "become"
playAttributeBecomeMethod = "become_method"
playAttributeBecomeUser = "become_user"
playAttributeDiff = "diff"
playAttributeExtraVars = "extra_vars"
playAttributeForks = "forks"
playAttributeInventoryFile = "inventory_file"
playAttributeLimit = "limit"
playAttributeVaultID = "vault_id"
playAttributeVaultPasswordFile = "vault_password_file"
playAttributeVerbose = "verbose"
)
// NewPlaySchema returns a new play schema.
func NewPlaySchema() *schema.Schema {
return &schema.Schema{
Type: schema.TypeList,
Optional: true,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
playAttributeEnabled: &schema.Schema{
Type: schema.TypeBool,
Optional: true,
Default: true,
},
playAttributePlaybook: NewPlaybookSchema(),
playAttributeModule: NewModuleSchema(),
playAttributeHosts: &schema.Schema{
Type: schema.TypeList,
Elem: &schema.Schema{Type: schema.TypeString},
Optional: true,
},
playAttributeGroups: &schema.Schema{
Type: schema.TypeList,
Elem: &schema.Schema{Type: schema.TypeString},
Optional: true,
},
playAttributeBecome: &schema.Schema{
Type: schema.TypeBool,
Optional: true,
},
playAttributeBecomeMethod: &schema.Schema{
Type: schema.TypeString,
Optional: true,
Default: playDefaultBecomeMethod,
ValidateFunc: vfBecomeMethod,
},
playAttributeBecomeUser: &schema.Schema{
Type: schema.TypeString,
Optional: true,
Default: playDefaultBecomeUser,
},
playAttributeDiff: &schema.Schema{
Type: schema.TypeBool,
Optional: true,
},
playAttributeExtraVars: &schema.Schema{
Type: schema.TypeMap,
Optional: true,
Computed: true,
},
playAttributeForks: &schema.Schema{
Type: schema.TypeInt,
Optional: true,
Default: playDefaultForks,
},
playAttributeInventoryFile: &schema.Schema{
Type: schema.TypeString,
Optional: true,
ValidateFunc: vfPath,
},
playAttributeLimit: &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
playAttributeVaultID: &schema.Schema{
Type: schema.TypeList,
Elem: &schema.Schema{Type: schema.TypeString},
Optional: true,
ConflictsWith: []string{"plays.vault_password_file"},
},
playAttributeVaultPasswordFile: &schema.Schema{
Type: schema.TypeString,
Optional: true,
ValidateFunc: vfPath,
ConflictsWith: []string{"plays.vault_id"},
},
playAttributeVerbose: &schema.Schema{
Type: schema.TypeBool,
Optional: true,
},
},
},
}
}
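// Illustrative sketch, not part of the original source: assuming the schema above is
// attached to a provisioner attribute called "plays" (as the ConflictsWith entries
// suggest), a matching Terraform configuration block could look roughly like:
//
//   plays {
//     playbook {
//       file_path = "site.yml"
//     }
//     hosts  = ["10.0.0.5"]
//     become = true
//   }
//
// The playbook path and host address are hypothetical; attribute names mirror the
// playAttribute* constants defined above.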
// NewPlayFromInterface reads Play configuration from Terraform schema.
func NewPlayFromInterface(i interface{}, defaults *Defaults) *Play {
vals := mapFromTypeSetList(i.(*schema.Set).List())
v := &Play{
defaults: defaults,
enabled: vals[playAttributeEnabled].(bool),
become: vals[playAttributeBecome].(bool),
becomeMethod: vals[playAttributeBecomeMethod].(string),
becomeUser: vals[playAttributeBecomeUser].(string),
diff: vals[playAttributeDiff].(bool),
extraVars: mapFromTypeMap(vals[playAttributeExtraVars]),
forks: vals[playAttributeForks].(int),
inventoryFile: vals[playAttributeInventoryFile].(string),
limit: vals[playAttributeLimit].(string),
vaultID: listOfInterfaceToListOfString(vals[playAttributeVaultID].([]interface{})),
vaultPasswordFile: vals[playAttributeVaultPasswordFile].(string),
verbose: vals[playAttributeVerbose].(bool),
}
emptySet := "*Set(map[string]interface {}(nil))"
if vals[playAttributePlaybook].(*schema.Set).GoString() != emptySet {
v.entity = NewPlaybookFromInterface(vals[playAttributePlaybook])
} else if vals[playAttributeModule].(*schema.Set).GoString() != emptySet {
v.entity = NewModuleFromInterface(vals[playAttributeModule])
}
if val, ok := vals[playAttributeHosts]; ok {
v.hosts = listOfInterfaceToListOfString(val.([]interface{}))
}
if val, ok := vals[playAttributeGroups]; ok {
v.groups = listOfInterfaceToListOfString(val.([]interface{}))
}
return v
}
// Enabled controls the execution of a play.
// Play will be skipped if this value is false.
func (v *Play) Enabled() bool {
return v.enabled
}
// Entity to run. A Playbook or Module.
func (v *Play) Entity() interface{} {
return v.entity
}
// Hosts to include in the auto-generated inventory file.
func (v *Play) Hosts() []string {
if len(v.hosts) > 0 {
return v.hosts
}
if v.defaults.hostsIsSet {
return v.defaults.hosts
}
return make([]string, 0)
}
// Groups to include in the auto-generated inventory file.
func (v *Play) Groups() []string {
if len(v.groups) > 0 {
return v.groups
}
if v.defaults.groupsIsSet {
return v.defaults.groups
}
return make([]string, 0)
}
// Become represents Ansible --become flag.
func (v *Play) Become() bool {
return v.become
}
// BecomeMethod represents Ansible --become-method flag.
func (v *Play) BecomeMethod() string {
if v.becomeMethod != "" {
return v.becomeMethod
}
if v.defaults.becomeMethodIsSet {
return v.defaults.becomeMethod
}
return playDefaultBecomeMethod
}
// BecomeUser represents Ansible --become-user flag.
func (v *Play) BecomeUser() string {
if v.becomeUser != "" {
return v.becomeUser
}
if v.defaults.becomeUserIsSet {
return v.defaults.becomeUser
}
return "" // will be obtained from connection info
}
// Diff represents Ansible --diff flag.
func (v *Play) Diff() bool {
return v.diff
}
// ExtraVars represents Ansible --extra-vars flag.
func (v *Play) ExtraVars() map[string]interface{} {
if len(v.extraVars) > 0 {
return v.extraVars
}
if v.defaults.extraVarsIsSet {
return v.defaults.extraVars
}
return make(map[string]interface{})
}
// Forks represents Ansible --forks flag.
func (v *Play) Forks() int {
if v.forks > 0 {
return v.forks
}
if v.defaults.forksIsSet {
return v.defaults.forks
}
return playDefaultForks
}
// InventoryFile represents Ansible --inventory-file flag.
func (v *Play) InventoryFile() string {
if v.overrideInventoryFile != "" {
return v.overrideInventoryFile
}
if v.inventoryFile != "" {
return v.inventoryFile
}
if v.defaults.inventoryFileIsSet {
return v.defaults.inventoryFile
}
return ""
}
// Limit represents Ansible --limit flag.
func (v *Play) Limit() string {
if v.limit != "" {
return v.limit
}
if v.defaults.limitIsSet {
return v.defaults.limit
}
return ""
}
// VaultPasswordFile represents Ansible --vault-password-file flag.
func (v *Play) VaultPasswordFile() string {
if v.overrideVaultPasswordFile != "" {
return v.overrideVaultPasswordFile
}
if v.vaultPasswordFile != "" {
return v.vaultPasswordFile
}
if v.defaults.vaultPasswordFileIsSet {
return v.defaults.vaultPasswordFile
}
return ""
}
// VaultID represents Ansible --vault-id flag.
func (v *Play) VaultID() []string {
if len(v.overrideVaultID) > 0 {
return v.overrideVaultID
}
if len(v.vaultID) > 0 {
return v.vaultID
}
if v.defaults.vaultIDIsSet {
return v.defaults.vaultID
}
return make([]string, 0)
}
// Verbose represents Ansible --verbose flag.
func (v *Play) Verbose() bool {
return v.verbose
}
// SetOverrideInventoryFile is used by the provisioner in the following cases:
// - remote provisioner: when no inventory_file is given, a generated temporary file is used
// - local mode: a temporary inventory file is always written and has to be removed after provisioning
func (v *Play) SetOverrideInventoryFile(path string) {
v.overrideInventoryFile = path
}
// SetOverrideVaultID is used by remote provisioner when vault id files are defined.
// After uploading the files to the machine, the paths are updated to the remote paths, such that Ansible
// can be given correct remote locations.
func (v *Play) SetOverrideVaultID(paths []string) {
v.overrideVaultID = paths
}
// SetOverrideVaultPasswordPath is used by remote provisioner when a vault password file is defined.
// After uploading the file to the machine, the path is updated to the remote path, such that Ansible
// can be given the correct remote location.
func (v *Play) SetOverrideVaultPasswordPath(path string) {
v.overrideVaultPasswordFile = path
}
func (v *Play) defaultRolePaths() []string {
if val, ok := os.LookupEnv(ansibleEnvVarRolesPath); ok {
return strings.Split(val, ":")
}
if val, ok := os.LookupEnv(ansibleEnvVarDefaultRolesPath); ok {
return strings.Split(val, ":")
}
return defaultRolesPath
}
// ToCommand serializes the play to an executable Ansible command.
func (v *Play) ToCommand(ansibleArgs LocalModeAnsibleArgs) (string, error) {
command := ""
// entity to call:
switch entity := v.Entity().(type) {
case *Playbook:
command = fmt.Sprintf("%s=true", ansibleEnvVarForceColor)
// handling role directories:
rolePaths := v.defaultRolePaths()
for _, rp := range entity.RolesPath() {
rolePaths = append(rolePaths, filepath.Clean(rp))
}
command = fmt.Sprintf("%s %s=%s", command, ansibleEnvVarRolesPath, strings.Join(rolePaths, ":"))
command = fmt.Sprintf("%s ansible-playbook %s", command, entity.FilePath())
// force handlers:
if entity.ForceHandlers() {
command = fmt.Sprintf("%s --force-handlers", command)
}
// skip tags:
if len(entity.SkipTags()) > 0 {
command = fmt.Sprintf("%s --skip-tags='%s'", command, strings.Join(entity.SkipTags(), ","))
}
// start at task:
if entity.StartAtTask() != "" {
command = fmt.Sprintf("%s --start-at-task='%s'", command, entity.StartAtTask())
}
// tags:
if len(entity.Tags()) > 0 {
command = fmt.Sprintf("%s --tags='%s'", command, strings.Join(entity.Tags(), ","))
}
case *Module:
hostPattern := entity.HostPattern()
if hostPattern == "" {
hostPattern = ansibleModuleDefaultHostPattern
}
command = fmt.Sprintf("ansible %s --module-name='%s'", hostPattern, entity.module)
if entity.Background() > 0 {
command = fmt.Sprintf("%s --background=%d", command, entity.Background())
if entity.Poll() > 0 {
command = fmt.Sprintf("%s --poll=%d", command, entity.Poll())
}
}
// module args:
if len(entity.Args()) > 0 {
args := make([]string, 0)
for mak, mav := range entity.Args() {
args = append(args, fmt.Sprintf("%s=%+v", mak, mav))
}
command = fmt.Sprintf("%s --args=\"%s\"", command, strings.Join(args, " "))
}
// one line:
if entity.OneLine() {
command = fmt.Sprintf("%s --one-line", command)
}
}
// inventory file:
command = fmt.Sprintf("%s --inventory-file='%s'", command, v.InventoryFile())
// shared arguments:
// become:
if v.Become() {
command = fmt.Sprintf("%s --become", command)
command = fmt.Sprintf("%s --become-method='%s'", command, v.BecomeMethod())
if v.BecomeUser() != "" {
command = fmt.Sprintf("%s --become-user='%s'", command, v.BecomeUser())
} else {
command = fmt.Sprintf("%s --become-user='%s'", command, ansibleArgs.Username)
}
}
// diff:
if v.Diff() {
command = fmt.Sprintf("%s --diff", command)
}
// extra vars:
if len(v.ExtraVars()) > 0 {
extraVars, err := json.Marshal(v.ExtraVars())
if err != nil {
return "", err
}
command = fmt.Sprintf("%s --extra-vars='%s'", command, string(extraVars))
}
// forks:
if v.Forks() > 0 {
command = fmt.Sprintf("%s --forks=%d", command, v.Forks())
}
// limit
if v.Limit() != "" {
command = fmt.Sprintf("%s --limit='%s'", command, v.Limit())
}
if len(v.VaultID()) > 0 {
for _, vaultID := range v.VaultID() {
command = fmt.Sprintf("%s --vault-id='%s'", command, filepath.Clean(vaultID))
}
} else {
// vault password file:
if v.VaultPasswordFile() != "" {
command = fmt.Sprintf("%s --vault-password-file='%s'", command, v.VaultPasswordFile())
}
}
// verbose:
if v.Verbose() {
command = fmt.Sprintf("%s --verbose", command)
}
return command, nil
}
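// Illustrative sketch, not part of the original source: for a minimal playbook play that
// keeps the defaults (become, diff and verbose off, no extra vars or vault), ToCommand is
// expected to produce a command roughly of the form
//
//   ANSIBLE_FORCE_COLOR=true ANSIBLE_ROLES_PATH=~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles \
//     ansible-playbook site.yml --inventory-file='<inventory>' --forks=5
//
// The playbook name and inventory path are hypothetical; the exact flags follow the play
// configuration handled above.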
// ToLocalCommand serializes the play to an executable local provisioning Ansible command.
func (v *Play) ToLocalCommand(ansibleArgs LocalModeAnsibleArgs, ansibleSSHSettings *AnsibleSSHSettings) (string, error) {
baseCommand, err := v.ToCommand(ansibleArgs)
if err != nil {
return "", err
}
return fmt.Sprintf("%s %s", baseCommand, v.toCommandArguments(ansibleArgs, ansibleSSHSettings)), nil
}
func (v *Play) toCommandArguments(ansibleArgs LocalModeAnsibleArgs, ansibleSSHSettings *AnsibleSSHSettings) string {
args := fmt.Sprintf("--user='%s'", ansibleArgs.Username)
if ansibleArgs.PemFile != "" {
args = fmt.Sprintf("%s --private-key='%s'", args, ansibleArgs.PemFile)
}
sshExtraArgsOptions := make([]string, 0)
sshExtraArgsOptions = append(sshExtraArgsOptions, fmt.Sprintf("-p %d", ansibleArgs.Port))
sshExtraArgsOptions = append(sshExtraArgsOptions, fmt.Sprintf("-o UserKnownHostsFile=%s", ansibleArgs.KnownHostsFile))
sshExtraArgsOptions = append(sshExtraArgsOptions, fmt.Sprintf("-o ConnectTimeout=%d", ansibleSSHSettings.ConnectTimeoutSeconds()))
sshExtraArgsOptions = append(sshExtraArgsOptions, fmt.Sprintf("-o ConnectionAttempts=%d", ansibleSSHSettings.ConnectAttempts()))
if ansibleArgs.BastionHost != "" {
sshExtraArgsOptions = append(
sshExtraArgsOptions,
fmt.Sprintf(
"-o ProxyCommand=\"ssh -p %d -W %%h:%%p %s@%s -o UserKnownHostsFile=%s\"",
ansibleArgs.BastionPort,
ansibleArgs.BastionUsername,
ansibleArgs.BastionHost,
ansibleArgs.KnownHostsFile))
if ansibleArgs.BastionPemFile == "" && os.Getenv("SSH_AUTH_SOCK") != "" {
sshExtraArgsOptions = append(sshExtraArgsOptions, "-o ForwardAgent=yes")
}
}
args = fmt.Sprintf("%s --ssh-extra-args='%s'", args, strings.Join(sshExtraArgsOptions, " "))
return args
}
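// Illustrative sketch, not part of the original source: for a connection through a bastion
// host, the arguments assembled above would look roughly like
//
//   --user='admin' --private-key='key.pem' --ssh-extra-args='-p 22 -o UserKnownHostsFile=known_hosts -o ConnectTimeout=10 -o ConnectionAttempts=10 -o ProxyCommand="ssh -p 22 -W %h:%p admin@bastion -o UserKnownHostsFile=known_hosts"'
//
// User names, ports and file names here are hypothetical; ForwardAgent is only added when no
// bastion key is given and an SSH agent socket is available.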
| [
"\"SSH_AUTH_SOCK\""
]
| []
| [
"SSH_AUTH_SOCK"
]
| [] | ["SSH_AUTH_SOCK"] | go | 1 | 0 | |
main_test.go | package gorm_test
// Run tests
// $ docker-compose up
// $ ./test_all.sh
import (
"context"
"database/sql"
"database/sql/driver"
"errors"
"fmt"
"os"
"path/filepath"
"reflect"
"regexp"
"sort"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/blue1004jy/gorm"
_ "github.com/blue1004jy/gorm/dialects/mssql"
_ "github.com/blue1004jy/gorm/dialects/mysql"
"github.com/blue1004jy/gorm/dialects/postgres"
_ "github.com/blue1004jy/gorm/dialects/sqlite"
"github.com/erikstmartin/go-testdb"
"github.com/jinzhu/now"
)
var (
DB *gorm.DB
t1, t2, t3, t4, t5 time.Time
)
func init() {
var err error
if DB, err = OpenTestConnection(); err != nil {
panic(fmt.Sprintf("No error should happen when connecting to test database, but got err=%+v", err))
}
runMigration()
}
func OpenTestConnection() (db *gorm.DB, err error) {
dbDSN := os.Getenv("GORM_DSN")
switch os.Getenv("GORM_DIALECT") {
case "mysql":
fmt.Println("testing mysql...")
if dbDSN == "" {
dbDSN = "gorm:gorm@tcp(localhost:9910)/gorm?charset=utf8&parseTime=True"
}
db, err = gorm.Open("mysql", dbDSN)
case "postgres":
fmt.Println("testing postgres...")
if dbDSN == "" {
dbDSN = "user=gorm password=gorm dbname=gorm port=9920 sslmode=disable"
}
db, err = gorm.Open("postgres", dbDSN)
case "mssql":
// CREATE LOGIN gorm WITH PASSWORD = 'LoremIpsum86';
// CREATE DATABASE gorm;
// USE gorm;
// CREATE USER gorm FROM LOGIN gorm;
// sp_changedbowner 'gorm';
fmt.Println("testing mssql...")
if dbDSN == "" {
dbDSN = "sqlserver://gorm:LoremIpsum86@localhost:9930?database=gorm"
}
db, err = gorm.Open("mssql", dbDSN)
default:
fmt.Println("testing sqlite3...")
db, err = gorm.Open("sqlite3", filepath.Join(os.TempDir(), "gorm.db"))
}
// db.SetLogger(Logger{log.New(os.Stdout, "\r\n", 0)})
// db.SetLogger(log.New(os.Stdout, "\r\n", 0))
if debug := os.Getenv("DEBUG"); debug == "true" {
db.LogMode(true)
} else if debug == "false" {
db.LogMode(false)
}
db.DB().SetMaxIdleConns(10)
return
}
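// Illustrative invocation sketch (assumption, not part of the original source); the values
// mirror the defaults used above:
//
//   GORM_DIALECT=postgres GORM_DSN="user=gorm password=gorm dbname=gorm port=9920 sslmode=disable" DEBUG=true go test ./...
//
// With no GORM_DIALECT set, the tests fall back to a temporary SQLite database.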
func TestOpen_ReturnsError_WithBadArgs(t *testing.T) {
stringRef := "foo"
testCases := []interface{}{42, time.Now(), &stringRef}
for _, tc := range testCases {
t.Run(fmt.Sprintf("%v", tc), func(t *testing.T) {
_, err := gorm.Open("postgresql", tc)
if err == nil {
t.Error("Should got error with invalid database source")
}
if !strings.HasPrefix(err.Error(), "invalid database source:") {
t.Errorf("Should got error starting with \"invalid database source:\", but got %q", err.Error())
}
})
}
}
func TestStringPrimaryKey(t *testing.T) {
type UUIDStruct struct {
ID string `gorm:"primary_key"`
Name string
}
DB.DropTable(&UUIDStruct{})
DB.AutoMigrate(&UUIDStruct{})
data := UUIDStruct{ID: "uuid", Name: "hello"}
if err := DB.Save(&data).Error; err != nil || data.ID != "uuid" || data.Name != "hello" {
t.Errorf("string primary key should not be populated")
}
data = UUIDStruct{ID: "uuid", Name: "hello world"}
if err := DB.Save(&data).Error; err != nil || data.ID != "uuid" || data.Name != "hello world" {
t.Errorf("string primary key should not be populated")
}
}
func TestExceptionsWithInvalidSql(t *testing.T) {
var columns []string
if DB.Where("sdsd.zaaa = ?", "sd;;;aa").Pluck("aaa", &columns).Error == nil {
t.Errorf("Should got error with invalid SQL")
}
if DB.Model(&User{}).Where("sdsd.zaaa = ?", "sd;;;aa").Pluck("aaa", &columns).Error == nil {
t.Errorf("Should got error with invalid SQL")
}
if DB.Where("sdsd.zaaa = ?", "sd;;;aa").Find(&User{}).Error == nil {
t.Errorf("Should got error with invalid SQL")
}
var count1, count2 int64
DB.Model(&User{}).Count(&count1)
if count1 <= 0 {
t.Errorf("Should find some users")
}
if DB.Where("name = ?", "jinzhu; delete * from users").First(&User{}).Error == nil {
t.Errorf("Should got error with invalid SQL")
}
DB.Model(&User{}).Count(&count2)
if count1 != count2 {
t.Errorf("No user should not be deleted by invalid SQL")
}
}
func TestSetTable(t *testing.T) {
DB.Create(getPreparedUser("pluck_user1", "pluck_user"))
DB.Create(getPreparedUser("pluck_user2", "pluck_user"))
DB.Create(getPreparedUser("pluck_user3", "pluck_user"))
if err := DB.Table("users").Where("role = ?", "pluck_user").Pluck("age", &[]int{}).Error; err != nil {
t.Error("No errors should happen if set table for pluck", err)
}
var users []User
if DB.Table("users").Find(&[]User{}).Error != nil {
t.Errorf("No errors should happen if set table for find")
}
if DB.Table("invalid_table").Find(&users).Error == nil {
t.Errorf("Should got error when table is set to an invalid table")
}
DB.Exec("drop table deleted_users;")
if DB.Table("deleted_users").CreateTable(&User{}).Error != nil {
t.Errorf("Create table with specified table")
}
DB.Table("deleted_users").Save(&User{Name: "DeletedUser"})
var deletedUsers []User
DB.Table("deleted_users").Find(&deletedUsers)
if len(deletedUsers) != 1 {
t.Errorf("Query from specified table")
}
var user User
DB.Table("deleted_users").First(&user, "name = ?", "DeletedUser")
user.Age = 20
DB.Table("deleted_users").Save(&user)
if DB.Table("deleted_users").First(&user, "name = ? AND age = ?", "DeletedUser", 20).RecordNotFound() {
t.Errorf("Failed to found updated user")
}
DB.Save(getPreparedUser("normal_user", "reset_table"))
DB.Table("deleted_users").Save(getPreparedUser("deleted_user", "reset_table"))
var user1, user2, user3 User
DB.Where("role = ?", "reset_table").First(&user1).Table("deleted_users").First(&user2).Table("").First(&user3)
if (user1.Name != "normal_user") || (user2.Name != "deleted_user") || (user3.Name != "normal_user") {
t.Errorf("unset specified table with blank string")
}
}
type Order struct {
}
type Cart struct {
}
func (c Cart) TableName() string {
return "shopping_cart"
}
func TestHasTable(t *testing.T) {
type Foo struct {
Id int
Stuff string
}
DB.DropTable(&Foo{})
// Table should not exist at this point, HasTable should return false
if ok := DB.HasTable("foos"); ok {
t.Errorf("Table should not exist, but does")
}
if ok := DB.HasTable(&Foo{}); ok {
t.Errorf("Table should not exist, but does")
}
// We create the table
if err := DB.CreateTable(&Foo{}).Error; err != nil {
t.Errorf("Table should be created")
}
// And now it should exist, and HasTable should return true
if ok := DB.HasTable("foos"); !ok {
t.Errorf("Table should exist, but HasTable informs it does not")
}
if ok := DB.HasTable(&Foo{}); !ok {
t.Errorf("Table should exist, but HasTable informs it does not")
}
}
func TestTableName(t *testing.T) {
DB := DB.Model("")
if DB.NewScope(Order{}).TableName() != "orders" {
t.Errorf("Order's table name should be orders")
}
if DB.NewScope(&Order{}).TableName() != "orders" {
t.Errorf("&Order's table name should be orders")
}
if DB.NewScope([]Order{}).TableName() != "orders" {
t.Errorf("[]Order's table name should be orders")
}
if DB.NewScope(&[]Order{}).TableName() != "orders" {
t.Errorf("&[]Order's table name should be orders")
}
DB.SingularTable(true)
if DB.NewScope(Order{}).TableName() != "order" {
t.Errorf("Order's singular table name should be order")
}
if DB.NewScope(&Order{}).TableName() != "order" {
t.Errorf("&Order's singular table name should be order")
}
if DB.NewScope([]Order{}).TableName() != "order" {
t.Errorf("[]Order's singular table name should be order")
}
if DB.NewScope(&[]Order{}).TableName() != "order" {
t.Errorf("&[]Order's singular table name should be order")
}
if DB.NewScope(&Cart{}).TableName() != "shopping_cart" {
t.Errorf("&Cart's singular table name should be shopping_cart")
}
if DB.NewScope(Cart{}).TableName() != "shopping_cart" {
t.Errorf("Cart's singular table name should be shopping_cart")
}
if DB.NewScope(&[]Cart{}).TableName() != "shopping_cart" {
t.Errorf("&[]Cart's singular table name should be shopping_cart")
}
if DB.NewScope([]Cart{}).TableName() != "shopping_cart" {
t.Errorf("[]Cart's singular table name should be shopping_cart")
}
DB.SingularTable(false)
}
func TestTableNameConcurrently(t *testing.T) {
DB := DB.Model("")
if DB.NewScope(Order{}).TableName() != "orders" {
t.Errorf("Order's table name should be orders")
}
var wg sync.WaitGroup
wg.Add(10)
for i := 1; i <= 10; i++ {
go func(db *gorm.DB) {
DB.SingularTable(true)
wg.Done()
}(DB)
}
wg.Wait()
if DB.NewScope(Order{}).TableName() != "order" {
t.Errorf("Order's singular table name should be order")
}
DB.SingularTable(false)
}
func TestNullValues(t *testing.T) {
DB.DropTable(&NullValue{})
DB.AutoMigrate(&NullValue{})
if err := DB.Save(&NullValue{
Name: sql.NullString{String: "hello", Valid: true},
Gender: &sql.NullString{String: "M", Valid: true},
Age: sql.NullInt64{Int64: 18, Valid: true},
Male: sql.NullBool{Bool: true, Valid: true},
Height: sql.NullFloat64{Float64: 100.11, Valid: true},
AddedAt: NullTime{Time: time.Now(), Valid: true},
}).Error; err != nil {
t.Errorf("Not error should raise when test null value")
}
var nv NullValue
DB.First(&nv, "name = ?", "hello")
if nv.Name.String != "hello" || nv.Gender.String != "M" || nv.Age.Int64 != 18 || nv.Male.Bool != true || nv.Height.Float64 != 100.11 || nv.AddedAt.Valid != true {
t.Errorf("Should be able to fetch null value")
}
if err := DB.Save(&NullValue{
Name: sql.NullString{String: "hello-2", Valid: true},
Gender: &sql.NullString{String: "F", Valid: true},
Age: sql.NullInt64{Int64: 18, Valid: false},
Male: sql.NullBool{Bool: true, Valid: true},
Height: sql.NullFloat64{Float64: 100.11, Valid: true},
AddedAt: NullTime{Time: time.Now(), Valid: false},
}).Error; err != nil {
t.Errorf("Not error should raise when test null value")
}
var nv2 NullValue
DB.First(&nv2, "name = ?", "hello-2")
if nv2.Name.String != "hello-2" || nv2.Gender.String != "F" || nv2.Age.Int64 != 0 || nv2.Male.Bool != true || nv2.Height.Float64 != 100.11 || nv2.AddedAt.Valid != false {
t.Errorf("Should be able to fetch null value")
}
if err := DB.Save(&NullValue{
Name: sql.NullString{String: "hello-3", Valid: false},
Gender: &sql.NullString{String: "M", Valid: true},
Age: sql.NullInt64{Int64: 18, Valid: false},
Male: sql.NullBool{Bool: true, Valid: true},
Height: sql.NullFloat64{Float64: 100.11, Valid: true},
AddedAt: NullTime{Time: time.Now(), Valid: false},
}).Error; err == nil {
t.Errorf("Can't save because of name can't be null")
}
}
func TestNullValuesWithFirstOrCreate(t *testing.T) {
var nv1 = NullValue{
Name: sql.NullString{String: "first_or_create", Valid: true},
Gender: &sql.NullString{String: "M", Valid: true},
}
var nv2 NullValue
result := DB.Where(nv1).FirstOrCreate(&nv2)
if result.RowsAffected != 1 {
t.Errorf("RowsAffected should be 1 after create some record")
}
if result.Error != nil {
t.Errorf("Should not raise any error, but got %v", result.Error)
}
if nv2.Name.String != "first_or_create" || nv2.Gender.String != "M" {
t.Errorf("first or create with nullvalues")
}
if err := DB.Where(nv1).Assign(NullValue{Age: sql.NullInt64{Int64: 18, Valid: true}}).FirstOrCreate(&nv2).Error; err != nil {
t.Errorf("Should not raise any error, but got %v", err)
}
if nv2.Age.Int64 != 18 {
t.Errorf("should update age to 18")
}
}
func TestTransaction(t *testing.T) {
tx := DB.Begin()
u := User{Name: "transcation"}
if err := tx.Save(&u).Error; err != nil {
t.Errorf("No error should raise")
}
if err := tx.First(&User{}, "name = ?", "transcation").Error; err != nil {
t.Errorf("Should find saved record")
}
if sqlTx, ok := tx.CommonDB().(*sql.Tx); !ok || sqlTx == nil {
t.Errorf("Should return the underlying sql.Tx")
}
tx.Rollback()
if err := tx.First(&User{}, "name = ?", "transcation").Error; err == nil {
t.Errorf("Should not find record after rollback")
}
tx2 := DB.Begin()
u2 := User{Name: "transcation-2"}
if err := tx2.Save(&u2).Error; err != nil {
t.Errorf("No error should raise")
}
if err := tx2.First(&User{}, "name = ?", "transcation-2").Error; err != nil {
t.Errorf("Should find saved record")
}
tx2.Commit()
if err := DB.First(&User{}, "name = ?", "transcation-2").Error; err != nil {
t.Errorf("Should be able to find committed record")
}
tx3 := DB.Begin()
u3 := User{Name: "transcation-3"}
if err := tx3.Save(&u3).Error; err != nil {
t.Errorf("No error should raise")
}
if err := tx3.First(&User{}, "name = ?", "transcation-3").Error; err != nil {
t.Errorf("Should find saved record")
}
tx3.RollbackUnlessCommitted()
if err := tx.First(&User{}, "name = ?", "transcation").Error; err == nil {
t.Errorf("Should not find record after rollback")
}
tx4 := DB.Begin()
u4 := User{Name: "transcation-4"}
if err := tx4.Save(&u4).Error; err != nil {
t.Errorf("No error should raise")
}
if err := tx4.First(&User{}, "name = ?", "transcation-4").Error; err != nil {
t.Errorf("Should find saved record")
}
tx4.Commit()
tx4.RollbackUnlessCommitted()
if err := DB.First(&User{}, "name = ?", "transcation-4").Error; err != nil {
t.Errorf("Should be able to find committed record")
}
}
func assertPanic(t *testing.T, f func()) {
defer func() {
if r := recover(); r == nil {
t.Errorf("The code did not panic")
}
}()
f()
}
func TestTransactionWithBlock(t *testing.T) {
// rollback
err := DB.Transaction(func(tx *gorm.DB) error {
u := User{Name: "transcation"}
if err := tx.Save(&u).Error; err != nil {
t.Errorf("No error should raise")
}
if err := tx.First(&User{}, "name = ?", "transcation").Error; err != nil {
t.Errorf("Should find saved record")
}
return errors.New("the error message")
})
if err.Error() != "the error message" {
t.Errorf("Transaction return error will equal the block returns error")
}
if err := DB.First(&User{}, "name = ?", "transcation").Error; err == nil {
t.Errorf("Should not find record after rollback")
}
// commit
DB.Transaction(func(tx *gorm.DB) error {
u2 := User{Name: "transcation-2"}
if err := tx.Save(&u2).Error; err != nil {
t.Errorf("No error should raise")
}
if err := tx.First(&User{}, "name = ?", "transcation-2").Error; err != nil {
t.Errorf("Should find saved record")
}
return nil
})
if err := DB.First(&User{}, "name = ?", "transcation-2").Error; err != nil {
t.Errorf("Should be able to find committed record")
}
// panic will rollback
assertPanic(t, func() {
DB.Transaction(func(tx *gorm.DB) error {
u3 := User{Name: "transcation-3"}
if err := tx.Save(&u3).Error; err != nil {
t.Errorf("No error should raise")
}
if err := tx.First(&User{}, "name = ?", "transcation-3").Error; err != nil {
t.Errorf("Should find saved record")
}
panic("force panic")
})
})
if err := DB.First(&User{}, "name = ?", "transcation-3").Error; err == nil {
t.Errorf("Should not find record after panic rollback")
}
}
func TestTransaction_NoErrorOnRollbackAfterCommit(t *testing.T) {
tx := DB.Begin()
u := User{Name: "transcation"}
if err := tx.Save(&u).Error; err != nil {
t.Errorf("No error should raise")
}
if err := tx.Commit().Error; err != nil {
t.Errorf("Commit should not raise error")
}
if err := tx.Rollback().Error; err != nil {
t.Errorf("Rollback should not raise error")
}
}
func TestTransactionReadonly(t *testing.T) {
dialect := os.Getenv("GORM_DIALECT")
if dialect == "" {
dialect = "sqlite"
}
switch dialect {
case "mssql", "sqlite":
t.Skipf("%s does not support readonly transactions\n", dialect)
}
tx := DB.Begin()
u := User{Name: "transcation"}
if err := tx.Save(&u).Error; err != nil {
t.Errorf("No error should raise")
}
tx.Commit()
tx = DB.BeginTx(context.Background(), &sql.TxOptions{ReadOnly: true})
if err := tx.First(&User{}, "name = ?", "transcation").Error; err != nil {
t.Errorf("Should find saved record")
}
if sqlTx, ok := tx.CommonDB().(*sql.Tx); !ok || sqlTx == nil {
t.Errorf("Should return the underlying sql.Tx")
}
u = User{Name: "transcation-2"}
if err := tx.Save(&u).Error; err == nil {
t.Errorf("Error should have been raised in a readonly transaction")
}
tx.Rollback()
}
func TestRow(t *testing.T) {
user1 := User{Name: "RowUser1", Age: 1, Birthday: parseTime("2000-1-1")}
user2 := User{Name: "RowUser2", Age: 10, Birthday: parseTime("2010-1-1")}
user3 := User{Name: "RowUser3", Age: 20, Birthday: parseTime("2020-1-1")}
DB.Save(&user1).Save(&user2).Save(&user3)
row := DB.Table("users").Where("name = ?", user2.Name).Select("age").Row()
var age int64
row.Scan(&age)
if age != 10 {
t.Errorf("Scan with Row")
}
}
func TestRows(t *testing.T) {
user1 := User{Name: "RowsUser1", Age: 1, Birthday: parseTime("2000-1-1")}
user2 := User{Name: "RowsUser2", Age: 10, Birthday: parseTime("2010-1-1")}
user3 := User{Name: "RowsUser3", Age: 20, Birthday: parseTime("2020-1-1")}
DB.Save(&user1).Save(&user2).Save(&user3)
rows, err := DB.Table("users").Where("name = ? or name = ?", user2.Name, user3.Name).Select("name, age").Rows()
if err != nil {
t.Errorf("Not error should happen, got %v", err)
}
count := 0
for rows.Next() {
var name string
var age int64
rows.Scan(&name, &age)
count++
}
if count != 2 {
t.Errorf("Should found two records")
}
}
func TestScanRows(t *testing.T) {
user1 := User{Name: "ScanRowsUser1", Age: 1, Birthday: parseTime("2000-1-1")}
user2 := User{Name: "ScanRowsUser2", Age: 10, Birthday: parseTime("2010-1-1")}
user3 := User{Name: "ScanRowsUser3", Age: 20, Birthday: parseTime("2020-1-1")}
DB.Save(&user1).Save(&user2).Save(&user3)
rows, err := DB.Table("users").Where("name = ? or name = ?", user2.Name, user3.Name).Select("name, age").Rows()
if err != nil {
t.Errorf("Not error should happen, got %v", err)
}
type Result struct {
Name string
Age int
}
var results []Result
for rows.Next() {
var result Result
if err := DB.ScanRows(rows, &result); err != nil {
t.Errorf("should get no error, but got %v", err)
}
results = append(results, result)
}
if !reflect.DeepEqual(results, []Result{{Name: "ScanRowsUser2", Age: 10}, {Name: "ScanRowsUser3", Age: 20}}) {
t.Errorf("Should find expected results")
}
}
func TestScan(t *testing.T) {
user1 := User{Name: "ScanUser1", Age: 1, Birthday: parseTime("2000-1-1")}
user2 := User{Name: "ScanUser2", Age: 10, Birthday: parseTime("2010-1-1")}
user3 := User{Name: "ScanUser3", Age: 20, Birthday: parseTime("2020-1-1")}
DB.Save(&user1).Save(&user2).Save(&user3)
type result struct {
Name string
Age int
}
var res result
DB.Table("users").Select("name, age").Where("name = ?", user3.Name).Scan(&res)
if res.Name != user3.Name {
t.Errorf("Scan into struct should work")
}
var doubleAgeRes = &result{}
if err := DB.Table("users").Select("age + age as age").Where("name = ?", user3.Name).Scan(&doubleAgeRes).Error; err != nil {
t.Errorf("Scan to pointer of pointer")
}
if doubleAgeRes.Age != res.Age*2 {
t.Errorf("Scan double age as age")
}
var ress []result
DB.Table("users").Select("name, age").Where("name in (?)", []string{user2.Name, user3.Name}).Scan(&ress)
if len(ress) != 2 || ress[0].Name != user2.Name || ress[1].Name != user3.Name {
t.Errorf("Scan into struct map")
}
}
func TestRaw(t *testing.T) {
user1 := User{Name: "ExecRawSqlUser1", Age: 1, Birthday: parseTime("2000-1-1")}
user2 := User{Name: "ExecRawSqlUser2", Age: 10, Birthday: parseTime("2010-1-1")}
user3 := User{Name: "ExecRawSqlUser3", Age: 20, Birthday: parseTime("2020-1-1")}
DB.Save(&user1).Save(&user2).Save(&user3)
type result struct {
Name string
Email string
}
var ress []result
DB.Raw("SELECT name, age FROM users WHERE name = ? or name = ?", user2.Name, user3.Name).Scan(&ress)
if len(ress) != 2 || ress[0].Name != user2.Name || ress[1].Name != user3.Name {
t.Errorf("Raw with scan")
}
rows, _ := DB.Raw("select name, age from users where name = ?", user3.Name).Rows()
count := 0
for rows.Next() {
count++
}
if count != 1 {
t.Errorf("Raw with Rows should find one record with name 3")
}
DB.Exec("update users set name=? where name in (?)", "jinzhu", []string{user1.Name, user2.Name, user3.Name})
if DB.Where("name in (?)", []string{user1.Name, user2.Name, user3.Name}).First(&User{}).Error != gorm.ErrRecordNotFound {
t.Error("Raw sql to update records")
}
}
func TestGroup(t *testing.T) {
rows, err := DB.Select("name").Table("users").Group("name").Rows()
if err == nil {
defer rows.Close()
for rows.Next() {
var name string
rows.Scan(&name)
}
} else {
t.Errorf("Should not raise any error")
}
}
func TestJoins(t *testing.T) {
var user = User{
Name: "joins",
CreditCard: CreditCard{Number: "411111111111"},
Emails: []Email{{Email: "[email protected]"}, {Email: "[email protected]"}},
}
DB.Save(&user)
var users1 []User
DB.Joins("left join emails on emails.user_id = users.id").Where("name = ?", "joins").Find(&users1)
if len(users1) != 2 {
t.Errorf("should find two users using left join")
}
var users2 []User
DB.Joins("left join emails on emails.user_id = users.id AND emails.email = ?", "[email protected]").Where("name = ?", "joins").First(&users2)
if len(users2) != 1 {
t.Errorf("should find one users using left join with conditions")
}
var users3 []User
DB.Joins("join emails on emails.user_id = users.id AND emails.email = ?", "[email protected]").Joins("join credit_cards on credit_cards.user_id = users.id AND credit_cards.number = ?", "411111111111").Where("name = ?", "joins").First(&users3)
if len(users3) != 1 {
t.Errorf("should find one users using multiple left join conditions")
}
var users4 []User
DB.Joins("join emails on emails.user_id = users.id AND emails.email = ?", "[email protected]").Joins("join credit_cards on credit_cards.user_id = users.id AND credit_cards.number = ?", "422222222222").Where("name = ?", "joins").First(&users4)
if len(users4) != 0 {
t.Errorf("should find no user when searching with unexisting credit card")
}
var users5 []User
db5 := DB.Joins("join emails on emails.user_id = users.id AND emails.email = ?", "[email protected]").Joins("join credit_cards on credit_cards.user_id = users.id AND credit_cards.number = ?", "411111111111").Where(User{Id: 1}).Where(Email{Id: 1}).Not(Email{Id: 10}).First(&users5)
if db5.Error != nil {
t.Errorf("Should not raise error for join where identical fields in different tables. Error: %s", db5.Error.Error())
}
}
type JoinedIds struct {
UserID int64 `gorm:"column:id"`
BillingAddressID int64 `gorm:"column:id"`
EmailID int64 `gorm:"column:id"`
}
func TestScanIdenticalColumnNames(t *testing.T) {
var user = User{
Name: "joinsIds",
Email: "[email protected]",
BillingAddress: Address{
Address1: "One Park Place",
},
Emails: []Email{{Email: "[email protected]"}, {Email: "[email protected]"}},
}
DB.Save(&user)
var users []JoinedIds
DB.Select("users.id, addresses.id, emails.id").Table("users").
Joins("left join addresses on users.billing_address_id = addresses.id").
Joins("left join emails on emails.user_id = users.id").
Where("name = ?", "joinsIds").Scan(&users)
if len(users) != 2 {
t.Fatal("should find two rows using left join")
}
if user.Id != users[0].UserID {
t.Errorf("Expected result row to contain UserID %d, but got %d", user.Id, users[0].UserID)
}
if user.Id != users[1].UserID {
t.Errorf("Expected result row to contain UserID %d, but got %d", user.Id, users[1].UserID)
}
if user.BillingAddressID.Int64 != users[0].BillingAddressID {
t.Errorf("Expected result row to contain BillingAddressID %d, but got %d", user.BillingAddressID.Int64, users[0].BillingAddressID)
}
if user.BillingAddressID.Int64 != users[1].BillingAddressID {
t.Errorf("Expected result row to contain BillingAddressID %d, but got %d", user.BillingAddressID.Int64, users[0].BillingAddressID)
}
if users[0].EmailID == users[1].EmailID {
t.Errorf("Email ids should be unique. Got %d and %d", users[0].EmailID, users[1].EmailID)
}
if int64(user.Emails[0].Id) != users[0].EmailID && int64(user.Emails[1].Id) != users[0].EmailID {
t.Errorf("Expected result row ID to be either %d or %d, but was %d", user.Emails[0].Id, user.Emails[1].Id, users[0].EmailID)
}
if int64(user.Emails[0].Id) != users[1].EmailID && int64(user.Emails[1].Id) != users[1].EmailID {
t.Errorf("Expected result row ID to be either %d or %d, but was %d", user.Emails[0].Id, user.Emails[1].Id, users[1].EmailID)
}
}
func TestJoinsWithSelect(t *testing.T) {
type result struct {
Name string
Email string
}
user := User{
Name: "joins_with_select",
Emails: []Email{{Email: "[email protected]"}, {Email: "[email protected]"}},
}
DB.Save(&user)
var results []result
DB.Table("users").Select("name, emails.email").Joins("left join emails on emails.user_id = users.id").Where("name = ?", "joins_with_select").Scan(&results)
sort.Slice(results, func(i, j int) bool {
return strings.Compare(results[i].Email, results[j].Email) < 0
})
if len(results) != 2 || results[0].Email != "[email protected]" || results[1].Email != "[email protected]" {
t.Errorf("Should find all two emails with Join select")
}
}
func TestHaving(t *testing.T) {
rows, err := DB.Select("name, count(*) as total").Table("users").Group("name").Having("name IN (?)", []string{"2", "3"}).Rows()
if err == nil {
defer rows.Close()
for rows.Next() {
var name string
var total int64
rows.Scan(&name, &total)
if name == "2" && total != 1 {
t.Errorf("Should have one user having name 2")
}
if name == "3" && total != 2 {
t.Errorf("Should have two users having name 3")
}
}
} else {
t.Errorf("Should not raise any error")
}
}
func TestQueryBuilderSubselectInWhere(t *testing.T) {
user := User{Name: "query_expr_select_ruser1", Email: "[email protected]", Age: 32}
DB.Save(&user)
user = User{Name: "query_expr_select_ruser2", Email: "[email protected]", Age: 16}
DB.Save(&user)
user = User{Name: "query_expr_select_ruser3", Email: "[email protected]", Age: 64}
DB.Save(&user)
user = User{Name: "query_expr_select_ruser4", Email: "[email protected]", Age: 128}
DB.Save(&user)
var users []User
DB.Select("*").Where("name IN (?)", DB.
Select("name").Table("users").Where("name LIKE ?", "query_expr_select%").QueryExpr()).Find(&users)
if len(users) != 4 {
t.Errorf("Four users should be found, instead found %d", len(users))
}
DB.Select("*").Where("name LIKE ?", "query_expr_select%").Where("age >= (?)", DB.
Select("AVG(age)").Table("users").Where("name LIKE ?", "query_expr_select%").QueryExpr()).Find(&users)
if len(users) != 2 {
t.Errorf("Two users should be found, instead found %d", len(users))
}
}
func TestQueryBuilderRawQueryWithSubquery(t *testing.T) {
user := User{Name: "subquery_test_user1", Age: 10}
DB.Save(&user)
user = User{Name: "subquery_test_user2", Age: 11}
DB.Save(&user)
user = User{Name: "subquery_test_user3", Age: 12}
DB.Save(&user)
var count int
err := DB.Raw("select count(*) from (?) tmp",
DB.Table("users").
Select("name").
Where("age >= ? and name in (?)", 10, []string{"subquery_test_user1", "subquery_test_user2"}).
Group("name").
QueryExpr(),
).Count(&count).Error
if err != nil {
t.Errorf("Expected to get no errors, but got %v", err)
}
if count != 2 {
t.Errorf("Row count must be 2, instead got %d", count)
}
err = DB.Raw("select count(*) from (?) tmp",
DB.Table("users").
Select("name").
Where("name LIKE ?", "subquery_test%").
Not("age <= ?", 10).Not("name in (?)", []string{"subquery_test_user1", "subquery_test_user2"}).
Group("name").
QueryExpr(),
).Count(&count).Error
if err != nil {
t.Errorf("Expected to get no errors, but got %v", err)
}
if count != 1 {
t.Errorf("Row count must be 1, instead got %d", count)
}
}
func TestQueryBuilderSubselectInHaving(t *testing.T) {
user := User{Name: "query_expr_having_ruser1", Email: "[email protected]", Age: 64}
DB.Save(&user)
user = User{Name: "query_expr_having_ruser2", Email: "[email protected]", Age: 128}
DB.Save(&user)
user = User{Name: "query_expr_having_ruser3", Email: "[email protected]", Age: 64}
DB.Save(&user)
user = User{Name: "query_expr_having_ruser4", Email: "[email protected]", Age: 128}
DB.Save(&user)
var users []User
DB.Select("AVG(age) as avgage").Where("name LIKE ?", "query_expr_having_%").Group("email").Having("AVG(age) > (?)", DB.
Select("AVG(age)").Where("name LIKE ?", "query_expr_having_%").Table("users").QueryExpr()).Find(&users)
if len(users) != 1 {
t.Errorf("Two user group should be found, instead found %d", len(users))
}
}
func DialectHasTzSupport() bool {
// NB: mssql and FoundationDB do not support time zones.
if dialect := os.Getenv("GORM_DIALECT"); dialect == "foundation" {
return false
}
return true
}
func TestTimeWithZone(t *testing.T) {
var format = "2006-01-02 15:04:05 -0700"
var times []time.Time
GMT8, _ := time.LoadLocation("Asia/Shanghai")
times = append(times, time.Date(2013, 02, 19, 1, 51, 49, 123456789, GMT8))
times = append(times, time.Date(2013, 02, 18, 17, 51, 49, 123456789, time.UTC))
for index, vtime := range times {
name := "time_with_zone_" + strconv.Itoa(index)
user := User{Name: name, Birthday: &vtime}
if !DialectHasTzSupport() {
// If our driver dialect doesn't support TZ's, just use UTC for everything here.
utcBirthday := user.Birthday.UTC()
user.Birthday = &utcBirthday
}
DB.Save(&user)
expectedBirthday := "2013-02-18 17:51:49 +0000"
foundBirthday := user.Birthday.UTC().Format(format)
if foundBirthday != expectedBirthday {
t.Errorf("User's birthday should not be changed after save for name=%s, expected bday=%+v but actual value=%+v", name, expectedBirthday, foundBirthday)
}
var findUser, findUser2, findUser3 User
DB.First(&findUser, "name = ?", name)
foundBirthday = findUser.Birthday.UTC().Format(format)
if foundBirthday != expectedBirthday {
t.Errorf("User's birthday should not be changed after find for name=%s, expected bday=%+v but actual value=%+v", name, expectedBirthday, foundBirthday)
}
if DB.Where("id = ? AND birthday >= ?", findUser.Id, user.Birthday.Add(-time.Minute)).First(&findUser2).RecordNotFound() {
t.Errorf("User should be found")
}
if !DB.Where("id = ? AND birthday >= ?", findUser.Id, user.Birthday.Add(time.Minute)).First(&findUser3).RecordNotFound() {
t.Errorf("User should not be found")
}
}
}
func TestHstore(t *testing.T) {
type Details struct {
Id int64
Bulk postgres.Hstore
}
if dialect := os.Getenv("GORM_DIALECT"); dialect != "postgres" {
t.Skip()
}
if err := DB.Exec("CREATE EXTENSION IF NOT EXISTS hstore").Error; err != nil {
fmt.Println("\033[31mHINT: Must be superuser to create hstore extension (ALTER USER gorm WITH SUPERUSER;)\033[0m")
panic(fmt.Sprintf("No error should happen when create hstore extension, but got %+v", err))
}
DB.Exec("drop table details")
if err := DB.CreateTable(&Details{}).Error; err != nil {
panic(fmt.Sprintf("No error should happen when create table, but got %+v", err))
}
bankAccountId, phoneNumber, opinion := "123456", "14151321232", "sharkbait"
bulk := map[string]*string{
"bankAccountId": &bankAccountId,
"phoneNumber": &phoneNumber,
"opinion": &opinion,
}
d := Details{Bulk: bulk}
DB.Save(&d)
var d2 Details
if err := DB.First(&d2).Error; err != nil {
t.Errorf("Got error when tried to fetch details: %+v", err)
}
for k := range bulk {
if r, ok := d2.Bulk[k]; ok {
if res, _ := bulk[k]; *res != *r {
t.Errorf("Details should be equal")
}
} else {
t.Errorf("Details should be existed")
}
}
}
func TestSetAndGet(t *testing.T) {
if value, ok := DB.Set("hello", "world").Get("hello"); !ok {
t.Errorf("Should be able to get setting after set")
} else {
if value.(string) != "world" {
t.Errorf("Setted value should not be changed")
}
}
if _, ok := DB.Get("non_existing"); ok {
t.Errorf("Get non existing key should return error")
}
}
func TestCompatibilityMode(t *testing.T) {
DB, _ := gorm.Open("testdb", "")
testdb.SetQueryFunc(func(query string) (driver.Rows, error) {
columns := []string{"id", "name", "age"}
result := `
1,Tim,20
2,Joe,25
3,Bob,30
`
return testdb.RowsFromCSVString(columns, result), nil
})
var users []User
DB.Find(&users)
if (users[0].Name != "Tim") || len(users) != 3 {
t.Errorf("Unexcepted result returned")
}
}
func TestOpenExistingDB(t *testing.T) {
DB.Save(&User{Name: "jnfeinstein"})
dialect := os.Getenv("GORM_DIALECT")
db, err := gorm.Open(dialect, DB.DB())
if err != nil {
t.Errorf("Should have wrapped the existing DB connection")
}
var user User
if db.Where("name = ?", "jnfeinstein").First(&user).Error == gorm.ErrRecordNotFound {
t.Errorf("Should have found existing record")
}
}
func TestDdlErrors(t *testing.T) {
var err error
if err = DB.Close(); err != nil {
t.Errorf("Closing DDL test db connection err=%s", err)
}
defer func() {
// Reopen DB connection.
if DB, err = OpenTestConnection(); err != nil {
t.Fatalf("Failed re-opening db connection: %s", err)
}
}()
if err := DB.Find(&User{}).Error; err == nil {
t.Errorf("Expected operation on closed db to produce an error, but err was nil")
}
}
func TestOpenWithOneParameter(t *testing.T) {
db, err := gorm.Open("dialect")
if db != nil {
t.Error("Open with one parameter returned non nil for db")
}
if err == nil {
t.Error("Open with one parameter returned err as nil")
}
}
func TestSaveAssociations(t *testing.T) {
db := DB.New()
deltaAddressCount := 0
if err := db.Model(&Address{}).Count(&deltaAddressCount).Error; err != nil {
t.Errorf("failed to fetch address count")
t.FailNow()
}
placeAddress := &Address{
Address1: "somewhere on earth",
}
ownerAddress1 := &Address{
Address1: "near place address",
}
ownerAddress2 := &Address{
Address1: "address2",
}
db.Create(placeAddress)
addressCountShouldBe := func(t *testing.T, expectedCount int) {
countFromDB := 0
t.Helper()
err := db.Model(&Address{}).Count(&countFromDB).Error
if err != nil {
t.Error("failed to fetch address count")
}
if countFromDB != expectedCount {
t.Errorf("address count mismatch: %d", countFromDB)
}
}
addressCountShouldBe(t, deltaAddressCount+1)
// owner address should be created, place address should be reused
place1 := &Place{
PlaceAddressID: placeAddress.ID,
PlaceAddress: placeAddress,
OwnerAddress: ownerAddress1,
}
err := db.Create(place1).Error
if err != nil {
t.Errorf("failed to store place: %s", err.Error())
}
addressCountShouldBe(t, deltaAddressCount+2)
// owner address should be created again, place address should be reused
place2 := &Place{
PlaceAddressID: placeAddress.ID,
PlaceAddress: &Address{
ID: 777,
Address1: "address1",
},
OwnerAddress: ownerAddress2,
OwnerAddressID: 778,
}
err = db.Create(place2).Error
if err != nil {
t.Errorf("failed to store place: %s", err.Error())
}
addressCountShouldBe(t, deltaAddressCount+3)
count := 0
db.Model(&Place{}).Where(&Place{
PlaceAddressID: placeAddress.ID,
OwnerAddressID: ownerAddress1.ID,
}).Count(&count)
if count != 1 {
t.Errorf("only one instance of (%d, %d) should be available, found: %d",
placeAddress.ID, ownerAddress1.ID, count)
}
db.Model(&Place{}).Where(&Place{
PlaceAddressID: placeAddress.ID,
OwnerAddressID: ownerAddress2.ID,
}).Count(&count)
if count != 1 {
t.Errorf("only one instance of (%d, %d) should be available, found: %d",
placeAddress.ID, ownerAddress2.ID, count)
}
db.Model(&Place{}).Where(&Place{
PlaceAddressID: placeAddress.ID,
}).Count(&count)
if count != 2 {
t.Errorf("two instances of (%d) should be available, found: %d",
placeAddress.ID, count)
}
}
func TestBlockGlobalUpdate(t *testing.T) {
db := DB.New()
db.Create(&Toy{Name: "Stuffed Animal", OwnerType: "Nobody"})
err := db.Model(&Toy{}).Update("OwnerType", "Human").Error
if err != nil {
t.Error("Unexpected error on global update")
}
err = db.Delete(&Toy{}).Error
if err != nil {
t.Error("Unexpected error on global delete")
}
db.BlockGlobalUpdate(true)
db.Create(&Toy{Name: "Stuffed Animal", OwnerType: "Nobody"})
err = db.Model(&Toy{}).Update("OwnerType", "Human").Error
if err == nil {
t.Error("Expected error on global update")
}
err = db.Model(&Toy{}).Where(&Toy{OwnerType: "Martian"}).Update("OwnerType", "Astronaut").Error
if err != nil {
t.Error("Unxpected error on conditional update")
}
err = db.Delete(&Toy{}).Error
if err == nil {
t.Error("Expected error on global delete")
}
err = db.Where(&Toy{OwnerType: "Martian"}).Delete(&Toy{}).Error
if err != nil {
t.Error("Unexpected error on conditional delete")
}
}
func TestCountWithHaving(t *testing.T) {
db := DB.New()
db.Delete(User{})
defer db.Delete(User{})
DB.Create(getPreparedUser("user1", "pluck_user"))
DB.Create(getPreparedUser("user2", "pluck_user"))
user3 := getPreparedUser("user3", "pluck_user")
user3.Languages = []Language{}
DB.Create(user3)
var count int
err := db.Model(User{}).Select("users.id").
Joins("LEFT JOIN user_languages ON user_languages.user_id = users.id").
Joins("LEFT JOIN languages ON user_languages.language_id = languages.id").
Group("users.id").Having("COUNT(languages.id) > 1").Count(&count).Error
if err != nil {
t.Error("Unexpected error on query count with having")
}
if count != 2 {
t.Error("Unexpected result on query count with having")
}
}
func TestPluck(t *testing.T) {
db := DB.New()
db.Delete(User{})
defer db.Delete(User{})
DB.Create(&User{Id: 1, Name: "user1"})
DB.Create(&User{Id: 2, Name: "user2"})
DB.Create(&User{Id: 3, Name: "user3"})
var ids []int64
err := db.Model(User{}).Order("id").Pluck("id", &ids).Error
if err != nil {
t.Error("Unexpected error on pluck")
}
if len(ids) != 3 || ids[0] != 1 || ids[1] != 2 || ids[2] != 3 {
t.Error("Unexpected result on pluck")
}
err = db.Model(User{}).Order("id").Pluck("id", &ids).Error
if err != nil {
t.Error("Unexpected error on pluck again")
}
if len(ids) != 3 || ids[0] != 1 || ids[1] != 2 || ids[2] != 3 {
t.Error("Unexpected result on pluck again")
}
}
func TestCountWithQueryOption(t *testing.T) {
db := DB.New()
db.Delete(User{})
defer db.Delete(User{})
DB.Create(&User{Name: "user1"})
DB.Create(&User{Name: "user2"})
DB.Create(&User{Name: "user3"})
var count int
err := db.Model(User{}).Select("users.id").
Set("gorm:query_option", "WHERE users.name='user2'").
Count(&count).Error
if err != nil {
t.Error("Unexpected error on query count with query_option")
}
if count != 1 {
t.Error("Unexpected result on query count with query_option")
}
}
func TestSubQueryWithQueryOption(t *testing.T) {
db := DB.New()
subQuery := db.Model(User{}).Select("users.id").
Set("gorm:query_option", "WHERE users.name='user2'").
SubQuery()
matched, _ := regexp.MatchString(
`^&{.+\s+WHERE users\.name='user2'.*\s\[]}$`, fmt.Sprint(subQuery))
if !matched {
t.Error("Unexpected result of SubQuery with query_option")
}
}
func TestQueryExprWithQueryOption(t *testing.T) {
db := DB.New()
queryExpr := db.Model(User{}).Select("users.id").
Set("gorm:query_option", "WHERE users.name='user2'").
QueryExpr()
matched, _ := regexp.MatchString(
`^&{.+\s+WHERE users\.name='user2'.*\s\[]}$`, fmt.Sprint(queryExpr))
if !matched {
t.Error("Unexpected result of QueryExpr with query_option")
}
}
func TestQueryHint1(t *testing.T) {
db := DB.New()
_, err := db.Model(User{}).Raw("select 1").Rows()
if err != nil {
t.Error("Unexpected error on query count with query_option")
}
}
func TestQueryHint2(t *testing.T) {
type TestStruct struct {
ID string `gorm:"primary_key"`
Name string
}
DB.DropTable(&TestStruct{})
DB.AutoMigrate(&TestStruct{})
data := TestStruct{ID: "uuid", Name: "hello"}
if err := DB.Set("gorm:query_hint", "/*master*/").Save(&data).Error; err != nil {
t.Error("Unexpected error on query count with query_option")
}
}
func TestFloatColumnPrecision(t *testing.T) {
if dialect := os.Getenv("GORM_DIALECT"); dialect != "mysql" && dialect != "sqlite" {
t.Skip()
}
type FloatTest struct {
ID string `gorm:"primary_key"`
FloatValue float64 `gorm:"column:float_value" sql:"type:float(255,5);"`
}
DB.DropTable(&FloatTest{})
DB.AutoMigrate(&FloatTest{})
data := FloatTest{ID: "uuid", FloatValue: 112.57315}
if err := DB.Save(&data).Error; err != nil || data.ID != "uuid" || data.FloatValue != 112.57315 {
t.Errorf("Float value should not lose precision")
}
}
func TestWhereUpdates(t *testing.T) {
type OwnerEntity struct {
gorm.Model
OwnerID uint
OwnerType string
}
type SomeEntity struct {
gorm.Model
Name string
OwnerEntity OwnerEntity `gorm:"polymorphic:Owner"`
}
DB.DropTable(&SomeEntity{})
DB.AutoMigrate(&SomeEntity{})
a := SomeEntity{Name: "test"}
DB.Model(&a).Where(a).Updates(SomeEntity{Name: "test2"})
}
func BenchmarkGorm(b *testing.B) {
b.N = 2000
for x := 0; x < b.N; x++ {
e := strconv.Itoa(x) + "[email protected]"
now := time.Now()
email := EmailWithIdx{Email: e, UserAgent: "pc", RegisteredAt: &now}
// Insert
DB.Save(&email)
// Query
DB.First(&EmailWithIdx{}, "email = ?", e)
// Update
DB.Model(&email).UpdateColumn("email", "new-"+e)
// Delete
DB.Delete(&email)
}
}
func BenchmarkRawSql(b *testing.B) {
DB, _ := sql.Open("postgres", "user=gorm DB.ame=gorm sslmode=disable")
DB.SetMaxIdleConns(10)
insertSql := "INSERT INTO emails (user_id,email,user_agent,registered_at,created_at,updated_at) VALUES ($1,$2,$3,$4,$5,$6) RETURNING id"
querySql := "SELECT * FROM emails WHERE email = $1 ORDER BY id LIMIT 1"
updateSql := "UPDATE emails SET email = $1, updated_at = $2 WHERE id = $3"
deleteSql := "DELETE FROM orders WHERE id = $1"
b.N = 2000
for x := 0; x < b.N; x++ {
var id int64
e := strconv.Itoa(x) + "[email protected]"
now := time.Now()
email := EmailWithIdx{Email: e, UserAgent: "pc", RegisteredAt: &now}
// Insert
DB.QueryRow(insertSql, email.UserId, email.Email, email.UserAgent, email.RegisteredAt, time.Now(), time.Now()).Scan(&id)
// Query
rows, _ := DB.Query(querySql, email.Email)
rows.Close()
// Update
DB.Exec(updateSql, "new-"+e, time.Now(), id)
// Delete
DB.Exec(deleteSql, id)
}
}
func parseTime(str string) *time.Time {
t := now.New(time.Now().UTC()).MustParse(str)
return &t
}
| [
"\"GORM_DSN\"",
"\"GORM_DIALECT\"",
"\"DEBUG\"",
"\"GORM_DIALECT\"",
"\"GORM_DIALECT\"",
"\"GORM_DIALECT\"",
"\"GORM_DIALECT\"",
"\"GORM_DIALECT\""
]
| []
| [
"GORM_DIALECT",
"GORM_DSN",
"DEBUG"
]
| [] | ["GORM_DIALECT", "GORM_DSN", "DEBUG"] | go | 3 | 0 | |
server/settings/staging.py | """ Do not put secrets in this file. This file is public.
For the staging environment (using Dokku)
"""
import os
from server.settings._base import Config as BaseConfig, initialize_config
from server.settings._prodbase import Config as ProdBaseConfig
@initialize_config
class Config(ProdBaseConfig, BaseConfig):
ENV = 'staging'
MAX_CONTENT_LENGTH = 10 * 1024 * 1024 # Max Upload Size is 10MB
STORAGE_CONTAINER = os.environ.get('STORAGE_CONTAINER', os.path.abspath("./local-storage"))
@classmethod
def get_default_db_url(cls):
return os.getenv('SQLALCHEMY_URL', 'sqlite:///../oksqlite.db')
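# Illustrative sketch (assumption, not part of the original settings module): under Dokku,
# these settings would typically be supplied through environment variables, e.g.
#   STORAGE_CONTAINER=/var/lib/dokku/data/storage/ok-staging
#   SQLALCHEMY_URL=postgresql://ok:secret@dokku-postgres-ok:5432/ok
# Without them, the config falls back to ./local-storage and a local SQLite file.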
| []
| []
| [
"SQLALCHEMY_URL",
"STORAGE_CONTAINER"
]
| [] | ["SQLALCHEMY_URL", "STORAGE_CONTAINER"] | python | 2 | 0 | |
StudiOCR/wsl.py | import os
def get_wsl_distro():
# This should be defined for any WSL distro. If it isn't, we aren't in WSL.
if 'WSL_DISTRO_NAME' in os.environ:
return os.environ['WSL_DISTRO_NAME']
else:
return None
def get_wsl_host():
# If we aren't in WSL, return none.
if not get_wsl_distro():
return None
result = None
# If we're in WSL2...
if 'WSL_INTEROP' in os.environ:
with open("/etc/resolv.conf", "r") as resolv_file:
result = resolv_file.readlines()[-1].split(' ')[1].strip()
resolv_file.close()
return result
# Otherwise, we're in WSL1.
else:
return "localhost"
def set_display_to_host(major=0, minor=None):
if get_wsl_distro():
os.environ['DISPLAY'] = (get_wsl_host() + ":%d" %
major + (".%d" % minor if minor != None else ""))
| []
| []
| [
"WSL_DISTRO_NAME",
"DISPLAY"
]
| [] | ["WSL_DISTRO_NAME", "DISPLAY"] | python | 2 | 0 | |
tests/testflows/helpers/argparser.py | import os
def argparser(parser):
"""Default argument parser for regressions.
"""
parser.add_argument("--local",
action="store_true",
help="run regression in local mode", default=False)
parser.add_argument("--clickhouse-binary-path",
type=str, dest="clickhouse_binary_path",
help="path to ClickHouse binary, default: /usr/bin/clickhouse", metavar="path",
default=os.getenv("CLICKHOUSE_TESTS_SERVER_BIN_PATH", "/usr/bin/clickhouse")) | []
| []
| [
"CLICKHOUSE_TESTS_SERVER_BIN_PATH"
]
| [] | ["CLICKHOUSE_TESTS_SERVER_BIN_PATH"] | python | 1 | 0 | |
src/cmd/dist/buildtool.go | // Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Build toolchain using Go 1.4.
//
// The general strategy is to copy the source files we need into
// a new GOPATH workspace, adjust import paths appropriately,
// invoke the Go 1.4 go command to build those sources,
// and then copy the binaries back.
package main
import (
"fmt"
"os"
"path/filepath"
"regexp"
"runtime"
"strings"
)
// bootstrapDirs is a list of directories holding code that must be
// compiled with a Go 1.4 toolchain to produce the bootstrapTargets.
// All directories in this list are relative to and must be below $GOROOT/src.
//
// The list has two kinds of entries: names beginning with cmd/ with
// no other slashes, which are commands, and other paths, which are packages
// supporting the commands. Packages in the standard library can be listed
// if a newer copy needs to be substituted for the Go 1.4 copy when used
// by the command packages. Paths ending with /... automatically
// include all packages within subdirectories as well.
// These will be imported during bootstrap as bootstrap/name, like bootstrap/math/big.
var bootstrapDirs = []string{
"cmd/asm",
"cmd/asm/internal/...",
"cmd/cgo",
"cmd/compile",
"cmd/compile/internal/...",
"cmd/internal/archive",
"cmd/internal/bio",
"cmd/internal/codesign",
"cmd/internal/dwarf",
"cmd/internal/edit",
"cmd/internal/gcprog",
"cmd/internal/goobj",
"cmd/internal/obj/...",
"cmd/internal/objabi",
"cmd/internal/pkgpath",
"cmd/internal/quoted",
"cmd/internal/src",
"cmd/internal/sys",
"cmd/link",
"cmd/link/internal/...",
"compress/flate",
"compress/zlib",
"container/heap",
"debug/dwarf",
"debug/elf",
"debug/macho",
"debug/pe",
"go/constant",
"internal/buildcfg",
"internal/goexperiment",
"internal/goversion",
"internal/race",
"internal/unsafeheader",
"internal/xcoff",
"math/big",
"math/bits",
"sort",
"strconv",
}
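// Illustrative note, not part of the original source: when these sources are copied into the
// bootstrap workspace, their import paths are rewritten so that, for example,
//
//   import "cmd/internal/obj"
//
// becomes
//
//   import "bootstrap/cmd/internal/obj"
//
// which lets the Go 1.4 toolchain resolve the packages inside $GOROOT/pkg/bootstrap.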
// File prefixes that are ignored by go/build anyway, and cause
// problems with editor generated temporary files (#18931).
var ignorePrefixes = []string{
".",
"_",
"#",
}
// File suffixes that use build tags introduced since Go 1.4.
// These must not be copied into the bootstrap build directory.
// Also ignore test files.
var ignoreSuffixes = []string{
"_arm64.s",
"_arm64.go",
"_riscv64.s",
"_riscv64.go",
"_wasm.s",
"_wasm.go",
"_test.s",
"_test.go",
}
var tryDirs = []string{
"sdk/go1.17",
"go1.17",
}
func bootstrapBuildTools() {
goroot_bootstrap := os.Getenv("GOROOT_BOOTSTRAP")
if goroot_bootstrap == "" {
home := os.Getenv("HOME")
goroot_bootstrap = pathf("%s/go1.4", home)
for _, d := range tryDirs {
if p := pathf("%s/%s", home, d); isdir(p) {
goroot_bootstrap = p
}
}
}
xprintf("Building Go toolchain1 using %s.\n", goroot_bootstrap)
mkbuildcfg(pathf("%s/src/internal/buildcfg/zbootstrap.go", goroot))
mkobjabi(pathf("%s/src/cmd/internal/objabi/zbootstrap.go", goroot))
// Use $GOROOT/pkg/bootstrap as the bootstrap workspace root.
// We use a subdirectory of $GOROOT/pkg because that's the
// space within $GOROOT where we store all generated objects.
// We could use a temporary directory outside $GOROOT instead,
// but it is easier to debug on failure if the files are in a known location.
workspace := pathf("%s/pkg/bootstrap", goroot)
xremoveall(workspace)
xatexit(func() { xremoveall(workspace) })
base := pathf("%s/src/bootstrap", workspace)
xmkdirall(base)
// Copy source code into $GOROOT/pkg/bootstrap and rewrite import paths.
writefile("module bootstrap\n", pathf("%s/%s", base, "go.mod"), 0)
for _, dir := range bootstrapDirs {
recurse := strings.HasSuffix(dir, "/...")
dir = strings.TrimSuffix(dir, "/...")
filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if err != nil {
fatalf("walking bootstrap dirs failed: %v: %v", path, err)
}
name := filepath.Base(path)
src := pathf("%s/src/%s", goroot, path)
dst := pathf("%s/%s", base, path)
if info.IsDir() {
if !recurse && path != dir || name == "testdata" {
return filepath.SkipDir
}
xmkdirall(dst)
if path == "cmd/cgo" {
// Write to src because we need the file both for bootstrap
// and for later in the main build.
mkzdefaultcc("", pathf("%s/zdefaultcc.go", src))
mkzdefaultcc("", pathf("%s/zdefaultcc.go", dst))
}
return nil
}
for _, pre := range ignorePrefixes {
if strings.HasPrefix(name, pre) {
return nil
}
}
for _, suf := range ignoreSuffixes {
if strings.HasSuffix(name, suf) {
return nil
}
}
text := bootstrapRewriteFile(src)
writefile(text, dst, 0)
return nil
})
}
// Set up environment for invoking Go 1.4 go command.
// GOROOT points at Go 1.4 GOROOT,
// GOPATH points at our bootstrap workspace,
// GOBIN is empty, so that binaries are installed to GOPATH/bin,
// and GOOS, GOHOSTOS, GOARCH, and GOHOSTARCH are empty,
// so that Go 1.4 builds whatever kind of binary it knows how to build.
// Restore GOROOT, GOPATH, and GOBIN when done.
// Don't bother with GOOS, GOHOSTOS, GOARCH, and GOHOSTARCH,
// because setup will take care of those when bootstrapBuildTools returns.
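// (Added note, not in the original source) The defer-then-set pattern below
// relies on deferred call arguments being evaluated immediately: each
// os.Getenv snapshot is taken here, and the deferred os.Setenv restores that
// value when bootstrapBuildTools returns.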
defer os.Setenv("GOROOT", os.Getenv("GOROOT"))
os.Setenv("GOROOT", goroot_bootstrap)
defer os.Setenv("GOPATH", os.Getenv("GOPATH"))
os.Setenv("GOPATH", workspace)
defer os.Setenv("GOBIN", os.Getenv("GOBIN"))
os.Setenv("GOBIN", "")
os.Setenv("GOOS", "")
os.Setenv("GOHOSTOS", "")
os.Setenv("GOARCH", "")
os.Setenv("GOHOSTARCH", "")
// Run Go 1.4 to build binaries. Use -gcflags=-l to disable inlining to
// work around bugs in Go 1.4's compiler. See discussion thread:
// https://groups.google.com/d/msg/golang-dev/Ss7mCKsvk8w/Gsq7VYI0AwAJ
// Use the math_big_pure_go build tag to disable the assembly in math/big
// which may contain unsupported instructions.
// Note that if we are using Go 1.10 or later as bootstrap, the -gcflags=-l
// only applies to the final cmd/go binary, but that's OK: if this is Go 1.10
// or later we don't need to disable inlining to work around bugs in the Go 1.4 compiler.
cmd := []string{
pathf("%s/bin/go", goroot_bootstrap),
"install",
"-gcflags=-l",
"-tags=math_big_pure_go compiler_bootstrap",
}
if vflag > 0 {
cmd = append(cmd, "-v")
}
if tool := os.Getenv("GOBOOTSTRAP_TOOLEXEC"); tool != "" {
cmd = append(cmd, "-toolexec="+tool)
}
cmd = append(cmd, "bootstrap/cmd/...")
run(base, ShowOutput|CheckExit, cmd...)
// Copy binaries into tool binary directory.
for _, name := range bootstrapDirs {
if !strings.HasPrefix(name, "cmd/") {
continue
}
name = name[len("cmd/"):]
if !strings.Contains(name, "/") {
copyfile(pathf("%s/%s%s", tooldir, name, exe), pathf("%s/bin/%s%s", workspace, name, exe), writeExec)
}
}
if vflag > 0 {
xprintf("\n")
}
}
var ssaRewriteFileSubstring = filepath.FromSlash("src/cmd/compile/internal/ssa/rewrite")
// isUnneededSSARewriteFile reports whether srcFile is a
// src/cmd/compile/internal/ssa/rewriteARCHNAME.go file for an
// architecture that isn't for the current runtime.GOARCH.
//
// When unneeded is true archCaps is the rewrite base filename without
// the "rewrite" prefix or ".go" suffix: AMD64, 386, ARM, ARM64, etc.
func isUnneededSSARewriteFile(srcFile string) (archCaps string, unneeded bool) {
if !strings.Contains(srcFile, ssaRewriteFileSubstring) {
return "", false
}
fileArch := strings.TrimSuffix(strings.TrimPrefix(filepath.Base(srcFile), "rewrite"), ".go")
if fileArch == "" {
return "", false
}
b := fileArch[0]
if b == '_' || ('a' <= b && b <= 'z') {
return "", false
}
archCaps = fileArch
fileArch = strings.ToLower(fileArch)
fileArch = strings.TrimSuffix(fileArch, "splitload")
if fileArch == os.Getenv("GOHOSTARCH") {
return "", false
}
if fileArch == strings.TrimSuffix(runtime.GOARCH, "le") {
return "", false
}
if fileArch == strings.TrimSuffix(os.Getenv("GOARCH"), "le") {
return "", false
}
return archCaps, true
}
func bootstrapRewriteFile(srcFile string) string {
// During bootstrap, generate dummy rewrite files for
// irrelevant architectures. We only need to build a bootstrap
// binary that works for the current runtime.GOARCH.
// This saves 6+ seconds of bootstrap.
if archCaps, ok := isUnneededSSARewriteFile(srcFile); ok {
return fmt.Sprintf(`// Code generated by go tool dist; DO NOT EDIT.
package ssa
func rewriteValue%s(v *Value) bool { panic("unused during bootstrap") }
func rewriteBlock%s(b *Block) bool { panic("unused during bootstrap") }
`, archCaps, archCaps)
}
return bootstrapFixImports(srcFile)
}
func bootstrapFixImports(srcFile string) string {
text := readfile(srcFile)
if !strings.Contains(srcFile, "/cmd/") && !strings.Contains(srcFile, `\cmd\`) {
text = regexp.MustCompile(`\bany\b`).ReplaceAllString(text, "interface{}")
}
lines := strings.SplitAfter(text, "\n")
inBlock := false
for i, line := range lines {
if strings.HasPrefix(line, "import (") {
inBlock = true
continue
}
if inBlock && strings.HasPrefix(line, ")") {
inBlock = false
continue
}
if strings.HasPrefix(line, `import "`) || strings.HasPrefix(line, `import . "`) ||
inBlock && (strings.HasPrefix(line, "\t\"") || strings.HasPrefix(line, "\t. \"") || strings.HasPrefix(line, "\texec \"")) {
line = strings.Replace(line, `"cmd/`, `"bootstrap/cmd/`, -1)
// During bootstrap, must use plain os/exec.
line = strings.Replace(line, `exec "internal/execabs"`, `"os/exec"`, -1)
for _, dir := range bootstrapDirs {
if strings.HasPrefix(dir, "cmd/") {
continue
}
line = strings.Replace(line, `"`+dir+`"`, `"bootstrap/`+dir+`"`, -1)
}
lines[i] = line
}
}
lines[0] = "// Code generated by go tool dist; DO NOT EDIT.\n// This is a bootstrap copy of " + srcFile + "\n\n//line " + srcFile + ":1\n" + lines[0]
return strings.Join(lines, "")
}
| [
"\"GOROOT_BOOTSTRAP\"",
"\"HOME\"",
"\"GOROOT\"",
"\"GOPATH\"",
"\"GOBIN\"",
"\"GOBOOTSTRAP_TOOLEXEC\"",
"\"GOHOSTARCH\"",
"\"GOARCH\""
]
| []
| [
"GOHOSTARCH",
"GOBIN",
"GOROOT",
"GOPATH",
"GOROOT_BOOTSTRAP",
"GOBOOTSTRAP_TOOLEXEC",
"GOARCH",
"HOME"
]
| [] | ["GOHOSTARCH", "GOBIN", "GOROOT", "GOPATH", "GOROOT_BOOTSTRAP", "GOBOOTSTRAP_TOOLEXEC", "GOARCH", "HOME"] | go | 8 | 0 | |
server/controllers/events/events_controller_e2e_test.go | package events_test
import (
"bytes"
"fmt"
"net/http"
"net/http/httptest"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
"testing"
"github.com/google/go-github/v31/github"
"github.com/hashicorp/go-getter"
"github.com/hashicorp/go-version"
. "github.com/petergtz/pegomock"
"github.com/runatlantis/atlantis/server"
events_controllers "github.com/runatlantis/atlantis/server/controllers/events"
"github.com/runatlantis/atlantis/server/core/db"
"github.com/runatlantis/atlantis/server/core/locking"
"github.com/runatlantis/atlantis/server/core/runtime"
runtimemocks "github.com/runatlantis/atlantis/server/core/runtime/mocks"
runtimematchers "github.com/runatlantis/atlantis/server/core/runtime/mocks/matchers"
"github.com/runatlantis/atlantis/server/core/runtime/policy"
"github.com/runatlantis/atlantis/server/core/terraform"
"github.com/runatlantis/atlantis/server/events"
"github.com/runatlantis/atlantis/server/events/mocks"
"github.com/runatlantis/atlantis/server/events/mocks/matchers"
"github.com/runatlantis/atlantis/server/events/models"
"github.com/runatlantis/atlantis/server/events/vcs"
vcsmocks "github.com/runatlantis/atlantis/server/events/vcs/mocks"
"github.com/runatlantis/atlantis/server/events/webhooks"
"github.com/runatlantis/atlantis/server/events/yaml"
"github.com/runatlantis/atlantis/server/events/yaml/valid"
"github.com/runatlantis/atlantis/server/logging"
. "github.com/runatlantis/atlantis/testing"
)
const ConftestVersion = "0.25.0"
var applyLocker locking.ApplyLocker
var userConfig server.UserConfig
type NoopTFDownloader struct{}
var mockPreWorkflowHookRunner *runtimemocks.MockPreWorkflowHookRunner
func (m *NoopTFDownloader) GetFile(dst, src string, opts ...getter.ClientOption) error {
return nil
}
func (m *NoopTFDownloader) GetAny(dst, src string, opts ...getter.ClientOption) error {
return nil
}
type LocalConftestCache struct {
}
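// Get (note added for clarity): this test-only cache ignores the requested
// version and simply resolves a binary named "conftest"+ConftestVersion
// (e.g. conftest0.25.0) from PATH, matching what ensureRunningConftest
// checks for below.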
func (m *LocalConftestCache) Get(key *version.Version) (string, error) {
return exec.LookPath(fmt.Sprintf("conftest%s", ConftestVersion))
}
func TestGitHubWorkflow(t *testing.T) {
if testing.Short() {
t.SkipNow()
}
// Ensure we have >= TF 0.14 locally.
ensureRunning014(t)
cases := []struct {
Description string
// RepoDir is relative to testfixtures/test-repos.
RepoDir string
// ModifiedFiles are the list of files that have been modified in this
// pull request.
ModifiedFiles []string
// Comments are what our mock user writes to the pull request.
Comments []string
// DisableApply flag used by userConfig object when initializing atlantis server.
DisableApply bool
// ApplyLock creates an apply lock that temporarily disables apply command
ApplyLock bool
// ExpAutomerge is true if we expect Atlantis to automerge.
ExpAutomerge bool
// ExpAutoplan is true if we expect Atlantis to autoplan.
ExpAutoplan bool
// ExpParallel is true if we expect Atlantis to run parallel plans or applies.
ExpParallel bool
// ExpMergeable is true if we expect Atlantis to be able to merge.
// If for instance policy check is failing and there are no approvals
// ExpMergeable should be false
ExpMergeable bool
// ExpReplies is a list of files containing the expected replies that
// Atlantis writes to the pull request in order. A reply from a parallel operation
// will be matched using a substring check.
ExpReplies [][]string
}{
{
Description: "simple",
RepoDir: "simple",
ModifiedFiles: []string{"main.tf"},
Comments: []string{
"atlantis apply",
},
ExpReplies: [][]string{
{"exp-output-autoplan.txt"},
{"exp-output-apply.txt"},
{"exp-output-merge.txt"},
},
ExpAutoplan: true,
},
{
Description: "simple with plan comment",
RepoDir: "simple",
ModifiedFiles: []string{"main.tf"},
ExpAutoplan: true,
Comments: []string{
"atlantis plan",
"atlantis apply",
},
ExpReplies: [][]string{
{"exp-output-autoplan.txt"},
{"exp-output-autoplan.txt"},
{"exp-output-apply.txt"},
{"exp-output-merge.txt"},
},
},
{
Description: "simple with comment -var",
RepoDir: "simple",
ModifiedFiles: []string{"main.tf"},
ExpAutoplan: true,
Comments: []string{
"atlantis plan -- -var var=overridden",
"atlantis apply",
},
ExpReplies: [][]string{
{"exp-output-autoplan.txt"},
{"exp-output-atlantis-plan-var-overridden.txt"},
{"exp-output-apply-var.txt"},
{"exp-output-merge.txt"},
},
},
{
Description: "simple with workspaces",
RepoDir: "simple",
ModifiedFiles: []string{"main.tf"},
ExpAutoplan: true,
Comments: []string{
"atlantis plan -- -var var=default_workspace",
"atlantis plan -w new_workspace -- -var var=new_workspace",
"atlantis apply -w default",
"atlantis apply -w new_workspace",
},
ExpReplies: [][]string{
{"exp-output-autoplan.txt"},
{"exp-output-atlantis-plan.txt"},
{"exp-output-atlantis-plan-new-workspace.txt"},
{"exp-output-apply-var-default-workspace.txt"},
{"exp-output-apply-var-new-workspace.txt"},
{"exp-output-merge-workspaces.txt"},
},
},
{
Description: "simple with workspaces and apply all",
RepoDir: "simple",
ModifiedFiles: []string{"main.tf"},
ExpAutoplan: true,
Comments: []string{
"atlantis plan -- -var var=default_workspace",
"atlantis plan -w new_workspace -- -var var=new_workspace",
"atlantis apply",
},
ExpReplies: [][]string{
{"exp-output-autoplan.txt"},
{"exp-output-atlantis-plan.txt"},
{"exp-output-atlantis-plan-new-workspace.txt"},
{"exp-output-apply-var-all.txt"},
{"exp-output-merge-workspaces.txt"},
},
},
{
Description: "simple with atlantis.yaml",
RepoDir: "simple-yaml",
ModifiedFiles: []string{"main.tf"},
ExpAutoplan: true,
Comments: []string{
"atlantis apply -w staging",
"atlantis apply -w default",
},
ExpReplies: [][]string{
{"exp-output-autoplan.txt"},
{"exp-output-apply-staging.txt"},
{"exp-output-apply-default.txt"},
{"exp-output-merge.txt"},
},
},
{
Description: "simple with atlantis.yaml and apply all",
RepoDir: "simple-yaml",
ModifiedFiles: []string{"main.tf"},
ExpAutoplan: true,
Comments: []string{
"atlantis apply",
},
ExpReplies: [][]string{
{"exp-output-autoplan.txt"},
{"exp-output-apply-all.txt"},
{"exp-output-merge.txt"},
},
},
{
Description: "modules staging only",
RepoDir: "modules",
ModifiedFiles: []string{"staging/main.tf"},
ExpAutoplan: true,
Comments: []string{
"atlantis apply -d staging",
},
ExpReplies: [][]string{
{"exp-output-autoplan-only-staging.txt"},
{"exp-output-apply-staging.txt"},
{"exp-output-merge-only-staging.txt"},
},
},
{
Description: "modules modules only",
RepoDir: "modules",
ModifiedFiles: []string{"modules/null/main.tf"},
ExpAutoplan: false,
Comments: []string{
"atlantis plan -d staging",
"atlantis plan -d production",
"atlantis apply -d staging",
"atlantis apply -d production",
},
ExpReplies: [][]string{
{"exp-output-plan-staging.txt"},
{"exp-output-plan-production.txt"},
{"exp-output-apply-staging.txt"},
{"exp-output-apply-production.txt"},
{"exp-output-merge-all-dirs.txt"},
},
},
{
Description: "modules-yaml",
RepoDir: "modules-yaml",
ModifiedFiles: []string{"modules/null/main.tf"},
ExpAutoplan: true,
Comments: []string{
"atlantis apply -d staging",
"atlantis apply -d production",
},
ExpReplies: [][]string{
{"exp-output-autoplan.txt"},
{"exp-output-apply-staging.txt"},
{"exp-output-apply-production.txt"},
{"exp-output-merge.txt"},
},
},
{
Description: "tfvars-yaml",
RepoDir: "tfvars-yaml",
ModifiedFiles: []string{"main.tf"},
ExpAutoplan: true,
Comments: []string{
"atlantis apply -p staging",
"atlantis apply -p default",
},
ExpReplies: [][]string{
{"exp-output-autoplan.txt"},
{"exp-output-apply-staging.txt"},
{"exp-output-apply-default.txt"},
{"exp-output-merge.txt"},
},
},
{
Description: "tfvars no autoplan",
RepoDir: "tfvars-yaml-no-autoplan",
ModifiedFiles: []string{"main.tf"},
ExpAutoplan: false,
Comments: []string{
"atlantis plan -p staging",
"atlantis plan -p default",
"atlantis apply -p staging",
"atlantis apply -p default",
},
ExpReplies: [][]string{
{"exp-output-plan-staging.txt"},
{"exp-output-plan-default.txt"},
{"exp-output-apply-staging.txt"},
{"exp-output-apply-default.txt"},
{"exp-output-merge.txt"},
},
},
{
Description: "automerge",
RepoDir: "automerge",
ExpAutomerge: true,
ExpAutoplan: true,
ModifiedFiles: []string{"dir1/main.tf", "dir2/main.tf"},
Comments: []string{
"atlantis apply -d dir1",
"atlantis apply -d dir2",
},
ExpReplies: [][]string{
{"exp-output-autoplan.txt"},
{"exp-output-apply-dir1.txt"},
{"exp-output-apply-dir2.txt"},
{"exp-output-automerge.txt"},
{"exp-output-merge.txt"},
},
},
{
Description: "server-side cfg",
RepoDir: "server-side-cfg",
ExpAutomerge: false,
ExpAutoplan: true,
ModifiedFiles: []string{"main.tf"},
Comments: []string{
"atlantis apply -w staging",
"atlantis apply -w default",
},
ExpReplies: [][]string{
{"exp-output-autoplan.txt"},
{"exp-output-apply-staging-workspace.txt"},
{"exp-output-apply-default-workspace.txt"},
{"exp-output-merge.txt"},
},
},
{
Description: "workspaces parallel with atlantis.yaml",
RepoDir: "workspace-parallel-yaml",
ModifiedFiles: []string{"production/main.tf", "staging/main.tf"},
ExpAutoplan: true,
ExpParallel: true,
Comments: []string{
"atlantis apply",
},
ExpReplies: [][]string{
{"exp-output-autoplan-staging.txt", "exp-output-autoplan-production.txt"},
{"exp-output-apply-all-staging.txt", "exp-output-apply-all-production.txt"},
{"exp-output-merge.txt"},
},
},
{
Description: "global apply lock disables apply commands",
RepoDir: "simple-yaml",
ModifiedFiles: []string{"main.tf"},
DisableApply: false,
ApplyLock: true,
ExpAutoplan: true,
Comments: []string{
"atlantis apply",
},
ExpReplies: [][]string{
{"exp-output-autoplan.txt"},
{"exp-output-apply-locked.txt"},
{"exp-output-merge.txt"},
},
},
{
Description: "disable apply flag always takes precedence",
RepoDir: "simple-yaml",
ModifiedFiles: []string{"main.tf"},
DisableApply: true,
ApplyLock: false,
ExpAutoplan: true,
Comments: []string{
"atlantis apply",
},
ExpReplies: [][]string{
{"exp-output-autoplan.txt"},
{"exp-output-apply-locked.txt"},
{"exp-output-merge.txt"},
},
},
}
for _, c := range cases {
t.Run(c.Description, func(t *testing.T) {
RegisterMockTestingT(t)
// reset userConfig
userConfig = server.UserConfig{}
userConfig.DisableApply = c.DisableApply
ctrl, vcsClient, githubGetter, atlantisWorkspace := setupE2E(t, c.RepoDir)
// Set the repo to be cloned through the testing backdoor.
repoDir, headSHA, cleanup := initializeRepo(t, c.RepoDir)
defer cleanup()
atlantisWorkspace.TestingOverrideHeadCloneURL = fmt.Sprintf("file://%s", repoDir)
// Setup test dependencies.
w := httptest.NewRecorder()
When(githubGetter.GetPullRequest(AnyRepo(), AnyInt())).ThenReturn(GitHubPullRequestParsed(headSHA), nil)
When(vcsClient.GetModifiedFiles(AnyRepo(), matchers.AnyModelsPullRequest())).ThenReturn(c.ModifiedFiles, nil)
// First, send the open pull request event which triggers autoplan.
pullOpenedReq := GitHubPullRequestOpenedEvent(t, headSHA)
ctrl.Post(w, pullOpenedReq)
ResponseContains(t, w, 200, "Processing...")
// Create global apply lock if required
if c.ApplyLock {
_, _ = applyLocker.LockApply()
}
// Now send any other comments.
for _, comment := range c.Comments {
commentReq := GitHubCommentEvent(t, comment)
w = httptest.NewRecorder()
ctrl.Post(w, commentReq)
ResponseContains(t, w, 200, "Processing...")
}
// Send the "pull closed" event which would be triggered by the
// automerge or a manual merge.
pullClosedReq := GitHubPullRequestClosedEvent(t)
w = httptest.NewRecorder()
ctrl.Post(w, pullClosedReq)
ResponseContains(t, w, 200, "Pull request cleaned successfully")
// Let's verify the pre-workflow hook was called for each comment including the pull request opened event
mockPreWorkflowHookRunner.VerifyWasCalled(Times(len(c.Comments)+1)).Run(runtimematchers.AnyModelsPreWorkflowHookCommandContext(), EqString("some dummy command"), AnyString())
// Now we're ready to verify Atlantis made all the comments back (or
// replies) that we expect. We expect each plan to have 1 comment,
// each apply to have 1 comment, plus one for the locks deleted at the
// end.
expNumReplies := len(c.Comments) + 1
if c.ExpAutoplan {
expNumReplies++
}
if c.ExpAutomerge {
expNumReplies++
}
_, _, actReplies, _ := vcsClient.VerifyWasCalled(Times(expNumReplies)).CreateComment(AnyRepo(), AnyInt(), AnyString(), AnyString()).GetAllCapturedArguments()
Assert(t, len(c.ExpReplies) == len(actReplies), "missing expected replies, got %d but expected %d", len(actReplies), len(c.ExpReplies))
for i, expReply := range c.ExpReplies {
assertCommentEquals(t, expReply, actReplies[i], c.RepoDir, c.ExpParallel)
}
if c.ExpAutomerge {
// Verify that the merge API call was made.
vcsClient.VerifyWasCalledOnce().MergePull(matchers.AnyModelsPullRequest(), matchers.AnyModelsPullRequestOptions())
} else {
vcsClient.VerifyWasCalled(Never()).MergePull(matchers.AnyModelsPullRequest(), matchers.AnyModelsPullRequestOptions())
}
})
}
}
func TestSimpleWorkflow_terraformLockFile(t *testing.T) {
if testing.Short() {
t.SkipNow()
}
// Ensure we have >= TF 0.14 locally.
ensureRunning014(t)
cases := []struct {
Description string
// RepoDir is relative to testfixtures/test-repos.
RepoDir string
// ModifiedFiles are the list of files that have been modified in this
// pull request.
ModifiedFiles []string
// ExpAutoplan is true if we expect Atlantis to autoplan.
ExpAutoplan bool
// Comments are what our mock user writes to the pull request.
Comments []string
// ExpReplies is a list of files containing the expected replies that
// Atlantis writes to the pull request in order. A reply from a parallel operation
// will be matched using a substring check.
ExpReplies [][]string
// LockFileTracked determines if the `.terraform.lock.hcl` file is tracked in git.
// If this is true we don't expect the lockfile to be modified by terraform init;
// if false we expect the lock file to be updated.
LockFileTracked bool
}{
{
Description: "simple with plan comment lockfile staged",
RepoDir: "simple-with-lockfile",
ModifiedFiles: []string{"main.tf"},
ExpAutoplan: true,
Comments: []string{
"atlantis plan",
},
ExpReplies: [][]string{
{"exp-output-autoplan.txt"},
{"exp-output-plan.txt"},
},
LockFileTracked: true,
},
{
Description: "simple with plan comment lockfile not staged",
RepoDir: "simple-with-lockfile",
ModifiedFiles: []string{"main.tf"},
Comments: []string{
"atlantis plan",
},
ExpReplies: [][]string{
{"exp-output-autoplan.txt"},
{"exp-output-plan.txt"},
},
LockFileTracked: false,
},
}
for _, c := range cases {
t.Run(c.Description, func(t *testing.T) {
RegisterMockTestingT(t)
// reset userConfig
userConfig = server.UserConfig{}
userConfig.DisableApply = true
ctrl, vcsClient, githubGetter, atlantisWorkspace := setupE2E(t, c.RepoDir)
// Set the repo to be cloned through the testing backdoor.
repoDir, headSHA, cleanup := initializeRepo(t, c.RepoDir)
defer cleanup()
oldLockFilePath, err := filepath.Abs(filepath.Join("testfixtures", "null_provider_lockfile_old_version"))
Ok(t, err)
oldLockFileContent, err := os.ReadFile(oldLockFilePath)
Ok(t, err)
if c.LockFileTracked {
runCmd(t, "", "cp", oldLockFilePath, fmt.Sprintf("%s/.terraform.lock.hcl", repoDir))
runCmd(t, repoDir, "git", "add", ".terraform.lock.hcl")
runCmd(t, repoDir, "git", "commit", "-am", "stage .terraform.lock.hcl")
}
atlantisWorkspace.TestingOverrideHeadCloneURL = fmt.Sprintf("file://%s", repoDir)
// Setup test dependencies.
w := httptest.NewRecorder()
When(githubGetter.GetPullRequest(AnyRepo(), AnyInt())).ThenReturn(GitHubPullRequestParsed(headSHA), nil)
When(vcsClient.GetModifiedFiles(AnyRepo(), matchers.AnyModelsPullRequest())).ThenReturn(c.ModifiedFiles, nil)
// First, send the open pull request event which triggers autoplan.
pullOpenedReq := GitHubPullRequestOpenedEvent(t, headSHA)
ctrl.Post(w, pullOpenedReq)
ResponseContains(t, w, 200, "Processing...")
// check lock file content
actualLockFileContent, err := os.ReadFile(fmt.Sprintf("%s/repos/runatlantis/atlantis-tests/2/default/.terraform.lock.hcl", atlantisWorkspace.DataDir))
Ok(t, err)
if c.LockFileTracked {
if string(oldLockFileContent) != string(actualLockFileContent) {
t.Error("Expected terraform.lock.hcl file not to be different as it has been staged")
t.FailNow()
}
} else {
if string(oldLockFileContent) == string(actualLockFileContent) {
t.Error("Expected terraform.lock.hcl file to be different as it should have been updated")
t.FailNow()
}
}
if !c.LockFileTracked {
// replace the lock file generated by the previous init to simulate
// dependencies needing updating in a later plan
runCmd(t, "", "cp", oldLockFilePath, fmt.Sprintf("%s/repos/runatlantis/atlantis-tests/2/default/.terraform.lock.hcl", atlantisWorkspace.DataDir))
}
// Now send any other comments.
for _, comment := range c.Comments {
commentReq := GitHubCommentEvent(t, comment)
w = httptest.NewRecorder()
ctrl.Post(w, commentReq)
ResponseContains(t, w, 200, "Processing...")
}
// check lock file content
actualLockFileContent, err = os.ReadFile(fmt.Sprintf("%s/repos/runatlantis/atlantis-tests/2/default/.terraform.lock.hcl", atlantisWorkspace.DataDir))
Ok(t, err)
if c.LockFileTracked {
if string(oldLockFileContent) != string(actualLockFileContent) {
t.Error("Expected terraform.lock.hcl file not to be different as it has been staged")
t.FailNow()
}
} else {
if string(oldLockFileContent) == string(actualLockFileContent) {
t.Error("Expected terraform.lock.hcl file to be different as it should have been updated")
t.FailNow()
}
}
// Let's verify the pre-workflow hook was called for each comment including the pull request opened event
mockPreWorkflowHookRunner.VerifyWasCalled(Times(2)).Run(runtimematchers.AnyModelsPreWorkflowHookCommandContext(), EqString("some dummy command"), AnyString())
// Now we're ready to verify Atlantis made all the comments back (or
// replies) that we expect. We expect each plan to have 1 comment,
// each apply to have 1 comment, plus one for the locks deleted at the
// end.
_, _, actReplies, _ := vcsClient.VerifyWasCalled(Times(2)).CreateComment(AnyRepo(), AnyInt(), AnyString(), AnyString()).GetAllCapturedArguments()
Assert(t, len(c.ExpReplies) == len(actReplies), "missing expected replies, got %d but expected %d", len(actReplies), len(c.ExpReplies))
for i, expReply := range c.ExpReplies {
assertCommentEquals(t, expReply, actReplies[i], c.RepoDir, false)
}
})
}
}
func TestGitHubWorkflowWithPolicyCheck(t *testing.T) {
if testing.Short() {
t.SkipNow()
}
// Ensure we have >= TF 0.14 locally.
ensureRunning014(t)
// Ensure we have >= Conftest 0.21 locally.
ensureRunningConftest(t)
cases := []struct {
Description string
// RepoDir is relative to testfixtures/test-repos.
RepoDir string
// ModifiedFiles are the list of files that have been modified in this
// pull request.
ModifiedFiles []string
// Comments are what our mock user writes to the pull request.
Comments []string
// ExpAutomerge is true if we expect Atlantis to automerge.
ExpAutomerge bool
// ExpAutoplan is true if we expect Atlantis to autoplan.
ExpAutoplan bool
// ExpParallel is true if we expect Atlantis to run parallel plans or applies.
ExpParallel bool
// ExpReplies is a list of files containing the expected replies that
// Atlantis writes to the pull request in order. A reply from a parallel operation
// will be matched using a substring check.
ExpReplies [][]string
}{
{
Description: "1 failing policy and 1 passing policy ",
RepoDir: "policy-checks-multi-projects",
ModifiedFiles: []string{"dir1/main.tf,", "dir2/main.tf"},
ExpAutoplan: true,
Comments: []string{
"atlantis apply",
},
ExpReplies: [][]string{
{"exp-output-autoplan.txt"},
{"exp-output-auto-policy-check.txt"},
{"exp-output-apply.txt"},
{"exp-output-merge.txt"},
},
},
{
Description: "failing policy without policies passing using extra args",
RepoDir: "policy-checks-extra-args",
ModifiedFiles: []string{"main.tf"},
ExpAutoplan: true,
Comments: []string{
"atlantis apply",
},
ExpReplies: [][]string{
{"exp-output-autoplan.txt"},
{"exp-output-auto-policy-check.txt"},
{"exp-output-apply-failed.txt"},
{"exp-output-merge.txt"},
},
},
{
Description: "failing policy without policies passing",
RepoDir: "policy-checks",
ModifiedFiles: []string{"main.tf"},
ExpAutoplan: true,
Comments: []string{
"atlantis apply",
},
ExpReplies: [][]string{
{"exp-output-autoplan.txt"},
{"exp-output-auto-policy-check.txt"},
{"exp-output-apply-failed.txt"},
{"exp-output-merge.txt"},
},
},
{
Description: "failing policy additional apply requirements specified",
RepoDir: "policy-checks-apply-reqs",
ModifiedFiles: []string{"main.tf"},
ExpAutoplan: true,
Comments: []string{
"atlantis apply",
},
ExpReplies: [][]string{
{"exp-output-autoplan.txt"},
{"exp-output-auto-policy-check.txt"},
{"exp-output-apply-failed.txt"},
{"exp-output-merge.txt"},
},
},
{
Description: "failing policy approved by non owner",
RepoDir: "policy-checks-diff-owner",
ModifiedFiles: []string{"main.tf"},
ExpAutoplan: true,
Comments: []string{
"atlantis approve_policies",
"atlantis apply",
},
ExpReplies: [][]string{
{"exp-output-autoplan.txt"},
{"exp-output-auto-policy-check.txt"},
{"exp-output-approve-policies.txt"},
{"exp-output-apply-failed.txt"},
{"exp-output-merge.txt"},
},
},
}
for _, c := range cases {
t.Run(c.Description, func(t *testing.T) {
RegisterMockTestingT(t)
// reset userConfig
userConfig = server.UserConfig{}
userConfig.EnablePolicyChecksFlag = true
ctrl, vcsClient, githubGetter, atlantisWorkspace := setupE2E(t, c.RepoDir)
// Set the repo to be cloned through the testing backdoor.
repoDir, headSHA, cleanup := initializeRepo(t, c.RepoDir)
defer cleanup()
atlantisWorkspace.TestingOverrideHeadCloneURL = fmt.Sprintf("file://%s", repoDir)
// Setup test dependencies.
w := httptest.NewRecorder()
When(vcsClient.PullIsMergeable(AnyRepo(), matchers.AnyModelsPullRequest())).ThenReturn(true, nil)
When(vcsClient.PullIsApproved(AnyRepo(), matchers.AnyModelsPullRequest())).ThenReturn(models.ApprovalStatus{
IsApproved: true,
}, nil)
When(githubGetter.GetPullRequest(AnyRepo(), AnyInt())).ThenReturn(GitHubPullRequestParsed(headSHA), nil)
When(vcsClient.GetModifiedFiles(AnyRepo(), matchers.AnyModelsPullRequest())).ThenReturn(c.ModifiedFiles, nil)
// First, send the open pull request event which triggers autoplan.
pullOpenedReq := GitHubPullRequestOpenedEvent(t, headSHA)
ctrl.Post(w, pullOpenedReq)
ResponseContains(t, w, 200, "Processing...")
// Now send any other comments.
for _, comment := range c.Comments {
commentReq := GitHubCommentEvent(t, comment)
w = httptest.NewRecorder()
ctrl.Post(w, commentReq)
ResponseContains(t, w, 200, "Processing...")
}
// Send the "pull closed" event which would be triggered by the
// automerge or a manual merge.
pullClosedReq := GitHubPullRequestClosedEvent(t)
w = httptest.NewRecorder()
ctrl.Post(w, pullClosedReq)
ResponseContains(t, w, 200, "Pull request cleaned successfully")
// Now we're ready to verify Atlantis made all the comments back (or
// replies) that we expect. We expect each plan to have 2 comments,
// one for the plan and one for the policy check, each apply to have
// 1 comment, plus one for the locks deleted at the end.
expNumReplies := len(c.Comments) + 1
if c.ExpAutoplan {
expNumReplies++
expNumReplies++
}
var planRegex = regexp.MustCompile("plan")
for _, comment := range c.Comments {
if planRegex.MatchString(comment) {
expNumReplies++
}
}
if c.ExpAutomerge {
expNumReplies++
}
_, _, actReplies, _ := vcsClient.VerifyWasCalled(Times(expNumReplies)).CreateComment(AnyRepo(), AnyInt(), AnyString(), AnyString()).GetAllCapturedArguments()
Assert(t, len(c.ExpReplies) == len(actReplies), "missing expected replies, got %d but expected %d", len(actReplies), len(c.ExpReplies))
for i, expReply := range c.ExpReplies {
assertCommentEquals(t, expReply, actReplies[i], c.RepoDir, c.ExpParallel)
}
if c.ExpAutomerge {
// Verify that the merge API call was made.
vcsClient.VerifyWasCalledOnce().MergePull(matchers.AnyModelsPullRequest(), matchers.AnyModelsPullRequestOptions())
} else {
vcsClient.VerifyWasCalled(Never()).MergePull(matchers.AnyModelsPullRequest(), matchers.AnyModelsPullRequestOptions())
}
})
}
}
func setupE2E(t *testing.T, repoDir string) (events_controllers.VCSEventsController, *vcsmocks.MockClient, *mocks.MockGithubPullGetter, *events.FileWorkspace) {
allowForkPRs := false
dataDir, binDir, cacheDir, cleanup := mkSubDirs(t)
defer cleanup()
//env vars
if userConfig.EnablePolicyChecksFlag {
// need this to be set or we'll fail the policy check step
os.Setenv(policy.DefaultConftestVersionEnvKey, "0.25.0")
}
// Mocks.
e2eVCSClient := vcsmocks.NewMockClient()
e2eStatusUpdater := &events.DefaultCommitStatusUpdater{Client: e2eVCSClient, TitleBuilder: vcs.StatusTitleBuilder{TitlePrefix: "atlantis"}}
e2eGithubGetter := mocks.NewMockGithubPullGetter()
e2eGitlabGetter := mocks.NewMockGitlabMergeRequestGetter()
// Real dependencies.
logger := logging.NewNoopLogger(t)
eventParser := &events.EventParser{
GithubUser: "github-user",
GithubToken: "github-token",
GitlabUser: "gitlab-user",
GitlabToken: "gitlab-token",
}
commentParser := &events.CommentParser{
GithubUser: "github-user",
GitlabUser: "gitlab-user",
}
terraformClient, err := terraform.NewClient(logger, binDir, cacheDir, "", "", "", "default-tf-version", "https://releases.hashicorp.com", &NoopTFDownloader{}, false)
Ok(t, err)
boltdb, err := db.New(dataDir)
Ok(t, err)
lockingClient := locking.NewClient(boltdb)
applyLocker = locking.NewApplyClient(boltdb, userConfig.DisableApply)
projectLocker := &events.DefaultProjectLocker{
Locker: lockingClient,
VCSClient: e2eVCSClient,
}
workingDir := &events.FileWorkspace{
DataDir: dataDir,
TestingOverrideHeadCloneURL: "override-me",
}
defaultTFVersion := terraformClient.DefaultVersion()
locker := events.NewDefaultWorkingDirLocker()
parser := &yaml.ParserValidator{}
globalCfgArgs := valid.GlobalCfgArgs{
AllowRepoCfg: true,
MergeableReq: false,
ApprovedReq: false,
PreWorkflowHooks: []*valid.PreWorkflowHook{
{
StepName: "global_hook",
RunCommand: "some dummy command",
},
},
PolicyCheckEnabled: userConfig.EnablePolicyChecksFlag,
}
globalCfg := valid.NewGlobalCfgFromArgs(globalCfgArgs)
expCfgPath := filepath.Join(absRepoPath(t, repoDir), "repos.yaml")
if _, err := os.Stat(expCfgPath); err == nil {
globalCfg, err = parser.ParseGlobalCfg(expCfgPath, globalCfg)
Ok(t, err)
}
drainer := &events.Drainer{}
parallelPoolSize := 1
silenceNoProjects := false
mockPreWorkflowHookRunner = runtimemocks.NewMockPreWorkflowHookRunner()
preWorkflowHooksCommandRunner := &events.DefaultPreWorkflowHooksCommandRunner{
VCSClient: e2eVCSClient,
GlobalCfg: globalCfg,
WorkingDirLocker: locker,
WorkingDir: workingDir,
PreWorkflowHookRunner: mockPreWorkflowHookRunner,
}
projectCommandBuilder := events.NewProjectCommandBuilder(
userConfig.EnablePolicyChecksFlag,
parser,
&events.DefaultProjectFinder{},
e2eVCSClient,
workingDir,
locker,
globalCfg,
&events.DefaultPendingPlanFinder{},
commentParser,
false,
false,
"**/*.tf,**/*.tfvars,**/*.tfvars.json,**/terragrunt.hcl",
)
showStepRunner, err := runtime.NewShowStepRunner(terraformClient, defaultTFVersion)
Ok(t, err)
conftestVersion, _ := version.NewVersion(ConftestVersion)
conftextExec := policy.NewConfTestExecutorWorkflow(logger, binDir, &NoopTFDownloader{})
// swapping out the version cache for something that always returns the local
// conftest binary
conftextExec.VersionCache = &LocalConftestCache{}
policyCheckRunner, err := runtime.NewPolicyCheckStepRunner(
conftestVersion,
conftextExec,
)
Ok(t, err)
projectCommandRunner := &events.DefaultProjectCommandRunner{
Locker: projectLocker,
LockURLGenerator: &mockLockURLGenerator{},
InitStepRunner: &runtime.InitStepRunner{
TerraformExecutor: terraformClient,
DefaultTFVersion: defaultTFVersion,
},
PlanStepRunner: &runtime.PlanStepRunner{
TerraformExecutor: terraformClient,
DefaultTFVersion: defaultTFVersion,
},
ShowStepRunner: showStepRunner,
PolicyCheckStepRunner: policyCheckRunner,
ApplyStepRunner: &runtime.ApplyStepRunner{
TerraformExecutor: terraformClient,
},
RunStepRunner: &runtime.RunStepRunner{
TerraformExecutor: terraformClient,
DefaultTFVersion: defaultTFVersion,
},
WorkingDir: workingDir,
Webhooks: &mockWebhookSender{},
WorkingDirLocker: locker,
AggregateApplyRequirements: &events.AggregateApplyRequirements{
PullApprovedChecker: e2eVCSClient,
WorkingDir: workingDir,
},
}
dbUpdater := &events.DBUpdater{
DB: boltdb,
}
pullUpdater := &events.PullUpdater{
HidePrevPlanComments: false,
VCSClient: e2eVCSClient,
MarkdownRenderer: &events.MarkdownRenderer{},
}
autoMerger := &events.AutoMerger{
VCSClient: e2eVCSClient,
GlobalAutomerge: false,
}
policyCheckCommandRunner := events.NewPolicyCheckCommandRunner(
dbUpdater,
pullUpdater,
e2eStatusUpdater,
projectCommandRunner,
parallelPoolSize,
false,
)
planCommandRunner := events.NewPlanCommandRunner(
false,
false,
e2eVCSClient,
&events.DefaultPendingPlanFinder{},
workingDir,
e2eStatusUpdater,
projectCommandBuilder,
projectCommandRunner,
dbUpdater,
pullUpdater,
policyCheckCommandRunner,
autoMerger,
parallelPoolSize,
silenceNoProjects,
boltdb,
)
applyCommandRunner := events.NewApplyCommandRunner(
e2eVCSClient,
false,
applyLocker,
e2eStatusUpdater,
projectCommandBuilder,
projectCommandRunner,
autoMerger,
pullUpdater,
dbUpdater,
boltdb,
parallelPoolSize,
silenceNoProjects,
false,
)
approvePoliciesCommandRunner := events.NewApprovePoliciesCommandRunner(
e2eStatusUpdater,
projectCommandBuilder,
projectCommandRunner,
pullUpdater,
dbUpdater,
silenceNoProjects,
false,
)
unlockCommandRunner := events.NewUnlockCommandRunner(
mocks.NewMockDeleteLockCommand(),
e2eVCSClient,
silenceNoProjects,
)
versionCommandRunner := events.NewVersionCommandRunner(
pullUpdater,
projectCommandBuilder,
projectCommandRunner,
parallelPoolSize,
silenceNoProjects,
)
commentCommandRunnerByCmd := map[models.CommandName]events.CommentCommandRunner{
models.PlanCommand: planCommandRunner,
models.ApplyCommand: applyCommandRunner,
models.ApprovePoliciesCommand: approvePoliciesCommandRunner,
models.UnlockCommand: unlockCommandRunner,
models.VersionCommand: versionCommandRunner,
}
commandRunner := &events.DefaultCommandRunner{
EventParser: eventParser,
VCSClient: e2eVCSClient,
GithubPullGetter: e2eGithubGetter,
GitlabMergeRequestGetter: e2eGitlabGetter,
Logger: logger,
GlobalCfg: globalCfg,
AllowForkPRs: allowForkPRs,
AllowForkPRsFlag: "allow-fork-prs",
CommentCommandRunnerByCmd: commentCommandRunnerByCmd,
Drainer: drainer,
PreWorkflowHooksCommandRunner: preWorkflowHooksCommandRunner,
PullStatusFetcher: boltdb,
}
repoAllowlistChecker, err := events.NewRepoAllowlistChecker("*")
Ok(t, err)
ctrl := events_controllers.VCSEventsController{
TestingMode: true,
CommandRunner: commandRunner,
PullCleaner: &events.PullClosedExecutor{
Locker: lockingClient,
VCSClient: e2eVCSClient,
WorkingDir: workingDir,
DB: boltdb,
},
Logger: logger,
Parser: eventParser,
CommentParser: commentParser,
GithubWebhookSecret: nil,
GithubRequestValidator: &events_controllers.DefaultGithubRequestValidator{},
GitlabRequestParserValidator: &events_controllers.DefaultGitlabRequestParserValidator{},
GitlabWebhookSecret: nil,
RepoAllowlistChecker: repoAllowlistChecker,
SupportedVCSHosts: []models.VCSHostType{models.Gitlab, models.Github, models.BitbucketCloud},
VCSClient: e2eVCSClient,
}
return ctrl, e2eVCSClient, e2eGithubGetter, workingDir
}
type mockLockURLGenerator struct{}
func (m *mockLockURLGenerator) GenerateLockURL(lockID string) string {
return "lock-url"
}
type mockWebhookSender struct{}
func (w *mockWebhookSender) Send(log logging.SimpleLogging, result webhooks.ApplyResult) error {
return nil
}
func GitHubCommentEvent(t *testing.T, comment string) *http.Request {
requestJSON, err := os.ReadFile(filepath.Join("testfixtures", "githubIssueCommentEvent.json"))
Ok(t, err)
requestJSON = []byte(strings.Replace(string(requestJSON), "###comment body###", comment, 1))
req, err := http.NewRequest("POST", "/events", bytes.NewBuffer(requestJSON))
Ok(t, err)
req.Header.Set("Content-Type", "application/json")
req.Header.Set(githubHeader, "issue_comment")
return req
}
func GitHubPullRequestOpenedEvent(t *testing.T, headSHA string) *http.Request {
requestJSON, err := os.ReadFile(filepath.Join("testfixtures", "githubPullRequestOpenedEvent.json"))
Ok(t, err)
// Replace sha with expected sha.
requestJSONStr := strings.Replace(string(requestJSON), "c31fd9ea6f557ad2ea659944c3844a059b83bc5d", headSHA, -1)
req, err := http.NewRequest("POST", "/events", bytes.NewBuffer([]byte(requestJSONStr)))
Ok(t, err)
req.Header.Set("Content-Type", "application/json")
req.Header.Set(githubHeader, "pull_request")
return req
}
func GitHubPullRequestClosedEvent(t *testing.T) *http.Request {
requestJSON, err := os.ReadFile(filepath.Join("testfixtures", "githubPullRequestClosedEvent.json"))
Ok(t, err)
req, err := http.NewRequest("POST", "/events", bytes.NewBuffer(requestJSON))
Ok(t, err)
req.Header.Set("Content-Type", "application/json")
req.Header.Set(githubHeader, "pull_request")
return req
}
func GitHubPullRequestParsed(headSHA string) *github.PullRequest {
// headSHA can't be empty so default if not set.
if headSHA == "" {
headSHA = "13940d121be73f656e2132c6d7b4c8e87878ac8d"
}
return &github.PullRequest{
Number: github.Int(2),
State: github.String("open"),
HTMLURL: github.String("htmlurl"),
Head: &github.PullRequestBranch{
Repo: &github.Repository{
FullName: github.String("runatlantis/atlantis-tests"),
CloneURL: github.String("https://github.com/runatlantis/atlantis-tests.git"),
},
SHA: github.String(headSHA),
Ref: github.String("branch"),
},
Base: &github.PullRequestBranch{
Repo: &github.Repository{
FullName: github.String("runatlantis/atlantis-tests"),
CloneURL: github.String("https://github.com/runatlantis/atlantis-tests.git"),
},
Ref: github.String("master"),
},
User: &github.User{
Login: github.String("atlantisbot"),
},
}
}
// absRepoPath returns the absolute path to the test repo under dir repoDir.
func absRepoPath(t *testing.T, repoDir string) string {
path, err := filepath.Abs(filepath.Join("testfixtures", "test-repos", repoDir))
Ok(t, err)
return path
}
// initializeRepo copies the repo data from testfixtures and initializes a new
// git repo in a temp directory. It returns that directory and a function
// to run in a defer that will delete the dir.
// The purpose of this function is to create a real git repository with a branch
// called 'branch' from the files under repoDir. This is so we can check in
// those files normally to this repo without needing a .git directory.
func initializeRepo(t *testing.T, repoDir string) (string, string, func()) {
originRepo := absRepoPath(t, repoDir)
// Copy the files to the temp dir.
destDir, cleanup := TempDir(t)
runCmd(t, "", "cp", "-r", fmt.Sprintf("%s/.", originRepo), destDir)
// Initialize the git repo.
runCmd(t, destDir, "git", "init")
runCmd(t, destDir, "touch", ".gitkeep")
runCmd(t, destDir, "git", "add", ".gitkeep")
runCmd(t, destDir, "git", "config", "--local", "user.email", "[email protected]")
runCmd(t, destDir, "git", "config", "--local", "user.name", "atlantisbot")
runCmd(t, destDir, "git", "commit", "-m", "initial commit")
runCmd(t, destDir, "git", "checkout", "-b", "branch")
runCmd(t, destDir, "git", "add", ".")
runCmd(t, destDir, "git", "commit", "-am", "branch commit")
headSHA := runCmd(t, destDir, "git", "rev-parse", "HEAD")
headSHA = strings.Trim(headSHA, "\n")
return destDir, headSHA, cleanup
}
func runCmd(t *testing.T, dir string, name string, args ...string) string {
cpCmd := exec.Command(name, args...)
cpCmd.Dir = dir
cpOut, err := cpCmd.CombinedOutput()
Assert(t, err == nil, "err running %q: %s", strings.Join(append([]string{name}, args...), " "), cpOut)
return string(cpOut)
}
func assertCommentEquals(t *testing.T, expReplies []string, act string, repoDir string, parallel bool) {
t.Helper()
// Replace all 'Creation complete after 0s [id=2135833172528078362]' strings with
// 'Creation complete after *s [id=*******************]' so we can do a comparison.
idRegex := regexp.MustCompile(`Creation complete after [0-9]+s \[id=[0-9]+]`)
act = idRegex.ReplaceAllString(act, "Creation complete after *s [id=*******************]")
// Replace all null_resource.simple{n}: .* with null_resource.simple: because
// with multiple resources being created the logs are all out of order which
// makes comparison impossible.
resourceRegex := regexp.MustCompile(`null_resource\.simple(\[\d])?\d?:.*`)
act = resourceRegex.ReplaceAllString(act, "null_resource.simple:")
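// For example (added for clarity), both "null_resource.simple[0]: Creating..."
// and "null_resource.simple2: Creation complete after 1s [id=123]" normalize
// to "null_resource.simple:", so ordering and resource IDs don't affect the
// comparison.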
// For parallel plans and applies, do a substring match since output may be out of order
var replyMatchesExpected func(string, string) bool
if parallel {
replyMatchesExpected = func(act string, expStr string) bool {
return strings.Contains(act, expStr)
}
} else {
replyMatchesExpected = func(act string, expStr string) bool {
return expStr == act
}
}
for _, expFile := range expReplies {
exp, err := os.ReadFile(filepath.Join(absRepoPath(t, repoDir), expFile))
Ok(t, err)
expStr := string(exp)
// My editor adds a newline to all the files, so if the actual comment
// doesn't end with a newline then strip the last newline from the file's
// contents.
if !strings.HasSuffix(act, "\n") {
expStr = strings.TrimSuffix(expStr, "\n")
}
if !replyMatchesExpected(act, expStr) {
// If in CI, we write the diff to the console. Otherwise we write the diff
// to file so we can use our local diff viewer.
if os.Getenv("CI") == "true" {
t.Logf("exp: %s, got: %s", expStr, act)
t.FailNow()
} else {
actFile := filepath.Join(absRepoPath(t, repoDir), expFile+".act")
err := os.WriteFile(actFile, []byte(act), 0600)
Ok(t, err)
cwd, err := os.Getwd()
Ok(t, err)
rel, err := filepath.Rel(cwd, actFile)
Ok(t, err)
t.Errorf("%q was different, wrote actual comment to %q", expFile, rel)
}
}
}
}
// returns parent, bindir, cachedir, cleanup func
func mkSubDirs(t *testing.T) (string, string, string, func()) {
tmp, cleanup := TempDir(t)
binDir := filepath.Join(tmp, "bin")
err := os.MkdirAll(binDir, 0700)
Ok(t, err)
cachedir := filepath.Join(tmp, "plugin-cache")
err = os.MkdirAll(cachedir, 0700)
Ok(t, err)
return tmp, binDir, cachedir, cleanup
}
// Fails the test if conftest isn't in PATH or its version is < 0.25.0
func ensureRunningConftest(t *testing.T) {
localPath, err := exec.LookPath(fmt.Sprintf("conftest%s", ConftestVersion))
if err != nil {
t.Logf("conftest >= %s must be installed to run this test", ConftestVersion)
t.FailNow()
}
versionOutBytes, err := exec.Command(localPath, "--version").Output() // #nosec
if err != nil {
t.Logf("error running conftest version: %s", err)
t.FailNow()
}
versionOutput := string(versionOutBytes)
match := versionConftestRegex.FindStringSubmatch(versionOutput)
if len(match) <= 1 {
t.Logf("could not parse contest version from %s", versionOutput)
t.FailNow()
}
localVersion, err := version.NewVersion(match[1])
Ok(t, err)
minVersion, err := version.NewVersion(ConftestVersion)
Ok(t, err)
if localVersion.LessThan(minVersion) {
t.Logf("must have contest version >= %s, you have %s", minVersion, localVersion)
t.FailNow()
}
}
// Fails the test if terraform isn't in PATH or its version is < 0.14
func ensureRunning014(t *testing.T) {
localPath, err := exec.LookPath("terraform")
if err != nil {
t.Log("terraform >= 0.14 must be installed to run this test")
t.FailNow()
}
versionOutBytes, err := exec.Command(localPath, "version").Output() // #nosec
if err != nil {
t.Logf("error running terraform version: %s", err)
t.FailNow()
}
versionOutput := string(versionOutBytes)
match := versionRegex.FindStringSubmatch(versionOutput)
if len(match) <= 1 {
t.Logf("could not parse terraform version from %s", versionOutput)
t.FailNow()
}
localVersion, err := version.NewVersion(match[1])
Ok(t, err)
minVersion, err := version.NewVersion("0.14.0")
Ok(t, err)
if localVersion.LessThan(minVersion) {
t.Logf("must have terraform version >= %s, you have %s", minVersion, localVersion)
t.FailNow()
}
}
// versionRegex extracts the version from `terraform version` output.
// Terraform v0.12.0-alpha4 (2c36829d3265661d8edbd5014de8090ea7e2a076)
// => 0.12.0-alpha4
//
// Terraform v0.11.10
// => 0.11.10
var versionRegex = regexp.MustCompile("Terraform v(.*?)(\\s.*)?\n")
var versionConftestRegex = regexp.MustCompile("Version: (.*?)(\\s.*)?\n")
| [
"\"CI\""
]
| []
| [
"CI"
]
| [] | ["CI"] | go | 1 | 0 | |
costesting/ci_test.go | package cos
// Basic imports
import (
"context"
"errors"
"fmt"
"io/ioutil"
"math/rand"
"net/http"
"net/url"
"os"
"strings"
"testing"
"time"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
"github.com/tencentyun/cos-go-sdk-v5"
)
// Define the suite, and absorb the built-in basic suite
// functionality from testify - including a T() method which
// returns the current testing context
type CosTestSuite struct {
suite.Suite
VariableThatShouldStartAtFive int
// CI client
Client *cos.Client
// Copy source client
CClient *cos.Client
Region string
Bucket string
Appid string
// test_object
TestObject string
// special_file_name
SepFileName string
}
// Replace with your own account and bucket information
const (
//uin
kUin = "100010805041"
kAppid = 1259654469
// Bucket needed for the regular tests
kBucket = "cosgosdktest-1259654469"
kRegion = "ap-guangzhou"
// Target bucket needed for cross-region replication; its region must differ from the kBucket bucket's region.
kRepBucket = "cosgosdkreptest"
kRepRegion = "ap-chengdu"
// Source and target buckets needed for the Batch tests; currently in public beta only in the Chengdu and Chongqing regions
kBatchBucket = "testcd-1259654469"
kTargetBatchBucket = "cosgosdkreptest-1259654469" // reuses an existing bucket
kBatchRegion = "ap-chengdu"
)
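// (Added note) SetupSuite assembles the bucket endpoint from these constants,
// e.g. https://cosgosdktest-1259654469.cos.ap-guangzhou.myqcloud.com, and a
// cos-control endpoint for Batch, e.g.
// https://100010805041.cos-control.ap-chengdu.myqcloud.com.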
func (s *CosTestSuite) SetupSuite() {
fmt.Println("Set up test")
// init
s.TestObject = "test.txt"
s.SepFileName = "中文" + "→↓←→↖↗↙↘! \"#$%&'()*+,-./0123456789:;<=>@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~"
// CI client for test interface
// URL like this http://test-1253846586.cos.ap-guangzhou.myqcloud.com
u := "https://" + kBucket + ".cos." + kRegion + ".myqcloud.com"
u2 := "https://" + kUin + ".cos-control." + kBatchRegion + ".myqcloud.com"
// Get the region
bucketurl, _ := url.Parse(u)
batchurl, _ := url.Parse(u2)
p := strings.Split(bucketurl.Host, ".")
assert.Equal(s.T(), 5, len(p), "Bucket host is not right")
s.Region = p[2]
// Bucket name
pp := strings.Split(p[0], "-")
s.Bucket = pp[0]
s.Appid = pp[1]
ib := &cos.BaseURL{BucketURL: bucketurl, BatchURL: batchurl}
s.Client = cos.NewClient(ib, &http.Client{
Transport: &cos.AuthorizationTransport{
SecretID: os.Getenv("COS_SECRETID"),
SecretKey: os.Getenv("COS_SECRETKEY"),
},
})
opt := &cos.BucketPutOptions{
XCosACL: "public-read",
}
r, err := s.Client.Bucket.Put(context.Background(), opt)
if err != nil && r.StatusCode == 409 {
fmt.Println("BucketAlreadyOwnedByYou")
} else if err != nil {
assert.Nil(s.T(), err, "PutBucket Failed")
}
}
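// Note (added for clarity): the suite authenticates with credentials read from
// the COS_SECRETID and COS_SECRETKEY environment variables; both must be
// exported before running these tests.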
// Begin of api test
// Service API
func (s *CosTestSuite) TestGetService() {
_, _, err := s.Client.Service.Get(context.Background())
assert.Nil(s.T(), err, "GetService Failed")
}
// Bucket API
func (s *CosTestSuite) TestPutHeadDeleteBucket() {
// Note: sometimes the bucket host cannot be resolved, which may cause an i/o timeout problem
u := "http://" + "testgosdkbucket-create-head-del-" + s.Appid + ".cos." + kRegion + ".myqcloud.com"
iu, _ := url.Parse(u)
ib := &cos.BaseURL{BucketURL: iu}
client := cos.NewClient(ib, &http.Client{
Transport: &cos.AuthorizationTransport{
SecretID: os.Getenv("COS_SECRETID"),
SecretKey: os.Getenv("COS_SECRETKEY"),
},
})
r, err := client.Bucket.Put(context.Background(), nil)
if err != nil && r.StatusCode == 409 {
fmt.Println("BucketAlreadyOwnedByYou")
} else if err != nil {
assert.Nil(s.T(), err, "PutBucket Failed")
}
if err != nil {
panic(err)
}
time.Sleep(3 * time.Second)
_, err = client.Bucket.Head(context.Background())
assert.Nil(s.T(), err, "HeadBucket Failed")
if err == nil {
_, err = client.Bucket.Delete(context.Background())
assert.Nil(s.T(), err, "DeleteBucket Failed")
}
}
func (s *CosTestSuite) TestPutBucketACLIllegal() {
opt := &cos.BucketPutACLOptions{
Header: &cos.ACLHeaderOptions{
XCosACL: "public-read-writ",
},
}
_, err := s.Client.Bucket.PutACL(context.Background(), opt)
assert.NotNil(s.T(), err, "PutBucketACL illegal Failed")
}
func (s *CosTestSuite) TestPutGetBucketACLNormal() {
// with header
opt := &cos.BucketPutACLOptions{
Header: &cos.ACLHeaderOptions{
XCosACL: "private",
},
}
_, err := s.Client.Bucket.PutACL(context.Background(), opt)
assert.Nil(s.T(), err, "PutBucketACL normal Failed")
v, _, err := s.Client.Bucket.GetACL(context.Background())
assert.Nil(s.T(), err, "GetBucketACL normal Failed")
assert.Equal(s.T(), 1, len(v.AccessControlList), "GetBucketACL normal Failed, must be private")
}
func (s *CosTestSuite) TestGetBucket() {
opt := &cos.BucketGetOptions{
Prefix: "中文",
MaxKeys: 3,
}
_, _, err := s.Client.Bucket.Get(context.Background(), opt)
assert.Nil(s.T(), err, "GetBucket Failed")
}
func (s *CosTestSuite) TestGetBucketLocation() {
v, _, err := s.Client.Bucket.GetLocation(context.Background())
assert.Nil(s.T(), err, "GetLocation Failed")
assert.Equal(s.T(), s.Region, v.Location, "GetLocation wrong region")
}
func (s *CosTestSuite) TestPutGetDeleteCORS() {
opt := &cos.BucketPutCORSOptions{
Rules: []cos.BucketCORSRule{
{
AllowedOrigins: []string{"http://www.qq.com"},
AllowedMethods: []string{"PUT", "GET"},
AllowedHeaders: []string{"x-cos-meta-test", "x-cos-xx"},
MaxAgeSeconds: 500,
ExposeHeaders: []string{"x-cos-meta-test1"},
},
},
}
_, err := s.Client.Bucket.PutCORS(context.Background(), opt)
assert.Nil(s.T(), err, "PutBucketCORS Failed")
v, _, err := s.Client.Bucket.GetCORS(context.Background())
assert.Nil(s.T(), err, "GetBucketCORS Failed")
assert.Equal(s.T(), 1, len(v.Rules), "GetBucketCORS wrong number rules")
}
func (s *CosTestSuite) TestVersionAndReplication() {
opt := &cos.BucketPutVersionOptions{
// Enabled or Suspended; once versioning has been enabled it cannot be turned off, only suspended.
Status: "Enabled",
}
_, err := s.Client.Bucket.PutVersioning(context.Background(), opt)
assert.Nil(s.T(), err, "PutVersioning Failed")
v, _, err := s.Client.Bucket.GetVersioning(context.Background())
assert.Nil(s.T(), err, "GetVersioning Failed")
assert.Equal(s.T(), "Enabled", v.Status, "Get Wrong Version status")
repOpt := &cos.PutBucketReplicationOptions{
// qcs::cam::uin/[UIN]:uin/[Subaccount]
Role: "qcs::cam::uin/" + kUin + ":uin/" + kUin,
Rule: []cos.BucketReplicationRule{
{
ID: "1",
// Enabled or Disabled
Status: "Enabled",
Destination: &cos.ReplicationDestination{
// qcs::cos:[Region]::[Bucketname-Appid]
Bucket: "qcs::cos:" + kRepRegion + "::" + kRepBucket + "-" + s.Appid,
},
},
},
}
_, err = s.Client.Bucket.PutBucketReplication(context.Background(), repOpt)
assert.Nil(s.T(), err, "PutBucketReplication Failed")
vr, _, err := s.Client.Bucket.GetBucketReplication(context.Background())
assert.Nil(s.T(), err, "GetBucketReplication Failed")
for _, r := range vr.Rule {
assert.Equal(s.T(), "Enabled", r.Status, "Get Wrong Version status")
assert.Equal(s.T(), "qcs::cos:"+kRepRegion+"::"+kRepBucket+"-"+s.Appid, r.Destination.Bucket, "Get Wrong Version status")
}
_, err = s.Client.Bucket.DeleteBucketReplication(context.Background())
assert.Nil(s.T(), err, "DeleteBucketReplication Failed")
}
func (s *CosTestSuite) TestBucketInventory() {
id := "test1"
dBucket := "qcs::cos:" + s.Region + "::" + s.Bucket + "-" + s.Appid
opt := &cos.BucketPutInventoryOptions{
ID: id,
// True or False
IsEnabled: "True",
IncludedObjectVersions: "All",
Filter: &cos.BucketInventoryFilter{
Prefix: "test",
},
OptionalFields: &cos.BucketInventoryOptionalFields{
BucketInventoryFields: []string{
"Size", "LastModifiedDate",
},
},
Schedule: &cos.BucketInventorySchedule{
// Weekly or Daily
Frequency: "Daily",
},
Destination: &cos.BucketInventoryDestination{
Bucket: dBucket,
Format: "CSV",
},
}
_, err := s.Client.Bucket.PutInventory(context.Background(), id, opt)
assert.Nil(s.T(), err, "PutBucketInventory Failed")
v, _, err := s.Client.Bucket.GetInventory(context.Background(), id)
assert.Nil(s.T(), err, "GetBucketInventory Failed")
assert.Equal(s.T(), "test1", v.ID, "Get Wrong inventory id")
assert.Equal(s.T(), "true", v.IsEnabled, "Get Wrong inventory isenabled")
assert.Equal(s.T(), dBucket, v.Destination.Bucket, "Get Wrong inventory isenabled")
_, err = s.Client.Bucket.DeleteInventory(context.Background(), id)
assert.Nil(s.T(), err, "DeleteBucketInventory Failed")
}
func (s *CosTestSuite) TestBucketLogging() {
tBucket := s.Bucket + "-" + s.Appid
opt := &cos.BucketPutLoggingOptions{
LoggingEnabled: &cos.BucketLoggingEnabled{
TargetBucket: tBucket,
},
}
_, err := s.Client.Bucket.PutLogging(context.Background(), opt)
assert.Nil(s.T(), err, "PutLogging Failed")
v, _, err := s.Client.Bucket.GetLogging(context.Background())
assert.Nil(s.T(), err, "GetLogging Failed")
assert.Equal(s.T(), tBucket, v.LoggingEnabled.TargetBucket, "Get Wrong Version status")
}
func (s *CosTestSuite) TestBucketTagging() {
opt := &cos.BucketPutTaggingOptions{
TagSet: []cos.BucketTaggingTag{
{
Key: "testk1",
Value: "testv1",
},
{
Key: "testk2",
Value: "testv2",
},
},
}
_, err := s.Client.Bucket.PutTagging(context.Background(), opt)
assert.Nil(s.T(), err, "Put Tagging Failed")
v, _, err := s.Client.Bucket.GetTagging(context.Background())
assert.Nil(s.T(), err, "Get Tagging Failed")
assert.Equal(s.T(), v.TagSet[0].Key, opt.TagSet[0].Key, "Get Wrong Tag key")
assert.Equal(s.T(), v.TagSet[0].Value, opt.TagSet[0].Value, "Get Wrong Tag value")
assert.Equal(s.T(), v.TagSet[1].Key, opt.TagSet[1].Key, "Get Wrong Tag key")
assert.Equal(s.T(), v.TagSet[1].Value, opt.TagSet[1].Value, "Get Wrong Tag value")
}
func (s *CosTestSuite) TestPutGetDeleteLifeCycle() {
lc := &cos.BucketPutLifecycleOptions{
Rules: []cos.BucketLifecycleRule{
{
ID: "1234",
Filter: &cos.BucketLifecycleFilter{Prefix: "test"},
Status: "Enabled",
Transition: &cos.BucketLifecycleTransition{
Days: 10,
StorageClass: "Standard",
},
},
},
}
_, err := s.Client.Bucket.PutLifecycle(context.Background(), lc)
assert.Nil(s.T(), err, "PutBucketLifecycle Failed")
_, r, err := s.Client.Bucket.GetLifecycle(context.Background())
// Might have been cleaned up by another case running concurrently
if err != nil && 404 != r.StatusCode {
assert.Nil(s.T(), err, "GetBucketLifecycle Failed")
}
_, err = s.Client.Bucket.DeleteLifecycle(context.Background())
assert.Nil(s.T(), err, "DeleteBucketLifecycle Failed")
}
func (s *CosTestSuite) TestPutGetDeleteWebsite() {
opt := &cos.BucketPutWebsiteOptions{
Index: "index.html",
Error: &cos.ErrorDocument{"index_backup.html"},
RoutingRules: &cos.WebsiteRoutingRules{
[]cos.WebsiteRoutingRule{
{
ConditionErrorCode: "404",
RedirectProtocol: "https",
RedirectReplaceKey: "404.html",
},
{
ConditionPrefix: "docs/",
RedirectProtocol: "https",
RedirectReplaceKeyPrefix: "documents/",
},
},
},
}
_, err := s.Client.Bucket.PutWebsite(context.Background(), opt)
assert.Nil(s.T(), err, "PutBucketWebsite Failed")
res, rsp, err := s.Client.Bucket.GetWebsite(context.Background())
if err != nil && 404 != rsp.StatusCode {
assert.Nil(s.T(), err, "GetBucketWebsite Failed")
}
assert.Equal(s.T(), opt.Index, res.Index, "GetBucketWebsite Failed")
assert.Equal(s.T(), opt.Error, res.Error, "GetBucketWebsite Failed")
assert.Equal(s.T(), opt.RedirectProtocol, res.RedirectProtocol, "GetBucketWebsite Failed")
_, err = s.Client.Bucket.DeleteWebsite(context.Background())
assert.Nil(s.T(), err, "DeleteBucketWebsite Failed")
}
func (s *CosTestSuite) TestListMultipartUploads() {
// Create new upload
name := "test_multipart" + time.Now().Format(time.RFC3339)
flag := false
v, _, err := s.Client.Object.InitiateMultipartUpload(context.Background(), name, nil)
assert.Nil(s.T(), err, "InitiateMultipartUpload Failed")
id := v.UploadID
// List
r, _, err := s.Client.Bucket.ListMultipartUploads(context.Background(), nil)
assert.Nil(s.T(), err, "ListMultipartUploads Failed")
for _, p := range r.Uploads {
if p.Key == name {
assert.Equal(s.T(), id, p.UploadID, "ListMultipartUploads wrong uploadid")
flag = true
}
}
assert.Equal(s.T(), true, flag, "ListMultipartUploads wrong key")
// Abort
_, err = s.Client.Object.AbortMultipartUpload(context.Background(), name, id)
assert.Nil(s.T(), err, "AbortMultipartUpload Failed")
}
// Object API
func (s *CosTestSuite) TestPutHeadGetDeleteObject_10MB() {
name := "test/objectPut" + time.Now().Format(time.RFC3339)
b := make([]byte, 1024*1024*10)
_, err := rand.Read(b)
content := fmt.Sprintf("%X", b)
f := strings.NewReader(content)
_, err = s.Client.Object.Put(context.Background(), name, f, nil)
assert.Nil(s.T(), err, "PutObject Failed")
_, err = s.Client.Object.Head(context.Background(), name, nil)
assert.Nil(s.T(), err, "HeadObject Failed")
_, err = s.Client.Object.Delete(context.Background(), name)
assert.Nil(s.T(), err, "DeleteObject Failed")
}
func (s *CosTestSuite) TestPutGetDeleteObjectByFile_10MB() {
// Create tmp file
filePath := "tmpfile" + time.Now().Format(time.RFC3339)
newfile, err := os.Create(filePath)
assert.Nil(s.T(), err, "create tmp file Failed")
defer newfile.Close()
name := "test/objectPutByFile" + time.Now().Format(time.RFC3339)
b := make([]byte, 1024*1024*10)
_, err = rand.Read(b)
newfile.Write(b)
_, err = s.Client.Object.PutFromFile(context.Background(), name, filePath, nil)
assert.Nil(s.T(), err, "PutObject Failed")
// Over write tmp file
_, err = s.Client.Object.GetToFile(context.Background(), name, filePath, nil)
assert.Nil(s.T(), err, "HeadObject Failed")
_, err = s.Client.Object.Delete(context.Background(), name)
assert.Nil(s.T(), err, "DeleteObject Failed")
// remove the local tmp file
err = os.Remove(filePath)
assert.Nil(s.T(), err, "remove local file Failed")
}
func (s *CosTestSuite) TestPutGetDeleteObjectSpecialName() {
f := strings.NewReader("test")
name := s.SepFileName + time.Now().Format(time.RFC3339)
_, err := s.Client.Object.Put(context.Background(), name, f, nil)
assert.Nil(s.T(), err, "PutObject Failed")
resp, err := s.Client.Object.Get(context.Background(), name, nil)
assert.Nil(s.T(), err, "GetObject Failed")
defer resp.Body.Close()
bs, _ := ioutil.ReadAll(resp.Body)
assert.Equal(s.T(), "test", string(bs), "GetObject failed content wrong")
_, err = s.Client.Object.Delete(context.Background(), name)
assert.Nil(s.T(), err, "DeleteObject Failed")
}
func (s *CosTestSuite) TestPutObjectToNonExistBucket() {
u := "http://gosdknonexistbucket-" + s.Appid + ".cos." + s.Region + ".myqcloud.com"
iu, _ := url.Parse(u)
ib := &cos.BaseURL{BucketURL: iu}
client := cos.NewClient(ib, &http.Client{
Transport: &cos.AuthorizationTransport{
SecretID: os.Getenv("COS_SECRETID"),
SecretKey: os.Getenv("COS_SECRETKEY"),
},
})
name := "test/objectPut.go"
f := strings.NewReader("test")
r, err := client.Object.Put(context.Background(), name, f, nil)
assert.NotNil(s.T(), err, "PutObject ToNonExistBucket Failed")
assert.Equal(s.T(), 404, r.StatusCode, "PutObject ToNonExistBucket, not 404")
}
func (s *CosTestSuite) TestPutGetObjectACL() {
name := "test/objectACL.go" + time.Now().Format(time.RFC3339)
f := strings.NewReader("test")
_, err := s.Client.Object.Put(context.Background(), name, f, nil)
assert.Nil(s.T(), err, "PutObject Failed")
// Put acl
opt := &cos.ObjectPutACLOptions{
Header: &cos.ACLHeaderOptions{
XCosACL: "public-read",
},
}
_, err = s.Client.Object.PutACL(context.Background(), name, opt)
assert.Nil(s.T(), err, "PutObjectACL Failed")
v, _, err := s.Client.Object.GetACL(context.Background(), name)
assert.Nil(s.T(), err, "GetObjectACL Failed")
assert.Equal(s.T(), 2, len(v.AccessControlList), "GetObjectACL wrong number of grants")
_, err = s.Client.Object.Delete(context.Background(), name)
assert.Nil(s.T(), err, "DeleteObject Failed")
}
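// TestPutObjectRestore uploads an object with the ARCHIVE storage class and then issues a
// POST Restore with the Expedited tier. Status codes 200, 202 and 409 (typically a restore
// that is already in progress) are all treated as acceptable responses.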
func (s *CosTestSuite) TestPutObjectRestore() {
name := "archivetest"
putOpt := &cos.ObjectPutOptions{
ObjectPutHeaderOptions: &cos.ObjectPutHeaderOptions{
XCosStorageClass: "ARCHIVE",
},
}
f := strings.NewReader("test")
_, err := s.Client.Object.Put(context.Background(), name, f, putOpt)
assert.Nil(s.T(), err, "PutObject Archive faild")
opt := &cos.ObjectRestoreOptions{
Days: 2,
Tier: &cos.CASJobParameters{
// Standard, Expedited and Bulk
Tier: "Expedited",
},
}
resp, _ := s.Client.Object.PostRestore(context.Background(), name, opt)
retCode := resp.StatusCode
if retCode != 200 && retCode != 202 && retCode != 409 {
right := false
fmt.Println("PutObjectRestore get code is:", retCode)
assert.Equal(s.T(), true, right, "PutObjectRestore Failed")
}
}
func (s *CosTestSuite) TestCopyObject() {
u := "http://" + kRepBucket + "-" + s.Appid + ".cos." + kRepRegion + ".myqcloud.com"
iu, _ := url.Parse(u)
ib := &cos.BaseURL{BucketURL: iu}
c := cos.NewClient(ib, &http.Client{
Transport: &cos.AuthorizationTransport{
SecretID: os.Getenv("COS_SECRETID"),
SecretKey: os.Getenv("COS_SECRETKEY"),
},
})
opt := &cos.BucketPutOptions{
XCosACL: "public-read",
}
// Note: when running on the intranet, the bucket host sometimes has i/o timeout problems
r, err := c.Bucket.Put(context.Background(), opt)
if err != nil && r.StatusCode == 409 {
fmt.Println("BucketAlreadyOwnedByYou")
} else if err != nil {
assert.Nil(s.T(), err, "PutBucket Failed")
}
source := "test/objectMove1" + time.Now().Format(time.RFC3339)
expected := "test"
f := strings.NewReader(expected)
r, err = c.Object.Put(context.Background(), source, f, nil)
assert.Nil(s.T(), err, "PutObject Failed")
var version_id string
if r.Header["X-Cos-Version-Id"] != nil {
version_id = r.Header.Get("X-Cos-Version-Id")
}
time.Sleep(3 * time.Second)
// Copy file
sourceURL := fmt.Sprintf("%s/%s", iu.Host, source)
dest := "test/objectMove1" + time.Now().Format(time.RFC3339)
//opt := &cos.ObjectCopyOptions{}
if version_id == "" {
_, _, err = s.Client.Object.Copy(context.Background(), dest, sourceURL, nil)
} else {
_, _, err = s.Client.Object.Copy(context.Background(), dest, sourceURL, nil, version_id)
}
assert.Nil(s.T(), err, "PutObjectCopy Failed")
// Check content
resp, err := s.Client.Object.Get(context.Background(), dest, nil)
assert.Nil(s.T(), err, "GetObject Failed")
bs, _ := ioutil.ReadAll(resp.Body)
resp.Body.Close()
result := string(bs)
assert.Equal(s.T(), expected, result, "PutObjectCopy Failed, wrong content")
}
func (s *CosTestSuite) TestCreateAbortMultipartUpload() {
name := "test_multipart" + time.Now().Format(time.RFC3339)
v, _, err := s.Client.Object.InitiateMultipartUpload(context.Background(), name, nil)
assert.Nil(s.T(), err, "InitiateMultipartUpload Failed")
_, err = s.Client.Object.AbortMultipartUpload(context.Background(), name, v.UploadID)
assert.Nil(s.T(), err, "AbortMultipartUpload Failed")
}
func (s *CosTestSuite) TestCreateCompleteMultipartUpload() {
name := "test/test_complete_upload" + time.Now().Format(time.RFC3339)
v, _, err := s.Client.Object.InitiateMultipartUpload(context.Background(), name, nil)
uploadID := v.UploadID
blockSize := 1024 * 1024 * 3
opt := &cos.CompleteMultipartUploadOptions{}
for i := 1; i < 3; i++ {
b := make([]byte, blockSize)
_, err := rand.Read(b)
content := fmt.Sprintf("%X", b)
f := strings.NewReader(content)
resp, err := s.Client.Object.UploadPart(
context.Background(), name, uploadID, i, f, nil,
)
assert.Nil(s.T(), err, "UploadPart Failed")
etag := resp.Header.Get("Etag")
opt.Parts = append(opt.Parts, cos.Object{
PartNumber: i, ETag: etag},
)
}
_, _, err = s.Client.Object.CompleteMultipartUpload(
context.Background(), name, uploadID, opt,
)
assert.Nil(s.T(), err, "CompleteMultipartUpload Failed")
}
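// TestSSE_C exercises server-side encryption with a customer-provided key (SSE-C) across
// Put, Head, Get, Copy and MultiUpload. The key used here is a base64-encoded 256-bit key
// passed together with its MD5 digest, as the SSE-C headers expect.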
func (s *CosTestSuite) TestSSE_C() {
name := "test/TestSSE_C"
content := "test sse-c " + time.Now().Format(time.RFC3339)
f := strings.NewReader(content)
putOpt := &cos.ObjectPutOptions{
ObjectPutHeaderOptions: &cos.ObjectPutHeaderOptions{
ContentType: "text/html",
//XCosServerSideEncryption: "AES256",
XCosSSECustomerAglo: "AES256",
XCosSSECustomerKey: "MDEyMzQ1Njc4OUFCQ0RFRjAxMjM0NTY3ODlBQkNERUY=",
XCosSSECustomerKeyMD5: "U5L61r7jcwdNvT7frmUG8g==",
},
ACLHeaderOptions: &cos.ACLHeaderOptions{
XCosACL: "public-read",
//XCosACL: "private",
},
}
_, err := s.Client.Object.Put(context.Background(), name, f, putOpt)
assert.Nil(s.T(), err, "PutObject with SSE failed")
headOpt := &cos.ObjectHeadOptions{
XCosSSECustomerAglo: "AES256",
XCosSSECustomerKey: "MDEyMzQ1Njc4OUFCQ0RFRjAxMjM0NTY3ODlBQkNERUY=",
XCosSSECustomerKeyMD5: "U5L61r7jcwdNvT7frmUG8g==",
}
_, err = s.Client.Object.Head(context.Background(), name, headOpt)
assert.Nil(s.T(), err, "HeadObject with SSE failed")
getOpt := &cos.ObjectGetOptions{
XCosSSECustomerAglo: "AES256",
XCosSSECustomerKey: "MDEyMzQ1Njc4OUFCQ0RFRjAxMjM0NTY3ODlBQkNERUY=",
XCosSSECustomerKeyMD5: "U5L61r7jcwdNvT7frmUG8g==",
}
var resp *cos.Response
resp, err = s.Client.Object.Get(context.Background(), name, getOpt)
assert.Nil(s.T(), err, "GetObject with SSE failed")
bodyBytes, _ := ioutil.ReadAll(resp.Body)
bodyContent := string(bodyBytes)
assert.Equal(s.T(), content, bodyContent, "GetObject with SSE failed, want: %+v, res: %+v", content, bodyContent)
copyOpt := &cos.ObjectCopyOptions{
&cos.ObjectCopyHeaderOptions{
XCosCopySourceSSECustomerAglo: "AES256",
XCosCopySourceSSECustomerKey: "MDEyMzQ1Njc4OUFCQ0RFRjAxMjM0NTY3ODlBQkNERUY=",
XCosCopySourceSSECustomerKeyMD5: "U5L61r7jcwdNvT7frmUG8g==",
},
&cos.ACLHeaderOptions{},
}
copySource := s.Bucket + "-" + s.Appid + ".cos." + s.Region + ".myqcloud.com/" + name
_, _, err = s.Client.Object.Copy(context.Background(), "test/TestSSE_C_Copy", copySource, copyOpt)
assert.Nil(s.T(), err, "CopyObject with SSE failed")
partIni := &cos.MultiUploadOptions{
OptIni: &cos.InitiateMultipartUploadOptions{
&cos.ACLHeaderOptions{},
&cos.ObjectPutHeaderOptions{
XCosSSECustomerAglo: "AES256",
XCosSSECustomerKey: "MDEyMzQ1Njc4OUFCQ0RFRjAxMjM0NTY3ODlBQkNERUY=",
XCosSSECustomerKeyMD5: "U5L61r7jcwdNvT7frmUG8g==",
},
},
PartSize: 1,
}
filePath := "tmpfile" + time.Now().Format(time.RFC3339)
newFile, err := os.Create(filePath)
assert.Nil(s.T(), err, "create tmp file Failed")
defer newFile.Close()
b := make([]byte, 1024*10)
_, err = rand.Read(b)
newFile.Write(b)
_, _, err = s.Client.Object.MultiUpload(context.Background(), "test/TestSSE_C_MultiUpload", filePath, partIni)
assert.Nil(s.T(), err, "MultiUpload with SSE failed")
err = os.Remove(filePath)
assert.Nil(s.T(), err, "remove local file Failed")
}
func (s *CosTestSuite) TestMultiUpload() {
filePath := "tmpfile" + time.Now().Format(time.RFC3339)
newFile, err := os.Create(filePath)
assert.Nil(s.T(), err, "create tmp file Failed")
defer newFile.Close()
b := make([]byte, 1024*1024*10)
_, err = rand.Read(b)
newFile.Write(b)
partIni := &cos.MultiUploadOptions{}
_, _, err = s.Client.Object.MultiUpload(context.Background(), "test/Test_MultiUpload", filePath, partIni)
err = os.Remove(filePath)
assert.Nil(s.T(), err, "remove tmp file failed")
}
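// TestBatch drives the COS batch API end to end: it uploads a source object and a CSV
// manifest ("bucket,key" per line), creates a PutObjectCopy job from that manifest,
// describes it, updates its priority, waits for it to reach the Suspended state and then
// moves it to Ready.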
func (s *CosTestSuite) TestBatch() {
client := cos.NewClient(s.Client.BaseURL, &http.Client{
Transport: &cos.AuthorizationTransport{
SecretID: os.Getenv("COS_SECRETID"),
SecretKey: os.Getenv("COS_SECRETKEY"),
},
})
source_name := "test/1.txt"
sf := strings.NewReader("batch test content")
_, err := client.Object.Put(context.Background(), source_name, sf, nil)
assert.Nil(s.T(), err, "object put Failed")
manifest_name := "test/manifest.csv"
f := strings.NewReader(kBatchBucket + "," + source_name)
resp, err := client.Object.Put(context.Background(), manifest_name, f, nil)
assert.Nil(s.T(), err, "object put Failed")
etag := resp.Header.Get("ETag")
uuid_str := uuid.New().String()
opt := &cos.BatchCreateJobOptions{
ClientRequestToken: uuid_str,
ConfirmationRequired: "true",
Description: "test batch",
Manifest: &cos.BatchJobManifest{
Location: &cos.BatchJobManifestLocation{
ETag: etag,
ObjectArn: "qcs::cos:" + kBatchRegion + ":uid/" + s.Appid + ":" + kBatchBucket + "/" + manifest_name,
},
Spec: &cos.BatchJobManifestSpec{
Fields: []string{"Bucket", "Key"},
Format: "COSBatchOperations_CSV_V1",
},
},
Operation: &cos.BatchJobOperation{
PutObjectCopy: &cos.BatchJobOperationCopy{
TargetResource: "qcs::cos:" + kBatchRegion + ":uid/" + s.Appid + ":" + kTargetBatchBucket,
},
},
Priority: 1,
Report: &cos.BatchJobReport{
Bucket: "qcs::cos:" + kBatchRegion + ":uid/" + s.Appid + ":" + kBatchBucket,
Enabled: "true",
Format: "Report_CSV_V1",
Prefix: "job-result",
ReportScope: "AllTasks",
},
RoleArn: "qcs::cam::uin/" + kUin + ":roleName/COSBatch_QcsRole",
}
headers := &cos.BatchRequestHeaders{
XCosAppid: kAppid,
}
res1, _, err := client.Batch.CreateJob(context.Background(), opt, headers)
assert.Nil(s.T(), err, "create job Failed")
jobid := res1.JobId
res2, _, err := client.Batch.DescribeJob(context.Background(), jobid, headers)
assert.Nil(s.T(), err, "describe job Failed")
assert.Equal(s.T(), res2.Job.ConfirmationRequired, "true", "ConfirmationRequired not right")
assert.Equal(s.T(), res2.Job.Description, "test batch", "Description not right")
assert.Equal(s.T(), res2.Job.JobId, jobid, "jobid not right")
assert.Equal(s.T(), res2.Job.Priority, 1, "priority not right")
assert.Equal(s.T(), res2.Job.RoleArn, "qcs::cam::uin/"+kUin+":roleName/COSBatch_QcsRole", "RoleArn not right")
_, _, err = client.Batch.ListJobs(context.Background(), nil, headers)
assert.Nil(s.T(), err, "list jobs failed")
up_opt := &cos.BatchUpdatePriorityOptions{
JobId: jobid,
Priority: 3,
}
res3, _, err := client.Batch.UpdateJobPriority(context.Background(), up_opt, headers)
assert.Nil(s.T(), err, "list jobs failed")
assert.Equal(s.T(), res3.JobId, jobid, "jobid failed")
assert.Equal(s.T(), res3.Priority, 3, "priority not right")
// Wait for the job status to become Suspended
for i := 0; i < 10; i = i + 1 {
res, _, err := client.Batch.DescribeJob(context.Background(), jobid, headers)
assert.Nil(s.T(), err, "describe job Failed")
assert.Equal(s.T(), res2.Job.ConfirmationRequired, "true", "ConfirmationRequired not right")
assert.Equal(s.T(), res2.Job.Description, "test batch", "Description not right")
assert.Equal(s.T(), res2.Job.JobId, jobid, "jobid not right")
assert.Equal(s.T(), res2.Job.Priority, 1, "priority not right")
assert.Equal(s.T(), res2.Job.RoleArn, "qcs::cam::uin/"+kUin+":roleName/COSBatch_QcsRole", "priority not right")
if res.Job.Status == "Suspended" {
break
}
if i == 9 {
assert.Error(s.T(), errors.New("Job status is not Suspended or timeout"))
}
time.Sleep(time.Second * 2)
}
us_opt := &cos.BatchUpdateStatusOptions{
JobId: jobid,
RequestedJobStatus: "Ready", // 允许状态转换见 https://cloud.tencent.com/document/product/436/38604
StatusUpdateReason: "to test",
}
res4, _, err := client.Batch.UpdateJobStatus(context.Background(), us_opt, headers)
assert.Nil(s.T(), err, "list jobs failed")
assert.Equal(s.T(), res4.JobId, jobid, "jobid failed")
assert.Equal(s.T(), res4.Status, "Ready", "status failed")
assert.Equal(s.T(), res4.StatusUpdateReason, "to test", "StatusUpdateReason failed")
}
func (s *CosTestSuite) TestEncryption() {
opt := &cos.BucketPutEncryptionOptions{
Rule: &cos.BucketEncryptionConfiguration{
SSEAlgorithm: "AES256",
},
}
_, err := s.Client.Bucket.PutEncryption(context.Background(), opt)
assert.Nil(s.T(), err, "PutEncryption Failed")
res, _, err := s.Client.Bucket.GetEncryption(context.Background())
assert.Nil(s.T(), err, "GetEncryption Failed")
assert.Equal(s.T(), opt.Rule.SSEAlgorithm, res.Rule.SSEAlgorithm, "GetEncryption Failed")
_, err = s.Client.Bucket.DeleteEncryption(context.Background())
assert.Nil(s.T(), err, "DeleteEncryption Failed")
}
func (s *CosTestSuite) TestReferer() {
opt := &cos.BucketPutRefererOptions{
Status: "Enabled",
RefererType: "White-List",
DomainList: []string{
"*.qq.com",
"*.qcloud.com",
},
EmptyReferConfiguration: "Allow",
}
_, err := s.Client.Bucket.PutReferer(context.Background(), opt)
assert.Nil(s.T(), err, "PutReferer Failed")
res, _, err := s.Client.Bucket.GetReferer(context.Background())
assert.Nil(s.T(), err, "GetReferer Failed")
assert.Equal(s.T(), opt.Status, res.Status, "GetReferer Failed")
assert.Equal(s.T(), opt.RefererType, res.RefererType, "GetReferer Failed")
assert.Equal(s.T(), opt.DomainList, res.DomainList, "GetReferer Failed")
assert.Equal(s.T(), opt.EmptyReferConfiguration, res.EmptyReferConfiguration, "GetReferer Failed")
}
// End of api test
// All methods that begin with "Test" are run as tests within a
// suite.
// In order for 'go test' to run this suite, we need to create
// a normal test function and pass our suite to suite.Run
func TestCosTestSuite(t *testing.T) {
suite.Run(t, new(CosTestSuite))
}
func (s *CosTestSuite) TearDownSuite() {
// Clean the file in bucket
// r, _, err := s.Client.Bucket.ListMultipartUploads(context.Background(), nil)
// assert.Nil(s.T(), err, "ListMultipartUploads Failed")
// for _, p := range r.Uploads {
// // Abort
// _, err = s.Client.Object.AbortMultipartUpload(context.Background(), p.Key, p.UploadID)
// assert.Nil(s.T(), err, "AbortMultipartUpload Failed")
// }
// // Delete objects
// opt := &cos.BucketGetOptions{
// MaxKeys: 500,
// }
// v, _, err := s.Client.Bucket.Get(context.Background(), opt)
// assert.Nil(s.T(), err, "GetBucket Failed")
// for _, c := range v.Contents {
// _, err := s.Client.Object.Delete(context.Background(), c.Key)
// assert.Nil(s.T(), err, "DeleteObject Failed")
// }
// Cleaning up these resources here does not solve the concurrent test problem, so the code above stays commented out
fmt.Println("tear down~")
}
| [
"\"COS_SECRETID\"",
"\"COS_SECRETKEY\"",
"\"COS_SECRETID\"",
"\"COS_SECRETKEY\"",
"\"COS_SECRETID\"",
"\"COS_SECRETKEY\"",
"\"COS_SECRETID\"",
"\"COS_SECRETKEY\"",
"\"COS_SECRETID\"",
"\"COS_SECRETKEY\""
]
| []
| [
"COS_SECRETKEY",
"COS_SECRETID"
]
| [] | ["COS_SECRETKEY", "COS_SECRETID"] | go | 2 | 0 | |
pkg/controllers/routing/network_routes_controller.go | package routing
import (
"context"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net"
"os"
"os/exec"
"reflect"
"strconv"
"strings"
"sync"
"time"
"github.com/cloudnativelabs/kube-router/pkg/healthcheck"
"github.com/cloudnativelabs/kube-router/pkg/metrics"
"github.com/cloudnativelabs/kube-router/pkg/options"
"github.com/cloudnativelabs/kube-router/pkg/utils"
"github.com/coreos/go-iptables/iptables"
"github.com/golang/protobuf/ptypes/any"
// nolint:staticcheck // this has to stick around for now until gobgp updates protobuf
"github.com/golang/protobuf/ptypes"
gobgpapi "github.com/osrg/gobgp/api"
gobgp "github.com/osrg/gobgp/pkg/server"
"k8s.io/klog/v2"
"github.com/prometheus/client_golang/prometheus"
"github.com/vishvananda/netlink"
v1core "k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
)
const (
IfaceNotFound = "Link not found"
customRouteTableID = "77"
customRouteTableName = "kube-router"
podSubnetsIPSetName = "kube-router-pod-subnets"
nodeAddrsIPSetName = "kube-router-node-ips"
nodeASNAnnotation = "kube-router.io/node.asn"
nodeCommunitiesAnnotation = "kube-router.io/node.bgp.communities"
nodeCustomImportRejectAnnotation = "kube-router.io/node.bgp.customimportreject"
pathPrependASNAnnotation = "kube-router.io/path-prepend.as"
pathPrependRepeatNAnnotation = "kube-router.io/path-prepend.repeat-n"
peerASNAnnotation = "kube-router.io/peer.asns"
peerIPAnnotation = "kube-router.io/peer.ips"
// nolint:gosec // this is not a hardcoded password
peerPasswordAnnotation = "kube-router.io/peer.passwords"
peerPortAnnotation = "kube-router.io/peer.ports"
rrClientAnnotation = "kube-router.io/rr.client"
rrServerAnnotation = "kube-router.io/rr.server"
svcLocalAnnotation = "kube-router.io/service.local"
bgpLocalAddressAnnotation = "kube-router.io/bgp-local-addresses"
svcAdvertiseClusterAnnotation = "kube-router.io/service.advertise.clusterip"
svcAdvertiseExternalAnnotation = "kube-router.io/service.advertise.externalip"
svcAdvertiseLoadBalancerAnnotation = "kube-router.io/service.advertise.loadbalancerip"
LeaderElectionRecordAnnotationKey = "control-plane.alpha.kubernetes.io/leader"
// Deprecated: use kube-router.io/service.advertise.loadbalancer instead
svcSkipLbIpsAnnotation = "kube-router.io/service.skiplbips"
LoadBalancerST = "LoadBalancer"
ClusterIPST = "ClusterIP"
NodePortST = "NodePort"
prependPathMaxBits = 8
asnMaxBitSize = 32
bgpCommunityMaxSize = 32
bgpCommunityMaxPartSize = 16
routeReflectorMaxID = 32
ipv4MaskMinBits = 32
// Taken from: https://github.com/torvalds/linux/blob/master/include/uapi/linux/rtnetlink.h#L284
zebraRouteOriginator = 0x11
)
// NetworkRoutingController is struct to hold necessary information required by controller
type NetworkRoutingController struct {
nodeIP net.IP
nodeName string
nodeSubnet net.IPNet
nodeInterface string
routerID string
isIpv6 bool
activeNodes map[string]bool
mu sync.Mutex
clientset kubernetes.Interface
bgpServer *gobgp.BgpServer
syncPeriod time.Duration
clusterCIDR string
enablePodEgress bool
hostnameOverride string
advertiseClusterIP bool
advertiseExternalIP bool
advertiseLoadBalancerIP bool
advertisePodCidr bool
autoMTU bool
defaultNodeAsnNumber uint32
nodeAsnNumber uint32
nodeCustomImportRejectIPNets []net.IPNet
nodeCommunities []string
globalPeerRouters []*gobgpapi.Peer
nodePeerRouters []string
enableCNI bool
bgpFullMeshMode bool
bgpEnableInternal bool
bgpGracefulRestart bool
bgpGracefulRestartTime time.Duration
bgpGracefulRestartDeferralTime time.Duration
ipSetHandler *utils.IPSet
enableOverlays bool
overlayType string
peerMultihopTTL uint8
MetricsEnabled bool
bgpServerStarted bool
bgpHoldtime float64
bgpPort uint32
bgpRRClient bool
bgpRRServer bool
bgpClusterID string
cniConfFile string
disableSrcDstCheck bool
initSrcDstCheckDone bool
ec2IamAuthorized bool
pathPrependAS string
pathPrependCount uint8
pathPrepend bool
localAddressList []string
overrideNextHop bool
podCidr string
CNIFirewallSetup *sync.Cond
ipsetMutex *sync.Mutex
routeSyncer *routeSyncer
nodeLister cache.Indexer
svcLister cache.Indexer
epLister cache.Indexer
NodeEventHandler cache.ResourceEventHandler
ServiceEventHandler cache.ResourceEventHandler
EndpointsEventHandler cache.ResourceEventHandler
}
// Run runs forever until we are notified on stop channel
func (nrc *NetworkRoutingController) Run(healthChan chan<- *healthcheck.ControllerHeartbeat, stopCh <-chan struct{},
wg *sync.WaitGroup) {
var err error
if nrc.enableCNI {
nrc.updateCNIConfig()
}
klog.V(1).Info("Populating ipsets.")
err = nrc.syncNodeIPSets()
if err != nil {
klog.Errorf("Failed initial ipset setup: %s", err)
}
// In case of cluster provisioned on AWS disable source-destination check
if nrc.disableSrcDstCheck {
nrc.disableSourceDestinationCheck()
nrc.initSrcDstCheckDone = true
}
// enable IP forwarding for the packets coming in/out from the pods
err = nrc.enableForwarding()
if err != nil {
klog.Errorf("Failed to enable IP forwarding of traffic from pods: %s", err.Error())
}
nrc.CNIFirewallSetup.Broadcast()
// Handle ipip tunnel overlay
if nrc.enableOverlays {
klog.V(1).Info("IPIP Tunnel Overlay enabled in configuration.")
klog.V(1).Info("Setting up overlay networking.")
err = nrc.enablePolicyBasedRouting()
if err != nil {
klog.Errorf("Failed to enable required policy based routing: %s", err.Error())
}
} else {
klog.V(1).Info("IPIP Tunnel Overlay disabled in configuration.")
klog.V(1).Info("Cleaning up old overlay networking if needed.")
err = nrc.disablePolicyBasedRouting()
if err != nil {
klog.Errorf("Failed to disable policy based routing: %s", err.Error())
}
}
klog.V(1).Info("Performing cleanup of depreciated rules/ipsets (if needed).")
err = nrc.deleteBadPodEgressRules()
if err != nil {
klog.Errorf("Error cleaning up old/bad Pod egress rules: %s", err.Error())
}
// Handle Pod egress masquerading configuration
if nrc.enablePodEgress {
klog.V(1).Infoln("Enabling Pod egress.")
err = nrc.createPodEgressRule()
if err != nil {
klog.Errorf("Error enabling Pod egress: %s", err.Error())
}
} else {
klog.V(1).Infoln("Disabling Pod egress.")
err = nrc.deletePodEgressRule()
if err != nil {
klog.Warningf("Error cleaning up Pod Egress related networking: %s", err)
}
}
// create 'kube-bridge' interface to which pods will be connected
kubeBridgeIf, err := netlink.LinkByName("kube-bridge")
if err != nil && err.Error() == IfaceNotFound {
linkAttrs := netlink.NewLinkAttrs()
linkAttrs.Name = "kube-bridge"
bridge := &netlink.Bridge{LinkAttrs: linkAttrs}
if err = netlink.LinkAdd(bridge); err != nil {
klog.Errorf("Failed to create `kube-router` bridge due to %s. Will be created by CNI bridge "+
"plugin when pod is launched.", err.Error())
}
kubeBridgeIf, err = netlink.LinkByName("kube-bridge")
if err != nil {
klog.Errorf("Failed to find created `kube-router` bridge due to %s. Will be created by CNI "+
"bridge plugin when pod is launched.", err.Error())
}
err = netlink.LinkSetUp(kubeBridgeIf)
if err != nil {
klog.Errorf("Failed to bring `kube-router` bridge up due to %s. Will be created by CNI bridge "+
"plugin at later point when pod is launched.", err.Error())
}
}
if nrc.autoMTU {
mtu, err := utils.GetMTUFromNodeIP(nrc.nodeIP, nrc.enableOverlays)
if err != nil {
klog.Errorf("Failed to find MTU for node IP: %s for intelligently setting the kube-bridge MTU "+
"due to %s.", nrc.nodeIP, err.Error())
}
if mtu > 0 {
klog.Infof("Setting MTU of kube-bridge interface to: %d", mtu)
err = netlink.LinkSetMTU(kubeBridgeIf, mtu)
if err != nil {
klog.Errorf("Failed to set MTU for kube-bridge interface due to: %s", err.Error())
}
} else {
klog.Infof("Not setting MTU of kube-bridge interface")
}
}
// enable netfilter for the bridge
if _, err := exec.Command("modprobe", "br_netfilter").CombinedOutput(); err != nil {
klog.Errorf("Failed to enable netfilter for bridge. Network policies and service proxy may "+
"not work: %s", err.Error())
}
sysctlErr := utils.SetSysctl(utils.BridgeNFCallIPTables, 1)
if sysctlErr != nil {
klog.Errorf("Failed to enable iptables for bridge. Network policies and service proxy may "+
"not work: %s", sysctlErr.Error())
}
if nrc.isIpv6 {
sysctlErr = utils.SetSysctl(utils.BridgeNFCallIP6Tables, 1)
if sysctlErr != nil {
klog.Errorf("Failed to enable ip6tables for bridge. Network policies and service proxy may "+
"not work: %s", sysctlErr.Error())
}
}
t := time.NewTicker(nrc.syncPeriod)
defer t.Stop()
defer wg.Done()
klog.Infof("Starting network route controller")
// Start route syncer
nrc.routeSyncer.run(stopCh, wg)
// Wait till we are ready to launch BGP server
for {
err := nrc.startBgpServer(true)
if err != nil {
klog.Errorf("Failed to start node BGP server: %s", err)
select {
case <-stopCh:
klog.Infof("Shutting down network routes controller")
return
case <-t.C:
klog.Infof("Retrying start of node BGP server")
continue
}
} else {
break
}
}
nrc.bgpServerStarted = true
if !nrc.bgpGracefulRestart {
defer func() {
err := nrc.bgpServer.StopBgp(context.Background(), &gobgpapi.StopBgpRequest{})
if err != nil {
klog.Errorf("error shutting down BGP server: %s", err)
}
}()
}
// loop forever till notified to stop on stopCh
for {
var err error
select {
case <-stopCh:
klog.Infof("Shutting down network routes controller")
return
default:
}
// Update ipset entries
if nrc.enablePodEgress || nrc.enableOverlays {
klog.V(1).Info("Syncing ipsets")
err = nrc.syncNodeIPSets()
if err != nil {
klog.Errorf("Error synchronizing ipsets: %s", err.Error())
}
}
// enable IP forwarding for the packets coming in/out from the pods
err = nrc.enableForwarding()
if err != nil {
klog.Errorf("Failed to enable IP forwarding of traffic from pods: %s", err.Error())
}
// advertise or withdraw IPs for the services to be reachable via host
toAdvertise, toWithdraw, err := nrc.getActiveVIPs()
if err != nil {
klog.Errorf("failed to get routes to advertise/withdraw %s", err)
}
klog.V(1).Infof("Performing periodic sync of service VIP routes")
nrc.advertiseVIPs(toAdvertise)
nrc.withdrawVIPs(toWithdraw)
klog.V(1).Info("Performing periodic sync of pod CIDR routes")
err = nrc.advertisePodRoute()
if err != nil {
klog.Errorf("Error advertising route: %s", err.Error())
}
err = nrc.AddPolicies()
if err != nil {
klog.Errorf("Error adding BGP policies: %s", err.Error())
}
if nrc.bgpEnableInternal {
nrc.syncInternalPeers()
}
if err == nil {
healthcheck.SendHeartBeat(healthChan, "NRC")
} else {
klog.Errorf("Error during periodic sync in network routing controller. Error: " + err.Error())
klog.Errorf("Skipping sending heartbeat from network routing controller as periodic sync failed.")
}
select {
case <-stopCh:
klog.Infof("Shutting down network routes controller")
return
case <-t.C:
}
}
}
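// updateCNIConfig makes sure the pod CIDR assigned to this node is present in the CNI conf
// file and, when auto-MTU is enabled, refreshes the MTU stored there as well.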
func (nrc *NetworkRoutingController) updateCNIConfig() {
cidr, err := utils.GetPodCidrFromCniSpec(nrc.cniConfFile)
if err != nil {
klog.Errorf("Failed to get pod CIDR from CNI conf file: %s", err)
}
if reflect.DeepEqual(cidr, net.IPNet{}) {
klog.Infof("`subnet` in CNI conf file is empty so populating `subnet` in CNI conf file with pod " +
"CIDR assigned to the node obtained from node spec.")
}
cidrlen, _ := cidr.Mask.Size()
oldCidr := cidr.IP.String() + "/" + strconv.Itoa(cidrlen)
currentCidr := nrc.podCidr
if len(cidr.IP) == 0 || strings.Compare(oldCidr, currentCidr) != 0 {
err = utils.InsertPodCidrInCniSpec(nrc.cniConfFile, currentCidr)
if err != nil {
klog.Fatalf("Failed to insert `subnet`(pod CIDR) into CNI conf file: %s", err.Error())
}
}
if nrc.autoMTU {
err = nrc.autoConfigureMTU()
if err != nil {
klog.Errorf("Failed to auto-configure MTU due to: %s", err.Error())
}
}
}
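// autoConfigureMTU derives the MTU from the interface that carries the node IP and writes it
// into the CNI conf file (or into every plugin entry of a .conflist).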
func (nrc *NetworkRoutingController) autoConfigureMTU() error {
mtu, err := utils.GetMTUFromNodeIP(nrc.nodeIP, nrc.enableOverlays)
if err != nil {
return fmt.Errorf("failed to generate MTU: %s", err.Error())
}
file, err := ioutil.ReadFile(nrc.cniConfFile)
if err != nil {
return fmt.Errorf("failed to load CNI conf file: %s", err.Error())
}
var config interface{}
err = json.Unmarshal(file, &config)
if err != nil {
return fmt.Errorf("failed to parse JSON from CNI conf file: %s", err.Error())
}
if strings.HasSuffix(nrc.cniConfFile, ".conflist") {
configMap := config.(map[string]interface{})
for key := range configMap {
if key != "plugins" {
continue
}
pluginConfigs := configMap["plugins"].([]interface{})
for _, pluginConfig := range pluginConfigs {
pluginConfigMap := pluginConfig.(map[string]interface{})
pluginConfigMap["mtu"] = mtu
}
}
} else {
pluginConfig := config.(map[string]interface{})
pluginConfig["mtu"] = mtu
}
configJSON, _ := json.Marshal(config)
err = ioutil.WriteFile(nrc.cniConfFile, configJSON, 0644)
if err != nil {
return fmt.Errorf("failed to insert `mtu` into CNI conf file: %s", err.Error())
}
return nil
}
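// watchBgpUpdates registers a callback on GoBGP's global routing table and injects every
// path advertised by a peer into the host routing table via injectRoute.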
func (nrc *NetworkRoutingController) watchBgpUpdates() {
pathWatch := func(path *gobgpapi.Path) {
if nrc.MetricsEnabled {
metrics.ControllerBGPadvertisementsReceived.Inc()
}
if path.NeighborIp == "<nil>" {
return
}
klog.V(2).Infof("Processing bgp route advertisement from peer: %s", path.NeighborIp)
if err := nrc.injectRoute(path); err != nil {
klog.Errorf("Failed to inject routes due to: " + err.Error())
}
}
err := nrc.bgpServer.MonitorTable(context.Background(), &gobgpapi.MonitorTableRequest{
TableType: gobgpapi.TableType_GLOBAL,
Family: &gobgpapi.Family{
Afi: gobgpapi.Family_AFI_IP,
Safi: gobgpapi.Family_SAFI_UNICAST,
},
}, pathWatch)
if err != nil {
klog.Errorf("failed to register monitor global routing table callback due to : " + err.Error())
}
}
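// advertisePodRoute announces this node's pod CIDR to BGP peers, using an MP_REACH_NLRI
// attribute for IPv6 and a plain next-hop attribute for IPv4.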
func (nrc *NetworkRoutingController) advertisePodRoute() error {
if nrc.MetricsEnabled {
metrics.ControllerBGPadvertisementsSent.Inc()
}
cidrStr := strings.Split(nrc.podCidr, "/")
subnet := cidrStr[0]
cidrLen, err := strconv.Atoi(cidrStr[1])
if err != nil || cidrLen < 0 || cidrLen > 32 {
return fmt.Errorf("the pod CIDR IP given is not a proper mask: %d", cidrLen)
}
if nrc.isIpv6 {
klog.V(2).Infof("Advertising route: '%s/%d via %s' to peers", subnet, cidrLen, nrc.nodeIP.String())
v6Family := &gobgpapi.Family{
Afi: gobgpapi.Family_AFI_IP6,
Safi: gobgpapi.Family_SAFI_UNICAST,
}
// nolint:staticcheck // this has to stick around for now until gobgp updates protobuf
nlri, _ := ptypes.MarshalAny(&gobgpapi.IPAddressPrefix{
PrefixLen: uint32(cidrLen),
Prefix: cidrStr[0],
})
// nolint:staticcheck // this has to stick around for now until gobgp updates protobuf
a1, _ := ptypes.MarshalAny(&gobgpapi.OriginAttribute{
Origin: 0,
})
// nolint:staticcheck // this has to stick around for now until gobgp updates protobuf
v6Attrs, _ := ptypes.MarshalAny(&gobgpapi.MpReachNLRIAttribute{
Family: v6Family,
NextHops: []string{nrc.nodeIP.String()},
Nlris: []*any.Any{nlri},
})
_, err := nrc.bgpServer.AddPath(context.Background(), &gobgpapi.AddPathRequest{
Path: &gobgpapi.Path{
Family: v6Family,
Nlri: nlri,
Pattrs: []*any.Any{a1, v6Attrs},
},
})
if err != nil {
return fmt.Errorf(err.Error())
}
} else {
klog.V(2).Infof("Advertising route: '%s/%d via %s' to peers", subnet, cidrLen, nrc.nodeIP.String())
// nolint:staticcheck // this has to stick around for now until gobgp updates protobuf
nlri, _ := ptypes.MarshalAny(&gobgpapi.IPAddressPrefix{
PrefixLen: uint32(cidrLen),
Prefix: cidrStr[0],
})
// nolint:staticcheck // this has to stick around for now until gobgp updates protobuf
a1, _ := ptypes.MarshalAny(&gobgpapi.OriginAttribute{
Origin: 0,
})
// nolint:staticcheck // this has to stick around for now until gobgp updates protobuf
a2, _ := ptypes.MarshalAny(&gobgpapi.NextHopAttribute{
NextHop: nrc.nodeIP.String(),
})
attrs := []*any.Any{a1, a2}
_, err := nrc.bgpServer.AddPath(context.Background(), &gobgpapi.AddPathRequest{
Path: &gobgpapi.Path{
Family: &gobgpapi.Family{Afi: gobgpapi.Family_AFI_IP, Safi: gobgpapi.Family_SAFI_UNICAST},
Nlri: nlri,
Pattrs: attrs,
},
})
if err != nil {
return fmt.Errorf(err.Error())
}
}
return nil
}
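// injectRoute translates a BGP path received from a peer into a host route: withdrawals
// trigger route/tunnel cleanup, overlay destinations are routed through an IPIP tunnel, and
// next hops in the node's own subnet are routed directly at layer 2.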
func (nrc *NetworkRoutingController) injectRoute(path *gobgpapi.Path) error {
klog.V(2).Infof("injectRoute Path Looks Like: %s", path.String())
var route *netlink.Route
var link netlink.Link
dst, nextHop, err := parseBGPPath(path)
if err != nil {
return err
}
tunnelName := generateTunnelName(nextHop.String())
sameSubnet := nrc.nodeSubnet.Contains(nextHop)
// If we've made it this far, then it is likely that the node is holding a destination route for this path already.
// If the path we've received from GoBGP is a withdrawal, we should clean up any lingering routes that may exist
// on the host (rather than creating a new one or updating an existing one), and then return.
if path.IsWithdraw {
klog.V(2).Infof("Removing route: '%s via %s' from peer in the routing table", dst, nextHop)
// The path might be withdrawn because the peer became unestablished or it may be withdrawn because just the
// path was withdrawn. Check to see if the peer is still established before deciding whether to clean the
// tunnel and tunnel routes or whether to just delete the destination route.
peerEstablished, err := nrc.isPeerEstablished(nextHop.String())
if err != nil {
klog.Errorf("encountered error while checking peer status: %v", err)
}
if err == nil && !peerEstablished {
klog.V(1).Infof("Peer '%s' was not found any longer, removing tunnel and routes",
nextHop.String())
// Also delete route from state map so that it doesn't get re-synced after deletion
nrc.routeSyncer.delInjectedRoute(dst)
nrc.cleanupTunnel(dst, tunnelName)
return nil
}
// Also delete route from state map so that it doesn't get re-synced after deletion
nrc.routeSyncer.delInjectedRoute(dst)
return deleteRoutesByDestination(dst)
}
shouldCreateTunnel := func() bool {
if !nrc.enableOverlays {
return false
}
if nrc.overlayType == "full" {
return true
}
if nrc.overlayType == "subnet" && !sameSubnet {
return true
}
return false
}
// create IPIP tunnels only when node is not in same subnet or overlay-type is set to 'full'
// if the user has disabled overlays, don't create tunnels. If we're not creating a tunnel, check to see if there is
// any cleanup that needs to happen.
if shouldCreateTunnel() {
link, err = nrc.setupOverlayTunnel(tunnelName, nextHop)
if err != nil {
return err
}
} else {
// knowing that a tunnel shouldn't exist for this route, check to see if there are any lingering tunnels /
// routes that need to be cleaned up.
nrc.cleanupTunnel(dst, tunnelName)
}
switch {
case link != nil:
// if we setup an overlay tunnel link, then use it for destination routing
route = &netlink.Route{
LinkIndex: link.Attrs().Index,
Src: nrc.nodeIP,
Dst: dst,
Protocol: zebraRouteOriginator,
}
case sameSubnet:
// if the nextHop is within the same subnet, add a route for the destination so that traffic can be routed
// at layer 2 and minimize the need to traverse a router
route = &netlink.Route{
Dst: dst,
Gw: nextHop,
Protocol: zebraRouteOriginator,
}
default:
// otherwise, let BGP do its thing, nothing to do here
return nil
}
// Alright, everything is in place, and we have our route configured, let's add it to the host's routing table
klog.V(2).Infof("Inject route: '%s via %s' from peer to routing table", dst, nextHop)
nrc.routeSyncer.addInjectedRoute(dst, route)
// Immediately sync the local route table regardless of timer
nrc.routeSyncer.syncLocalRouteTable()
return nil
}
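// isPeerEstablished returns true when the BGP session with the given peer IP is currently in
// the ESTABLISHED state.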
func (nrc *NetworkRoutingController) isPeerEstablished(peerIP string) (bool, error) {
var peerConnected bool
peerFunc := func(peer *gobgpapi.Peer) {
if peer.Conf.NeighborAddress == peerIP && peer.State.SessionState == gobgpapi.PeerState_ESTABLISHED {
peerConnected = true
}
}
err := nrc.bgpServer.ListPeer(context.Background(), &gobgpapi.ListPeerRequest{
Address: peerIP,
}, peerFunc)
if err != nil {
return false, fmt.Errorf("unable to list peers to see if tunnel & routes need to be removed: %v", err)
}
return peerConnected, nil
}
// cleanupTunnel removes any traces of tunnels / routes that were setup by nrc.setupOverlayTunnel() and are no longer
// needed. All errors are logged only, as we want to attempt to perform all cleanup actions regardless of their success
func (nrc *NetworkRoutingController) cleanupTunnel(destinationSubnet *net.IPNet, tunnelName string) {
klog.V(1).Infof("Cleaning up old routes for %s if there are any", destinationSubnet.String())
if err := deleteRoutesByDestination(destinationSubnet); err != nil {
klog.Errorf("Failed to cleanup routes: %v", err)
}
klog.V(1).Infof("Cleaning up any lingering tunnel interfaces named: %s", tunnelName)
if link, err := netlink.LinkByName(tunnelName); err == nil {
if err = netlink.LinkDel(link); err != nil {
klog.Errorf("Failed to delete tunnel link for the node due to " + err.Error())
}
}
}
// setupOverlayTunnel attempts to create a tunnel link and corresponding routes for IPIP based overlay networks
func (nrc *NetworkRoutingController) setupOverlayTunnel(tunnelName string, nextHop net.IP) (netlink.Link, error) {
var out []byte
link, err := netlink.LinkByName(tunnelName)
// an error here indicates that the tunnel didn't exist, so we need to create it; if it already exists there's
// nothing to do here
if err != nil {
cmdArgs := []string{"tunnel", "add", tunnelName, "mode", "ipip", "local", nrc.nodeIP.String(), "remote",
nextHop.String()}
// need to skip binding device if nrc.nodeInterface is loopback, otherwise packets never leave
// from egress interface to the tunnel peer.
if nrc.nodeInterface != "lo" {
cmdArgs = append(cmdArgs, []string{"dev", nrc.nodeInterface}...)
}
out, err := exec.Command("ip", cmdArgs...).CombinedOutput()
if err != nil {
return nil, fmt.Errorf("route not injected for the route advertised by the node %s "+
"Failed to create tunnel interface %s. error: %s, output: %s",
nextHop, tunnelName, err, string(out))
}
link, err = netlink.LinkByName(tunnelName)
if err != nil {
return nil, fmt.Errorf("route not injected for the route advertised by the node %s "+
"Failed to get tunnel interface by name error: %s", tunnelName, err)
}
if err = netlink.LinkSetUp(link); err != nil {
return nil, errors.New("Failed to bring tunnel interface " + tunnelName + " up due to: " + err.Error())
}
// reduce the MTU by 20 bytes to accommodate ipip tunnel overhead
if err = netlink.LinkSetMTU(link, link.Attrs().MTU-utils.IPInIPHeaderLength); err != nil {
return nil, errors.New("Failed to set MTU of tunnel interface " + tunnelName + " up due to: " + err.Error())
}
} else {
klog.V(1).Infof(
"Tunnel interface: " + tunnelName + " for the node " + nextHop.String() + " already exists.")
}
// Now that the tunnel link exists, we need to add a route to it, so the node knows where to send traffic bound for
// this interface
out, err = exec.Command("ip", "route", "list", "table", customRouteTableID).CombinedOutput()
if err != nil || !strings.Contains(string(out), "dev "+tunnelName+" scope") {
// nolint:gosec // this exec should be safe from command injection given the parameter's context
if out, err = exec.Command("ip", "route", "add", nextHop.String(), "dev", tunnelName, "table",
customRouteTableID).CombinedOutput(); err != nil {
return nil, fmt.Errorf("failed to add route in custom route table, err: %s, output: %s", err, string(out))
}
}
return link, nil
}
// Cleanup performs the cleanup of configurations done
func (nrc *NetworkRoutingController) Cleanup() {
klog.Infof("Cleaning up NetworkRoutesController configurations")
// Pod egress cleanup
err := nrc.deletePodEgressRule()
if err != nil {
// Changed to level 1 logging as errors occur when ipsets have already been cleaned and needlessly worries users
klog.V(1).Infof("Error deleting Pod egress iptables rule: %v", err)
}
err = nrc.deleteBadPodEgressRules()
if err != nil {
// Changed to level 1 logging as errors occur when ipsets have already been cleaned and needlessly worries users
klog.V(1).Infof("Error deleting Pod egress iptables rule: %s", err.Error())
}
// For some reason, if we go too fast into the ipset logic below it causes the system to think that the above
// iptables rules are still referencing the ipsets below, and we get errors
time.Sleep(1 * time.Second)
// delete all ipsets created by kube-router
// There are certain actions, like Cleanup(), that don't run with a full instantiation of the controller,
// and in these instances the mutex may not be present and may not need to be present, as they operate out of
// a single goroutine where there is no need for locking
if nil != nrc.ipsetMutex {
klog.V(1).Infof("Attempting to attain ipset mutex lock")
nrc.ipsetMutex.Lock()
klog.V(1).Infof("Attained ipset mutex lock, continuing...")
defer func() {
nrc.ipsetMutex.Unlock()
klog.V(1).Infof("Returned ipset mutex lock")
}()
}
ipset, err := utils.NewIPSet(nrc.isIpv6)
if err != nil {
klog.Errorf("Failed to clean up ipsets: " + err.Error())
return
}
err = ipset.Save()
if err != nil {
klog.Errorf("Failed to clean up ipsets: " + err.Error())
}
err = ipset.DestroyAllWithin()
if err != nil {
klog.Warningf("Error deleting ipset: %s", err.Error())
}
klog.Infof("Successfully cleaned the NetworkRoutesController configuration done by kube-router")
}
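// syncNodeIPSets rebuilds the pod-subnet and node-address ipsets from the current node list,
// creating the ipsets first if they are missing.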
func (nrc *NetworkRoutingController) syncNodeIPSets() error {
var err error
start := time.Now()
defer func() {
if nrc.MetricsEnabled {
metrics.ControllerRoutesSyncTime.Observe(time.Since(start).Seconds())
}
}()
klog.V(1).Infof("Attempting to attain ipset mutex lock")
nrc.ipsetMutex.Lock()
klog.V(1).Infof("Attained ipset mutex lock, continuing...")
defer func() {
nrc.ipsetMutex.Unlock()
klog.V(1).Infof("Returned ipset mutex lock")
}()
nodes := nrc.nodeLister.List()
// Collect active PodCIDR(s) and NodeIPs from nodes
currentPodCidrs := make([]string, 0)
currentNodeIPs := make([]string, 0)
for _, obj := range nodes {
node := obj.(*v1core.Node)
podCIDR := node.GetAnnotations()["kube-router.io/pod-cidr"]
if podCIDR == "" {
podCIDR = node.Spec.PodCIDR
}
if podCIDR == "" {
klog.Warningf("Couldn't determine PodCIDR of the %v node", node.Name)
continue
}
currentPodCidrs = append(currentPodCidrs, podCIDR)
nodeIP, err := utils.GetNodeIP(node)
if err != nil {
klog.Errorf("Failed to find a node IP, cannot add to node ipset which could affect routing: %v", err)
continue
}
currentNodeIPs = append(currentNodeIPs, nodeIP.String())
}
// Syncing Pod subnet ipset entries
psSet := nrc.ipSetHandler.Get(podSubnetsIPSetName)
if psSet == nil {
klog.Infof("Creating missing ipset \"%s\"", podSubnetsIPSetName)
_, err = nrc.ipSetHandler.Create(podSubnetsIPSetName, utils.OptionTimeout, "0")
if err != nil {
return fmt.Errorf("ipset \"%s\" not found in controller instance",
podSubnetsIPSetName)
}
psSet = nrc.ipSetHandler.Get(podSubnetsIPSetName)
if nil == psSet {
return fmt.Errorf("failed to get ipsethandler for ipset \"%s\"", podSubnetsIPSetName)
}
}
err = psSet.Refresh(currentPodCidrs)
if err != nil {
return fmt.Errorf("failed to sync Pod Subnets ipset: %s", err)
}
// Syncing Node Addresses ipset entries
naSet := nrc.ipSetHandler.Get(nodeAddrsIPSetName)
if naSet == nil {
klog.Infof("Creating missing ipset \"%s\"", nodeAddrsIPSetName)
_, err = nrc.ipSetHandler.Create(nodeAddrsIPSetName, utils.OptionTimeout, "0")
if err != nil {
return fmt.Errorf("ipset \"%s\" not found in controller instance",
nodeAddrsIPSetName)
}
naSet = nrc.ipSetHandler.Get(nodeAddrsIPSetName)
if nil == naSet {
return fmt.Errorf("failed to get ipsethandler for ipset \"%s\"", nodeAddrsIPSetName)
}
}
err = naSet.Refresh(currentNodeIPs)
if err != nil {
return fmt.Errorf("failed to sync Node Addresses ipset: %s", err)
}
return nil
}
func (nrc *NetworkRoutingController) newIptablesCmdHandler() (*iptables.IPTables, error) {
if nrc.isIpv6 {
return iptables.NewWithProtocol(iptables.ProtocolIPv6)
}
return iptables.NewWithProtocol(iptables.ProtocolIPv4)
}
// ensure there is a rule in the filter table and FORWARD chain to permit in/out traffic from pods;
// these rules are added so that any iptables rules for network policies will take
// precedence
func (nrc *NetworkRoutingController) enableForwarding() error {
iptablesCmdHandler, _ := nrc.newIptablesCmdHandler()
comment := "allow outbound traffic from pods"
args := []string{"-m", "comment", "--comment", comment, "-i", "kube-bridge", "-j", "ACCEPT"}
exists, err := iptablesCmdHandler.Exists("filter", "FORWARD", args...)
if err != nil {
return fmt.Errorf("failed to run iptables command: %s", err.Error())
}
if !exists {
err := iptablesCmdHandler.Insert("filter", "FORWARD", 1, args...)
if err != nil {
return fmt.Errorf("failed to run iptables command: %s", err.Error())
}
}
comment = "allow inbound traffic to pods"
args = []string{"-m", "comment", "--comment", comment, "-o", "kube-bridge", "-j", "ACCEPT"}
exists, err = iptablesCmdHandler.Exists("filter", "FORWARD", args...)
if err != nil {
return fmt.Errorf("failed to run iptables command: %s", err.Error())
}
if !exists {
err = iptablesCmdHandler.Insert("filter", "FORWARD", 1, args...)
if err != nil {
return fmt.Errorf("failed to run iptables command: %s", err.Error())
}
}
comment = "allow outbound node port traffic on node interface with which node ip is associated"
args = []string{"-m", "comment", "--comment", comment, "-o", nrc.nodeInterface, "-j", "ACCEPT"}
exists, err = iptablesCmdHandler.Exists("filter", "FORWARD", args...)
if err != nil {
return fmt.Errorf("failed to run iptables command: %s", err.Error())
}
if !exists {
err = iptablesCmdHandler.Insert("filter", "FORWARD", 1, args...)
if err != nil {
return fmt.Errorf("failed to run iptables command: %s", err.Error())
}
}
return nil
}
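// startBgpServer reads the BGP related node annotations (ASN, route reflector, path prepend,
// communities, custom import rejects and peer details), starts the GoBGP server (optionally
// exposing its gRPC API) and connects to any configured external peers.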
func (nrc *NetworkRoutingController) startBgpServer(grpcServer bool) error {
var nodeAsnNumber uint32
node, err := utils.GetNodeObject(nrc.clientset, nrc.hostnameOverride)
if err != nil {
return errors.New("failed to get node object from api server: " + err.Error())
}
if nrc.bgpFullMeshMode {
nodeAsnNumber = nrc.defaultNodeAsnNumber
} else {
nodeasn, ok := node.ObjectMeta.Annotations[nodeASNAnnotation]
if !ok {
return errors.New("could not find ASN number for the node. " +
"Node needs to be annotated with ASN number details to start BGP server")
}
klog.Infof("Found ASN for the node to be %s from the node annotations", nodeasn)
asnNo, err := strconv.ParseUint(nodeasn, 0, asnMaxBitSize)
if err != nil {
return errors.New("failed to parse ASN number specified for the the node")
}
nodeAsnNumber = uint32(asnNo)
nrc.nodeAsnNumber = nodeAsnNumber
}
if clusterid, ok := node.ObjectMeta.Annotations[rrServerAnnotation]; ok {
klog.Infof("Found rr.server for the node to be %s from the node annotation", clusterid)
_, err := strconv.ParseUint(clusterid, 0, routeReflectorMaxID)
if err != nil {
if ip := net.ParseIP(clusterid).To4(); ip == nil {
return errors.New("failed to parse rr.server clusterId specified for the node")
}
}
nrc.bgpClusterID = clusterid
nrc.bgpRRServer = true
} else if clusterid, ok := node.ObjectMeta.Annotations[rrClientAnnotation]; ok {
klog.Infof("Found rr.client for the node to be %s from the node annotation", clusterid)
_, err := strconv.ParseUint(clusterid, 0, routeReflectorMaxID)
if err != nil {
if ip := net.ParseIP(clusterid).To4(); ip == nil {
return errors.New("failed to parse rr.client clusterId specified for the node")
}
}
nrc.bgpClusterID = clusterid
nrc.bgpRRClient = true
}
if prependASN, okASN := node.ObjectMeta.Annotations[pathPrependASNAnnotation]; okASN {
prependRepeatN, okRepeatN := node.ObjectMeta.Annotations[pathPrependRepeatNAnnotation]
if !okRepeatN {
return fmt.Errorf("both %s and %s must be set", pathPrependASNAnnotation, pathPrependRepeatNAnnotation)
}
_, err := strconv.ParseUint(prependASN, 0, asnMaxBitSize)
if err != nil {
return errors.New("failed to parse ASN number specified to prepend")
}
repeatN, err := strconv.ParseUint(prependRepeatN, 0, prependPathMaxBits)
if err != nil {
return errors.New("failed to parse number of times ASN should be repeated")
}
nrc.pathPrepend = true
nrc.pathPrependAS = prependASN
nrc.pathPrependCount = uint8(repeatN)
}
var nodeCommunities []string
nodeBGPCommunitiesAnnotation, ok := node.ObjectMeta.Annotations[nodeCommunitiesAnnotation]
if !ok {
klog.V(1).Info("Did not find any BGP communities on current node's annotations. " +
"Not exporting communities.")
} else {
nodeCommunities = stringToSlice(nodeBGPCommunitiesAnnotation, ",")
for _, nodeCommunity := range nodeCommunities {
if err = validateCommunity(nodeCommunity); err != nil {
klog.Warningf("cannot add BGP community '%s' from node annotation as it does not appear "+
"to be a valid community identifier", nodeCommunity)
continue
}
klog.V(1).Infof("Adding the node community found from node annotation: %s", nodeCommunity)
nrc.nodeCommunities = append(nrc.nodeCommunities, nodeCommunity)
}
if len(nrc.nodeCommunities) < 1 {
klog.Warningf("Found a community specified via annotation %s with value %s but none could be "+
"validated", nodeCommunitiesAnnotation, nodeBGPCommunitiesAnnotation)
}
}
// Get Custom Import Reject CIDRs from annotations
nodeBGPCustomImportRejectAnnotation, ok := node.ObjectMeta.Annotations[nodeCustomImportRejectAnnotation]
if !ok {
klog.V(1).Info("Did not find any node.bgp.customimportreject on current node's annotations. " +
"Skip configuring it.")
} else {
ipNetStrings := stringToSlice(nodeBGPCustomImportRejectAnnotation, ",")
ipNets, err := stringSliceToIPNets(ipNetStrings)
if err != nil {
klog.Warningf("Failed to parse node.bgp.customimportreject specified for the node, skip configuring it")
} else {
nrc.nodeCustomImportRejectIPNets = ipNets
}
}
if grpcServer {
nrc.bgpServer = gobgp.NewBgpServer(
gobgp.GrpcListenAddress(nrc.nodeIP.String() + ":50051" + "," + "127.0.0.1:50051"))
} else {
nrc.bgpServer = gobgp.NewBgpServer()
}
go nrc.bgpServer.Serve()
var localAddressList []string
if ipv4IsEnabled() {
localAddressList = append(localAddressList, nrc.localAddressList...)
}
if ipv6IsEnabled() {
localAddressList = append(localAddressList, "::1")
}
global := &gobgpapi.Global{
As: nodeAsnNumber,
RouterId: nrc.routerID,
ListenAddresses: localAddressList,
ListenPort: int32(nrc.bgpPort),
}
if err := nrc.bgpServer.StartBgp(context.Background(), &gobgpapi.StartBgpRequest{Global: global}); err != nil {
return errors.New("failed to start BGP server due to : " + err.Error())
}
go nrc.watchBgpUpdates()
// If the global routing peer is configured then peer with it
// else attempt to get peers from node specific BGP annotations.
if len(nrc.globalPeerRouters) == 0 {
// Get Global Peer Router ASN configs
nodeBgpPeerAsnsAnnotation, ok := node.ObjectMeta.Annotations[peerASNAnnotation]
if !ok {
klog.Infof("Could not find BGP peer info for the node in the node annotations so " +
"skipping configuring peer.")
return nil
}
asnStrings := stringToSlice(nodeBgpPeerAsnsAnnotation, ",")
peerASNs, err := stringSliceToUInt32(asnStrings)
if err != nil {
err2 := nrc.bgpServer.StopBgp(context.Background(), &gobgpapi.StopBgpRequest{})
if err2 != nil {
klog.Errorf("Failed to stop bgpServer: %s", err2)
}
return fmt.Errorf("failed to parse node's Peer ASN Numbers Annotation: %s", err)
}
// Get Global Peer Router IP Address configs
nodeBgpPeersAnnotation, ok := node.ObjectMeta.Annotations[peerIPAnnotation]
if !ok {
klog.Infof("Could not find BGP peer info for the node in the node annotations " +
"so skipping configuring peer.")
return nil
}
ipStrings := stringToSlice(nodeBgpPeersAnnotation, ",")
peerIPs, err := stringSliceToIPs(ipStrings)
if err != nil {
err2 := nrc.bgpServer.StopBgp(context.Background(), &gobgpapi.StopBgpRequest{})
if err2 != nil {
klog.Errorf("Failed to stop bgpServer: %s", err2)
}
return fmt.Errorf("failed to parse node's Peer Addresses Annotation: %s", err)
}
// Get Global Peer Router ASN configs
nodeBgpPeerPortsAnnotation, ok := node.ObjectMeta.Annotations[peerPortAnnotation]
// Default to default BGP port if port annotation is not found
var peerPorts = make([]uint32, 0)
if ok {
portStrings := stringToSlice(nodeBgpPeerPortsAnnotation, ",")
peerPorts, err = stringSliceToUInt32(portStrings)
if err != nil {
err2 := nrc.bgpServer.StopBgp(context.Background(), &gobgpapi.StopBgpRequest{})
if err2 != nil {
klog.Errorf("Failed to stop bgpServer: %s", err2)
}
return fmt.Errorf("failed to parse node's Peer Port Numbers Annotation: %s", err)
}
}
// Get Global Peer Router Password configs
var peerPasswords []string
nodeBGPPasswordsAnnotation, ok := node.ObjectMeta.Annotations[peerPasswordAnnotation]
if !ok {
klog.Infof("Could not find BGP peer password info in the node's annotations. Assuming no passwords.")
} else {
passStrings := stringToSlice(nodeBGPPasswordsAnnotation, ",")
peerPasswords, err = stringSliceB64Decode(passStrings)
if err != nil {
err2 := nrc.bgpServer.StopBgp(context.Background(), &gobgpapi.StopBgpRequest{})
if err2 != nil {
klog.Errorf("Failed to stop bgpServer: %s", err2)
}
return fmt.Errorf("failed to parse node's Peer Passwords Annotation")
}
}
// Create and set Global Peer Router complete configs
nrc.globalPeerRouters, err = newGlobalPeers(peerIPs, peerPorts, peerASNs, peerPasswords, nrc.bgpHoldtime,
nrc.nodeIP.String())
if err != nil {
err2 := nrc.bgpServer.StopBgp(context.Background(), &gobgpapi.StopBgpRequest{})
if err2 != nil {
klog.Errorf("Failed to stop bgpServer: %s", err2)
}
return fmt.Errorf("failed to process Global Peer Router configs: %s", err)
}
nrc.nodePeerRouters = ipStrings
}
if len(nrc.globalPeerRouters) != 0 {
err := connectToExternalBGPPeers(nrc.bgpServer, nrc.globalPeerRouters, nrc.bgpGracefulRestart,
nrc.bgpGracefulRestartDeferralTime, nrc.bgpGracefulRestartTime, nrc.peerMultihopTTL)
if err != nil {
err2 := nrc.bgpServer.StopBgp(context.Background(), &gobgpapi.StopBgpRequest{})
if err2 != nil {
klog.Errorf("Failed to stop bgpServer: %s", err2)
}
return fmt.Errorf("failed to peer with Global Peer Router(s): %s",
err)
}
} else {
klog.Infof("No Global Peer Routers configured. Peering skipped.")
}
return nil
}
// NewNetworkRoutingController returns new NetworkRoutingController object
func NewNetworkRoutingController(clientset kubernetes.Interface,
kubeRouterConfig *options.KubeRouterConfig,
nodeInformer cache.SharedIndexInformer, svcInformer cache.SharedIndexInformer,
epInformer cache.SharedIndexInformer, ipsetMutex *sync.Mutex) (*NetworkRoutingController, error) {
var err error
nrc := NetworkRoutingController{ipsetMutex: ipsetMutex}
if kubeRouterConfig.MetricsEnabled {
// Register the metrics for this controller
prometheus.MustRegister(metrics.ControllerBGPadvertisementsReceived)
prometheus.MustRegister(metrics.ControllerBGPInternalPeersSyncTime)
prometheus.MustRegister(metrics.ControllerBPGpeers)
prometheus.MustRegister(metrics.ControllerRoutesSyncTime)
nrc.MetricsEnabled = true
}
nrc.bgpFullMeshMode = kubeRouterConfig.FullMeshMode
nrc.enableCNI = kubeRouterConfig.EnableCNI
nrc.bgpEnableInternal = kubeRouterConfig.EnableiBGP
nrc.bgpGracefulRestart = kubeRouterConfig.BGPGracefulRestart
nrc.bgpGracefulRestartDeferralTime = kubeRouterConfig.BGPGracefulRestartDeferralTime
nrc.bgpGracefulRestartTime = kubeRouterConfig.BGPGracefulRestartTime
nrc.peerMultihopTTL = kubeRouterConfig.PeerMultihopTTL
nrc.enablePodEgress = kubeRouterConfig.EnablePodEgress
nrc.syncPeriod = kubeRouterConfig.RoutesSyncPeriod
nrc.overrideNextHop = kubeRouterConfig.OverrideNextHop
nrc.clientset = clientset
nrc.activeNodes = make(map[string]bool)
nrc.bgpRRClient = false
nrc.bgpRRServer = false
nrc.bgpServerStarted = false
nrc.disableSrcDstCheck = kubeRouterConfig.DisableSrcDstCheck
nrc.initSrcDstCheckDone = false
nrc.routeSyncer = newRouteSyncer(kubeRouterConfig.InjectedRoutesSyncPeriod)
nrc.bgpHoldtime = kubeRouterConfig.BGPHoldTime.Seconds()
if nrc.bgpHoldtime > 65536 || nrc.bgpHoldtime < 3 {
return nil, errors.New("this is an incorrect BGP holdtime range, holdtime must be in the range " +
"3s to 18h12m16s")
}
nrc.hostnameOverride = kubeRouterConfig.HostnameOverride
node, err := utils.GetNodeObject(clientset, nrc.hostnameOverride)
if err != nil {
return nil, errors.New("failed getting node object from API server: " + err.Error())
}
nrc.nodeName = node.Name
nodeIP, err := utils.GetNodeIP(node)
if err != nil {
return nil, errors.New("failed getting IP address from node object: " + err.Error())
}
nrc.nodeIP = nodeIP
nrc.isIpv6 = nodeIP.To4() == nil
if kubeRouterConfig.RouterID != "" {
nrc.routerID = kubeRouterConfig.RouterID
} else {
if nrc.isIpv6 {
return nil, errors.New("router-id must be specified in ipv6 operation")
}
nrc.routerID = nrc.nodeIP.String()
}
	// let's start with the assumption that we have the necessary IAM creds to access the EC2 API
nrc.ec2IamAuthorized = true
if nrc.enableCNI {
nrc.cniConfFile = os.Getenv("KUBE_ROUTER_CNI_CONF_FILE")
if nrc.cniConfFile == "" {
nrc.cniConfFile = "/etc/cni/net.d/10-kuberouter.conf"
}
if _, err := os.Stat(nrc.cniConfFile); os.IsNotExist(err) {
return nil, errors.New("CNI conf file " + nrc.cniConfFile + " does not exist.")
}
}
cidr, err := utils.GetPodCidrFromNodeSpec(clientset, nrc.hostnameOverride)
if err != nil {
klog.Fatalf("Failed to get pod CIDR from node spec. kube-router relies on kube-controller-manager to "+
"allocate pod CIDR for the node or an annotation `kube-router.io/pod-cidr`. Error: %v", err)
return nil, fmt.Errorf("failed to get pod CIDR details from Node.spec: %s", err.Error())
}
nrc.podCidr = cidr
nrc.ipSetHandler, err = utils.NewIPSet(nrc.isIpv6)
if err != nil {
return nil, err
}
_, err = nrc.ipSetHandler.Create(podSubnetsIPSetName, utils.TypeHashNet, utils.OptionTimeout, "0")
if err != nil {
return nil, err
}
_, err = nrc.ipSetHandler.Create(nodeAddrsIPSetName, utils.TypeHashIP, utils.OptionTimeout, "0")
if err != nil {
return nil, err
}
if kubeRouterConfig.EnablePodEgress || len(nrc.clusterCIDR) != 0 {
nrc.enablePodEgress = true
}
if kubeRouterConfig.ClusterAsn != 0 {
if !((kubeRouterConfig.ClusterAsn >= 64512 && kubeRouterConfig.ClusterAsn <= 65535) ||
(kubeRouterConfig.ClusterAsn >= 4200000000 && kubeRouterConfig.ClusterAsn <= 4294967294)) {
return nil, errors.New("invalid ASN number for cluster ASN")
}
nrc.defaultNodeAsnNumber = uint32(kubeRouterConfig.ClusterAsn)
} else {
nrc.defaultNodeAsnNumber = 64512 // this magic number is first of the private ASN range, use it as default
}
nrc.advertiseClusterIP = kubeRouterConfig.AdvertiseClusterIP
nrc.advertiseExternalIP = kubeRouterConfig.AdvertiseExternalIP
nrc.advertiseLoadBalancerIP = kubeRouterConfig.AdvertiseLoadBalancerIP
nrc.advertisePodCidr = kubeRouterConfig.AdvertiseNodePodCidr
nrc.autoMTU = kubeRouterConfig.AutoMTU
nrc.enableOverlays = kubeRouterConfig.EnableOverlay
nrc.overlayType = kubeRouterConfig.OverlayType
nrc.CNIFirewallSetup = sync.NewCond(&sync.Mutex{})
nrc.bgpPort = kubeRouterConfig.BGPPort
// Convert ints to uint32s
peerASNs := make([]uint32, 0)
for _, i := range kubeRouterConfig.PeerASNs {
peerASNs = append(peerASNs, uint32(i))
}
	// Convert uints to uint32s
peerPorts := make([]uint32, 0)
for _, i := range kubeRouterConfig.PeerPorts {
peerPorts = append(peerPorts, uint32(i))
}
// PeerPasswords as cli params take precedence over password file
peerPasswords := make([]string, 0)
if len(kubeRouterConfig.PeerPasswords) != 0 {
peerPasswords, err = stringSliceB64Decode(kubeRouterConfig.PeerPasswords)
if err != nil {
return nil, fmt.Errorf("failed to parse CLI Peer Passwords flag: %s", err)
}
} else if len(kubeRouterConfig.PeerPasswordsFile) != 0 {
// Contents of the pw file should be in the same format as pw from CLI arg
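		// For example (hypothetical values), a file containing "cGFzczE=,cGFzczI=" would
		// yield the peer passwords "pass1" and "pass2" after the comma split and base64
		// decode below.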
pwFileBytes, err := ioutil.ReadFile(kubeRouterConfig.PeerPasswordsFile)
if err != nil {
return nil, fmt.Errorf("error loading Peer Passwords File : %s", err)
}
pws := strings.Split(string(pwFileBytes), ",")
peerPasswords, err = stringSliceB64Decode(pws)
if err != nil {
return nil, fmt.Errorf("failed to decode CLI Peer Passwords file: %s", err)
}
}
nrc.globalPeerRouters, err = newGlobalPeers(kubeRouterConfig.PeerRouters, peerPorts,
peerASNs, peerPasswords, nrc.bgpHoldtime, nrc.nodeIP.String())
if err != nil {
return nil, fmt.Errorf("error processing Global Peer Router configs: %s", err)
}
nrc.nodeSubnet, nrc.nodeInterface, err = getNodeSubnet(nodeIP)
if err != nil {
		return nil, errors.New("failed to find the subnet of the node IP and the interface on " +
			"which it is configured: " + err.Error())
}
bgpLocalAddressListAnnotation, ok := node.ObjectMeta.Annotations[bgpLocalAddressAnnotation]
if !ok {
klog.Infof("Could not find annotation `kube-router.io/bgp-local-addresses` on node object so BGP "+
"will listen on node IP: %s address.", nrc.nodeIP.String())
nrc.localAddressList = append(nrc.localAddressList, nrc.nodeIP.String())
} else {
klog.Infof("Found annotation `kube-router.io/bgp-local-addresses` on node object so BGP will listen "+
"on local IP's: %s", bgpLocalAddressListAnnotation)
localAddresses := stringToSlice(bgpLocalAddressListAnnotation, ",")
for _, addr := range localAddresses {
ip := net.ParseIP(addr)
if ip == nil {
klog.Fatalf("Invalid IP address %s specified in `kube-router.io/bgp-local-addresses`.", addr)
}
}
nrc.localAddressList = append(nrc.localAddressList, localAddresses...)
}
nrc.svcLister = svcInformer.GetIndexer()
nrc.ServiceEventHandler = nrc.newServiceEventHandler()
nrc.epLister = epInformer.GetIndexer()
nrc.EndpointsEventHandler = nrc.newEndpointsEventHandler()
nrc.nodeLister = nodeInformer.GetIndexer()
nrc.NodeEventHandler = nrc.newNodeEventHandler()
return &nrc, nil
}
| [
"\"KUBE_ROUTER_CNI_CONF_FILE\""
]
| []
| [
"KUBE_ROUTER_CNI_CONF_FILE"
]
| [] | ["KUBE_ROUTER_CNI_CONF_FILE"] | go | 1 | 0 | |
dockerfiles/itest/itest/itest.py | import contextlib
import csv
import json
import os
import subprocess
import time
import urllib2
import socket
import kazoo.client
import pytest
CONTAINER_PREFIX = os.environ.get('CONTAINER_PREFIX', 'na')
ZOOKEEPER_CONNECT_STRING = CONTAINER_PREFIX + "zookeeper_1:2181"
# Authoritative data for tests
SERVICES = {
# HTTP service with a custom endpoint
'service_three.main': {
'host': 'servicethree_1',
'ip_address': socket.gethostbyname(CONTAINER_PREFIX + 'servicethree_1'),
'port': 1024,
'proxy_port': 20060,
'mode': 'http',
'healthcheck_uri': '/my_healthcheck_endpoint',
'discover': 'habitat',
'advertise': ['habitat', 'region'],
},
# HTTP service with a custom endpoint
'service_three.logging': {
'host': 'servicethree_1',
'ip_address': socket.gethostbyname(CONTAINER_PREFIX + 'servicethree_1'),
'port': 1024,
'proxy_port': 20050,
'mode': 'http',
'healthcheck_uri': '/my_healthcheck_endpoint',
'discover': 'habitat',
'advertise': ['habitat'],
},
# TCP service
'service_one.main': {
'host': 'serviceone_1',
'ip_address': socket.gethostbyname(CONTAINER_PREFIX + 'serviceone_1'),
'port': 1025,
'proxy_port': 20028,
'mode': 'tcp',
'discover': 'region',
'advertise': ['region'],
},
# HTTP service with a custom endpoint and chaos
'service_three_chaos.main': {
'host': 'servicethreechaos_1',
'ip_address': socket.gethostbyname(CONTAINER_PREFIX + 'servicethreechaos_1'),
'port': 1024,
'proxy_port': 20061,
'mode': 'http',
'healthcheck_uri': '/my_healthcheck_endpoint',
'chaos': True,
'discover': 'region',
'advertise': ['region'],
},
# HTTP with headers required for the healthcheck
'service_two.main': {
'host': 'servicetwo_1',
'ip_address': socket.gethostbyname(CONTAINER_PREFIX + 'servicetwo_1'),
'port': 1999,
'proxy_port': 20090,
'mode': 'http',
'discover': 'habitat',
'advertise': ['habitat'],
'healthcheck_uri': '/lil_brudder',
'extra_healthcheck_headers': {
'X-Mode': 'ro',
},
},
}
# How long Synapse gets to configure HAProxy on startup. This value is
# intentionally generous to avoid any build flakes.
SETUP_DELAY_S = 30
SOCKET_TIMEOUT = 10
SYNAPSE_ROOT_DIR = '/var/run/synapse'
SYNAPSE_TOOLS_CONFIGURATIONS = {
'haproxy': ['/etc/synapse/synapse-tools.conf.json'],
'nginx': [
'/etc/synapse/synapse-tools-both.conf.json',
'/etc/synapse/synapse-tools-nginx.conf.json',
]
}
YIELD_PARAMS = [
item for sublist in SYNAPSE_TOOLS_CONFIGURATIONS.values()
for item in sublist
]
MAP_FILE = '/var/run/synapse/maps/ip_to_service.map'
INITIAL_MAP_FILE_CONTENTS = ''
with open(MAP_FILE, 'r') as f:
INITIAL_MAP_FILE_CONTENTS = f.read()
def reset_map_file():
"""To avoid flakiness, reset the map file
before tests that depend on it.
"""
with open(MAP_FILE, 'w+') as f:
f.seek(0)
f.write(INITIAL_MAP_FILE_CONTENTS)
@pytest.yield_fixture(scope='class', params=YIELD_PARAMS)
def setup(request):
pre_setup = getattr(request.node._obj, "pre_setup", None)
if callable(pre_setup):
pre_setup()
try:
os.makedirs(SYNAPSE_ROOT_DIR)
except OSError:
# Path already exists
pass
zk = kazoo.client.KazooClient(hosts=ZOOKEEPER_CONNECT_STRING)
zk.start()
try:
# Fake out a nerve registration in Zookeeper for each service
for name, data in SERVICES.iteritems():
labels = dict(
('%s:my_%s' % (advertise_typ, advertise_typ), '')
for advertise_typ in data['advertise']
)
zk.create(
path=('/smartstack/global/%s/itesthost' % name),
value=(json.dumps({
'host': data['ip_address'],
'port': data['port'],
'name': data['host'],
'labels': labels,
})),
ephemeral=True,
sequence=True,
makepath=True,
)
# This is the tool that is installed by the synapse-tools package.
# Run it to generate a new synapse configuration file.
subprocess.check_call(
['configure_synapse'],
env=dict(
os.environ, SYNAPSE_TOOLS_CONFIG_PATH=request.param
)
)
# Normally configure_synapse would start up synapse using 'service synapse start'.
# However, this silently fails because we don't have an init process in our
# Docker container. So instead we manually start up synapse ourselves.
synapse_process = subprocess.Popen(
'synapse --config /etc/synapse/synapse.conf.json'.split(),
env={
'PATH': '/opt/rbenv/bin:' + os.environ['PATH'],
}
)
time.sleep(SETUP_DELAY_S)
try:
yield request.param
finally:
synapse_process.kill()
synapse_process.wait()
finally:
zk.stop()
def _sort_lists_in_dict(d):
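    # Recursively sorts every list value (including inside nested dicts) so that
    # dict comparisons below ignore list ordering, e.g. {'a': [3, 1]} -> {'a': [1, 3]}.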
for k in d:
if isinstance(d[k], dict):
d[k] = _sort_lists_in_dict(d[k])
elif isinstance(d[k], list):
d[k] = sorted(d[k])
return d
class TestGroupOne(object):
def test_haproxy_config_valid(self, setup):
subprocess.check_call(['haproxy-synapse', '-c', '-f', '/var/run/synapse/haproxy.cfg'])
def test_haproxy_synapse_reaper(self, setup):
# This should run with no errors. Everything is running as root, so we need
# to use the --username option here.
subprocess.check_call(['haproxy_synapse_reaper', '--username', 'root'])
def test_synapse_qdisc_tool(self, setup):
# Can't actually manipulate qdisc or iptables in a docker, so this
# is what we have for now
subprocess.check_call(['synapse_qdisc_tool', '--help'])
def test_generate_map(self, setup):
        # generate_container_ip_map needs a docker client, but since this
        # runs inside docker itself, we need to add one separately.
subprocess.check_call(['generate_container_ip_map', '--help'])
def test_synapse_services(self, setup):
expected_services = [
'service_three.main',
'service_three.main.region',
'service_one.main',
'service_three_chaos.main',
'service_two.main',
'service_three.logging',
]
with open('/etc/synapse/synapse.conf.json') as fd:
synapse_config = json.load(fd)
actual_services = synapse_config['services'].keys()
        # nginx adds listener "services", which proxy requests back to the
        # HAProxy sockets that actually do the load balancing
if setup in SYNAPSE_TOOLS_CONFIGURATIONS['nginx']:
nginx_services = [
'service_three_chaos.main.nginx_listener',
'service_one.main.nginx_listener',
'service_two.main.nginx_listener',
'service_three.main.nginx_listener',
'service_three.logging.nginx_listener',
]
expected_services.extend(nginx_services)
assert set(expected_services) == set(actual_services)
def test_http_synapse_service_config(self, setup):
expected_service_entry = {
'default_servers': [],
'use_previous_backends': False,
'discovery': {
'hosts': [ZOOKEEPER_CONNECT_STRING],
'method': 'zookeeper',
'path': '/smartstack/global/service_three.main',
'label_filters': [
{
'label': 'habitat:my_habitat',
'value': '',
'condition': 'equals',
},
],
}
}
with open('/etc/synapse/synapse.conf.json') as fd:
synapse_config = json.load(fd)
actual_service_entry = synapse_config['services'].get('service_three.main')
# Unit tests already test the contents of the haproxy and nginx sections
# itests operate at a higher level of abstraction and need not care about
# how exactly SmartStack achieves the goal of load balancing
# So, we just check that the sections are there, but not what's in them!
assert 'haproxy' in actual_service_entry
del actual_service_entry['haproxy']
if setup in SYNAPSE_TOOLS_CONFIGURATIONS['nginx']:
assert 'nginx' in actual_service_entry
del actual_service_entry['nginx']
actual_service_entry = _sort_lists_in_dict(actual_service_entry)
expected_service_entry = _sort_lists_in_dict(expected_service_entry)
assert expected_service_entry == actual_service_entry
def test_backup_http_synapse_service_config(self, setup):
expected_service_entry = {
'default_servers': [],
'use_previous_backends': False,
'discovery': {
'hosts': [ZOOKEEPER_CONNECT_STRING],
'method': 'zookeeper',
'path': '/smartstack/global/service_three.main',
'label_filters': [
{
'label': 'region:my_region',
'value': '',
'condition': 'equals',
},
],
}
}
with open('/etc/synapse/synapse.conf.json') as fd:
synapse_config = json.load(fd)
actual_service_entry = synapse_config['services'].get('service_three.main.region')
# Unit tests already test the contents of the haproxy and nginx sections
# itests operate at a higher level of abstraction and need not care about
# how exactly SmartStack achieves the goal of load balancing
# So, we just check that the sections are there, but not what's in them!
assert 'haproxy' in actual_service_entry
del actual_service_entry['haproxy']
if setup in SYNAPSE_TOOLS_CONFIGURATIONS['nginx']:
assert 'nginx' in actual_service_entry
del actual_service_entry['nginx']
actual_service_entry = _sort_lists_in_dict(actual_service_entry)
expected_service_entry = _sort_lists_in_dict(expected_service_entry)
assert expected_service_entry == actual_service_entry
def test_tcp_synapse_service_config(self, setup):
expected_service_entry = {
'default_servers': [],
'use_previous_backends': False,
'discovery': {
'hosts': [ZOOKEEPER_CONNECT_STRING],
'method': 'zookeeper',
'path': '/smartstack/global/service_one.main',
'label_filters': [
{
'label': 'region:my_region',
'value': '',
'condition': 'equals',
},
],
},
}
with open('/etc/synapse/synapse.conf.json') as fd:
synapse_config = json.load(fd)
actual_service_entry = synapse_config['services'].get('service_one.main')
# Unit tests already test the contents of the haproxy and nginx sections
# itests operate at a higher level of abstraction and need not care about
# how exactly SmartStack achieves the goal of load balancing
# So, we just check that the sections are there, but not what's in them!
assert 'haproxy' in actual_service_entry
del actual_service_entry['haproxy']
if setup in SYNAPSE_TOOLS_CONFIGURATIONS['nginx']:
assert 'nginx' in actual_service_entry
del actual_service_entry['nginx']
actual_service_entry = _sort_lists_in_dict(actual_service_entry)
expected_service_entry = _sort_lists_in_dict(expected_service_entry)
assert expected_service_entry == actual_service_entry
def test_hacheck(self, setup):
for name, data in SERVICES.iteritems():
# Just test our HTTP service
if data['mode'] != 'http':
continue
url = 'http://%s:6666/http/%s/0%s' % (
data['ip_address'], name, data['healthcheck_uri'])
headers = {
'X-Haproxy-Server-State':
'UP 2/3; host=srv2; port=%d; name=bck/srv2;'
'node=lb1; weight=1/2; scur=13/22; qcur=0' % data['port']
}
headers.update(data.get('extra_healthcheck_headers', {}))
request = urllib2.Request(url=url, headers=headers)
with contextlib.closing(
urllib2.urlopen(request, timeout=SOCKET_TIMEOUT)) as page:
assert page.read().strip() == 'OK'
def test_synapse_haproxy_stats_page(self, setup):
haproxy_stats_uri = 'http://localhost:32123/;csv'
with contextlib.closing(
urllib2.urlopen(haproxy_stats_uri, timeout=SOCKET_TIMEOUT)) as haproxy_stats:
reader = csv.DictReader(haproxy_stats)
rows = [(row['# pxname'], row['svname'], row['check_status']) for row in reader]
for name, data in SERVICES.iteritems():
if 'chaos' in data:
continue
svname = '%s_%s:%d' % (data['host'], data['ip_address'], data['port'])
check_status = 'L7OK'
assert (name, svname, check_status) in rows
def test_http_service_is_accessible_using_haproxy(self, setup):
for name, data in SERVICES.iteritems():
if data['mode'] == 'http' and 'chaos' not in data:
uri = 'http://localhost:%d%s' % (data['proxy_port'], data['healthcheck_uri'])
with contextlib.closing(urllib2.urlopen(uri, timeout=SOCKET_TIMEOUT)) as page:
assert page.read().strip() == 'OK'
def test_tcp_service_is_accessible_using_haproxy(self, setup):
for name, data in SERVICES.iteritems():
if data['mode'] == 'tcp':
s = socket.create_connection(
address=(data['ip_address'], data['port']),
timeout=SOCKET_TIMEOUT)
s.close()
def test_file_output(self, setup):
output_directory = os.path.join(SYNAPSE_ROOT_DIR, 'services')
for name, data in SERVICES.iteritems():
with open(os.path.join(output_directory, name + '.json')) as f:
service_data = json.load(f)
if 'chaos' in data:
assert len(service_data) == 0
continue
assert len(service_data) == 1
service_instance = service_data[0]
assert service_instance['name'] == data['host']
assert service_instance['port'] == data['port']
assert service_instance['host'] == data['ip_address']
def test_http_service_returns_503(self, setup):
data = SERVICES['service_three_chaos.main']
uri = 'http://localhost:%d%s' % (data['proxy_port'], data['healthcheck_uri'])
with pytest.raises(urllib2.HTTPError) as excinfo:
with contextlib.closing(urllib2.urlopen(uri, timeout=SOCKET_TIMEOUT)):
assert False
assert excinfo.value.getcode() == 503
def test_logging_plugin(self, setup):
# Test plugins with only HAProxy
if 'nginx' not in setup and 'both' not in setup:
# Send mock requests
name = 'service_three.logging'
data = SERVICES[name]
url = 'http://localhost:%d%s' % (data['proxy_port'], data['healthcheck_uri'])
self.send_requests(urls=[url])
# Check for requests in log file
log_file = '/var/log/haproxy.log'
expected = 'provenance Test service_three.logging'
            self.check_plugin_logs(log_file, expected)
def test_source_required_plugin(self, setup):
# Test plugins with only HAProxy
if 'nginx' not in setup and 'both' not in setup:
name = 'service_two.main'
data = SERVICES[name]
url = 'http://localhost:%d%s' % (data['proxy_port'], data['healthcheck_uri'])
# First, test with the service IP present in the map file
request = urllib2.Request(url=url, headers={'X-Smartstack-Origin': 'Spoof-Value'})
with contextlib.closing(
urllib2.urlopen(request, timeout=SOCKET_TIMEOUT)) as page:
assert page.info().dict['x-smartstack-origin'] == 'Test'
# Helper for sending requests
def send_requests(self, urls, headers=None):
for url in urls:
request = urllib2.Request(url=url)
with contextlib.closing(
urllib2.urlopen(request, timeout=SOCKET_TIMEOUT)) as page:
assert page.read().strip() == 'OK'
# Helper for checking requests logged by logging plugin
def check_plugin_logs(self, log_file, expected):
try:
with open(log_file) as f:
logs = f.readlines()
matching_logs = filter(lambda x: expected in x, logs)
assert len(matching_logs) >= 1
except IOError:
assert False
class TestGroupTwo(object):
@staticmethod
def pre_setup():
"""Remove the entry for 127.0.0.1
from the maps file to simulate a call
from an unknown service.
"""
reset_map_file()
map_file = '/var/run/synapse/maps/ip_to_service.map'
f = open(map_file, "r+")
lines = f.readlines()
f.seek(0)
for l in lines:
if not l.startswith('127.0.0.1'):
f.write(l)
f.truncate()
f.close()
def test_source_required_plugin_without_map_entry(self, setup):
# Test plugins with only HAProxy
if 'nginx' not in setup and 'both' not in setup:
name = 'service_two.main'
data = SERVICES[name]
url = 'http://localhost:%d%s' % (data['proxy_port'], data['healthcheck_uri'])
            # The 127.0.0.1 entry was removed from the map file in pre_setup, so the origin header should fall back to '0'
request = urllib2.Request(url=url, headers={'X-Smartstack-Origin': 'Spoof-Value'})
with contextlib.closing(
urllib2.urlopen(request, timeout=SOCKET_TIMEOUT)) as page:
assert page.info().dict['x-smartstack-origin'] == '0'
    def test_map_debug(self):
        """We want to make sure that the map is refreshed every 5 seconds.
        For this, we add an entry to the map and check that it is
        reflected within 5s.
        """
        reset_map_file()
test_ip = '169.254.255.254'
test_svc = 'new-service-just-added'
map_file = '/var/run/synapse/maps/ip_to_service.map'
f = open(map_file, 'a')
f.write('\n' + test_ip + ' ' + test_svc)
f.close()
time.sleep(5)
map_url = 'http://localhost:32124/'
request = urllib2.Request(url=map_url)
with contextlib.closing(
urllib2.urlopen(request, timeout=SOCKET_TIMEOUT)) as page:
raw = page.read()
svc_map = json.loads(raw)
assert test_ip in svc_map
assert svc_map[test_ip] == test_svc
class TestGroupThree(object):
@staticmethod
def pre_setup():
"""Remove the map file to simulate what happens
on boxes (such as role::devbox) where the map file
is not generated at all.
"""
reset_map_file()
map_file = '/var/run/synapse/maps/ip_to_service.map'
if os.path.isfile(map_file):
os.remove(map_file)
def test_source_required_plugin_without_map_entry(self, setup):
# Test plugins with only HAProxy
if 'nginx' not in setup and 'both' not in setup:
name = 'service_two.main'
data = SERVICES[name]
url = 'http://localhost:%d%s' % (data['proxy_port'], data['healthcheck_uri'])
            # The map file was removed in pre_setup, so the origin header should fall back to '0'
request = urllib2.Request(url=url, headers={'X-Smartstack-Origin': 'Spoof-Value'})
with contextlib.closing(
urllib2.urlopen(request, timeout=SOCKET_TIMEOUT)) as page:
assert page.info().dict['x-smartstack-origin'] == '0' | []
| []
| [
"CONTAINER_PREFIX",
"PATH"
]
| [] | ["CONTAINER_PREFIX", "PATH"] | python | 2 | 0 | |
book/db_test.go | package book_test
import (
"database/sql"
"github.com/stretchr/testify/suite"
"github.com/testingallthethings/033-go-rest/book"
"github.com/testingallthethings/033-go-rest/rest"
"os"
"testing"
_ "github.com/lib/pq"
)
type DBRetrieverSuite struct {
suite.Suite
}
func TestDBRetrieverSuite(t *testing.T) {
suite.Run(t, new(DBRetrieverSuite))
}
var (
db *sql.DB
r book.DBRetriever
)
func (s *DBRetrieverSuite) SetupTest() {
db, _ = sql.Open("postgres", os.Getenv("DATABASE_URL"))
r = book.NewDBRetriever(db)
}
func (s *DBRetrieverSuite) TearDownTest() {
db.Close()
}
func (s *DBRetrieverSuite) TestRetrievingBookThatDoesNotExist() {
_, err := r.FindBookBy("123456789")
s.Equal(rest.ErrBookNotFound, err)
}
func (s *DBRetrieverSuite) TestRetrievingBookThatExists() {
db.Exec("INSERT INTO book (isbn, name, image, genre, year_published) VALUES ('987654321', 'Testing All The Things', 'testing.jpg', 'Computing', 2021)")
b, err := r.FindBookBy("987654321")
s.NoError(err)
book := rest.Book{
ISBN: "987654321",
Title: "Testing All The Things",
Image: "testing.jpg",
Genre: "Computing",
YearPublished: 2021,
}
s.Equal(book, b)
}
func (s *DBRetrieverSuite) TestWhenUnexpectedErrorRetrievingBook() {
db.Close()
_, err := r.FindBookBy("123456789")
s.Equal(book.ErrFailedToRetrieveBook, err)
}
| [
"\"DATABASE_URL\""
]
| []
| [
"DATABASE_URL"
]
| [] | ["DATABASE_URL"] | go | 1 | 0 | |
module4-acid-and-database-scalability-tradeoffs/rpg_mongo_queries.py | #rpg_mongo_queries.py
"""
Use [MongoDB queries](https://docs.mongodb.com/manual/tutorial/query-documents/) to answer
the same questions as in the first module (when the RPG data was in
SQLite).
How many total Characters are there?
How many of each specific subclass?
How many total Items?
How many of the Items are weapons? How many are not?
How many Items does each character have? (Return first 20 rows)
How many Weapons does each character have? (Return first 20 rows)
On average, how many Items does each Character have?
On average, how many Weapons does each character have?
To complete the assignment, this file (rpg_mongo_queries.py) imports pymongo and
programmatically executes and reports results for the above queries.
"""
#Imports
import pymongo
import os
from dotenv import load_dotenv
load_dotenv()
#Establish MongoDB connection
DB_USER = os.getenv("MONGO_USER", default="OOPS")
DB_PASSWORD = os.getenv("MONGO_PASSWORD", default="OOPS")
CLUSTER_NAME = os.getenv("MONGO_CLUSTER_NAME", default="OOPS")
client = pymongo.MongoClient(f"mongodb+srv://{DB_USER}:{DB_PASSWORD}@{CLUSTER_NAME}.mongodb.net/test?retryWrites=true&w=majority")
db = client.rpg_json # "test_database" or whatever you want to call it
collection = db.rpg_db
#1. How many characters are there?
character_count = collection.count_documents({'model': 'charactercreator.character'})
print(f"QUESTION 1: There are {character_count} characters.\n")
#2. How many of each specific subclass?
print("QUESTION 2:")
for subclass in ['cleric', 'fighter', 'mage', 'necromancer', 'thief']:
character_subclass_count = collection.count_documents({'model': 'charactercreator.'+subclass})
print(f"There are {character_subclass_count} {subclass}s as a subclass of characters.")
print('\n')
#3. How many total items?
item_count = collection.count_documents({'model':'armory.item'})
print(f"QUESTION 3: There are {item_count} items.\n")
#4. How many Items are weapons? How many are not?
weapon_count = collection.count_documents({'model':'armory.weapon'})
print("QUESTION 4:")
print(f"There are {weapon_count} weapons.")
print(f"{item_count - weapon_count} are not weapons.\n")
#5. How many items does each character have?(Return first 20 rows)
characters = collection.find({'model': 'charactercreator.character'})
print("QUESTION 5:")
for character in characters[:20]:
print(character['fields']['name'], len(character['fields']['inventory']))
print('\n')
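# (Illustrative sketch, not part of the original assignment) The same per-character
# item counts could also be computed server-side with an aggregation pipeline:
#   collection.aggregate([
#       {'$match': {'model': 'charactercreator.character'}},
#       {'$project': {'name': '$fields.name',
#                     'num_items': {'$size': '$fields.inventory'}}},
#   ])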
#6. How many Weapons does each character have? (Return first 20 rows)
print("QUESTION 6:")
characters = collection.find({'model': 'charactercreator.character'})
weapons = collection.find({'model':'armory.weapon'})
weapon_pk = [weapon['pk'] for weapon in weapons ]
for character in characters[:20]:
inventory = character['fields']['inventory']
num_weapons = len([item for item in inventory if item in weapon_pk ])
print(character['fields']['name'], num_weapons)
print('\n')
#7. On average, how many Items does each Character have?
average_items = item_count/character_count
print(f"QUESTION 7: Each character has about {average_items:.2f} items.\n" )
#8. On average, how many weapons does each character have?
average_weapons = weapon_count/character_count
print(f"QUESTION 8: Each character has about {average_weapons:.2f} weapons.\n" ) | []
| []
| [
"MONGO_CLUSTER_NAME",
"MONGO_PASSWORD",
"MONGO_USER"
]
| [] | ["MONGO_CLUSTER_NAME", "MONGO_PASSWORD", "MONGO_USER"] | python | 3 | 0 | |
test/e2e/e2e_suite_test.go | package e2e_test
import (
"os"
"testing"
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
mongocli "github.com/mongodb/mongodb-atlas-kubernetes/test/e2e/cli/mongocli"
)
const (
EventuallyTimeout = 100 * time.Second
ConsistentlyTimeout = 1 * time.Second
PollingInterval = 10 * time.Second
)
var (
// default
Platform = "kind"
K8sVersion = "v1.17.17"
)
func TestE2e(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "E2E Suite")
}
var _ = BeforeSuite(func() {
GinkgoWriter.Write([]byte("==============================Before==============================\n"))
SetDefaultEventuallyTimeout(EventuallyTimeout)
SetDefaultEventuallyPollingInterval(PollingInterval)
SetDefaultConsistentlyDuration(ConsistentlyTimeout)
checkUpEnvironment()
GinkgoWriter.Write([]byte("========================End of Before==============================\n"))
})
// checkUpEnvironment performs the initial environment checks required by the test suite
func checkUpEnvironment() {
Platform = os.Getenv("K8S_PLATFORM")
K8sVersion = os.Getenv("K8S_VERSION")
Expect(os.Getenv("MCLI_ORG_ID")).ShouldNot(BeEmpty(), "Please, setup MCLI_ORG_ID environment variable")
Expect(os.Getenv("MCLI_PUBLIC_API_KEY")).ShouldNot(BeEmpty(), "Please, setup MCLI_PUBLIC_API_KEY environment variable")
Expect(os.Getenv("MCLI_PRIVATE_API_KEY")).ShouldNot(BeEmpty(), "Please, setup MCLI_PRIVATE_API_KEY environment variable")
Expect(os.Getenv("MCLI_OPS_MANAGER_URL")).ShouldNot(BeEmpty(), "Please, setup MCLI_OPS_MANAGER_URL environment variable")
mongocli.GetVersionOutput()
}
func checkUpAWSEnviroment() {
Expect(os.Getenv("AWS_ACCESS_KEY_ID")).ShouldNot(BeEmpty(), "Please, setup AWS_ACCESS_KEY_ID environment variable for test with AWS")
Expect(os.Getenv("AWS_SECRET_ACCESS_KEY")).ShouldNot(BeEmpty(), "Please, setup AWS_SECRET_ACCESS_KEY environment variable for test with AWS")
}
func checkUpAzureEnviroment() {
Expect(os.Getenv("AZURE_CLIENT_ID")).ShouldNot(BeEmpty(), "Please, setup AZURE_CLIENT_ID environment variable for test with Azure")
Expect(os.Getenv("AZURE_TENANT_ID")).ShouldNot(BeEmpty(), "Please, setup AZURE_TENANT_ID environment variable for test with Azure")
Expect(os.Getenv("AZURE_CLIENT_SECRET")).ShouldNot(BeEmpty(), "Please, setup AZURE_CLIENT_SECRET environment variable for test with Azure")
Expect(os.Getenv("AZURE_SUBSCRIPTION_ID")).ShouldNot(BeEmpty(), "Please, setup AZURE_SUBSCRIPTION_ID environment variable for test with Azure")
}
| [
"\"K8S_PLATFORM\"",
"\"K8S_VERSION\"",
"\"MCLI_ORG_ID\"",
"\"MCLI_PUBLIC_API_KEY\"",
"\"MCLI_PRIVATE_API_KEY\"",
"\"MCLI_OPS_MANAGER_URL\"",
"\"AWS_ACCESS_KEY_ID\"",
"\"AWS_SECRET_ACCESS_KEY\"",
"\"AZURE_CLIENT_ID\"",
"\"AZURE_TENANT_ID\"",
"\"AZURE_CLIENT_SECRET\"",
"\"AZURE_SUBSCRIPTION_ID\""
]
| []
| [
"MCLI_PUBLIC_API_KEY",
"AWS_SECRET_ACCESS_KEY",
"AZURE_CLIENT_ID",
"MCLI_ORG_ID",
"AZURE_TENANT_ID",
"AZURE_CLIENT_SECRET",
"AZURE_SUBSCRIPTION_ID",
"K8S_PLATFORM",
"AWS_ACCESS_KEY_ID",
"MCLI_OPS_MANAGER_URL",
"MCLI_PRIVATE_API_KEY",
"K8S_VERSION"
]
| [] | ["MCLI_PUBLIC_API_KEY", "AWS_SECRET_ACCESS_KEY", "AZURE_CLIENT_ID", "MCLI_ORG_ID", "AZURE_TENANT_ID", "AZURE_CLIENT_SECRET", "AZURE_SUBSCRIPTION_ID", "K8S_PLATFORM", "AWS_ACCESS_KEY_ID", "MCLI_OPS_MANAGER_URL", "MCLI_PRIVATE_API_KEY", "K8S_VERSION"] | go | 12 | 0 | |
django_middleware_tutorial/wsgi.py | """
WSGI config for django_middleware_tutorial project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_middleware_tutorial.settings')
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
gr_scripts/uniflex_wifi_transceiver_n1.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
##################################################
# GNU Radio Python Flow Graph
# Title: Uniflex Wifi Transceiver
# Generated: Fri Mar 10 14:30:08 2017
##################################################
import os
import sys
sys.path.append(os.environ.get('GRC_HIER_PATH', os.path.expanduser('~/.grc_gnuradio')))
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio import gr
from gnuradio import uhd
from gnuradio.eng_option import eng_option
from gnuradio.filter import firdes
from optparse import OptionParser
from wifi_phy_hier import wifi_phy_hier # grc-generated hier_block
import SimpleXMLRPCServer
import foo
import ieee802_11
import threading
import time
class uniflex_wifi_transceiver(gr.top_block):
def __init__(self):
gr.top_block.__init__(self, "Uniflex Wifi Transceiver")
##################################################
# Variables
##################################################
self.usrp_addr = usrp_addr = "addr=192.168.10.2"
self.tx_gain = tx_gain = 0.75
self.src_mac = src_mac = [0x12, 0x34, 0x56, 0x78, 0x90, 0xab]
self.samp_rate = samp_rate = 5e6
self.rx_gain = rx_gain = 0.75
self.lo_offset = lo_offset = 0
self.freq = freq = 5890000000
self.encoding = encoding = 0
self.dst_mac = dst_mac = [0x30, 0x14, 0x4a, 0xe6, 0x46, 0xe4]
self.chan_est = chan_est = 0
self.bss_mac = bss_mac = [0x42, 0x42, 0x42, 0x42, 0x42, 0x42]
##################################################
# Blocks
##################################################
self.xmlrpc_server_0 = SimpleXMLRPCServer.SimpleXMLRPCServer(('localhost', 8080), allow_none=True)
self.xmlrpc_server_0.register_instance(self)
self.xmlrpc_server_0_thread = threading.Thread(target=self.xmlrpc_server_0.serve_forever)
self.xmlrpc_server_0_thread.daemon = True
self.xmlrpc_server_0_thread.start()
self.wifi_phy_hier_0 = wifi_phy_hier(
bandwidth=samp_rate,
chan_est=chan_est,
encoding=encoding,
frequency=freq,
sensitivity=0.56,
)
self.uhd_usrp_source_0 = uhd.usrp_source(
",".join((usrp_addr, "")),
uhd.stream_args(
cpu_format="fc32",
channels=range(1),
),
)
self.uhd_usrp_source_0.set_samp_rate(samp_rate)
self.uhd_usrp_source_0.set_time_now(uhd.time_spec(time.time()), uhd.ALL_MBOARDS)
self.uhd_usrp_source_0.set_center_freq(uhd.tune_request(freq, rf_freq = freq - lo_offset, rf_freq_policy=uhd.tune_request.POLICY_MANUAL), 0)
self.uhd_usrp_source_0.set_normalized_gain(rx_gain, 0)
self.uhd_usrp_sink_0 = uhd.usrp_sink(
",".join((usrp_addr, "")),
uhd.stream_args(
cpu_format="fc32",
channels=range(1),
),
'packet_len',
)
self.uhd_usrp_sink_0.set_samp_rate(samp_rate)
self.uhd_usrp_sink_0.set_time_now(uhd.time_spec(time.time()), uhd.ALL_MBOARDS)
self.uhd_usrp_sink_0.set_center_freq(uhd.tune_request(freq, rf_freq = freq - lo_offset, rf_freq_policy=uhd.tune_request.POLICY_MANUAL), 0)
self.uhd_usrp_sink_0.set_normalized_gain(tx_gain, 0)
self.ieee802_11_parse_mac_0 = ieee802_11.parse_mac(False, True)
self.ieee802_11_mac_0 = ieee802_11.mac((src_mac), (dst_mac), (bss_mac))
self.ieee802_11_ether_encap_0 = ieee802_11.ether_encap(False)
self.foo_wireshark_connector_0 = foo.wireshark_connector(127, False)
self.foo_packet_pad2_0 = foo.packet_pad2(False, False, 0.001, 10000, 10000)
(self.foo_packet_pad2_0).set_min_output_buffer(100000)
self.blocks_tuntap_pdu_0 = blocks.tuntap_pdu('tap0', 440, False)
self.blocks_multiply_const_vxx_0 = blocks.multiply_const_vcc((0.6, ))
(self.blocks_multiply_const_vxx_0).set_min_output_buffer(100000)
self.blocks_file_sink_0 = blocks.file_sink(gr.sizeof_char*1, '/tmp/wifi.pcap', True)
self.blocks_file_sink_0.set_unbuffered(True)
##################################################
# Connections
##################################################
self.msg_connect((self.blocks_tuntap_pdu_0, 'pdus'), (self.ieee802_11_ether_encap_0, 'from tap'))
self.msg_connect((self.ieee802_11_ether_encap_0, 'to tap'), (self.blocks_tuntap_pdu_0, 'pdus'))
self.msg_connect((self.ieee802_11_ether_encap_0, 'to wifi'), (self.ieee802_11_mac_0, 'app in'))
self.msg_connect((self.ieee802_11_mac_0, 'phy out'), (self.wifi_phy_hier_0, 'mac_in'))
self.msg_connect((self.wifi_phy_hier_0, 'mac_out'), (self.foo_wireshark_connector_0, 'in'))
self.msg_connect((self.wifi_phy_hier_0, 'mac_out'), (self.ieee802_11_ether_encap_0, 'from wifi'))
self.msg_connect((self.wifi_phy_hier_0, 'mac_out'), (self.ieee802_11_mac_0, 'phy in'))
self.msg_connect((self.wifi_phy_hier_0, 'mac_out'), (self.ieee802_11_parse_mac_0, 'in'))
self.connect((self.blocks_multiply_const_vxx_0, 0), (self.foo_packet_pad2_0, 0))
self.connect((self.foo_packet_pad2_0, 0), (self.uhd_usrp_sink_0, 0))
self.connect((self.foo_wireshark_connector_0, 0), (self.blocks_file_sink_0, 0))
self.connect((self.uhd_usrp_source_0, 0), (self.wifi_phy_hier_0, 0))
self.connect((self.wifi_phy_hier_0, 0), (self.blocks_multiply_const_vxx_0, 0))
def get_usrp_addr(self):
return self.usrp_addr
def set_usrp_addr(self, usrp_addr):
self.usrp_addr = usrp_addr
def get_tx_gain(self):
return self.tx_gain
def set_tx_gain(self, tx_gain):
self.tx_gain = tx_gain
self.uhd_usrp_sink_0.set_normalized_gain(self.tx_gain, 0)
def get_src_mac(self):
return self.src_mac
def set_src_mac(self, src_mac):
self.src_mac = src_mac
def get_samp_rate(self):
return self.samp_rate
def set_samp_rate(self, samp_rate):
self.samp_rate = samp_rate
self.wifi_phy_hier_0.set_bandwidth(self.samp_rate)
self.uhd_usrp_source_0.set_samp_rate(self.samp_rate)
self.uhd_usrp_sink_0.set_samp_rate(self.samp_rate)
def get_rx_gain(self):
return self.rx_gain
def set_rx_gain(self, rx_gain):
self.rx_gain = rx_gain
self.uhd_usrp_source_0.set_normalized_gain(self.rx_gain, 0)
def get_lo_offset(self):
return self.lo_offset
def set_lo_offset(self, lo_offset):
self.lo_offset = lo_offset
self.uhd_usrp_source_0.set_center_freq(uhd.tune_request(self.freq, rf_freq = self.freq - self.lo_offset, rf_freq_policy=uhd.tune_request.POLICY_MANUAL), 0)
self.uhd_usrp_sink_0.set_center_freq(uhd.tune_request(self.freq, rf_freq = self.freq - self.lo_offset, rf_freq_policy=uhd.tune_request.POLICY_MANUAL), 0)
def get_freq(self):
return self.freq
def set_freq(self, freq):
self.freq = freq
self.wifi_phy_hier_0.set_frequency(self.freq)
self.uhd_usrp_source_0.set_center_freq(uhd.tune_request(self.freq, rf_freq = self.freq - self.lo_offset, rf_freq_policy=uhd.tune_request.POLICY_MANUAL), 0)
self.uhd_usrp_sink_0.set_center_freq(uhd.tune_request(self.freq, rf_freq = self.freq - self.lo_offset, rf_freq_policy=uhd.tune_request.POLICY_MANUAL), 0)
def get_encoding(self):
return self.encoding
def set_encoding(self, encoding):
self.encoding = encoding
self.wifi_phy_hier_0.set_encoding(self.encoding)
def get_dst_mac(self):
return self.dst_mac
def set_dst_mac(self, dst_mac):
self.dst_mac = dst_mac
def get_chan_est(self):
return self.chan_est
def set_chan_est(self, chan_est):
self.chan_est = chan_est
self.wifi_phy_hier_0.set_chan_est(self.chan_est)
def get_bss_mac(self):
return self.bss_mac
def set_bss_mac(self, bss_mac):
self.bss_mac = bss_mac
def main(top_block_cls=uniflex_wifi_transceiver, options=None):
tb = top_block_cls()
tb.start()
try:
raw_input('Press Enter to quit: ')
except EOFError:
pass
tb.stop()
tb.wait()
if __name__ == '__main__':
main()
| []
| []
| [
"GRC_HIER_PATH"
]
| [] | ["GRC_HIER_PATH"] | python | 1 | 0 | |
backend/classes/jwt_authenticator.py | import os
from fastapi import HTTPException, status, Request
from fastapi.security import OAuth2PasswordBearer
from passlib.context import CryptContext
from datetime import datetime, timedelta
from jose import jwt, JWTError
from ..repositories import user_repository
class JWTAuthenticator:
__SECRET_KEY: str
__ALGORITHM: str
__ACCESS_TOKEN_EXPIRE_MINUTES: int
__pwd_context: CryptContext
def __init__(self):
self.__SECRET_KEY = os.getenv('JWT_SECRET_KEY')
self.__ALGORITHM = os.getenv('JWT_ALGORITHM')
self.__ACCESS_TOKEN_EXPIRE_MINUTES = int(os.getenv('JWT_EXPIRE_MINUTES'))
self.__pwd_context = CryptContext(schemes=['bcrypt'], deprecated='auto')
# Generate a hash
def generate_hash(self, password):
return self.__pwd_context.hash(password)
# Verify a password
def verify_password(self, plain_password, hashed_password):
return self.__pwd_context.verify(plain_password, hashed_password)
# Create an access token
def create_access_token(self, data: dict):
to_encode = data.copy()
expire = datetime.utcnow() + timedelta(minutes=self.__ACCESS_TOKEN_EXPIRE_MINUTES)
to_encode.update({'exp': expire})
encoded_jwt = jwt.encode(to_encode, self.__SECRET_KEY, algorithm=self.__ALGORITHM)
return encoded_jwt
# Get a current user from http headers
async def get_current_user(self, request: Request, raise_error: bool = True):
if not 'authorization' in request.headers.keys():
raise HTTPException(status_code=401, detail='Unauthorized')
# get a token
authorization = request.headers['Authorization']
if not authorization.startswith('Bearer ') and raise_error == True:
raise HTTPException(status_code=401, detail='Unauthorized')
token = authorization.replace('Bearer ', '')
if token == 'null':
return dict({'id': 0})
# get a user
credentials_exception = HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail='Could not validate credentials',
headers={'WWW-Authenticate': 'Bearer'},
)
try:
payload = jwt.decode(token, self.__SECRET_KEY, algorithms=[self.__ALGORITHM])
username: str = payload.get('username')
if username is None:
raise credentials_exception
except JWTError:
raise credentials_exception
user = await user_repository.select_by_username(username=username)
if user is None:
raise credentials_exception
return user
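# --- Illustrative usage sketch (an assumption, not part of the original module) ---
# With JWT_SECRET_KEY, JWT_ALGORITHM and JWT_EXPIRE_MINUTES set in the environment,
# the class above could be used roughly like this:
#
#   authenticator = JWTAuthenticator()
#   hashed = authenticator.generate_hash('plain-password')
#   assert authenticator.verify_password('plain-password', hashed)
#   token = authenticator.create_access_token({'username': 'alice'})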
| []
| []
| [
"JWT_ALGORITHM",
"JWT_EXPIRE_MINUTES",
"JWT_SECRET_KEY"
]
| [] | ["JWT_ALGORITHM", "JWT_EXPIRE_MINUTES", "JWT_SECRET_KEY"] | python | 3 | 0 | |
cmd/symbols/internal/pkg/ctags/parser.go | package ctags
import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"io"
"log"
"os"
"os/exec"
"github.com/pkg/errors"
"github.com/sourcegraph/sourcegraph/pkg/env"
)
type Entry struct {
Name string
Path string
Line int
Kind string
Language string
Parent string
ParentKind string
Pattern string
Signature string
FileLimited bool
}
const debug = false
var logErrors = os.Getenv("DEPLOY_TYPE") == "dev"
type Parser interface {
Parse(path string, content []byte) ([]Entry, error)
Close()
}
func isCommandAvailable(name string) bool {
cmd := exec.Command("/bin/sh", "-c", "command -v "+name)
if err := cmd.Run(); err != nil {
return false
}
return true
}
var ctagsCommand = env.Get("CTAGS_COMMAND", "universal-ctags", "ctags command (should point to universal-ctags executable compiled with JSON and seccomp support)")
// GetCommand returns the ctags command from the CTAGS_COMMAND environment
// variable, falling back to `universal-ctags`. Panics if the command doesn't
// exist.
func GetCommand() string {
if !isCommandAvailable(ctagsCommand) {
panic(fmt.Errorf("ctags command %s not found", ctagsCommand))
}
return ctagsCommand
}
func NewParser(ctagsCommand string) (Parser, error) {
opt := "default"
// TODO(sqs): Figure out why running with --_interactive=sandbox causes `Bad system call` inside Docker, and
// reenable it.
//
// if runtime.GOOS == "linux" {
// opt = "sandbox"
// }
cmd := exec.Command(ctagsCommand, "--_interactive="+opt, "--fields=*",
"--languages=Basic,C,C#,C++,Clojure,Cobol,CSS,CUDA,D,Elixir,elm,Erlang,Go,haskell,Java,JavaScript,kotlin,Lisp,Lua,MatLab,ObjectiveC,OCaml,Perl,Perl6,PHP,Protobuf,Python,R,Ruby,Rust,scala,Scheme,Sh,swift,Tcl,typescript,tsx,Verilog,Vim",
"--map-CSS=+.scss", "--map-CSS=+.less", "--map-CSS=+.sass",
)
in, err := cmd.StdinPipe()
if err != nil {
return nil, err
}
out, err := cmd.StdoutPipe()
if err != nil {
in.Close()
return nil, err
}
cmd.Stderr = os.Stderr
proc := ctagsProcess{
cmd: cmd,
in: in,
out: bufio.NewScanner(out),
outPipe: out,
}
if err := cmd.Start(); err != nil {
return nil, err
}
var init reply
if err := proc.read(&init); err != nil {
proc.Close()
return nil, err
}
if init.Typ == "error" {
proc.Close()
return nil, errors.Errorf("starting %s failed with: %s", ctagsCommand, init.Message)
}
return &proc, nil
}
type ctagsProcess struct {
cmd *exec.Cmd
in io.WriteCloser
out *bufio.Scanner
outPipe io.ReadCloser
}
func (p *ctagsProcess) Close() {
p.cmd.Process.Kill()
p.outPipe.Close()
p.in.Close()
}
func (p *ctagsProcess) read(rep *reply) error {
if !p.out.Scan() {
err := p.out.Err()
if err == nil {
// p.out.Err() returns nil if the Scanner hit EOF,
// but EOF is unexpected and means the process is bad and needs to be cleaned up
err = errors.New("unexpected EOF from ctags")
}
return err
}
if debug {
log.Printf("read %s", p.out.Text())
}
// See https://github.com/universal-ctags/ctags/issues/1493
if bytes.Equal([]byte("(null)"), p.out.Bytes()) {
return nil
}
err := json.Unmarshal(p.out.Bytes(), rep)
if err != nil {
return fmt.Errorf("unmarshal(%s): %v", p.out.Text(), err)
}
return nil
}
func (p *ctagsProcess) post(req *request, content []byte) error {
body, err := json.Marshal(req)
if err != nil {
return err
}
body = append(body, '\n')
if debug {
log.Printf("post %q", body)
}
if _, err = p.in.Write(body); err != nil {
return err
}
_, err = p.in.Write(content)
if debug {
log.Println(string(content))
}
return err
}
type request struct {
Command string `json:"command"`
Filename string `json:"filename"`
Size int `json:"size"`
}
type reply struct {
// Init
Typ string `json:"_type"`
Name string `json:"name"`
Version string `json:"version"`
// completed
Command string `json:"command"`
Path string `json:"path"`
Language string `json:"language"`
Line int `json:"line"`
Kind string `json:"kind"`
End int `json:"end"`
Scope string `json:"scope"`
ScopeKind string `json:"scopeKind"`
Access string `json:"access"`
File bool `json:"file"`
Signature string `json:"signature"`
Pattern string `json:"pattern"`
// error
Message string `json:"message"`
}
func (p *ctagsProcess) Parse(name string, content []byte) (entries []Entry, err error) {
req := request{
Command: "generate-tags",
Size: len(content),
Filename: name,
}
if err := p.post(&req, content); err != nil {
return nil, err
}
entries = make([]Entry, 0, 250)
for {
var rep reply
if err := p.read(&rep); err != nil {
return nil, err
}
if rep.Typ == "error" && logErrors {
log.Printf("error parsing file %s: %s", name, rep.Message)
}
if rep.Typ == "completed" {
break
}
entries = append(entries, Entry{
Name: rep.Name,
Path: rep.Path,
Line: rep.Line,
Kind: rep.Kind,
Language: rep.Language,
Parent: rep.Scope,
ParentKind: rep.ScopeKind,
Pattern: rep.Pattern,
Signature: rep.Signature,
FileLimited: rep.File,
})
}
return entries, nil
}
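// A minimal usage sketch combining GetCommand and NewParser (an assumption, not
// part of the original file):
//
//	parser, err := NewParser(GetCommand())
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer parser.Close()
//	entries, err := parser.Parse("main.go", contents) // contents holds the file's bytes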
| [
"\"DEPLOY_TYPE\""
]
| []
| [
"DEPLOY_TYPE"
]
| [] | ["DEPLOY_TYPE"] | go | 1 | 0 | |
lib/galaxy/model/migrate/versions/0027_request_events.py | """
This migration script adds the request_event table and
removes the state field in the request table
"""
from __future__ import print_function
import datetime
import logging
import sys
from sqlalchemy import Column, DateTime, ForeignKey, Integer, MetaData, Table, TEXT
from sqlalchemy.exc import NoSuchTableError
# Need our custom types, but don't import anything else from model
from galaxy.model.custom_types import TrimmedString
now = datetime.datetime.utcnow
log = logging.getLogger( __name__ )
log.setLevel(logging.DEBUG)
handler = logging.StreamHandler( sys.stdout )
format = "%(name)s %(levelname)s %(asctime)s %(message)s"
formatter = logging.Formatter( format )
handler.setFormatter( formatter )
log.addHandler( handler )
metadata = MetaData()
def display_migration_details():
print("========================================")
print("This migration script adds the request_event table and")
print("removes the state field in the request table")
print("========================================")
RequestEvent_table = Table('request_event', metadata,
Column( "id", Integer, primary_key=True),
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, default=now, onupdate=now ),
Column( "request_id", Integer, ForeignKey( "request.id" ), index=True ),
Column( "state", TrimmedString( 255 ), index=True ),
Column( "comment", TEXT ) )
def upgrade(migrate_engine):
metadata.bind = migrate_engine
display_migration_details()
def localtimestamp():
if migrate_engine.name in ['mysql', 'postgres', 'postgresql']:
return "LOCALTIMESTAMP"
elif migrate_engine.name == 'sqlite':
return "current_date || ' ' || current_time"
else:
raise Exception( 'Unable to convert data for unknown database type: %s' % migrate_engine.name )
def nextval( table, col='id' ):
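        # Postgres needs the next value from the table's id sequence explicitly;
        # for MySQL and SQLite, inserting NULL lets the auto-increment primary key
        # assign the id instead.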
if migrate_engine.name in ['postgres', 'postgresql']:
return "nextval('%s_%s_seq')" % ( table, col )
elif migrate_engine.name in ['mysql', 'sqlite']:
return "null"
else:
raise Exception( 'Unable to convert data for unknown database type: %s' % migrate_engine.name )
# Load existing tables
metadata.reflect()
# Add new request_event table
try:
RequestEvent_table.create()
except Exception as e:
log.debug( "Creating request_event table failed: %s" % str( e ) )
# move the current state of all existing requests to the request_event table
cmd = \
"INSERT INTO request_event " + \
"SELECT %s AS id," + \
"%s AS create_time," + \
"%s AS update_time," + \
"request.id AS request_id," + \
"request.state AS state," + \
"'%s' AS comment " + \
"FROM request;"
cmd = cmd % ( nextval('request_event'), localtimestamp(), localtimestamp(), 'Imported from request table')
migrate_engine.execute( cmd )
if migrate_engine.name != 'sqlite':
# Delete the state column
try:
Request_table = Table( "request", metadata, autoload=True )
except NoSuchTableError:
Request_table = None
log.debug( "Failed loading table request" )
if Request_table is not None:
try:
Request_table.c.state.drop()
except Exception as e:
log.debug( "Deleting column 'state' to request table failed: %s" % ( str( e ) ) )
def downgrade(migrate_engine):
metadata.bind = migrate_engine
pass
| []
| []
| []
| [] | [] | python | null | null | null |
src/files/AppDir.java | package files;
import java.io.File;
public class AppDir {
private static File APP_DIR = null;
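    // Lazily resolves and caches the per-user application directory:
    // ~/.config/<appName> on Linux, %APPDATA%\<appName> on Windows,
    // otherwise <working dir>/<appName>; the directory is created if missing.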
static File getAppDir(String appName) {
if(APP_DIR != null) {
return APP_DIR;
}
String os = System.getProperty("os.name").toLowerCase();
if(os.equals("linux")) {
APP_DIR = new File(new File(System.getProperty("user.home"), ".config"), appName);
} else if(os.startsWith("windows")) {
APP_DIR = new File(System.getenv("APPDATA"), appName);
} else {
APP_DIR = new File(System.getProperty("user.dir"), appName);
}
if(!APP_DIR.exists()) {
APP_DIR.mkdir();
}
return APP_DIR;
}
}
| [
"\"APPDATA\""
]
| []
| [
"APPDATA"
]
| [] | ["APPDATA"] | java | 1 | 0 | |
horizon/utils/functions.py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import math
from django.conf import settings
from django.contrib.auth import logout # noqa
from django import http
from django.utils.encoding import force_unicode
from django.utils.functional import lazy # noqa
from django.utils import translation
def _lazy_join(separator, strings):
return separator.join([force_unicode(s)
for s in strings])
lazy_join = lazy(_lazy_join, unicode)
def bytes_to_gigabytes(bytes):
# Converts the number of bytes to the next highest number of Gigabytes
# For example 5000000 (5 Meg) would return '1'
return int(math.ceil(float(bytes) / 1024 ** 3))
def add_logout_reason(request, response, reason):
# Store the translated string in the cookie
lang = translation.get_language_from_request(request)
with translation.override(lang):
reason = unicode(reason).encode('utf-8')
response.set_cookie('logout_reason', reason, max_age=30)
def logout_with_message(request, msg):
"""Send HttpResponseRedirect to LOGOUT_URL.
`msg` is a message displayed on the login page after the logout, to explain
the logout reason.
"""
logout(request)
response = http.HttpResponseRedirect(
'%s?next=%s' % (settings.LOGOUT_URL, request.path))
add_logout_reason(request, response, msg)
return response
def get_page_size(request, default=20):
session = request.session
cookies = request.COOKIES
return int(session.get('horizon_pagesize',
cookies.get('horizon_pagesize',
getattr(settings,
'API_RESULT_PAGE_SIZE',
default))))
| []
| []
| []
| [] | [] | python | null | null | null |
cxgo/main.go | package main
import (
"bufio"
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"os"
"os/exec"
"os/user"
"path/filepath"
"runtime"
"strconv"
"strings"
"time"
cxcore "github.com/skycoin/cx/cx"
"github.com/skycoin/cx/cxgo/actions"
api2 "github.com/skycoin/cx/cxgo/api"
"github.com/skycoin/cx/cxgo/cxgo"
"github.com/skycoin/cx/cxgo/cxgo0"
"github.com/skycoin/cx/cxgo/parser"
"github.com/skycoin/skycoin/src/util/logging"
)
const VERSION = "0.7.1"
var (
logger = logging.MustGetLogger("newcoin")
apiClient = &http.Client{Timeout: 10 * time.Second}
genesisBlockURL = "http://127.0.0.1:%d/api/v1/block?seq=0"
profile *os.File
)
var (
// ErrMissingProjectRoot is returned when the project root parameter is missing
ErrMissingProjectRoot = errors.New("missing project root")
// ErrMissingSecretKey is returned when genesis secret is missing when distributing coins
ErrMissingSecretKey = errors.New("missing genesis secret key")
genesisSignature = ""
)
func getJSON(url string, target interface{}) error {
r, err := apiClient.Get(url)
if err != nil {
return err
}
defer r.Body.Close()
return json.NewDecoder(r.Body).Decode(target)
}
// func initCXBlockchain(initPrgrm []byte, coinname, seckey string) error {
// var err error
//
// // check that data.db does not exist
// // if it does, delete it
// userHome := actions.UserHome()
// dbPath := filepath.Join(userHome, "."+coinname, "data.db")
// if _, err := cxcore.CXStatFile(dbPath); err == nil {
// logger.Infof("deleting %s", dbPath)
// err = cxcore.CXRemoveFile(dbPath)
// if err != nil {
// return err
// }
// }
//
// if seckey == "" {
// return ErrMissingSecretKey
// }
//
// genesisSecKey, err := cipher.SecKeyFromHex(seckey)
// if err != nil {
// return err
// }
//
// configDir := os.Getenv("GOPATH") + "/src/github.com/skycoin/cx/"
// configFile := "fiber.toml"
// configFilepath := filepath.Join(configDir, configFile)
// // check that the config file exists
// if _, err := cxcore.CXStatFile(configFilepath); os.IsNotExist(err) {
// return err
// }
//
// projectRoot := os.Getenv("GOPATH") + "/src/github.com/skycoin/cx"
// if projectRoot == "" {
// return ErrMissingProjectRoot
// }
// if _, err := cxcore.CXStatFile(projectRoot); os.IsNotExist(err) {
// return err
// }
//
// coinFile := filepath.Join(projectRoot, fmt.Sprintf("cmd/%[1]s/%[1]s.go", coinname))
// if _, err := cxcore.CXStatFile(coinFile); os.IsNotExist(err) {
// return err
// }
//
// // get fiber params
// params, err := fiber.NewConfig(configFile, configDir)
//
// cmd := exec.Command("go", "run", filepath.Join(projectRoot, fmt.Sprintf("cmd/%[1]s/%[1]s.go", coinname)), "-block-publisher=true", fmt.Sprintf("-blockchain-secret-key=%s", seckey),
// "-disable-incoming", "-max-out-msg-len=134217929")
//
// var genesisSig string
// var genesisBlock readable.Block
//
// stdoutIn, _ := cmd.StdoutPipe()
// stderrIn, _ := cmd.StderrPipe()
// cmd.Start()
//
// // fetch genesisSig and genesisBlock
// go func() {
// defer cmd.Process.Kill()
//
// genesisSigRegex, err := regexp.Compile(`Genesis block signature=([0-9a-zA-Z]+)`)
// if err != nil {
// logger.Error("error in regexp for genesis block signature")
// logger.Error(err)
// return
// }
//
// scanner := bufio.NewScanner(stdoutIn)
// scanner.Split(bufio.ScanLines)
// for scanner.Scan() {
//
// m := scanner.Text()
// logger.Info("Scanner: " + m)
// if genesisSigRegex.MatchString(m) {
// genesisSigSubString := genesisSigRegex.FindStringSubmatch(m)
// genesisSig = genesisSigSubString[1]
//
// // get genesis block
// err = getJSON(fmt.Sprintf(genesisBlockURL, params.Node.WebInterfacePort), &genesisBlock)
//
// return
// }
// }
// }()
//
// go func() {
// scanner := bufio.NewScanner(stderrIn)
// scanner.Split(bufio.ScanLines)
// for scanner.Scan() {
// logger.Error(scanner.Text())
// }
// }()
//
// cmd.Wait()
//
// // check that we were able to get genesisSig and genesisUxID
//
// if genesisSig != "" && len(genesisBlock.Body.Transactions) != 0 {
// genesisSignature = genesisSig
// logger.Infof("genesis sig: %s", genesisSig)
//
// // -- create new skycoin daemon to inject distribution transaction -- //
// if err != nil {
// logger.Error("error getting fiber parameters")
// return err
// }
//
// // get node config
// params.Node.DataDirectory = fmt.Sprintf("$HOME/.%s", coinname)
// nodeConfig := skycoin.NewNodeConfig("", params.Node)
//
// // create a new fiber coin instance
// newcoin := skycoin.NewCoin(
// skycoin.Config{
// Node: nodeConfig,
// },
// logger,
// )
//
// // parse config values
// newcoin.ParseConfig(flag.CommandLine)
//
// // dconf := newcoin.ConfigureDaemon()
// vconf := newcoin.ConfigureVisor()
//
// userHome := actions.UserHome()
// dbPath := filepath.Join(userHome, "."+coinname, "data.db")
//
// // logger.Infof("opening visor db: %s", dconf.Visor.DBPath)
// logger.Infof("opening visor db: %s", dbPath)
// db, err := visor.OpenDB(dbPath, false)
// if err != nil {
// logger.Error("Error opening DB")
// return err
// }
// defer db.Close()
//
// vs, err := visor.New(vconf, db, nil)
// if err != nil {
// logger.Error("Error with NewVisor")
// return err
// }
//
// headSeq, _, err := vs.HeadBkSeq()
// if err != nil {
// logger.Error("Error with HeadBkSeq")
// return err
// } else if headSeq == 0 {
// if len(genesisBlock.Body.Transactions) != 0 {
// var tx coin.Transaction
//
// UxID := genesisBlock.Body.Transactions[0].Out[0].Hash
// output := cipher.MustSHA256FromHex(UxID)
// tx.PushInput(output)
//
// addr := cipher.MustDecodeBase58Address("TkyD4wD64UE6M5BkNQA17zaf7Xcg4AufwX")
// tx.PushOutput(addr, uint64(1e10), 10000, initPrgrm)
//
// seckeys := make([]cipher.SecKey, 1)
// seckey := genesisSecKey.Hex()
// seckeys[0] = cipher.MustSecKeyFromHex(seckey)
// tx.SignInputs(seckeys)
//
// tx.UpdateHeader()
// err = tx.Verify()
//
// if err != nil {
// logger.Panic(err)
// }
//
// _, _, _, err := vs.InjectUserTransaction(tx)
// if err != nil {
// panic(err)
// }
// } else {
// logger.Error("ERROR: len genesis block was zero")
// }
// } else {
// logger.Error("ERROR: headSeq not zero")
// }
// } else {
// logger.Error("error getting genesis block")
// }
// return err
// }
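// runNode builds the cxcoin node command for the given mode ("publisher" or
// "peer"), wiring in the relevant command-line options; it returns nil for
// any other mode.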
func runNode(mode string, options cxCmdFlags) *exec.Cmd {
switch mode {
case "publisher":
return exec.Command("cxcoin", "-enable-all-api-sets",
"-block-publisher=true",
"-localhost-only",
"-disable-default-peers",
"-custom-peers-file=localhost-peers.txt",
"-download-peerlist=false",
"-launch-browser=false",
fmt.Sprintf("-blockchain-secret-key=%s", options.secKey),
fmt.Sprintf("-genesis-address=%s", options.genesisAddress),
fmt.Sprintf("-genesis-signature=%s", options.genesisSignature),
fmt.Sprintf("-blockchain-public-key=%s", options.pubKey),
"-max-txn-size-unconfirmed=134217728",
"-max-txn-size-create-block=134217728",
"-max-block-size=134217728",
"-max-in-msg-len=134217929",
"-max-out-msg-len=134217929", // I don't know why this value, but the logger stated a value >= than this is needed
)
case "peer":
return exec.Command("cxcoin", "-enable-all-api-sets",
"-localhost-only",
"-disable-default-peers",
"-custom-peers-file=localhost-peers.txt",
"-download-peerlist=false",
"-launch-browser=false",
fmt.Sprintf("-genesis-address=%s", options.genesisAddress),
fmt.Sprintf("-genesis-signature=%s", options.genesisSignature),
fmt.Sprintf("-blockchain-public-key=%s", options.pubKey),
// "-web-interface-port=$(expr $2 + 420)",
fmt.Sprintf("-web-interface-port=%d", options.port+420),
fmt.Sprintf("-port=%d", options.port),
fmt.Sprintf("-data-dir=/tmp/%d", options.port),
"-max-txn-size-unconfirmed=134217728",
"-max-txn-size-create-block=134217728",
"-max-block-size=134217728",
"-max-in-msg-len=134217929",
"-max-out-msg-len=134217929", // I don't know why this value, but the logger stated a value >= than this is needed
)
default:
return nil
}
}
// optionTokenize checks if the user wants to use CX to generate the lexer tokens
func optionTokenize(options cxCmdFlags, fileNames []string) {
var r *os.File
var w *os.File
var err error
if len(fileNames) == 0 {
r = os.Stdin
} else {
sourceFilename := fileNames[0]
if len(fileNames) > 1 {
fmt.Fprintln(os.Stderr, "Multiple source files detected. Ignoring all except", sourceFilename)
}
r, err = cxcore.CXOpenFile(sourceFilename)
if err != nil {
fmt.Fprintln(os.Stderr, "Error reading:", sourceFilename, err)
return
}
defer r.Close()
}
if options.compileOutput == "" {
w = os.Stdout
} else {
tokenFilename := options.compileOutput
w, err = cxcore.CXCreateFile(tokenFilename)
if err != nil {
fmt.Fprintln(os.Stderr, "Error writing:", tokenFilename, err)
return
}
defer w.Close()
}
parser.Tokenize(r, w)
}
// optionGenWallet checks if the user wants to use CX to create a new wallet. If
// this is the case, a wallet is generated for a peer node.
// func optionGenWallet(options cxCmdFlags) {
// if options.walletSeed == "" {
// fmt.Println("creating a wallet requires a seed provided with --wallet-seed")
// return
// }
// if options.walletId == "" {
// // Although there is a default walletId.
// // This error should only occur if the user intentionally provides an empty id.
// fmt.Println("creating a wallet requires an id provided with --wallet-id")
// return
// }
//
// wltOpts := wallet.Options{
// Label: "cxcoin",
// Seed: options.walletSeed,
// }
//
// wlt, err := cli.GenerateWallet(options.walletId, wltOpts, 1)
// if err != nil {
// panic(err)
// }
// // To Do: This needs to be changed or any CX chains will constantly be destroyed after each reboot.
// err = wlt.Save("/tmp/6001/wallets/")
// if err != nil {
// panic(err)
// }
//
// wltJSON, err := json.MarshalIndent(wlt.Meta, "", "\t")
// if err != nil {
// panic(err)
// }
//
// // Printing JSON with wallet information
// fmt.Println(string(wltJSON))
// }
// optionGenAddress checks if the user wants to use CX to generate a new wallet
// address. If this is the case, CX prints the wallet information to standard
// output.
// func optionGenAddress(options cxCmdFlags) {
// // Create a random seed to create a temporary wallet.
// seed := cli.MakeAlphanumericSeed()
// wltOpts := wallet.Options{
// Label: "cxcoin",
// Seed: seed,
// }
//
// // Generate temporary wallet.
// wlt, err := cli.GenerateWallet(wallet.NewWalletFilename(), wltOpts, 1)
// if err != nil {
// panic(err)
// }
//
// rw := wallet.NewReadableWallet(wlt)
//
// output, err := json.MarshalIndent(rw, "", " ")
// if err != nil {
// panic(err)
// }
//
// // Print all the wallet data.
// fmt.Println(string(output))
// }
// optionRunNode checks if the user wants to run an `options.publisherMode` or
// `options.peerMode` node for a CX chain. If so, either a publisher or a peer
// node is started.
func optionRunNode(options cxCmdFlags) {
var cmd *exec.Cmd
if options.publisherMode {
cmd = runNode("publisher", options)
} else if options.peerMode {
cmd = runNode("peer", options)
}
stdoutIn, _ := cmd.StdoutPipe()
stderrIn, _ := cmd.StderrPipe()
cmd.Start()
go func() {
defer cmd.Process.Kill()
scanner := bufio.NewScanner(stdoutIn)
scanner.Split(bufio.ScanLines)
for scanner.Scan() {
m := scanner.Text()
logger.Info("Scanner: " + m)
}
}()
go func() {
scanner := bufio.NewScanner(stderrIn)
scanner.Split(bufio.ScanLines)
for scanner.Scan() {
logger.Error(scanner.Text())
}
}()
}
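// cleanupAndExit stops CPU profiling and exits the process with the given exit code.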
func cleanupAndExit(exitCode int) {
StopCPUProfile(profile)
os.Exit(exitCode)
}
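// parseProgram initializes the CX program structure and parses the given
// source files. Service, IDE, persistent and tokenize modes are handled here
// and return early with run == false. For CX chain transaction/broadcast
// modes, the serialized program state and blockchain heap are extracted so
// they can be merged back in before running.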
func parseProgram(options cxCmdFlags, fileNames []string, sourceCode []*os.File) (bool, []byte, []byte) {
profile := StartCPUProfile("parse")
defer StopCPUProfile(profile)
defer DumpMEMProfile("parse")
StartProfile("parse")
defer StopProfile("parse")
actions.PRGRM = cxcore.MakeProgram()
corePkgsPrgrm, err := cxcore.GetProgram()
if err != nil {
panic(err)
}
actions.PRGRM.Packages = corePkgsPrgrm.Packages
if options.webMode {
ServiceMode()
return false, nil, nil
}
// TODO @evanlinjin: Do we need this? What is the 'leaps' command?
if options.ideMode {
IdeServiceMode()
ServiceMode()
return false, nil, nil
}
// TODO @evanlinjin: We do not need a persistent mode?
if options.webPersistentMode {
go ServiceMode()
PersistentServiceMode()
return false, nil, nil
}
// TODO @evanlinjin: This is a separate command now.
if options.tokenizeMode {
optionTokenize(options, fileNames)
return false, nil, nil
}
// var bcPrgrm *CXProgram
var sPrgrm []byte
// In case of a CX chain, we need to temporarily store the blockchain code heap elsewhere,
// so we can then add it after the transaction code's data segment.
var bcHeap []byte
if options.transactionMode || options.broadcastMode {
chainStatePrelude(&sPrgrm, &bcHeap, actions.PRGRM) // TODO: refactor injection logic
}
// Parsing all the source code files sent as CLI arguments to CX.
cxgo.ParseSourceCode(sourceCode, fileNames)
// setting project's working directory
if !options.replMode && len(sourceCode) > 0 {
cxgo0.PRGRM0.Path = determineWorkDir(sourceCode[0].Name())
}
// Checking if a main package exists. If not, create and add it to `PRGRM`.
if _, err := actions.PRGRM.GetFunction(cxcore.MAIN_FUNC, cxcore.MAIN_PKG); err != nil {
initMainPkg(actions.PRGRM)
}
// Setting what function to start in if using the REPL.
actions.ReplTargetFn = cxcore.MAIN_FUNC
// Adding *init function that initializes all the global variables.
cxgo.AddInitFunction(actions.PRGRM)
actions.LineNo = 0
if cxcore.FoundCompileErrors {
cleanupAndExit(cxcore.CX_COMPILATION_ERROR)
}
return true, bcHeap, sPrgrm
}
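// runProgram executes the parsed CX program: it starts the REPL when no
// source files were given, merges the CX chain heap for transaction/broadcast
// modes, panics for the disabled blockchain/broadcast modes, and otherwise
// runs the compiled program with the given arguments.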
func runProgram(options cxCmdFlags, cxArgs []string, sourceCode []*os.File, bcHeap []byte, sPrgrm []byte) {
StartProfile("run")
defer StopProfile("run")
if options.replMode || len(sourceCode) == 0 {
actions.PRGRM.SelectProgram()
repl()
return
}
// If it's a CX chain transaction, we need to add the heap extracted
// from the retrieved CX chain program state.
if options.transactionMode || options.broadcastMode {
mergeBlockchainHeap(bcHeap, sPrgrm) // TODO: refactor injection logic
}
if options.blockchainMode {
// TODO @evanlinjin: Consider removing this section completely.
		// Blockchain mode is disabled here. These features have been moved to
// the 'github.com/skycoin/cx-chains' repo.
panic("blockchainMode is moved to the github.com/skycoin/cx-chains repo")
// // Initializing the CX chain.
// err := actions.PRGRM.RunCompiled(0, cxArgs)
// if err != nil {
// panic(err)
// }
//
// actions.PRGRM.RemovePackage(cxcore.MAIN_FUNC)
//
// // Removing garbage from the heap. Only the global variables should be left
// // as these are independent from function calls.
// cxcore.MarkAndCompact(actions.PRGRM)
// actions.PRGRM.HeapSize = actions.PRGRM.HeapPointer
//
// // We already removed the main package, so it's
// // len(PRGRM.Packages) instead of len(PRGRM.Packages) - 1.
// actions.PRGRM.BCPackageCount = len(actions.PRGRM.Packages)
// s := cxcore.Serialize(actions.PRGRM, actions.PRGRM.BCPackageCount)
// s = cxcore.ExtractBlockchainProgram(s, s)
//
// configDir := os.Getenv("GOPATH") + "/src/github.com/skycoin/cx/"
// configFile := "fiber"
//
// cmd := exec.Command("go", "install", "./cmd/newcoin/...")
// cmd.Start()
// cmd.Wait()
//
// cmd = exec.Command("newcoin", "createcoin",
// fmt.Sprintf("--coin=%s", options.programName),
// fmt.Sprintf("--template-dir=%s%s", os.Getenv("GOPATH"), "/src/github.com/skycoin/cx/template"),
// "--config-file="+configFile+".toml",
// "--config-dir="+configDir,
// )
// cmd.Start()
// cmd.Wait()
//
// cmd = exec.Command("go", "install", "./cmd/cxcoin/...")
// cmd.Start()
// cmd.Wait()
//
// err = initCXBlockchain(s, options.programName, options.secKey)
// if err != nil {
// panic(err)
// }
// fmt.Println("\ngenesis signature:", genesisSignature)
//
// viper.SetConfigName(configFile) // name of config file (without extension)
// viper.AddConfigPath(".") // optionally look for config in the working directory
// err = viper.ReadInConfig() // Find and read the config file
// if err != nil { // Handle errors reading the config file
// panic(err)
// }
//
// viper.Set("node.genesis_signature_str", genesisSignature)
// viper.WriteConfig()
//
// cmd = exec.Command("newcoin", "createcoin",
// fmt.Sprintf("--coin=%s", options.programName),
// fmt.Sprintf("--template-dir=%s%s", os.Getenv("GOPATH"), "/src/github.com/skycoin/cx/template"),
// "--config-file="+configFile+".toml",
// "--config-dir="+configDir,
// )
// cmd.Start()
// cmd.Wait()
// cmd = exec.Command("go", "install", "./cmd/cxcoin/...")
// cmd.Start()
// cmd.Wait()
} else if options.broadcastMode {
// TODO @evanlinjin: Consider removing this section completely.
		// Broadcast mode is disabled here. These features have been moved to
// the 'github.com/skycoin/cx-chains' repo.
panic("broadcastMode is moved to the github.com/skycoin/cx-chains repo")
// // Setting the CX runtime to run `PRGRM`.
// actions.PRGRM.SelectProgram()
// cxcore.MarkAndCompact(actions.PRGRM)
//
// s := cxcore.Serialize(actions.PRGRM, actions.PRGRM.BCPackageCount)
// txnCode := cxcore.ExtractTransactionProgram(sPrgrm, s)
//
		// // All these HTTP requests need to be dropped in favor of calls to functions
// // from the `cli` or `api` Skycoin packages
// addr := fmt.Sprintf("http://127.0.0.1:%d", options.port+420)
// skycoinClient := api.NewClient(addr)
// csrfToken, err := skycoinClient.CSRF()
// if err != nil {
// panic(err)
// }
//
// url := fmt.Sprintf("http://127.0.0.1:%d/api/v1/wallet/transaction", options.port+420)
//
// var dataMap map[string]interface{}
// dataMap = make(map[string]interface{}, 0)
// dataMap["mainExprs"] = txnCode
// dataMap["hours_selection"] = map[string]string{"type": "manual"}
// // dataMap["wallet_id"] = map[string]string{"id": options.walletId}
// dataMap["wallet_id"] = string(options.walletId)
// dataMap["to"] = []interface{}{map[string]string{"address": "2PBcLADETphmqWV7sujRZdh3UcabssgKAEB", "coins": "1", "hours": "0"}}
//
// jsonStr, err := json.Marshal(dataMap)
// if err != nil {
// panic(err)
// }
//
// req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonStr))
// req.Header.Set("X-CSRF-Token", csrfToken)
// req.Header.Set("Content-Type", "application/json")
//
// client := &http.Client{}
// resp, err := client.Do(req)
// if err != nil {
// panic(err)
// }
//
// defer resp.Body.Close()
// body, err := ioutil.ReadAll(resp.Body)
// if err != nil {
// panic(err)
// }
//
// var respBody map[string]interface{}
// if err := json.Unmarshal(body, &respBody); err != nil {
// // Printing the body instead of `err`. Body has the error generated in the Skycoin API.
// fmt.Println(string(body))
// return
// }
//
// url = fmt.Sprintf("http://127.0.0.1:%d/api/v1/injectTransaction", options.port+420)
// dataMap = make(map[string]interface{}, 0)
// dataMap["rawtx"] = respBody["encoded_transaction"]
//
// jsonStr, err = json.Marshal(dataMap)
// if err != nil {
// panic(err)
// }
//
// req, err = http.NewRequest("POST", url, bytes.NewBuffer(jsonStr))
// req.Header.Set("X-CSRF-Token", csrfToken)
// req.Header.Set("Content-Type", "application/json")
//
// resp, err = client.Do(req)
// if err != nil {
// panic(err)
// }
//
// body, err = ioutil.ReadAll(resp.Body)
// if err != nil {
// panic(err)
// }
} else {
// Normal run of a CX program.
err := actions.PRGRM.RunCompiled(0, cxArgs)
if err != nil {
panic(err)
}
if cxcore.AssertFailed() {
os.Exit(cxcore.CX_ASSERT)
}
}
}
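// Run is the CLI entry point: it parses flags, optionally starts a publisher
// or peer node, handles the wallet, help and version options, configures the
// heap and stack sizes, and finally parses and runs the given CX sources.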
func Run(args []string) {
runtime.LockOSThread()
runtime.GOMAXPROCS(2)
options := defaultCmdFlags()
parseFlags(&options, args)
// Checking if CXPATH is set, either by setting an environment variable
// or by setting the `--cxpath` flag.
checkCXPathSet(options)
// Does the user want to run a CX publisher or peer node?
if options.publisherMode || options.peerMode {
optionRunNode(options)
}
// Does the user want to generate a new wallet address?
if options.genAddress {
panic("genAddress features is now moved to github.com/skycoin/cx-chains repo")
// optionGenAddress(options)
return
}
	// Does the user want to generate a new wallet?
if options.walletMode {
panic("genWallet features is now moved to github.com/skycoin/cx-chains repo")
// optionGenWallet(options)
return
}
if checkhelp(args) {
commandLine.PrintDefaults()
return
}
// Does the user want to print the command-line help?
if options.printHelp {
printHelp()
return
}
// Does the user want to print CX's version?
if options.printVersion {
printVersion()
return
}
if options.initialHeap != "" {
cxcore.INIT_HEAP_SIZE = parseMemoryString(options.initialHeap)
}
if options.maxHeap != "" {
cxcore.MAX_HEAP_SIZE = parseMemoryString(options.maxHeap)
if cxcore.MAX_HEAP_SIZE < cxcore.INIT_HEAP_SIZE {
// Then MAX_HEAP_SIZE overrides INIT_HEAP_SIZE's value.
cxcore.INIT_HEAP_SIZE = cxcore.MAX_HEAP_SIZE
}
}
if options.stackSize != "" {
cxcore.STACK_SIZE = parseMemoryString(options.stackSize)
actions.DataOffset = cxcore.STACK_SIZE
}
if options.minHeapFreeRatio != float64(0) {
cxcore.MIN_HEAP_FREE_RATIO = float32(options.minHeapFreeRatio)
}
if options.maxHeapFreeRatio != float64(0) {
cxcore.MAX_HEAP_FREE_RATIO = float32(options.maxHeapFreeRatio)
}
// options, file pointers, filenames
cxArgs, sourceCode, fileNames := cxcore.ParseArgsForCX(commandLine.Args(), true)
// Propagate some options out to other packages.
parser.DebugLexer = options.debugLexer // in package parser
DebugProfileRate = options.debugProfile
DebugProfile = DebugProfileRate > 0
if run, bcHeap, sPrgrm := parseProgram(options, fileNames, sourceCode); run {
runProgram(options, cxArgs, sourceCode, bcHeap, sPrgrm)
}
}
// mergeBlockchainHeap adds the heap `bcHeap` found in the program state of a CX
// chain to the program to be run `PRGRM` and updates all the references to heap
// objects found in the transaction code considering the data segment found in
// the serialized program `sPrgrm`.
func mergeBlockchainHeap(bcHeap, sPrgrm []byte) {
// Setting the CX runtime to run `PRGRM`.
actions.PRGRM.SelectProgram()
bcHeapLen := len(bcHeap)
remHeapSpace := len(actions.PRGRM.Memory[actions.PRGRM.HeapStartsAt:])
fullDataSegSize := actions.PRGRM.HeapStartsAt - actions.PRGRM.StackSize
// Copying blockchain code heap.
if bcHeapLen > remHeapSpace {
// We don't have enough space. We're using the available bytes...
for c := 0; c < remHeapSpace; c++ {
actions.PRGRM.Memory[actions.PRGRM.HeapStartsAt+c] = bcHeap[c]
}
// ...and then we append the remaining bytes.
actions.PRGRM.Memory = append(actions.PRGRM.Memory, bcHeap[remHeapSpace:]...)
} else {
// We have enough space and we simply write the bytes.
for c := 0; c < bcHeapLen; c++ {
actions.PRGRM.Memory[actions.PRGRM.HeapStartsAt+c] = bcHeap[c]
}
}
// Recalculating the heap size.
actions.PRGRM.HeapSize = len(actions.PRGRM.Memory) - actions.PRGRM.HeapStartsAt
txnDataLen := fullDataSegSize - cxcore.GetSerializedDataSize(sPrgrm)
// TODO: CX chains only work with one package at the moment (in the blockchain code). That is what that "1" is for.
// Displacing the references to heap objects by `txnDataLen`.
// This needs to be done as the addresses to the heap objects are displaced
// by the addition of the transaction code's data segment.
cxcore.DisplaceReferences(actions.PRGRM, txnDataLen, 1)
}
// Used for the -heap-initial, -heap-max and -stack-size flags.
// This function parses, for example, "1M" to 1048576 (the corresponding number of bytes)
// Possible suffixes are: G or g (gigabytes), M or m (megabytes), K or k (kilobytes)
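// For example: "1M" -> 1048576, "2k" -> 2048 and "512" -> 512; malformed input returns -1.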
func parseMemoryString(s string) int {
suffix := s[len(s)-1]
_, notSuffix := strconv.ParseFloat(string(suffix), 64)
if notSuffix == nil {
// then we don't have a suffix
num, err := strconv.ParseInt(s, 10, 64)
if err != nil {
// malformed size
return -1
}
return int(num)
} else {
// then we have a suffix
num, err := strconv.ParseFloat(s[:len(s)-1], 64)
if err != nil {
// malformed size
return -1
}
// The user can use suffixes to give as input gigabytes, megabytes or kilobytes.
switch suffix {
case 'G', 'g':
return int(num * 1073741824)
case 'M', 'm':
return int(num * 1048576)
case 'K', 'k':
return int(num * 1024)
default:
return -1
}
}
}
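// unsafeEval parses and runs the given CX code in a fresh program, capturing
// whatever is written to standard output and returning it; panics and runtime
// errors are returned as the output string instead.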
func unsafeEval(code string) (out string) {
var lexer *parser.Lexer
defer func() {
if r := recover(); r != nil {
out = fmt.Sprintf("%v", r)
lexer.Stop()
}
}()
// storing strings sent to standard output
old := os.Stdout
r, w, _ := os.Pipe()
os.Stdout = w
actions.LineNo = 0
actions.PRGRM = cxcore.MakeProgram()
cxgo0.PRGRM0 = actions.PRGRM
cxgo0.Parse(code)
actions.PRGRM = cxgo0.PRGRM0
lexer = parser.NewLexer(bytes.NewBufferString(code))
parser.Parse(lexer)
//yyParse(lexer)
cxgo.AddInitFunction(actions.PRGRM)
if err := actions.PRGRM.RunCompiled(0, nil); err != nil {
actions.PRGRM = cxcore.MakeProgram()
return fmt.Sprintf("%s", err)
}
outC := make(chan string)
go func() {
var buf bytes.Buffer
io.Copy(&buf, r)
outC <- buf.String()
}()
w.Close()
os.Stdout = old // restoring the real stdout
out = <-outC
actions.PRGRM = cxcore.MakeProgram()
return out
}
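// Eval runs unsafeEval in a goroutine and returns its output, or "Timed out."
// if the evaluation takes longer than 20 seconds.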
func Eval(code string) string {
runtime.GOMAXPROCS(2)
ch := make(chan string, 1)
var result string
go func() {
result = unsafeEval(code)
ch <- result
}()
timer := time.NewTimer(20 * time.Second)
defer timer.Stop()
select {
case <-ch:
return result
case <-timer.C:
actions.PRGRM = cxcore.MakeProgram()
return "Timed out."
}
}
type SourceCode struct {
Code string
}
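// ServiceMode serves the web UI from ./dist and exposes the /program and
// /eval endpoints for evaluating CX code over HTTP on port 5336.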
func ServiceMode() {
host := ":5336"
mux := http.NewServeMux()
mux.Handle("/", http.FileServer(http.Dir("./dist")))
mux.Handle("/program/", api2.NewAPI("/program", actions.PRGRM))
mux.HandleFunc("/eval", func(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
var b []byte
var err error
if b, err = ioutil.ReadAll(r.Body); err != nil {
http.Error(w, err.Error(), 500)
return
}
var source SourceCode
if err := json.Unmarshal(b, &source); err != nil {
http.Error(w, err.Error(), 500)
return
}
if err := r.ParseForm(); err == nil {
fmt.Fprintf(w, "%s", Eval(source.Code+"\n"))
}
})
if listener, err := net.Listen("tcp", host); err == nil {
fmt.Println("Starting CX web service on http://127.0.0.1:5336/")
http.Serve(listener, mux)
}
}
func IdeServiceMode() {
	// Leaps' host address
ideHost := "localhost:5335"
// Working directory for ide
sharedPath := fmt.Sprintf("%s/src/github.com/skycoin/cx", os.Getenv("GOPATH"))
// Start Leaps
// cmd = `leaps -address localhost:5335 $GOPATH/src/skycoin/cx`
cmnd := exec.Command("leaps", "-address", ideHost, sharedPath)
	// Just start the command without waiting for it to finish
cmnd.Start()
}
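// PersistentServiceMode reads JSON payloads from standard input and forwards
// them to the local /eval endpoint of the web service, printing the response
// status.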
func PersistentServiceMode() {
fmt.Println("Start persistent for service mode!")
fi := bufio.NewReader(os.Stdin)
for {
var inp string
var ok bool
printPrompt()
if inp, ok = readline(fi); ok {
if isJSON(inp) {
var err error
client := &http.Client{}
body := bytes.NewBufferString(inp)
req, err := http.NewRequest("GET", "http://127.0.0.1:5336/eval", body)
if err != nil {
fmt.Println(err)
return
}
if resp, err := client.Do(req); err != nil {
fmt.Println(err)
} else {
fmt.Println(resp.Status)
}
}
}
}
}
func determineWorkDir(filename string) string {
filename = filepath.FromSlash(filename)
i := strings.LastIndexByte(filename, os.PathSeparator)
if i == -1 {
i = 0
}
return filename[:i]
}
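// printPrompt prints the REPL prompt, indicating the current target package,
// function or struct, if any.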
func printPrompt() {
if actions.ReplTargetMod != "" {
fmt.Println(fmt.Sprintf(":package %s ...", actions.ReplTargetMod))
fmt.Printf("* ")
} else if actions.ReplTargetFn != "" {
fmt.Println(fmt.Sprintf(":func %s {...", actions.ReplTargetFn))
fmt.Printf("\t* ")
} else if actions.ReplTargetStrct != "" {
fmt.Println(fmt.Sprintf(":struct %s {...", actions.ReplTargetStrct))
fmt.Printf("\t* ")
} else {
fmt.Printf("* ")
}
}
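// repl runs the interactive read-eval-print loop, wrapping each input line in
// the current REPL target (function, package or struct) before parsing it; on
// EOF it leaves the current target or exits.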
func repl() {
fmt.Println("CX", VERSION)
fmt.Println("More information about CX is available at http://cx.skycoin.com/ and https://github.com/skycoin/cx/")
cxcore.InREPL = true
// fi := bufio.NewReader(os.NewFile(0, "stdin"))
fi := bufio.NewReader(os.Stdin)
// scanner := bufio.NewScanner(os.Stdin)
for {
var inp string
var ok bool
printPrompt()
if inp, ok = readline(fi); ok {
if actions.ReplTargetFn != "" {
inp = fmt.Sprintf(":func %s {\n%s\n}\n", actions.ReplTargetFn, inp)
}
if actions.ReplTargetMod != "" {
inp = fmt.Sprintf("%s", inp)
}
if actions.ReplTargetStrct != "" {
inp = fmt.Sprintf(":struct %s {\n%s\n}\n", actions.ReplTargetStrct, inp)
}
b := bytes.NewBufferString(inp)
parser.Parse(parser.NewLexer(b))
//yyParse(NewLexer(b))
} else {
if actions.ReplTargetFn != "" {
actions.ReplTargetFn = ""
continue
}
if actions.ReplTargetStrct != "" {
actions.ReplTargetStrct = ""
continue
}
if actions.ReplTargetMod != "" {
actions.ReplTargetMod = ""
continue
}
fmt.Printf("\nBye!\n")
break
}
}
}
// chainStatePrelude initializes the program structure `prgrm` with data from
// the program state stored on a CX chain.
func chainStatePrelude(sPrgrm *[]byte, bcHeap *[]byte, prgrm *cxcore.CXProgram) {
resp, err := http.Get("http://127.0.0.1:6420/api/v1/programState?addrs=TkyD4wD64UE6M5BkNQA17zaf7Xcg4AufwX")
if err != nil {
fmt.Println(err)
return
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err := json.Unmarshal(body, &sPrgrm); err != nil {
fmt.Println(string(body))
return
}
memOff := cxcore.GetSerializedMemoryOffset(*sPrgrm)
stackSize := cxcore.GetSerializedStackSize(*sPrgrm)
// sPrgrm with Stack and Heap
sPrgrmSH := (*sPrgrm)[:memOff]
// Appending new stack
sPrgrmSH = append(sPrgrmSH, make([]byte, stackSize)...)
// Appending data and heap segment
sPrgrmSH = append(sPrgrmSH, (*sPrgrm)[memOff:]...)
*bcHeap = (*sPrgrm)[memOff+cxcore.GetSerializedDataSize(*sPrgrm):]
*prgrm = *cxcore.Deserialize(sPrgrmSH)
// We need to start adding new data elements after the CX chain
// program state's data segment
actions.DataOffset = prgrm.HeapStartsAt
}
// initMainPkg adds a `main` package with an empty `main` function to `prgrm`.
func initMainPkg(prgrm *cxcore.CXProgram) {
mod := cxcore.MakePackage(cxcore.MAIN_PKG)
prgrm.AddPackage(mod)
fn := cxcore.MakeFunction(cxcore.MAIN_FUNC, actions.CurrentFile, actions.LineNo)
mod.AddFunction(fn)
}
// checkCXPathSet checks if the user has set the environment variable
// `CXPATH`. If not, CX creates a workspace at $HOME/cx, along with $HOME/cx/bin,
// $HOME/cx/pkg and $HOME/cx/src
func checkCXPathSet(options cxCmdFlags) {
	// Determining the filepath of the `cx` executable (currently unused, see COREPATH below).
_, err := os.Executable()
if err != nil {
panic(err)
}
// cxcore.COREPATH = filepath.Dir(ex) // TODO @evanlinjin: Not used.
CXPATH := ""
if os.Getenv("CXPATH") != "" {
CXPATH = os.Getenv("CXPATH")
}
// `options.cxpath` overrides `os.Getenv("CXPATH")`
if options.cxpath != "" {
CXPATH, err = filepath.Abs(options.cxpath)
if err != nil {
panic(err)
}
}
if os.Getenv("CXPATH") == "" && options.cxpath == "" {
usr, err := user.Current()
if err != nil {
panic(err)
}
CXPATH = usr.HomeDir + "/cx/"
}
cxcore.BINPATH = filepath.Join(CXPATH, "bin/")
cxcore.PKGPATH = filepath.Join(CXPATH, "pkg/")
cxcore.SRCPATH = filepath.Join(CXPATH, "src/")
// Creating directories in case they do not exist.
if _, err := cxcore.CXStatFile(CXPATH); os.IsNotExist(err) {
cxcore.CXMkdirAll(CXPATH, 0755)
}
if _, err := cxcore.CXStatFile(cxcore.BINPATH); os.IsNotExist(err) {
cxcore.CXMkdirAll(cxcore.BINPATH, 0755)
}
if _, err := cxcore.CXStatFile(cxcore.PKGPATH); os.IsNotExist(err) {
cxcore.CXMkdirAll(cxcore.PKGPATH, 0755)
}
if _, err := cxcore.CXStatFile(cxcore.SRCPATH); os.IsNotExist(err) {
cxcore.CXMkdirAll(cxcore.SRCPATH, 0755)
}
}
// ----------------------------------------------------------------
// Utility functions
func readline(fi *bufio.Reader) (string, bool) {
s, err := fi.ReadString('\n')
s = strings.Replace(s, "\n", "", -1)
s = strings.Replace(s, "\r", "", -1)
for _, ch := range s {
if ch == rune(4) {
err = io.EOF
break
}
}
if err != nil {
return "", false
}
return s, true
}
func isJSON(str string) bool {
var js map[string]interface{}
err := json.Unmarshal([]byte(str), &js)
return err == nil
}
| [
"\"GOPATH\"",
"\"GOPATH\"",
"\"GOPATH\"",
"\"GOPATH\"",
"\"GOPATH\"",
"\"GOPATH\"",
"\"CXPATH\"",
"\"CXPATH\"",
"\"CXPATH\"",
"\"CXPATH\""
]
| []
| [
"GOPATH",
"CXPATH"
]
| [] | ["GOPATH", "CXPATH"] | go | 2 | 0 | |
io.go | package main
import (
"bufio"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"os/signal"
"path/filepath"
"runtime"
"runtime/debug"
"strings"
"github.com/ansel1/merry"
"github.com/lunixbochs/vtclean"
rollbarAPI "github.com/stvp/rollbar"
"golang.org/x/crypto/ssh/terminal"
)
// Stdout is used to mock stdout for testing
var Stdout io.Writer = os.Stdout
// Stderr is used to mock stderr for testing
var Stderr io.Writer = os.Stderr
var errLogger = newLogger(ErrLogPath)
// ExitFn is used to mock os.Exit
var ExitFn = os.Exit
// Debugging is HEROKU_DEBUG
var Debugging = isDebugging()
// DebuggingHeaders is HEROKU_DEBUG_HEADERS
var DebuggingHeaders = isDebuggingHeaders()
var swallowSigint = false
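// newLogger creates a file-backed logger at path, creating parent directories as needed.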
func newLogger(path string) *log.Logger {
err := os.MkdirAll(filepath.Dir(path), 0777)
must(err)
file, err := os.OpenFile(path, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0644)
must(err)
return log.New(file, "", log.LstdFlags)
}
// Exit just calls os.Exit, but can be mocked out for testing
func Exit(code int) {
TriggerBackgroundUpdate()
currentAnalyticsCommand.RecordEnd(code)
ShowCursor()
ExitFn(code)
}
// Err just calls `fmt.Fprint(Stderr, a...)` but can be mocked out for testing.
func Err(a ...interface{}) {
fmt.Fprint(Stderr, a...)
Log(a...)
}
// Errf just calls `fmt.Fprintf(Stderr, a...)` but can be mocked out for testing.
func Errf(format string, a ...interface{}) {
fmt.Fprintf(Stderr, format, a...)
Logf(format, a...)
}
// Errln just calls `fmt.Fprintln(Stderr, a...)` but can be mocked out for testing.
func Errln(a ...interface{}) {
fmt.Fprintln(Stderr, a...)
Logln(a...)
}
// Print is used to replace `fmt.Print()` but can be mocked out for testing.
func Print(a ...interface{}) {
fmt.Fprint(Stdout, a...)
}
// Printf is used to replace `fmt.Printf()` but can be mocked out for testing.
func Printf(format string, a ...interface{}) {
fmt.Fprintf(Stdout, format, a...)
}
// Println is used to replace `fmt.Println()` but can be mocked out for testing.
func Println(a ...interface{}) {
fmt.Fprintln(Stdout, a...)
}
// Log is used to print debugging information
// It will be added to the logfile in ~/.cache/heroku/error.log or printed out if HEROKU_DEBUG is set.
func Log(a ...interface{}) {
errLogger.Print(vtclean.Clean(fmt.Sprint(a...), false))
}
// Logln is used to print debugging information
// It will be added to the logfile in ~/.cache/heroku/error.log
func Logln(a ...interface{}) {
Log(fmt.Sprintln(a...))
}
// Logf is used to print debugging information
// It will be added to the logfile in ~/.cache/heroku/error.log
func Logf(format string, a ...interface{}) {
Log(fmt.Sprintf(format, a...))
}
// Debugln is used to print debugging information
// It will be added to the logfile in ~/.cache/heroku/error.log and stderr if HEROKU_DEBUG is set.
func Debugln(a ...interface{}) {
Logln(a...)
if Debugging {
fmt.Fprintln(Stderr, a...)
}
}
// Debugf is used to print debugging information
// It will be added to the logfile in ~/.cache/heroku/error.log and stderr if HEROKU_DEBUG is set.
func Debugf(format string, a ...interface{}) {
Logf(format, a...)
if Debugging {
fmt.Fprintf(Stderr, format, a...)
}
}
// WarnIfError is a helper that prints out a formatted error message.
// It emits the error to Rollbar and does not exit.
func WarnIfError(err error) {
if err == nil {
return
}
err = merry.Wrap(err)
Warn(err.Error())
Debugln(merry.Details(err))
rollbar(err, "warning")
}
// Warn shows a message with exclamation points prepended to stderr
func Warn(msg string) {
if actionMsg != "" {
Errln(yellow(" !"))
}
prefix := " " + yellow(ErrorArrow) + " "
msg = strings.TrimSpace(msg)
msg = strings.Join(strings.Split(msg, "\n"), "\n"+prefix)
Errln(prefix + msg)
if actionMsg != "" {
Err(actionMsg + "...")
}
}
// Error shows a message with exclamation points prepended to stderr
func Error(msg string) {
if actionMsg != "" {
Errln(red(" !"))
}
prefix := " " + red(ErrorArrow) + " "
msg = strings.TrimSpace(msg)
msg = strings.Join(strings.Split(msg, "\n"), "\n"+prefix)
Errln(prefix + msg)
}
// ExitWithMessage shows an error message then exits with status code 2
// It does not emit to rollbar
func ExitWithMessage(format string, a ...interface{}) {
currentAnalyticsCommand.Valid = false
Error(fmt.Sprintf(format, a...))
Exit(2)
}
// ErrorArrow is the triangle or bang that prefixes errors
var ErrorArrow = errorArrow()
func errorArrow() string {
if windows() {
return "!"
}
return "▸"
}
func must(err error) {
if err != nil {
panic(err)
}
}
// LogIfError logs out an error if one arises
func LogIfError(e error) {
if e != nil {
Debugln(e.Error())
Debugln(string(debug.Stack()))
rollbar(e, "info")
}
}
// ONE is the string 1
const ONE = "1"
func isDebugging() bool {
debug := strings.ToUpper(os.Getenv("HEROKU_DEBUG"))
if debug == "TRUE" || debug == ONE {
return true
}
return false
}
func isDebuggingHeaders() bool {
debug := strings.ToUpper(os.Getenv("HEROKU_DEBUG_HEADERS"))
if debug == "TRUE" || debug == ONE {
return true
}
return false
}
func yellow(s string) string {
if supportsColor() && !windows() {
return "\x1b[33m" + s + "\x1b[39m"
}
return s
}
func red(s string) string {
if supportsColor() && !windows() {
return "\x1b[31m" + s + "\x1b[39m"
}
return s
}
func green(s string) string {
if supportsColor() && !windows() {
return "\x1b[32m" + s + "\x1b[39m"
}
return s
}
func cyan(s string) string {
if supportsColor() && !windows() {
return "\x1b[36m" + s + "\x1b[39m"
}
return s
}
func windows() bool {
return runtime.GOOS == WINDOWS
}
func istty() bool {
return terminal.IsTerminal(int(os.Stdout.Fd()))
}
func supportsColor() bool {
if !istty() {
return false
}
for _, arg := range Args {
if arg == "--no-color" {
return false
}
}
if os.Getenv("COLOR") == "false" {
return false
}
if os.Getenv("TERM") == "dumb" {
return false
}
if config != nil && config.Color != nil && !*config.Color {
return false
}
return true
}
func plural(word string, count int) string {
if count == 1 {
return word
}
return word + "s"
}
// ShowCursor displays the cursor
func ShowCursor() {
if supportsColor() && !windows() {
Print("\u001b[?25h")
}
}
func hideCursor() {
if supportsColor() && !windows() {
Print("\u001b[?25l")
}
}
var actionMsg string
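// action prints msg, hides the cursor while fn runs, then prints done (if non-empty) on completion.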
func action(msg, done string, fn func()) {
actionMsg = msg
Err(actionMsg + "...")
hideCursor()
fn()
actionMsg = ""
ShowCursor()
if done != "" {
Errln(" " + done)
}
}
func handleSignal(s os.Signal, fn func()) {
c := make(chan os.Signal, 1)
signal.Notify(c, s)
go func() {
<-c
fn()
}()
}
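// handlePanic recovers from a panic, reports it to Rollbar and exits with
// status 1; it is a no-op if a crash is already being handled.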
func handlePanic() {
if crashing {
		// if we are already crashing, just let the error bubble up
		// or else we risk a potential fork bomb
return
}
crashing = true
if rec := recover(); rec != nil {
err, ok := rec.(error)
if !ok {
err = merry.New(rec.(string))
}
err = merry.Wrap(err)
Error(err.Error())
Debugln(merry.Details(err))
rollbar(err, "error")
Exit(1)
}
}
func rollbar(err error, level string) {
if os.Getenv("TESTING") == ONE {
return
}
rollbarAPI.Platform = "client"
rollbarAPI.Token = "d40104ae6fa8477dbb6907370231d7d8"
rollbarAPI.Environment = Channel
rollbarAPI.ErrorWriter = nil
rollbarAPI.CodeVersion = GitSHA
var cmd string
if len(Args) > 1 {
cmd = Args[1]
}
fields := []*rollbarAPI.Field{
{"version", Version},
{"os", runtime.GOOS},
{"arch", runtime.GOARCH},
{"command", cmd},
}
rollbarAPI.Error(level, err, fields...)
rollbarAPI.Wait()
}
func readJSON(obj interface{}, path string) error {
f, err := os.Open(path)
if err != nil {
return err
}
return json.NewDecoder(f).Decode(&obj)
}
func saveJSON(obj interface{}, path string) error {
data, err := json.MarshalIndent(obj, "", " ")
if err != nil {
return err
}
return ioutil.WriteFile(path, data, 0644)
}
// truncate keeps only the last n lines of a file, dropping lines from the beginning
func truncate(path string, n int) {
f, err := os.Open(path)
if err != nil {
LogIfError(err)
return
}
scanner := bufio.NewScanner(f)
lines := make([]string, 0, n+1)
scanner.Split(bufio.ScanLines)
for scanner.Scan() {
lines = append(lines, scanner.Text())
if len(lines) > n {
lines = lines[1:]
}
}
lines = append(lines, "")
ioutil.WriteFile(path, []byte(strings.Join(lines, "\n")), 0644)
}
| [
"\"HEROKU_DEBUG\"",
"\"HEROKU_DEBUG_HEADERS\"",
"\"COLOR\"",
"\"TERM\"",
"\"TESTING\""
]
| []
| [
"TESTING",
"COLOR",
"HEROKU_DEBUG_HEADERS",
"TERM",
"HEROKU_DEBUG"
]
| [] | ["TESTING", "COLOR", "HEROKU_DEBUG_HEADERS", "TERM", "HEROKU_DEBUG"] | go | 5 | 0 | |
test/titan/manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "titan.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
datalabelingservice/rename_dataset_labels_request_response.go | // Copyright (c) 2016, 2018, 2022, Oracle and/or its affiliates. All rights reserved.
// This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
// Code generated. DO NOT EDIT.
package datalabelingservice
import (
"fmt"
"github.com/oracle/oci-go-sdk/v58/common"
"net/http"
"strings"
)
// RenameDatasetLabelsRequest wrapper for the RenameDatasetLabels operation
//
// See also
//
// Click https://docs.cloud.oracle.com/en-us/iaas/tools/go-sdk-examples/latest/datalabelingservice/RenameDatasetLabels.go.html to see an example of how to use RenameDatasetLabelsRequest.
type RenameDatasetLabelsRequest struct {
// Unique Dataset OCID
DatasetId *string `mandatory:"true" contributesTo:"path" name:"datasetId"`
// Details for renaming Labels in the LabelSet of the Dataset.
RenameDatasetLabelsDetails `contributesTo:"body"`
// A token that uniquely identifies a request so it can be retried in case of a timeout or
// server error without risk of executing that same action again. Retry tokens expire after 24
// hours, but can be invalidated before then due to conflicting operations. For example, if a resource
// has been deleted and purged from the system, then a retry of the original creation request
// might be rejected.
OpcRetryToken *string `mandatory:"false" contributesTo:"header" name:"opc-retry-token"`
// For optimistic concurrency control. In the PUT or DELETE call
// for a resource, set the `if-match` parameter to the value of the
// etag from a previous GET or POST response for that resource.
// The resource will be updated or deleted only if the etag you
// provide matches the resource's current etag value.
IfMatch *string `mandatory:"false" contributesTo:"header" name:"if-match"`
// The client request ID for tracing.
OpcRequestId *string `mandatory:"false" contributesTo:"header" name:"opc-request-id"`
// Metadata about the request. This information will not be transmitted to the service, but
// represents information that the SDK will consume to drive retry behavior.
RequestMetadata common.RequestMetadata
}
func (request RenameDatasetLabelsRequest) String() string {
return common.PointerString(request)
}
// HTTPRequest implements the OCIRequest interface
func (request RenameDatasetLabelsRequest) HTTPRequest(method, path string, binaryRequestBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (http.Request, error) {
_, err := request.ValidateEnumValue()
if err != nil {
return http.Request{}, err
}
return common.MakeDefaultHTTPRequestWithTaggedStructAndExtraHeaders(method, path, request, extraHeaders)
}
// BinaryRequestBody implements the OCIRequest interface
func (request RenameDatasetLabelsRequest) BinaryRequestBody() (*common.OCIReadSeekCloser, bool) {
return nil, false
}
// RetryPolicy implements the OCIRetryableRequest interface. This retrieves the specified retry policy.
func (request RenameDatasetLabelsRequest) RetryPolicy() *common.RetryPolicy {
return request.RequestMetadata.RetryPolicy
}
// ValidateEnumValue returns an error when providing an unsupported enum value
// This function is being called during constructing API request process
// Not recommended for calling this function directly
func (request RenameDatasetLabelsRequest) ValidateEnumValue() (bool, error) {
errMessage := []string{}
if len(errMessage) > 0 {
return true, fmt.Errorf(strings.Join(errMessage, "\n"))
}
return false, nil
}
// RenameDatasetLabelsResponse wrapper for the RenameDatasetLabels operation
type RenameDatasetLabelsResponse struct {
// The underlying http response
RawResponse *http.Response
// A unique Oracle-assigned identifier for the asynchronous request. You can use this to query the status of the asynchronous operation.
OpcWorkRequestId *string `presentIn:"header" name:"opc-work-request-id"`
// A unique Oracle-assigned identifier for the request. If you need to contact
// Oracle about a particular request, please provide the request ID.
OpcRequestId *string `presentIn:"header" name:"opc-request-id"`
}
func (response RenameDatasetLabelsResponse) String() string {
return common.PointerString(response)
}
// HTTPResponse implements the OCIResponse interface
func (response RenameDatasetLabelsResponse) HTTPResponse() *http.Response {
return response.RawResponse
}
| []
| []
| []
| [] | [] | go | null | null | null |
simplechat/core.py | #!/usr/bin/env python
# encoding: utf-8
"""
Simple chat server with a backend built with Redis PubSub.
Each chatroom is backed by a Redis channel to which
users are subscribed.
By default, every new user is subscribed to the 'default' channel.
Every message is stored as a dictionary with 'name' and 'text'
as keys holding data.
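For example, {'name': 'simplechat', 'text': 'alice joined the chat'} is a typical payload.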
There are two interfaces, telnet and websockets; they are isolated.
"""
import gevent
import json
import os
import redis
import socket
import string
import thread
import time
from flask import Flask, redirect, render_template, request, session, url_for
from flask_sockets import Sockets
from flask_script import Manager
REDIS_URL = os.environ.get('REDIS_URL', 'redis://127.0.0.1:6379')
USERS = {}
ROOMS = []
redis_server = redis.from_url(REDIS_URL)
app = Flask(__name__)
app.secret_key = 'keep it secret'
app.debug = 'DEBUG'
websocketserver = Sockets(app)
manager = Manager(app)
class User(object):
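    """A connected chat user; instances register themselves in the global USERS registry."""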
def __init__(self, name, connection=None, room=None, telnet=None):
self.name = name
self.connection = connection
self.room = room
self.telnet = telnet
USERS[name] = self
class Backend(object):
"""backend for simple chat based on redis PubSub"""
def __init__(self, name=None):
self.name = name or 'default'
self.users = dict()
self.pubsub = redis_server.pubsub()
self.pubsub.subscribe(self.name)
def __str__(self):
return '<ChatRoom {0}>'.format(self.name)
def __unpack__(self):
"""Yield out data from pubsub"""
for item in self.pubsub.listen():
message = item.get('data')
if item['type'] == 'message':
yield message
def register(self, user):
"""Register a user"""
self.users[user.name] = user
user.room = self
redis_server.publish(
self.name,
json.dumps({
'name': 'simplechat',
'text': '{0} joined the chat'.format(user.name)
})
)
def remove(self, user):
"""Remove a user"""
if self.name != 'default':
redis_server.publish(
self.name,
json.dumps({
'name': 'simplechat',
'text': '{0} left the room'.format(user.name)
})
)
del self.users[user.name]
def parse(self, data):
"""Parsing messages"""
payload = json.loads(data)
name = payload['name']
message = payload['text']
user = self.users[name]
if self.name == 'default': # commands available in default
if message.startswith('/join'):
new_room = message.split('/join ')[1]
if user.telnet:
new_room = 'telnet.{0}'.format(new_room)
room = [i for i in ROOMS if i.name == new_room]
if not room:
room = Backend(new_room)
ROOMS.append(room)
room.start()
else:
room = room[0]
message = [
'Entering room: {0}'.format(room.name),
'Active users are:{0}'.format(
'\n'.join(['* {0}'.format(i) for i in room.users])),
'* {0} (** this is you)'.format(user.name),
'End of list'
]
room.register(user)
self.remove(user)
elif message == '/users':
users = list()
for i in USERS:
if i == user.name:
users.append('* {0} (**this is you)'.format(i))
else:
users.append('* {0}'.format(i))
message = [
'Connected users are:',
'\n'.join(users),
'End of list'
]
elif message == '/rooms':
rooms = [
i for i in ROOMS
                    if i.name != 'default' and i.name.startswith('telnet.')
]
if rooms:
message = [
'Active rooms are:',
'\n'.join(['* {0} ({1})'.format(
i.name, len(i.users)
) for i in rooms]),
'End of list'
]
else:
message = ['No active room detected. Create one']
elif message == '/quit':
self.remove(user)
message = ['BYE']
del USERS[user.name]
redis_server.srem('users', user.name)
else:
message = ['Sorry, unknown command or wrong domain']
elif message == '/leave':
room = filter(lambda x: x.name=='default', ROOMS)[0]
room.register(user)
self.remove(user)
message = ['Leaving room {0}'.format(self.name)]
else:
message = ['Sorry, unknown command or wrong domain']
return {'name': 'simplechat', 'text': '\n'.join(message)}
def send(self, user, data):
"""Send data to registered user. Delete on failure"""
payload = json.loads(data)
name = payload['name']
message = payload['text']
null_data = (
(message.startswith('/') and user.name != name) or
(message == '{0} joined the chat'.format(user.name)) or
(message == '{0} left the room'.format(user.name))
)
if message.startswith('/') and user.name == name:
payload = self.parse(data)
elif self.name == 'default' and user.name == name:
payload = {
'name': 'simplechat',
'text': 'Please, join a room to start a discussion'
}
elif null_data:
payload = None
if payload:
try:
if user.room.name != self.name:
user.room.send(user, json.dumps(payload))
else:
if user.telnet:
if payload['name'] == 'simplechat':
data = '<= {0}\n=> '.format(
payload['text'].replace('\n', '\n=> ')
)
else:
                            data = '<= ({0}) {1}'.format(payload['name'], payload['text'])
else:
data = json.dumps(payload)
user.connection.send(data)
except Exception as exc: # directly discard on conn failure
self.remove(user)
def run(self):
"""Listen and send messages"""
for data in self.__unpack__():
for _, user in self.users.items():
thread.start_new_thread(self.send, (user, data))
def start(self):
thread.start_new_thread(self.run, ())
default = Backend()
ROOMS.append(default)
default.start()
@app.route('/')
def index():
if 'username' in session:
username = session['username']
if not USERS:
return redirect(url_for('logout'))
return render_template('index.html', username=username)
return redirect(url_for('register'))
@app.route('/register', methods=['GET', 'POST'])
def register():
error = None
if request.method == 'POST':
username = request.form['username']
if username and not username in USERS:
User(username, room=default)
session['username'] = request.form['username']
return redirect(url_for('index'))
elif not username or username[0] not in string.letters + string.digits:
error = 'Invalid user name'
elif username in USERS:
error = 'User name already taken'
return render_template('register.html', error=error)
@app.route('/logout')
def logout():
session.pop('username', None)
session.clear()
return redirect(url_for('index'))
@websocketserver.route('/submit')
def inbox(ws):
"""Receives incoming chat messages, inserts them into Redis."""
username = session['username']
user = USERS[username]
while not ws.closed:
# Sleep to prevent *constant* context-switches.
gevent.sleep(0.1)
message = ws.receive()
if message:
redis_server.publish(user.room.name, message)
@websocketserver.route('/receive')
def outbox(ws):
"""Sends outgoing chat messages"""
username = session['username']
user = USERS[username]
user.connection = ws
user.room.register(user)
while not ws.closed:
# Context switch while `ChatBackend.start` is running in the background.
gevent.sleep(0.1)
class SocketServer(object):
"""Simple TCP socket server"""
def __init__(self, host, port):
self.host = host
self.port = port
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.setblocking(False)
self.socket.bind((self.host, self.port))
self.socket.listen(1)
print 'Listening on {0}'.format(self.socket.getsockname())
def accept(self):
"""Continuously accept inbound connections"""
def inner_thread(client):
while True:
client.send('<= Login name?\n=> ')
try:
name = client.recv(1024).strip() # no trailing spaces
except socket.error:
continue
if name in USERS:
client.send('<= Sorry, username already taken\n')
elif not name or name[0] not in string.letters + string.digits:
client.send('<= Invalid username\n')
else:
client.setblocking(False)
client.send('<= Welcome {0}\n=> '.format(name))
user = User(
name, room=default, connection=client, telnet=True
)
default.register(user)
break
while True:
try:
client, address = self.socket.accept()
except socket.error:
break
msgs = [
'Welcome to the simplechat chat server',
'/users : gives you the list of connected users',
'/rooms: gives you the list of available rooms',
'/join room_name: allows you to join conversation in a room',
'/leave: let you leave the room',
'/quit: disconnects you from the server'
]
for msg in msgs:
client.send('<= {0}\n'.format(msg))
thread.start_new_thread(inner_thread, (client,))
def recv(self):
"""Continuously accept incoming messages"""
for _, user in USERS.items():
try:
message = user.connection.recv(1024).strip()
data = json.dumps({'name': user.name, 'text': message})
except socket.error:
continue
redis_server.publish(user.room.name, data)
user.connection.send('=> ')
time.sleep(.1)
def run(self):
"""Main routine to launch the server"""
while True:
try:
self.accept()
self.recv()
except (KeyboardInterrupt, SystemExit):
print 'Closing server'
break
@manager.option('-H', '--host', dest='host')
@manager.option('-p', '--port', dest='port')
def runsocketserver(host=None, port=None):
host = host or '0.0.0.0'
port = port or 4242
server = SocketServer(host, int(port))
server.run()
if __name__ == '__main__':
manager.run()
# EOF
| []
| []
| [
"REDIS_URL"
]
| [] | ["REDIS_URL"] | python | 1 | 0 | |
metaspace/recal/setup.py | # -*- coding: utf-8 -*-
import os
import platform
import re
import subprocess
import sys
from distutils.version import LooseVersion
from setuptools import setup, find_packages, Extension
from setuptools.command.build_ext import build_ext
def fix_includes_hack():
# IDK what happened to the GitHub Actions container image, but suddenly the build started
# failing on Windows with this error:
# MSIWarp\src\lib\warp\warp.cpp(375,32): error C3861: 'back_inserter': identifier not found
header_contents = open('MSIWarp/src/lib/warp/warp.hpp', 'rt').read()
if '<iterator>' not in header_contents:
header_contents = header_contents.replace(
'#include <vector>', '#include <vector>\n#include <iterator>'
)
open('MSIWarp/src/lib/warp/warp.hpp', 'wt').write(header_contents)
## This CMakeExtension stuff is part of MSIWarp vendoring (bundling a built copy of their library with our library)
## It's hacky and should be removed as soon as there's a MSIWarp package available on PyPI
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=''):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
def run(self):
fix_includes_hack()
try:
out = subprocess.check_output(['cmake', '--version'])
except OSError:
raise RuntimeError(
"CMake must be installed to build the following extensions: "
+ ", ".join(e.name for e in self.extensions)
)
if platform.system() == "Windows":
cmake_version = LooseVersion(re.search(r'version\s*([\d.]+)', out.decode()).group(1))
if cmake_version < '3.1.0':
raise RuntimeError("CMake >= 3.1.0 is required on Windows")
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
cmake_args = [
'-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
'-DPYTHON_EXECUTABLE=' + sys.executable,
]
cfg = 'Debug' if self.debug else 'Release'
build_args = ['--config', cfg]
if platform.system() == "Windows":
cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir)]
if sys.maxsize > 2 ** 32:
cmake_args += ['-A', 'x64']
build_args += ['--', '/m']
else:
cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
build_args += ['--', '-j2']
env = os.environ.copy()
env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(
env.get('CXXFLAGS', ''), self.distribution.get_version()
)
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env)
subprocess.check_call(['cmake', '--build', '.'] + build_args, cwd=self.build_temp)
print() # Add an empty line for cleaner output
setup(
name='msi_recal',
version='0.2.3',
description='Pipeline for mostly unsupervised recalibration of imzML mass spectrometry data',
url='https://github.com/metaspace2020/metaspace/tree/master/metaspace/recal',
author='Alexandrov Team, EMBL',
package_dir={
'msiwarp': 'MSIWarp/src/python-bindings/msiwarp',
'': '.',
},
packages=[*find_packages(), 'msiwarp', 'msiwarp.util'],
package_data={'msi_recal': ['dbs/*.csv']},
install_requires=[
'numpy',
'scipy',
'matplotlib',
'seaborn',
'pyimzml',
'pyMSpec',
'cpyMSpec',
'scikit-learn',
'typing-extensions; python_version < "3.8"',
],
# Vendorize MSIWarp because it's not on PyPI yet
ext_modules=[CMakeExtension('msiwarp/msiwarp_cpp')],
cmdclass={"build_ext": CMakeBuild},
)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
autotest/ogr/ogr_gml_read.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: GML Reading Driver testing.
# Author: Frank Warmerdam <[email protected]>
#
###############################################################################
# Copyright (c) 2006, Frank Warmerdam <[email protected]>
# Copyright (c) 2008-2014, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import sys
sys.path.append( '../pymod' )
import gdaltest
import ogrtest
from osgeo import gdal
from osgeo import ogr
from osgeo import osr
import shutil
###############################################################################
# Test reading geometry and attribute from ionic wfs gml file.
#
def ogr_gml_1():
gdaltest.have_gml_reader = 0
try:
gml_ds = ogr.Open( 'data/ionic_wfs.gml' )
except:
gml_ds = None
if gml_ds is None:
if gdal.GetLastErrorMsg().find('Xerces') != -1:
return 'skip'
else:
gdaltest.post_reason( 'failed to open test file.' )
return 'fail'
gdaltest.have_gml_reader = 1
if gml_ds.GetLayerCount() != 1:
gdaltest.post_reason( 'wrong number of layers' )
return 'fail'
lyr = gml_ds.GetLayerByName('GEM')
feat = lyr.GetNextFeature()
if feat.GetField('Name') != 'Aartselaar':
gdaltest.post_reason( 'Wrong name field value' )
return 'fail'
wkt = 'POLYGON ((44038 511549,44015 511548,43994 511522,43941 511539,43844 511514,43754 511479,43685 511521,43594 511505,43619 511452,43645 511417,4363 511387,437 511346,43749 511298,43808 511229,43819 511205,4379 511185,43728 511167,43617 511175,43604 511151,43655 511125,43746 511143,43886 511154,43885 511178,43928 511186,43977 511217,4404 511223,44008 511229,44099 51131,44095 511335,44106 51135,44127 511379,44124 511435,44137 511455,44105 511467,44098 511484,44086 511499,4407 511506,44067 511535,44038 511549))'
if ogrtest.check_feature_geometry( feat, wkt):
return 'fail'
feat = lyr.GetNextFeature()
if feat is not None:
gdaltest.post_reason( 'got unexpected feature.' )
return 'fail'
return 'success'
###############################################################################
# Do the same test somewhere without a .gfs file.
def ogr_gml_2():
if not gdaltest.have_gml_reader:
return 'skip'
# copy gml file (but not .gfs file)
open('tmp/ionic_wfs.gml','w').write(open('data/ionic_wfs.gml').read())
gml_ds = ogr.Open( 'tmp/ionic_wfs.gml' )
if gml_ds.GetLayerCount() != 1:
gdaltest.post_reason( 'wrong number of layers' )
return 'fail'
lyr = gml_ds.GetLayerByName('GEM')
feat = lyr.GetNextFeature()
if feat.GetField('Name') != 'Aartselaar':
gdaltest.post_reason( 'Wrong name field value' )
return 'fail'
wkt = 'POLYGON ((44038 511549,44015 511548,43994 511522,43941 511539,43844 511514,43754 511479,43685 511521,43594 511505,43619 511452,43645 511417,4363 511387,437 511346,43749 511298,43808 511229,43819 511205,4379 511185,43728 511167,43617 511175,43604 511151,43655 511125,43746 511143,43886 511154,43885 511178,43928 511186,43977 511217,4404 511223,44008 511229,44099 51131,44095 511335,44106 51135,44127 511379,44124 511435,44137 511455,44105 511467,44098 511484,44086 511499,4407 511506,44067 511535,44038 511549))'
if ogrtest.check_feature_geometry( feat, wkt):
return 'fail'
feat = lyr.GetNextFeature()
if feat is not None:
gdaltest.post_reason( 'got unexpected feature.' )
return 'fail'
return 'success'
###############################################################################
# Similar test for RNF style line data.
def ogr_gml_3():
if not gdaltest.have_gml_reader:
return 'skip'
gml_ds = ogr.Open( 'data/rnf_eg.gml' )
if gml_ds.GetLayerCount() != 1:
gdaltest.post_reason( 'wrong number of layers' )
return 'fail'
lyr = gml_ds.GetLayerByName('RoadSegment')
feat = lyr.GetNextFeature()
if feat.GetField('ngd_id') != 817792:
gdaltest.post_reason( 'Wrong ngd_id field value' )
return 'fail'
if feat.GetField('type') != 'HWY':
gdaltest.post_reason( 'Wrong type field value' )
return 'fail'
wkt = 'LINESTRING (-63.500411040289066 46.240122507771368,-63.501009714909742 46.240344881690326,-63.502170462373471 46.241041855639622,-63.505862621395394 46.24195250605576,-63.506719184531178 46.242002742901576,-63.507197272602212 46.241931577811606,-63.508403092799554 46.241752283460158,-63.509946573455622 46.241745397977233)'
if ogrtest.check_feature_geometry( feat, wkt):
return 'fail'
feat = lyr.GetNextFeature()
if feat is not None:
gdaltest.post_reason( 'got unexpected feature.' )
return 'fail'
return 'success'
###############################################################################
# Test reading a GML file with a UTF-8 BOM indicator.
# Also test support for nested GML elements (#3680)
def ogr_gml_4():
if not gdaltest.have_gml_reader:
return 'skip'
gml_ds = ogr.Open( 'data/bom.gml' )
if gml_ds.GetLayerCount() != 1:
gdaltest.post_reason( 'wrong number of layers' )
return 'fail'
lyr = gml_ds.GetLayerByName('CartographicText')
if lyr.GetFeatureCount() != 3:
gdaltest.post_reason( 'wrong number of features' )
return 'fail'
# Test 1st feature
feat = lyr.GetNextFeature()
if feat.GetField('featureCode') != 10198:
gdaltest.post_reason( 'Wrong featureCode field value' )
return 'fail'
if feat.GetField('anchorPosition') != 8:
gdaltest.post_reason( 'Wrong anchorPosition field value' )
return 'fail'
wkt = 'POINT (347243.85 461299.5)'
if ogrtest.check_feature_geometry( feat, wkt):
return 'fail'
# Test 2nd feature
feat = lyr.GetNextFeature()
if feat.GetField('featureCode') != 10069:
gdaltest.post_reason( 'Wrong featureCode field value' )
return 'fail'
wkt = 'POINT (347251.45 461250.85)'
if ogrtest.check_feature_geometry( feat, wkt):
return 'fail'
return 'success'
###############################################################################
# Test reading a GML file that triggered bug #2349
def ogr_gml_5():
if not gdaltest.have_gml_reader:
return 'skip'
gml_ds = ogr.Open( 'data/ticket_2349_test_1.gml' )
lyr = gml_ds.GetLayerByName('MyPolyline')
lyr.SetAttributeFilter( 'height > 300' )
lyr.GetNextFeature()
return 'success'
###############################################################################
# Test of various FIDs (various prefixes and lengths) (Ticket#1017)
def ogr_gml_6():
if not gdaltest.have_gml_reader:
return 'skip'
files = ['test_point1', 'test_point2', 'test_point3', 'test_point4']
fids = []
for filename in files:
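        # Reset the FID list in place for each file; every FID must be non-negative and unique within a file.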
fids[:] = []
gml_ds = ogr.Open( 'data' + os.sep + filename + '.gml' )
lyr = gml_ds.GetLayer()
feat = lyr.GetNextFeature()
while feat is not None:
if ( feat.GetFID() < 0 ) or ( feat.GetFID() in fids ):
os.remove( 'data' + os.sep + filename + '.gfs' )
gdaltest.post_reason( 'Wrong FID value' )
return 'fail'
fids.append(feat.GetFID())
feat = lyr.GetNextFeature()
os.remove( 'data' + os.sep + filename + '.gfs' )
return 'success'
###############################################################################
# Test colon-terminated prefixes for attribute values (Ticket#2493)
def ogr_gml_7():
if not gdaltest.have_gml_reader:
return 'skip'
gdal.SetConfigOption('GML_EXPOSE_FID', 'FALSE')
gml_ds = ogr.Open( 'data/test_point.gml' )
gdal.SetConfigOption('GML_EXPOSE_FID', None)
lyr = gml_ds.GetLayer()
ldefn = lyr.GetLayerDefn()
# Test fix for #2969
if lyr.GetFeatureCount() != 5:
gdaltest.post_reason( 'Bad feature count' )
return 'fail'
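    # GetFieldTypeName may not exist in older bindings; skip the type checks in that case.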
try:
ldefn.GetFieldDefn(0).GetFieldTypeName
except:
return 'skip'
if ldefn.GetFieldDefn(0).GetFieldTypeName(ldefn.GetFieldDefn(0).GetType())\
!= 'Real':
return 'fail'
if ldefn.GetFieldDefn(1).GetFieldTypeName(ldefn.GetFieldDefn(1).GetType())\
!= 'Integer':
return 'fail'
if ldefn.GetFieldDefn(2).GetFieldTypeName(ldefn.GetFieldDefn(2).GetType())\
!= 'String':
return 'fail'
return 'success'
###############################################################################
# Test a GML file with some non-ASCII UTF-8 content that triggered a bug (Ticket#2948)
def ogr_gml_8():
if not gdaltest.have_gml_reader:
return 'skip'
gml_ds = ogr.Open( 'data/utf8.gml' )
lyr = gml_ds.GetLayer()
feat = lyr.GetNextFeature()
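    # Under Python 3 the byte-escaped literal must be re-interpreted as UTF-8 before comparing; under Python 2 the raw byte string is compared directly.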
if sys.version_info >= (3,0,0):
if feat.GetFieldAsString('name') != '\xc4\x80liamanu'.encode('latin1').decode('utf-8'):
print(feat.GetFieldAsString('name'))
return 'fail'
else:
if feat.GetFieldAsString('name') != '\xc4\x80liamanu':
print(feat.GetFieldAsString('name'))
return 'fail'
gml_ds.Destroy()
return 'success'
###############################################################################
# Test writing invalid UTF-8 content in a GML file (ticket #2971)
def ogr_gml_9():
if not gdaltest.have_gml_reader:
return 'skip'
drv = ogr.GetDriverByName('GML')
ds = drv.CreateDataSource('tmp/broken_utf8.gml')
lyr = ds.CreateLayer('test')
lyr.CreateField(ogr.FieldDefn('test', ogr.OFTString))
dst_feat = ogr.Feature( lyr.GetLayerDefn() )
    dst_feat.SetFieldBinaryFromHexString('test', '80626164')  # '\x80bad', an invalid UTF-8 sequence
# Avoid the warning
gdal.PushErrorHandler('CPLQuietErrorHandler')
ret = lyr.CreateFeature( dst_feat )
gdal.PopErrorHandler()
if ret != 0:
gdaltest.post_reason('CreateFeature failed.')
return 'fail'
dst_feat.Destroy()
ds.Destroy()
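    # Re-open and check that the invalid byte was replaced by '?' when the file was written.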
ds = ogr.Open('tmp/broken_utf8.gml')
lyr = ds.GetLayerByName('test')
feat = lyr.GetNextFeature()
if feat.GetField('test') != '?bad':
gdaltest.post_reason('Unexpected content.')
print(feat.GetField('test'))
return 'fail'
feat.Destroy()
ds.Destroy()
os.remove('tmp/broken_utf8.gml')
os.remove('tmp/broken_utf8.xsd')
return 'success'
###############################################################################
# Test writing different data types in a GML file (ticket #2857)
# TODO: Add test for other data types as they are added to the driver.
def ogr_gml_10():
if not gdaltest.have_gml_reader:
return 'skip'
drv = ogr.GetDriverByName('GML')
ds = drv.CreateDataSource('tmp/fields.gml')
lyr = ds.CreateLayer('test')
field_defn = ogr.FieldDefn('string', ogr.OFTString)
field_defn.SetWidth(100)
lyr.CreateField(field_defn)
lyr.CreateField(ogr.FieldDefn('date', ogr.OFTDate))
field_defn = ogr.FieldDefn('real', ogr.OFTReal)
field_defn.SetWidth(4)
field_defn.SetPrecision(2)
lyr.CreateField(field_defn)
lyr.CreateField(ogr.FieldDefn('float', ogr.OFTReal))
field_defn = ogr.FieldDefn('integer', ogr.OFTInteger)
field_defn.SetWidth(5)
lyr.CreateField(field_defn)
dst_feat = ogr.Feature( lyr.GetLayerDefn() )
dst_feat.SetField('string', 'test string of length 24')
dst_feat.SetField('date', '2003/04/22')
dst_feat.SetField('real', 12.34)
dst_feat.SetField('float', 1234.5678)
dst_feat.SetField('integer', '1234')
ret = lyr.CreateFeature( dst_feat )
if ret != 0:
gdaltest.post_reason('CreateFeature failed.')
return 'fail'
dst_feat.Destroy()
ds.Destroy()
ds = ogr.Open('tmp/fields.gml')
lyr = ds.GetLayerByName('test')
feat = lyr.GetNextFeature()
if feat.GetFieldDefnRef(feat.GetFieldIndex('string')).GetType() != ogr.OFTString:
gdaltest.post_reason('String type is reported wrong. Got ' + str(feat.GetFieldDefnRef(feat.GetFieldIndex('string')).GetType()))
return 'fail'
if feat.GetFieldDefnRef(feat.GetFieldIndex('date')).GetType() != ogr.OFTString:
gdaltest.post_reason('Date type is not reported as OFTString. Got ' + str(feat.GetFieldDefnRef(feat.GetFieldIndex('date')).GetType()))
return 'fail'
if feat.GetFieldDefnRef(feat.GetFieldIndex('real')).GetType() != ogr.OFTReal:
gdaltest.post_reason('Real type is reported wrong. Got ' + str(feat.GetFieldDefnRef(feat.GetFieldIndex('real')).GetType()))
return 'fail'
if feat.GetFieldDefnRef(feat.GetFieldIndex('float')).GetType() != ogr.OFTReal:
gdaltest.post_reason('Float type is not reported as OFTReal. Got ' + str(feat.GetFieldDefnRef(feat.GetFieldIndex('float')).GetType()))
return 'fail'
if feat.GetFieldDefnRef(feat.GetFieldIndex('integer')).GetType() != ogr.OFTInteger:
gdaltest.post_reason('Integer type is reported wrong. Got ' + str(feat.GetFieldDefnRef(feat.GetFieldIndex('integer')).GetType()))
return 'fail'
if feat.GetField('string') != 'test string of length 24':
gdaltest.post_reason('Unexpected string content.' + feat.GetField('string') )
return 'fail'
if feat.GetField('date') != '2003/04/22':
gdaltest.post_reason('Unexpected string content.' + feat.GetField('date') )
return 'fail'
if feat.GetFieldAsDouble('real') != 12.34:
gdaltest.post_reason('Unexpected real content.')
return 'fail'
if feat.GetField('float') != 1234.5678:
gdaltest.post_reason('Unexpected float content.')
return 'fail'
if feat.GetField('integer') != 1234:
gdaltest.post_reason('Unexpected integer content.')
return 'fail'
if lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('string')).GetWidth() != 100:
gdaltest.post_reason('Unexpected width of string field.')
return 'fail'
if lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('real')).GetWidth() != 4:
gdaltest.post_reason('Unexpected width of real field.')
return 'fail'
if lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('real')).GetPrecision() != 2:
gdaltest.post_reason('Unexpected precision of real field.')
return 'fail'
if lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('integer')).GetWidth() != 5:
gdaltest.post_reason('Unexpected width of integer field.')
return 'fail'
    feat.Destroy()
ds.Destroy()
os.remove('tmp/fields.gml')
os.remove('tmp/fields.xsd')
return 'success'
###############################################################################
# Test reading a geometry element specified with <GeometryElementPath>
def ogr_gml_11():
if not gdaltest.have_gml_reader:
return 'skip'
    # Make sure the .gfs file is more recent than the .gml one
try:
gml_mtime = os.stat('data/testgeometryelementpath.gml').st_mtime
gfs_mtime = os.stat('data/testgeometryelementpath.gfs').st_mtime
touch_gfs = gfs_mtime <= gml_mtime
except:
touch_gfs = True
if touch_gfs:
print('Touching .gfs file')
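        # Read and rewrite the first byte in place: the content stays the same but the mtime is bumped so the .gfs stays newer than the .gml.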
f = open('data/testgeometryelementpath.gfs', 'rb+')
data = f.read(1)
f.seek(0, 0)
f.write(data)
f.close()
ds = ogr.Open('data/testgeometryelementpath.gml')
lyr = ds.GetLayer(0)
if lyr.GetGeometryColumn() != 'location1container|location1':
gdaltest.post_reason('did not get expected geometry column name')
return 'fail'
feat = lyr.GetNextFeature()
if feat.GetField('attrib1') != 'attrib1_value':
gdaltest.post_reason('did not get expected value for attrib1')
return 'fail'
if feat.GetField('attrib2') != 'attrib2_value':
gdaltest.post_reason('did not get expected value for attrib2')
return 'fail'
geom = feat.GetGeometryRef()
if geom.ExportToWkt() != 'POINT (3 50)':
gdaltest.post_reason('did not get expected geometry')
return 'fail'
ds = None
return 'success'
###############################################################################
# Test reading a virtual GML file
def ogr_gml_12():
if not gdaltest.have_gml_reader:
return 'skip'
ds = ogr.Open('/vsizip/data/testgeometryelementpath.zip/testgeometryelementpath.gml')
lyr = ds.GetLayer(0)
if lyr.GetGeometryColumn() != 'location1container|location1':
gdaltest.post_reason('did not get expected geometry column name')
return 'fail'
feat = lyr.GetNextFeature()
if feat.GetField('attrib1') != 'attrib1_value':
gdaltest.post_reason('did not get expected value for attrib1')
return 'fail'
if feat.GetField('attrib2') != 'attrib2_value':
gdaltest.post_reason('did not get expected value for attrib2')
return 'fail'
geom = feat.GetGeometryRef()
if geom.ExportToWkt() != 'POINT (3 50)':
gdaltest.post_reason('did not get expected geometry')
return 'fail'
ds = None
return 'success'
###############################################################################
# Test reading GML with StringList, IntegerList and RealList fields
def ogr_gml_13():
if not gdaltest.have_gml_reader:
return 'skip'
ds = ogr.Open('data/testlistfields.gml')
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
if feat.GetFieldAsStringList(feat.GetFieldIndex('attrib1')) != ['value1','value2']:
gdaltest.post_reason('did not get expected value for attrib1')
return 'fail'
if feat.GetField(feat.GetFieldIndex('attrib2')) != 'value3':
gdaltest.post_reason('did not get expected value for attrib2')
return 'fail'
if feat.GetFieldAsIntegerList(feat.GetFieldIndex('attrib3')) != [4,5]:
gdaltest.post_reason('did not get expected value for attrib3')
return 'fail'
if feat.GetFieldAsDoubleList(feat.GetFieldIndex('attrib4')) != [6.1,7.1]:
gdaltest.post_reason('did not get expected value for attrib4')
return 'fail'
ds = None
return 'success'
###############################################################################
# Test xlink resolution
def ogr_gml_14():
if not gdaltest.have_gml_reader:
return 'skip'
    # We need CURL for xlink resolution; the availability of the WMS driver
    # is a sign that CURL support is compiled in.
try:
gdaltest.wms_drv = gdal.GetDriverByName( 'WMS' )
except:
gdaltest.wms_drv = None
if gdaltest.wms_drv is None:
return 'skip'
if gdaltest.gdalurlopen('http://download.osgeo.org/gdal/data/gml/xlink3.gml') is None:
print('cannot open URL')
return 'skip'
files = [ 'xlink1.gml', 'xlink2.gml', 'expected1.gml', 'expected2.gml' ]
for file in files:
if not gdaltest.download_file('http://download.osgeo.org/gdal/data/gml/' + file, file ):
return 'skip'
gdal.SetConfigOption( 'GML_SKIP_RESOLVE_ELEMS', 'NONE' )
gdal.SetConfigOption( 'GML_SAVE_RESOLVED_TO', 'tmp/cache/xlink1resolved.gml' )
gml_ds = ogr.Open( 'tmp/cache/xlink1.gml' )
gml_ds = None
gdal.SetConfigOption( 'GML_SKIP_RESOLVE_ELEMS', 'gml:directedNode' )
gdal.SetConfigOption( 'GML_SAVE_RESOLVED_TO', 'tmp/cache/xlink2resolved.gml' )
gml_ds = ogr.Open( 'tmp/cache/xlink1.gml' )
del gml_ds
gdal.SetConfigOption( 'GML_SKIP_RESOLVE_ELEMS', None )
gdal.SetConfigOption( 'GML_SAVE_RESOLVED_TO', None )
try:
fp = open( 'tmp/cache/xlink1resolved.gml', 'r' )
text = fp.read()
fp.close()
os.remove( 'tmp/cache/xlink1resolved.gml' )
fp = open( 'tmp/cache/expected1.gml', 'r' )
expectedtext = fp.read()
fp.close()
except:
return 'fail'
if text != expectedtext:
print('Problem with file 1')
return 'fail'
try:
fp = open( 'tmp/cache/xlink2resolved.gml', 'r' )
text = fp.read()
fp.close()
os.remove( 'tmp/cache/xlink2resolved.gml' )
fp = open( 'tmp/cache/expected2.gml', 'r' )
expectedtext = fp.read()
fp.close()
except:
return 'fail'
if text != expectedtext:
print('Problem with file 2')
return 'fail'
return 'success'
###############################################################################
# Run test_ogrsf
def ogr_gml_15():
if not gdaltest.have_gml_reader:
return 'skip'
import test_cli_utilities
if test_cli_utilities.get_test_ogrsf_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_test_ogrsf_path() + ' -ro data/test_point.gml')
if ret.find('INFO') == -1 or ret.find('ERROR') != -1:
print(ret)
return 'fail'
return 'success'
###############################################################################
# Read CityGML generic attributes
def ogr_gml_16():
if not gdaltest.have_gml_reader:
return 'skip'
ds = ogr.Open('data/citygml.gml')
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
if feat.GetField('Name_') != 'aname' or \
feat.GetField('a_int_attr') != 2 or \
feat.GetField('a_double_attr') != 3.45:
feat.DumpReadable()
gdaltest.post_reason('did not get expected values')
return 'fail'
return 'success'
###############################################################################
# Read layer SRS for WFS 1.0.0 return
def ogr_gml_17():
if not gdaltest.have_gml_reader:
return 'skip'
ds = ogr.Open('data/gnis_pop_100.gml')
lyr = ds.GetLayer(0)
sr = lyr.GetSpatialRef()
got_wkt = sr.ExportToWkt()
if got_wkt.find('GEOGCS["WGS 84"') == -1:
gdaltest.post_reason('did not get expected SRS')
print(got_wkt)
return 'fail'
feat = lyr.GetNextFeature()
geom = feat.GetGeometryRef()
got_wkt = geom.ExportToWkt()
if got_wkt != 'POINT (2.09 34.12)':
gdaltest.post_reason('did not get expected geometry')
print(got_wkt)
return 'fail'
return 'success'
###############################################################################
# Read layer SRS for WFS 1.1.0 return
def ogr_gml_18():
if not gdaltest.have_gml_reader:
return 'skip'
ds = ogr.Open('data/gnis_pop_110.gml')
lyr = ds.GetLayer(0)
sr = lyr.GetSpatialRef()
got_wkt = sr.ExportToWkt()
if got_wkt.find('GEOGCS["WGS 84"') == -1:
gdaltest.post_reason('did not get expected SRS')
print(got_wkt)
return 'fail'
feat = lyr.GetNextFeature()
geom = feat.GetGeometryRef()
got_wkt = geom.ExportToWkt()
if got_wkt != 'POINT (2.09 34.12)':
gdaltest.post_reason('did not get expected geometry')
print(got_wkt)
return 'fail'
return 'success'
###############################################################################
# Read layer SRS for WFS 1.1.0 return, but without trying to restore
# (long, lat) order. So we should get EPSGA:4326 and (lat, long) order
def ogr_gml_19():
if not gdaltest.have_gml_reader:
return 'skip'
try:
os.remove( 'data/gnis_pop_110.gfs' )
except:
pass
gdal.SetConfigOption('GML_INVERT_AXIS_ORDER_IF_LAT_LONG', 'NO')
ds = ogr.Open('data/gnis_pop_110.gml')
gdal.SetConfigOption('GML_INVERT_AXIS_ORDER_IF_LAT_LONG', None)
lyr = ds.GetLayer(0)
sr = lyr.GetSpatialRef()
got_wkt = sr.ExportToWkt()
if got_wkt.find('GEOGCS["WGS 84"') == -1 or \
got_wkt.find('AXIS["Latitude",NORTH],AXIS["Longitude",EAST]') == -1:
gdaltest.post_reason('did not get expected SRS')
print(got_wkt)
return 'fail'
feat = lyr.GetNextFeature()
geom = feat.GetGeometryRef()
got_wkt = geom.ExportToWkt()
if got_wkt != 'POINT (34.12 2.09)':
gdaltest.post_reason('did not get expected geometry')
print(got_wkt)
return 'fail'
return 'success'
###############################################################################
# Test parsing a .xsd where the type definition is before its reference
def ogr_gml_20():
if not gdaltest.have_gml_reader:
return 'skip'
try:
os.remove( 'data/archsites.gfs' )
except:
pass
ds = ogr.Open('data/archsites.gml')
lyr = ds.GetLayer(0)
ldefn = lyr.GetLayerDefn()
try:
ldefn.GetFieldDefn(0).GetFieldTypeName
except:
return 'skip'
idx = ldefn.GetFieldIndex("gml_id")
if idx == -1:
gdaltest.post_reason('did not get expected column "gml_id"')
return 'fail'
idx = ldefn.GetFieldIndex("cat")
fddefn = ldefn.GetFieldDefn(idx)
if fddefn.GetFieldTypeName(fddefn.GetType()) != 'Integer64':
gdaltest.post_reason('did not get expected column type for col "cat"')
return 'fail'
idx = ldefn.GetFieldIndex("str1")
fddefn = ldefn.GetFieldDefn(idx)
if fddefn.GetFieldTypeName(fddefn.GetType()) != 'String':
gdaltest.post_reason('did not get expected column type for col "str1"')
return 'fail'
if lyr.GetGeometryColumn() != 'the_geom':
gdaltest.post_reason('did not get expected geometry column name')
return 'fail'
if ldefn.GetGeomType() != ogr.wkbPoint:
gdaltest.post_reason('did not get expected geometry type')
return 'fail'
ds = None
try:
os.stat('data/archsites.gfs')
        gdaltest.post_reason('did not expect a .gfs file -> XSD parsing failed')
return 'fail'
except:
return 'success'
###############################################################################
# Test writing GML3
def ogr_gml_21(format = 'GML3'):
if not gdaltest.have_gml_reader:
return 'skip'
# Create GML3 file
sr = osr.SpatialReference()
sr.ImportFromEPSG(4326)
for filename in ['tmp/gml_21.gml', 'tmp/gml_21.xsd', 'tmp/gml_21.gfs']:
try:
os.remove(filename)
except:
pass
ds = ogr.GetDriverByName('GML').CreateDataSource('tmp/gml_21.gml', options = ['FORMAT=' + format] )
lyr = ds.CreateLayer('firstlayer', srs = sr)
lyr.CreateField(ogr.FieldDefn('string_field', ogr.OFTString))
feat = ogr.Feature(lyr.GetLayerDefn())
geom = ogr.CreateGeometryFromWkt('POINT (2 49)')
feat.SetGeometry(geom)
lyr.CreateFeature(feat)
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetField(0, 'foo')
geom = ogr.CreateGeometryFromWkt('POINT (3 48)')
feat.SetGeometry(geom)
lyr.CreateFeature(feat)
ds = None
# Reopen the file
ds = ogr.Open('tmp/gml_21.gml')
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
if feat.GetGeometryRef().ExportToWkt() != 'POINT (2 49)':
gdaltest.post_reason('did not get expected geometry')
return 'fail'
ds = None
# Test that .gml and .xsd are identical to what is expected
f1 = open('tmp/gml_21.gml', 'rt')
if format == 'GML3.2':
f2 = open('data/expected_gml_gml32.gml', 'rt')
else:
f2 = open('data/expected_gml_21.gml', 'rt')
line1 = f1.readline()
line2 = f2.readline()
while line1 != '':
line1 = line1.strip()
line2 = line2.strip()
if line1 != line2:
gdaltest.post_reason('.gml file not identical to expected')
print(open('tmp/gml_21.gml', 'rt').read())
return 'fail'
line1 = f1.readline()
line2 = f2.readline()
f1.close()
f2.close()
f1 = open('tmp/gml_21.xsd', 'rt')
if format == 'GML3':
f2 = open('data/expected_gml_21.xsd', 'rt')
elif format == 'GML3.2':
f2 = open('data/expected_gml_gml32.xsd', 'rt')
else:
f2 = open('data/expected_gml_21_deegree3.xsd', 'rt')
line1 = f1.readline()
line2 = f2.readline()
while line1 != '':
line1 = line1.strip()
line2 = line2.strip()
if line1 != line2:
gdaltest.post_reason('.xsd file not identical to expected')
print(open('tmp/gml_21.xsd', 'rt').read())
return 'fail'
line1 = f1.readline()
line2 = f2.readline()
f1.close()
f2.close()
return 'success'
def ogr_gml_21_deegree3():
return ogr_gml_21('GML3Deegree')
def ogr_gml_21_gml32():
return ogr_gml_21('GML3.2')
###############################################################################
# Read a OpenLS DetermineRouteResponse document
def ogr_gml_22():
if not gdaltest.have_gml_reader:
return 'skip'
ds = ogr.Open('data/paris_typical_strike_demonstration.xml')
lyr = ds.GetLayerByName('RouteGeometry')
if lyr is None:
gdaltest.post_reason('cannot find RouteGeometry')
return 'fail'
lyr = ds.GetLayerByName('RouteInstruction')
if lyr is None:
gdaltest.post_reason('cannot find RouteInstruction')
return 'fail'
count = lyr.GetFeatureCount()
if count != 9:
gdaltest.post_reason('did not get expected feature count')
print(count)
return 'fail'
ds = None
return 'success'
###############################################################################
# Test that the SRS defined in the global gml:Envelope is used when no SRS
# is set for any feature geometry
def ogr_gml_23():
if not gdaltest.have_gml_reader:
return 'skip'
try:
os.remove( 'tmp/global_geometry.gfs' )
except:
pass
shutil.copy('data/global_geometry.xml', 'tmp/global_geometry.xml')
# Here we use only the .xml file
ds = ogr.Open('tmp/global_geometry.xml')
lyr = ds.GetLayer(0)
sr = lyr.GetSpatialRef()
got_wkt = sr.ExportToWkt()
if got_wkt.find('GEOGCS["WGS 84"') == -1 or \
got_wkt.find('AXIS["Latitude",NORTH],AXIS["Longitude",EAST]') != -1:
gdaltest.post_reason('did not get expected SRS')
print(got_wkt)
return 'fail'
feat = lyr.GetNextFeature()
geom = feat.GetGeometryRef()
got_wkt = geom.ExportToWkt()
if got_wkt != 'POINT (2 49)':
gdaltest.post_reason('did not get expected geometry')
print(got_wkt)
return 'fail'
extent = lyr.GetExtent()
if extent != (2.0, 3.0, 49.0, 50.0):
gdaltest.post_reason('did not get expected layer extent')
print(extent)
return 'fail'
return 'success'
###############################################################################
# Test that the SRS defined in the global gml:Envelope is used when no SRS
# is set for any feature geometry
def ogr_gml_24():
if not gdaltest.have_gml_reader:
return 'skip'
try:
os.remove( 'data/global_geometry.gfs' )
except:
pass
# Here we use only the .xml file and the .xsd file
ds = ogr.Open('data/global_geometry.xml')
lyr = ds.GetLayer(0)
# Because we read the .xsd, we (currently) don't find the SRS
#sr = lyr.GetSpatialRef()
#got_wkt = sr.ExportToWkt()
#if got_wkt.find('GEOGCS["WGS 84"') == -1 or \
# got_wkt.find('AXIS["Latitude",NORTH],AXIS["Longitude",EAST]') != -1:
# gdaltest.post_reason('did not get expected SRS')
# print(got_wkt)
# return 'fail'
feat = lyr.GetNextFeature()
geom = feat.GetGeometryRef()
got_wkt = geom.ExportToWkt()
if got_wkt != 'POINT (2 49)':
gdaltest.post_reason('did not get expected geometry')
print(got_wkt)
return 'fail'
extent = lyr.GetExtent()
if extent != (2.0, 3.0, 49.0, 50.0):
gdaltest.post_reason('did not get expected layer extent')
print(extent)
return 'fail'
return 'success'
###############################################################################
# Test fixes for #3934 and #3935
def ogr_gml_25():
if not gdaltest.have_gml_reader:
return 'skip'
if int(gdal.VersionInfo('VERSION_NUM')) < 1900:
gdaltest.post_reason('would crash')
return 'skip'
try:
os.remove( 'data/curveProperty.gfs' )
except:
pass
gdal.SetConfigOption('GML_FACE_HOLE_NEGATIVE', 'YES')
ds = ogr.Open('data/curveProperty.xml')
gdal.SetConfigOption('GML_FACE_HOLE_NEGATIVE', None)
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
geom = feat.GetGeometryRef()
got_wkt = geom.ExportToWkt()
if got_wkt != 'POLYGON ((14 21,6 21,6 9,14 9,22 9,22 21,14 21))':
gdaltest.post_reason('did not get expected geometry')
print(got_wkt)
return 'fail'
return 'success'
###############################################################################
# Test writing and reading 3D geoms (GML2)
def ogr_gml_26():
if not gdaltest.have_gml_reader:
return 'skip'
import test_cli_utilities
if test_cli_utilities.get_ogr2ogr_path() is None:
return 'skip'
gdaltest.runexternal(test_cli_utilities.get_ogr2ogr_path() + ' -f GML tmp/ogr_gml_26.gml data/poly.shp -zfield eas_id')
f = open('tmp/ogr_gml_26.gml', 'rt')
content = f.read()
f.close()
if content.find("<gml:coord><gml:X>478315.53125</gml:X><gml:Y>4762880.5</gml:Y><gml:Z>158</gml:Z></gml:coord>") == -1:
return 'fail'
ds = ogr.Open('tmp/ogr_gml_26.gml')
lyr = ds.GetLayer(0)
if lyr.GetGeomType() != ogr.wkbPolygon25D:
return 'fail'
ds = None
return 'success'
###############################################################################
# Test writing and reading 3D geoms (GML3)
def ogr_gml_27():
if not gdaltest.have_gml_reader:
return 'skip'
import test_cli_utilities
if test_cli_utilities.get_ogr2ogr_path() is None:
return 'skip'
gdaltest.runexternal(test_cli_utilities.get_ogr2ogr_path() + ' -f GML tmp/ogr_gml_27.gml data/poly.shp -zfield eas_id -dsco FORMAT=GML3')
f = open('tmp/ogr_gml_27.gml', 'rt')
content = f.read()
f.close()
if content.find("<gml:lowerCorner>478315.53125 4762880.5 158</gml:lowerCorner>") == -1:
return 'fail'
ds = ogr.Open('tmp/ogr_gml_27.gml')
lyr = ds.GetLayer(0)
if lyr.GetGeomType() != ogr.wkbPolygon25D:
gdaltest.post_reason('fail')
print(lyr.GetGeomType())
return 'fail'
ds = None
return 'success'
###############################################################################
# Test writing and reading layers of type wkbNone (#4154)
def ogr_gml_28():
if not gdaltest.have_gml_reader:
return 'skip'
import test_cli_utilities
if test_cli_utilities.get_ogr2ogr_path() is None:
return 'skip'
gdaltest.runexternal(test_cli_utilities.get_ogr2ogr_path() + ' -f GML tmp/ogr_gml_28.gml data/idlink.dbf')
# Try with .xsd
ds = ogr.Open('tmp/ogr_gml_28.gml')
lyr = ds.GetLayer(0)
if lyr.GetGeomType() != ogr.wkbNone:
gdaltest.post_reason('fail')
return 'fail'
ds = None
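    # Remove the .xsd so the next open has to derive the layer definition itself; the final open should then rely on the generated .gfs.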
os.unlink('tmp/ogr_gml_28.xsd')
ds = ogr.Open('tmp/ogr_gml_28.gml')
lyr = ds.GetLayer(0)
if lyr.GetGeomType() != ogr.wkbNone:
gdaltest.post_reason('fail')
return 'fail'
ds = None
# Try with .gfs
ds = ogr.Open('tmp/ogr_gml_28.gml')
lyr = ds.GetLayer(0)
if lyr.GetGeomType() != ogr.wkbNone:
gdaltest.post_reason('fail')
return 'fail'
ds = None
return 'success'
###############################################################################
# Test reading FME GMLs
def ogr_gml_29():
if not gdaltest.have_gml_reader:
return 'skip'
ds = ogr.Open('data/testfmegml.gml')
expected_results = [ [ ogr.wkbMultiPoint, 'MULTIPOINT (2 49)' ],
[ ogr.wkbMultiPolygon, 'MULTIPOLYGON (((2 49,3 49,3 50,2 50,2 49)))'],
[ ogr.wkbMultiLineString, 'MULTILINESTRING ((2 49,3 50))'],
]
for j in range(len(expected_results)):
lyr = ds.GetLayer(j)
if lyr.GetGeomType() != expected_results[j][0]:
gdaltest.post_reason('layer %d, did not get expected layer geometry type' % j)
return 'fail'
for i in range(2):
feat = lyr.GetNextFeature()
geom = feat.GetGeometryRef()
got_wkt = geom.ExportToWkt()
if got_wkt != expected_results[j][1]:
gdaltest.post_reason('layer %d, did not get expected geometry' % j)
print(got_wkt)
return 'fail'
ds = None
return 'success'
###############################################################################
# Test reading a big field and a big geometry
def ogr_gml_30():
if not gdaltest.have_gml_reader:
return 'skip'
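    # Build a large attribute (a single space doubled 11 times = 2048 chars) and a long posList ('0 1 ' doubled 9 times = 512 vertices).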
field1 = " "
for i in range(11):
field1 = field1 + field1
geom = "0 1 "
for i in range(9):
geom = geom + geom
data = """<FeatureCollection xmlns:gml="http://www.opengis.net/gml">
<gml:featureMember>
<layer1>
<geometry><gml:LineString><gml:posList>%s</gml:posList></gml:LineString></geometry>
<field1>A%sZ</field1>
</layer1>
</gml:featureMember>
</FeatureCollection>""" % (geom, field1)
f = gdal.VSIFOpenL("/vsimem/ogr_gml_30.gml", "wb")
gdal.VSIFWriteL(data, 1, len(data), f)
gdal.VSIFCloseL(f)
ds = ogr.Open("/vsimem/ogr_gml_30.gml")
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
field1 = feat.GetField(0)
geom_wkt = feat.GetGeometryRef().ExportToWkt()
ds = None
gdal.Unlink("/vsimem/ogr_gml_30.gml")
gdal.Unlink("/vsimem/ogr_gml_30.gfs")
if len(field1) != 2050:
gdaltest.post_reason('did not get expected len(field1)')
print(field1)
print(len(field1))
return 'fail'
if len(geom_wkt) != 2060:
gdaltest.post_reason('did not get expected len(geom_wkt)')
print(geom_wkt)
print(len(geom_wkt))
return 'fail'
return 'success'
###############################################################################
# Test SEQUENTIAL_LAYERS
def ogr_gml_31():
if not gdaltest.have_gml_reader:
return 'skip'
gdal.SetConfigOption('GML_READ_MODE', 'SEQUENTIAL_LAYERS')
ret = ogr_gml_29()
gdal.SetConfigOption('GML_READ_MODE', None)
if ret != 'success':
return ret
# Test reading second layer and then first layer
gdal.SetConfigOption('GML_READ_MODE', 'SEQUENTIAL_LAYERS')
ds = ogr.Open('data/testfmegml.gml')
gdal.SetConfigOption('GML_READ_MODE', None)
lyr = ds.GetLayer(1)
feat = lyr.GetNextFeature()
feat = lyr.GetNextFeature()
if feat.GetFID() != 1:
gdaltest.post_reason('did not get feature when reading directly second layer')
return 'fail'
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
feat = lyr.GetNextFeature()
if feat.GetFID() != 1:
gdaltest.post_reason('did not get feature when reading back first layer')
return 'fail'
return 'success'
###############################################################################
# Test SEQUENTIAL_LAYERS without a .gfs
def ogr_gml_32():
if not gdaltest.have_gml_reader:
return 'skip'
# Test without .xsd or .gfs
f = gdal.VSIFOpenL("data/testfmegml.gml", "rb")
data = gdal.VSIFReadL(1, 10000, f)
gdal.VSIFCloseL(f)
f = gdal.VSIFOpenL("/vsimem/ogr_gml_31.gml", "wb")
gdal.VSIFWriteL(data, 1, len(data), f)
gdal.VSIFCloseL(f)
ds = ogr.Open('/vsimem/ogr_gml_31.gml')
lyr = ds.GetLayer(1)
feat = lyr.GetNextFeature()
feat = lyr.GetNextFeature()
if feat.GetFID() != 1:
gdaltest.post_reason('did not get feature when reading directly second layer')
return 'fail'
ds = None
f = gdal.VSIFOpenL("/vsimem/ogr_gml_31.gfs", "rb")
data = gdal.VSIFReadL(1, 10000, f)
gdal.VSIFCloseL(f)
data = str(data)
if data.find("<SequentialLayers>true</SequentialLayers>") == -1:
gdaltest.post_reason('did not find <SequentialLayers>true</SequentialLayers> in .gfs')
return 'fail'
gdal.Unlink("/vsimem/ogr_gml_31.gml")
gdal.Unlink("/vsimem/ogr_gml_31.gfs")
return 'success'
###############################################################################
# Test INTERLEAVED_LAYERS
def ogr_gml_33():
if not gdaltest.have_gml_reader:
return 'skip'
# Test reading second layer and then first layer
gdal.SetConfigOption('GML_READ_MODE', 'INTERLEAVED_LAYERS')
ds = ogr.Open('data/testfmegml_interleaved.gml')
gdal.SetConfigOption('GML_READ_MODE', None)
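    # Each entry is [layer_index, expected_fid]; None means the layer is expected to be exhausted at that point.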
read_sequence = [ [0,1],
[0,None],
[1,3],
[2,5],
[2,None],
[0,2],
[1,4],
[1,None],
[2,6],
[2,None],
[0,None],
[1,None],
[2,None] ]
for i in range(len(read_sequence)):
lyr = ds.GetLayer(read_sequence[i][0])
feat = lyr.GetNextFeature()
if feat is None:
fid = None
else:
fid = feat.GetFID()
expected_fid = read_sequence[i][1]
if fid != expected_fid:
gdaltest.post_reason('failed at step %d' % i)
return 'fail'
return 'success'
###############################################################################
# Test writing non-ASCII UTF-8 content (#4117, #4299)
def ogr_gml_34():
if not gdaltest.have_gml_reader:
return 'skip'
drv = ogr.GetDriverByName('GML')
ds = drv.CreateDataSource( '/vsimem/ogr_gml_34.gml' )
lyr = ds.CreateLayer('test')
lyr.CreateField(ogr.FieldDefn("name", ogr.OFTString))
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetField(0, '\xc4\x80liamanu<&')
lyr.CreateFeature(feat)
feat = None
ds = None
ds = ogr.Open( '/vsimem/ogr_gml_34.gml' )
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
if feat.GetFieldAsString('name') != '\xc4\x80liamanu<&':
print(feat.GetFieldAsString('name'))
return 'fail'
ds = None
gdal.Unlink( '/vsimem/ogr_gml_34.gml' )
gdal.Unlink( '/vsimem/ogr_gml_34.gfs' )
return 'success'
###############################################################################
# Test GML_SKIP_RESOLVE_ELEMS=HUGE (#4380)
def ogr_gml_35():
if not gdaltest.have_gml_reader:
return 'skip'
if ogr.GetDriverByName('SQLite') is None:
return 'skip'
if not ogrtest.have_geos():
return 'skip'
try:
os.remove( 'tmp/GmlTopo-sample.sqlite' )
except:
pass
try:
os.remove( 'tmp/GmlTopo-sample.gfs' )
except:
pass
try:
os.remove( 'tmp/GmlTopo-sample.resolved.gml' )
except:
pass
shutil.copy('data/GmlTopo-sample.xml', 'tmp/GmlTopo-sample.xml')
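    # With GML_SKIP_RESOLVE_ELEMS=HUGE, xlink resolution goes through a temporary SQLite database; the test checks that it is not left behind.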
gdal.SetConfigOption('GML_SKIP_RESOLVE_ELEMS', 'HUGE')
ds = ogr.Open('tmp/GmlTopo-sample.xml')
gdal.SetConfigOption('GML_SKIP_RESOLVE_ELEMS', None)
try:
os.stat('tmp/GmlTopo-sample.sqlite')
gdaltest.post_reason('did not expect tmp/GmlTopo-sample.sqlite')
return 'fail'
except:
pass
if gdal.GetLastErrorMsg() != '':
gdaltest.post_reason('did not expect error')
return 'fail'
if ds.GetLayerCount() != 3:
        # We have an extra layer: ResolvedNodes
gdaltest.post_reason('expected 3 layers, got %d' % ds.GetLayerCount())
return 'fail'
lyr = ds.GetLayerByName('Suolo')
feat = lyr.GetNextFeature()
wkt = 'MULTIPOLYGON (((-0.1 0.6,-0.0 0.7,0.2 0.7,0.3 0.6,0.5 0.6,0.5 0.8,0.7 0.8,0.8 0.6,0.9 0.6,0.9 0.4,0.7 0.3,0.7 0.2,0.9 0.1,0.9 -0.1,0.6 -0.2,0.3 -0.2,0.2 -0.2,-0.1 0.0,-0.1 0.1,-0.1 0.2,0.1 0.3,0.1 0.4,-0.0 0.4,-0.1 0.5,-0.1 0.6)))'
if ogrtest.check_feature_geometry( feat, wkt):
print(feat.GetGeometryRef())
return 'fail'
ds = None
ds = ogr.Open('tmp/GmlTopo-sample.xml')
lyr = ds.GetLayerByName('Suolo')
feat = lyr.GetNextFeature()
if ogrtest.check_feature_geometry( feat, wkt):
print(feat.GetGeometryRef())
return 'fail'
ds = None
return 'success'
###############################################################################
# Test GML_SKIP_RESOLVE_ELEMS=NONE (and new GMLTopoSurface interpretation)
def ogr_gml_36(GML_FACE_HOLE_NEGATIVE = 'NO'):
if not gdaltest.have_gml_reader:
return 'skip'
if GML_FACE_HOLE_NEGATIVE == 'NO':
if not ogrtest.have_geos():
return 'skip'
try:
os.remove( 'tmp/GmlTopo-sample.gfs' )
except:
pass
try:
os.remove( 'tmp/GmlTopo-sample.resolved.gml' )
except:
pass
shutil.copy('data/GmlTopo-sample.xml', 'tmp/GmlTopo-sample.xml')
gdal.SetConfigOption('GML_SKIP_RESOLVE_ELEMS', 'NONE')
gdal.SetConfigOption('GML_FACE_HOLE_NEGATIVE', GML_FACE_HOLE_NEGATIVE)
ds = ogr.Open('tmp/GmlTopo-sample.xml')
gdal.SetConfigOption('GML_SKIP_RESOLVE_ELEMS', None)
gdal.SetConfigOption('GML_FACE_HOLE_NEGATIVE', None)
if gdal.GetLastErrorMsg() != '':
gdaltest.post_reason('did not expect error')
return 'fail'
lyr = ds.GetLayerByName('Suolo')
feat = lyr.GetNextFeature()
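    # With GEOS available (GML_FACE_HOLE_NEGATIVE=NO) the faces are expected to be assembled into a valid MultiPolygon; with the legacy interpretation the holes are expected as inner rings of a single Polygon.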
if GML_FACE_HOLE_NEGATIVE == 'NO':
wkt = 'MULTIPOLYGON (((-0.1 0.6,-0.0 0.7,0.2 0.7,0.3 0.6,0.5 0.6,0.5 0.8,0.7 0.8,0.8 0.6,0.9 0.6,0.9 0.4,0.7 0.3,0.7 0.2,0.9 0.1,0.9 -0.1,0.6 -0.2,0.3 -0.2,0.2 -0.2,-0.1 0.0,-0.1 0.1,-0.1 0.2,0.1 0.3,0.1 0.4,-0.0 0.4,-0.1 0.5,-0.1 0.6)))'
else:
wkt = 'POLYGON ((-0.1 0.6,-0.0 0.7,0.2 0.7,0.3 0.6,0.5 0.6,0.5 0.8,0.7 0.8,0.8 0.6,0.9 0.6,0.9 0.4,0.7 0.3,0.7 0.2,0.9 0.1,0.9 -0.1,0.6 -0.2,0.3 -0.2,0.2 -0.2,-0.1 0.0,-0.1 0.1,-0.1 0.2,0.1 0.3,0.1 0.4,-0.0 0.4,-0.1 0.5,-0.1 0.6),(0.2 0.2,0.2 0.4,0.4 0.4,0.5 0.2,0.5 0.1,0.5 0.0,0.2 0.0,0.2 0.2),(0.6 0.1,0.8 0.1,0.8 -0.1,0.6 -0.1,0.6 0.1))'
if ogrtest.check_feature_geometry( feat, wkt):
print(feat.GetGeometryRef())
return 'fail'
ds = None
gdal.SetConfigOption('GML_FACE_HOLE_NEGATIVE', GML_FACE_HOLE_NEGATIVE)
ds = ogr.Open('tmp/GmlTopo-sample.xml')
gdal.SetConfigOption('GML_FACE_HOLE_NEGATIVE', None)
lyr = ds.GetLayerByName('Suolo')
feat = lyr.GetNextFeature()
if ogrtest.check_feature_geometry( feat, wkt):
print(feat.GetGeometryRef())
return 'fail'
ds = None
return 'success'
###############################################################################
# Test GML_SKIP_RESOLVE_ELEMS=NONE with old GMLTopoSurface interpretation
def ogr_gml_37():
return ogr_gml_36('YES')
###############################################################################
# Test new GMLTopoSurface interpretation (#3934) with HUGE xlink resolver
def ogr_gml_38(resolver = 'HUGE'):
if not gdaltest.have_gml_reader:
return 'skip'
if resolver == 'HUGE':
if ogr.GetDriverByName('SQLite') is None:
return 'skip'
if not ogrtest.have_geos():
return 'skip'
try:
os.remove( 'tmp/sample_gml_face_hole_negative_no.sqlite' )
except:
pass
try:
os.remove( 'tmp/sample_gml_face_hole_negative_no.gfs' )
except:
pass
try:
os.remove( 'tmp/sample_gml_face_hole_negative_no.resolved.gml' )
except:
pass
shutil.copy('data/sample_gml_face_hole_negative_no.xml', 'tmp/sample_gml_face_hole_negative_no.xml')
gdal.SetConfigOption('GML_SKIP_RESOLVE_ELEMS', resolver)
ds = ogr.Open('tmp/sample_gml_face_hole_negative_no.xml')
gdal.SetConfigOption('GML_SKIP_RESOLVE_ELEMS', None)
gdal.SetConfigOption('GML_FACE_HOLE_NEGATIVE', None)
if resolver == 'HUGE':
try:
os.stat('tmp/sample_gml_face_hole_negative_no.sqlite')
gdaltest.post_reason('did not expect tmp/sample_gml_face_hole_negative_no.sqlite')
return 'fail'
except:
pass
if gdal.GetLastErrorMsg() != '':
gdaltest.post_reason('did not expect error')
return 'fail'
lyr = ds.GetLayerByName('Suolo')
feat = lyr.GetNextFeature()
wkt = 'MULTIPOLYGON (((0.9 0.6,0.9 0.4,0.7 0.3,0.7 0.2,0.9 0.1,0.9 -0.1,0.6 -0.2,0.3 -0.2,0.2 -0.2,-0.1 0.0,-0.1 0.1,-0.1 0.2,0.1 0.3,0.1 0.4,-0.0 0.4,-0.1 0.5,-0.1 0.6,-0.0 0.7,0.2 0.7,0.3 0.6,0.5 0.6,0.5 0.8,0.7 0.8,0.8 0.6,0.9 0.6),(0.6 0.1,0.6 -0.1,0.8 -0.1,0.8 0.1,0.6 0.1),(0.2 0.4,0.2 0.2,0.2 0.0,0.5 0.0,0.5 0.1,0.5 0.2,0.4 0.4,0.2 0.4)))'
if ogrtest.check_feature_geometry( feat, wkt):
print(feat.GetGeometryRef())
return 'fail'
ds = None
return 'success'
###############################################################################
# Test new GMLTopoSurface interpretation (#3934) with standard xlink resolver
def ogr_gml_39():
return ogr_gml_38('NONE')
###############################################################################
# Test parsing an XSD where simpleTypes are not inlined, but defined elsewhere in the .xsd (#4328)
def ogr_gml_40():
if not gdaltest.have_gml_reader:
return 'skip'
ds = ogr.Open('data/testLookForSimpleType.xml')
lyr = ds.GetLayer(0)
fld_defn = lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('CITYNAME'))
if fld_defn.GetWidth() != 26:
return 'fail'
return 'success'
###############################################################################
# Test validating against .xsd
def ogr_gml_41():
gdaltest.have_gml_validation = False
if not gdaltest.have_gml_reader:
return 'skip'
if not gdaltest.download_file('http://schemas.opengis.net/SCHEMAS_OPENGIS_NET.zip', 'SCHEMAS_OPENGIS_NET.zip' ):
return 'skip'
ds = ogr.Open('data/expected_gml_21.gml')
gdal.SetConfigOption('GDAL_OPENGIS_SCHEMAS', '/vsizip/./tmp/cache/SCHEMAS_OPENGIS_NET.zip')
lyr = ds.ExecuteSQL('SELECT ValidateSchema()')
gdal.SetConfigOption('GDAL_OPENGIS_SCHEMAS', None)
feat = lyr.GetNextFeature()
val = feat.GetFieldAsInteger(0)
feat = None
ds.ReleaseResultSet(lyr)
if val == 0:
if gdal.GetLastErrorMsg().find('not implemented due to missing libxml2 support') == -1:
return 'fail'
return 'skip'
gdaltest.have_gml_validation = True
return 'success'
###############################################################################
# Test validating against .xsd
def ogr_gml_42():
if not gdaltest.have_gml_validation:
return 'skip'
try:
os.mkdir('tmp/cache/SCHEMAS_OPENGIS_NET')
except:
pass
try:
os.stat('tmp/cache/SCHEMAS_OPENGIS_NET/gml')
except:
gdaltest.unzip( 'tmp/cache/SCHEMAS_OPENGIS_NET', 'tmp/cache/SCHEMAS_OPENGIS_NET.zip')
ds = ogr.Open('data/expected_gml_gml32.gml')
gdal.SetConfigOption('GDAL_OPENGIS_SCHEMAS', './tmp/cache/SCHEMAS_OPENGIS_NET')
lyr = ds.ExecuteSQL('SELECT ValidateSchema()')
gdal.SetConfigOption('GDAL_OPENGIS_SCHEMAS', None)
feat = lyr.GetNextFeature()
val = feat.GetFieldAsInteger(0)
feat = None
ds.ReleaseResultSet(lyr)
if val == 0:
return 'fail'
return 'success'
###############################################################################
# Test automated downloading of WFS schema
def ogr_gml_43():
# The service times out
return 'skip'
if not gdaltest.have_gml_reader:
return 'skip'
ds = ogr.Open('data/wfs_typefeature.gml')
if ds is None:
return 'fail'
ds = None
try:
os.stat('data/wfs_typefeature.gfs')
gfs_found = True
except:
gfs_found = False
pass
if gfs_found:
if gdaltest.gdalurlopen('http://testing.deegree.org:80/deegree-wfs/services?SERVICE=WFS&VERSION=1.1.0&REQUEST=DescribeFeatureType&TYPENAME=app:Springs&NAMESPACE=xmlns(app=http://www.deegree.org/app)') is None:
can_download_schema = False
else:
can_download_schema = gdal.GetDriverByName('HTTP') is not None
if can_download_schema:
gdaltest.post_reason('.gfs found, but schema could be downloaded')
return 'fail'
return 'success'
###############################################################################
# Test providing a custom XSD filename
def ogr_gml_44():
if not gdaltest.have_gml_reader:
return 'skip'
xsd_content = """<?xml version="1.0" encoding="UTF-8"?>
<xs:schema targetNamespace="http://ogr.maptools.org/" xmlns:ogr="http://ogr.maptools.org/" xmlns:xs="http://www.w3.org/2001/XMLSchema" xmlns:gml="http://www.opengis.net/gml" elementFormDefault="qualified" version="1.0">
<xs:import namespace="http://www.opengis.net/gml" schemaLocation="http://schemas.opengeospatial.net/gml/2.1.2/feature.xsd"/><xs:element name="FeatureCollection" type="ogr:FeatureCollectionType" substitutionGroup="gml:_FeatureCollection"/>
<xs:complexType name="FeatureCollectionType">
<xs:complexContent>
<xs:extension base="gml:AbstractFeatureCollectionType">
<xs:attribute name="lockId" type="xs:string" use="optional"/>
<xs:attribute name="scope" type="xs:string" use="optional"/>
</xs:extension>
</xs:complexContent>
</xs:complexType>
<xs:element name="test_point" type="ogr:test_point_Type" substitutionGroup="gml:_Feature"/>
<xs:complexType name="test_point_Type">
<xs:complexContent>
<xs:extension base="gml:AbstractFeatureType">
<xs:sequence>
<xs:element name="geometryProperty" type="gml:GeometryPropertyType" nillable="true" minOccurs="1" maxOccurs="1"/>
<xs:element name="dbl" nillable="true" minOccurs="0" maxOccurs="1">
<xs:simpleType>
<xs:restriction base="xs:decimal">
<xs:totalDigits value="32"/>
<xs:fractionDigits value="3"/>
</xs:restriction>
</xs:simpleType>
</xs:element>
</xs:sequence>
</xs:extension>
</xs:complexContent>
</xs:complexType>
</xs:schema>"""
gdal.FileFromMemBuffer('/vsimem/ogr_gml_44.xsd', xsd_content)
ds = ogr.Open('data/test_point.gml,xsd=/vsimem/ogr_gml_44.xsd')
lyr = ds.GetLayer(0)
# fid and dbl
if lyr.GetLayerDefn().GetFieldCount() != 2:
return 'fail'
ds = None
gdal.Unlink('/vsimem/ogr_gml_44.xsd')
return 'success'
###############################################################################
# Test PREFIX and TARGET_NAMESPACE creation options
def ogr_gml_45():
if not gdaltest.have_gml_reader:
return 'skip'
drv = ogr.GetDriverByName('GML')
ds = drv.CreateDataSource('/vsimem/ogr_gml_45.gml', options = ['PREFIX=foo', 'TARGET_NAMESPACE=http://bar/'])
lyr = ds.CreateLayer('test')
lyr.CreateField(ogr.FieldDefn('str', ogr.OFTString))
lyr.CreateField(ogr.FieldDefn('int', ogr.OFTInteger))
lyr.CreateField(ogr.FieldDefn('dbl', ogr.OFTReal))
dst_feat = ogr.Feature( lyr.GetLayerDefn() )
dst_feat.SetField('str', 'str')
dst_feat.SetField('int', 1)
dst_feat.SetField('dbl', 2.34)
lyr.CreateFeature( dst_feat )
dst_feat = None
ds = None
if not gdaltest.have_gml_validation:
gdal.Unlink('/vsimem/ogr_gml_45.gml')
gdal.Unlink('/vsimem/ogr_gml_45.xsd')
return 'skip'
# Validate document
ds = ogr.Open('/vsimem/ogr_gml_45.gml')
gdal.SetConfigOption('GDAL_OPENGIS_SCHEMAS', './tmp/cache/SCHEMAS_OPENGIS_NET')
lyr = ds.ExecuteSQL('SELECT ValidateSchema()')
gdal.SetConfigOption('GDAL_OPENGIS_SCHEMAS', None)
feat = lyr.GetNextFeature()
val = feat.GetFieldAsInteger(0)
feat = None
ds.ReleaseResultSet(lyr)
ds = None
gdal.Unlink('/vsimem/ogr_gml_45.gml')
gdal.Unlink('/vsimem/ogr_gml_45.xsd')
if val == 0:
return 'fail'
return 'success'
###############################################################################
# Validate different kinds of GML files
def ogr_gml_46():
if not gdaltest.have_gml_validation:
return 'skip'
wkt_list = [ '',
'POINT (0 1)',
# 'POINT (0 1 2)',
'LINESTRING (0 1,2 3)',
# 'LINESTRING (0 1 2,3 4 5)',
'POLYGON ((0 0,0 1,1 1,1 0,0 0))',
# 'POLYGON ((0 0 10,0 1 10,1 1 10,1 0 10,0 0 10))',
'MULTIPOINT (0 1)',
# 'MULTIPOINT (0 1 2)',
'MULTILINESTRING ((0 1,2 3))',
# 'MULTILINESTRING ((0 1 2,3 4 5))',
'MULTIPOLYGON (((0 0,0 1,1 1,1 0,0 0)))',
# 'MULTIPOLYGON (((0 0 10,0 1 10,1 1 10,1 0 10,0 0 10)))',
'GEOMETRYCOLLECTION (POINT (0 1))',
# 'GEOMETRYCOLLECTION (POINT (0 1 2))'
]
format_list = [ 'GML2', 'GML3', 'GML3Deegree', 'GML3.2' ]
for wkt in wkt_list:
for format in format_list:
drv = ogr.GetDriverByName('GML')
ds = drv.CreateDataSource('/vsimem/ogr_gml_46.gml', options = ['FORMAT=%s' % format])
if wkt != '':
geom = ogr.CreateGeometryFromWkt(wkt)
geom_type = geom.GetGeometryType()
srs = osr.SpatialReference()
srs.ImportFromEPSG(4326)
else:
geom = None
geom_type = ogr.wkbNone
srs = None
lyr = ds.CreateLayer('test', geom_type = geom_type, srs = srs)
lyr.CreateField(ogr.FieldDefn('str', ogr.OFTString))
lyr.CreateField(ogr.FieldDefn('int', ogr.OFTInteger))
lyr.CreateField(ogr.FieldDefn('dbl', ogr.OFTReal))
dst_feat = ogr.Feature( lyr.GetLayerDefn() )
dst_feat.SetField('str', 'str')
dst_feat.SetField('int', 1)
dst_feat.SetField('dbl', 2.34)
dst_feat.SetGeometry(geom)
lyr.CreateFeature( dst_feat )
dst_feat = None
ds = None
# Validate document
ds = ogr.Open('/vsimem/ogr_gml_46.gml')
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
got_geom = feat.GetGeometryRef()
if got_geom is None:
got_geom_wkt = ''
else:
got_geom_wkt = got_geom.ExportToWkt()
if got_geom_wkt != wkt:
                gdaltest.post_reason('geometries do not match')
print('got %s, expected %s' % (got_geom_wkt, wkt))
feat = None
gdal.SetConfigOption('GDAL_OPENGIS_SCHEMAS', './tmp/cache/SCHEMAS_OPENGIS_NET')
lyr = ds.ExecuteSQL('SELECT ValidateSchema()')
gdal.SetConfigOption('GDAL_OPENGIS_SCHEMAS', None)
feat = lyr.GetNextFeature()
val = feat.GetFieldAsInteger(0)
feat = None
ds.ReleaseResultSet(lyr)
ds = None
if val == 0:
gdaltest.post_reason('validation failed for format=%s, wkt=%s' % (format, wkt))
f = gdal.VSIFOpenL('/vsimem/ogr_gml_46.gml', 'rb')
content = gdal.VSIFReadL(1, 10000, f)
gdal.VSIFCloseL(f)
print(content)
f = gdal.VSIFOpenL('/vsimem/ogr_gml_46.xsd', 'rb')
content = gdal.VSIFReadL(1, 10000, f)
gdal.VSIFCloseL(f)
print(content)
gdal.Unlink('/vsimem/ogr_gml_46.gml')
gdal.Unlink('/vsimem/ogr_gml_46.xsd')
if val == 0:
return 'fail'
# Only minor schema changes
if format == 'GML3Deegree':
break
return 'success'
###############################################################################
# Test validation of WFS GML documents
def ogr_gml_47():
if not gdaltest.have_gml_validation:
return 'skip'
filenames = [ 'data/wfs10.xml', 'data/wfs11.xml', 'data/wfs20.xml' ]
for filename in filenames:
# Validate document
ds = ogr.Open(filename)
gdal.SetConfigOption('GDAL_OPENGIS_SCHEMAS', './tmp/cache/SCHEMAS_OPENGIS_NET')
lyr = ds.ExecuteSQL('SELECT ValidateSchema()')
gdal.SetConfigOption('GDAL_OPENGIS_SCHEMAS', None)
feat = lyr.GetNextFeature()
val = feat.GetFieldAsInteger(0)
feat = None
ds.ReleaseResultSet(lyr)
ds = None
if val == 0:
gdaltest.post_reason('validation failed for file=%s' % filename)
return 'fail'
return 'success'
###############################################################################
# Test that we can parse some particular .xsd files that have the geometry
# field declared as:
# <xsd:element name="geometry" minOccurs="0" maxOccurs="1">
# <xsd:complexType>
# <xsd:sequence>
# <xsd:element ref="gml:_Geometry"/>
# </xsd:sequence>
# </xsd:complexType>
# </xsd:element>
def ogr_gml_48():
if not gdaltest.have_gml_reader:
return 'skip'
ds = ogr.Open('data/schema_with_geom_in_complextype.xml')
lyr = ds.GetLayer(0)
if lyr.GetGeomType() != ogr.wkbUnknown:
gdaltest.post_reason('failure')
return 'fail'
if lyr.GetLayerDefn().GetFieldDefn(0).GetType() != ogr.OFTString:
gdaltest.post_reason('failure')
return 'fail'
ds = None
return 'success'
###############################################################################
# Test a pseudo Inspire GML file
def ogr_gml_49():
if not gdaltest.have_gml_reader:
return 'skip'
    gml_content = """<ogr:FeatureCollection xmlns:gml="http://www.opengis.net/gml" xmlns:ogr="http://ogr.maptools.org/">
<gml:featureMember>
<ogr:test>
<ogr:geometry><gml:Polygon><gml:outerBoundaryIs><gml:LinearRing><gml:coordinates>2,49 2,50 3,50 3,49 2,49</gml:coordinates></gml:LinearRing></gml:outerBoundaryIs></gml:Polygon></ogr:geometry>
<ogr:otherGeometry><gml:Point><gml:pos>-2 -49</gml:pos></gml:Point></ogr:otherGeometry>
</ogr:test>
</gml:featureMember>
</ogr:FeatureCollection>
"""
    gdal.FileFromMemBuffer('/vsimem/ogr_gml_49.gml', gml_content)
ds = ogr.Open('/vsimem/ogr_gml_49.gml')
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
if feat.GetGeometryRef().GetGeometryType() != ogr.wkbPolygon:
gdaltest.post_reason('failure')
return 'fail'
ds = None
gdal.Unlink('/vsimem/ogr_gml_49.gml')
gdal.Unlink('/vsimem/ogr_gml_49.gfs')
return 'success'
###############################################################################
# Test support for StringList, IntegerList, RealList
def ogr_gml_50():
if not gdaltest.have_gml_reader:
return 'skip'
drv = ogr.GetDriverByName('GML')
ds = drv.CreateDataSource('/vsimem/ogr_gml_50.gml')
lyr = ds.CreateLayer( 'listlayer' )
field_defn = ogr.FieldDefn( 'stringlist', ogr.OFTStringList )
lyr.CreateField( field_defn )
field_defn = ogr.FieldDefn( 'intlist', ogr.OFTIntegerList )
lyr.CreateField( field_defn )
field_defn = ogr.FieldDefn( 'reallist', ogr.OFTRealList )
lyr.CreateField( field_defn )
feat = ogr.Feature( feature_def = lyr.GetLayerDefn() )
feat.SetFieldStringList(0, ['a', 'b'])
feat.SetFieldIntegerList(1, [2, 3])
feat.SetFieldDoubleList(2, [4.56, 5.67])
lyr.CreateFeature(feat)
ds = None
ds = ogr.Open('/vsimem/ogr_gml_50.gml')
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
if feat.GetFieldAsStringList(lyr.GetLayerDefn().GetFieldIndex('stringlist')) != ['a', 'b']:
gdaltest.post_reason('fail')
feat.DumpReadable()
return 'fail'
if feat.GetFieldAsIntegerList(lyr.GetLayerDefn().GetFieldIndex('intlist')) != [2, 3]:
gdaltest.post_reason('fail')
feat.DumpReadable()
return 'fail'
if feat.GetFieldAsDoubleList(lyr.GetLayerDefn().GetFieldIndex('reallist')) != [4.56, 5.67]:
gdaltest.post_reason('fail')
feat.DumpReadable()
return 'fail'
ds = None
gdal.Unlink('/vsimem/ogr_gml_50.gml')
gdal.Unlink('/vsimem/ogr_gml_50.xsd')
return 'success'
###############################################################################
# Test -dsco WRITE_FEATURE_BOUNDED_BY=no -dsco STRIP_PREFIX=YES
def ogr_gml_51():
if not gdaltest.have_gml_reader:
return 'skip'
import test_cli_utilities
if test_cli_utilities.get_ogr2ogr_path() is None:
return 'skip'
for format in ['GML2', 'GML3']:
gdaltest.runexternal(test_cli_utilities.get_ogr2ogr_path() + ' -f GML tmp/ogr_gml_51.gml data/poly.shp -dsco FORMAT=%s -dsco WRITE_FEATURE_BOUNDED_BY=no -dsco STRIP_PREFIX=YES' % format)
f = open('tmp/ogr_gml_51.gml', 'rt')
content = f.read()
f.close()
if content.find("<FeatureCollection") == -1:
gdaltest.post_reason('fail')
print(content)
return 'fail'
if format == 'GML3':
if content.find("<featureMember>") == -1:
gdaltest.post_reason('fail')
print(content)
return 'fail'
if content.find("""<poly""") == -1:
gdaltest.post_reason('fail')
print(content)
return 'fail'
if content.find("""<AREA>215229.266</AREA>""") == -1:
gdaltest.post_reason('fail')
print(content)
return 'fail'
if content.find("""<gml:boundedBy><gml:Envelope><gml:lowerCorner>479647""") != -1:
gdaltest.post_reason('fail')
print(content)
return 'fail'
ds = ogr.Open('tmp/ogr_gml_51.gml')
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
if feat is None:
return 'fail'
ds = None
return 'success'
###############################################################################
# Test reading MTKGML files
def ogr_gml_52():
if not gdaltest.have_gml_reader:
return 'skip'
try:
os.remove( 'data/fake_mtkgml.gfs' )
except:
pass
for i in range(2):
ds = ogr.Open('data/fake_mtkgml.xml')
lyr = ds.GetLayerByName('A')
if lyr.GetGeomType() != ogr.wkbPoint25D:
gdaltest.post_reason('fail')
return 'fail'
srs = lyr.GetSpatialRef()
if srs is None:
gdaltest.post_reason('fail')
return 'fail'
wkt = srs.ExportToWkt()
if wkt.find('3067') < 0:
gdaltest.post_reason('fail')
print(wkt)
return 'fail'
feat = lyr.GetNextFeature()
if feat.GetField('gid') != '1' or \
feat.GetField('regular_attribute') != 5 or \
feat.GetField('foo_href') != 'some_ref' or \
feat.GetField('teksti') != 'En francais !' or \
feat.GetField('teksti_kieli') != 'fr' or \
ogrtest.check_feature_geometry( feat, 'POINT (280000 7000000 0)') != 0:
gdaltest.post_reason('fail')
feat.DumpReadable()
return 'fail'
lyr = ds.GetLayerByName('B')
if lyr.GetGeomType() != ogr.wkbPolygon25D:
gdaltest.post_reason('fail')
return 'fail'
srs = lyr.GetSpatialRef()
if srs is None:
gdaltest.post_reason('fail')
return 'fail'
feat = lyr.GetNextFeature()
if ogrtest.check_feature_geometry( feat, 'POLYGON ((280000 7000000 0,281000 7000000 0,281000 7001000 0,280000 7001000 0,280000 7000000 0))') != 0:
gdaltest.post_reason('fail')
feat.DumpReadable()
return 'fail'
lyr = ds.GetLayerByName('C')
if lyr.GetGeomType() != ogr.wkbLineString25D:
gdaltest.post_reason('fail')
return 'fail'
feat = lyr.GetNextFeature()
if ogrtest.check_feature_geometry( feat, 'LINESTRING (280000 7000000 0,281000 7000000 0,281000 7001000 0,280000 7001000 0,280000 7000000 0)') != 0:
gdaltest.post_reason('fail')
feat.DumpReadable()
return 'fail'
ds = None
os.remove( 'data/fake_mtkgml.gfs' )
return 'success'
###############################################################################
# Test that we don't recognize .xsd files themselves
def ogr_gml_53():
if not gdaltest.have_gml_reader:
return 'skip'
ds = ogr.Open('data/archsites.xsd')
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
ds = None
return 'success'
###############################################################################
# Test that we can open an empty GML datasource (#249, #5205)
def ogr_gml_54():
if not gdaltest.have_gml_reader:
return 'skip'
try:
os.unlink('data/empty.gfs')
except:
pass
ds = ogr.Open('data/empty.gml')
if ds is None:
gdaltest.post_reason('fail')
return 'fail'
ds = None
# with .gfs now
ds = ogr.Open('data/empty.gml')
if ds is None:
gdaltest.post_reason('fail')
return 'fail'
ds = None
try:
os.unlink('data/empty.gfs')
except:
pass
return 'success'
###############################################################################
# Test support for <xs:include> in schemas
# Necessary for Finnish NLS data
def ogr_gml_55():
if not gdaltest.have_gml_reader:
return 'skip'
ds = ogr.Open('data/ogr_gml_55.gml')
lyr = ds.GetLayer(0)
if lyr.GetLayerDefn().GetFieldDefn(0).GetType() != ogr.OFTString:
return 'fail'
ds = None
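    # No .gfs file should have been written since the schema came from the .xsd; if the unlink succeeds, one was created and that is a failure.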
try:
os.unlink('data/ogr_gml_55.gfs')
return 'fail'
except:
pass
return 'success'
###############################################################################
# Test support for gml:FeaturePropertyType and multiple geometry field
# Necessary for Finnish NLS data
def ogr_gml_56():
if not gdaltest.have_gml_reader:
return 'skip'
gdal.SetConfigOption('GML_REGISTRY', 'data/ogr_gml_56_registry.xml')
ds = ogr.Open('data/ogr_gml_56.gml')
gdal.SetConfigOption('GML_REGISTRY', None)
lyr = ds.GetLayerByName('mainFeature')
if lyr.GetSpatialRef() is None:
gdaltest.post_reason('fail')
return 'failure'
feat = lyr.GetNextFeature()
if feat.GetFieldAsString(feat.GetFieldIndex('subFeatureProperty_href')) != '#subFeature.0':
gdaltest.post_reason('fail')
return 'failure'
if feat.GetFieldAsStringList(feat.GetFieldIndex('subFeatureRepeatedProperty_href')) != ['#subFeatureRepeated.0','#subFeatureRepeated.1']:
gdaltest.post_reason('fail')
return 'failure'
if feat.GetGeomFieldRef(0).ExportToWkt() != 'POLYGON ((0 0,0 1,1 1,1 0,0 0))':
gdaltest.post_reason('fail')
return 'failure'
if feat.GetGeomFieldRef(1).ExportToWkt() != 'POINT (10 10)':
gdaltest.post_reason('fail')
return 'failure'
lyr = ds.GetLayerByName('subFeature')
if lyr.GetLayerDefn().GetGeomFieldCount() != 0:
gdaltest.post_reason('fail')
return 'failure'
feat = lyr.GetNextFeature()
if feat.GetFieldAsStringList(feat.GetFieldIndex('subFeatureRepeatedProperty_href')) != ['#subFeatureRepeated.2']:
gdaltest.post_reason('fail')
return 'failure'
if feat.GetField('foo') != 'bar':
gdaltest.post_reason('fail')
return 'failure'
lyr = ds.GetLayerByName('subFeatureRepeated')
feat = lyr.GetNextFeature()
if feat.GetField('gml_id') != 'subFeatureRepeated.2':
gdaltest.post_reason('fail')
return 'failure'
if feat.GetField('bar') != 'baz':
gdaltest.post_reason('fail')
return 'failure'
feat = lyr.GetNextFeature()
if feat.GetField('gml_id') != 'subFeatureRepeated.0':
gdaltest.post_reason('fail')
return 'failure'
feat = lyr.GetNextFeature()
if feat.GetField('gml_id') != 'subFeatureRepeated.1':
gdaltest.post_reason('fail')
return 'failure'
ds = None
try:
os.unlink('data/ogr_gml_56.gfs')
return 'fail'
except:
pass
return 'success'
###############################################################################
# Test write support for multiple geometry fields
def ogr_gml_57():
if not gdaltest.have_gml_reader:
return 'skip'
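# Four passes: i=0 no SRS assigned, i=1 both geometry fields in EPSG:32630, i=2 different SRS per field (32630 and 32631), i=3 GML 3.2 output format.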
for i in range(4):
options = []
if i == 3:
options = [ 'FORMAT=GML3.2' ]
ds = ogr.GetDriverByName('GML').CreateDataSource('/vsimem/ogr_gml_57.gml', options = options)
if ds.TestCapability(ogr.ODsCCreateGeomFieldAfterCreateLayer) != 1:
gdaltest.post_reason('fail')
return 'fail'
lyr = ds.CreateLayer('myLayer', geom_type = ogr.wkbNone)
if lyr.TestCapability(ogr.OLCCreateGeomField) != 1:
gdaltest.post_reason('fail')
return 'fail'
geomfielddefn = ogr.GeomFieldDefn('first_geometry', ogr.wkbPoint)
if i == 1 or i == 2:
sr = osr.SpatialReference()
sr.ImportFromEPSG(32630)
geomfielddefn.SetSpatialRef(sr)
lyr.CreateGeomField(geomfielddefn)
geomfielddefn = ogr.GeomFieldDefn('second_geometry', ogr.wkbLineString)
if i == 1:
sr = osr.SpatialReference()
sr.ImportFromEPSG(32630)
geomfielddefn.SetSpatialRef(sr)
elif i == 2:
sr = osr.SpatialReference()
sr.ImportFromEPSG(32631)
geomfielddefn.SetSpatialRef(sr)
lyr.CreateGeomField(geomfielddefn)
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetGeomFieldDirectly(0, ogr.CreateGeometryFromWkt('POINT (0 1)'))
feat.SetGeomFieldDirectly(1, ogr.CreateGeometryFromWkt('LINESTRING (2 3,4 5)'))
lyr.CreateFeature(feat)
feat = None
ds = None
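# Debugging aid: switch to True to dump the generated GML file to stdout.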
if False:
f = gdal.VSIFOpenL('/vsimem/ogr_gml_57.gml', 'rb')
print(gdal.VSIFReadL(1, 1000, f))
gdal.VSIFCloseL(f)
ds = ogr.Open('/vsimem/ogr_gml_57.gml')
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
if i == 1 and feat.GetGeomFieldRef(0).GetSpatialReference().ExportToWkt().find('32630') < 0:
gdaltest.post_reason('fail')
return 'failure'
if i == 1 and feat.GetGeomFieldRef(1).GetSpatialReference().ExportToWkt().find('32630') < 0:
gdaltest.post_reason('fail')
return 'failure'
if i == 2 and feat.GetGeomFieldRef(1).GetSpatialReference().ExportToWkt().find('32631') < 0:
gdaltest.post_reason('fail')
return 'failure'
if feat.GetGeomFieldRef(0).ExportToWkt() != 'POINT (0 1)':
gdaltest.post_reason('fail')
return 'failure'
if feat.GetGeomFieldRef(1).ExportToWkt() != 'LINESTRING (2 3,4 5)':
gdaltest.post_reason('fail')
return 'failure'
ds = None
gdal.Unlink('/vsimem/ogr_gml_57.gml')
gdal.Unlink('/vsimem/ogr_gml_57.xsd')
return 'success'
###############################################################################
# Test support for Inspire Cadastral schemas
def ogr_gml_58():
if not gdaltest.have_gml_reader:
return 'skip'
ds = ogr.Open('data/inspire_cadastralparcel.xml')
lyr = ds.GetLayer(0)
lyr_defn = lyr.GetLayerDefn()
if lyr_defn.GetGeomFieldCount() != 2:
gdaltest.post_reason('fail')
return 'failure'
if lyr_defn.GetGeomFieldDefn(0).GetName() != 'geometry':
gdaltest.post_reason('fail')
return 'failure'
if lyr_defn.GetGeomFieldDefn(0).GetType() != ogr.wkbMultiPolygon:
gdaltest.post_reason('fail')
return 'failure'
if lyr_defn.GetGeomFieldDefn(1).GetName() != 'referencePoint':
gdaltest.post_reason('fail')
return 'failure'
if lyr_defn.GetGeomFieldDefn(1).GetType() != ogr.wkbPoint:
gdaltest.post_reason('fail')
return 'failure'
feat = lyr.GetNextFeature()
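# (field name, expected value) pairs for the first CadastralParcel feature.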
expected = [ ('gml_id', 'CadastralParcel-01'),
('areaValue', 10.0),
('areaValue_uom', 'm2'),
('beginLifespanVersion', '2000-01-01T00:00:00.0Z'),
('endLifespanVersion', '2001-01-01T00:00:00.0Z'),
('inspireId_localId', 'CadastralParcel-01-localId'),
('inspireId_namespace', 'namespace'),
('label', 'label'),
('nationalCadastralReference', 'nationalCadastralReference'),
('validFrom', '2002-01-01T00:00:00.0Z'),
('validTo', '2003-01-01T00:00:00.0Z'),
('basicPropertyUnit_href', ['#BPU.1','#BPU.2'] ),
('administrativeUnit_href', '#AU.1'),
('zoning_href', '#CZ.1') ]
for (key,val) in expected:
if feat.GetField(key) != val:
print(key)
print(val)
print(feat.GetField(key))
gdaltest.post_reason('fail')
return 'failure'
if feat.GetGeomFieldRef(0).ExportToWkt() != 'MULTIPOLYGON (((2 49,2 50,3 50,3 49)))':
gdaltest.post_reason('fail')
return 'failure'
if feat.GetGeomFieldRef(1).ExportToWkt() != 'POINT (2.5 49.5)':
gdaltest.post_reason('fail')
return 'failure'
feat = lyr.GetNextFeature()
expected = [ ('gml_id', 'CadastralParcel-02'),
('areaValue', None),
('areaValue_uom', None),
('beginLifespanVersion', '2000-01-01T00:00:00.0Z'),
('endLifespanVersion', None),
('inspireId_localId', 'CadastralParcel-02-localId'),
('inspireId_namespace', 'namespace'),
('label', 'label'),
('nationalCadastralReference', 'nationalCadastralReference'),
('validFrom', None),
('validTo', None),
('basicPropertyUnit_href', None ),
('administrativeUnit_href', None),
('zoning_href', None) ]
for (key,val) in expected:
if feat.GetField(key) != val:
print(key)
print(val)
print(feat.GetField(key))
gdaltest.post_reason('fail')
return 'failure'
if feat.GetGeomFieldRef(0).ExportToWkt() != 'MULTIPOLYGON (((2 49,2 50,3 50,3 49)))':
gdaltest.post_reason('fail')
return 'failure'
if feat.GetGeomFieldRef(1) is not None:
gdaltest.post_reason('fail')
return 'failure'
feat = None
lyr = None
ds = None
ds = ogr.Open('data/inspire_basicpropertyunit.xml')
lyr = ds.GetLayer(0)
lyr_defn = lyr.GetLayerDefn()
if lyr_defn.GetGeomFieldCount() != 0:
gdaltest.post_reason('fail')
return 'failure'
feat = lyr.GetNextFeature()
expected = [ ('gml_id', 'BasicPropertyUnit-01'),
('inspireId_localId', 'BasicPropertyUnit-01-localId'),
('inspireId_namespace', 'namespace'),
('nationalCadastralReference', 'nationalCadastralReference'),
('areaValue', 10.0),
('areaValue_uom', 'm2'),
('validFrom', '2000-01-01T00:00:00.0Z'),
('validTo', '2001-01-01T00:00:00.0Z'),
('beginLifespanVersion', '2002-01-01T00:00:00.0Z'),
('endLifespanVersion', '2003-01-01T00:00:00.0Z'),
('administrativeUnit_href', '#AU.1') ]
for (key,val) in expected:
if feat.GetField(key) != val:
print(key)
print(val)
print(feat.GetField(key))
gdaltest.post_reason('fail')
return 'failure'
feat = lyr.GetNextFeature()
expected = [ ('gml_id', 'BasicPropertyUnit-02'),
('inspireId_localId', 'BasicPropertyUnit-02-localId'),
('inspireId_namespace', 'namespace'),
('nationalCadastralReference', 'nationalCadastralReference'),
('areaValue', None),
('areaValue_uom', None),
('validFrom', '2000-01-01T00:00:00.0Z'),
('validTo', None),
('beginLifespanVersion', '2002-01-01T00:00:00.0Z'),
('endLifespanVersion', None),
('administrativeUnit_href', None) ]
for (key,val) in expected:
if feat.GetField(key) != val:
print(key)
print(val)
print(feat.GetField(key))
gdaltest.post_reason('fail')
return 'failure'
feat = None
lyr = None
ds = None
ds = ogr.Open('data/inspire_cadastralboundary.xml')
lyr = ds.GetLayer(0)
lyr_defn = lyr.GetLayerDefn()
if lyr_defn.GetGeomFieldCount() != 1:
gdaltest.post_reason('fail')
return 'failure'
if lyr_defn.GetGeomFieldDefn(0).GetName() != 'geometry':
gdaltest.post_reason('fail')
return 'failure'
if lyr_defn.GetGeomFieldDefn(0).GetType() != ogr.wkbLineString:
gdaltest.post_reason('fail')
return 'failure'
feat = lyr.GetNextFeature()
expected = [ ('gml_id', 'CadastralBoundary-01'),
('beginLifespanVersion', '2000-01-01T00:00:00.0Z'),
('endLifespanVersion', '2001-01-01T00:00:00.0Z'),
('estimatedAccuracy', 1.0),
('estimatedAccuracy_uom', 'm'),
('inspireId_localId', 'CadastralBoundary-01-localId'),
('inspireId_namespace', 'namespace'),
('validFrom', '2002-01-01T00:00:00.0Z'),
('validTo', '2003-01-01T00:00:00.0Z'),
('parcel_href', ['#Parcel.1','#Parcel.2'] ) ]
for (key,val) in expected:
if feat.GetField(key) != val:
print(key)
print(val)
print(feat.GetField(key))
gdaltest.post_reason('fail')
return 'failure'
if feat.GetGeomFieldRef(0).ExportToWkt() != 'LINESTRING (2 49,3 50)':
gdaltest.post_reason('fail')
return 'failure'
feat = lyr.GetNextFeature()
expected = [ ('gml_id', 'CadastralBoundary-02'),
('beginLifespanVersion', '2000-01-01T00:00:00.0Z'),
('endLifespanVersion', None),
('estimatedAccuracy', None),
('estimatedAccuracy_uom', None),
('inspireId_localId', 'CadastralBoundary-02-localId'),
('inspireId_namespace', 'namespace'),
('validFrom', None),
('validTo', None),
('parcel_href', None ) ]
for (key,val) in expected:
if feat.GetField(key) != val:
print(key)
print(val)
print(feat.GetField(key))
gdaltest.post_reason('fail')
return 'failure'
if feat.GetGeomFieldRef(0).ExportToWkt() != 'LINESTRING (2 49,3 50)':
gdaltest.post_reason('fail')
return 'failure'
feat = None
lyr = None
ds = None
ds = ogr.Open('data/inspire_cadastralzoning.xml')
lyr = ds.GetLayer(0)
lyr_defn = lyr.GetLayerDefn()
if lyr_defn.GetGeomFieldCount() != 2:
gdaltest.post_reason('fail')
return 'failure'
if lyr_defn.GetGeomFieldDefn(0).GetName() != 'geometry':
gdaltest.post_reason('fail')
return 'failure'
if lyr_defn.GetGeomFieldDefn(0).GetType() != ogr.wkbMultiPolygon:
gdaltest.post_reason('fail')
return 'failure'
if lyr_defn.GetGeomFieldDefn(1).GetName() != 'referencePoint':
gdaltest.post_reason('fail')
return 'failure'
if lyr_defn.GetGeomFieldDefn(1).GetType() != ogr.wkbPoint:
gdaltest.post_reason('fail')
return 'failure'
feat = lyr.GetNextFeature()
expected = [ ('gml_id', 'CadastralZoning-01'),
('beginLifespanVersion', '2000-01-01T00:00:00.0Z'),
('endLifespanVersion', '2001-01-01T00:00:00.0Z'),
('estimatedAccuracy', 1.0),
('estimatedAccuracy_uom', 'm'),
('inspireId_localId', 'CadastralZoning-01-localId'),
('inspireId_namespace', 'namespace'),
('label', 'label'),
('level', '3'),
('levelName', ['English', 'Francais', 'Deutsch']),
('levelName_locale', ['en', 'fr', 'de']),
('name_language', ['language']),
('name_nativeness', ['nativeness']),
('name_nameStatus', ['nameStatus']),
('name_pronunciation', None),
('name_spelling_text', ['text']),
('name_spelling_script', ['script']),
('nationalCadastalZoningReference', 'nationalCadastalZoningReference'),
('validFrom', '2002-01-01T00:00:00.0Z'),
('validTo', '2003-01-01T00:00:00.0Z'),
('upperLevelUnit_href', '#ulu.1') ]
for (key,val) in expected:
if feat.GetField(key) != val:
print(key)
print(val)
print(feat.GetField(key))
gdaltest.post_reason('fail')
return 'failure'
if feat.GetGeomFieldRef(0).ExportToWkt() != 'MULTIPOLYGON (((2 49,2 50,3 50,3 49)))':
gdaltest.post_reason('fail')
return 'failure'
if feat.GetGeomFieldRef(1).ExportToWkt() != 'POINT (2.5 49.5)':
gdaltest.post_reason('fail')
return 'failure'
feat = lyr.GetNextFeature()
expected = [ ('gml_id', 'CadastralZoning-02'),
('beginLifespanVersion', '2000-01-01T00:00:00.0Z'),
('endLifespanVersion', None),
('estimatedAccuracy', None),
('estimatedAccuracy_uom', None),
('inspireId_localId', None),
('inspireId_namespace', None),
('label', 'label'),
('level', '3'),
('levelName', ['English']),
('levelName_locale', ['en']),
('name_language', None),
('name_nativeness', None),
('name_nameStatus', None),
('name_pronunciation', None),
('name_spelling_text', None),
('name_spelling_script', None),
('nationalCadastalZoningReference', 'nationalCadastalZoningReference'),
('validFrom', None),
('validTo', None),
('upperLevelUnit_href', None) ]
for (key,val) in expected:
if feat.GetField(key) != val:
print(key)
print(val)
print(feat.GetField(key))
gdaltest.post_reason('fail')
return 'failure'
if feat.GetGeomFieldRef(0).ExportToWkt() != 'MULTIPOLYGON (((2 49,2 50,3 50,3 49)))':
gdaltest.post_reason('fail')
return 'failure'
if feat.GetGeomFieldRef(1) is not None:
gdaltest.post_reason('fail')
return 'failure'
feat = None
lyr = None
ds = None
return 'success'
###############################################################################
# Test GFS conditions
def ogr_gml_59():
if not gdaltest.have_gml_reader:
return 'skip'
# Make sure the .gfs file is more recent than the .gml one
try:
gml_mtime = os.stat('data/testcondition.gml').st_mtime
gfs_mtime = os.stat('data/testcondition.gfs').st_mtime
touch_gfs = gfs_mtime <= gml_mtime
except:
touch_gfs = True
if touch_gfs:
print('Touching .gfs file')
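# Rewriting the first byte in place bumps the .gfs modification time past the .gml one.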
f = open('data/testcondition.gfs', 'rb+')
data = f.read(1)
f.seek(0, 0)
f.write(data)
f.close()
ds = ogr.Open('data/testcondition.gml')
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
expected = [ ('name_en', 'English name'),
('name_fr', 'Nom francais'),
('name_others_lang', ['de']),
('name_others', ['Deutsche name']) ]
for (key,val) in expected:
if feat.GetField(key) != val:
print(key)
print(val)
print(feat.GetField(key))
gdaltest.post_reason('fail')
return 'failure'
feat = None
lyr = None
ds = None
return 'success'
###############################################################################
# Test reading WFS 2.0 GetFeature documents with wfs:FeatureCollection
# as a wfs:member of the top wfs:FeatureCollection
def ogr_gml_60():
if not gdaltest.have_gml_reader:
return 'skip'
# Remove any pre-existing .gfs so the first pass parses the GML itself and the second pass reuses the regenerated .gfs
try:
os.unlink('data/wfs_200_multiplelayers.gfs')
except:
pass
for i in range(2):
ds = ogr.Open('data/wfs_200_multiplelayers.gml')
lyr = ds.GetLayerByName('road')
if lyr.GetFeatureCount() != 1:
gdaltest.post_reason('fail')
return 'failure'
feat = lyr.GetNextFeature()
if feat.GetField('gml_id') != 'road.21':
gdaltest.post_reason('fail')
return 'failure'
lyr = ds.GetLayerByName('popplace')
if lyr.GetFeatureCount() != 1:
gdaltest.post_reason('fail')
return 'failure'
feat = lyr.GetNextFeature()
if feat.GetField('gml_id') != 'popplace.BACMK':
gdaltest.post_reason('fail')
return 'failure'
ds = None
try:
os.unlink('data/wfs_200_multiplelayers.gfs')
except:
pass
return 'success'
###############################################################################
# Test reading an element specified with a full path in <ElementPath>
def ogr_gml_61():
if not gdaltest.have_gml_reader:
return 'skip'
# Make sure the .gfs file is more recent than the .gml one
try:
gml_mtime = os.stat('data/gmlsubfeature.gml').st_mtime
gfs_mtime = os.stat('data/gmlsubfeature.gfs').st_mtime
touch_gfs = gfs_mtime <= gml_mtime
except:
touch_gfs = True
if touch_gfs:
print('Touching .gfs file')
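# Rewriting the first byte in place bumps the .gfs modification time past the .gml one.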
f = open('data/gmlsubfeature.gfs', 'rb+')
data = f.read(1)
f.seek(0, 0)
f.write(data)
f.close()
ds = ogr.Open('data/gmlsubfeature.gml')
lyr = ds.GetLayer(0)
if lyr.GetFeatureCount() != 2:
gdaltest.post_reason('did not get expected feature count')
return 'fail'
feat = lyr.GetNextFeature()
if feat.GetField('gml_id') != 'Object.1' or feat.GetField('foo') != 'bar':
gdaltest.post_reason('fail')
feat.DumpReadable()
return 'fail'
geom = feat.GetGeometryRef()
if geom.ExportToWkt() != 'POLYGON ((2 48,2 49,3 49,3 48,2 48))':
gdaltest.post_reason('fail')
feat.DumpReadable()
return 'fail'
feat = lyr.GetNextFeature()
if feat.GetField('gml_id') != 'Object.2' or feat.GetField('foo') != 'baz':
gdaltest.post_reason('fail')
feat.DumpReadable()
return 'fail'
geom = feat.GetGeometryRef()
if geom.ExportToWkt() != 'POLYGON ((2 -48,2 -49,3 -49,3 -48,2 -48))':
gdaltest.post_reason('fail')
feat.DumpReadable()
return 'fail'
ds = None
return 'success'
###############################################################################
# Test GML_ATTRIBUTES_TO_OGR_FIELDS option
def ogr_gml_62():
if not gdaltest.have_gml_reader:
return 'skip'
try:
os.unlink('tmp/gmlattributes.gfs')
except:
pass
shutil.copy('data/gmlattributes.gml', 'tmp/gmlattributes.gml')
# Default behaviour
ds = ogr.Open('tmp/gmlattributes.gml')
lyr = ds.GetLayer(0)
if lyr.GetLayerDefn().GetFieldCount() != 1:
gdaltest.post_reason('fail')
return 'fail'
ds = None
# Test GML_ATTRIBUTES_TO_OGR_FIELDS=YES
try:
os.unlink('tmp/gmlattributes.gfs')
except:
pass
# Without and then with .gfs
for i in range(2):
if i == 0:
gdal.SetConfigOption('GML_ATTRIBUTES_TO_OGR_FIELDS', 'YES')
ds = ogr.Open('tmp/gmlattributes.gml')
if i == 0:
gdal.SetConfigOption('GML_ATTRIBUTES_TO_OGR_FIELDS', None)
lyr = ds.GetLayer(0)
if lyr.GetLayerDefn().GetFieldCount() != 4:
gdaltest.post_reason('fail')
print(i)
return 'fail'
feat = lyr.GetNextFeature()
if feat.GetField('element_attr1') != '1' or \
feat.GetField('element2_attr1') != 'a' or \
feat.GetField('element2') != 'foo' or \
feat.IsFieldSet('element3_attr1') :
gdaltest.post_reason('fail')
feat.DumpReadable()
print(i)
return 'fail'
feat = lyr.GetNextFeature()
if feat.IsFieldSet('element_attr1') or \
feat.IsFieldSet('element2_attr1') or \
feat.IsFieldSet('element2') or \
feat.GetField('element3_attr1') != 1:
gdaltest.post_reason('fail')
feat.DumpReadable()
print(i)
return 'fail'
feat = lyr.GetNextFeature()
if feat.GetField('element_attr1') != 'a' or \
feat.IsFieldSet('element2_attr1') or \
feat.IsFieldSet('element2') or \
feat.IsFieldSet('element3_attr1') :
gdaltest.post_reason('fail')
feat.DumpReadable()
print(i)
return 'fail'
feat = None
ds = None
return 'success'
###############################################################################
# Test reading RUIAN VFR files
def ogr_gml_63():
if not gdaltest.have_gml_reader:
return 'skip'
### test ST file type
ds = ogr.Open('data/ruian_st_v1.xml.gz')
# check number of layers
nlayers = ds.GetLayerCount()
if nlayers != 14:
return 'fail'
# check name of first layer
lyr = ds.GetLayer(0)
if lyr.GetName() != 'Staty':
return 'fail'
# check geometry column name
if lyr.GetGeometryColumn() != 'DefinicniBod':
return 'fail'
ds = None
### test OB file type
ds = ogr.Open('data/ruian_ob_v1.xml.gz')
# check number of layers
nlayers = ds.GetLayerCount()
if nlayers != 11:
return 'fail'
# check number of features
nfeatures = 0
for i in range(nlayers):
lyr = ds.GetLayer(i)
nfeatures += lyr.GetFeatureCount()
if nfeatures != 7:
return 'fail'
return 'success'
###############################################################################
# Test multiple instantiation of parser (#5571)
def ogr_gml_64():
if not gdaltest.have_gml_reader:
return 'skip'
for parser in ['XERCES', 'EXPAT']:
for i in range(2):
gdal.SetConfigOption('GML_PARSER', parser)
ds = ogr.Open( 'data/rnf_eg.gml' )
gdal.SetConfigOption('GML_PARSER', None)
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
if feat is None:
print(parser)
return 'fail'
return 'success'
###############################################################################
# Test SRSDIMENSION_LOC=GEOMETRY option (#5606)
def ogr_gml_65():
if not gdaltest.have_gml_reader:
return 'skip'
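# Each entry pairs a dataset creation option with the geometry fragment expected in the written GML, i.e. where the srsDimension attribute must be emitted.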
option_expected_list = [ ['SRSDIMENSION_LOC=GEOMETRY', '<ogr:geometryProperty><gml:MultiSurface srsDimension="3"><gml:surfaceMember><gml:Polygon><gml:exterior><gml:LinearRing><gml:posList>0 1 2 3 4 5 6 7 8 0 1 2</gml:posList></gml:LinearRing></gml:exterior></gml:Polygon></gml:surfaceMember></gml:MultiSurface></ogr:geometryProperty>'],
['SRSDIMENSION_LOC=POSLIST', '<ogr:geometryProperty><gml:MultiSurface><gml:surfaceMember><gml:Polygon><gml:exterior><gml:LinearRing><gml:posList srsDimension="3">0 1 2 3 4 5 6 7 8 0 1 2</gml:posList></gml:LinearRing></gml:exterior></gml:Polygon></gml:surfaceMember></gml:MultiSurface></ogr:geometryProperty>'],
['SRSDIMENSION_LOC=GEOMETRY,POSLIST', '<ogr:geometryProperty><gml:MultiSurface srsDimension="3"><gml:surfaceMember><gml:Polygon><gml:exterior><gml:LinearRing><gml:posList srsDimension="3">0 1 2 3 4 5 6 7 8 0 1 2</gml:posList></gml:LinearRing></gml:exterior></gml:Polygon></gml:surfaceMember></gml:MultiSurface></ogr:geometryProperty>'],
]
for (option, expected) in option_expected_list:
filename = '/vsimem/ogr_gml_65.gml'
#filename = 'ogr_gml_65.gml'
ds = ogr.GetDriverByName('GML').CreateDataSource(filename, options = ['FORMAT=GML3',option])
lyr = ds.CreateLayer('lyr')
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetGeometry(ogr.CreateGeometryFromWkt("MULTIPOLYGON (((0 1 2,3 4 5,6 7 8,0 1 2)))"))
lyr.CreateFeature(feat)
ds = None
f = gdal.VSIFOpenL(filename, 'rb')
data = gdal.VSIFReadL(1, 10000, f).decode('ascii')
gdal.VSIFCloseL(f)
if data.find(expected) < 0:
gdaltest.post_reason('fail')
return 'fail'
ds = ogr.Open(filename)
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
if feat.GetGeometryRef().ExportToWkt() != "MULTIPOLYGON (((0 1 2,3 4 5,6 7 8,0 1 2)))":
gdaltest.post_reason('fail')
feat.DumpReadable()
return 'fail'
ds = None
gdal.Unlink(filename)
gdal.Unlink(filename[0:-3]+"xsd")
return 'success'
###############################################################################
# Test curve geometries
def ogr_gml_66():
if not gdaltest.have_gml_reader:
return 'skip'
filename = '/vsimem/ogr_gml_66.gml'
#filename = 'ogr_gml_66.gml'
ds = ogr.GetDriverByName('GML').CreateDataSource(filename, options = ['FORMAT=GML3'])
lyr = ds.CreateLayer('compoundcurve', geom_type = ogr.wkbCompoundCurve)
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('COMPOUNDCURVE (CIRCULARSTRING (0 0,1 1,2 0))'))
lyr.CreateFeature(f)
f = None
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('CIRCULARSTRING (0 0,1 1,2 0)'))
lyr.CreateFeature(f)
f = None
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('LINESTRING (0 0,1 1,2 0)'))
lyr.CreateFeature(f)
f = None
lyr = ds.CreateLayer('curvepolygon', geom_type = ogr.wkbCurvePolygon)
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('CURVEPOLYGON ( CIRCULARSTRING(0 0,1 0,0 0))'))
lyr.CreateFeature(f)
f = None
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POLYGON ((0 0,0 1,1 1,0 0))'))
lyr.CreateFeature(f)
f = None
lyr = ds.CreateLayer('multisurface', geom_type = ogr.wkbMultiSurface)
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('MULTISURFACE (CURVEPOLYGON ( CIRCULARSTRING(0 0,1 0,0 0)))'))
lyr.CreateFeature(f)
f = None
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('MULTIPOLYGON (((0 0,0 1,1 1,0 0)))'))
lyr.CreateFeature(f)
f = None
lyr = ds.CreateLayer('multicurve', geom_type = ogr.wkbMultiCurve)
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('MULTICURVE ( CIRCULARSTRING(0 0,1 0,0 0))'))
lyr.CreateFeature(f)
f = None
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('MULTICURVE ((0 0,0 1,1 1,0 0))'))
lyr.CreateFeature(f)
f = None
lyr = ds.CreateLayer('polygon', geom_type = ogr.wkbPolygon)
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POLYGON ((0 0,0 1,1 1,0 0))'))
lyr.CreateFeature(f)
f = None
lyr = ds.CreateLayer('linestring', geom_type = ogr.wkbLineString)
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('LINESTRING (0 0,0 1,1 1,0 0)'))
lyr.CreateFeature(f)
f = None
lyr = ds.CreateLayer('multipolygon', geom_type = ogr.wkbMultiPolygon)
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('MULTIPOLYGON (((0 0,0 1,1 1,0 0)))'))
lyr.CreateFeature(f)
f = None
lyr = ds.CreateLayer('multilinestring', geom_type = ogr.wkbMultiLineString)
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('MULTILINESTRING ((0 0,0 1,1 1,0 0))'))
lyr.CreateFeature(f)
f = None
lyr = ds.CreateLayer('compoundcurve_untyped')
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('LINESTRING (0 0,1 1,2 0)'))
lyr.CreateFeature(f)
f = None
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('COMPOUNDCURVE (CIRCULARSTRING (0 0,1 1,2 0))'))
lyr.CreateFeature(f)
f = None
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('LINESTRING (0 0,1 1,2 0)'))
lyr.CreateFeature(f)
f = None
lyr = ds.CreateLayer('curvepolygon_untyped')
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POLYGON ((0 0,0 1,1 1,0 0))'))
lyr.CreateFeature(f)
f = None
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('CURVEPOLYGON ( CIRCULARSTRING(0 0,1 0,0 0))'))
lyr.CreateFeature(f)
f = None
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POLYGON ((0 0,0 1,1 1,0 0))'))
lyr.CreateFeature(f)
f = None
lyr = ds.CreateLayer('multisurface_untyped')
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('MULTIPOLYGON (((0 0,0 1,1 1,0 0)))'))
lyr.CreateFeature(f)
f = None
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('MULTISURFACE (CURVEPOLYGON ( CIRCULARSTRING(0 0,1 0,0 0)))'))
lyr.CreateFeature(f)
f = None
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('MULTIPOLYGON (((0 0,0 1,1 1,0 0)))'))
lyr.CreateFeature(f)
f = None
lyr = ds.CreateLayer('multicurve_untyped')
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('MULTILINESTRING ((0 0,0 1,1 1,0 0))'))
lyr.CreateFeature(f)
f = None
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('MULTICURVE (CIRCULARSTRING (0 0,1 1,2 0))'))
lyr.CreateFeature(f)
f = None
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('MULTILINESTRING ((0 0,0 1,1 1,0 0))'))
lyr.CreateFeature(f)
f = None
ds = None
# Test first with .xsd and then without
for i in range(3):
ds = ogr.Open(filename)
lyr = ds.GetLayerByName('compoundcurve')
if lyr.GetGeomType() != ogr.wkbCompoundCurve:
gdaltest.post_reason('fail')
return 'fail'
feat = lyr.GetNextFeature()
if feat.GetGeometryRef().ExportToWkt() != 'COMPOUNDCURVE (CIRCULARSTRING (0 0,1 1,2 0))':
gdaltest.post_reason('fail')
feat.DumpReadable()
return 'fail'
feat = lyr.GetNextFeature()
if feat.GetGeometryRef().ExportToWkt() != 'COMPOUNDCURVE (CIRCULARSTRING (0 0,1 1,2 0))':
gdaltest.post_reason('fail')
feat.DumpReadable()
return 'fail'
feat = lyr.GetNextFeature()
if feat.GetGeometryRef().ExportToWkt() != 'COMPOUNDCURVE ((0 0,1 1,2 0))':
gdaltest.post_reason('fail')
feat.DumpReadable()
return 'fail'
lyr = ds.GetLayerByName('curvepolygon')
if lyr.GetGeomType() != ogr.wkbCurvePolygon:
gdaltest.post_reason('fail')
return 'fail'
feat = lyr.GetNextFeature()
if feat.GetGeometryRef().ExportToWkt() != 'CURVEPOLYGON (CIRCULARSTRING (0 0,0.5 0.5,1 0,0.5 -0.5,0 0))':
gdaltest.post_reason('fail')
feat.DumpReadable()
return 'fail'
feat = lyr.GetNextFeature()
if feat.GetGeometryRef().ExportToWkt() != 'CURVEPOLYGON ((0 0,0 1,1 1,0 0))':
gdaltest.post_reason('fail')
feat.DumpReadable()
return 'fail'
lyr = ds.GetLayerByName('multisurface')
if lyr.GetGeomType() != ogr.wkbMultiSurface:
gdaltest.post_reason('fail')
return 'fail'
feat = lyr.GetNextFeature()
if feat.GetGeometryRef().ExportToWkt() != 'MULTISURFACE (CURVEPOLYGON (CIRCULARSTRING (0 0,0.5 0.5,1 0,0.5 -0.5,0 0)))':
gdaltest.post_reason('fail')
feat.DumpReadable()
return 'fail'
feat = lyr.GetNextFeature()
if feat.GetGeometryRef().ExportToWkt() != 'MULTISURFACE (((0 0,0 1,1 1,0 0)))':
gdaltest.post_reason('fail')
feat.DumpReadable()
return 'fail'
lyr = ds.GetLayerByName('multicurve')
if lyr.GetGeomType() != ogr.wkbMultiCurve:
gdaltest.post_reason('fail')
return 'fail'
feat = lyr.GetNextFeature()
if feat.GetGeometryRef().ExportToWkt() != 'MULTICURVE (CIRCULARSTRING (0 0,0.5 0.5,1 0,0.5 -0.5,0 0))':
gdaltest.post_reason('fail')
feat.DumpReadable()
return 'fail'
feat = lyr.GetNextFeature()
if feat.GetGeometryRef().ExportToWkt() != 'MULTICURVE ((0 0,0 1,1 1,0 0))':
gdaltest.post_reason('fail')
feat.DumpReadable()
return 'fail'
lyr = ds.GetLayerByName('polygon')
if lyr.GetGeomType() != ogr.wkbPolygon:
gdaltest.post_reason('fail')
return 'fail'
feat = lyr.GetNextFeature()
if feat.GetGeometryRef().ExportToWkt() != 'POLYGON ((0 0,0 1,1 1,0 0))':
gdaltest.post_reason('fail')
feat.DumpReadable()
return 'fail'
lyr = ds.GetLayerByName('linestring')
if lyr.GetGeomType() != ogr.wkbLineString:
gdaltest.post_reason('fail')
return 'fail'
feat = lyr.GetNextFeature()
if feat.GetGeometryRef().ExportToWkt() != 'LINESTRING (0 0,0 1,1 1,0 0)':
gdaltest.post_reason('fail')
feat.DumpReadable()
return 'fail'
lyr = ds.GetLayerByName('multipolygon')
if lyr.GetGeomType() != ogr.wkbMultiPolygon:
print(lyr.GetGeomType())
gdaltest.post_reason('fail')
return 'fail'
feat = lyr.GetNextFeature()
if feat.GetGeometryRef().ExportToWkt() != 'MULTIPOLYGON (((0 0,0 1,1 1,0 0)))':
gdaltest.post_reason('fail')
feat.DumpReadable()
return 'fail'
lyr = ds.GetLayerByName('multilinestring')
if lyr.GetGeomType() != ogr.wkbMultiLineString:
gdaltest.post_reason('fail')
return 'fail'
feat = lyr.GetNextFeature()
if feat.GetGeometryRef().ExportToWkt() != 'MULTILINESTRING ((0 0,0 1,1 1,0 0))':
gdaltest.post_reason('fail')
feat.DumpReadable()
return 'fail'
lyr = ds.GetLayerByName('compoundcurve_untyped')
if i != 0:
if lyr.GetGeomType() != ogr.wkbCompoundCurve:
print(lyr.GetGeomType())
gdaltest.post_reason('fail')
return 'fail'
feat = lyr.GetNextFeature()
if feat.GetGeometryRef().ExportToWkt() != 'COMPOUNDCURVE ((0 0,1 1,2 0))':
gdaltest.post_reason('fail')
feat.DumpReadable()
return 'fail'
else:
feat = lyr.GetNextFeature()
if feat.GetGeometryRef().ExportToWkt() != 'LINESTRING (0 0,1 1,2 0)':
gdaltest.post_reason('fail')
feat.DumpReadable()
return 'fail'
feat = lyr.GetNextFeature()
if feat.GetGeometryRef().ExportToWkt() != 'COMPOUNDCURVE (CIRCULARSTRING (0 0,1 1,2 0))':
gdaltest.post_reason('fail')
feat.DumpReadable()
return 'fail'
lyr = ds.GetLayerByName('curvepolygon_untyped')
if i != 0:
if lyr.GetGeomType() != ogr.wkbCurvePolygon:
gdaltest.post_reason('fail')
return 'fail'
feat = lyr.GetNextFeature()
if feat.GetGeometryRef().ExportToWkt() != 'CURVEPOLYGON ((0 0,0 1,1 1,0 0))':
gdaltest.post_reason('fail')
feat.DumpReadable()
return 'fail'
else:
feat = lyr.GetNextFeature()
if feat.GetGeometryRef().ExportToWkt() != 'POLYGON ((0 0,0 1,1 1,0 0))':
gdaltest.post_reason('fail')
feat.DumpReadable()
return 'fail'
feat = lyr.GetNextFeature()
if feat.GetGeometryRef().ExportToWkt() != 'CURVEPOLYGON (CIRCULARSTRING (0 0,0.5 0.5,1 0,0.5 -0.5,0 0))':
gdaltest.post_reason('fail')
feat.DumpReadable()
return 'fail'
lyr = ds.GetLayerByName('multisurface_untyped')
if i != 0:
if lyr.GetGeomType() != ogr.wkbMultiSurface:
gdaltest.post_reason('fail')
return 'fail'
feat = lyr.GetNextFeature()
if feat.GetGeometryRef().ExportToWkt() != 'MULTISURFACE (((0 0,0 1,1 1,0 0)))':
gdaltest.post_reason('fail')
feat.DumpReadable()
return 'fail'
else:
feat = lyr.GetNextFeature()
if feat.GetGeometryRef().ExportToWkt() != 'MULTIPOLYGON (((0 0,0 1,1 1,0 0)))':
gdaltest.post_reason('fail')
feat.DumpReadable()
return 'fail'
feat = lyr.GetNextFeature()
if feat.GetGeometryRef().ExportToWkt() != 'MULTISURFACE (CURVEPOLYGON (CIRCULARSTRING (0 0,0.5 0.5,1 0,0.5 -0.5,0 0)))':
gdaltest.post_reason('fail')
feat.DumpReadable()
return 'fail'
lyr = ds.GetLayerByName('multicurve_untyped')
if i != 0:
if lyr.GetGeomType() != ogr.wkbMultiCurve:
gdaltest.post_reason('fail')
return 'fail'
feat = lyr.GetNextFeature()
if feat.GetGeometryRef().ExportToWkt() != 'MULTICURVE ((0 0,0 1,1 1,0 0))':
gdaltest.post_reason('fail')
feat.DumpReadable()
return 'fail'
else:
feat = lyr.GetNextFeature()
if feat.GetGeometryRef().ExportToWkt() != 'MULTILINESTRING ((0 0,0 1,1 1,0 0))':
gdaltest.post_reason('fail')
feat.DumpReadable()
return 'fail'
feat = lyr.GetNextFeature()
if feat.GetGeometryRef().ExportToWkt() != 'MULTICURVE (CIRCULARSTRING (0 0,1 1,2 0))':
gdaltest.post_reason('fail')
feat.DumpReadable()
return 'fail'
ds = None
gdal.Unlink(filename[0:-3]+"xsd")
gdal.Unlink(filename)
gdal.Unlink(filename[0:-3]+"gfs")
return 'success'
###############################################################################
# Test boolean, int16 and integer64 types
def ogr_gml_67():
if not gdaltest.have_gml_reader:
return 'skip'
filename = '/vsimem/ogr_gml_67.gml'
ds = ogr.GetDriverByName('GML').CreateDataSource(filename)
lyr = ds.CreateLayer('test')
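# Create fields exercising the boolean, int16 and float32 subtypes plus 64-bit integer and integer list types.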
fld_defn = ogr.FieldDefn('b1', ogr.OFTInteger)
fld_defn.SetSubType(ogr.OFSTBoolean)
lyr.CreateField(fld_defn)
fld_defn = ogr.FieldDefn('b2', ogr.OFTInteger)
fld_defn.SetSubType(ogr.OFSTBoolean)
lyr.CreateField(fld_defn)
fld_defn = ogr.FieldDefn('bool_list', ogr.OFTIntegerList)
fld_defn.SetSubType(ogr.OFSTBoolean)
lyr.CreateField(fld_defn)
fld_defn = ogr.FieldDefn('short', ogr.OFTInteger)
fld_defn.SetSubType(ogr.OFSTInt16)
lyr.CreateField(fld_defn)
fld_defn = ogr.FieldDefn('float', ogr.OFTReal)
fld_defn.SetSubType(ogr.OFSTFloat32)
lyr.CreateField(fld_defn)
fld_defn = ogr.FieldDefn('int64', ogr.OFTInteger64)
lyr.CreateField(fld_defn)
fld_defn = ogr.FieldDefn('int64list', ogr.OFTInteger64List)
lyr.CreateField(fld_defn)
f = ogr.Feature(lyr.GetLayerDefn())
f.SetField(0, 1)
f.SetField(1, 0)
f.SetFieldIntegerList(2, [1,0])
f.SetField(3, -32768)
f.SetField(4, 1.23)
f.SetField(5, 1)
f.SetFieldInteger64List(6, [1])
lyr.CreateFeature(f)
f = ogr.Feature(lyr.GetLayerDefn())
f.SetFID(1234567890123)
f.SetField(5, 1234567890123)
f.SetFieldInteger64List(6, [1, 1234567890123])
lyr.CreateFeature(f)
f = None
ds = None
# Test first with .xsd and then without
for i in range(3):
ds = ogr.Open(filename)
lyr = ds.GetLayer(0)
if lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('b1')).GetType() != ogr.OFTInteger or \
lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('b1')).GetSubType() != ogr.OFSTBoolean:
print(i)
gdaltest.post_reason('fail')
return 'fail'
if lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('bool_list')).GetType() != ogr.OFTIntegerList or \
lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('bool_list')).GetSubType() != ogr.OFSTBoolean:
print(i)
gdaltest.post_reason('fail')
return 'fail'
if i == 0:
if lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('short')).GetType() != ogr.OFTInteger or \
lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('short')).GetSubType() != ogr.OFSTInt16:
print(i)
gdaltest.post_reason('fail')
return 'fail'
if i == 0:
if lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('float')).GetType() != ogr.OFTReal or \
lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('float')).GetSubType() != ogr.OFSTFloat32:
print(i)
gdaltest.post_reason('fail')
return 'fail'
if lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('int64')).GetType() != ogr.OFTInteger64:
print(i)
gdaltest.post_reason('fail')
return 'fail'
if lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('int64list')).GetType() != ogr.OFTInteger64List:
print(i)
gdaltest.post_reason('fail')
return 'fail'
f = lyr.GetNextFeature()
if f.GetField('b1') != 1 or f.GetField('b2') != 0 or f.GetFieldAsString('bool_list') != '(2:1,0)' or f.GetField('short') != -32768 or f.GetField('float') != 1.23:
print(i)
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
f = lyr.GetNextFeature()
if f.GetFID() != 1234567890123 or f.GetField('int64') != 1234567890123 or f.GetField('int64list') != [1, 1234567890123]:
print(i)
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
ds = None
gdal.Unlink(filename[0:-3]+"xsd")
gdal.Unlink(filename)
gdal.Unlink(filename[0:-3]+"gfs")
return 'success'
###############################################################################
# Test reading GML with an .xsd offering a choice of geometry properties
def ogr_gml_68():
if not gdaltest.have_gml_reader:
return 'skip'
ds = ogr.Open('data/choicepolygonmultipolygon.gml')
expected_results = [ 'MULTIPOLYGON (((0 0,0 1,1 1,1 0,0 0)))',
'MULTIPOLYGON (((0 0,0 1,1 1,1 0,0 0)),((10 0,10 1,11 1,11 0,10 0)))' ]
lyr = ds.GetLayer(0)
if lyr.GetGeomType() != ogr.wkbMultiPolygon:
gdaltest.post_reason(' did not get expected layer geometry type' )
return 'fail'
for i in range(2):
feat = lyr.GetNextFeature()
geom = feat.GetGeometryRef()
got_wkt = geom.ExportToWkt()
if got_wkt != expected_results[i]:
gdaltest.post_reason('did not get expected geometry' )
print(got_wkt)
return 'fail'
ds = None
return 'success'
###############################################################################
# Test not nullable fields
def ogr_gml_69():
if not gdaltest.have_gml_reader:
return 'skip'
ds = ogr.GetDriverByName('GML').CreateDataSource('/vsimem/ogr_gml_69.gml')
lyr = ds.CreateLayer('test', geom_type = ogr.wkbNone)
field_defn = ogr.FieldDefn('field_not_nullable', ogr.OFTString)
field_defn.SetNullable(0)
lyr.CreateField(field_defn)
field_defn = ogr.FieldDefn('field_nullable', ogr.OFTString)
lyr.CreateField(field_defn)
field_defn = ogr.GeomFieldDefn('geomfield_not_nullable', ogr.wkbPoint)
field_defn.SetNullable(0)
lyr.CreateGeomField(field_defn)
field_defn = ogr.GeomFieldDefn('geomfield_nullable', ogr.wkbPoint)
lyr.CreateGeomField(field_defn)
f = ogr.Feature(lyr.GetLayerDefn())
f.SetField('field_not_nullable', 'not_null')
f.SetGeomFieldDirectly('geomfield_not_nullable', ogr.CreateGeometryFromWkt('POINT(0 0)'))
lyr.CreateFeature(f)
f = None
# Error case: missing geometry
f = ogr.Feature(lyr.GetLayerDefn())
f.SetField('field_not_nullable', 'not_null')
gdal.PushErrorHandler()
ret = lyr.CreateFeature(f)
gdal.PopErrorHandler()
if ret == 0:
gdaltest.post_reason('fail')
return 'fail'
f = None
# Error case: missing non-nullable field
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometryDirectly(ogr.CreateGeometryFromWkt('POINT(0 0)'))
gdal.PushErrorHandler()
ret = lyr.CreateFeature(f)
gdal.PopErrorHandler()
if ret == 0:
gdaltest.post_reason('fail')
return 'fail'
f = None
ds = None
ds = gdal.OpenEx('/vsimem/ogr_gml_69.gml', open_options = ['EMPTY_AS_NULL=NO'])
lyr = ds.GetLayerByName('test')
if lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('field_not_nullable')).IsNullable() != 0:
gdaltest.post_reason('fail')
return 'fail'
if lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('field_nullable')).IsNullable() != 1:
gdaltest.post_reason('fail')
return 'fail'
if lyr.GetLayerDefn().GetGeomFieldDefn(lyr.GetLayerDefn().GetGeomFieldIndex('geomfield_not_nullable')).IsNullable() != 0:
gdaltest.post_reason('fail')
return 'fail'
if lyr.GetLayerDefn().GetGeomFieldDefn(lyr.GetLayerDefn().GetGeomFieldIndex('geomfield_nullable')).IsNullable() != 1:
gdaltest.post_reason('fail')
return 'fail'
ds = None
gdal.Unlink("/vsimem/ogr_gml_69.gml")
gdal.Unlink("/vsimem/ogr_gml_69.xsd")
return 'success'
###############################################################################
# Test default field values (not really supported, but we must do something since we
# support not-nullable fields)
def ogr_gml_70():
if not gdaltest.have_gml_reader:
return 'skip'
ds = ogr.GetDriverByName('GML').CreateDataSource('/vsimem/ogr_gml_70.gml')
lyr = ds.CreateLayer('test', geom_type = ogr.wkbNone)
field_defn = ogr.FieldDefn( 'field_string', ogr.OFTString )
field_defn.SetDefault("'a'")
field_defn.SetNullable(0)
lyr.CreateField(field_defn)
f = ogr.Feature(lyr.GetLayerDefn())
lyr.CreateFeature(f)
f = None
ds = None
ds = ogr.Open('/vsimem/ogr_gml_70.gml')
lyr = ds.GetLayerByName('test')
f = lyr.GetNextFeature()
if f.GetField('field_string') != 'a':
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
ds = None
gdal.Unlink("/vsimem/ogr_gml_70.gml")
gdal.Unlink("/vsimem/ogr_gml_70.xsd")
return 'success'
###############################################################################
# Test reading WFS 2.0 layer resulting from a join operation
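# Helper shared by ogr_gml_71 below: checks the schema and the two features of the joined layer.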
def ogr_gml_71_helper(ds):
if ds.GetLayerCount() != 1:
gdaltest.post_reason('fail')
print(ds.GetLayerCount())
return 'fail'
lyr = ds.GetLayer(0)
if lyr.GetName() != 'join_table1_table2':
gdaltest.post_reason('fail')
print(lyr.GetName())
return 'fail'
fields = [ ('table1.gml_id', ogr.OFTString),
('table1.foo', ogr.OFTInteger),
('table1.bar', ogr.OFTInteger),
('table2.gml_id', ogr.OFTString),
('table2.bar', ogr.OFTInteger),
('table2.baz', ogr.OFTString) ]
layer_defn = lyr.GetLayerDefn()
if layer_defn.GetFieldCount() != len(fields):
gdaltest.post_reason('fail')
print(layer_defn.GetFieldCount())
return 'fail'
for i in range(len(fields)):
fld_defn = layer_defn.GetFieldDefn(i)
if fld_defn.GetName() != fields[i][0]:
gdaltest.post_reason('fail')
print(i)
print(fld_defn.GetName())
return 'fail'
if fld_defn.GetType() != fields[i][1]:
gdaltest.post_reason('fail')
print(i)
print(fld_defn.GetType())
return 'fail'
if layer_defn.GetGeomFieldCount() != 2:
gdaltest.post_reason('fail')
print(layer_defn.GetGeomFieldCount())
return 'fail'
if layer_defn.GetGeomFieldDefn(0).GetName() != 'table1.geometry':
gdaltest.post_reason('fail')
print(layer_defn.GetGeomFieldDefn(0).GetName())
return 'fail'
if layer_defn.GetGeomFieldDefn(1).GetName() != 'table2.geometry':
gdaltest.post_reason('fail')
print(layer_defn.GetGeomFieldDefn(1).GetName())
return 'fail'
f = lyr.GetNextFeature()
if f.GetField('table1.gml_id') != 'table1-1' or \
f.GetField('table1.foo') != 1 or \
f.IsFieldSet('table1.bar') or \
f.GetField('table2.gml_id') != 'table2-1' or \
f.GetField('table2.bar') != 2 or \
f.GetField('table2.baz') != 'foo' or \
f.GetGeomFieldRef(0) is not None or \
f.GetGeomFieldRef(1).ExportToWkt() != 'POINT (2 49)':
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
f = lyr.GetNextFeature()
if f.GetField('table1.gml_id') != 'table1-2' or \
f.IsFieldSet('table1.foo') or \
f.GetField('table1.bar') != 2 or \
f.GetField('table2.gml_id') != 'table2-2' or \
f.GetField('table2.bar') != 2 or \
f.GetField('table2.baz') != 'bar' or \
f.GetGeomFieldRef(0).ExportToWkt() != 'POINT (3 50)' or \
f.GetGeomFieldRef(1).ExportToWkt() != 'POINT (2 50)':
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
return 'success'
def ogr_gml_71():
if not gdaltest.have_gml_reader:
return 'skip'
# With .xsd
try:
os.unlink('data/wfsjointlayer.gfs')
except:
pass
ds = ogr.Open('data/wfsjointlayer.gml')
if ogr_gml_71_helper(ds) != 'success':
gdaltest.post_reason('fail')
return 'fail'
ds = None
try:
os.unlink('data/wfsjointlayer.gfs')
gdaltest.post_reason('fail')
return 'fail'
except:
pass
# With .xsd but that is only partially understood
ds = gdal.OpenEx('data/wfsjointlayer.gml', open_options = ['XSD=data/wfsjointlayer_not_understood.xsd'])
if ogr_gml_71_helper(ds) != 'success':
gdaltest.post_reason('fail')
return 'fail'
ds = None
try:
os.unlink('data/wfsjointlayer.gfs')
except:
gdaltest.post_reason('fail')
return 'fail'
# Without .xsd nor .gfs
shutil.copy('data/wfsjointlayer.gml', 'tmp/wfsjointlayer.gml')
try:
os.unlink('tmp/wfsjointlayer.gfs')
except:
pass
ds = ogr.Open('tmp/wfsjointlayer.gml')
if ogr_gml_71_helper(ds) != 'success':
gdaltest.post_reason('fail')
return 'fail'
ds = None
try:
os.stat('tmp/wfsjointlayer.gfs')
except:
gdaltest.post_reason('fail')
return 'fail'
# With .gfs
ds = ogr.Open('tmp/wfsjointlayer.gml')
if ogr_gml_71_helper(ds) != 'success':
gdaltest.post_reason('fail')
return 'fail'
ds = None
return 'success'
###############################################################################
# Test name and description
def ogr_gml_72():
if not gdaltest.have_gml_reader:
return 'skip'
ds = ogr.GetDriverByName('GML').CreateDataSource('/vsimem/ogr_gml_72.gml', options = ['NAME=name', 'DESCRIPTION=description'])
ds.SetMetadata({ 'NAME': 'ignored', 'DESCRIPTION': 'ignored' })
ds = None
ds = ogr.Open('/vsimem/ogr_gml_72.gml')
if ds.GetMetadata() != { 'NAME': 'name', 'DESCRIPTION': 'description' }:
gdaltest.post_reason('fail')
print(ds.GetMetadata())
return 'fail'
ds = None
gdal.Unlink("/vsimem/ogr_gml_72.gml")
gdal.Unlink("/vsimem/ogr_gml_72.xsd")
ds = ogr.GetDriverByName('GML').CreateDataSource('/vsimem/ogr_gml_72.gml')
ds.SetMetadata({'NAME': 'name', 'DESCRIPTION': 'description' })
ds = None
ds = ogr.Open('/vsimem/ogr_gml_72.gml')
if ds.GetMetadata() != { 'NAME': 'name', 'DESCRIPTION': 'description' }:
gdaltest.post_reason('fail')
print(ds.GetMetadata())
return 'fail'
ds = None
gdal.Unlink("/vsimem/ogr_gml_72.gml")
gdal.Unlink("/vsimem/ogr_gml_72.xsd")
return 'success'
###############################################################################
# Read a CSW GetRecordsResponse document
def ogr_gml_73():
if not gdaltest.have_gml_reader:
return 'skip'
try:
os.remove( 'data/cswresults.gfs' )
except:
pass
ds = ogr.Open('data/cswresults.xml')
for i in range(3):
lyr = ds.GetLayer(i)
sr = lyr.GetSpatialRef()
got_wkt = sr.ExportToWkt()
if got_wkt.find('4326') < 0:
gdaltest.post_reason('did not get expected SRS')
print(got_wkt)
return 'fail'
feat = lyr.GetNextFeature()
geom = feat.GetGeometryRef()
got_wkt = geom.ExportToWkt()
if got_wkt != 'POLYGON ((-180 -90,-180 90,180 90,180 -90,-180 -90))':
gdaltest.post_reason('did not get expected geometry')
print(got_wkt)
return 'fail'
ds = None
try:
os.remove( 'data/cswresults.gfs' )
except:
pass
return 'success'
###############################################################################
# Test FORCE_SRS_DETECTION open option
def ogr_gml_74():
if not gdaltest.have_gml_reader:
return 'skip'
ds = gdal.OpenEx('data/expected_gml_gml32.gml', open_options = ['FORCE_SRS_DETECTION=YES'] )
lyr = ds.GetLayer(0)
if lyr.GetSpatialRef() is None:
gdaltest.post_reason('did not get expected SRS')
return 'fail'
return 'success'
###############################################################################
# Test we don't open a WMTS Capabilities doc
def ogr_gml_75():
if not gdaltest.have_gml_reader:
return 'skip'
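# A minimal WMTS GetCapabilities document is written to a memory file; the GML driver must not misidentify it as GML.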
gdal.FileFromMemBuffer("/vsimem/ogr_gml_75.xml",
"""<?xml version="1.0" encoding="UTF-8"?>
<Capabilities xmlns="http://www.opengis.net/wmts/1.0"
xmlns:ows="http://www.opengis.net/ows/1.1"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:gml="http://www.opengis.net/gml"
xsi:schemaLocation="http://www.opengis.net/wmts/1.0 http://somewhere"
version="1.0.0">
<ows:OperationsMetadata>
<ows:Operation name="GetCapabilities">
<ows:DCP>
<ows:HTTP>
<ows:Get xlink:href="http://foo"/>
</ows:HTTP>
</ows:DCP>
</ows:Operation>
<ows:Operation name="GetTile">
<ows:DCP>
<ows:HTTP>
<ows:Get xlink:href="http://foo"/>
</ows:HTTP>
</ows:DCP>
</ows:Operation>
</ows:OperationsMetadata>
</Capabilities>""")
ds = ogr.Open('/vsimem/ogr_gml_75.xml')
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
gdal.Unlink('/vsimem/ogr_gml_75.xml')
return 'success'
###############################################################################
# Cleanup
def ogr_gml_cleanup():
if not gdaltest.have_gml_reader:
return 'skip'
gdal.SetConfigOption( 'GML_SKIP_RESOLVE_ELEMS', None )
gdal.SetConfigOption( 'GML_SAVE_RESOLVED_TO', None )
gdaltest.clean_tmp()
return ogr_gml_clean_files()
def ogr_gml_clean_files():
try:
os.remove( 'data/bom.gfs' )
except:
pass
try:
os.remove( 'data/utf8.gfs' )
except:
pass
try:
os.remove( 'data/ticket_2349_test_1.gfs' )
except:
pass
try:
os.remove( 'data/citygml.gfs' )
except:
pass
try:
os.remove( 'data/gnis_pop_100.gfs' )
except:
pass
try:
os.remove( 'data/gnis_pop_110.gfs' )
except:
pass
try:
os.remove( 'data/paris_typical_strike_demonstration.gfs' )
except:
pass
try:
os.remove( 'data/global_geometry.gfs' )
except:
pass
try:
os.remove( 'tmp/global_geometry.gfs' )
except:
pass
try:
os.remove( 'tmp/global_geometry.xml' )
except:
pass
try:
os.remove( 'data/curveProperty.gfs' )
except:
pass
try:
os.remove( 'tmp/ogr_gml_26.gml' )
os.remove( 'tmp/ogr_gml_26.xsd' )
except:
pass
try:
os.remove( 'tmp/ogr_gml_27.gml' )
os.remove( 'tmp/ogr_gml_27.xsd' )
except:
pass
try:
os.remove( 'tmp/ogr_gml_28.gml' )
os.remove( 'tmp/ogr_gml_28.gfs' )
except:
pass
try:
os.remove( 'tmp/GmlTopo-sample.sqlite' )
except:
pass
try:
os.remove( 'tmp/GmlTopo-sample.gfs' )
except:
pass
try:
os.remove( 'tmp/GmlTopo-sample.resolved.gml' )
except:
pass
try:
os.remove( 'tmp/GmlTopo-sample.xml' )
except:
pass
try:
os.remove( 'tmp/sample_gml_face_hole_negative_no.sqlite' )
except:
pass
try:
os.remove( 'tmp/sample_gml_face_hole_negative_no.gfs' )
except:
pass
try:
os.remove( 'tmp/sample_gml_face_hole_negative_no.resolved.gml' )
except:
pass
try:
os.remove( 'tmp/sample_gml_face_hole_negative_no.xml' )
except:
pass
try:
os.remove( 'data/wfs_typefeature.gfs' )
except:
pass
try:
os.remove( 'tmp/ogr_gml_51.gml' )
os.remove( 'tmp/ogr_gml_51.xsd' )
except:
pass
try:
os.remove( 'tmp/gmlattributes.gml' )
os.remove( 'tmp/gmlattributes.gfs' )
except:
pass
files = os.listdir('data')
for filename in files:
if len(filename) > 13 and filename[-13:] == '.resolved.gml':
os.unlink('data/' + filename)
return 'success'
gdaltest_list = [
ogr_gml_clean_files,
ogr_gml_1,
ogr_gml_2,
ogr_gml_3,
ogr_gml_4,
ogr_gml_5,
ogr_gml_6,
ogr_gml_7,
ogr_gml_8,
ogr_gml_9,
ogr_gml_10,
ogr_gml_11,
ogr_gml_12,
ogr_gml_13,
ogr_gml_14,
ogr_gml_15,
ogr_gml_16,
ogr_gml_17,
ogr_gml_18,
ogr_gml_19,
ogr_gml_20,
ogr_gml_21,
ogr_gml_21_deegree3,
ogr_gml_21_gml32,
ogr_gml_22,
ogr_gml_23,
ogr_gml_24,
ogr_gml_25,
ogr_gml_26,
ogr_gml_27,
ogr_gml_28,
ogr_gml_29,
ogr_gml_30,
ogr_gml_31,
ogr_gml_32,
ogr_gml_33,
ogr_gml_34,
ogr_gml_35,
ogr_gml_36,
ogr_gml_37,
ogr_gml_38,
ogr_gml_39,
ogr_gml_40,
ogr_gml_41,
ogr_gml_42,
ogr_gml_43,
ogr_gml_44,
ogr_gml_45,
ogr_gml_46,
ogr_gml_47,
ogr_gml_48,
ogr_gml_49,
ogr_gml_50,
ogr_gml_51,
ogr_gml_52,
ogr_gml_53,
ogr_gml_54,
ogr_gml_55,
ogr_gml_56,
ogr_gml_57,
ogr_gml_58,
ogr_gml_59,
ogr_gml_60,
ogr_gml_61,
ogr_gml_62,
ogr_gml_63,
ogr_gml_64,
ogr_gml_65,
ogr_gml_66,
ogr_gml_67,
ogr_gml_68,
ogr_gml_69,
ogr_gml_70,
ogr_gml_71,
ogr_gml_72,
ogr_gml_73,
ogr_gml_74,
ogr_gml_75,
ogr_gml_cleanup ]
disabled_gdaltest_list = [
ogr_gml_clean_files,
ogr_gml_1,
ogr_gml_71,
ogr_gml_cleanup ]
if __name__ == '__main__':
gdaltest.setup_run( 'ogr_gml_read' )
gdaltest.run_tests( gdaltest_list )
gdaltest.summarize()