filename (string, 4–198 chars) | content (string, 25–939k chars) | environment (list) | variablearg (list) | constarg (list) | variableargjson (string, 1 class) | constargjson (string, 2–3.9k chars) | lang (string, 3 classes) | constargcount (float64, 0–129) | variableargcount (float64, 0–0) | sentence (string, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
biotransformers/utils/logger.py | """This module builds a general logger."""
import logging
import os
def logger(module_name: str) -> logging.Logger:
"""Configure the logger with formatter and handlers.
The log level depends on the environment variable `BIO_LOG_LEVEL`.
- 0: NOTSET, will be set to DEBUG
- 1: DEBUG
- 2: INFO (default)
- 3: WARNING
- 4: ERROR
- 5: CRITICAL
https://docs.python.org/3/library/logging.html#levels
Args:
module_name (str): module name
Returns:
[Logger]: instantiated logger object
"""
if module_name.endswith("py"):
module_name = os.path.splitext(module_name)[0]
logger_ = logging.getLogger(module_name)
logger_.propagate = False
log_level = os.environ.get("BIO_LOG_LEVEL", "2")
log_level_int = max(int(log_level) * 10, 10)
logger_.setLevel(log_level_int)
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s: %(message)s")
handler.setFormatter(formatter)
handler.setLevel(log_level_int)
logger_.addHandler(handler)
return logger_
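# Usage sketch (not part of the original module; it only relies on what is defined above):
#
#   os.environ["BIO_LOG_LEVEL"] = "3"      # 3 * 10 = 30 -> WARNING
#   log = logger("my_module.py")           # the ".py" extension is stripped from the logger name
#   log.warning("model weights not found") # prints "WARNING: model weights not found"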
| [] | [] | [
"BIO_LOG_LEVEL"
] | [] | ["BIO_LOG_LEVEL"] | python | 1 | 0 | |
pkg/karmadactl/get.go | package karmadactl
import (
"context"
"encoding/json"
"fmt"
"io"
"os"
"sort"
"strings"
"sync"
"github.com/spf13/cobra"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/cli-runtime/pkg/printers"
"k8s.io/cli-runtime/pkg/resource"
"k8s.io/client-go/rest"
watchtools "k8s.io/client-go/tools/watch"
"k8s.io/kubectl/pkg/cmd/get"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/interrupt"
utilpointer "k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
"github.com/karmada-io/karmada/pkg/karmadactl/options"
"github.com/karmada-io/karmada/pkg/util/gclient"
"github.com/karmada-io/karmada/pkg/util/helper"
"github.com/karmada-io/karmada/pkg/util/names"
)
const (
printColumnClusterNum = 1
proxyURL = "/apis/cluster.karmada.io/v1alpha1/clusters/%s/proxy/"
)
var (
getIn = os.Stdin
getOut = os.Stdout
getErr = os.Stderr
podColumns = []metav1.TableColumnDefinition{
{Name: "CLUSTER", Type: "string", Format: "", Priority: 0},
{Name: "ADOPTION", Type: "string", Format: "", Priority: 0},
}
eventColumn = metav1.TableColumnDefinition{Name: "EVENT", Type: "string", Format: "", Priority: 0}
getShort = `Display one or many resources`
)
// NewCmdGet creates a new get command
func NewCmdGet(karmadaConfig KarmadaConfig, parentCommand string) *cobra.Command {
ioStreams := genericclioptions.IOStreams{In: getIn, Out: getOut, ErrOut: getErr}
o := NewCommandGetOptions("karmadactl", ioStreams)
cmd := &cobra.Command{
Use: "get [NAME | -l label | -n namespace] [flags]",
DisableFlagsInUseLine: true,
Short: getShort,
SilenceUsage: true,
Example: getExample(parentCommand),
RunE: func(cmd *cobra.Command, args []string) error {
if err := o.Complete(); err != nil {
return err
}
if err := o.Validate(cmd); err != nil {
return err
}
if err := o.Run(karmadaConfig, cmd, args); err != nil {
return err
}
return nil
},
}
o.GlobalCommandOptions.AddFlags(cmd.Flags())
o.PrintFlags.AddFlags(cmd)
cmd.Flags().StringVarP(&o.Namespace, "namespace", "n", "default", "-n=namespace or -n namespace")
cmd.Flags().StringVarP(&o.LabelSelector, "labels", "l", "", "-l=label or -l label")
cmd.Flags().StringSliceVarP(&o.Clusters, "clusters", "C", []string{}, "-C=member1,member2")
cmd.Flags().StringVar(&o.ClusterNamespace, "cluster-namespace", options.DefaultKarmadaClusterNamespace, "Namespace in the control plane where member clusters are stored.")
cmd.Flags().BoolVarP(&o.AllNamespaces, "all-namespaces", "A", o.AllNamespaces, "If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace.")
cmd.Flags().BoolVar(&o.IgnoreNotFound, "ignore-not-found", o.IgnoreNotFound, "If the requested object does not exist the command will return exit code 0.")
cmd.Flags().BoolVarP(&o.Watch, "watch", "w", o.Watch, "After listing/getting the requested object, watch for changes. Uninitialized objects are excluded if no object name is provided.")
cmd.Flags().BoolVar(&o.WatchOnly, "watch-only", o.WatchOnly, "Watch for changes to the requested object(s), without listing/getting first.")
cmd.Flags().BoolVar(&o.OutputWatchEvents, "output-watch-events", o.OutputWatchEvents, "Output watch event objects when --watch or --watch-only is used. Existing objects are output as initial ADDED events.")
return cmd
}
// CommandGetOptions contains the input to the get command.
type CommandGetOptions struct {
// global flags
options.GlobalCommandOptions
// ClusterNamespace holds the namespace name where the member cluster objects are stored.
ClusterNamespace string
Clusters []string
PrintFlags *get.PrintFlags
ToPrinter func(*meta.RESTMapping, *bool, bool, bool) (printers.ResourcePrinterFunc, error)
IsHumanReadablePrinter bool
CmdParent string
resource.FilenameOptions
Watch bool
WatchOnly bool
ChunkSize int64
OutputWatchEvents bool
LabelSelector string
FieldSelector string
AllNamespaces bool
Namespace string
ExplicitNamespace bool
ServerPrint bool
NoHeaders bool
Sort bool
IgnoreNotFound bool
Export bool
genericclioptions.IOStreams
}
// NewCommandGetOptions returns a GetOptions with default chunk size 500.
func NewCommandGetOptions(parent string, streams genericclioptions.IOStreams) *CommandGetOptions {
return &CommandGetOptions{
PrintFlags: get.NewGetPrintFlags(),
CmdParent: parent,
IOStreams: streams,
ChunkSize: 500,
ServerPrint: true,
}
}
// Complete takes the command arguments and infers any remaining options.
func (g *CommandGetOptions) Complete() error {
newScheme := gclient.NewSchema()
templateArg := ""
if g.PrintFlags.TemplateFlags != nil && g.PrintFlags.TemplateFlags.TemplateArgument != nil {
templateArg = *g.PrintFlags.TemplateFlags.TemplateArgument
}
// human readable printers have special conversion rules, so we determine if we're using one.
if (len(*g.PrintFlags.OutputFormat) == 0 && len(templateArg) == 0) || *g.PrintFlags.OutputFormat == "wide" {
g.IsHumanReadablePrinter = true
}
g.ToPrinter = func(mapping *meta.RESTMapping, outputObjects *bool, withNamespace bool, withKind bool) (printers.ResourcePrinterFunc, error) {
// make a new copy of current flags / opts before mutating
printFlags := g.PrintFlags.Copy()
if mapping != nil {
printFlags.SetKind(mapping.GroupVersionKind.GroupKind())
}
if withNamespace {
_ = printFlags.EnsureWithNamespace()
}
if withKind {
_ = printFlags.EnsureWithKind()
}
printer, err := printFlags.ToPrinter()
if err != nil {
return nil, err
}
printer, err = printers.NewTypeSetter(newScheme).WrapToPrinter(printer, nil)
if err != nil {
return nil, err
}
if outputObjects != nil {
printer = &skipPrinter{delegate: printer, output: outputObjects}
}
if g.ServerPrint {
printer = &get.TablePrinter{Delegate: printer}
}
return printer.PrintObj, nil
}
return nil
}
// Validate checks the set of flags provided by the user.
func (g *CommandGetOptions) Validate(cmd *cobra.Command) error {
if cmdutil.GetFlagBool(cmd, "show-labels") {
outputOption := cmd.Flags().Lookup("output").Value.String()
if outputOption != "" && outputOption != "wide" {
return fmt.Errorf("--show-labels option cannot be used with %s printer", outputOption)
}
}
if g.OutputWatchEvents && !(g.Watch || g.WatchOnly) {
return fmt.Errorf("--output-watch-events option can only be used with --watch or --watch-only")
}
return nil
}
// Obj cluster info
type Obj struct {
Cluster string
Info *resource.Info
}
// WatchObj is an obj that is watched
type WatchObj struct {
Cluster string
r *resource.Result
}
// RBInfo resourcebinding info and print info
var RBInfo map[string]*OtherPrint
// OtherPrint holds the Applied value used in the display column
type OtherPrint struct {
Applied interface{}
}
// Run performs the get operation.
func (g *CommandGetOptions) Run(karmadaConfig KarmadaConfig, cmd *cobra.Command, args []string) error {
mux := sync.Mutex{}
var wg sync.WaitGroup
var objs []Obj
var watchObjs []WatchObj
var allErrs []error
if g.AllNamespaces {
g.ExplicitNamespace = false
}
outputOption := cmd.Flags().Lookup("output").Value.String()
if strings.Contains(outputOption, "custom-columns") || outputOption == "yaml" || strings.Contains(outputOption, "json") {
g.ServerPrint = false
}
clusterInfos := make(map[string]*ClusterInfo)
RBInfo = make(map[string]*OtherPrint)
karmadaRestConfig, err := clusterInfoInit(g, karmadaConfig, clusterInfos)
if err != nil {
return err
}
wg.Add(len(g.Clusters))
for idx := range g.Clusters {
g.setClusterProxyInfo(karmadaRestConfig, g.Clusters[idx], clusterInfos)
f := getFactory(g.Clusters[idx], clusterInfos)
go g.getObjInfo(&wg, &mux, f, g.Clusters[idx], &objs, &watchObjs, &allErrs, args)
}
wg.Wait()
if g.Watch || g.WatchOnly {
return g.watch(watchObjs)
}
if !g.IsHumanReadablePrinter {
// have printed objects in yaml or json format above
return nil
}
// sort objects by resource kind to classify them
sort.Slice(objs, func(i, j int) bool {
return objs[i].Info.Mapping.Resource.String() < objs[j].Info.Mapping.Resource.String()
})
g.printObjs(objs, &allErrs, args)
return utilerrors.NewAggregate(allErrs)
}
// printObjs prints objects from multiple clusters
func (g *CommandGetOptions) printObjs(objs []Obj, allErrs *[]error, args []string) {
var err error
errs := sets.NewString()
printWithKind := multipleGVKsRequested(objs)
var printer printers.ResourcePrinter
var lastMapping *meta.RESTMapping
// track if we write any output
trackingWriter := &trackingWriterWrapper{Delegate: g.Out}
// output an empty line separating output
separatorWriter := &separatorWriterWrapper{Delegate: trackingWriter}
w := printers.GetNewTabWriter(separatorWriter)
allResourcesNamespaced := !g.AllNamespaces
sameKind := make([]Obj, 0)
for ix := range objs {
mapping := objs[ix].Info.Mapping
sameKind = append(sameKind, objs[ix])
allResourcesNamespaced = allResourcesNamespaced && objs[ix].Info.Namespaced()
printWithNamespace := g.checkPrintWithNamespace(mapping)
if shouldGetNewPrinterForMapping(printer, lastMapping, mapping) {
w.Flush()
w.SetRememberedWidths(nil)
// add linebreaks between resource groups (if there is more than one)
// when it satisfies all following 3 conditions:
// 1) it's not the first resource group
// 2) it has row header
// 3) we've written output since the last time we started a new set of headers
if lastMapping != nil && !g.NoHeaders && trackingWriter.Written > 0 {
separatorWriter.SetReady(true)
}
printer, err = g.ToPrinter(mapping, nil, printWithNamespace, printWithKind)
if err != nil {
if !errs.Has(err.Error()) {
errs.Insert(err.Error())
*allErrs = append(*allErrs, err)
}
continue
}
lastMapping = mapping
}
if ix == len(objs)-1 || objs[ix].Info.Mapping.Resource != objs[ix+1].Info.Mapping.Resource {
table := &metav1.Table{}
allTableRows, mapping, err := g.reconstructionRow(sameKind, table)
if err != nil {
*allErrs = append(*allErrs, err)
return
}
table.Rows = allTableRows
setNoAdoption(mapping)
g.setColumnDefinition(table)
printObj, err := helper.ToUnstructured(table)
if err != nil {
*allErrs = append(*allErrs, err)
return
}
err = printer.PrintObj(printObj, w)
if err != nil {
*allErrs = append(*allErrs, err)
return
}
sameKind = make([]Obj, 0)
}
}
w.Flush()
g.printIfNotFindResource(trackingWriter.Written, allErrs, allResourcesNamespaced)
}
// printIfNotFindResource makes sure we output something if we wrote no output, had no errors, and are not ignoring NotFound
func (g *CommandGetOptions) printIfNotFindResource(written int, allErrs *[]error, allResourcesNamespaced bool) {
if written == 0 && !g.IgnoreNotFound && len(*allErrs) == 0 {
if allResourcesNamespaced {
fmt.Fprintf(g.ErrOut, "No resources found in %s namespace.\n", g.Namespace)
} else {
fmt.Fprintln(g.ErrOut, "No resources found")
}
}
}
// checkPrintWithNamespace checks whether objects should be printed with their namespace
func (g *CommandGetOptions) checkPrintWithNamespace(mapping *meta.RESTMapping) bool {
if mapping != nil && mapping.Scope.Name() == meta.RESTScopeNameRoot {
return false
}
return g.AllNamespaces
}
// getObjInfo gets object info from a member cluster
func (g *CommandGetOptions) getObjInfo(wg *sync.WaitGroup, mux *sync.Mutex, f cmdutil.Factory,
cluster string, objs *[]Obj, watchObjs *[]WatchObj, allErrs *[]error, args []string) {
defer wg.Done()
restClient, err := f.RESTClient()
if err != nil {
*allErrs = append(*allErrs, err)
return
}
// check if it is authorized to proxy this member cluster
request := restClient.Get().RequestURI(fmt.Sprintf(proxyURL, cluster) + "api")
if _, err := request.DoRaw(context.TODO()); err != nil {
*allErrs = append(*allErrs, fmt.Errorf("cluster(%s) is inaccessible, please check authorization or network", cluster))
return
}
r := f.NewBuilder().
Unstructured().
NamespaceParam(g.Namespace).DefaultNamespace().AllNamespaces(g.AllNamespaces).
FilenameParam(g.ExplicitNamespace, &g.FilenameOptions).
LabelSelectorParam(g.LabelSelector).
FieldSelectorParam(g.FieldSelector).
RequestChunksOf(g.ChunkSize).
ResourceTypeOrNameArgs(true, args...).
ContinueOnError().
Latest().
Flatten().
TransformRequests(g.transformRequests).
Do()
if g.IgnoreNotFound {
r.IgnoreErrors(apierrors.IsNotFound)
}
if err := r.Err(); err != nil {
*allErrs = append(*allErrs, fmt.Errorf("cluster(%s): %s", cluster, err))
return
}
if g.Watch || g.WatchOnly {
mux.Lock()
watchObjsInfo := WatchObj{
Cluster: cluster,
r: r,
}
*watchObjs = append(*watchObjs, watchObjsInfo)
mux.Unlock()
return
}
if !g.IsHumanReadablePrinter {
if err := g.printGeneric(r); err != nil {
*allErrs = append(*allErrs, fmt.Errorf("cluster(%s): %s", cluster, err))
}
return
}
infos, err := r.Infos()
if err != nil {
*allErrs = append(*allErrs, fmt.Errorf("cluster(%s): %s", cluster, err))
return
}
mux.Lock()
var objInfo Obj
for ix := range infos {
objInfo = Obj{
Cluster: cluster,
Info: infos[ix],
}
*objs = append(*objs, objInfo)
}
mux.Unlock()
}
// reconstructionRow reconstructs the table rows
func (g *CommandGetOptions) reconstructionRow(objs []Obj, table *metav1.Table) ([]metav1.TableRow, *meta.RESTMapping, error) {
var allTableRows []metav1.TableRow
var mapping *meta.RESTMapping
for ix := range objs {
mapping = objs[ix].Info.Mapping
unstr, ok := objs[ix].Info.Object.(*unstructured.Unstructured)
if !ok {
return nil, nil, fmt.Errorf("attempt to decode non-Unstructured object")
}
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(unstr.Object, table); err != nil {
return nil, nil, err
}
for rowIdx := range table.Rows {
var tempRow metav1.TableRow
rbKey := getRBKey(mapping.GroupVersionKind, table.Rows[rowIdx], objs[ix].Cluster)
tempRow.Cells = append(append(tempRow.Cells, table.Rows[rowIdx].Cells[0], objs[ix].Cluster), table.Rows[rowIdx].Cells[1:]...)
if _, ok := RBInfo[rbKey]; ok {
tempRow.Cells = append(tempRow.Cells, "Y")
} else {
tempRow.Cells = append(tempRow.Cells, "N")
}
table.Rows[rowIdx].Cells = tempRow.Cells
}
allTableRows = append(allTableRows, table.Rows...)
}
return allTableRows, mapping, nil
}
// reconstructObj reconstructs the rows of a runtime.Object
func (g *CommandGetOptions) reconstructObj(obj runtime.Object, mapping *meta.RESTMapping, cluster string, event string) (*metav1.Table, error) {
table := &metav1.Table{}
var allTableRows []metav1.TableRow
unstr, ok := obj.(*unstructured.Unstructured)
if !ok {
return nil, fmt.Errorf("attempt to decode non-Unstructured object")
}
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(unstr.Object, table); err != nil {
return nil, err
}
for rowIdx := range table.Rows {
var tempRow metav1.TableRow
rbKey := getRBKey(mapping.GroupVersionKind, table.Rows[rowIdx], cluster)
if g.OutputWatchEvents {
tempRow.Cells = append(append(tempRow.Cells, event, table.Rows[rowIdx].Cells[0], cluster), table.Rows[rowIdx].Cells[1:]...)
} else {
tempRow.Cells = append(append(tempRow.Cells, table.Rows[rowIdx].Cells[0], cluster), table.Rows[rowIdx].Cells[1:]...)
}
if _, ok := RBInfo[rbKey]; ok {
tempRow.Cells = append(tempRow.Cells, "Y")
} else {
tempRow.Cells = append(tempRow.Cells, "N")
}
table.Rows[rowIdx].Cells = tempRow.Cells
}
allTableRows = append(allTableRows, table.Rows...)
table.Rows = allTableRows
setNoAdoption(mapping)
g.setColumnDefinition(table)
return table, nil
}
// watch starts a client-side watch of one or more resources.
func (g *CommandGetOptions) watch(watchObjs []WatchObj) error {
if len(watchObjs) <= 0 {
return fmt.Errorf("not to find obj that is watched")
}
infos, err := watchObjs[0].r.Infos()
if err != nil {
return err
}
var objs []Obj
for ix := range infos {
objs = append(objs, Obj{Cluster: watchObjs[0].Cluster, Info: infos[ix]})
}
if multipleGVKsRequested(objs) {
return fmt.Errorf("watch is only supported on individual resources and resource collections - more than 1 resource was found")
}
info := infos[0]
mapping := info.ResourceMapping()
outputObjects := utilpointer.BoolPtr(!g.WatchOnly)
printer, err := g.ToPrinter(mapping, outputObjects, g.AllNamespaces, false)
if err != nil {
return err
}
writer := printers.GetNewTabWriter(g.Out)
// print the current object
for idx := range watchObjs {
var objsToPrint []runtime.Object
obj, err := watchObjs[idx].r.Object()
if err != nil {
return err
}
isList := meta.IsListType(obj)
if isList {
tmpObj, _ := meta.ExtractList(obj)
objsToPrint = append(objsToPrint, tmpObj...)
} else {
objsToPrint = append(objsToPrint, obj)
}
for _, objToPrint := range objsToPrint {
objrow, err := g.reconstructObj(objToPrint, mapping, watchObjs[idx].Cluster, string(watch.Added))
if err != nil {
return err
}
if idx > 0 {
// only print ColumnDefinitions once
objrow.ColumnDefinitions = nil
}
printObj, err := helper.ToUnstructured(objrow)
if err != nil {
return err
}
if err := printer.PrintObj(printObj, writer); err != nil {
return fmt.Errorf("unable to output the provided object: %v", err)
}
}
}
writer.Flush()
g.watchMultiClusterObj(watchObjs, mapping, outputObjects, printer)
return nil
}
// watchMultiClusterObj watches objects in multiple clusters using goroutines
func (g *CommandGetOptions) watchMultiClusterObj(watchObjs []WatchObj, mapping *meta.RESTMapping, outputObjects *bool, printer printers.ResourcePrinterFunc) {
var wg sync.WaitGroup
writer := printers.GetNewTabWriter(g.Out)
wg.Add(len(watchObjs))
for _, watchObj := range watchObjs {
go func(watchObj WatchObj) {
obj, err := watchObj.r.Object()
if err != nil {
panic(err)
}
rv := "0"
isList := meta.IsListType(obj)
if isList {
// the resourceVersion of list objects is ~now but won't return
// an initial watch event
rv, err = meta.NewAccessor().ResourceVersion(obj)
if err != nil {
panic(err)
}
}
if isList {
// we can start outputting objects now, watches started from lists don't emit synthetic added events
*outputObjects = true
} else {
// suppress output, since watches started for individual items emit a synthetic ADDED event first
*outputObjects = false
}
// print watched changes
w, err := watchObj.r.Watch(rv)
if err != nil {
panic(err)
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
intr := interrupt.New(nil, cancel)
_ = intr.Run(func() error {
_, err := watchtools.UntilWithoutRetry(ctx, w, func(e watch.Event) (bool, error) {
objToPrint := e.Object
objrow, err := g.reconstructObj(objToPrint, mapping, watchObj.Cluster, string(e.Type))
if err != nil {
return false, err
}
// no need to print ColumnDefinitions
objrow.ColumnDefinitions = nil
printObj, err := helper.ToUnstructured(objrow)
if err != nil {
return false, err
}
if err := printer.PrintObj(printObj, writer); err != nil {
return false, err
}
writer.Flush()
// after processing at least one event, start outputting objects
*outputObjects = true
return false, nil
})
return err
})
}(watchObj)
}
wg.Wait()
}
func (g *CommandGetOptions) printGeneric(r *resource.Result) error {
// we flattened the data from the builder, so we have individual items, but now we'd like to either:
// 1. if there is more than one item, combine them all into a single list
// 2. if there is a single item and that item is a list, leave it as its specific list
// 3. if there is a single item and it is not a list, leave it as a single item
var errs []error
singleItemImplied := false
infos, err := g.extractInfosFromResource(r, &errs, &singleItemImplied)
if err != nil {
return err
}
printer, err := g.ToPrinter(nil, nil, false, false)
if err != nil {
return err
}
var obj runtime.Object
if !singleItemImplied || len(infos) != 1 {
// we have zero or multiple items, so coerce all items into a list.
// we don't want an *unstructured.Unstructured list yet, as we
// may be dealing with non-unstructured objects. Compose all items
// into a corev1.List, and then decode using an unstructured scheme.
list := corev1.List{
TypeMeta: metav1.TypeMeta{
Kind: "List",
APIVersion: "v1",
},
ListMeta: metav1.ListMeta{},
}
for _, info := range infos {
list.Items = append(list.Items, runtime.RawExtension{Object: info.Object})
}
listData, err := json.Marshal(list)
if err != nil {
return err
}
converted, err := runtime.Decode(unstructured.UnstructuredJSONScheme, listData)
if err != nil {
return err
}
obj = converted
} else {
obj = infos[0].Object
}
isList := meta.IsListType(obj)
if isList {
items, err := meta.ExtractList(obj)
if err != nil {
return err
}
// take the items and create a new list for display
list := &unstructured.UnstructuredList{
Object: map[string]interface{}{
"kind": "List",
"apiVersion": "v1",
"metadata": map[string]interface{}{},
},
}
if listMeta, err := meta.ListAccessor(obj); err == nil {
list.Object["metadata"] = map[string]interface{}{
"selfLink": listMeta.GetSelfLink(),
"resourceVersion": listMeta.GetResourceVersion(),
}
}
for _, item := range items {
list.Items = append(list.Items, *item.(*unstructured.Unstructured))
}
if err := printer.PrintObj(list, g.Out); err != nil {
errs = append(errs, err)
}
return utilerrors.Reduce(utilerrors.Flatten(utilerrors.NewAggregate(errs)))
}
if printErr := printer.PrintObj(obj, g.Out); printErr != nil {
errs = append(errs, printErr)
}
return utilerrors.Reduce(utilerrors.Flatten(utilerrors.NewAggregate(errs)))
}
func (g *CommandGetOptions) extractInfosFromResource(r *resource.Result, errs *[]error, singleItemImplied *bool) ([]*resource.Info, error) {
infos, err := r.IntoSingleItemImplied(singleItemImplied).Infos()
if err != nil {
if *singleItemImplied {
return nil, err
}
*errs = append(*errs, err)
}
if len(infos) == 0 && g.IgnoreNotFound {
return nil, utilerrors.Reduce(utilerrors.Flatten(utilerrors.NewAggregate(*errs)))
}
return infos, nil
}
type trackingWriterWrapper struct {
Delegate io.Writer
Written int
}
func (t *trackingWriterWrapper) Write(p []byte) (n int, err error) {
t.Written += len(p)
return t.Delegate.Write(p)
}
type separatorWriterWrapper struct {
Delegate io.Writer
Ready bool
}
func (s *separatorWriterWrapper) Write(p []byte) (n int, err error) {
// If we're about to write non-empty bytes and `s` is ready,
// we prepend an empty line to `p` and reset `s.Ready`.
if len(p) != 0 && s.Ready {
fmt.Fprintln(s.Delegate)
s.Ready = false
}
return s.Delegate.Write(p)
}
func (s *separatorWriterWrapper) SetReady(state bool) {
s.Ready = state
}
func shouldGetNewPrinterForMapping(printer printers.ResourcePrinter, lastMapping, mapping *meta.RESTMapping) bool {
return printer == nil || lastMapping == nil || mapping == nil || mapping.Resource != lastMapping.Resource
}
// ClusterInfo holds information about a member cluster in karmada.
type ClusterInfo struct {
KubeConfig string
Context string
APIEndpoint string
ClusterSyncMode clusterv1alpha1.ClusterSyncMode
}
func clusterInfoInit(g *CommandGetOptions, karmadaConfig KarmadaConfig, clusterInfos map[string]*ClusterInfo) (*rest.Config, error) {
karmadaRestConfig, err := karmadaConfig.GetRestConfig(g.KarmadaContext, g.KubeConfig)
if err != nil {
return nil, fmt.Errorf("failed to get control plane rest config. context: %s, kube-config: %s, error: %v",
g.KarmadaContext, g.KubeConfig, err)
}
if err := getClusterInKarmada(karmadaRestConfig, clusterInfos); err != nil {
return nil, fmt.Errorf("method getClusterInKarmada get cluster info in karmada failed, err is: %w", err)
}
if err := g.getRBInKarmada(g.Namespace, karmadaRestConfig); err != nil {
return nil, err
}
if len(g.Clusters) <= 0 {
for c := range clusterInfos {
g.Clusters = append(g.Clusters, c)
}
}
return karmadaRestConfig, nil
}
func getFactory(clusterName string, clusterInfos map[string]*ClusterInfo) cmdutil.Factory {
kubeConfigFlags := NewConfigFlags(true).WithDeprecatedPasswordFlag()
// Build member cluster kubeConfigFlags
kubeConfigFlags.APIServer = stringptr(clusterInfos[clusterName].APIEndpoint)
// Use kubeconfig to access member cluster
kubeConfigFlags.KubeConfig = stringptr(clusterInfos[clusterName].KubeConfig)
kubeConfigFlags.Context = stringptr(clusterInfos[clusterName].Context)
kubeConfigFlags.usePersistentConfig = true
matchVersionKubeConfigFlags := cmdutil.NewMatchVersionFlags(kubeConfigFlags)
return cmdutil.NewFactory(matchVersionKubeConfigFlags)
}
func (g *CommandGetOptions) transformRequests(req *rest.Request) {
if !g.ServerPrint || !g.IsHumanReadablePrinter {
return
}
req.SetHeader("Accept", strings.Join([]string{
fmt.Sprintf("application/json;as=Table;v=%s;g=%s", metav1.SchemeGroupVersion.Version, metav1.GroupName),
fmt.Sprintf("application/json;as=Table;v=%s;g=%s", metav1beta1.SchemeGroupVersion.Version, metav1beta1.GroupName),
"application/json",
}, ","))
}
func (g *CommandGetOptions) getRBInKarmada(namespace string, config *rest.Config) error {
rbList := &workv1alpha2.ResourceBindingList{}
crbList := &workv1alpha2.ClusterResourceBindingList{}
gClient, err := gclient.NewForConfig(config)
if err != nil {
return err
}
if !g.AllNamespaces {
err = gClient.List(context.TODO(), rbList, &client.ListOptions{
Namespace: namespace,
})
} else {
err = gClient.List(context.TODO(), rbList, &client.ListOptions{})
}
if err != nil {
return err
}
if err = gClient.List(context.TODO(), crbList, &client.ListOptions{}); err != nil {
return err
}
for idx := range rbList.Items {
rbKey := rbList.Items[idx].GetName()
val := rbList.Items[idx].Status.AggregatedStatus
for i := range val {
if val[i].Applied && val[i].ClusterName != "" {
newRBKey := fmt.Sprintf("%s-%s", val[i].ClusterName, rbKey)
RBInfo[newRBKey] = &OtherPrint{
Applied: val[i].Applied,
}
}
}
}
for idx := range crbList.Items {
rbKey := crbList.Items[idx].GetName()
val := crbList.Items[idx].Status.AggregatedStatus
for i := range val {
if val[i].Applied && val[i].ClusterName != "" {
newRBKey := fmt.Sprintf("%s-%s", val[i].ClusterName, rbKey)
RBInfo[newRBKey] = &OtherPrint{
Applied: val[i].Applied,
}
}
}
}
return nil
}
// setClusterProxyInfo sets the proxy information for a cluster
func (g *CommandGetOptions) setClusterProxyInfo(karmadaRestConfig *rest.Config, name string, clusterInfos map[string]*ClusterInfo) {
clusterInfos[name].APIEndpoint = karmadaRestConfig.Host + fmt.Sprintf(proxyURL, name)
clusterInfos[name].KubeConfig = g.KubeConfig
clusterInfos[name].Context = g.KarmadaContext
if clusterInfos[name].KubeConfig == "" {
env := os.Getenv("KUBECONFIG")
if env != "" {
clusterInfos[name].KubeConfig = env
} else {
clusterInfos[name].KubeConfig = defaultKubeConfig
}
}
}
// getClusterInKarmada gets cluster info from the karmada cluster
func getClusterInKarmada(client *rest.Config, clusterInfos map[string]*ClusterInfo) error {
clusterList := &clusterv1alpha1.ClusterList{}
gClient, err := gclient.NewForConfig(client)
if err != nil {
return err
}
if err = gClient.List(context.TODO(), clusterList); err != nil {
return err
}
for i := range clusterList.Items {
cluster := &ClusterInfo{
APIEndpoint: clusterList.Items[i].Spec.APIEndpoint,
ClusterSyncMode: clusterList.Items[i].Spec.SyncMode,
}
clusterInfos[clusterList.Items[i].GetName()] = cluster
}
return nil
}
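// getRBKey builds the "<cluster>-<binding name>" lookup key. It matches the keys that
// getRBInKarmada stores in RBInfo, which is how reconstructionRow and reconstructObj decide
// whether to print "Y" or "N" in the ADOPTION column.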
func getRBKey(gvk schema.GroupVersionKind, row metav1.TableRow, cluster string) string {
resourceName, _ := row.Cells[0].(string)
rbKey := names.GenerateBindingName(gvk.Kind, resourceName)
return fmt.Sprintf("%s-%s", cluster, rbKey)
}
func multipleGVKsRequested(objs []Obj) bool {
if len(objs) < 2 {
return false
}
gvk := objs[0].Info.Mapping.GroupVersionKind
for _, obj := range objs {
if obj.Info.Mapping.GroupVersionKind != gvk {
return true
}
}
return false
}
// setNoAdoption lowers the ADOPTION column's priority for pods so it is not printed by default
func setNoAdoption(mapping *meta.RESTMapping) {
if mapping != nil && mapping.Resource.Resource == "pods" {
podColumns[printColumnClusterNum].Priority = 1
}
}
// setColumnDefinition sets the ColumnDefinitions used for printing
func (g *CommandGetOptions) setColumnDefinition(table *metav1.Table) {
var tempColumnDefinition []metav1.TableColumnDefinition
if len(table.ColumnDefinitions) > 0 {
if g.OutputWatchEvents {
tempColumnDefinition = append(append(append(tempColumnDefinition, eventColumn, table.ColumnDefinitions[0], podColumns[0]), table.ColumnDefinitions[1:]...), podColumns[1:]...)
} else {
tempColumnDefinition = append(append(append(tempColumnDefinition, table.ColumnDefinitions[0], podColumns[0]), table.ColumnDefinitions[1:]...), podColumns[1:]...)
}
table.ColumnDefinitions = tempColumnDefinition
}
}
// Exists determines whether the path exists
func Exists(path string) bool {
if _, err := os.Stat(path); err != nil {
return os.IsExist(err)
}
return true
}
func getExample(parentCommand string) string {
example := `
# List all pods in ps output format` + "\n" +
fmt.Sprintf("%s get pods", parentCommand) + `
# List all pods in ps output format with more information (such as node name)` + "\n" +
fmt.Sprintf("%s get pods -o wide", parentCommand) + `
# List all pods of member1 cluster in ps output format` + "\n" +
fmt.Sprintf("%s get pods -C member1", parentCommand) + `
# List a single replicasets controller with specified NAME in ps output format ` + "\n" +
fmt.Sprintf("%s get replicasets nginx", parentCommand) + `
# List deployments in JSON output format, in the "v1" version of the "apps" API group ` + "\n" +
fmt.Sprintf("%s get deployments.v1.apps -o json", parentCommand) + `
# Return only the phase value of the specified resource ` + "\n" +
fmt.Sprintf("%s get -o template deployment/nginx -C member1 --template={{.spec.replicas}}", parentCommand) + `
# List all replication controllers and services together in ps output format ` + "\n" +
fmt.Sprintf("%s get rs,services", parentCommand) + `
# List one or more resources by their type and names ` + "\n" +
fmt.Sprintf("%s get rs/nginx-cb87b6d88 service/kubernetes", parentCommand)
return example
}
// skipPrinter allows conditionally suppressing object output via the output field.
// table objects are suppressed by setting their Rows to nil (allowing column definitions to propagate to the delegate).
// non-table objects are suppressed by not calling the delegate at all.
type skipPrinter struct {
delegate printers.ResourcePrinter
output *bool
}
func (p *skipPrinter) PrintObj(obj runtime.Object, writer io.Writer) error {
if *p.output {
return p.delegate.PrintObj(obj, writer)
}
table, isTable := obj.(*metav1.Table)
if !isTable {
return nil
}
table = table.DeepCopy()
table.Rows = nil
return p.delegate.PrintObj(table, writer)
}
| [
"\"KUBECONFIG\""
] | [] | [
"KUBECONFIG"
] | [] | ["KUBECONFIG"] | go | 1 | 0 | |
tcdataimporter/api/djangoserver/wsgi.py | """
WSGI config for tcdataimporter project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djangoserver.settings")
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
ioctl/config/config.go | // Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package config
import (
"fmt"
"io/ioutil"
"os"
"github.com/spf13/cobra"
"gopkg.in/yaml.v2"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/ioctl/output"
"github.com/iotexproject/iotex-core/pkg/log"
)
// Directories
var (
// ConfigDir is the directory to store config file
ConfigDir string
// DefaultConfigFile is the default config file name
DefaultConfigFile string
)
// Error strings
var (
// ErrConfigNotMatch indicates error for no config matches
ErrConfigNotMatch = fmt.Errorf("No matching config")
// ErrEmptyEndpoint indicates error for empty endpoint
ErrEmptyEndpoint = fmt.Errorf("No endpoint has been set")
)
// Language is the type used to enumerate the supported languages of ioctl
type Language int
// Multi-language support
const (
English Language = iota
Chinese
)
// ConfigCmd represents the config command
var ConfigCmd = &cobra.Command{
Use: "config",
Short: "Get, set, or reset configuration for ioctl",
}
// Context represents the current context
type Context struct {
AddressOrAlias string `json:"addressOrAlias" yaml:"addressOrAlias"`
}
// Config defines the config schema
type Config struct {
Wallet string `json:"wallet" yaml:"wallet"`
Endpoint string `json:"endpoint" yaml:"endpoint"`
SecureConnect bool `json:"secureConnect" yaml:"secureConnect"`
Aliases map[string]string `json:"aliases" yaml:"aliases"`
DefaultAccount Context `json:"defaultAccount" yaml:"defaultAccount"`
Explorer string `json:"explorer" yaml:"explorer"`
Language string `json:"language" yaml:"language"`
Nsv2height uint64 `json:"nsv2height" yaml:"nsv2height"`
}
var (
// ReadConfig represents the current config read from local
ReadConfig Config
// Insecure represents the insecure connect option of grpc dial, default is false
Insecure = false
// UILanguage represents the language of ioctl user interface, default is 0 representing English
UILanguage Language
)
func init() {
ConfigDir = os.Getenv("HOME") + "/.config/ioctl/default"
// Create path to config directory
if err := os.MkdirAll(ConfigDir, 0700); err != nil {
log.L().Panic(err.Error())
}
// Path to config file
DefaultConfigFile = ConfigDir + "/config.default"
// Load or reset config file
var err error
ReadConfig, err = LoadConfig()
if err != nil {
if os.IsNotExist(err) {
err = reset() // Config file doesn't exist
}
if err != nil {
log.L().Panic(err.Error())
}
}
// Check completeness of config file
completeness := true
if ReadConfig.Wallet == "" {
ReadConfig.Wallet = ConfigDir
completeness = false
}
if ReadConfig.Language == "" {
ReadConfig.Language = supportedLanguage[0]
completeness = false
}
if ReadConfig.Nsv2height == 0 {
ReadConfig.Nsv2height = config.Default.Genesis.FairbankBlockHeight
}
if !completeness {
err := writeConfig()
if err != nil {
log.L().Panic(err.Error())
}
}
// Set language for ioctl
UILanguage = isSupportedLanguage(ReadConfig.Language)
if UILanguage == -1 {
UILanguage = 0
message := output.StringMessage(fmt.Sprintf("Language %s is not supported, English instead.",
ReadConfig.Language))
fmt.Println(message.Warn())
}
// Init subcommands
ConfigCmd.AddCommand(configGetCmd)
ConfigCmd.AddCommand(configSetCmd)
ConfigCmd.AddCommand(configResetCmd)
}
// LoadConfig loads config file in yaml format
func LoadConfig() (Config, error) {
ReadConfig := Config{
Aliases: make(map[string]string),
}
in, err := ioutil.ReadFile(DefaultConfigFile)
if err == nil {
if err := yaml.Unmarshal(in, &ReadConfig); err != nil {
return ReadConfig, err
}
}
return ReadConfig, err
}
// TranslateInLang returns translation in selected language
func TranslateInLang(translations map[Language]string, lang Language) string {
if tsl, ok := translations[lang]; ok {
return tsl
}
// Assumption: English should always be provided
return translations[English]
}
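// Usage sketch (hypothetical translations map, not from this file):
//
//   usage := map[Language]string{English: "Get, set, or reset configuration for ioctl"}
//   fmt.Println(TranslateInLang(usage, UILanguage)) // missing or unknown languages fall back to English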
| [
"\"HOME\""
] | [] | [
"HOME"
] | [] | ["HOME"] | go | 1 | 0 | |
tests/tools/commands/test_venv.py | # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import
import os
import subprocess
import tempfile
from subprocess import CalledProcessError
from textwrap import dedent
import pytest
from pex.common import temporary_dir, touch
from pex.executor import Executor
from pex.testing import run_pex_command
from pex.tools.commands.virtualenv import Virtualenv
from pex.typing import TYPE_CHECKING
from pex.util import named_temporary_file
if TYPE_CHECKING:
from typing import Callable, Tuple, Any, Dict, Optional, Iterable
CreatePexVenv = Callable[[Tuple[str, ...]], Virtualenv]
FABRIC_VERSION = "2.5.0"
@pytest.fixture(scope="module")
def pex():
# type: () -> str
with temporary_dir() as tmpdir:
pex_path = os.path.join(tmpdir, "fabric.pex")
src_dir = os.path.join(tmpdir, "src")
touch(os.path.join(src_dir, "user/__init__.py"))
touch(os.path.join(src_dir, "user/package/__init__.py"))
# N.B.: --unzip just speeds up runs 2+ of the pex file and is otherwise not relevant to
# these tests.
run_pex_command(
args=[
"fabric=={}".format(FABRIC_VERSION),
"-c",
"fab",
"--sources-directory",
src_dir,
"-o",
pex_path,
"--unzip",
"--include-tools",
]
)
yield os.path.realpath(pex_path)
def make_env(**kwargs):
# type: (**Any) -> Dict[str, str]
env = os.environ.copy()
env.update((k, str(v)) for k, v in kwargs.items())
return env
@pytest.fixture
def create_pex_venv(pex):
# type: (str) -> CreatePexVenv
with temporary_dir() as tmpdir:
venv_dir = os.path.join(tmpdir, "venv")
def _create_pex_venv(*options):
# type: (*str) -> Virtualenv
subprocess.check_call(
args=[pex, "venv", venv_dir] + list(options or ()), env=make_env(PEX_TOOLS="1")
)
return Virtualenv(venv_dir)
yield _create_pex_venv
def test_force(create_pex_venv):
# type: (CreatePexVenv) -> None
venv = create_pex_venv("--pip")
venv.interpreter.execute(args=["-m", "pip", "install", "ansicolors==1.1.8"])
venv.interpreter.execute(args=["-c", "import colors"])
with pytest.raises(CalledProcessError):
create_pex_venv()
venv_force = create_pex_venv("--force")
# The re-created venv should have no ansicolors installed like the prior venv.
with pytest.raises(Executor.NonZeroExit):
venv_force.interpreter.execute(args=["-c", "import colors"])
# The re-created venv should have no pip installed either.
with pytest.raises(Executor.NonZeroExit):
venv.interpreter.execute(args=["-m", "pip", "install", "ansicolors==1.1.8"])
def execute_venv_pex_interpreter(
venv, # type: Virtualenv
code=None, # type: Optional[str]
extra_args=(), # type: Iterable[str]
**extra_env # type: Any
):
# type: (...) -> Tuple[int, str, str]
process = subprocess.Popen(
args=[venv.join_path("pex")] + list(extra_args),
env=make_env(PEX_INTERPRETER=True, **extra_env),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
)
stdout, stderr = process.communicate(input=None if code is None else code.encode())
return process.returncode, stdout.decode("utf-8"), stderr.decode("utf-8")
def expected_file_path(
venv, # type: Virtualenv
package, # type: str
):
# type: (...) -> str
return os.path.realpath(
os.path.join(
venv.site_packages_dir,
os.path.sep.join(package.split(".")),
"__init__.{ext}".format(ext="pyc" if venv.interpreter.version[0] == 2 else "py"),
)
)
def parse_fabric_version_output(output):
# type: (str) -> Dict[str, str]
return dict(line.split(" ", 1) for line in output.splitlines())
def test_venv_pex(create_pex_venv):
# type: (CreatePexVenv) -> None
venv = create_pex_venv()
venv_pex = venv.join_path("pex")
fabric_output = subprocess.check_output(args=[venv_pex, "-V"])
# N.B.: `fab -V` output looks like so:
# $ fab -V
# Fabric 2.5.0
# Paramiko 2.7.2
# Invoke 1.4.1
versions = parse_fabric_version_output(fabric_output.decode("utf-8"))
assert FABRIC_VERSION == versions["Fabric"]
invoke_version = "Invoke {}".format(versions["Invoke"])
invoke_script_output = subprocess.check_output(
args=[venv_pex, "-V"], env=make_env(PEX_SCRIPT="invoke")
)
assert invoke_version == invoke_script_output.decode("utf-8").strip()
invoke_entry_point_output = subprocess.check_output(
args=[venv_pex, "-V"],
env=make_env(PEX_MODULE="invoke.main:program.run"),
)
assert invoke_version == invoke_entry_point_output.decode("utf-8").strip()
pex_extra_sys_path = ["/dev/null", "Bob"]
returncode, _, stderr = execute_venv_pex_interpreter(
venv,
code=dedent(
"""\
from __future__ import print_function
import os
import sys
def assert_equal(test_num, expected, actual):
if expected == actual:
return
print(
"[{{}}] Expected {{}} but got {{}}".format(test_num, expected, actual),
file=sys.stderr,
)
sys.exit(test_num)
assert_equal(1, {pex_extra_sys_path!r}, sys.path[-2:])
import fabric
assert_equal(2, {fabric!r}, os.path.realpath(fabric.__file__))
import user.package
assert_equal(3, {user_package!r}, os.path.realpath(user.package.__file__))
""".format(
pex_extra_sys_path=pex_extra_sys_path,
fabric=expected_file_path(venv, "fabric"),
user_package=expected_file_path(venv, "user.package"),
)
),
PEX_EXTRA_SYS_PATH=os.pathsep.join(pex_extra_sys_path),
)
assert 0 == returncode, stderr
def test_binary_path(create_pex_venv):
# type: (CreatePexVenv) -> None
code = dedent(
"""\
import errno
import subprocess
import sys
# PEXed code should be able to find all (console) scripts on the $PATH when the venv is
# created with --bin-path set, and the scripts should all run with the venv interpreter in
# order to find their code.
def try_invoke(*args):
try:
subprocess.check_call(list(args))
return 0
except OSError as e:
if e.errno == errno.ENOENT:
# This is what we expect when scripts are not set up on PATH via --bin-path.
return 1
return 2
exit_code = try_invoke("fab", "-V")
exit_code += 10 * try_invoke("inv", "-V")
exit_code += 100 * try_invoke("invoke", "-V")
sys.exit(exit_code)
"""
)
venv = create_pex_venv()
returncode, stdout, stderr = execute_venv_pex_interpreter(
venv, code=code, PATH=tempfile.gettempdir()
)
assert 111 == returncode, stdout + stderr
venv_bin_path = create_pex_venv("-f", "--bin-path", "prepend")
returncode, _, _ = execute_venv_pex_interpreter(
venv_bin_path, code=code, PATH=tempfile.gettempdir()
)
assert 0 == returncode
def test_venv_pex_interpreter_special_modes(create_pex_venv):
# type: (CreatePexVenv) -> None
venv = create_pex_venv()
# special mode execute module: -m module
returncode, stdout, stderr = execute_venv_pex_interpreter(venv, extra_args=["-m"])
assert 2 == returncode, stderr
assert "" == stdout
returncode, stdout, stderr = execute_venv_pex_interpreter(
venv, extra_args=["-m", "fabric", "--version"]
)
assert 0 == returncode, stderr
versions = parse_fabric_version_output(stdout)
assert FABRIC_VERSION == versions["Fabric"]
# special mode execute code string: -c <str>
returncode, stdout, stderr = execute_venv_pex_interpreter(venv, extra_args=["-c"])
assert 2 == returncode, stderr
assert "" == stdout
fabric_file_code = "import fabric, os; print(os.path.realpath(fabric.__file__))"
expected_fabric_file_path = expected_file_path(venv, "fabric")
returncode, stdout, stderr = execute_venv_pex_interpreter(
venv, extra_args=["-c", fabric_file_code]
)
assert 0 == returncode, stderr
assert expected_fabric_file_path == stdout.strip()
# special mode execute stdin: -
returncode, stdout, stderr = execute_venv_pex_interpreter(
venv, code=fabric_file_code, extra_args=["-"]
)
assert 0 == returncode, stderr
assert expected_fabric_file_path == stdout.strip()
# special mode execute python file: <py file name>
with named_temporary_file(prefix="code", suffix=".py", mode="w") as fp:
fp.write(fabric_file_code)
fp.close()
returncode, stdout, stderr = execute_venv_pex_interpreter(
venv, code=fabric_file_code, extra_args=[fp.name]
)
assert 0 == returncode, stderr
assert expected_fabric_file_path == stdout.strip()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
goldfinger/metals_api.py | # -*- coding: utf-8 -*-
"""
We return the values based on the base currency.
For example, for 1 USD the return is a number like 0.000634 for Gold (XAU).
To get the gold rate in USD: 1 / 0.000634 ≈ 1577.28 USD
"""
import os
import math
import boto3
import redis
import requests
import pandas as pd
from datetime import date, datetime, timedelta
from helpers import get_today_date, get_days_ago_date, days_diff
from pandas_helpers import redis_to_dataframe
MAX_DAYS = 5
API_URL = 'https://www.metals-api.com/api'
def get_access_key():
ssm = boto3.client('ssm')
return ssm.get_parameter(Name='/goldfinger/api/key', WithDecryption=True)['Parameter']['Value']
def get_latest(currency, *symbols):
"""
"latest" endpoint - request the most recent exchange rate data
https://www.metals-api.com/api/latest
? access_key = YOUR_ACCESS_KEY
& base = USD
& symbols = XAU,XAG
"""
symbols = ','.join(symbols)
uri = f'{API_URL}/latest?access_key={access_key}&base={currency}&symbols={symbols}'
return requests.get(uri).json()
def get_timeseries(currency, start_date, end_date, symbol):
"""
"timeseries" endpoint - request exchange rates for a specific period of time
https://www.metals-api.com/api/timeseries
? access_key = YOUR_ACCESS_KEY
& start_date = YYYY-MM-DD
& end_date = YYYY-MM-DD
& base = USD
& symbols = XAU,XAG <-- can actually only be one symbol
"""
uri = f'{API_URL}/timeseries?access_key={access_key}&start_date={start_date}&end_date={end_date}&base={currency}&symbols={symbol}'
return requests.get(uri).json()
def get_historical():
"""
"historical" endpoint - request historical rates for a specific day
https://www.metals-api.com/api/YYYY-MM-DD
? access_key = YOUR_ACCESS_KEY
& base = USD
& symbols = XAU,XAG
"""
pass
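# Sketch of how this stub could be filled in, following the get_latest/get_timeseries pattern
# above (untested; based only on the endpoint URL shown in the docstring):
#
#   def get_historical(currency, on_date, *symbols):
#       symbols = ','.join(symbols)
#       uri = f'{API_URL}/{on_date}?access_key={access_key}&base={currency}&symbols={symbols}'
#       return requests.get(uri).json()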
def get_convert():
"""
"convert" endpoint - convert any amount from one currency to another
using real-time exchange rates
https://www.metals-api.com/api/convert
? access_key = YOUR_ACCESS_KEY
& from = USD
& to = EUR
& amount = 25
append an additional "date" parameter if you want to use
historical rates for your conversion
& date = YYYY-MM-DD
"""
pass
def get_fluctuation():
"""
"fluctuation" endpoint - request any currency's change parameters (margin
and percentage), optionally between two specified dates
https://www.metals-api.com/api/fluctuation
? access_key = YOUR_ACCESS_KEY
& base = USD
& symbols = XAU,XAG
& type = weekly
append an additional "date" parameter if you want to use
historical rates for your conversion
& start_date = YYYY-MM-DD
& end_date = YYYY-MM-DD
"""
pass
def timeseries_to_redis(currency, start_date_str, end_date_str, symbol):
today = datetime.today()
end_date = datetime.strptime(end_date_str, '%Y-%m-%d')
start_date = datetime.strptime(start_date_str, '%Y-%m-%d')
_days_diff = days_diff(start_date_str, end_date_str)
loops = math.ceil(_days_diff / MAX_DAYS)
rates = {}
for loop in range(loops):
start = start_date
end = start_date + timedelta(MAX_DAYS)
if end > today:
end = today
end_str = end.strftime('%Y-%m-%d')
start_str = start.strftime('%Y-%m-%d')
start_date = end
print(f'{start_str} to {end_str}', end='')
# this does a hget each iteration, but I guess that's what a cache is for
if not date_range_in_redis(start, currency, symbol):
# redis does not have the keys in range
ret = get_timeseries(currency, start_str, end_str, symbol)
else:
print('...already in redis')
continue
if not ret['success']:
print(f'Bad response {ret}')
break
rates.update(ret['rates'])
print(rates)
# flatten dictionary
rates_to_date = {
k:v[symbol] for (k,v) in rates.items()
}
return {symbol: rates_to_date}
#end_date = get_today_date()
#start_date = get_days_ago_date(MAX_DAYS)
def date_range_in_redis(start_date, currency, symbol):
key = f'{symbol}-{currency}'
timeseries_data = r.hgetall(key)
timeseries_data = {
k.decode('utf-8'):float(v) for (k,v) in timeseries_data.items()
}
all_dates = set(timeseries_data.keys())
range_dates = set([(start_date + timedelta(days=x)).strftime('%Y-%m-%d') for x in range(MAX_DAYS)])
return range_dates.issubset(all_dates)
if __name__=="__main__":
global r
global access_key
access_key = get_access_key()
running_in_docker = os.environ.get('RUNNING_IN_DOCKER', False)
if running_in_docker:
r = redis.Redis(host='192.168.1.21')
else:
r = redis.Redis(host='127.0.0.1')
yesterday = (datetime.now() - timedelta(1)).strftime('%Y-%m-%d')
for symbol in ['XAU', 'XAG']:
for currency in ['ZAR', 'USD']:
key = f'{symbol}-{currency}'
series = timeseries_to_redis(currency, '2020-01-01', yesterday, symbol)
print(series)
if series:
try:
r.hmset(key, series[symbol])
print(redis_to_dataframe(symbol))
except redis.exceptions.DataError:
print('empty dictionary')
else:
print('Something went wrong')
r.save()
| [] | [] | [
"RUNNING_IN_DOCKER"
] | [] | ["RUNNING_IN_DOCKER"] | python | 1 | 0 | |
pkg/storage/storage.go | package storage
import (
"context"
"errors"
"fmt"
"math/big"
"os"
"path/filepath"
"runtime"
"sort"
"sync"
"time"
"github.com/dgraph-io/badger/v2"
"github.com/dgraph-io/badger/v2/options"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/sirupsen/logrus"
"github.com/pyroscope-io/pyroscope/pkg/config"
"github.com/pyroscope-io/pyroscope/pkg/flameql"
"github.com/pyroscope-io/pyroscope/pkg/storage/cache"
"github.com/pyroscope-io/pyroscope/pkg/storage/dict"
"github.com/pyroscope-io/pyroscope/pkg/storage/dimension"
"github.com/pyroscope-io/pyroscope/pkg/storage/labels"
"github.com/pyroscope-io/pyroscope/pkg/storage/segment"
"github.com/pyroscope-io/pyroscope/pkg/storage/tree"
"github.com/pyroscope-io/pyroscope/pkg/structs/merge"
"github.com/pyroscope-io/pyroscope/pkg/util/bytesize"
"github.com/pyroscope-io/pyroscope/pkg/util/disk"
"github.com/pyroscope-io/pyroscope/pkg/util/slices"
)
var (
errOutOfSpace = errors.New("running out of space")
errRetention = errors.New("could not write because of retention settings")
evictInterval = 20 * time.Second
writeBackInterval = time.Second
retentionInterval = time.Minute
badgerGCInterval = 5 * time.Minute
)
type Storage struct {
putMutex sync.Mutex
config *config.Server
segments *cache.Cache
dimensions *cache.Cache
dicts *cache.Cache
trees *cache.Cache
labels *labels.Labels
db *badger.DB
dbTrees *badger.DB
dbDicts *badger.DB
dbDimensions *badger.DB
dbSegments *badger.DB
localProfilesDir string
stop chan struct{}
wg sync.WaitGroup
// prometheus metrics
storageWritesTotal prometheus.Counter
writeBackTotal prometheus.Counter
evictionsTotal prometheus.Counter
retentionCount prometheus.Counter
storageReadsTotal prometheus.Counter
evictionsAllocBytes prometheus.Gauge
evictionsTotalBytes prometheus.Gauge
storageCachesFlushTimer prometheus.Histogram
storageBadgerCloseTimer prometheus.Histogram
evictionsTimer prometheus.Histogram
writeBackTimer prometheus.Histogram
retentionTimer prometheus.Histogram
}
func (s *Storage) newBadger(name string) (*badger.DB, error) {
badgerPath := filepath.Join(s.config.StoragePath, name)
err := os.MkdirAll(badgerPath, 0o755)
if err != nil {
return nil, err
}
badgerOptions := badger.DefaultOptions(badgerPath)
badgerOptions = badgerOptions.WithTruncate(!s.config.BadgerNoTruncate)
badgerOptions = badgerOptions.WithSyncWrites(false)
badgerOptions = badgerOptions.WithCompactL0OnClose(false)
badgerOptions = badgerOptions.WithCompression(options.ZSTD)
badgerLevel := logrus.ErrorLevel
if l, err := logrus.ParseLevel(s.config.BadgerLogLevel); err == nil {
badgerLevel = l
}
badgerOptions = badgerOptions.WithLogger(badgerLogger{name: name, logLevel: badgerLevel})
db, err := badger.Open(badgerOptions)
if err != nil {
return nil, err
}
s.wg.Add(1)
go s.periodicTask(badgerGCInterval, s.badgerGCTask(db))
return db, nil
}
func New(c *config.Server, reg prometheus.Registerer) (*Storage, error) {
s := &Storage{
config: c,
stop: make(chan struct{}),
localProfilesDir: filepath.Join(c.StoragePath, "local-profiles"),
storageWritesTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "pyroscope_storage_writes_total",
Help: "number of calls to storage.Put",
}),
storageReadsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "pyroscope_storage_reads_total",
Help: "number of calls to storage.Get",
}),
// Evictions
evictionsTimer: promauto.With(reg).NewHistogram(prometheus.HistogramOpts{
Name: "pyroscope_storage_evictions_duration_seconds",
Help: "duration of evictions (triggered when there's memory pressure)",
Buckets: []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10},
}),
// The following 2 metrics are somewhat broad
// Nevertheless they are still useful to grasp evictions
evictionsAllocBytes: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "pyroscope_storage_evictions_alloc_bytes",
Help: "number of bytes allocated in the heap",
}),
evictionsTotalBytes: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "pyroscope_storage_evictions_total_mem_bytes",
Help: "total number of memory bytes",
}),
storageCachesFlushTimer: promauto.With(reg).NewHistogram(prometheus.HistogramOpts{
Name: "pyroscope_storage_caches_flush_duration_seconds",
Help: "duration of storage caches flush (triggered when server is closing)",
Buckets: []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10},
}),
storageBadgerCloseTimer: promauto.With(reg).NewHistogram(prometheus.HistogramOpts{
Name: "pyroscope_storage_db_close_duration_seconds",
Help: "duration of db close (triggered when server is closing)",
Buckets: []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10},
}),
writeBackTimer: promauto.With(reg).NewHistogram(prometheus.HistogramOpts{
Name: "pyroscope_storage_writeback_duration_seconds",
Help: "duration of write-back writes (triggered periodically)",
Buckets: []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10},
}),
retentionTimer: promauto.With(reg).NewHistogram(prometheus.HistogramOpts{
Name: "pyroscope_storage_retention_duration_seconds",
Help: "duration of old data deletion",
// TODO what buckets to use here?
Buckets: []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10},
}),
}
var err error
s.db, err = s.newBadger("main")
if err != nil {
return nil, err
}
s.labels = labels.New(s.db)
s.dbTrees, err = s.newBadger("trees")
if err != nil {
return nil, err
}
s.dbDicts, err = s.newBadger("dicts")
if err != nil {
return nil, err
}
s.dbDimensions, err = s.newBadger("dimensions")
if err != nil {
return nil, err
}
s.dbSegments, err = s.newBadger("segments")
if err != nil {
return nil, err
}
if err = os.MkdirAll(s.localProfilesDir, 0o755); err != nil {
return nil, err
}
hitCounterMetrics := promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "pyroscope_storage_cache_hits_total",
Help: "total number of cache hits",
}, []string{"name"})
missCounterMetrics := promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "pyroscope_storage_cache_misses_total",
Help: "total number of cache misses",
}, []string{"name"})
storageReadCounterMetrics := promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "pyroscope_storage_cache_reads_total",
Help: "total number of cache queries",
}, []string{"name"})
writesToDiskCounterMetrics := promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "pyroscope_storage_cache_persisted_total",
Help: "number of items persisted from cache to disk",
}, []string{"name"})
s.dimensions = cache.New(s.dbDimensions, "i:", &cache.Metrics{
HitCounter: hitCounterMetrics.With(prometheus.Labels{"name": "dimensions"}),
MissCounter: missCounterMetrics.With(prometheus.Labels{"name": "dimensions"}),
ReadCounter: storageReadCounterMetrics.With(prometheus.Labels{"name": "dimensions"}),
WritesToDiskCounter: writesToDiskCounterMetrics.With(prometheus.Labels{"name": "dimensions"}),
})
s.dimensions.Bytes = func(k string, v interface{}) ([]byte, error) {
return v.(*dimension.Dimension).Bytes()
}
s.dimensions.FromBytes = func(k string, v []byte) (interface{}, error) {
return dimension.FromBytes(v)
}
s.dimensions.New = func(k string) interface{} {
return dimension.New()
}
s.segments = cache.New(s.dbSegments, "s:", &cache.Metrics{
HitCounter: hitCounterMetrics.With(prometheus.Labels{"name": "segments"}),
MissCounter: missCounterMetrics.With(prometheus.Labels{"name": "segments"}),
ReadCounter: storageReadCounterMetrics.With(prometheus.Labels{"name": "segments"}),
WritesToDiskCounter: writesToDiskCounterMetrics.With(prometheus.Labels{"name": "segments"}),
})
s.segments.Bytes = func(k string, v interface{}) ([]byte, error) {
return v.(*segment.Segment).Bytes()
}
s.segments.FromBytes = func(k string, v []byte) (interface{}, error) {
// TODO:
// these configuration params should be saved in db when it initializes
return segment.FromBytes(v)
}
s.segments.New = func(k string) interface{} {
return segment.New()
}
s.dicts = cache.New(s.dbDicts, "d:", &cache.Metrics{
HitCounter: hitCounterMetrics.With(prometheus.Labels{"name": "dicts"}),
MissCounter: missCounterMetrics.With(prometheus.Labels{"name": "dicts"}),
ReadCounter: storageReadCounterMetrics.With(prometheus.Labels{"name": "dicts"}),
WritesToDiskCounter: writesToDiskCounterMetrics.With(prometheus.Labels{"name": "dicts"}),
})
s.dicts.Bytes = func(k string, v interface{}) ([]byte, error) {
return v.(*dict.Dict).Bytes()
}
s.dicts.FromBytes = func(k string, v []byte) (interface{}, error) {
return dict.FromBytes(v)
}
s.dicts.New = func(k string) interface{} {
return dict.New()
}
s.trees = cache.New(s.dbTrees, "t:", &cache.Metrics{
HitCounter: hitCounterMetrics.With(prometheus.Labels{"name": "trees"}),
MissCounter: missCounterMetrics.With(prometheus.Labels{"name": "trees"}),
ReadCounter: storageReadCounterMetrics.With(prometheus.Labels{"name": "trees"}),
WritesToDiskCounter: writesToDiskCounterMetrics.With(prometheus.Labels{"name": "trees"}),
})
s.trees.Bytes = s.treeBytes
s.trees.FromBytes = s.treeFromBytes
s.trees.New = func(k string) interface{} {
return tree.New()
}
memTotal, err := getMemTotal()
if err != nil {
return nil, err
}
s.wg.Add(2)
go s.periodicTask(evictInterval, s.evictionTask(memTotal))
go s.periodicTask(writeBackInterval, s.writeBackTask)
if s.config.Retention > 0 {
s.wg.Add(1)
go s.periodicTask(retentionInterval, s.retentionTask)
}
if err = s.migrate(); err != nil {
return nil, err
}
return s, nil
}
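// PutInput describes a single profile write: the sample tree, the time range it
// covers, and metadata about how the profile was collected.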
type PutInput struct {
StartTime time.Time
EndTime time.Time
Key *segment.Key
Val *tree.Tree
SpyName string
SampleRate uint32
Units string
AggregationType string
}
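// treeFromBytes and treeBytes (de)serialize trees using the dictionary cache
// entry that corresponds to the tree's key.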
func (s *Storage) treeFromBytes(k string, v []byte) (interface{}, error) {
key := segment.FromTreeToDictKey(k)
d, err := s.dicts.GetOrCreate(key)
if err != nil {
return nil, fmt.Errorf("dicts cache for %v: %v", key, err)
}
return tree.FromBytes(d.(*dict.Dict), v)
}
func (s *Storage) treeBytes(k string, v interface{}) ([]byte, error) {
key := segment.FromTreeToDictKey(k)
d, err := s.dicts.GetOrCreate(key)
if err != nil {
return nil, fmt.Errorf("dicts cache for %v: %v", key, err)
}
b, err := v.(*tree.Tree).Bytes(d.(*dict.Dict), s.config.MaxNodesSerialization)
if err != nil {
		return nil, fmt.Errorf("tree serialization for %v: %v", key, err)
}
s.dicts.Put(key, d)
return b, nil
}
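// OutOfSpaceThreshold is the minimum free disk space required before Put
// rejects new writes with errOutOfSpace.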
var OutOfSpaceThreshold = 512 * bytesize.MB
func (s *Storage) Put(po *PutInput) error {
// TODO: This is a pretty broad lock. We should find a way to make these locks more selective.
s.putMutex.Lock()
defer s.putMutex.Unlock()
if err := s.performFreeSpaceCheck(); err != nil {
return err
}
if po.StartTime.Before(s.lifetimeBasedRetentionThreshold()) {
return errRetention
}
logrus.WithFields(logrus.Fields{
"startTime": po.StartTime.String(),
"endTime": po.EndTime.String(),
"key": po.Key.Normalized(),
"samples": po.Val.Samples(),
"units": po.Units,
"aggregationType": po.AggregationType,
}).Debug("storage.Put")
s.storageWritesTotal.Add(1.0)
for k, v := range po.Key.Labels() {
s.labels.Put(k, v)
}
sk := po.Key.SegmentKey()
for k, v := range po.Key.Labels() {
key := k + ":" + v
r, err := s.dimensions.GetOrCreate(key)
if err != nil {
logrus.Errorf("dimensions cache for %v: %v", key, err)
continue
}
r.(*dimension.Dimension).Insert([]byte(sk))
s.dimensions.Put(key, r)
}
r, err := s.segments.GetOrCreate(sk)
if err != nil {
return fmt.Errorf("segments cache for %v: %v", sk, err)
}
st := r.(*segment.Segment)
st.SetMetadata(po.SpyName, po.SampleRate, po.Units, po.AggregationType)
samples := po.Val.Samples()
err = st.Put(po.StartTime, po.EndTime, samples, func(depth int, t time.Time, r *big.Rat, addons []segment.Addon) {
tk := po.Key.TreeKey(depth, t)
res, err := s.trees.GetOrCreate(tk)
if err != nil {
logrus.Errorf("trees cache for %v: %v", tk, err)
return
}
cachedTree := res.(*tree.Tree)
treeClone := po.Val.Clone(r)
for _, addon := range addons {
if res, ok := s.trees.Lookup(po.Key.TreeKey(addon.Depth, addon.T)); ok {
ta := res.(*tree.Tree)
ta.RLock()
treeClone.Merge(ta)
ta.RUnlock()
}
}
cachedTree.Lock()
cachedTree.Merge(treeClone)
cachedTree.Unlock()
s.trees.Put(tk, cachedTree)
})
if err != nil {
return err
}
s.segments.Put(sk, st)
return nil
}
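// GetInput selects profile data over a time range, either by an exact segment
// key or by a flameql query.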
type GetInput struct {
StartTime time.Time
EndTime time.Time
Key *segment.Key
Query *flameql.Query
}
type GetOutput struct {
Tree *tree.Tree
Timeline *segment.Timeline
SpyName string
SampleRate uint32
Units string
}
const averageAggregationType = "average"
func (s *Storage) Get(gi *GetInput) (*GetOutput, error) {
logger := logrus.WithFields(logrus.Fields{
"startTime": gi.StartTime.String(),
"endTime": gi.EndTime.String(),
})
var dimensionKeys func() []dimension.Key
switch {
case gi.Key != nil:
logger = logger.WithField("key", gi.Key.Normalized())
dimensionKeys = s.dimensionKeysByKey(gi.Key)
case gi.Query != nil:
logger = logger.WithField("query", gi.Query)
dimensionKeys = s.dimensionKeysByQuery(gi.Query)
default:
// Should never happen.
return nil, fmt.Errorf("key or query must be specified")
}
logger.Debug("storage.Get")
s.storageReadsTotal.Add(1)
var (
triesToMerge []merge.Merger
lastSegment *segment.Segment
writesTotal uint64
timeline = segment.GenerateTimeline(gi.StartTime, gi.EndTime)
aggregationType = "sum"
)
for _, k := range dimensionKeys() {
// TODO: refactor, store `Key`s in dimensions
parsedKey, err := segment.ParseKey(string(k))
if err != nil {
logrus.Errorf("parse key: %v: %v", string(k), err)
continue
}
key := parsedKey.SegmentKey()
res, ok := s.segments.Lookup(key)
if !ok {
continue
}
st := res.(*segment.Segment)
if st.AggregationType() == averageAggregationType {
aggregationType = averageAggregationType
}
timeline.PopulateTimeline(st)
lastSegment = st
st.Get(gi.StartTime, gi.EndTime, func(depth int, samples, writes uint64, t time.Time, r *big.Rat) {
if res, ok = s.trees.Lookup(parsedKey.TreeKey(depth, t)); ok {
triesToMerge = append(triesToMerge, res.(*tree.Tree).Clone(r))
writesTotal += writes
}
})
}
resultTrie := merge.MergeTriesSerially(runtime.NumCPU(), triesToMerge...)
if resultTrie == nil {
return nil, nil
}
t := resultTrie.(*tree.Tree)
if writesTotal > 0 && aggregationType == averageAggregationType {
t = t.Clone(big.NewRat(1, int64(writesTotal)))
}
return &GetOutput{
Tree: t,
Timeline: timeline,
SpyName: lastSegment.SpyName(),
SampleRate: lastSegment.SampleRate(),
Units: lastSegment.Units(),
}, nil
}
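// dimensionKeysByKey resolves a segment key to the dimension keys matching its
// application name and any non-reserved tags.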
func (s *Storage) dimensionKeysByKey(key *segment.Key) func() []dimension.Key {
return func() []dimension.Key {
d, ok := s.lookupAppDimension(key.AppName())
if !ok {
return nil
}
l := key.Labels()
if len(l) == 1 {
// No tags specified: return application dimension keys.
return d.Keys
}
dimensions := []*dimension.Dimension{d}
for k, v := range l {
if flameql.IsTagKeyReserved(k) {
continue
}
if d, ok = s.lookupDimensionKV(k, v); ok {
dimensions = append(dimensions, d)
}
}
if len(dimensions) == 1 {
// Tags specified but not found.
return nil
}
return dimension.Intersection(dimensions...)
}
}
func (s *Storage) dimensionKeysByQuery(qry *flameql.Query) func() []dimension.Key {
return func() []dimension.Key { return s.exec(context.TODO(), qry) }
}
func (s *Storage) iterateOverAllSegments(cb func(*segment.Key, *segment.Segment) error) error {
nameKey := "__name__"
var dimensions []*dimension.Dimension
s.labels.GetValues(nameKey, func(v string) bool {
dmInt, ok := s.dimensions.Lookup(nameKey + ":" + v)
if !ok {
return true
}
dimensions = append(dimensions, dmInt.(*dimension.Dimension))
return true
})
for _, rawSk := range dimension.Union(dimensions...) {
sk, _ := segment.ParseKey(string(rawSk))
stInt, ok := s.segments.Lookup(sk.SegmentKey())
if !ok {
continue
}
st := stInt.(*segment.Segment)
if err := cb(sk, st); err != nil {
return err
}
}
return nil
}
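// DeleteDataBefore drops trees older than the threshold from every segment and
// removes segments whose root node was deleted entirely.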
func (s *Storage) DeleteDataBefore(threshold time.Time) error {
return s.iterateOverAllSegments(func(sk *segment.Key, st *segment.Segment) error {
var err error
deletedRoot := st.DeleteDataBefore(threshold, func(depth int, t time.Time) {
tk := sk.TreeKey(depth, t)
if delErr := s.trees.Delete(tk); delErr != nil {
err = delErr
}
})
if err != nil {
return err
}
if deletedRoot {
if err := s.deleteSegmentAndRelatedData(sk); err != nil {
return err
}
}
return nil
})
}
type DeleteInput struct {
Key *segment.Key
}
var maxTime = time.Unix(1<<62, 999999999)
func (s *Storage) Delete(di *DeleteInput) error {
dimensions := make([]*dimension.Dimension, len(di.Key.Labels()))
i := 0
for k, v := range di.Key.Labels() {
dInt, ok := s.dimensions.Lookup(k + ":" + v)
if !ok {
return nil
}
dimensions[i] = dInt.(*dimension.Dimension)
i++
}
for _, sk := range dimension.Intersection(dimensions...) {
skk, _ := segment.ParseKey(string(sk))
stInt, ok := s.segments.Lookup(skk.SegmentKey())
if !ok {
continue
}
st := stInt.(*segment.Segment)
var err error
st.Get(zeroTime, maxTime, func(depth int, _, _ uint64, t time.Time, _ *big.Rat) {
treeKey := skk.TreeKey(depth, t)
err = s.trees.Delete(treeKey)
})
if err != nil {
return err
}
if err := s.deleteSegmentAndRelatedData(skk); err != nil {
return err
}
}
return nil
}
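// deleteSegmentAndRelatedData removes a segment together with its dictionary
// and its entries in every dimension that referenced it.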
func (s *Storage) deleteSegmentAndRelatedData(key *segment.Key) error {
if err := s.dicts.Delete(key.DictKey()); err != nil {
return err
}
if err := s.segments.Delete(key.SegmentKey()); err != nil {
return err
}
for k, v := range key.Labels() {
dInt, ok := s.dimensions.Lookup(k + ":" + v)
if !ok {
continue
}
d := dInt.(*dimension.Dimension)
d.Delete(dimension.Key(key.SegmentKey()))
}
return nil
}
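// Close stops the periodic tasks, flushes the caches (dictionaries last, since
// trees write to them), and closes the underlying badger databases.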
func (s *Storage) Close() error {
close(s.stop)
s.wg.Wait()
func() {
timer := prometheus.NewTimer(prometheus.ObserverFunc(s.storageCachesFlushTimer.Observe))
defer timer.ObserveDuration()
wg := sync.WaitGroup{}
wg.Add(3)
go func() { defer wg.Done(); s.dimensions.Flush() }()
go func() { defer wg.Done(); s.segments.Flush() }()
go func() { defer wg.Done(); s.trees.Flush() }()
wg.Wait()
// dictionary has to flush last because trees write to dictionaries
s.dicts.Flush()
}()
func() {
timer := prometheus.NewTimer(prometheus.ObserverFunc(s.storageBadgerCloseTimer.Observe))
defer timer.ObserveDuration()
wg := sync.WaitGroup{}
wg.Add(5)
go func() { defer wg.Done(); s.dbTrees.Close() }()
go func() { defer wg.Done(); s.dbDicts.Close() }()
go func() { defer wg.Done(); s.dbDimensions.Close() }()
go func() { defer wg.Done(); s.dbSegments.Close() }()
go func() { defer wg.Done(); s.db.Close() }()
wg.Wait()
}()
// this allows prometheus to collect metrics before pyroscope exits
if os.Getenv("PYROSCOPE_WAIT_AFTER_STOP") != "" {
time.Sleep(5 * time.Second)
}
return nil
}
func (s *Storage) GetKeys(cb func(_k string) bool) {
s.labels.GetKeys(cb)
}
func (s *Storage) GetValues(key string, cb func(v string) bool) {
s.labels.GetValues(key, func(v string) bool {
if key != "__name__" || !slices.StringContains(s.config.HideApplications, v) {
return cb(v)
}
return true
})
}
func (s *Storage) GetKeysByQuery(query string, cb func(_k string) bool) error {
parsedQuery, err := flameql.ParseQuery(query)
if err != nil {
return err
}
segmentKey, err := segment.ParseKey(parsedQuery.AppName + "{}")
if err != nil {
return err
}
dimensionKeys := s.dimensionKeysByKey(segmentKey)
resultSet := map[string]bool{}
for _, dk := range dimensionKeys() {
dkParsed, _ := segment.ParseKey(string(dk))
if dkParsed.AppName() == parsedQuery.AppName {
for k := range dkParsed.Labels() {
resultSet[k] = true
}
}
}
resultList := []string{}
for v := range resultSet {
resultList = append(resultList, v)
}
sort.Strings(resultList)
for _, v := range resultList {
if !cb(v) {
break
}
}
return nil
}
func (s *Storage) GetValuesByQuery(label string, query string, cb func(v string) bool) error {
parsedQuery, err := flameql.ParseQuery(query)
if err != nil {
return err
}
segmentKey, err := segment.ParseKey(parsedQuery.AppName + "{}")
if err != nil {
return err
}
dimensionKeys := s.dimensionKeysByKey(segmentKey)
resultSet := map[string]bool{}
for _, dk := range dimensionKeys() {
dkParsed, _ := segment.ParseKey(string(dk))
if v, ok := dkParsed.Labels()[label]; ok {
resultSet[v] = true
}
}
resultList := []string{}
for v := range resultSet {
resultList = append(resultList, v)
}
sort.Strings(resultList)
for _, v := range resultList {
if !cb(v) {
break
}
}
return nil
}
func (s *Storage) DiskUsage() map[string]bytesize.ByteSize {
res := map[string]bytesize.ByteSize{
"main": 0,
"trees": 0,
"dicts": 0,
"dimensions": 0,
"segments": 0,
}
for k := range res {
res[k] = dirSize(filepath.Join(s.config.StoragePath, k))
}
return res
}
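// dirSize sums the sizes of regular files under path; a walk error aborts the
// scan and is ignored, so the result may be partial.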
func dirSize(path string) (result bytesize.ByteSize) {
filepath.Walk(path, func(_ string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() {
result += bytesize.ByteSize(info.Size())
}
return nil
})
return result
}
func (s *Storage) CacheStats() map[string]interface{} {
return map[string]interface{}{
"dimensions_size": s.dimensions.Size(),
"segments_size": s.segments.Size(),
"dicts_size": s.dicts.Size(),
"trees_size": s.trees.Size(),
}
}
var zeroTime time.Time
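// lifetimeBasedRetentionThreshold returns the oldest start time Put still
// accepts; the zero time means retention is disabled.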
func (s *Storage) lifetimeBasedRetentionThreshold() time.Time {
var t time.Time
if s.config.Retention != 0 {
t = time.Now().Add(-1 * s.config.Retention)
}
return t
}
func (s *Storage) performFreeSpaceCheck() error {
freeSpace, err := disk.FreeSpace(s.config.StoragePath)
if err == nil {
if freeSpace < OutOfSpaceThreshold {
return errOutOfSpace
}
}
return nil
}
| [
"\"PYROSCOPE_WAIT_AFTER_STOP\""
] | [] | [
"PYROSCOPE_WAIT_AFTER_STOP"
] | [] | ["PYROSCOPE_WAIT_AFTER_STOP"] | go | 1 | 0 | |
test/test.py | import os
import datetime
try:
import unittest2 as unittest
except ImportError:
import unittest
if os.environ.get('TRAVIS') is None:
from db_connector import (DBConnector, GitHubData, PackageManagerData,
get_db_connection_string,)
from config import Config
from github import GitHub
from package_managers import PackageManagers
from sendgrid_email import SendGrid
try:
basestring
except NameError:
basestring = str
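# Note: every test body below is guarded by `os.environ.get('TRAVIS') is None`,
# so the suite is effectively skipped when running on Travis CI.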
class TestConfig(unittest.TestCase):
def setUp(self):
if os.environ.get('TRAVIS') is None:
self.config = Config()
def test_initialization(self):
if os.environ.get('TRAVIS') is None:
github_token = os.environ.get('GITHUB_TOKEN')
self.assertTrue(isinstance(github_token, basestring))
sendgrid_api_key = os.environ.get('SENDGRID_API_KEY')
self.assertTrue(isinstance(sendgrid_api_key, basestring))
mysql_db = os.environ.get('MYSQL_DB_URL')
self.assertTrue(isinstance(mysql_db, basestring))
self.assertTrue(isinstance(self.config.github_user, basestring))
self.assertTrue(isinstance(self.config.github_repos, list))
self.assertTrue(isinstance(self.config.package_manager_urls, list))
self.assertTrue(isinstance(self.config.to_email, basestring))
self.assertTrue(isinstance(self.config.from_email, basestring))
self.assertTrue(isinstance(self.config.email_subject, basestring))
self.assertTrue(isinstance(self.config.email_body, basestring))
def test_mysql_db_connection_string(self):
if os.environ.get('TRAVIS'):
return
mysql_str = 'mysql://user:pass@host:port/dbname'
connection_string = get_db_connection_string(mysql_str)
self.assertEqual(connection_string,
'mysql+pymysql://user:pass@host:port/dbname')
def test_sqllite_db_connection_string(self):
if os.environ.get('TRAVIS'):
return
# in memory
sqllite = 'sqlite://'
connection_string = get_db_connection_string(sqllite)
self.assertEqual(connection_string, 'sqlite://')
# relative
sqllite = 'sqlite:///foo.db'
connection_string = get_db_connection_string(sqllite)
self.assertEqual(connection_string, 'sqlite:///foo.db')
# absolute
sqllite = 'sqlite:////foo.db'
connection_string = get_db_connection_string(sqllite)
self.assertEqual(connection_string, 'sqlite:////foo.db')
class TestDBConnector(unittest.TestCase):
def setUp(self):
if os.environ.get('TRAVIS') is None:
self.db = DBConnector()
def test_add_and_delete_data(self):
if os.environ.get('TRAVIS') is None:
github_data_import = GitHubData(
date_updated=datetime.datetime.now(),
language='repo_name',
pull_requests=0,
open_issues=0,
number_of_commits=0,
number_of_branches=0,
number_of_releases=0,
number_of_contributors=0,
number_of_watchers=0,
number_of_stargazers=0,
number_of_forks=0
)
res = self.db.add_data(github_data_import)
self.assertTrue(isinstance(res, GitHubData))
res = self.db.delete_data(res.id, 'github_data')
self.assertTrue(res)
packagedata = PackageManagerData(
date_updated=datetime.datetime.now(),
csharp_downloads=0,
nodejs_downloads=0,
php_downloads=0,
python_downloads=0,
ruby_downloads=0
)
res = self.db.add_data(packagedata)
self.assertTrue(isinstance(res, PackageManagerData))
res = self.db.delete_data(res.id, 'package_manager_data')
self.assertTrue(res)
def test_get_data(self):
if os.environ.get('TRAVIS') is None:
github_data = self.db.get_data(GitHubData)
self.assertTrue(isinstance(github_data, list))
self.assertTrue(isinstance(github_data[0], GitHubData))
class TestGitHub(unittest.TestCase):
def setUp(self):
if os.environ.get('TRAVIS') is None:
self.github = GitHub()
self.db = DBConnector()
self.config = Config()
def test_update_library_data(self):
if os.environ.get('TRAVIS') is None:
res = self.github.update_library_data(self.config.github_user,
self.config.github_repos[0])
self.assertTrue(isinstance(res, GitHubData))
res = self.db.delete_data(res.id, 'github_data')
self.assertTrue(res)
class TestPackageManagers(unittest.TestCase):
def setUp(self):
if os.environ.get('TRAVIS') is None:
self.pm = PackageManagers()
self.db = DBConnector()
self.config = Config()
def test_update_package_manager_data(self):
if os.environ.get('TRAVIS') is None:
res = self.pm.update_package_manager_data(
self.config.package_manager_urls)
self.assertTrue(isinstance(res, PackageManagerData))
res = self.db.delete_data(res.id, 'package_manager_data')
self.assertTrue(res)
class TestSendGridEmail(unittest.TestCase):
def setUp(self):
if os.environ.get('TRAVIS') is None:
self.sg = SendGrid()
self.config = Config()
def test_send_email(self):
if os.environ.get('TRAVIS') is None:
res = self.sg.send_email(
'[email protected]',
self.config.from_email,
self.config.email_subject,
self.config.email_body
)
self.assertEqual(202, res[0])
class TestExportTable(unittest.TestCase):
# Corresponds to schema in `db/data_schema.sql`
header_row = "id,date_updated,language,pull_requests,open_issues,"\
"number_of_commits,number_of_branches,number_of_releases,"\
"number_of_contributors,number_of_watchers,"\
"number_of_stargazers,number_of_forks\n"
def setUp(self):
if os.environ.get('TRAVIS') is None:
self.github = GitHub()
self.db = DBConnector()
self.config = Config()
self.github.update_library_data(self.config.github_user,
self.config.github_repos[0])
self.filename = "./csv/{}.csv".format(GitHubData.__tablename__)
def test_file_export_succeeds(self):
if os.environ.get('TRAVIS') is None:
self.assertFalse(os.path.exists(self.filename))
self.db.export_table_to_csv(GitHubData)
self.assertTrue(os.path.exists(self.filename))
def test_file_export_has_correct_data(self):
if os.environ.get('TRAVIS') is None:
self.db.export_table_to_csv(GitHubData)
with open(self.filename, 'r') as fp:
exported_data = fp.readlines()
# Table has correct header
self.assertEqual(exported_data[0], self.header_row)
# Table exported correct number of rows
num_exported_rows = len(exported_data) - 1 # exclude header
num_db_rows = len(self.db.get_data(GitHubData))
self.assertEqual(num_exported_rows, num_db_rows)
def tearDown(self):
if os.environ.get('TRAVIS') is None:
os.remove(self.filename)
class TestLicenseYear(unittest.TestCase):
def setUp(self):
self.license_file = 'LICENSE.txt'
def test_license_year(self):
copyright_line = ''
with open(self.license_file, 'r') as f:
for line in f:
if line.startswith('Copyright'):
copyright_line = line.strip()
break
self.assertEqual('Copyright (c) 2016-%s SendGrid, Inc.' %
datetime.datetime.now().year, copyright_line)
if __name__ == '__main__':
unittest.main()
| [] | [] | [
"SENDGRID_API_KEY",
"MYSQL_DB_URL",
"TRAVIS",
"GITHUB_TOKEN"
] | [] | ["SENDGRID_API_KEY", "MYSQL_DB_URL", "TRAVIS", "GITHUB_TOKEN"] | python | 4 | 0 | |
database/db_store.py | from .schema import Base, User
import uuid
import json
from datetime import datetime
import os
from sqlalchemy import create_engine, distinct, or_
from sqlalchemy.orm import sessionmaker
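# DbStore wraps a SQLAlchemy engine/session for MySQL, PostgreSQL or SQLite,
# reading connection settings from kwargs or the DB_* environment variables.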
class DbStore(object):
def __init__(self, **kwargs):
if kwargs.get('db_type', 'postgresql') == 'mysql':
host = kwargs.get('host', os.environ.get('DB_HOST', 'mysql-dc'))
port = kwargs.get('port', os.environ.get('DB_PORT', '3307'))
user = kwargs.get('user', os.environ.get('DB_USER', 'datacleaning'))
database = kwargs.get('database', os.environ.get('DB_NAME', 'datacleaning'))
password = kwargs.get('password', os.environ.get('DB_PASSWD', 'datacleaning'))
db_connection_url = 'mysql+pymysql://{}:{}@{}:{}/{}'.format(
user, password, host, port, database
)
elif kwargs.get('db_type', 'postgresql') == 'postgresql':
host = kwargs.get('host', os.environ.get('DB_HOST', '192.168.100.103'))
port = kwargs.get('port', os.environ.get('DB_PORT', '10202'))
user = kwargs.get('user', os.environ.get('DB_USER', 'postgres'))
database = kwargs.get('database', os.environ.get('DB_NAME', 'sugar2'))
password = kwargs.get('password', os.environ.get('DB_PASSWD', 'postgres'))
db_connection_url = 'postgresql://{}:{}@{}:{}/{}'.format(
user, password, host, port, database
)
        else:
            # Any other db_type falls back to a SQLite database at file_path.
            db_connection_url = 'sqlite:///{}'.format(kwargs.get('file_path'))
self._engine = create_engine(db_connection_url)
self._sessionmaker = sessionmaker(bind=self._engine)
self._session = None
self._define_tables()
def _define_tables(self):
Base.metadata.create_all(bind=self._engine)
    def __enter__(self):
        self.connect()
        return self
def connect(self):
self._session = self._sessionmaker()
def __exit__(self, exception_type, exception_value, traceback):
self.disconnect()
def commit(self):
try:
self._session.commit()
except Exception as ex:
raise ex
def rollback(self):
try:
self._session.rollback()
except Exception as ex:
raise ex
# Ensures that future database queries load fresh data from underlying database
def expire(self):
self._session.expire_all()
def disconnect(self, commit=True):
if self._session is not None:
if commit:
self._session.commit()
else:
self._session.rollback()
self._session.close()
self._session = None
def get_user_by_id(self, user_id):
return self._session.query(User).filter(User.id==user_id).one() | [] | [] | [
"DB_HOST",
"DB_PORT",
"DB_PASSWD",
"DB_NAME",
"DB_USER"
] | [] | ["DB_HOST", "DB_PORT", "DB_PASSWD", "DB_NAME", "DB_USER"] | python | 5 | 0 | |
vulnerabilities/importers/github.py | # Copyright (c) nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/vulnerablecode/
# The VulnerableCode software is licensed under the Apache License version 2.0.
# Data generated with VulnerableCode require an acknowledgment.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with VulnerableCode or any VulnerableCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with VulnerableCode and provided on an 'AS IS' BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# VulnerableCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# VulnerableCode is a free software from nexB Inc. and others.
# Visit https://github.com/nexB/vulnerablecode/ for support and download.
import asyncio
import os
import dataclasses
import json
from typing import Set
from typing import Tuple
from typing import List
from typing import Mapping
from typing import Optional
import requests
from dephell_specifier import RangeSpecifier
from packageurl import PackageURL
from vulnerabilities.data_source import Advisory
from vulnerabilities.data_source import DataSource
from vulnerabilities.data_source import DataSourceConfiguration
from vulnerabilities.data_source import Reference
from vulnerabilities.data_source import VulnerabilitySeverity
from vulnerabilities.package_managers import MavenVersionAPI
from vulnerabilities.package_managers import NugetVersionAPI
from vulnerabilities.package_managers import ComposerVersionAPI
from vulnerabilities.package_managers import PypiVersionAPI
from vulnerabilities.package_managers import RubyVersionAPI
from vulnerabilities.severity_systems import scoring_systems
# The first '%s' is the ecosystem, one of {'MAVEN', 'COMPOSER', 'NUGET', 'RUBYGEMS', 'PIP'}.
# The second '%s' is the pagination cursor: it is '' for the first request,
# because there is no endCursor yet, and 'after: "{endCursor}"' for all
# subsequent requests.
query = """
query{
securityVulnerabilities(first: 100, ecosystem: %s, %s) {
edges {
node {
advisory {
identifiers {
type
value
}
summary
references {
url
}
severity
}
package {
name
}
vulnerableVersionRange
}
}
pageInfo {
hasNextPage
endCursor
}
}
}
"""
class GitHubTokenError(Exception):
pass
@dataclasses.dataclass
class GitHubAPIDataSourceConfiguration(DataSourceConfiguration):
endpoint: str
ecosystems: list
class GitHubAPIDataSource(DataSource):
CONFIG_CLASS = GitHubAPIDataSourceConfiguration
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
try:
self.gh_token = os.environ["GH_TOKEN"]
except KeyError:
raise GitHubTokenError("Environment variable GH_TOKEN is missing")
def __enter__(self):
self.advisories = self.fetch()
def set_api(self, packages):
asyncio.run(self.version_api.load_api(packages))
def updated_advisories(self) -> Set[Advisory]:
return self.batch_advisories(self.process_response())
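    # Page through the GitHub GraphQL security advisories for every configured
    # ecosystem, following pageInfo.endCursor until hasNextPage is false.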
def fetch(self) -> Mapping[str, List[Mapping]]:
headers = {"Authorization": "token " + self.gh_token}
api_data = {}
for ecosystem in self.config.ecosystems:
api_data[ecosystem] = []
end_cursor_exp = ""
while True:
query_json = {"query": query % (ecosystem, end_cursor_exp)}
resp = requests.post(self.config.endpoint, headers=headers, json=query_json).json()
if resp.get("message") == "Bad credentials":
raise GitHubTokenError("Invalid GitHub token")
end_cursor = resp["data"]["securityVulnerabilities"]["pageInfo"]["endCursor"]
end_cursor_exp = "after: {}".format('"{}"'.format(end_cursor))
api_data[ecosystem].append(resp)
if not resp["data"]["securityVulnerabilities"]["pageInfo"]["hasNextPage"]:
break
return api_data
def set_version_api(self, ecosystem: str) -> None:
versioners = {
"MAVEN": MavenVersionAPI,
"NUGET": NugetVersionAPI,
"COMPOSER": ComposerVersionAPI,
"PIP": PypiVersionAPI,
"RUBYGEMS": RubyVersionAPI,
}
versioner = versioners.get(ecosystem)
if versioner:
self.version_api = versioner()
self.set_api(self.collect_packages(ecosystem))
@staticmethod
def process_name(ecosystem: str, pkg_name: str) -> Optional[Tuple[Optional[str], str]]:
if ecosystem == "MAVEN":
artifact_comps = pkg_name.split(":")
if len(artifact_comps) != 2:
return
ns, name = artifact_comps
return ns, name
if ecosystem == "COMPOSER":
try:
vendor, name = pkg_name.split("/")
except ValueError:
# TODO log this
return None
return vendor, name
if ecosystem == "NUGET" or ecosystem == "PIP" or ecosystem == "RUBYGEMS":
return None, pkg_name
@staticmethod
def extract_references(reference_data):
references = []
for ref in reference_data:
url = ref["url"]
if "GHSA-" in url.upper():
reference = Reference(url=url, reference_id=url.split("/")[-1])
else:
reference = Reference(url=url)
references.append(reference)
return references
def collect_packages(self, ecosystem):
packages = set()
for page in self.advisories[ecosystem]:
for adv in page["data"]["securityVulnerabilities"]["edges"]:
packages.add(adv["node"]["package"]["name"])
return packages
def process_response(self) -> List[Advisory]:
adv_list = []
for ecosystem in self.advisories:
self.set_version_api(ecosystem)
pkg_type = self.version_api.package_type
for resp_page in self.advisories[ecosystem]:
for adv in resp_page["data"]["securityVulnerabilities"]["edges"]:
name = adv["node"]["package"]["name"]
if self.process_name(ecosystem, name):
ns, pkg_name = self.process_name(ecosystem, name)
aff_range = adv["node"]["vulnerableVersionRange"]
aff_vers, unaff_vers = self.categorize_versions(
aff_range, self.version_api.get(name)
)
affected_purls = {
PackageURL(name=pkg_name, namespace=ns, version=version, type=pkg_type)
for version in aff_vers
}
unaffected_purls = {
PackageURL(name=pkg_name, namespace=ns, version=version, type=pkg_type)
for version in unaff_vers
}
else:
affected_purls = set()
unaffected_purls = set()
cve_ids = set()
vuln_references = self.extract_references(adv["node"]["advisory"]["references"])
vuln_desc = adv["node"]["advisory"]["summary"]
for identifier in adv["node"]["advisory"]["identifiers"]:
# collect CVEs
if identifier["type"] == "CVE":
cve_ids.add(identifier["value"])
# attach the GHSA with severity score
if identifier["type"] == "GHSA":
for ref in vuln_references:
if ref.reference_id == identifier["value"]:
ref.severities = [
VulnerabilitySeverity(
system=scoring_systems["cvssv3.1_qr"],
value=adv["node"]["advisory"]["severity"],
)
]
# Each Node has only one GHSA, hence exit after attaching
# score to this GHSA
break
for cve_id in cve_ids:
adv_list.append(
Advisory(
vulnerability_id=cve_id,
summary=vuln_desc,
impacted_package_urls=affected_purls,
resolved_package_urls=unaffected_purls,
vuln_references=vuln_references,
)
)
return adv_list
@staticmethod
def categorize_versions(
version_range: str, all_versions: Set[str]
) -> Tuple[Set[str], Set[str]]: # nopep8
version_range = RangeSpecifier(version_range)
affected_versions = {version for version in all_versions if version in version_range}
return (affected_versions, all_versions - affected_versions)
| [] | [] | [
"GH_TOKEN"
] | [] | ["GH_TOKEN"] | python | 1 | 0 | |
api/images/models.py | from django.db import models
import pandas as pd
import numpy as np
import keras
import tensorflow as tf
from keras.preprocessing.image import img_to_array
from django.conf import settings
from keras.preprocessing import image
from tensorflow.keras.models import load_model
import os
from PIL import Image as Img
# from tensorflow.python import ops
import tensorflow.compat.v1 as tfv1
# from tensorflow.keras.applications.inception_resnet_v2 import InceptionResNetV2, decode_predictions, preprocess_input
# Create your models here.
class Image(models.Model):
picture = models.ImageField()
classified = models.CharField(max_length=200, blank=True)
uploaded = models.DateTimeField(auto_now_add=True)
def __str__(self):
return "Image classified at {}".format(self.uploaded.strftime('%Y-%m-%d %H:%M'))
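    # Classify the uploaded picture with the pre-trained model before the
    # instance is persisted; on failure the result is 'failed to classify'.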
def save(self, *args, **kwargs):
LABELS = ['avogado','apple','banana','bean_sprouts','cabbage','carrot',
'chicken','eggs','grape','green_onions','milk','miso','onion','orange',
'pineapple','peach','potatoes','salt','shrimp','strawberry','tofu','yogurt']
img_pil = Img.open(self.picture)
img_path = os.path.join(os.path.dirname(settings.BASE_DIR), 'media_root/' + str(self.picture))
img_pil.save(img_path, "JPEG")
img = image.load_img(img_path, target_size=(224,224))
img_array = image.img_to_array(img)
        to_pred = np.expand_dims(img_array, axis=0)  # shape: (1, 224, 224, 3)
try:
file_model = os.path.join(os.path.dirname(settings.BASE_DIR), 'model/vgg16_sample_22.h5')
graph = tfv1.get_default_graph()
with graph.as_default():
model = load_model(file_model)
print(dict(zip(LABELS, np.squeeze(model.predict(to_pred)).tolist())))
pred = LABELS[np.argmax(model.predict(to_pred))]
self.classified = str(pred)
print(f'classified as {pred}')
except Exception as e:
template = "An exception of type {0} occurred. Arguments:\n{1!r}"
message = template.format(type(e).__name__, e.args)
print(message)
print('failed to classify')
self.classified = 'failed to classify'
if os.path.exists(img_path):
os.remove(img_path)
else:
print("The file does not exist")
super().save(*args, **kwargs) | [] | [] | [] | [] | [] | python | null | null | null |
cli/pkg/helm/helm_helper.go | // +build !nokubectl
// Copyright © 2019 NAME HERE <EMAIL ADDRESS>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package helm
import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"strings"
"helm.sh/helm/v3/pkg/action"
"helm.sh/helm/v3/pkg/chart"
kubeclient "helm.sh/helm/v3/pkg/kube"
"helm.sh/helm/v3/pkg/release"
"helm.sh/helm/v3/pkg/storage"
"helm.sh/helm/v3/pkg/storage/driver"
appsv1 "k8s.io/api/apps/v1"
kyaml "k8s.io/apimachinery/pkg/util/yaml"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/client-go/rest"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"github.com/keptn/keptn/cli/pkg/logging"
keptnutils "github.com/keptn/kubernetes-utils/pkg"
"k8s.io/client-go/tools/clientcmd"
)
// Helper provides helper functions for common Helm operations
type Helper struct {
}
// NewHelper creates a Helper
func NewHelper() Helper {
return Helper{}
}
// DownloadChart downloads a Helm chart using the provided repo URL
func (c Helper) DownloadChart(chartRepoURL string) (*chart.Chart, error) {
resp, err := http.Get(chartRepoURL)
if err != nil {
return nil, errors.New("error retrieving Keptn Helm Chart at " + chartRepoURL + ": " + err.Error())
}
defer resp.Body.Close()
bytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, errors.New("error retrieving Keptn Helm Chart at " + chartRepoURL + ": " + err.Error())
}
ch, err := keptnutils.LoadChart(bytes)
if err != nil {
return nil, errors.New("error retrieving Keptn Helm Chart at " + chartRepoURL + ": " + err.Error())
}
return ch, err
}
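// newActionConfig builds a Helm action.Configuration for the given namespace,
// backed by the cluster's secret-based release storage.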
func newActionConfig(config *rest.Config, namespace string) (*action.Configuration, error) {
	logFunc := func(format string, v ...interface{}) {
		// No-op logger: Helm's log output is intentionally discarded.
	}
restClientGetter := newConfigFlags(config, namespace)
kubeClient := &kubeclient.Client{
Factory: cmdutil.NewFactory(restClientGetter),
Log: logFunc,
}
client, err := kubeClient.Factory.KubernetesClientSet()
if err != nil {
return nil, err
}
s := driver.NewSecrets(client.CoreV1().Secrets(namespace))
s.Log = logFunc
return &action.Configuration{
RESTClientGetter: restClientGetter,
Releases: storage.Init(s),
KubeClient: kubeClient,
Log: logFunc,
}, nil
}
func newConfigFlags(config *rest.Config, namespace string) *genericclioptions.ConfigFlags {
return &genericclioptions.ConfigFlags{
Namespace: &namespace,
APIServer: &config.Host,
CAFile: &config.CAFile,
BearerToken: &config.BearerToken,
}
}
// GetHistory returns the history for a Helm release
func (c Helper) GetHistory(releaseName, namespace string) ([]*release.Release, error) {
logging.PrintLog(fmt.Sprintf("Check availability of Helm release %s in namespace %s", releaseName, namespace), logging.VerboseLevel)
config, err := clientcmd.BuildConfigFromFlags("", getKubeConfig())
if err != nil {
return nil, err
}
cfg, err := newActionConfig(config, namespace)
if err != nil {
return nil, err
}
histClient := action.NewHistory(cfg)
return histClient.Run(releaseName)
}
// UpgradeChart upgrades/installs the provided chart
func (c Helper) UpgradeChart(ch *chart.Chart, releaseName, namespace string, vals map[string]interface{}) error {
if len(ch.Templates) > 0 {
logging.PrintLog(fmt.Sprintf("Start upgrading Helm Chart %s in namespace %s", releaseName, namespace), logging.InfoLevel)
config, err := clientcmd.BuildConfigFromFlags("", getKubeConfig())
if err != nil {
return err
}
cfg, err := newActionConfig(config, namespace)
if err != nil {
return err
}
histClient := action.NewHistory(cfg)
var release *release.Release
if _, err = histClient.Run(releaseName); err == driver.ErrReleaseNotFound {
iCli := action.NewInstall(cfg)
iCli.Namespace = namespace
iCli.ReleaseName = releaseName
iCli.Wait = true
release, err = iCli.Run(ch, vals)
} else {
iCli := action.NewUpgrade(cfg)
iCli.Namespace = namespace
iCli.Wait = true
iCli.ReuseValues = true
release, err = iCli.Run(releaseName, ch, vals)
}
if err != nil {
return fmt.Errorf("Error when installing/upgrading Helm Chart %s in namespace %s: %s",
releaseName, namespace, err.Error())
}
if release != nil {
logging.PrintLog(release.Manifest, logging.VerboseLevel)
if err := waitForDeploymentsOfHelmRelease(release.Manifest); err != nil {
return err
}
} else {
logging.PrintLog("Release is nil", logging.InfoLevel)
}
logging.PrintLog(fmt.Sprintf("Finished upgrading Helm Chart %s in namespace %s", releaseName, namespace), logging.InfoLevel)
} else {
logging.PrintLog("Upgrade not done since this is an empty Helm Chart", logging.InfoLevel)
}
return nil
}
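// getKubeConfig returns the kubeconfig path from the KUBECONFIG environment
// variable, falling back to ~/.kube/config.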
func getKubeConfig() string {
if os.Getenv("KUBECONFIG") != "" {
return keptnutils.ExpandTilde(os.Getenv("KUBECONFIG"))
}
return filepath.Join(
keptnutils.UserHomeDir(), ".kube", "config",
)
}
// UninstallRelease uninstalls the provided release
func (c Helper) UninstallRelease(releaseName, namespace string) error {
logging.PrintLog(fmt.Sprintf("Start uninstalling Helm release %s in namespace %s", releaseName, namespace), logging.InfoLevel)
config, err := clientcmd.BuildConfigFromFlags("", getKubeConfig())
if err != nil {
return err
}
cfg, err := newActionConfig(config, namespace)
if err != nil {
return err
}
iCli := action.NewUninstall(cfg)
_, err = iCli.Run(releaseName)
if err != nil {
return fmt.Errorf("Error when uninstalling Helm release %s in namespace %s: %s",
releaseName, namespace, err.Error())
}
return nil
}
func getDeployments(helmManifest string) []*appsv1.Deployment {
deployments := []*appsv1.Deployment{}
dec := kyaml.NewYAMLToJSONDecoder(strings.NewReader(helmManifest))
for {
var dpl appsv1.Deployment
err := dec.Decode(&dpl)
if err == io.EOF {
break
}
if err != nil {
continue
}
if keptnutils.IsDeployment(&dpl) {
deployments = append(deployments, &dpl)
}
}
return deployments
}
func waitForDeploymentsOfHelmRelease(helmManifest string) error {
depls := getDeployments(helmManifest)
for _, depl := range depls {
if err := keptnutils.WaitForDeploymentToBeRolledOut(false, depl.Name, depl.Namespace); err != nil {
return fmt.Errorf("Error when waiting for deployment %s in namespace %s: %s", depl.Name, depl.Namespace, err.Error())
}
}
return nil
}
| [
"\"KUBECONFIG\"",
"\"KUBECONFIG\""
] | [] | [
"KUBECONFIG"
] | [] | ["KUBECONFIG"] | go | 1 | 0 | |
kivy/input/providers/probesysfs.py | '''
Auto Create Input Provider Config Entry for Available MT Hardware (linux only).
===============================================================================
Thanks to Marc Tardif for the probing code, taken from scan-for-mt-device.
The device discovery is done by this provider. However, the reading of
input can be performed by other providers like: hidinput, mtdev and
linuxwacom. mtdev is used prior to other providers. For more
information about mtdev, check :py:class:`~kivy.input.providers.mtdev`.
Here is an example of auto creation::
[input]
# using mtdev
device_%(name)s = probesysfs,provider=mtdev
# using hidinput
device_%(name)s = probesysfs,provider=hidinput
# using mtdev with a match on name
device_%(name)s = probesysfs,provider=mtdev,match=acer
# using hidinput with custom parameters to hidinput (all on one line)
%(name)s = probesysfs,
provider=hidinput,param=min_pressure=1,param=max_pressure=99
# you can also match your wacom touchscreen
touch = probesysfs,match=E3 Finger,provider=linuxwacom,
select_all=1,param=mode=touch
# and your wacom pen
pen = probesysfs,match=E3 Pen,provider=linuxwacom,
select_all=1,param=mode=pen
By default, the ProbeSysfs module enumerates hardware from the /sys/class/input
tree and configures only devices that report the ABS_MT_POSITION_X capability.
Some hardware, such as wacom screens, does not report this capability. You can
bypass this filter by putting select_all=1 in your config line. Add use_mouse=1
to also include touchscreen hardware that offers core pointer functionality.
'''
__all__ = ('ProbeSysfsHardwareProbe', )
import os
from os.path import sep
if 'KIVY_DOC' in os.environ:
ProbeSysfsHardwareProbe = None
else:
import ctypes
from re import match, IGNORECASE
from glob import glob
from subprocess import Popen, PIPE
from kivy.logger import Logger
from kivy.input.provider import MotionEventProvider
from kivy.input.providers.mouse import MouseMotionEventProvider
from kivy.input.factory import MotionEventFactory
from kivy.config import _is_rpi
EventLoop = None
# See linux/input.h
ABS_MT_POSITION_X = 0x35
_cache_input = None
_cache_xinput = None
class Input(object):
def __init__(self, path):
query_xinput()
self.path = path
@property
def device(self):
base = os.path.basename(self.path)
return os.path.join("/dev", "input", base)
@property
def name(self):
path = os.path.join(self.path, "device", "name")
return read_line(path)
def get_capabilities(self):
path = os.path.join(self.path, "device", "capabilities", "abs")
line = "0"
try:
line = read_line(path)
except (IOError, OSError):
return []
capabilities = []
long_bit = ctypes.sizeof(ctypes.c_long) * 8
for i, word in enumerate(line.split(" ")):
word = int(word, 16)
subcapabilities = [bool(word & 1 << i)
for i in range(long_bit)]
capabilities[:0] = subcapabilities
return capabilities
def has_capability(self, capability):
capabilities = self.get_capabilities()
return len(capabilities) > capability and capabilities[capability]
@property
def is_mouse(self):
return self.device in _cache_xinput
def getout(*args):
try:
return Popen(args, stdout=PIPE).communicate()[0]
except OSError:
return ''
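    # Ask xinput which enabled devices X already drives as core pointers; probe()
    # uses this list to skip mouse-like devices unless use_mouse is set.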
def query_xinput():
global _cache_xinput
if _cache_xinput is None:
_cache_xinput = []
devids = getout('xinput', '--list', '--id-only')
for did in devids.splitlines():
devprops = getout('xinput', '--list-props', did)
evpath = None
for prop in devprops.splitlines():
prop = prop.strip()
if (prop.startswith(b'Device Enabled') and
prop.endswith(b'0')):
evpath = None
break
if prop.startswith(b'Device Node'):
try:
evpath = prop.split('"')[1]
except Exception:
evpath = None
if evpath:
_cache_xinput.append(evpath)
def get_inputs(path):
global _cache_input
if _cache_input is None:
event_glob = os.path.join(path, "event*")
_cache_input = [Input(x) for x in glob(event_glob)]
return _cache_input
def read_line(path):
f = open(path)
try:
return f.readline().strip()
finally:
f.close()
class ProbeSysfsHardwareProbe(MotionEventProvider):
def __new__(self, device, args):
# hack to not return an instance of this provider.
# :)
instance = super(ProbeSysfsHardwareProbe, self).__new__(self)
instance.__init__(device, args)
def __init__(self, device, args):
super(ProbeSysfsHardwareProbe, self).__init__(device, args)
self.provider = 'mtdev'
self.match = None
self.input_path = '/sys/class/input'
self.select_all = True if _is_rpi else False
self.use_mouse = False
self.use_regex = False
self.args = []
args = args.split(',')
for arg in args:
if arg == '':
continue
arg = arg.split('=', 1)
# ensure it's a key = value
if len(arg) != 2:
Logger.error('ProbeSysfs: invalid parameters %s, not'
' key=value format' % arg)
continue
key, value = arg
if key == 'match':
self.match = value
elif key == 'provider':
self.provider = value
elif key == 'use_regex':
self.use_regex = bool(int(value))
elif key == 'select_all':
self.select_all = bool(int(value))
elif key == 'use_mouse':
self.use_mouse = bool(int(value))
elif key == 'param':
self.args.append(value)
else:
Logger.error('ProbeSysfs: unknown %s option' % key)
continue
self.probe()
def should_use_mouse(self):
return (self.use_mouse or
not any(p for p in EventLoop.input_providers
if isinstance(p, MouseMotionEventProvider)))
def probe(self):
global EventLoop
from kivy.base import EventLoop
inputs = get_inputs(self.input_path)
Logger.debug('ProbeSysfs: using probesysfs!')
use_mouse = self.should_use_mouse()
if not self.select_all:
inputs = [x for x in inputs if
x.has_capability(ABS_MT_POSITION_X) and
(use_mouse or not x.is_mouse)]
for device in inputs:
Logger.debug('ProbeSysfs: found device: %s at %s' % (
device.name, device.device))
# must ignore ?
if self.match:
if self.use_regex:
if not match(self.match, device.name, IGNORECASE):
Logger.debug('ProbeSysfs: device not match the'
' rule in config, ignoring.')
continue
else:
if self.match not in device.name:
continue
Logger.info('ProbeSysfs: device match: %s' % device.device)
d = device.device
devicename = self.device % dict(name=d.split(sep)[-1])
provider = MotionEventFactory.get(self.provider)
if provider is None:
Logger.info('ProbeSysfs: Unable to find provider %s' %
self.provider)
Logger.info('ProbeSysfs: fallback on hidinput')
provider = MotionEventFactory.get('hidinput')
if provider is None:
Logger.critical('ProbeSysfs: no input provider found'
' to handle this device !')
continue
instance = provider(devicename, '%s,%s' % (
device.device, ','.join(self.args)))
if instance:
EventLoop.add_input_provider(instance)
MotionEventFactory.register('probesysfs', ProbeSysfsHardwareProbe)
| [] | [] | [] | [] | [] | python | 0 | 0 | |
buildSrc/src/main/groovy/com/google/firebase/gradle/plugins/SdkUtil.java | // Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.firebase.gradle.plugins;
import com.android.build.gradle.LibraryExtension;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.Properties;
import org.gradle.api.GradleException;
import org.gradle.api.Project;
public final class SdkUtil {
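  // Resolves the Android SDK directory from sdk.dir in local.properties,
  // falling back to the ANDROID_HOME environment variable.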
public static File getSdkDir(Project project) {
Properties properties = new Properties();
File localProperties = project.getRootProject().file("local.properties");
if (localProperties.exists()) {
try (FileInputStream fis = new FileInputStream(localProperties)) {
properties.load(fis);
} catch (IOException ex) {
throw new GradleException("Could not load local.properties", ex);
}
}
String sdkDir = properties.getProperty("sdk.dir");
if (sdkDir != null) {
return project.file(sdkDir);
}
String androidHome = System.getenv("ANDROID_HOME");
if (androidHome == null) {
throw new GradleException("No sdk.dir or ANDROID_HOME set.");
}
return project.file(androidHome);
}
public static File getAndroidJar(Project project) {
LibraryExtension android = project.getExtensions().findByType(LibraryExtension.class);
if (android == null) {
throw new GradleException("Project " + project.getPath() + " is not an android library.");
}
return new File(
getSdkDir(project),
String.format("/platforms/%s/android.jar", android.getCompileSdkVersion()));
}
}
| [
"\"ANDROID_HOME\""
] | [] | [
"ANDROID_HOME"
] | [] | ["ANDROID_HOME"] | java | 1 | 0 | |
cmd/src/search.go | package main
import (
"bytes"
"fmt"
"os"
"os/exec"
"strconv"
"strings"
)
func init() {
// flagSet := flag.NewFlagSet("search", flag.ExitOnError)
// var (
// jsonFlag = flagSet.Bool("json", false, "Whether or not to output results as JSON")
// explainJSONFlag = flagSet.Bool("explain-json", false, "Explain the JSON output schema and exit.")
// apiFlags = newAPIFlags(flagSet)
// lessFlag = flagSet.Bool("less", true, "Pipe output to 'less -R' (only if stdout is terminal, and not json flag)")
// )
// handler := func(args []string) error {
// flagSet.Parse(args)
// if *explainJSONFlag {
// fmt.Println(searchJSONExplanation)
// return nil
// }
// if flagSet.NArg() != 1 {
// return &usageError{errors.New("expected exactly one argument: the search query")}
// }
// queryString := flagSet.Arg(0)
// // For pagination, pipe our own output to 'less -R'
// if *lessFlag && !*jsonFlag && isatty.IsTerminal(os.Stdout.Fd()) {
// cmdPath, err := os.Executable()
// if err != nil {
// return err
// }
// srcCmd := exec.Command(cmdPath, append([]string{"search"}, args...)...)
// // Because we do not want the default "no color when piping" behavior to take place.
// srcCmd.Env = envSetDefault(os.Environ(), "COLOR", "t")
// srcStderr, err := srcCmd.StderrPipe()
// if err != nil {
// return err
// }
// srcStdout, err := srcCmd.StdoutPipe()
// if err != nil {
// return err
// }
// if err := srcCmd.Start(); err != nil {
// return err
// }
// lessCmd := exec.Command("less", "-R")
// lessCmd.Stdin = io.MultiReader(srcStdout, srcStderr)
// lessCmd.Stderr = os.Stderr
// lessCmd.Stdout = os.Stdout
// return lessCmd.Run()
// }
// query := `fragment FileMatchFields on FileMatch {
// repository {
// name
// url
// }
// file {
// name
// path
// url
// commit {
// oid
// }
// }
// lineMatches {
// preview
// lineNumber
// offsetAndLengths
// limitHit
// }
// }
// fragment CommitSearchResultFields on CommitSearchResult {
// messagePreview {
// value
// highlights{
// line
// character
// length
// }
// }
// diffPreview {
// value
// highlights {
// line
// character
// length
// }
// }
// commit {
// repository {
// name
// }
// oid
// url
// subject
// author {
// date
// person {
// displayName
// }
// }
// }
// }
// fragment RepositoryFields on Repository {
// name
// url
// externalURLs {
// serviceType
// url
// }
// }
// query ($query: String!) {
// search(query: $query) {
// results {
// results{
// __typename
// ... on FileMatch {
// ...FileMatchFields
// }
// ... on CommitSearchResult {
// ...CommitSearchResultFields
// }
// ... on Repository {
// ...RepositoryFields
// }
// }
// limitHit
// cloning {
// name
// }
// missing {
// name
// }
// timedout {
// name
// }
// resultCount
// elapsedMilliseconds
// }
// }
// }
// `
// var result struct {
// Search struct {
// Results searchResults
// }
// }
// return (&apiRequest{
// query: query,
// vars: map[string]interface{}{
// "query": nullString(queryString),
// },
// result: &result,
// done: func() error {
// improved := searchResultsImproved{
// SourcegraphEndpoint: cfg.Endpoint,
// Query: queryString,
// searchResults: result.Search.Results,
// }
// // HACK: temporary workaround for a bug where ElapsedMilliseconds is nonsensical
// // when results == 0; Remove this when the bug is fixed and enough time has passed
// // (internal tracking issue: https://github.com/sourcegraph/sourcegraph/issues/12625)
// if len(improved.Results) == 0 {
// improved.ElapsedMilliseconds = 0
// }
// if *jsonFlag {
// // Print the formatted JSON.
// f, err := marshalIndent(improved)
// if err != nil {
// return err
// }
// fmt.Println(string(f))
// return nil
// }
// tmpl, err := parseTemplate(searchResultsTemplate)
// if err != nil {
// return err
// }
// if err := execTemplate(tmpl, improved); err != nil {
// return err
// }
// return nil
// },
// flags: apiFlags,
// }).do()
// }
// Register the command.
// commands = append(commands, &command{
// flagSet: flagSet,
// handler: handler,
// usageFunc: func() {
// fmt.Fprintf(flag.CommandLine.Output(), "Usage of 'src %s':\n", flagSet.Name())
// flagSet.PrintDefaults()
// fmt.Println(usage)
// },
// })
}
// searchResults represents the data we get back from the GraphQL search request.
type searchResults struct {
Results []map[string]interface{}
LimitHit bool
Cloning, Missing, Timedout []map[string]interface{}
ResultCount int
ElapsedMilliseconds int
}
// searchResultsImproved is a superset of what the GraphQL API returns. It
// contains the query responsible for producing the results, which is nice for
// most consumers.
type searchResultsImproved struct {
SourcegraphEndpoint string
Query string
searchResults
}
func envSetDefault(env []string, key, value string) []string {
set := false
for _, kv := range env {
if strings.HasPrefix(kv, key+"=") {
set = true
break
}
}
if !set {
env = append(env, key+"="+value)
}
return env
}
func searchHighlightPreview(preview interface{}, start, end string) string {
if start == "" {
start = ansiColors["search-match"]
}
if end == "" {
end = ansiColors["nc"]
}
p := preview.(map[string]interface{})
value := p["value"].(string)
var highlights []highlight
for _, highlightObject := range p["highlights"].([]interface{}) {
h := highlightObject.(map[string]interface{})
line := int(h["line"].(float64))
character := int(h["character"].(float64))
length := int(h["length"].(float64))
highlights = append(highlights, highlight{line, character, length})
}
return applyHighlights(value, highlights, start, end)
}
type highlight struct {
	line      int // the 1-indexed line number
	character int // the 0-indexed character (rune) offset on the line.
	length    int // the length of the highlight, in characters.
}
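// applyHighlights wraps each highlighted region of input in the given start and
// end ANSI sequences, scanning the text line by line.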
func applyHighlights(input string, highlights []highlight, start, end string) string {
var result []rune
lines := strings.Split(input, "\n")
for lineNumber, line := range lines {
lineNumber++
for characterIndex, character := range []rune(line + "\n") {
for _, highlight := range highlights {
if highlight.line == lineNumber {
if characterIndex == highlight.character {
result = append(result, []rune(start)...)
} else if characterIndex == highlight.character+highlight.length {
result = append(result, []rune(end)...)
}
}
}
result = append(result, character)
}
}
return string(result)
}
var searchTemplateFuncs = map[string]interface{}{
"searchSequentialLineNumber": func(lineMatches []interface{}, index int) bool {
prevIndex := index - 1
if prevIndex < 0 {
return true
}
prevLineNumber := lineMatches[prevIndex].(map[string]interface{})["lineNumber"]
lineNumber := lineMatches[index].(map[string]interface{})["lineNumber"]
return prevLineNumber.(float64) == lineNumber.(float64)-1
},
"searchHighlightMatch": func(match interface{}) string {
m := match.(map[string]interface{})
preview := m["preview"].(string)
var highlights []highlight
for _, offsetAndLength := range m["offsetAndLengths"].([]interface{}) {
ol := offsetAndLength.([]interface{})
offset := int(ol[0].(float64))
length := int(ol[1].(float64))
highlights = append(highlights, highlight{line: 1, character: offset, length: length})
}
return applyHighlights(preview, highlights, ansiColors["search-match"], ansiColors["nc"])
},
"searchHighlightPreview": func(preview interface{}) string {
return searchHighlightPreview(preview, "", "")
},
"searchHighlightDiffPreview": func(diffPreview interface{}) string {
p := diffPreview.(map[string]interface{})
diff := p["value"].(string)
useColordiff, err := strconv.ParseBool(os.Getenv("COLORDIFF"))
if err != nil {
useColordiff = true
}
if colorDisabled || !useColordiff {
// Only highlight the matches.
return searchHighlightPreview(diffPreview, "", "")
}
path, err := exec.LookPath("colordiff")
if err != nil {
// colordiff not installed; only highlight the matches.
return searchHighlightPreview(diffPreview, "", "")
}
// First highlight the matches, but use a special "end of match" token
// instead of no color (so that we don't terminate colors that colordiff
// adds).
uniqueStartOfMatchToken := "pXRdMhZbgnPL355429nsO4qFgX86LfXTSmqH4Nr3#*(@)!*#()@!APPJB8ZRutvZ5fdL01273i6OdzLDm0UMC9372891skfJTl2c52yR1v"
uniqueEndOfMatchToken := "v1Ry25c2lTJfks1982739CMU0mDLzdO6i37210Ldf5ZvtuRZ8BJPPA!@)(#*!)@(*#3rN4HqmSTXfL68XgFq4Osn924553LPngbZhMdRXp"
diff = searchHighlightPreview(diffPreview, uniqueStartOfMatchToken, uniqueEndOfMatchToken)
// Now highlight our diff with colordiff.
var buf bytes.Buffer
cmd := exec.Command(path)
cmd.Stdin = strings.NewReader(diff)
cmd.Stdout = &buf
if err := cmd.Run(); err != nil {
fmt.Println("warning: colordiff failed to colorize diff:", err)
return diff
}
colorized := buf.String()
var final []string
for _, line := range strings.Split(colorized, "\n") {
// Find where the start-of-match token is in the line.
somToken := strings.Index(line, uniqueStartOfMatchToken)
// Find which ANSI codes are to the left of our start-of-match token.
indices := ansiRegexp.FindAllStringIndex(line, -1)
matches := ansiRegexp.FindAllString(line, -1)
var left []string
for k, index := range indices {
if index[0] < somToken && index[1] < somToken {
left = append(left, matches[k])
}
}
// Replace our start-of-match token with the color we wish.
line = strings.Replace(line, uniqueStartOfMatchToken, ansiColors["search-match"], 1)
// Replace our end-of-match token with the color terminator,
// and start all colors that were previously started to the left.
line = strings.Replace(line, uniqueEndOfMatchToken, ansiColors["nc"]+strings.Join(left, ""), 1)
final = append(final, line)
}
return strings.Join(final, "\n")
},
"searchMaxRepoNameLength": func(results []map[string]interface{}) int {
max := 0
for _, r := range results {
if r["__typename"] != "Repository" {
continue
}
if name := r["name"].(string); len(name) > max {
max = len(name)
}
}
return max
},
}
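// searchResultsTemplate is the template used to render search results for terminal output.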
const searchResultsTemplate = `{{- /* ignore this line for template formatting sake */ -}}
{{- /* The first results line */ -}}
{{- color "logo" -}}✱{{- color "nc" -}}
{{- " " -}}
{{- if eq .ResultCount 0 -}}
{{- color "warning" -}}
{{- else -}}
{{- color "success" -}}
{{- end -}}
{{- .ResultCount -}}{{if .LimitHit}}+{{end}} results{{- color "nc" -}}
{{- " for " -}}{{- color "search-query"}}"{{.Query}}"{{color "nc" -}}
{{- " in " -}}{{color "success"}}{{msDuration .ElapsedMilliseconds}}{{color "nc" -}}
{{- /* The cloning / missing / timed out repos warnings */ -}}
{{- with .Cloning}}{{color "warning"}}{{"\n"}}({{len .}}) still cloning:{{color "nc"}} {{join (repoNames .) ", "}}{{end -}}
{{- with .Missing}}{{color "warning"}}{{"\n"}}({{len .}}) missing:{{color "nc"}} {{join (repoNames .) ", "}}{{end -}}
{{- with .Timedout}}{{color "warning"}}{{"\n"}}({{len .}}) timed out:{{color "nc"}} {{join (repoNames .) ", "}}{{end -}}
{{"\n"}}
{{- /* Rendering of results */ -}}
{{- range .Results -}}
{{- if ne .__typename "Repository" -}}
{{- /* The border separating results */ -}}
{{- color "search-border"}}{{"--------------------------------------------------------------------------------\n"}}{{color "nc"}}
{{- end -}}
{{- /* File match rendering. */ -}}
{{- if eq .__typename "FileMatch" -}}
{{- /* Link to the result */ -}}
{{- color "search-border"}}{{"("}}{{color "nc" -}}
{{- color "search-link"}}{{$.SourcegraphEndpoint}}{{.file.url}}{{color "nc" -}}
{{- color "search-border"}}{{")\n"}}{{color "nc" -}}
{{- color "nc" -}}
{{- /* Repository and file name */ -}}
{{- color "search-repository"}}{{.repository.name}}{{color "nc" -}}
{{- " › " -}}
{{- color "search-filename"}}{{.file.name}}{{color "nc" -}}
{{- color "success"}}{{" ("}}{{len .lineMatches}}{{" matches)"}}{{color "nc" -}}
{{- "\n" -}}
{{- color "search-border"}}{{"--------------------------------------------------------------------------------\n"}}{{color "nc"}}
{{- /* Line matches */ -}}
{{- $lineMatches := .lineMatches -}}
{{- range $index, $match := $lineMatches -}}
{{- if not (searchSequentialLineNumber $lineMatches $index) -}}
{{- color "search-border"}}{{" ------------------------------------------------------------------------------\n"}}{{color "nc"}}
{{- end -}}
{{- " "}}{{color "search-line-numbers"}}{{pad (addFloat $match.lineNumber 1) 6 " "}}{{color "nc" -}}
{{- color "search-border"}}{{" | "}}{{color "nc"}}{{searchHighlightMatch $match}}
{{- end -}}
{{- end -}}
{{- /* Commit (type:diff, type:commit) result rendering. */ -}}
{{- if eq .__typename "CommitSearchResult" -}}
{{- /* Link to the result */ -}}
{{- color "search-border"}}{{"("}}{{color "nc" -}}
{{- color "search-link"}}{{$.SourcegraphEndpoint}}{{.commit.url}}{{color "nc" -}}
{{- color "search-border"}}{{")\n"}}{{color "nc" -}}
{{- color "nc" -}}
{{- /* Repository > author name "commit subject" (time ago) */ -}}
{{- color "search-repository"}}{{.commit.repository.name}}{{color "nc" -}}
{{- " › " -}}
{{- color "search-commit-author"}}{{.commit.author.person.displayName}}{{color "nc" -}}
{{- " " -}}
{{- color "search-commit-subject"}}"{{.commit.subject}}"{{color "nc" -}}
{{- " "}}
{{- color "search-commit-date"}}{{"("}}{{humanizeRFC3339 .commit.author.date}}{{")" -}}{{color "nc" -}}
{{- "\n" -}}
{{- color "search-border"}}{{"--------------------------------------------------------------------------------\n"}}{{color "nc"}}
{{- if .messagePreview -}}
{{- /* type:commit rendering */ -}}
{{indent (searchHighlightPreview .messagePreview) " "}}
{{- end -}}
{{- if .diffPreview -}}
{{- /* type:diff rendering */ -}}
{{indent (searchHighlightDiffPreview .diffPreview) " "}}
{{- end -}}
{{- end -}}
{{- /* Repository (type:repo) result rendering. */ -}}
{{- if eq .__typename "Repository" -}}
{{- /* Link to the result */ -}}
{{- color "success"}}{{padRight .name (searchMaxRepoNameLength $.Results) " "}}{{color "nc" -}}
{{- color "search-border"}}{{" ("}}{{color "nc" -}}
{{- color "search-repository"}}{{$.SourcegraphEndpoint}}{{.url}}{{color "nc" -}}
{{- color "search-border"}}{{")\n"}}{{color "nc" -}}
{{- color "nc" -}}
{{- end -}}
{{- end -}}
`
const searchJSONExplanation = `Explanation of 'src search -json' output:
'src search -json' outputs the exact same results that are retrieved from
Sourcegraph's GraphQL API (see https://about.sourcegraph.com/docs/features/api/)
At a high-level there are three result types:
- 'FileMatch': the type of result you get without any 'type:' modifiers.
- 'CommitSearchResult': the type of result you get with a 'type:commit' or
'type:diff' modifier.
- 'Repository': the type of result you get with a 'type:repo' modifier.
All three of these result types have different fields available. They can be
differentiated by using the '__typename' field.
The link below shows the GraphQL query that this program internally
executes when querying for search results. On this page, you can hover over
any field in the GraphQL panel on the left to get documentation about the field
itself.
If you have any questions, feedback, or suggestions, please contact us
([email protected]) or file an issue! :)
https://sourcegraph.com/api/console#%7B%22query%22%3A%22fragment%20FileMatchFields%20on%20FileMatch%20%7B%5Cn%20%20repository%20%7B%5Cn%20%20%20%20name%5Cn%20%20%20%20url%5Cn%20%20%7D%5Cn%20%20file%20%7B%5Cn%20%20%20%20name%5Cn%20%20%20%20path%5Cn%20%20%20%20url%5Cn%20%20%20%20commit%20%7B%5Cn%20%20%20%20%20%20oid%5Cn%20%20%20%20%7D%5Cn%20%20%7D%5Cn%20%20lineMatches%20%7B%5Cn%20%20%20%20preview%5Cn%20%20%20%20lineNumber%5Cn%20%20%20%20offsetAndLengths%5Cn%20%20%20%20limitHit%5Cn%20%20%7D%5Cn%7D%5Cn%5Cnfragment%20CommitSearchResultFields%20on%20CommitSearchResult%20%7B%5Cn%20%20messagePreview%20%7B%5Cn%20%20%20%20value%5Cn%20%20%20%20highlights%20%7B%5Cn%20%20%20%20%20%20line%5Cn%20%20%20%20%20%20character%5Cn%20%20%20%20%20%20length%5Cn%20%20%20%20%7D%5Cn%20%20%7D%5Cn%20%20diffPreview%20%7B%5Cn%20%20%20%20value%5Cn%20%20%20%20highlights%20%7B%5Cn%20%20%20%20%20%20line%5Cn%20%20%20%20%20%20character%5Cn%20%20%20%20%20%20length%5Cn%20%20%20%20%7D%5Cn%20%20%7D%5Cn%20%20commit%20%7B%5Cn%20%20%20%20repository%20%7B%5Cn%20%20%20%20%20%20name%5Cn%20%20%20%20%7D%5Cn%20%20%20%20oid%5Cn%20%20%20%20url%5Cn%20%20%20%20subject%5Cn%20%20%20%20author%20%7B%5Cn%20%20%20%20%20%20date%5Cn%20%20%20%20%20%20person%20%7B%5Cn%20%20%20%20%20%20%20%20displayName%5Cn%20%20%20%20%20%20%7D%5Cn%20%20%20%20%7D%5Cn%20%20%7D%5Cn%7D%5Cn%5Cnfragment%20RepositoryFields%20on%20Repository%20%7B%5Cn%20%20name%5Cn%20%20url%5Cn%20%20externalURLs%20%7B%5Cn%20%20%20%20serviceType%5Cn%20%20%20%20url%5Cn%20%20%7D%5Cn%7D%5Cn%5Cnquery%20(%24query%3A%20String!)%20%7B%5Cn%20%20search(query%3A%20%24query)%20%7B%5Cn%20%20%20%20results%20%7B%5Cn%20%20%20%20%20%20results%20%7B%5Cn%20%20%20%20%20%20%20%20__typename%5Cn%20%20%20%20%20%20%20%20...%20on%20FileMatch%20%7B%5Cn%20%20%20%20%20%20%20%20%20%20...FileMatchFields%5Cn%20%20%20%20%20%20%20%20%7D%5Cn%20%20%20%20%20%20%20%20...%20on%20CommitSearchResult%20%7B%5Cn%20%20%20%20%20%20%20%20%20%20...CommitSearchResultFields%5Cn%20%20%20%20%20%20%20%20%7D%5Cn%20%20%20%20%20%20%20%20...%20on%20Repository%20%7B%5Cn%20%20%20%20%20%20%20%20%20%20...RepositoryFields%5Cn%20%20%20%20%20%20%20%20%7D%5Cn%20%20%20%20%20%20%7D%5Cn%20%20%20%20%20%20limitHit%5Cn%20%20%20%20%20%20cloning%20%7B%5Cn%20%20%20%20%20%20%20%20name%5Cn%20%20%20%20%20%20%7D%5Cn%20%20%20%20%20%20missing%20%7B%5Cn%20%20%20%20%20%20%20%20name%5Cn%20%20%20%20%20%20%7D%5Cn%20%20%20%20%20%20timedout%20%7B%5Cn%20%20%20%20%20%20%20%20name%5Cn%20%20%20%20%20%20%7D%5Cn%20%20%20%20%20%20resultCount%5Cn%20%20%20%20%20%20elapsedMilliseconds%5Cn%20%20%20%20%7D%5Cn%20%20%7D%5Cn%7D%5Cn%22%2C%22variables%22%3A%22%7B%5Cn%20%20%5C%22query%5C%22%3A%20%5C%22repogroup%3Asample%20error%5C%22%5Cn%7D%22%7D
`
| [
"\"COLORDIFF\""
] | [] | [
"COLORDIFF"
] | [] | ["COLORDIFF"] | go | 1 | 0 | |
scripts/resolve_duplicates.py | import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "FAIRshake.settings")
import django
django.setup()
from FAIRshakeAPI import models
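# Interactively resolve a single attribute: if the primary object and its duplicates disagree, prompt for which
# value to keep (empty input keeps the primary's value), then set it on the primary.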
def merge_attr(obj, attr, *dups):
P = set([getattr(dup, attr) for dup in [obj]+list(dups)])
if len(P) > 1:
selections = {str(i): p for i, p in enumerate(P)}
for i, p in enumerate(P):
print('(%d%s) %s' % (i, '*' if p == getattr(obj, attr) else '', p))
inp = input()
if inp == '':
selection = getattr(obj, attr)
elif selections.get(inp) is not None:
selection = selections[inp]
else:
selection = inp
else:
selection = getattr(obj, attr)
setattr(obj, attr, selection)
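# Attach every related object of each duplicate to the corresponding relation on the primary object.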
def merge_many_attr(obj, attr, *dups):
for dup in dups:
for child in getattr(dup, attr).all():
getattr(obj, attr).add(child)
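# Merge scalar fields and relations from the duplicates into the primary record, then delete the duplicates.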
def merge_dups(primary, *dups):
for attr in ['title', 'url', 'description', 'image', 'tags', 'type', 'fairmetrics']:
merge_attr(primary, attr, *dups)
for attr in ['authors', 'rubrics', 'projects', 'assessments']:
merge_many_attr(primary, attr, *dups)
for dup in dups:
dup.delete()
# Find and merge fully duplicated digital objects (objects that serialize to identical JSON)
import json
from django.core import serializers
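# Serialize objects to JSON with their primary keys stripped, so identical output implies identical field values.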
def custom_json(*objs):
JSON = json.loads(serializers.serialize('json', objs))
for obj in JSON:
del obj['pk']
return json.dumps(JSON)
potential_full_dups = {}
for obj in models.DigitalObject.objects.all():
j = custom_json(obj)
potential_full_dups[j] = potential_full_dups.get(j, []) + [obj]
full_dups = {
j: dups
for j, dups in potential_full_dups.items()
if len(dups) > 1
}
full_dups
for _, dups in full_dups.items():
obj = dups[0]
merge_dups(dups[0], *dups[1:])
# Find and potentially merge URL-duplicate digital objects (objects with identical URLs)
potential_url_dups = {}
for obj in models.DigitalObject.objects.all():
if obj.url != '':
potential_url_dups[obj.url] = potential_url_dups.get(obj.url, []) + [obj]
url_dups = {
url: dups
for url, dups in potential_url_dups.items()
if len(dups) > 1
}
for url, dups in url_dups.items():
print(
'Merge',
*dups,
'Y/n',
sep='\n',
)
resp = input()
if resp.lower() == 'y' or resp == '':
merge_dups(dups[0], *dups[1:])
elif resp.lower() == 'n':
continue
else:
for rs in [r.split(',') for r in resp.split(' ')]:
merge_dups(rs[0], *rs[1:])
| [] | [] | [] | [] | [] | python | 0 | 0 | |
common/common.go | package common
import (
"bytes"
"errors"
"fmt"
"os"
"regexp"
"io/ioutil"
"strconv"
"strings"
"path/filepath"
"encoding/json"
)
const (
TEMPLATED_STRING_REGEXP = `\{\{\.[[:alnum:][:punct:][:print:]]+?\}\}`
INTERPOLATED_STRING_REGEXP = `%(?:[#v]|[%EGUTXbcdefgopqstvx])`
)
var templatedStringRegexp, interpolatedStringRegexp *regexp.Regexp
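// ParseStringList splits stringList on delimiter, dropping empty entries and trimming surrounding spaces and quotes.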
func ParseStringList(stringList string, delimiter string) []string {
stringArray := strings.Split(stringList, delimiter)
var parsedStrings []string
for _, aString := range stringArray {
if aString != "" {
parsedStrings = append(parsedStrings, strings.Trim(strings.Trim(aString, " "), "\""))
}
}
return parsedStrings
}
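// CreateTmpFile writes content to a new temporary file and returns the file handle.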
func CreateTmpFile(content string) (*os.File, error) {
tmpFile, err := ioutil.TempFile("", "")
if err != nil {
return nil, err
}
ioutil.WriteFile(tmpFile.Name(), []byte(content), 0666)
return tmpFile, nil
}
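// CheckFile verifies that fileName is a regular file and returns its base name and directory.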
func CheckFile(fileName string) (string, string, error) {
fileInfo, err := os.Stat(fileName)
if err != nil {
return "", "", err
}
if !fileInfo.Mode().IsRegular() {
return "", "", fmt.Errorf("i18n4go: Non-regular source file %s (%s)\n", fileInfo.Name(), fileInfo.Mode().String())
}
return filepath.Base(fileName), filepath.Dir(fileName), nil
}
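// CopyFileContents copies the contents of src to dst, creating the destination directory if necessary.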
func CopyFileContents(src, dst string) error {
err := CreateOutputDirsIfNeeded(filepath.Dir(dst))
if err != nil {
return err
}
byteArray, err := ioutil.ReadFile(src)
if err != nil {
return err
}
return ioutil.WriteFile(dst, byteArray, 0644)
}
func GetAbsFileInfo(fileNamePath string) (os.FileInfo, error) {
var absFilePath = fileNamePath
if !filepath.IsAbs(absFilePath) {
absFilePath = filepath.Join(os.Getenv("PWD"), absFilePath)
}
file, err := os.OpenFile(absFilePath, os.O_RDONLY, 0)
defer file.Close()
if err != nil {
return nil, err
}
return file.Stat()
}
func FindFilePath(filename string) (string, error) {
fileInfo, err := os.Stat(filename)
if err != nil {
return "", err
}
path := filename[0 : len(filename)-len(fileInfo.Name())]
return path, nil
}
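// CreateOutputDirsIfNeeded creates outputDirname (and any missing parents) if it does not already exist.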
func CreateOutputDirsIfNeeded(outputDirname string) error {
_, err := os.Stat(outputDirname)
if os.IsNotExist(err) {
err = os.MkdirAll(outputDirname, 0755)
if err != nil {
return err
}
}
return nil
}
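// UnescapeHTML reverses the \u003c, \u003e and \u0026 escaping that json.Marshal applies to HTML characters.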
func UnescapeHTML(byteArray []byte) []byte {
byteArray = bytes.Replace(byteArray, []byte("\\u003c"), []byte("<"), -1)
byteArray = bytes.Replace(byteArray, []byte("\\u003e"), []byte(">"), -1)
byteArray = bytes.Replace(byteArray, []byte("\\u0026"), []byte("&"), -1)
return byteArray
}
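// SaveStrings writes the extracted i18n strings as JSON into outputDirname, flattening the source path into the output file name.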
func SaveStrings(printer PrinterInterface, options Options, stringInfos map[string]StringInfo, outputDirname string, fileName string) error {
if !options.DryRunFlag {
err := CreateOutputDirsIfNeeded(outputDirname)
if err != nil {
printer.Println(err)
return err
}
}
i18nStringInfos := make([]I18nStringInfo, len(stringInfos))
i := 0
for _, stringInfo := range stringInfos {
i18nStringInfos[i] = I18nStringInfo{ID: stringInfo.Value, Translation: stringInfo.Value}
i++
}
jsonData, err := json.MarshalIndent(i18nStringInfos, "", " ")
if err != nil {
printer.Println(err)
return err
}
jsonData = UnescapeHTML(jsonData)
// Use the full path of the filename in the output dir, so that files with the same name don't overwrite each other
outputFilename := filepath.Join(outputDirname, strings.Replace(fileName, string(os.PathSeparator), "-", -1))
if len(stringInfos) != 0 {
printer.Println("Saving extracted i18n strings to file:", outputFilename)
}
if !options.DryRunFlag && len(i18nStringInfos) != 0 {
file, err := os.Create(outputFilename)
defer file.Close()
if err != nil {
printer.Println(err)
return err
}
file.Write(jsonData)
}
return nil
}
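// SaveStringsInPo writes the extracted i18n strings to a gettext .po file in outputDirname.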
func SaveStringsInPo(printer PrinterInterface, options Options, stringInfos map[string]StringInfo, outputDirname string, fileName string) error {
if len(stringInfos) != 0 {
printer.Println("Creating and saving i18n strings to .po file:", fileName)
}
if !options.DryRunFlag && len(stringInfos) != 0 {
err := CreateOutputDirsIfNeeded(outputDirname)
if err != nil {
printer.Println(err)
return err
}
file, err := os.Create(filepath.Join(outputDirname, fileName[strings.LastIndex(fileName, string(os.PathSeparator))+1:len(fileName)]))
defer file.Close()
if err != nil {
printer.Println(err)
return err
}
for _, stringInfo := range stringInfos {
file.Write([]byte("# filename: " + strings.Split(fileName, ".en.po")[0] +
", offset: " + strconv.Itoa(stringInfo.Offset) +
", line: " + strconv.Itoa(stringInfo.Line) +
", column: " + strconv.Itoa(stringInfo.Column) + "\n"))
file.Write([]byte("msgid " + strconv.Quote(stringInfo.Value) + "\n"))
file.Write([]byte("msgstr " + strconv.Quote(stringInfo.Value) + "\n"))
file.Write([]byte("\n"))
}
}
return nil
}
func SaveI18nStringsInPo(printer PrinterInterface, options Options, i18nStrings []I18nStringInfo, fileName string) error {
printer.Println("i18n4go: creating and saving i18n strings to .po file:", fileName)
if !options.DryRunFlag && len(i18nStrings) != 0 {
file, err := os.Create(fileName)
defer file.Close()
if err != nil {
printer.Println(err)
return err
}
for _, stringInfo := range i18nStrings {
file.Write([]byte("msgid " + strconv.Quote(stringInfo.ID) + "\n"))
file.Write([]byte("msgstr " + strconv.Quote(stringInfo.Translation) + "\n"))
file.Write([]byte("\n"))
}
}
return nil
}
func SaveI18nStringInfos(printer PrinterInterface, options Options, i18nStringInfos []I18nStringInfo, fileName string) error {
jsonData, err := json.MarshalIndent(i18nStringInfos, "", " ")
if err != nil {
printer.Println(err)
return err
}
jsonData = UnescapeHTML(jsonData)
if !options.DryRunFlag && len(i18nStringInfos) != 0 {
err := ioutil.WriteFile(fileName, jsonData, 0644)
if err != nil {
printer.Println(err)
return err
}
}
return nil
}
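// LoadI18nStringInfos reads and unmarshals I18nStringInfo entries from the given JSON file.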
func LoadI18nStringInfos(fileName string) ([]I18nStringInfo, error) {
_, err := os.Stat(fileName)
if os.IsNotExist(err) {
return nil, err
}
content, err := ioutil.ReadFile(fileName)
if err != nil {
return nil, err
}
var i18nStringInfos []I18nStringInfo
err = json.Unmarshal(content, &i18nStringInfos)
if err != nil {
return nil, err
}
return i18nStringInfos, nil
}
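// CreateI18nStringInfoMap indexes I18nStringInfo entries by ID and returns an error on duplicate IDs.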
func CreateI18nStringInfoMap(i18nStringInfos []I18nStringInfo) (map[string]I18nStringInfo, error) {
inputMap := make(map[string]I18nStringInfo, len(i18nStringInfos))
for _, i18nStringInfo := range i18nStringInfos {
if _, ok := inputMap[i18nStringInfo.ID]; !ok {
inputMap[i18nStringInfo.ID] = i18nStringInfo
} else {
return nil, errors.New("Duplicated key found: " + i18nStringInfo.ID)
}
}
return inputMap, nil
}
func CopyI18nStringInfoMap(i18nStringInfoMap map[string]I18nStringInfo) map[string]I18nStringInfo {
copyMap := make(map[string]I18nStringInfo, len(i18nStringInfoMap))
for key, value := range i18nStringInfoMap {
copyMap[key] = value
}
return copyMap
}
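// GetTemplatedStringArgs returns the argument names referenced by {{.Name}} placeholders in aString.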
func GetTemplatedStringArgs(aString string) []string {
re, err := getTemplatedStringRegexp()
if err != nil {
fmt.Printf("i18n4go: Error compiling templated string Regexp: %s\n", err.Error())
return []string{}
}
matches := re.FindAllStringSubmatch(aString, -1)
var stringMatches []string
for _, match := range matches {
stringMatch := match[0]
stringMatch = stringMatch[3 : len(stringMatch)-2]
stringMatches = append(stringMatches, stringMatch)
}
return stringMatches
}
func IsTemplatedString(aString string) bool {
re, err := getTemplatedStringRegexp()
if err != nil {
fmt.Printf("i18n4go: Error compiling templated string Regexp: %s\n", err.Error())
return false
}
return re.Match([]byte(aString))
}
func IsInterpolatedString(aString string) bool {
re, err := getInterpolatedStringRegexp()
if err != nil {
fmt.Printf("i18n4go: Error compiling interpolated string Regexp: %s\n", err.Error())
return false
}
return re.Match([]byte(aString))
}
func ConvertToTemplatedString(aString string) string {
if !IsInterpolatedString(aString) {
return aString
}
re, err := getInterpolatedStringRegexp()
if err != nil {
fmt.Printf("i18n4go: Error compiling interpolated string Regexp: %s\n", err.Error())
return ""
}
matches := re.FindAllStringSubmatch(aString, -1)
templatedString := aString
for i, match := range matches {
argName := "{{.Arg" + strconv.Itoa(i) + "}}"
templatedString = strings.Replace(templatedString, match[0], argName, 1)
}
return templatedString
}
func I18nStringInfoMapValues2Array(i18nStringInfosMap map[string]I18nStringInfo) []I18nStringInfo {
var i18nStringInfos []I18nStringInfo
for _, i18nStringInfo := range i18nStringInfosMap {
i18nStringInfos = append(i18nStringInfos, i18nStringInfo)
}
return i18nStringInfos
}
// Private
func getTemplatedStringRegexp() (*regexp.Regexp, error) {
var err error
if templatedStringRegexp == nil {
templatedStringRegexp, err = regexp.Compile(TEMPLATED_STRING_REGEXP)
}
return templatedStringRegexp, err
}
func getInterpolatedStringRegexp() (*regexp.Regexp, error) {
var err error
if interpolatedStringRegexp == nil {
interpolatedStringRegexp, err = regexp.Compile(INTERPOLATED_STRING_REGEXP)
}
return interpolatedStringRegexp, err
}
| [
"\"PWD\""
] | [] | [
"PWD"
] | [] | ["PWD"] | go | 1 | 0 | |
pkg/cmd/clients/fake/fake_factory.go | package fake
import (
"io"
"os"
"github.com/jenkins-x/jx/pkg/kustomize"
"k8s.io/client-go/dynamic"
"github.com/jenkins-x/jx/pkg/cmd/clients"
"github.com/jenkins-x/jx/pkg/util"
"github.com/jenkins-x/jx/pkg/builds"
v1fake "github.com/jenkins-x/jx/pkg/client/clientset/versioned/fake"
kservefake "github.com/knative/serving/pkg/client/clientset/versioned/fake"
apifake "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake"
"k8s.io/client-go/kubernetes/fake"
gojenkins "github.com/jenkins-x/golang-jenkins"
"github.com/jenkins-x/jx/pkg/io/secrets"
"github.com/jenkins-x/jx/pkg/vault"
certmngclient "github.com/jetstack/cert-manager/pkg/client/clientset/versioned"
fake_certmngclient "github.com/jetstack/cert-manager/pkg/client/clientset/versioned/fake"
vaultoperatorclient "github.com/banzaicloud/bank-vaults/operator/pkg/client/clientset/versioned"
fake_vaultoperatorclient "github.com/banzaicloud/bank-vaults/operator/pkg/client/clientset/versioned/fake"
"github.com/heptio/sonobuoy/pkg/client"
sonoboy_dynamic "github.com/heptio/sonobuoy/pkg/dynamic"
"github.com/jenkins-x/jx/pkg/auth"
"github.com/jenkins-x/jx/pkg/client/clientset/versioned"
"github.com/jenkins-x/jx/pkg/gits"
"github.com/jenkins-x/jx/pkg/helm"
"github.com/jenkins-x/jx/pkg/kube"
"github.com/jenkins-x/jx/pkg/table"
fake_vault "github.com/jenkins-x/jx/pkg/vault/fake"
kserve "github.com/knative/serving/pkg/client/clientset/versioned"
"github.com/pkg/errors"
tektonclient "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
tektonfake "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake"
apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"
fake_metricsclient "k8s.io/metrics/pkg/client/clientset/versioned/fake"
prowjobclient "k8s.io/test-infra/prow/client/clientset/versioned"
fake_prowjobclient "k8s.io/test-infra/prow/client/clientset/versioned/fake"
)
// FakeFactory points to a fake factory implementation
type FakeFactory struct {
Batch bool
delegate clients.Factory
namespace string
kubeConfig kube.Kuber
impersonateUser string
bearerToken string
secretLocation secrets.SecretLocation
offline bool
// cached fake clients
apiClient apiextensionsclientset.Interface
jxClient versioned.Interface
kubeClient kubernetes.Interface
kserveClient kserve.Interface
tektonClient tektonclient.Interface
prowJobClient prowjobclient.Interface
dyncClient dynamic.Interface
}
var _ clients.Factory = (*FakeFactory)(nil)
// NewFakeFactory creates a fake factory which uses fake k8s clients for testing
func NewFakeFactory() clients.Factory {
f := &FakeFactory{
namespace: "jx",
}
f.kubeConfig = kube.NewKubeConfig()
return f
}
// NewFakeFactoryFromClients creates a fake factory which uses fake k8s clients for testing
func NewFakeFactoryFromClients(apiClient apiextensionsclientset.Interface,
jxClient versioned.Interface,
kubeClient kubernetes.Interface,
tektonClient tektonclient.Interface,
dyncClient dynamic.Interface) *FakeFactory {
f := &FakeFactory{
namespace: "jx",
apiClient: apiClient,
jxClient: jxClient,
kubeClient: kubeClient,
tektonClient: tektonClient,
dyncClient: dyncClient,
}
f.kubeConfig = kube.NewKubeConfig()
return f
}
// SetDelegateFactory sets the delegate factory
func (f *FakeFactory) SetDelegateFactory(factory clients.Factory) {
f.delegate = factory
}
// GetDelegateFactory returns the delegate factory
func (f *FakeFactory) GetDelegateFactory() clients.Factory {
if f.delegate == nil {
f.delegate = clients.NewFactory()
}
return f.delegate
}
// SetNamespace sets the default namespace
func (f *FakeFactory) SetNamespace(ns string) {
f.namespace = ns
}
// SetBatch sets batch
func (f *FakeFactory) SetBatch(batch bool) {
f.Batch = batch
}
// SetOffline sets offline
func (f *FakeFactory) SetOffline(offline bool) {
f.offline = offline
}
// ImpersonateUser returns a new factory impersonating the given user
func (f *FakeFactory) ImpersonateUser(user string) clients.Factory {
copy := *f
copy.impersonateUser = user
return &copy
}
// WithBearerToken returns a new factory with bearer token
func (f *FakeFactory) WithBearerToken(token string) clients.Factory {
copy := *f
copy.bearerToken = token
return &copy
}
// CreateJenkinsClient creates a new Jenkins client
func (f *FakeFactory) CreateJenkinsClient(kubeClient kubernetes.Interface, ns string, handles util.IOFileHandles) (gojenkins.JenkinsClient, error) {
return f.GetDelegateFactory().CreateJenkinsClient(kubeClient, ns, handles)
}
// CreateCustomJenkinsClient creates a new Jenkins client for the given custom Jenkins App
func (f *FakeFactory) CreateCustomJenkinsClient(kubeClient kubernetes.Interface, ns string, jenkinsServiceName string, handles util.IOFileHandles) (gojenkins.JenkinsClient, error) {
return f.GetDelegateFactory().CreateCustomJenkinsClient(kubeClient, ns, jenkinsServiceName, handles)
}
// GetJenkinsURL gets the Jenkins URL for the given namespace
func (f *FakeFactory) GetJenkinsURL(kubeClient kubernetes.Interface, ns string) (string, error) {
return f.GetDelegateFactory().GetJenkinsURL(kubeClient, ns)
}
// GetCustomJenkinsURL gets a custom jenkins App service URL
func (f *FakeFactory) GetCustomJenkinsURL(kubeClient kubernetes.Interface, ns string, jenkinsServiceName string) (string, error) {
return f.GetDelegateFactory().GetCustomJenkinsURL(kubeClient, ns, jenkinsServiceName)
}
// CreateJenkinsAuthConfigService creates a new Jenkins authentication configuration service
func (f *FakeFactory) CreateJenkinsAuthConfigService(namespace string, jenkinsServiceName string) (auth.ConfigService, error) {
return f.CreateAuthConfigService(auth.JenkinsAuthConfigFile, namespace, kube.ValueKindJenkins, "")
}
// CreateChartmuseumAuthConfigService creates a new Chartmuseum authentication configuration service
func (f *FakeFactory) CreateChartmuseumAuthConfigService(namespace string, serviceKind string) (auth.ConfigService, error) {
return f.CreateAuthConfigService(auth.ChartmuseumAuthConfigFile, namespace, kube.ValueKindChartmuseum, serviceKind)
}
// CreateIssueTrackerAuthConfigService creates a new issuer tracker configuration service
func (f *FakeFactory) CreateIssueTrackerAuthConfigService(namespace string, serviceKind string) (auth.ConfigService, error) {
return f.CreateAuthConfigService(auth.IssuesAuthConfigFile, namespace, kube.ValueKindIssue, serviceKind)
}
// CreateChatAuthConfigService creates a new chat configuration service
func (f *FakeFactory) CreateChatAuthConfigService(namespace string, serviceKind string) (auth.ConfigService, error) {
return f.CreateAuthConfigService(auth.ChatAuthConfigFile, namespace, kube.ValueKindChat, serviceKind)
}
// CreateAddonAuthConfigService creates a new addon auth configuration service
func (f *FakeFactory) CreateAddonAuthConfigService(namespace string, serviceKind string) (auth.ConfigService, error) {
return f.CreateAuthConfigService(auth.AddonAuthConfigFile, namespace, kube.ValueKindAddon, serviceKind)
}
// CreateGitAuthConfigService creates a new git auth configuration service
func (f *FakeFactory) CreateGitAuthConfigService(namespace string, serviceKind string) (auth.ConfigService, error) {
return f.CreateAuthConfigService(auth.GitAuthConfigFile, namespace, kube.ValueKindGit, serviceKind)
}
// CreateAuthConfigService creates a new service which loads/saves the auth config from/to different sources depending
// on the current secrets location and cluster context. The sources can be vault, kubernetes secrets or local file.
func (f *FakeFactory) CreateAuthConfigService(fileName string, namespace string,
serverKind string, serviceKind string) (auth.ConfigService, error) {
configService := auth.NewMemoryAuthConfigService()
username := "fake-username"
url := "https://fake-server.org"
kind := serviceKind
if serverKind == kube.ValueKindGit {
kind = gits.KindGitFake
}
config := &auth.AuthConfig{
Servers: []*auth.AuthServer{
{
URL: url,
Users: []*auth.UserAuth{
{
Username: username,
ApiToken: "fake-token",
},
},
Kind: kind,
Name: serviceKind,
CurrentUser: username,
},
},
CurrentServer: url,
PipeLineUsername: username,
PipeLineServer: url,
}
configService.SetConfig(config)
return configService, nil
}
// SecretsLocation indicates the location where the secrets are stored
func (f *FakeFactory) SecretsLocation() secrets.SecretsLocationKind {
return secrets.FileSystemLocationKind
}
// SetSecretsLocation configures the secrets location. It will persist the value in a config map
// if the persist flag is set.
func (f *FakeFactory) SetSecretsLocation(location secrets.SecretsLocationKind, persist bool) error {
return nil
}
// ResetSecretsLocation resets the location of the secrets stored in memory
func (f *FakeFactory) ResetSecretsLocation() {
f.secretLocation = nil
}
// CreateSystemVaultClient gets the system vault client for managing the secrets
func (f *FakeFactory) CreateSystemVaultClient(namespace string) (vault.Client, error) {
return fake_vault.NewFakeVaultClient(), nil
}
// CreateVaultClient returns the given vault client for managing secrets
// Will use default values for name and namespace if nil values are applied
func (f *FakeFactory) CreateVaultClient(name string, namespace string) (vault.Client, error) {
return fake_vault.NewFakeVaultClient(), nil
}
// CreateKubeClient creates a new Kubernetes client
func (f *FakeFactory) CreateKubeClient() (kubernetes.Interface, string, error) {
if f.kubeClient == nil {
f.kubeClient = fake.NewSimpleClientset()
}
return f.kubeClient, f.namespace, nil
}
// CreateJXClient creates a new Kubernetes client for Jenkins X CRDs
func (f *FakeFactory) CreateJXClient() (versioned.Interface, string, error) {
if f.jxClient == nil {
f.jxClient = v1fake.NewSimpleClientset()
}
return f.jxClient, f.namespace, nil
}
// CreateApiExtensionsClient creates a new Kubernetes ApiExtensions client
func (f *FakeFactory) CreateApiExtensionsClient() (apiextensionsclientset.Interface, error) {
if f.apiClient == nil {
f.apiClient = apifake.NewSimpleClientset()
}
return f.apiClient, nil
}
// CreateProwJobClient creates a new Kubernetes client for ProwJob resources
func (f *FakeFactory) CreateProwJobClient() (prowjobclient.Interface, string, error) {
if f.prowJobClient == nil {
f.prowJobClient = fake_prowjobclient.NewSimpleClientset()
}
return f.prowJobClient, f.namespace, nil
}
// CreateKnativeServeClient create a new Kubernetes client for Knative serve resources
func (f *FakeFactory) CreateKnativeServeClient() (kserve.Interface, string, error) {
if f.kserveClient == nil {
f.kserveClient = kservefake.NewSimpleClientset()
}
return f.kserveClient, f.namespace, nil
}
// CreateTektonClient create a new Kubernetes client for Tekton resources
func (f *FakeFactory) CreateTektonClient() (tektonclient.Interface, string, error) {
if f.tektonClient == nil {
f.tektonClient = tektonfake.NewSimpleClientset()
}
return f.tektonClient, f.namespace, nil
}
// CreateDynamicClient creates a new Kubernetes Dynamic client
func (f *FakeFactory) CreateDynamicClient() (dynamic.Interface, string, error) {
if f.dyncClient == nil {
config, err := f.CreateKubeConfig()
if err != nil {
return nil, "", err
}
kubeConfig, _, err := f.kubeConfig.LoadConfig()
if err != nil {
return nil, "", err
}
ns := kube.CurrentNamespace(kubeConfig)
f.dyncClient, err = dynamic.NewForConfig(config)
if err != nil {
return nil, ns, err
}
return f.dyncClient, ns, err
}
return f.dyncClient, f.namespace, nil
}
// CreateMetricsClient creates a new Kubernetes metrics client
func (f *FakeFactory) CreateMetricsClient() (metricsclient.Interface, error) {
return fake_metricsclient.NewSimpleClientset(), nil
}
// CreateGitProvider creates a new Git provider
func (f *FakeFactory) CreateGitProvider(gitURL string, message string, authConfigSvc auth.ConfigService,
gitKind string, ghOwner string, batchMode bool, gitter gits.Gitter, handles util.IOFileHandles) (gits.GitProvider, error) {
return f.GetDelegateFactory().CreateGitProvider(gitURL, message, authConfigSvc, gitKind, ghOwner, batchMode, gitter, handles)
}
// CreateKubeConfig creates the kubernetes configuration
func (f *FakeFactory) CreateKubeConfig() (*rest.Config, error) {
return f.GetDelegateFactory().CreateKubeConfig()
}
// CreateTable creates a new table
func (f *FakeFactory) CreateTable(out io.Writer) table.Table {
return table.CreateTable(out)
}
// IsInCDPipeline reports whether the git / issue tracker API tokens should be loaded, i.e. when the current pod
// is in a pipeline and running as the Jenkins service account
func (f *FakeFactory) IsInCDPipeline() bool {
// TODO should we let RBAC decide if we can see the Secrets in the dev namespace?
// or we should test if we are in the cluster and get the current ServiceAccount name?
buildNumber := builds.GetBuildNumber()
return buildNumber != "" || os.Getenv("PIPELINE_KIND") != ""
}
// IsInCluster tells whether we are running inside a Kubernetes cluster
func (f *FakeFactory) IsInCluster() bool {
_, err := rest.InClusterConfig()
return err == nil
}
// CreateComplianceClient creates a new Sonobuoy compliance client
func (f *FakeFactory) CreateComplianceClient() (*client.SonobuoyClient, error) {
config, err := f.CreateKubeConfig()
if err != nil {
return nil, errors.Wrap(err, "compliance client failed to load the Kubernetes configuration")
}
skc, err := sonoboy_dynamic.NewAPIHelperFromRESTConfig(config)
if err != nil {
return nil, errors.Wrap(err, "compliance dynamic client failed to be created")
}
return client.NewSonobuoyClient(config, skc)
}
// CreateVaultOperatorClient creates a new vault operator client
func (f *FakeFactory) CreateVaultOperatorClient() (vaultoperatorclient.Interface, error) {
return fake_vaultoperatorclient.NewSimpleClientset(), nil
}
// CreateHelm creates a new Helm client
func (f *FakeFactory) CreateHelm(verbose bool,
helmBinary string,
noTiller bool,
helmTemplate bool) helm.Helmer {
return f.GetDelegateFactory().CreateHelm(verbose,
helmBinary,
noTiller,
helmTemplate)
}
// CreateCertManagerClient creates a new Kuberntes client for cert-manager resources
func (f *FakeFactory) CreateCertManagerClient() (certmngclient.Interface, error) {
return fake_certmngclient.NewSimpleClientset(), nil
}
// CreateLocalGitAuthConfigService creates a new service which loads/saves the auth config from/to a local file.
func (f *FakeFactory) CreateLocalGitAuthConfigService() (auth.ConfigService, error) {
return f.GetDelegateFactory().CreateLocalGitAuthConfigService()
}
// CreateKustomizer creates a Kustomizer client
func (f *FakeFactory) CreateKustomizer() kustomize.Kustomizer {
return f.GetDelegateFactory().CreateKustomizer()
}
| [
"\"PIPELINE_KIND\""
] | [] | [
"PIPELINE_KIND"
] | [] | ["PIPELINE_KIND"] | go | 1 | 0 | |
grpc-gateway/server/main.go | package main
import (
"fmt"
"log"
"net"
"net/http"
"os"
grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"google.golang.org/grpc"
pb "github.com/jun06t/prometheus-sample/grpc-gateway/proto"
)
var (
endpoint = ":8080"
promAddr = ":9100"
)
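// init allows the gRPC endpoint and Prometheus metrics address to be overridden via environment variables.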
func init() {
ep := os.Getenv("ENDPOINT")
if ep != "" {
endpoint = ep
}
pa := os.Getenv("PROMETHEUS_METRICS_ADDR")
if pa != "" {
promAddr = pa
}
}
func main() {
fmt.Println("Listen gRPC Address:", endpoint)
lis, err := net.Listen("tcp", endpoint)
if err != nil {
log.Fatal(err)
}
s := grpc.NewServer(
grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor),
)
// Register your gRPC service implementations.
pb.RegisterAliveServiceServer(s, new(aliveService))
pb.RegisterUserServiceServer(s, new(userService))
// After all your registrations, make sure all of the Prometheus metrics are initialized.
grpc_prometheus.Register(s)
runPrometheus()
err = s.Serve(lis)
if err != nil {
log.Fatal(err)
}
}
// runPrometheus starts the Prometheus metrics server in a goroutine; it does not block.
func runPrometheus() {
mux := http.NewServeMux()
// Enable histogram
grpc_prometheus.EnableHandlingTimeHistogram()
mux.Handle("/metrics", promhttp.Handler())
go func() {
fmt.Println("Prometheus metrics bind address", promAddr)
log.Fatal(http.ListenAndServe(promAddr, mux))
}()
}
| [
"\"ENDPOINT\"",
"\"PROMETHEUS_METRICS_ADDR\""
] | [] | [
"ENDPOINT",
"PROMETHEUS_METRICS_ADDR"
] | [] | ["ENDPOINT", "PROMETHEUS_METRICS_ADDR"] | go | 2 | 0 | |
src/main/java/functional/tests/core/mobile/settings/MobileSettings.java | package functional.tests.core.mobile.settings;
import functional.tests.core.enums.DeviceType;
import functional.tests.core.enums.PlatformType;
import functional.tests.core.extensions.SystemExtension;
import functional.tests.core.log.LoggerBase;
import functional.tests.core.mobile.device.android.AndroidDevice;
import functional.tests.core.settings.Settings;
import functional.tests.core.utils.Aapt;
import functional.tests.core.utils.Archive;
import functional.tests.core.utils.FileSystem;
import functional.tests.core.utils.OSUtils;
import io.appium.java_client.remote.AutomationName;
import org.apache.commons.io.FileUtils;
import org.openqa.selenium.ScreenOrientation;
import java.io.File;
import java.io.IOException;
import java.nio.charset.Charset;
import java.util.HashMap;
/**
* MobileSettings.
* Read settings from config file to MobileSettings object.
* Config file is specified via appConfig VM option in tests based on this framework.
* For example: -DappConfig=resources/config/cuteness/cuteness.emu.default.api23.properties
*/
public class MobileSettings extends Settings {
private static LoggerBase loggerBase = LoggerBase.getLogger("MobileSettings");
private Aapt aapt;
String appiumVersion;
public boolean restartRealDevice;
public boolean isRealDevice;
public Double platformVersion;
public String packageId;
public String deviceId;
public String automationName;
public String appiumLogLevel;
public String appiumLogFile;
public String testAppFriendlyName;
public ScreenOrientation orientation;
public LoggerBase log;
public SettingsIOS ios;
public SettingsAndroid android;
public DeviceType deviceType;
public boolean reuseDevice;
public boolean usePrebuiltWDA;
/**
* Init settings.
*/
public MobileSettings() {
this.deviceId = this.properties.getProperty("udid");
this.initSettings();
}
/**
* Init Android specific settings.
*
* @return Android settings.
*/
private SettingsAndroid initSettingsAndroid() {
// Aapt needs to know the OS type.
this.aapt = new Aapt(this);
this.android = new SettingsAndroid();
loggerBase.separatorAndroid();
this.android.maxEmuCount = Integer.parseInt(OSUtils.getEnvironmentVariable("MAX_EMU_COUNT", "1"));
loggerBase.info("Maximum number of parallel emulators: " + this.android.maxEmuCount);
if (this.deviceType == DeviceType.Emulator) {
// The emulator port starts with 5, the next two digits come from the platform version, and the last digit is the minor version * 2.
this.deviceId = AndroidDevice.getEmulatorId(this.platformVersion);
}
loggerBase.info("Device Id: " + this.deviceId);
// Set testAppName
String name = this.testAppFileName.toLowerCase().replace("-release", "").replace("-debug", "");
this.testAppName = this.testAppFileName.substring(0, name.indexOf(".")).toLowerCase();
loggerBase.info("TestApp Name: " + this.testAppName);
this.packageId = this.aapt.getPackage();
loggerBase.info("TestApp Package Id: " + this.packageId);
this.android.defaultActivity = this.getDefaultActivity();
loggerBase.info("Default Activity: " + this.android.defaultActivity);
this.android.appWaitActivity = this.getAppWaitActivity();
loggerBase.info("App Wait Activity: " + this.android.appWaitActivity);
this.android.appWaitPackage = this.getAppWaitPackage();
loggerBase.info("App Wait Package: " + this.android.appWaitPackage);
this.testAppFriendlyName = this.aapt.getApplicationLabel();
loggerBase.info("TestApp Friendly Name: " + this.testAppFriendlyName);
if (this.deviceType == DeviceType.Emulator) {
this.android.emulatorOptions = this.properties.getProperty("emulatorOptions");
loggerBase.info("Emulator Options: " + this.android.emulatorOptions);
this.android.emulatorCreateOptions = this.properties.getProperty("emulatorCreateOptions");
loggerBase.info("Emulator Create Options: " + this.android.emulatorCreateOptions);
}
this.android.memoryMaxUsageLimit = this.getMemoryMaxUsageLimit();
loggerBase.info("Memory Usage Max Limit: "
+ (this.android.memoryMaxUsageLimit > -1 ? this.android.memoryMaxUsageLimit : "not set"));
this.android.appLaunchTimeLimit = this.getAppLaunchTimeLimit();
loggerBase.info("App Launch Time Limit: "
+ (this.android.appLaunchTimeLimit > -1 ? this.android.appLaunchTimeLimit : "not set"));
// Set isRealDevice
this.isRealDevice = this.deviceType == DeviceType.Android;
this.android.isRealDevice = this.isRealDevice;
return this.android;
}
/**
* Set the performance storage.
*/
private void setPerfStorage() {
this.perfDir = this.perfDir + File.separator + this.testAppName + File.separator + this.deviceName;
loggerBase.info("Performance storage for device: " + this.perfDir);
}
/**
* Init iOS specific settings.
*
* @return iOS settings.
*/
private SettingsIOS initSettingsIOS() {
this.ios = new SettingsIOS();
loggerBase.separatorIOS();
String wdaLocalPortAsString = OSUtils.getEnvironmentVariable("WDA_LOCAL_PORT",
String.valueOf(OSUtils.getFreePort(8000, 9000)));
this.ios.wdaLocalPort = Integer.parseInt(wdaLocalPortAsString);
loggerBase.info("WDA_LOCAL_PORT: " + wdaLocalPortAsString);
this.ios.maxSimCount = Integer.parseInt(OSUtils.getEnvironmentVariable("MAX_SIM_COUNT", "1"));
loggerBase.info("Maximum number of parallel iOS Simulators: " + this.ios.maxSimCount);
loggerBase.info("Device Id: " + this.deviceId);
this.ios.acceptAlerts = this.propertyToBoolean("acceptAlerts", false);
loggerBase.info("Auto Accept Alerts: " + this.ios.acceptAlerts);
// Set isRealDevice
if (this.deviceType == DeviceType.Simulator) {
this.isRealDevice = false;
this.ios.testAppArchive = this.properties.getProperty("testAppArchive");
loggerBase.info("TestApp Archive: " + this.ios.testAppArchive);
this.testAppName = this.ios.testAppArchive.substring(0, this.ios.testAppArchive.indexOf("."));
this.ios.simulatorType = this.properties.getProperty("simulatorType");
loggerBase.info("Simulator Type: " + this.ios.simulatorType);
this.extractApp();
} else {
this.isRealDevice = true;
this.ios.xCode8ConfigFile = BASE_RESOURCE_DIR +
File.separator + "xcode" + File.separator + "xcode8config.xcconfig";
this.testAppName = this.testAppFileName.replace(".ipa", "");
this.setupDevelopmentTeam();
loggerBase.info("xCode 8 config file. Initialized if it is real device " + this.ios.xCode8ConfigFile);
}
// TODO(dtopuzov): Find better way to get testAppFriendlyName.
this.testAppFriendlyName = this.testAppName;
loggerBase.info("TestApp Friendly Name: " + this.testAppFriendlyName);
this.testAppName = this.testAppName.toLowerCase();
loggerBase.info("TestApp Name: " + this.testAppName);
this.packageId = this.getIOSPackageId();
loggerBase.info("TestApp Package Id: " + this.packageId);
this.ios.isRealDevice = this.isRealDevice;
return this.ios;
}
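/** Maps platformVersion values that are not plain decimal numbers to the numeric versions used by the framework. */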
private static HashMap<String, Double> emulatorMap() {
HashMap<String, Double> emulatorMap = new HashMap<>();
emulatorMap.put("P", 9.0);
emulatorMap.put("P Preview", 9.0);
emulatorMap.put("7.1.1", 7.1);
return emulatorMap;
}
/**
* Init common settings.
*/
public void initSettings() {
super.initSettings();
this.restartRealDevice = this.propertyToBoolean("restartRealDevice", false);
Double correctPlatformVersion = emulatorMap().get(this.properties.getProperty("platformVersion").trim());
if (correctPlatformVersion != null) {
this.platformVersion = correctPlatformVersion;
} else {
this.platformVersion = Double.parseDouble(this.properties.getProperty("platformVersion").trim());
}
this.appiumVersion = this.properties.getProperty("appiumVersion");
this.automationName = this.getAutomationName();
this.appiumLogLevel = this.properties.getProperty("appiumLogLevel", "warn");
this.appiumLogFile = this.baseLogDir + File.separator + "appium.log";
this.orientation = this.getScreenOrientation();
// If defaultTimeout is not specified set it to 30 sec.
this.defaultTimeout = this.convertPropertyToInt("defaultTimeout", 30);
this.shortTimeout = this.defaultTimeout / 5;
// If deviceBootTimeout is not specified set it equal to defaultTimeout
this.deviceBootTimeout = this.convertPropertyToInt("deviceBootTimeout", 300);
this.deviceType = this.getDeviceType();
String usePreBuildWDAEnv = System.getenv("USE_PREBUILT_WDA");
if (usePreBuildWDAEnv != null && !usePreBuildWDAEnv.isEmpty()) {
this.usePrebuiltWDA = usePreBuildWDAEnv.equals("true");
}
if (this.platform == PlatformType.Android) {
loggerBase = LoggerBase.getLogger("AndroidSettings");
this.android = this.initSettingsAndroid();
} else if (this.platform == PlatformType.iOS) {
loggerBase = LoggerBase.getLogger("IOSSettings");
this.ios = this.initSettingsIOS();
}
String deviceToken = System.getenv("DEVICE_TOKEN");
if (deviceToken != null && !deviceToken.isEmpty()) {
loggerBase.info("DEVICE_TOKEN: " + deviceToken);
this.deviceId = deviceToken;
this.reuseDevice = true;
// Check if device token is undefined
if (deviceToken.equalsIgnoreCase("undefined")) {
String error = "Invalid device token was passed!";
loggerBase.fatal(error);
SystemExtension.interruptProcess(error);
}
} else {
loggerBase.info("DEVICE_TOKEN not set!");
}
loggerBase.info("Reuse device: " + this.reuseDevice);
loggerBase.info("Device Id: " + this.deviceId);
this.setPerfStorage();
loggerBase.info("Platform Version: " + this.platformVersion);
loggerBase.info("Device Type: " + this.deviceType);
loggerBase.info("Real device: " + this.isRealDevice);
if (this.isRealDevice) {
loggerBase.info("Restart real device: " + this.restartRealDevice);
}
loggerBase.info("Appium Version: " + this.appiumVersion);
loggerBase.info("Appium Log File: " + this.appiumLogFile);
loggerBase.info("Appium Log Level: " + this.appiumLogLevel);
loggerBase.info("Automation Name: " + this.automationName);
loggerBase.info("Restart app Between Tests: " + this.restartApp);
if (this.orientation != null) {
loggerBase.info("Screen Orientation: " + this.orientation);
}
loggerBase.separator();
}
/**
* Extract test application.
* For the iOS Simulator the test app (*.app) must be packaged in a tgz archive.
* This method will extract the archive.
*/
private void extractApp() {
// Make sure no old app is available.
try {
FileSystem.deletePath(BASE_TEST_APP_DIR + File.separator + this.testAppFileName);
// Extract archive.
File tgzPath = new File(BASE_TEST_APP_DIR + File.separator + this.ios.testAppArchive);
File dir = new File(BASE_TEST_APP_DIR);
Archive.extractArchive(tgzPath, dir);
} catch (IOException e) {
e.printStackTrace();
}
}
/**
* Get screen orientation settings.
* <p>
* @return ScreenOrientation.
*/
private ScreenOrientation getScreenOrientation() {
String orientation = this.properties.getProperty("orientation", "none");
if (orientation.toLowerCase().contains("port")) {
return ScreenOrientation.PORTRAIT;
} else if (orientation.toLowerCase().contains("land")) {
return ScreenOrientation.LANDSCAPE;
} else {
return null;
}
}
/**
* Get default activity setting (automatically from apk file).
*
* @return default activity.
*/
private String getDefaultActivity() {
return this.aapt.getLaunchableActivity();
}
/**
* Get appWaitActivity setting.
* If not specified by default it is equal to defaultActivity.
* When Appium start app under test it will wait until appWaitActivity is loaded.
*
* @return default activity.
*/
private String getAppWaitActivity() {
String appWaitActivityString = this.properties.getProperty("appWaitActivity");
if (appWaitActivityString == null) {
this.android.appWaitActivity = this.android.defaultActivity;
} else {
this.android.appWaitActivity = appWaitActivityString;
}
return this.android.appWaitActivity;
}
/**
* Get appWaitPackage setting.
* If not specified by default it is equal to packageId.
* When Appium start app under test it will wait until appWaitPackage is loaded.
*
* @return default activity.
*/
private String getAppWaitPackage() {
String appWaitPackageString = this.properties.getProperty("appWaitPackage");
if (appWaitPackageString == null) {
this.android.appWaitPackage = this.packageId;
} else {
this.android.appWaitPackage = appWaitPackageString;
}
return this.android.appWaitPackage;
}
/**
* Get iOS bundle identifier of app under test.
*
* @return iOS bundle identifier of app under test.
*/
private String getIOSPackageId() {
String result = null;
String plistPath = this.getPlistPath();
File file = new File(plistPath);
if (file.exists()) {
String command = "/usr/libexec/PlistBuddy -c 'Print CFBundleIdentifier' " + plistPath;
result = OSUtils.runProcess(command).trim();
} else {
loggerBase.error("File " + plistPath + " does not exist.");
}
return result;
}
/**
* Get path of Info.plist of iOS app under test.
* Info.plist holds information for app under test.
*
* @return path to Info.plist
*/
private String getPlistPath() {
String plistPath = null;
if (this.deviceType == DeviceType.Simulator) {
plistPath = BASE_TEST_APP_DIR + File.separator + this.testAppFileName + File.separator + "Info.plist";
} else if (this.deviceType == DeviceType.iOS) {
String ipaPath = BASE_TEST_APP_DIR + File.separator + this.testAppFileName;
OSUtils.runProcess("unzip -o " + ipaPath + " -d " + BASE_TEST_APP_DIR);
String appName = OSUtils.runProcess("ls " + BASE_TEST_APP_DIR + File.separator + "Payload").trim();
plistPath = BASE_TEST_APP_DIR +
File.separator + "Payload" + File.separator + appName + File.separator + "Info.plist";
}
return plistPath;
}
/**
* Get max memory usage limit setting.
*
* @return max memory usage limit in kB.
*/
private int getMemoryMaxUsageLimit() {
String value = this.properties.getProperty("memoryMaxUsageLimit");
if (value != null && !value.isEmpty()) {
return Integer.parseInt(value);
} else {
return -1;
}
}
/**
* Get launch timeout limit setting.
*
* @return launch timeout limit in milliseconds.
*/
private int getAppLaunchTimeLimit() {
String value = this.properties.getProperty("appLaunchTimeLimit");
if (value != null && !value.isEmpty()) {
return Integer.parseInt(value);
} else {
return -1;
}
}
/**
* Set developer team settings.
* Required for signing of apps that run on physical iOS devices.
*/
private void setupDevelopmentTeam() {
File file = new File(this.ios.xCode8ConfigFile);
FileSystem.ensureFolderExists(file.getParent());
try {
file.createNewFile();
} catch (IOException e) {
e.printStackTrace();
}
String teamId = System.getenv("DEVELOPMENT_TEAM");
String fileContext = String.format("DEVELOPMENT_TEAM=%s\nCODE_SIGN_IDENTITY=iPhone Developer", teamId);
try {
FileUtils.write(file, fileContext, Charset.defaultCharset());
} catch (IOException e) {
e.printStackTrace();
}
}
/**
* Get device type (simulator, emulator, Android or iOS device).
*
* @return DeviceType value.
*/
private DeviceType getDeviceType() {
String deviceTypeString = this.properties.getProperty("deviceType");
if (deviceTypeString.equalsIgnoreCase("android")) {
return DeviceType.Android;
} else if (deviceTypeString.equalsIgnoreCase("ios")) {
return DeviceType.iOS;
} else if (deviceTypeString.toLowerCase().contains("emu")) {
return DeviceType.Emulator;
} else if (deviceTypeString.toLowerCase().contains("sim")) {
return DeviceType.Simulator;
} else {
return DeviceType.Other;
}
}
/**
* Get automation name setting.
*
* @return Name of automation technology.
*/
private String getAutomationName() {
String automationNameString = this.properties.getProperty("automationName");
if (automationNameString != null) {
this.automationName = automationNameString.trim();
} else {
if (this.platform == PlatformType.Android) {
if (this.platformVersion < 5.0) {
this.automationName = "UiAutomator1";
} else {
this.automationName = AutomationName.ANDROID_UIAUTOMATOR2;
}
} else if (this.platform == PlatformType.iOS) {
this.automationName = AutomationName.IOS_XCUI_TEST;
}
}
return this.automationName;
}
}
| [
"\"USE_PREBUILT_WDA\"",
"\"DEVICE_TOKEN\"",
"\"DEVELOPMENT_TEAM\""
] | [] | [
"DEVICE_TOKEN",
"DEVELOPMENT_TEAM",
"USE_PREBUILT_WDA"
] | [] | ["DEVICE_TOKEN", "DEVELOPMENT_TEAM", "USE_PREBUILT_WDA"] | java | 3 | 0 | |
sdk/aot/azure-aot-graalvm-samples/src/main/java/com/azure/aot/graalvm/samples/formrecognizer/FormRecognizerSample.java | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package com.azure.aot.graalvm.samples.formrecognizer;
import com.azure.ai.formrecognizer.DocumentAnalysisClient;
import com.azure.ai.formrecognizer.DocumentAnalysisClientBuilder;
import com.azure.ai.formrecognizer.models.AnalyzeResult;
import com.azure.ai.formrecognizer.models.AnalyzedDocument;
import com.azure.ai.formrecognizer.models.DocumentField;
import com.azure.ai.formrecognizer.models.DocumentFieldType;
import com.azure.ai.formrecognizer.models.DocumentOperationResult;
import com.azure.core.credential.AzureKeyCredential;
import com.azure.core.util.polling.SyncPoller;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.time.LocalDate;
import java.util.List;
import java.util.Map;
/**
* A sample to demonstrate Form Recognizer's functionality to recognize receipts using GraalVM.
*/
public class FormRecognizerSample {
private static final String AZURE_FORM_RECOGNIZER_ENDPOINT = System.getenv("AZURE_FORM_RECOGNIZER_ENDPOINT");
private static final String AZURE_FORM_RECOGNIZER_KEY = System.getenv("AZURE_FORM_RECOGNIZER_KEY");
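/**
 * Analyzes the bundled sample receipt image with the prebuilt receipt model and prints the recognized fields.
 *
 * @throws IOException if the sample image cannot be read.
 */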
public static void runSample() throws IOException {
System.out.println("\n================================================================");
System.out.println(" Starting Form Recognizer Sample");
System.out.println("================================================================");
// Instantiate a client that will be used to call the service.
DocumentAnalysisClient client = new DocumentAnalysisClientBuilder()
.credential(new AzureKeyCredential(AZURE_FORM_RECOGNIZER_KEY))
.endpoint(AZURE_FORM_RECOGNIZER_ENDPOINT)
.buildClient();
InputStream resourceAsStream = FormRecognizerSample.class.getClassLoader().getResourceAsStream("contoso-allinone.jpg");
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
int nRead;
byte[] data = new byte[4096];
while ((nRead = resourceAsStream.read(data, 0, data.length)) != -1) {
buffer.write(data, 0, nRead);
}
byte[] fileContent = buffer.toByteArray();
InputStream targetStream = new ByteArrayInputStream(fileContent);
SyncPoller<DocumentOperationResult, AnalyzeResult> analyzeReceiptPoller =
client.beginAnalyzeDocument("prebuilt-receipt", targetStream, fileContent.length);
AnalyzeResult receiptResults = analyzeReceiptPoller.getFinalResult();
for (int i = 0; i < receiptResults.getDocuments().size(); i++) {
AnalyzedDocument analyzedReceipt = receiptResults.getDocuments().get(i);
Map<String, DocumentField> receiptFields = analyzedReceipt.getFields();
System.out.printf("----------- Analyzing receipt info %d -----------%n", i);
DocumentField merchantNameField = receiptFields.get("MerchantName");
if (merchantNameField != null) {
if (DocumentFieldType.STRING == merchantNameField.getType()) {
String merchantName = merchantNameField.getValueString();
System.out.printf("Merchant Name: %s, confidence: %.2f%n",
merchantName, merchantNameField.getConfidence());
}
}
DocumentField merchantPhoneNumberField = receiptFields.get("MerchantPhoneNumber");
if (merchantPhoneNumberField != null) {
if (DocumentFieldType.PHONE_NUMBER == merchantPhoneNumberField.getType()) {
String merchantAddress = merchantPhoneNumberField.getValuePhoneNumber();
System.out.printf("Merchant Phone number: %s, confidence: %.2f%n",
merchantAddress, merchantPhoneNumberField.getConfidence());
}
}
DocumentField merchantAddressField = receiptFields.get("MerchantAddress");
if (merchantAddressField != null) {
if (DocumentFieldType.STRING == merchantAddressField.getType()) {
String merchantAddress = merchantAddressField.getValueString();
System.out.printf("Merchant Address: %s, confidence: %.2f%n",
merchantAddress, merchantAddressField.getConfidence());
}
}
DocumentField transactionDateField = receiptFields.get("TransactionDate");
if (transactionDateField != null) {
if (DocumentFieldType.DATE == transactionDateField.getType()) {
LocalDate transactionDate = transactionDateField.getValueDate();
System.out.printf("Transaction Date: %s, confidence: %.2f%n",
transactionDate, transactionDateField.getConfidence());
}
}
DocumentField receiptItemsField = receiptFields.get("Items");
if (receiptItemsField != null) {
System.out.printf("Receipt Items: %n");
if (DocumentFieldType.LIST == receiptItemsField.getType()) {
List<DocumentField> receiptItems = receiptItemsField.getValueList();
receiptItems.stream()
.filter(receiptItem -> DocumentFieldType.MAP == receiptItem.getType())
.map(formField -> formField.getValueMap())
.forEach(formFieldMap -> formFieldMap.forEach((key, formField) -> {
if ("Name".equals(key)) {
if (DocumentFieldType.STRING == formField.getType()) {
String name = formField.getValueString();
System.out.printf("Name: %s, confidence: %.2fs%n",
name, formField.getConfidence());
}
}
if ("Quantity".equals(key)) {
if (DocumentFieldType.FLOAT == formField.getType()) {
Float quantity = formField.getValueFloat();
System.out.printf("Quantity: %f, confidence: %.2f%n",
quantity, formField.getConfidence());
}
}
if ("Price".equals(key)) {
if (DocumentFieldType.FLOAT == formField.getType()) {
Float price = formField.getValueFloat();
System.out.printf("Price: %f, confidence: %.2f%n",
price, formField.getConfidence());
}
}
if ("TotalPrice".equals(key)) {
if (DocumentFieldType.FLOAT == formField.getType()) {
Float totalPrice = formField.getValueFloat();
System.out.printf("Total Price: %f, confidence: %.2f%n",
totalPrice, formField.getConfidence());
}
}
}));
}
}
}
System.out.println("\n================================================================");
System.out.println(" Form Recognizer Sample Complete");
System.out.println("================================================================");
}
}
| [
"\"AZURE_FORM_RECOGNIZER_ENDPOINT\"",
"\"AZURE_FORM_RECOGNIZER_KEY\""
] | [] | [
"AZURE_FORM_RECOGNIZER_KEY",
"AZURE_FORM_RECOGNIZER_ENDPOINT"
] | [] | ["AZURE_FORM_RECOGNIZER_KEY", "AZURE_FORM_RECOGNIZER_ENDPOINT"] | java | 2 | 0 | |
go/stellar/bundle/boxer_test.go | package bundle
import (
"context"
"crypto/sha256"
"encoding/base64"
"errors"
"os"
"testing"
"github.com/davecgh/go-spew/spew"
"github.com/keybase/client/go/libkb"
"github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/client/go/protocol/stellar1"
"github.com/stretchr/testify/require"
)
const v2 = stellar1.BundleVersion_V2
func testBundle(t *testing.T) stellar1.Bundle {
secretKey := stellar1.SecretKey("SDGCPMBQHYAIWM3PQOEKWICDMLVT7REJ24J26QEYJYGB6FJRPTKDULQX")
newBundle, err := New(secretKey, "test")
require.NoError(t, err)
newBundle.Accounts[0].IsPrimary = true
return *newBundle
}
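// TestBundleRoundtrip boxes a bundle with a per-user key and verifies that DecodeAndUnbox returns the original accounts and signers.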
func TestBundleRoundtrip(t *testing.T) {
m := libkb.NewMetaContext(context.Background(), nil)
ring := newPukRing()
pukSeed, pukGen := ring.makeGen(t, 1)
bundle := testBundle(t)
t.Logf("puk seed (hex): %v", toB64(pukSeed[:]))
t.Logf("puk gen: %v", pukGen)
boxed, err := BoxAndEncode(&bundle, pukGen, pukSeed)
require.NoError(t, err)
t.Logf("outer enc b64: %v", boxed.EncParentB64)
t.Logf("outer vis b64: %v", boxed.VisParentB64)
t.Logf("enc.N b64: %v", toB64(boxed.EncParent.N[:]))
t.Logf("enc.E b64: %v", toB64(boxed.EncParent.E))
require.Equal(t, v2, boxed.FormatVersionParent)
require.NotEmpty(t, boxed.VisParentB64)
require.NotEmpty(t, boxed.EncParentB64)
require.Len(t, boxed.AcctBundles, 1)
require.True(t, len(boxed.EncParentB64) > 100)
require.NotZero(t, boxed.EncParent.N)
require.Equal(t, 2, boxed.EncParent.V)
require.True(t, len(boxed.EncParent.E) > 100)
require.Equal(t, pukGen, boxed.EncParent.Gen)
bundle2, version, decodedPukGen, accountGens, err := DecodeAndUnbox(m, ring, boxed.toBundleEncodedB64())
require.NoError(t, err)
require.Equal(t, v2, version)
require.Equal(t, pukGen, decodedPukGen)
require.Nil(t, bundle2.Prev)
require.NotNil(t, bundle2.OwnHash)
require.Equal(t, bundle.Revision, bundle2.Revision)
require.Equal(t, len(bundle.Accounts), len(bundle2.Accounts))
for i, acct := range bundle.Accounts {
acct2 := bundle2.Accounts[i]
require.Equal(t, acct.AccountID, acct2.AccountID)
require.Equal(t, acct.Mode, acct2.Mode)
require.Equal(t, acct.Name, acct2.Name)
require.Equal(t, acct.IsPrimary, acct2.IsPrimary)
require.Equal(t, acct.AcctBundleRevision, acct2.AcctBundleRevision)
signers1 := bundle.AccountBundles[acct.AccountID].Signers
signers2 := bundle2.AccountBundles[acct2.AccountID].Signers
require.Equal(t, signers1, signers2)
require.True(t, len(signers2) == 1) // exactly one signer
require.True(t, len(signers2[0]) > 0)
require.Equal(t, keybase1.PerUserKeyGeneration(1), accountGens[acct.AccountID])
}
}
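// TestBundlePrevs verifies that successive bundle revisions chain together via Prev/OwnHash, including across a per-user key rotation.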
func TestBundlePrevs(t *testing.T) {
m := libkb.NewMetaContext(context.Background(), nil)
ring := newPukRing()
pukSeed, pukGen := ring.makeGen(t, 1)
b1 := testBundle(t)
// encode and decode b1 to populate OwnHash
b1Boxed, err := BoxAndEncode(&b1, pukGen, pukSeed)
require.NoError(t, err)
b1Decoded, _, _, _, err := DecodeAndUnbox(m, ring, b1Boxed.toBundleEncodedB64())
require.NoError(t, err)
// make a change, and verify hashes are correct
b2 := b1Decoded.DeepCopy()
b2.Accounts[0].Name = "apples"
b2.Prev = b1Decoded.OwnHash
b2.OwnHash = nil
b2.Revision++
b2Boxed, err := BoxAndEncode(&b2, pukGen, pukSeed)
require.NoError(t, err)
b2Decoded, _, _, _, err := DecodeAndUnbox(m, ring, b2Boxed.toBundleEncodedB64())
require.NoError(t, err)
require.Equal(t, "apples", b2Decoded.Accounts[0].Name, "change carried thru")
require.NotNil(t, b2Decoded.Prev)
require.Equal(t, b2Decoded.Prev, b1Decoded.OwnHash, "b2 prevs to b1")
// change the keys and do it again
pukSeed, pukGen = ring.makeGen(t, 2)
b3 := b2Decoded.DeepCopy()
b3.Accounts[0].Name = "bananas"
b3.Prev = b2Decoded.OwnHash
b3.OwnHash = nil
b3.Revision++
b3Boxed, err := BoxAndEncode(&b3, pukGen, pukSeed)
require.NoError(t, err)
b3Decoded, _, bundleGen, accountGens, err := DecodeAndUnbox(m, ring, b3Boxed.toBundleEncodedB64())
require.NoError(t, err)
require.Equal(t, "bananas", b3Decoded.Accounts[0].Name, "change carried thru")
require.NotNil(t, b3Decoded.Prev)
require.Equal(t, b3Decoded.Prev, b2Decoded.OwnHash, "b3 prevs to b2")
require.Equal(t, keybase1.PerUserKeyGeneration(2), bundleGen)
for _, acct := range b3Decoded.Accounts {
require.Equal(t, keybase1.PerUserKeyGeneration(2), accountGens[acct.AccountID])
}
}
func TestBundleRoundtripCorruptionEnc(t *testing.T) {
m := libkb.NewMetaContext(context.Background(), nil)
bundle := testBundle(t)
ring := newPukRing()
pukSeed, pukGen := ring.makeGen(t, 4)
boxed, err := BoxAndEncode(&bundle, pukGen, pukSeed)
require.NoError(t, err)
replaceWith := "a"
if boxed.EncParentB64[85] == 'a' {
replaceWith = "b"
}
boxed.EncParentB64 = boxed.EncParentB64[:85] + replaceWith + boxed.EncParentB64[86:]
_, _, _, _, err = DecodeAndUnbox(m, ring, boxed.toBundleEncodedB64())
require.Error(t, err)
require.Contains(t, err.Error(), "stellar bundle secret box open failed")
}
func TestBundleRoundtripCorruptionVis(t *testing.T) {
m := libkb.NewMetaContext(context.Background(), nil)
bundle := testBundle(t)
ring := newPukRing()
pukSeed, pukGen := ring.makeGen(t, 3)
boxed, err := BoxAndEncode(&bundle, pukGen, pukSeed)
require.NoError(t, err)
replaceWith := "a"
if boxed.VisParentB64[85] == 'a' {
replaceWith = "b"
}
boxed.VisParentB64 = boxed.VisParentB64[:85] + replaceWith + boxed.VisParentB64[86:]
_, _, _, _, err = DecodeAndUnbox(m, ring, boxed.toBundleEncodedB64())
require.Error(t, err)
require.Contains(t, err.Error(), "visible hash mismatch")
}
func TestBoxAndEncodeCatchesMalformedBundles(t *testing.T) {
bundle := testBundle(t)
ring := newPukRing()
pukSeed, pukGen := ring.makeGen(t, 3)
// put a different account and secret in the AccountBundle
newAcctID, newSecret, err := randomStellarKeypair()
require.NoError(t, err)
newAB := map[stellar1.AccountID]stellar1.AccountBundle{
newAcctID: {
AccountID: newAcctID,
Signers: []stellar1.SecretKey{newSecret},
},
}
bundle.AccountBundles = newAB
// encode should error because the bundle is invalid
_, err = BoxAndEncode(&bundle, pukGen, pukSeed)
require.Contains(t, err.Error(), "account in AccountBundles not in Accounts")
}
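// accountCan and canned hold pre-generated ("canned") boxed bundle fixtures used by the tests below to exercise decode and error paths.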
type accountCan struct {
accountID stellar1.AccountID
encB64 string
}
type canned struct {
pukSeedB64 string
pukGen int
encParentB64 string
visParentB64 string
accounts []accountCan
}
func (c *canned) puk(t *testing.T) (puk libkb.PerUserKeySeed) {
bs, err := base64.StdEncoding.DecodeString(c.pukSeedB64)
require.NoError(t, err)
require.Equal(t, len(puk), len(bs))
copy(puk[:], bs)
return puk
}
func (c *canned) gen() keybase1.PerUserKeyGeneration {
return keybase1.PerUserKeyGeneration(c.pukGen)
}
func (c *canned) toBundleEncodedB64() BundleEncoded {
benc := BundleEncoded{
EncParent: c.encParentB64,
VisParent: c.visParentB64,
AcctBundles: make(map[stellar1.AccountID]string),
}
for _, acct := range c.accounts {
benc.AcctBundles[acct.accountID] = acct.encB64
}
return benc
}
func (c *canned) ring(t *testing.T) *pukRing {
pukSeed := c.puk(t)
pukGen := c.gen()
return &pukRing{
map[keybase1.PerUserKeyGeneration]libkb.PerUserKeySeed{
pukGen: pukSeed,
},
}
}
var cans = []canned{
// this one is valid
{"R81SkpClcSUPMzch6UAstOhS+hbZi4R43HzbRiLQ46o=",
3,
"hKFlxPCoH32GB0er08rZF1B2sfZNME3/35sFKWBVSCQTFKiTAuGSe2mK9AKCWMLkXcTvoJojtyu56hBwGbXQCqqSL8eY1sb8UGG7SsuvNBr27hzUtosJmb9tCT0uikOY8YFPYAtWbmqHB9QvqeBEtysd7ZDDJPG0cJ9lckvj9rSAE/wuhcVlHMAWfbOvGvOLDf56VVK46Ms7bGTSedTKHj8IPpF48RF1GrDlvZQRgqD8ydwtqMGZ1ZkqF+DKKXaEQaIhY47L50Ynna7Qzm8ZCEujsuo5W3EKtZtY6XG0RYx7AzdhkXKzFVDmINVHxkZbQi66QpWjZ2VuA6FuxBh9I1ef+UMA9u3rOYAqPzeVm6hlam5ZX62hdgI=",
"g6hhY2NvdW50c5KFqWFjY291bnRJRNk4R0FXWjdIVlBLUkdDSDJLUDY0NzVYVjZIQTJDQUY0NE1YV1dFNVJLVjRMTU1HQjZGTk5TRVBOUEWyYWNjdEJ1bmRsZVJldmlzaW9uAbFlbmNBY2N0QnVuZGxlSGFzaMQgBgkhqzuIynMJrhIeuSOPTCoS5QutvwDXZr7fHVuCmpipaXNQcmltYXJ5w6Rtb2RlAYWpYWNjb3VudElE2ThHQlBMTEhPS1BSRlNDRTZGUDZEMzdBVVVOWUNKMlRTUE9ITUhBUjU0VEJETFdXUElaQ0Q0UkwzR7JhY2N0QnVuZGxlUmV2aXNpb24BsWVuY0FjY3RCdW5kbGVIYXNoxCCbBuOilM5oKBvC1JaEzJoq8l1W1picV5MxkTEOrPEnpalpc1ByaW1hcnnCpG1vZGUBpHByZXbAqHJldmlzaW9uAQ==",
[]accountCan{
{"GAWZ7HVPKRGCH2KP6475XV6HA2CAF44MXWWE5RKV4LMMGB6FNNSEPNPE", "hKFlxKUc3fqMOAYv9m6ycpnpQA4CSriSQoPVNbvsXXEgv4WsixkaTThkKGMlYuWvTJdHeqPdYXo/Mw156xq8MaIbzDTriFplFNcLhNYdi8f1ViHqvVIecX2frU/BOtIsAlqknnhl4+Z1u2kUnZYI5pZRvUV5H1loSWC4tBmEoCgK1S6XrLx1POOIiKkH8EFMXVrB6+BjbieW1w8HTXNp0jWbKkq+QoKz8MCjZ2VuA6FuxBibtfMD40wYqnfPsGePn1RphpzcEqv7mZehdgE="},
{"GBPLLHOKPRFSCE6FP6D37AUUNYCJ2TSPOHMHAR54TBDLWWPIZCD4RL3G", "hKFlxKX3zqweGMWyl4vOC8ht0jngDnTEpWqGBePh7okF9S003QStjI9Td1d+urqj/k4etwgLiPO8LHaaQ7o2WAwuSXakHJVJ2xIq+T+MYnoobbx5sArk4wsg7AWTX+uw/rXoyN7P9ZAgDSH+4oZ6/0j8dwR6RAoc9c+dog8xnW3eTkeSlj2KYx6XnEOOlOcCBCKsZePi9eoN92CxvB5MRN6tR7w3PH4yynqjZ2VuA6FuxBha4JmmVyysST3avwMxayHfTExp7tnLwHOhdgE="},
}},
// bad decryption puk
{"1111111111111111111111111111111111111111111=",
3,
"hKFlxPCoH32GB0er08rZF1B2sfZNME3/35sFKWBVSCQTFKiTAuGSe2mK9AKCWMLkXcTvoJojtyu56hBwGbXQCqqSL8eY1sb8UGG7SsuvNBr27hzUtosJmb9tCT0uikOY8YFPYAtWbmqHB9QvqeBEtysd7ZDDJPG0cJ9lckvj9rSAE/wuhcVlHMAWfbOvGvOLDf56VVK46Ms7bGTSedTKHj8IPpF48RF1GrDlvZQRgqD8ydwtqMGZ1ZkqF+DKKXaEQaIhY47L50Ynna7Qzm8ZCEujsuo5W3EKtZtY6XG0RYx7AzdhkXKzFVDmINVHxkZbQi66QpWjZ2VuA6FuxBh9I1ef+UMA9u3rOYAqPzeVm6hlam5ZX62hdgI=",
"g6hhY2NvdW50c5KFqWFjY291bnRJRNk4R0FXWjdIVlBLUkdDSDJLUDY0NzVYVjZIQTJDQUY0NE1YV1dFNVJLVjRMTU1HQjZGTk5TRVBOUEWyYWNjdEJ1bmRsZVJldmlzaW9uAbFlbmNBY2N0QnVuZGxlSGFzaMQgBgkhqzuIynMJrhIeuSOPTCoS5QutvwDXZr7fHVuCmpipaXNQcmltYXJ5w6Rtb2RlAYWpYWNjb3VudElE2ThHQlBMTEhPS1BSRlNDRTZGUDZEMzdBVVVOWUNKMlRTUE9ITUhBUjU0VEJETFdXUElaQ0Q0UkwzR7JhY2N0QnVuZGxlUmV2aXNpb24BsWVuY0FjY3RCdW5kbGVIYXNoxCCbBuOilM5oKBvC1JaEzJoq8l1W1picV5MxkTEOrPEnpalpc1ByaW1hcnnCpG1vZGUBpHByZXbAqHJldmlzaW9uAQ==",
[]accountCan{
{"GAWZ7HVPKRGCH2KP6475XV6HA2CAF44MXWWE5RKV4LMMGB6FNNSEPNPE", "hKFlxKUc3fqMOAYv9m6ycpnpQA4CSriSQoPVNbvsXXEgv4WsixkaTThkKGMlYuWvTJdHeqPdYXo/Mw156xq8MaIbzDTriFplFNcLhNYdi8f1ViHqvVIecX2frU/BOtIsAlqknnhl4+Z1u2kUnZYI5pZRvUV5H1loSWC4tBmEoCgK1S6XrLx1POOIiKkH8EFMXVrB6+BjbieW1w8HTXNp0jWbKkq+QoKz8MCjZ2VuA6FuxBibtfMD40wYqnfPsGePn1RphpzcEqv7mZehdgE="},
{"GBPLLHOKPRFSCE6FP6D37AUUNYCJ2TSPOHMHAR54TBDLWWPIZCD4RL3G", "hKFlxKX3zqweGMWyl4vOC8ht0jngDnTEpWqGBePh7okF9S003QStjI9Td1d+urqj/k4etwgLiPO8LHaaQ7o2WAwuSXakHJVJ2xIq+T+MYnoobbx5sArk4wsg7AWTX+uw/rXoyN7P9ZAgDSH+4oZ6/0j8dwR6RAoc9c+dog8xnW3eTkeSlj2KYx6XnEOOlOcCBCKsZePi9eoN92CxvB5MRN6tR7w3PH4yynqjZ2VuA6FuxBha4JmmVyysST3avwMxayHfTExp7tnLwHOhdgE="},
}},
// this one has two primary accounts
{"zZZijzv+D622csZjyzgZHt/avWYJaHH0S42rO29uBh4=",
3,
"hKFlxPBByHsjg6VR42RdYX5UsALFZ5XTG348GsP+J1ubWC2Iv+49/NjZtexC9rqQaIVU0yz/oKmJfpBBlB6m3EDjkca/5yszpDPf1WKPQR7tzJMAPpwmbXKC3dWZGO/elRgvGiH3rvq1SVMn5Od20Gkn81rn0w4M2VtiXl23dUqgTPV3zxgnWYgi+qz2MYBQUOCDiIXRQCEoz9uryF36GuI0RhmM5r14zfPTo2Ru6hDqN2FN17aJ/D7xTBiIdQUAlN6cUZS/nQEEEwdxJmlFTzXgoIR7puO0sC9Q1PWMKTfRrCzhUkV/VVyWEMZiQR1VbWii58OjZ2VuA6FuxBh7HP8NWh2qAIdc8bX/gka07BmIGJ6N1BGhdgI=",
"g6hhY2NvdW50c5KFqWFjY291bnRJRNk4R0FXWjdIVlBLUkdDSDJLUDY0NzVYVjZIQTJDQUY0NE1YV1dFNVJLVjRMTU1HQjZGTk5TRVBOUEWyYWNjdEJ1bmRsZVJldmlzaW9uAbFlbmNBY2N0QnVuZGxlSGFzaMQgYsYYlXdhMYNv+TjdE9jLU/9InY7g9UFovmMZyVX43SipaXNQcmltYXJ5w6Rtb2RlAYWpYWNjb3VudElE2ThHQlBMTEhPS1BSRlNDRTZGUDZEMzdBVVVOWUNKMlRTUE9ITUhBUjU0VEJETFdXUElaQ0Q0UkwzR7JhY2N0QnVuZGxlUmV2aXNpb24BsWVuY0FjY3RCdW5kbGVIYXNoxCAHEbHL2jsIn5lJJCTkKvM24zKMf+Cu7k52bSSNCaOXsqlpc1ByaW1hcnnDpG1vZGUBpHByZXbAqHJldmlzaW9uAQ==",
[]accountCan{
{"GAWZ7HVPKRGCH2KP6475XV6HA2CAF44MXWWE5RKV4LMMGB6FNNSEPNPE", "hKFlxKXNijkx4xEPPfrWDSmKzCvDxsaGaqOU+AGRJ21tXT/YiIYVy+Xhqn8ZDA8q7b6NhOLvQQKXao8RaVTyJz2ZPfF4JFdhtB4NW2FvVibNShKFMhpiB2JKKLQ1pe5SWTctKeCMySQEOSuRPYw5h1agWuFSO6G41bjc6tqxSN1Dy3X0NNiTSlV2t+vTKEIxPpslUkraHzX7QzLx0L/UHWgTNSKlDQUFoP2jZ2VuA6FuxBiN3rakOKyhdiI11EnaQh+DJr+OaiDpCwOhdgE="},
{"GBPLLHOKPRFSCE6FP6D37AUUNYCJ2TSPOHMHAR54TBDLWWPIZCD4RL3G", "hKFlxKU/uVJvah1L9M9DXNzqfHkCafEpxeVPaZ+qi7/yxVrYPxaERZe3vtVpSSj/ubXOCps8PQdNGxryD9IpOHc7nz+a+jfGCrl1j5ka6cLaTtVRVPULm4zpFmtj3AG3OMMx3SERt+9nzwCzFMOhsWkF0qsUJmJb147619qHyYXVX9xBhfadKOnam91qDpeezjfkIJNfsc6wNz1Gq/lnw3NFJsWJKhLRPoyjZ2VuA6FuxBjyJpAx80CdDilh3Aa4kpgr2XpPm90HpwShdgE="},
}},
// this one has version 1
{"WkgRG8Kn+kJ+9E3UOr+2AL/28+1FAHFcuwaXFNn63Bc=",
3,
"hKFlxPDOMmhJALH9j+DVPBfgO5o5XoR0e0Wzoohc38H98QuRiDvZdlMSXXVmnXaeESLsdFVvmNBX7LNj8AQ3tsisxuGzUEPnDCIvBQOVqWb0YzCg8hvT5TxuxFaFYr+b7JP+/8JaDfO2ZMZHmwh0bYSy+cveFjmJQu9dqJPrFkaI5M2qE3k9V2d9RDn279l+/tKkXaTADI5si9e8+6ZwccuD+w8YTBhF6pu92Ums9sYwlu1NJhljnzrpnyZHwlkPEuz9bx6gc9flSsTsM+F14z+1/3Mw7dK6/5o3heU6Dp5DRVyYzDm89+Y380nqsdswUItkpDCjZ2VuA6FuxBghv5/O3avrFmYsqX/yOIimwsQV24wATFmhdgE=",
"g6hhY2NvdW50c5KFqWFjY291bnRJRNk4R0FXWjdIVlBLUkdDSDJLUDY0NzVYVjZIQTJDQUY0NE1YV1dFNVJLVjRMTU1HQjZGTk5TRVBOUEWyYWNjdEJ1bmRsZVJldmlzaW9uAbFlbmNBY2N0QnVuZGxlSGFzaMQgJ46Gf8Q34Oz+SY5WASuyNRbtK9amEwfZeh+cwYMkhu6paXNQcmltYXJ5w6Rtb2RlAYWpYWNjb3VudElE2ThHQlBMTEhPS1BSRlNDRTZGUDZEMzdBVVVOWUNKMlRTUE9ITUhBUjU0VEJETFdXUElaQ0Q0UkwzR7JhY2N0QnVuZGxlUmV2aXNpb24BsWVuY0FjY3RCdW5kbGVIYXNoxCBT04slGTYXYwS3F/NIMocy4hzfdbd6QbuAcu+fQLdk16lpc1ByaW1hcnnCpG1vZGUBpHByZXbAqHJldmlzaW9uAQ==",
[]accountCan{
{"GBPLLHOKPRFSCE6FP6D37AUUNYCJ2TSPOHMHAR54TBDLWWPIZCD4RL3G", "hKFlxKVmAT4zpHkRMmxvopFs5dNpPdbLP4Xbuv2vSb3Nb5v+X+5mJCOy+/viCSlFabN0hiLvRA9SNdbhmB0nGGEqr4KTpwI1Igi9kpDHct4WfaO5JKxM9z/c4CKEU+Yp83MwhrfvINFMu/9hvxWfYpIISSQUelfJExn1j+IHaTQje4+bpetdZ8L8aaq0i1JslDBhzuTSut1vDJTOs5IaFUdjmNWlIMFWB8+jZ2VuA6FuxBhp5v8W3hvbFv5pD0YMwazOaadZabL+w9ahdgE="},
{"GAWZ7HVPKRGCH2KP6475XV6HA2CAF44MXWWE5RKV4LMMGB6FNNSEPNPE", "hKFlxKWaCv93H++U6REJYFrDJ7lIkxkIWxQui3TnWgknVao+Ch2mwBXwlrtJIfTLtisMiDe41Rhg5W2/MVuUahM4SP4/7bch/jaco294xDvt9qSy7gV3Tn6y3kQ4D9sbydP5fTX5b/2TbAsYaxVnNBI9gmWz9WnA1i/oMUVb+z64MdWgBFsYb+3NKq+ckOFBx7lWz06W84XVwFOQkfEa9Z1lHO7dZnnbYR2jZ2VuA6FuxBjfxSfGM/YdSvKrrFMopBR2ZqN+/Ekg3WqhdgE="},
}},
// parent visible hash mismatch
{"33o4U14XU4NFNg99xV3DrW9+JFqux+Qy+9bKPO0CZqw=",
3,
"hKFlxPB8pYYB1ikZVIbr42li9xe7uWpnwXj6UzvNl84o1a9BQp32tMp9Os5STGSinCxeKJWThWDEaIkQAhbsIWM5I2f/y5Bakw8qPKdXnPvXzIFdIAKPaL5YOfeH4YqINUy1vLKtmXE4oNN/Re1GI2JJfLHoiwGdfkZ5BwNf8HBMPo+7NQoK72vhmawKoVZ/mhLZWhyWg2o2WLxnI6SvB23zr9S8DuzorjdFyPjMKLoW1cQEvjL89XC3Gh0jVQqtiyY4ztsezSaaK631EgCggKax9TgOMcadtjbSDak4Z4369iPxdPDarifQEH3tCgfQEqfm/FajZ2VuA6FuxBhNSgpvLKdBI1+uyn+AxVitwsUWFOvFv1OhdgI=",
"g6hhY2NvdW50c5KFqWFjY291bnRJRNk4R0FXWjdIVlBLUkdDSDJLUDY0NzVYVjZIQTJDQUY0NE1YV1dFNVJLVjRMTU1HQjZGTk5TRVBOUEWyYWNjdEJ1bmRsZVJldmlzaW9uAbFlbmNBY2N0QnVuZGxlSGFzaMQgitIh2ZzE+Ee+B5sOTGh6Zykb3HzJm0AGEStUlbqv3gipaXNQcmltYXJ5w6Rtb2RlAYWpYWNjb3VudElE2ThHQlBMTEhPS1BSRlNDRTZGUDZEMzdBVVVOWUNKMlRTUE9ITUhBUjU0VEJETFdXUElaQ0Q0UkwzR7JhY2N0QnVuZGxlUmV2aXNpb24BsWVuY0FjY3RCdW5kbGVIYXNoxCBha3OfRg0rSijM9oc4jHDzkf6U1T1QA/70ZYyOCclIbqlpc1ByaW1hcnnCpG1vZGUBpHByZXbAqHJldmlzaW9uAQ==",
[]accountCan{
{"GAWZ7HVPKRGCH2KP6475XV6HA2CAF44MXWWE5RKV4LMMGB6FNNSEPNPE", "hKFlxKVK+UYzZvupFCxmCET7llfg+lz0WgoZjM1A4QwfTrn0SQiyJWUnq25ZHS4yCCkMHt0RZ9nkeNKLdBCW/zbGWl1PazWmRhHrQtnWNxYKn0loHacMxo8XPZrYMJxOzMmAQERYAdKLCCKbX7aSw0fM2f0O0vUakB6G9iMuBkSqLjnItRrw7IDq1nVKfrBKWGA7QndnA+cLU5QRnlc4X3tGnQLqqKiWLDajZ2VuA6FuxBjM4wCrCdDp+PUJMQaXZfid1okMls3wI3ahdgE="},
{"GBPLLHOKPRFSCE6FP6D37AUUNYCJ2TSPOHMHAR54TBDLWWPIZCD4RL3G", "hKFlxKURh2JrnpBAnRNW2lDvx63Sd5bMMqUvI5bkvkn2CNWcMn+FcPRCK+75EZWv0Jnq+KT6Xp0r4Qm8INe27bLQJa5XOB8JB04XY5zGaAwHPY3hddUUmTFfn9CmY46SGY4bSQ5xejPIlsl+VBLgnM+4ZiuRp3o13YNjn9tBdWOHrJmb7c+err863f43Ttw6L1XxbOvwl81VIA/auHkw1znXr8D2ZleOP92jZ2VuA6FuxBgOiuJnWDcJvlZxaI6MyQCMI3leSfE54GWhdgE="},
}},
// account-level hash mismatch
{"AdhjUfTyNZvZnyWuLrXCJ2XgpfErFwyqmRkg8ZEjAHg=",
3,
"hKFlxPAxslJzbyrnYaG4LJUi0hw9jKh6GaM99keZJb7K36pN1whf/bN5iFxrC9J0KQeW7OQAXEp973OdfO2azxzzh3if/SQ9wQN00XQMWLN2Cb0Je98+z9wFWsWdh918Rit5x5hNi61PvTUxQCsahGeO1BqWJxvC3P3XzwBBg2CaesB07KfDZSB5kBP6mruluGgATE4WLmk8LmoyQL4CA6DYpRaWl5Xr6e5tAj5JFzd9wnSCIiKPukONnJqszqfZaF+ZVUznQ1q9MfjIM3huQrOb7wC/NPKtoM+xsTPgCjmIfLZBwY6lD8xiUdaTNGFH6zNvHdCjZ2VuA6FuxBgpHteB4lY/BqjThfRNbn/TNKRfvl0cXv2hdgI=",
"g6hhY2NvdW50c5KFqWFjY291bnRJRNk4R0FXWjdIVlBLUkdDSDJLUDY0NzVYVjZIQTJDQUY0NE1YV1dFNVJLVjRMTU1HQjZGTk5TRVBOUEWyYWNjdEJ1bmRsZVJldmlzaW9uAbFlbmNBY2N0QnVuZGxlSGFzaMQgH3M1uaaaaa1r684bDx18YGFfqAPCRhgktz/Y3lBUPAapaXNQcmltYXJ5w6Rtb2RlAYWpYWNjb3VudElE2ThHQlBMTEhPS1BSRlNDRTZGUDZEMzdBVVVOWUNKMlRTUE9ITUhBUjU0VEJETFdXUElaQ0Q0UkwzR7JhY2N0QnVuZGxlUmV2aXNpb24BsWVuY0FjY3RCdW5kbGVIYXNoxCAxAujlppppoT+KYBEoHY76Uq1v9rPcf4lbhFW4v+p14alpc1ByaW1hcnnCpG1vZGUBpHByZXbAqHJldmlzaW9uAQ==",
[]accountCan{
{"GAWZ7HVPKRGCH2KP6475XV6HA2CAF44MXWWE5RKV4LMMGB6FNNSEPNPE", "hKFlxKWW2fR2MgA3VlejatZlBJ057v0+YFBdSQGuh3V1LkA4XDCYx3fC8+N9FZN7HOlQd1eBYO4gT/7wgg3fk9k2K9BVHCbXAJeKiv8DMV9SbJ7ZWQnXG9BAT1ZQApv4BBVMWTvY9uhao07IU5amC58KC6xlaRZg1BwkRuk4H83ahdvMXWLpzFMlwobtjoD0tiG7u6YGAw3Bpr2N5JUDxdjUAHbdCUSl0BOjZ2VuA6FuxBgk5XMAnRJBS1C7J7g82tAmH85+b2JMWgehdgE="},
{"GBPLLHOKPRFSCE6FP6D37AUUNYCJ2TSPOHMHAR54TBDLWWPIZCD4RL3G", "hKFlxKUSt6xJveOZtTct6TMN9kBo5/1qmoFIHtpCLJzkYrnu0ciOODqDtwnUmHlbYfT7GjRdyN2jo7xfdw6D2jtTKAyCCGUbehlXLFP2x0+wNo6oiUCCiP5/uk3SuR/QfFtrh1aussrZgx2GzW76ZLEXlVKm6tQ+B+/ARQHeQobUaB2jpTrB9HwsO/ZhEllEyWDtRMtaOzlBl3yXPo9e8E41Wq3mFXQRbMWjZ2VuA6FuxBiKSBv9DEiFTGGcAGUcimPbFWeIFm0n99GhdgE="},
}},
}
func TestCanningFacility(t *testing.T) {
if os.Getenv("KEYBASE_CANNING_FACILITY") != "1" {
t.Skip("this is not really a test but a tool for creating cans for tests")
}
a1 := stellar1.AccountID("GAWZ7HVPKRGCH2KP6475XV6HA2CAF44MXWWE5RKV4LMMGB6FNNSEPNPE")
s1 := stellar1.SecretKey("SBV2JNAJA65LMCZ5HYDXAYWRQK25CD2DZB25YZVNX3OLPALN2EVKO2V2")
a2 := stellar1.AccountID("GBPLLHOKPRFSCE6FP6D37AUUNYCJ2TSPOHMHAR54TBDLWWPIZCD4RL3G")
s2 := stellar1.SecretKey("SDZJUFUKEQQU77DCF2VH72XB4S427EGKF6BSOSPUIKLTBCTCMXQQ7JU5")
bundleLocal := stellar1.Bundle{
Revision: 1,
Prev: nil,
Accounts: []stellar1.BundleEntry{{
AccountID: a1,
Mode: stellar1.AccountMode_USER,
IsPrimary: true,
Name: "p1",
AcctBundleRevision: 1,
EncAcctBundleHash: nil,
}, {
AccountID: a2,
Mode: stellar1.AccountMode_USER,
IsPrimary: false,
Name: "p2",
AcctBundleRevision: 1,
EncAcctBundleHash: nil,
}},
AccountBundles: map[stellar1.AccountID]stellar1.AccountBundle{
a1: {
AccountID: a1,
Signers: []stellar1.SecretKey{s1},
},
a2: {
AccountID: a2,
Signers: []stellar1.SecretKey{s2},
},
},
}
ring := newPukRing()
pukSeed, pukGen := ring.makeGen(t, 3)
boxed, err := BoxAndEncode(&bundleLocal, pukGen, pukSeed)
require.NoError(t, err)
t.Logf(spew.Sdump(boxed))
t.Logf("puk seed: %v", toB64(pukSeed[:]))
t.Logf("puk gen: %v", pukGen)
t.Logf("nonce: %v", toB64(boxed.EncParent.N[:]))
t.Logf("enc E: %v", toB64(boxed.EncParent.E))
t.Logf("\nEncParentB64: %v", boxed.EncParentB64)
t.Logf("VisParentB64: %v\n", boxed.VisParentB64)
for acctID, encodedAcct := range boxed.AcctBundles {
t.Logf("account: %v, EncB64: %v", acctID, encodedAcct.EncB64)
}
cipherpack, err := base64.StdEncoding.DecodeString(boxed.EncParentB64)
require.NoError(t, err)
encHash := sha256.Sum256(cipherpack)
t.Logf("actual own hash: %v", toB64(encHash[:]))
// decode it back again and take a look,
// especially for generating expected errors
benc := BundleEncoded{
EncParent: boxed.EncParentB64,
VisParent: boxed.VisParentB64,
AcctBundles: make(map[stellar1.AccountID]string),
}
for acctID, encodedAcct := range boxed.AcctBundles {
benc.AcctBundles[acctID] = encodedAcct.EncB64
}
m := libkb.NewMetaContext(context.Background(), nil)
decodedBundle, _, _, _, err := DecodeAndUnbox(m, ring, benc)
t.Logf("decoded: %+v, err: %v", decodedBundle, err)
}
func toB64(b []byte) string {
return base64.StdEncoding.EncodeToString(b)
}
func TestCanned(t *testing.T) {
m := libkb.NewMetaContext(context.Background(), nil)
// valid can
c := cans[0]
bundle, _, _, _, err := DecodeAndUnbox(m, c.ring(t), c.toBundleEncodedB64())
require.NoError(t, err)
require.Equal(t, "yJwcMuMxwpFuxt0A+7zYT2iev/1wVB5OeNdJzDSlBDo=", toB64(bundle.OwnHash))
// hashes match for the first account
a1BundleHash := bundle.AccountBundles["GAWZ7HVPKRGCH2KP6475XV6HA2CAF44MXWWE5RKV4LMMGB6FNNSEPNPE"].OwnHash
require.Equal(t, "BgkhqzuIynMJrhIeuSOPTCoS5QutvwDXZr7fHVuCmpg=", toB64(a1BundleHash))
require.Equal(t, "BgkhqzuIynMJrhIeuSOPTCoS5QutvwDXZr7fHVuCmpg=", toB64(bundle.Accounts[0].EncAcctBundleHash))
// hashes match for the second account
a2BundleHash := bundle.AccountBundles["GBPLLHOKPRFSCE6FP6D37AUUNYCJ2TSPOHMHAR54TBDLWWPIZCD4RL3G"].OwnHash
require.Equal(t, "mwbjopTOaCgbwtSWhMyaKvJdVtaYnFeTMZExDqzxJ6U=", toB64(a2BundleHash))
require.Equal(t, "mwbjopTOaCgbwtSWhMyaKvJdVtaYnFeTMZExDqzxJ6U=", toB64(bundle.Accounts[1].EncAcctBundleHash))
}
func TestCantOpenWithTheWrongKey(t *testing.T) {
m := libkb.NewMetaContext(context.Background(), nil)
c := cans[1]
pukSeed := c.puk(t)
pukGen := c.gen()
ring := &pukRing{
map[keybase1.PerUserKeyGeneration]libkb.PerUserKeySeed{
pukGen: pukSeed,
},
}
_, _, _, _, err := DecodeAndUnbox(m, ring, c.toBundleEncodedB64())
require.Error(t, err)
require.Contains(t, err.Error(), "secret box open failed")
}
func TestCannedUnboxInvariantViolationMultiplePrimary(t *testing.T) {
m := libkb.NewMetaContext(context.Background(), nil)
c := cans[2]
_, _, _, _, err := DecodeAndUnbox(m, c.ring(t), c.toBundleEncodedB64())
require.Error(t, err)
require.Contains(t, err.Error(), "multiple primary accounts")
}
func TestCannedCryptV1(t *testing.T) {
m := libkb.NewMetaContext(context.Background(), nil)
c := cans[3]
_, _, _, _, err := DecodeAndUnbox(m, c.ring(t), c.toBundleEncodedB64())
require.Error(t, err)
require.Contains(t, err.Error(), "stellar secret bundle encryption version 1 has been retired")
}
func TestCannedBundleHashMismatch(t *testing.T) {
m := libkb.NewMetaContext(context.Background(), nil)
c := cans[4]
_, _, _, _, err := DecodeAndUnbox(m, c.ring(t), c.toBundleEncodedB64())
require.Error(t, err)
require.Contains(t, err.Error(), "corrupted bundle: visible hash mismatch")
}
func TestCannedAccountHashMismatch(t *testing.T) {
m := libkb.NewMetaContext(context.Background(), nil)
c := cans[5]
_, _, _, _, err := DecodeAndUnbox(m, c.ring(t), c.toBundleEncodedB64())
require.Error(t, err)
require.Contains(t, err.Error(), "account bundle and parent entry hash mismatch")
}
// TestBoxAccountBundle checks boxing an account bundle and that DecodeAndUnbox
// gets back to the initial bundle.
func TestBoxAccountBundle(t *testing.T) {
b, err := NewInitial("abc")
require.NoError(t, err)
require.NotNil(t, b)
ring := newPukRing()
seed, gen := ring.makeGen(t, 1)
boxed, err := BoxAndEncode(b, gen, seed)
require.NoError(t, err)
require.NotNil(t, boxed, "BoxAndEncode() should return something")
require.Equal(t, stellar1.BundleVersion_V2, boxed.FormatVersionParent, "should be V2")
require.NotEmpty(t, boxed.VisParentB64)
require.NotEmpty(t, boxed.EncParentB64)
require.Equal(t, 2, boxed.EncParent.V)
require.NotEmpty(t, boxed.EncParent.E)
require.NotZero(t, boxed.EncParent.N)
require.Equal(t, gen, boxed.EncParent.Gen)
require.Len(t, boxed.AcctBundles, 1)
m := libkb.NewMetaContext(context.Background(), nil)
bundle, version, pukGen, accountGens, err := DecodeAndUnbox(m, ring, boxed.toBundleEncodedB64())
require.NoError(t, err)
require.NotNil(t, bundle)
require.Equal(t, stellar1.BundleVersion_V2, version)
require.Len(t, bundle.Accounts, 1)
require.Equal(t, stellar1.AccountMode_USER, bundle.Accounts[0].Mode)
require.Equal(t, pukGen, keybase1.PerUserKeyGeneration(1))
acctBundle, ok := bundle.AccountBundles[bundle.Accounts[0].AccountID]
require.True(t, ok)
acctBundleOriginal, ok := b.AccountBundles[bundle.Accounts[0].AccountID]
require.True(t, ok)
require.Equal(t, acctBundle.Signers[0], acctBundleOriginal.Signers[0])
for _, acct := range bundle.Accounts {
require.Equal(t, keybase1.PerUserKeyGeneration(1), accountGens[acct.AccountID])
}
}
// pukRing is a convenience type for puks in these tests.
type pukRing struct {
puks map[keybase1.PerUserKeyGeneration]libkb.PerUserKeySeed
}
func newPukRing() *pukRing {
return &pukRing{puks: make(map[keybase1.PerUserKeyGeneration]libkb.PerUserKeySeed)}
}
func (p *pukRing) makeGen(t *testing.T, gen int) (libkb.PerUserKeySeed, keybase1.PerUserKeyGeneration) {
puk, err := libkb.GeneratePerUserKeySeed()
require.NoError(t, err)
pgen := keybase1.PerUserKeyGeneration(gen)
p.puks[pgen] = puk
return puk, pgen
}
// SeedByGeneration makes pukRing implement PukFinder.
func (p *pukRing) SeedByGeneration(m libkb.MetaContext, generation keybase1.PerUserKeyGeneration) (libkb.PerUserKeySeed, error) {
puk, ok := p.puks[generation]
if ok {
return puk, nil
}
return libkb.PerUserKeySeed{}, errors.New("not found")
}
| [
"\"KEYBASE_CANNING_FACILITY\""
] | [] | [
"KEYBASE_CANNING_FACILITY"
] | [] | ["KEYBASE_CANNING_FACILITY"] | go | 1 | 0 | |
pliers/tests/extractors/api/test_clarifai_extractors.py | from os.path import join
from ...utils import get_test_data_path
from pliers.extractors import ClarifaiAPIExtractor
from pliers.stimuli import ImageStim
from pliers.extractors.base import merge_results
import numpy as np
import pytest
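# Note: pytest evaluates string skipif conditions in a namespace that already provides `os`,
# so the checks below work without an explicit `import os` in this module.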
@pytest.mark.skipif("'CLARIFAI_API_KEY' not in os.environ")
def test_clarifai_api_extractor():
image_dir = join(get_test_data_path(), 'image')
stim = ImageStim(join(image_dir, 'apple.jpg'))
result = ClarifaiAPIExtractor().transform(stim).to_df()
assert result['apple'][0] > 0.5
assert result.ix[:, 5][0] > 0.0
result = ClarifaiAPIExtractor(max_concepts=5).transform(stim).to_df()
assert result.shape == (1, 9)
result = ClarifaiAPIExtractor(
min_value=0.9).transform(stim).to_df(object_id=False)
assert all(np.isnan(d) or d > 0.9 for d in result.values[0, 3:])
concepts = ['cat', 'dog']
result = ClarifaiAPIExtractor(select_concepts=concepts).transform(stim)
result = result.to_df()
assert result.shape == (1, 6)
assert 'cat' in result.columns and 'dog' in result.columns
@pytest.mark.skipif("'CLARIFAI_API_KEY' not in os.environ")
def test_clarifai_api_extractor_batch():
image_dir = join(get_test_data_path(), 'image')
stim = ImageStim(join(image_dir, 'apple.jpg'))
stim2 = ImageStim(join(image_dir, 'obama.jpg'))
ext = ClarifaiAPIExtractor()
results = ext.transform([stim, stim2])
results = merge_results(results)
assert results['ClarifaiAPIExtractor#apple'][0] > 0.5 or \
results['ClarifaiAPIExtractor#apple'][1] > 0.5
# This takes too long to execute
# video = VideoStim(join(get_test_data_path(), 'video', 'small.mp4'))
# results = ExtractorResult.merge_stims(ext.transform(video))
# assert 'Lego' in results.columns and 'robot' in results.columns
| [] | [] | [] | [] | [] | python | 0 | 0 | |
main.go | package main
import (
"encoding/base64"
"encoding/json"
"fmt"
"html/template"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"reflect"
"strings"
_ "net/http/pprof"
"github.com/anacrolix/torrent"
"github.com/asdine/storm"
Engine "github.com/deranjer/goTorrent/engine"
Settings "github.com/deranjer/goTorrent/settings"
Storage "github.com/deranjer/goTorrent/storage"
jwt "github.com/dgrijalva/jwt-go"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
"github.com/gorilla/websocket"
"github.com/mmcdole/gofeed"
"github.com/sirupsen/logrus"
)
var (
//Logger does logging for the entire project
Logger = logrus.New()
//Authenticated stores the value of the result of the client that connects to the server
Authenticated = false
APP_ID = os.Getenv("APP_ID")
)
var upgrader = websocket.Upgrader{
ReadBufferSize: 1024,
WriteBufferSize: 1024,
CheckOrigin: func(r *http.Request) bool {
return true
},
}
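// serveHome renders the single-page app template and injects APP_ID for the frontend.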
func serveHome(w http.ResponseWriter, r *http.Request) {
s1, _ := template.ParseFiles("templates/home.tmpl")
s1.ExecuteTemplate(w, "base", map[string]string{"APP_ID": APP_ID})
}
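// handleAuthentication reads the client's auth message and validates the supplied JWT against the stored signing key before commands are accepted.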
func handleAuthentication(conn *websocket.Conn, db *storm.DB) {
msg := Engine.Message{}
err := conn.ReadJSON(&msg)
conn.WriteJSON(msg) //TODO just for testing, remove
payloadData, ok := msg.Payload.(map[string]interface{})
clientAuthToken, tokenOk := payloadData["ClientAuthString"].(string)
fmt.Println("ClientAuthToken:", clientAuthToken, "TokenOkay", tokenOk, "PayloadData", payloadData, "PayloadData Okay?", ok)
if ok == false || tokenOk == false {
authFail := Engine.AuthResponse{MessageType: "authResponse", Payload: "Message Payload in AuthRequest was malformed, closing connection"}
conn.WriteJSON(authFail)
conn.Close()
return
}
if err != nil {
Logger.WithFields(logrus.Fields{"error": err, "SuppliedToken": clientAuthToken}).Error("Unable to read authentication message")
}
fmt.Println("Authstring", clientAuthToken)
signingKeyStruct := Storage.FetchJWTTokens(db)
singingKey := signingKeyStruct.SigningKey
token, err := jwt.Parse(clientAuthToken, func(token *jwt.Token) (interface{}, error) {
if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"])
}
return singingKey, nil
})
if err != nil {
authFail := Engine.AuthResponse{MessageType: "authResponse", Payload: "Parsing of Token failed, ensure you have the correct token! Closing Connection"}
conn.WriteJSON(authFail)
Logger.WithFields(logrus.Fields{"error": err, "SuppliedToken": token}).Error("Unable to parse token!")
conn.Close()
return
}
if claims, ok := token.Claims.(jwt.MapClaims); ok && token.Valid {
authTrue := Engine.AuthResponse{MessageType: "authResponse", Payload: "Authentication Verified, proceed with commands."}
conn.WriteJSON(authTrue)
fmt.Println("Claims", claims["ClientName"], claims["Issuer"])
Authenticated = true
} else {
Logger.WithFields(logrus.Fields{"error": err}).Error("Authentication Error occured, cannot complete!")
}
}
func main() {
Engine.Logger = Logger //Injecting the logger into all the packages
Storage.Logger = Logger
Settings.Logger = Logger
var torrentQueues = Storage.TorrentQueues{}
Config := Settings.FullClientSettingsNew() //grabbing from settings.go
Engine.Config = Config
if Config.LoggingOutput == "file" {
_, err := os.Stat("logs")
if os.IsNotExist(err) {
err := os.Mkdir("logs", 0755)
if err != nil {
fmt.Println("Unable to create 'log' folder for logging.... please check permissions.. forcing output to stdout", err)
Logger.Out = os.Stdout
}
} else {
os.Remove("logs/server.log") //cleanup the old log on every restart
file, err := os.OpenFile("logs/server.log", os.O_CREATE|os.O_WRONLY, 0755) //creating the log file
//defer file.Close() //TODO.. since we write to this constantly how does close work?
if err != nil {
fmt.Println("Unable to create file for logging.... please check permissions.. forcing output to stdout")
Logger.Out = os.Stdout
}
fmt.Println("Logging to file logs/server.log")
Logger.Out = file
}
} else {
Logger.Out = os.Stdout
}
Logger.SetLevel(Config.LoggingLevel)
httpAddr := Config.HTTPAddr
os.MkdirAll(Config.TFileUploadFolder, 0755) //creating a directory to store uploaded torrent files
os.MkdirAll(Config.TorrentWatchFolder, 0755) //creating a directory to watch for added .torrent files
Logger.WithFields(logrus.Fields{"Config": Config}).Info("Torrent Client Config has been generated...")
tclient, err := torrent.NewClient(&Config.TorrentConfig) //pulling out the torrent specific config to use
if err != nil {
Logger.WithFields(logrus.Fields{"error": err}).Fatalf("Error creating torrent client: %s")
}
//fmt.Printf("%+v\n", Config.TorrentConfig)
db, err := storm.Open("storage.db") //initializing the boltDB store that contains all the added torrents
if err != nil {
Logger.WithFields(logrus.Fields{"error": err}).Fatal("Error opening/creating storage.db")
} else {
Logger.WithFields(logrus.Fields{"error": err}).Info("Opening or creating storage.db...")
}
defer db.Close() //defering closing the database until the program closes
err = db.One("ID", 5, &torrentQueues)
if err != nil { //Create the torrent que database
Logger.WithFields(logrus.Fields{"error": err}).Info("No Queue database found, assuming first run, creating database")
torrentQueues.ID = 5
db.Save(&torrentQueues)
}
tokens := Storage.IssuedTokensList{} //if first run setting up the authentication tokens
var signingKey []byte
err = db.One("ID", 3, &tokens)
if err != nil {
Logger.WithFields(logrus.Fields{"RSSFeedStore": tokens, "error": err}).Info("No Tokens database found, assuming first run, generating token...")
tokens.ID = 3 //creating the initial store
claims := Settings.GoTorrentClaims{
"goTorrentWebUI",
jwt.StandardClaims{
Issuer: "goTorrentServer",
},
}
signingKey = Settings.GenerateSigningKey() //Running this will invalidate any certs you already issued!!
authString := Settings.GenerateToken(claims, signingKey)
tokens.SigningKey = signingKey
tokens.FirstToken = authString
tokens.TokenNames = append(tokens.TokenNames, Storage.SingleToken{"firstClient"})
err := ioutil.WriteFile("clientAuth.txt", []byte(authString), 0755)
if err != nil {
Logger.WithFields(logrus.Fields{"error": err}).Warn("Unable to write client auth to file..")
}
db.Save(&tokens) //Writing all of that to the database
} else { //Already have a signing key so pulling that signing key out of the database to sign any key requests
tokens := Storage.FetchJWTTokens(db)
signingKey = tokens.SigningKey
}
oldConfig, err := Storage.FetchConfig(db)
if err != nil {
Logger.WithFields(logrus.Fields{"error": err}).Info("Assuming first run as no config found in database, client config being generated")
Settings.GenerateClientConfigFile(Config, tokens.FirstToken) //if first run generate the client config file
} else {
if reflect.DeepEqual(oldConfig.ClientConnectSettings, Config.ClientConnectSettings) {
Logger.WithFields(logrus.Fields{"error": err}).Info("Configs are the same, not regenerating client config")
} else {
Logger.WithFields(logrus.Fields{"error": err}).Info("Config has changed, re-writting config")
Settings.GenerateClientConfigFile(Config, tokens.FirstToken)
}
}
Storage.SaveConfig(db, Config) //Save the config to the database
cronEngine := Engine.InitializeCronEngine() //Starting the cron engine for tasks
Logger.Debug("Cron Engine Initialized...")
torrentLocalStorage := Storage.TorrentLocal{} //creating a new struct that stores all of our local storage info
var RunningTorrentArray = []Engine.ClientDB{} //this stores ALL of the torrents that are running, used for client update pushes combines Local Storage and Running tclient info
var PreviousTorrentArray = []Engine.ClientDB{}
TorrentLocalArray := Storage.FetchAllStoredTorrents(db) //pulling in all the already added torrents - this is an array of ALL of the local storage torrents, they will be added back in via hash
if TorrentLocalArray != nil { //the first creation of the running torrent array //since we are adding all of them in we use a coroutine... just allows the web ui to load then it will load in the torrents
Engine.CreateInitialTorrentArray(tclient, TorrentLocalArray, db, Config) //adding all of the stored torrents into the torrent client
//TODO add GO to this
} else {
Logger.Info("Database is empty, no torrents loaded")
}
Engine.CheckTorrentWatchFolder(cronEngine, db, tclient, torrentLocalStorage, Config, torrentQueues) //Every 5 minutes the engine will check the specified folder for new .torrent files
Engine.RefreshRSSCron(cronEngine, db, tclient, torrentLocalStorage, Config, torrentQueues) // Refresing the RSS feeds on an hourly basis to add torrents that show up in the RSS feed
router := mux.NewRouter() //setting up the handler for the web backend
router.HandleFunc("/", serveHome) //Serving the main page for our SPA
router.PathPrefix("/static/").Handler(http.FileServer(http.Dir("public")))
http.Handle("/", router)
router.HandleFunc("/api", func(w http.ResponseWriter, r *http.Request) { //TODO, remove this
TorrentLocalArray = Storage.FetchAllStoredTorrents(db)
RunningTorrentArray = Engine.CreateRunningTorrentArray(tclient, TorrentLocalArray, PreviousTorrentArray, Config, db) //Updates the RunningTorrentArray with the current client data as well
var torrentlistArray = new(Engine.TorrentList)
torrentlistArray.MessageType = "torrentList" //setting the type of message
torrentlistArray.ClientDBstruct = RunningTorrentArray //the full JSON that includes the number of torrents as the root
torrentlistArray.Totaltorrents = len(RunningTorrentArray)
torrentlistArrayJSON, _ := json.Marshal(torrentlistArray)
w.Header().Set("Content-Type", "application/json")
w.Write(torrentlistArrayJSON)
})
router.HandleFunc("/websocket", func(w http.ResponseWriter, r *http.Request) { //websocket is the main data pipe to the frontend
conn, err := upgrader.Upgrade(w, r, nil)
if err != nil {
Logger.WithFields(logrus.Fields{"error": err}).Fatal("Unable to create websocket!")
return
}
fmt.Println("Websocket connection established, awaiting authentication")
connResponse := Engine.ServerPushMessage{MessageType: "connectResponse", MessageLevel: "Message", Payload: "Websocket Connection Established, awaiting Authentication"}
conn.WriteJSON(&connResponse)
defer conn.Close() //defer closing the websocket until done.
if Authenticated != true {
handleAuthentication(conn, db)
} else { //If we are authenticated inject the connection into the other packages
connResponse := Engine.ServerPushMessage{MessageType: "authResponse", MessageLevel: "Message", Payload: "Already Authenticated... Awaiting Commands"}
conn.WriteJSON(&connResponse)
Logger.Info("Authenticated, websocket connection available!")
}
Engine.Conn = conn
Storage.Conn = conn
MessageLoop: //Tagging this so we can continue out of it with any errors we encounter that are failing
for {
runningTorrents := tclient.Torrents() //getting running torrents here since multiple cases ask for the running torrents
msg := Engine.Message{}
err := conn.ReadJSON(&msg)
if err != nil {
Logger.WithFields(logrus.Fields{"error": err, "message": msg}).Error("Unable to read JSON client message")
Engine.CreateServerPushMessage(Engine.ServerPushMessage{MessageType: "serverPushMessage", MessageLevel: "info", Payload: "Malformed JSON request made to server.. ignoring"}, conn)
break MessageLoop
}
var payloadData map[string]interface{}
if msg.Payload != nil && msg.Payload != "" {
payloadData = msg.Payload.(map[string]interface{})
}
Logger.WithFields(logrus.Fields{"message": msg}).Debug("Message From Client")
switch msg.MessageType { //first handling data requests
case "authRequest":
if Authenticated {
Logger.WithFields(logrus.Fields{"message": msg}).Debug("Client already authenticated... skipping authentication method")
} else {
handleAuthentication(conn, db)
}
case "newAuthToken":
claims := Settings.GoTorrentClaims{
payloadData["ClientName"].(string),
jwt.StandardClaims{
Issuer: "goTorrentServer",
},
}
Logger.WithFields(logrus.Fields{"clientName": payloadData["ClientName"].(string)}).Info("New Auth Token creation request")
fmt.Println("Signing Key", signingKey)
token := Settings.GenerateToken(claims, signingKey)
tokenReturn := Settings.TokenReturn{MessageType: "TokenReturn", TokenReturn: token}
tokensDB := Storage.FetchJWTTokens(db)
tokensDB.TokenNames = append(tokensDB.TokenNames, Storage.SingleToken{payloadData["ClientName"].(string)})
db.Update(&tokensDB) //adding the new token client name to the database
conn.WriteJSON(tokenReturn)
case "torrentListRequest":
Logger.WithFields(logrus.Fields{"message": msg}).Debug("Client Requested TorrentList Update")
go func() { //running updates in separate thread so can still accept commands
TorrentLocalArray = Storage.FetchAllStoredTorrents(db) //Required to re-read th database since we write to the DB and this will pull the changes from it
RunningTorrentArray = Engine.CreateRunningTorrentArray(tclient, TorrentLocalArray, PreviousTorrentArray, Config, db) //Updates the RunningTorrentArray with the current client data as well
PreviousTorrentArray = RunningTorrentArray
torrentlistArray := Engine.TorrentList{MessageType: "torrentList", ClientDBstruct: RunningTorrentArray, Totaltorrents: len(RunningTorrentArray)}
Logger.WithFields(logrus.Fields{"torrentList": torrentlistArray, "previousTorrentList": PreviousTorrentArray}).Debug("Previous and Current Torrent Lists for sending to client")
conn.WriteJSON(torrentlistArray)
}()
case "torrentFileListRequest": //client requested a filelist update
Logger.WithFields(logrus.Fields{"message": msg}).Info("Client Requested FileList Update")
fileListArrayRequest := payloadData["FileListHash"].(string)
FileListArray := Engine.CreateFileListArray(tclient, fileListArrayRequest, db, Config)
conn.WriteJSON(FileListArray) //writing the JSON to the client
case "torrentPeerListRequest":
Logger.WithFields(logrus.Fields{"message": msg}).Info("Client Requested PeerList Update")
peerListArrayRequest := payloadData["PeerListHash"].(string)
torrentPeerList := Engine.CreatePeerListArray(tclient, peerListArrayRequest)
conn.WriteJSON(torrentPeerList)
case "fetchTorrentsByLabel":
Logger.WithFields(logrus.Fields{"message": msg}).Info("Client Requested Torrents by Label")
label := payloadData["Label"].(string)
torrentsByLabel := Storage.FetchTorrentsByLabel(db, label)
RunningTorrentArray = Engine.CreateRunningTorrentArray(tclient, TorrentLocalArray, PreviousTorrentArray, Config, db)
labelRunningArray := []Engine.ClientDB{}
for _, torrent := range RunningTorrentArray { //Ranging over the running torrents and if the hashes match we have torrents by label
for _, label := range torrentsByLabel {
if torrent.TorrentHashString == label.Hash {
labelRunningArray = append(labelRunningArray, torrent)
}
}
}
conn.WriteJSON(labelRunningArray)
case "changeStorageValue":
Logger.WithFields(logrus.Fields{"message": msg}).Info("Client Requested Storage Location Update")
newStorageLocation := payloadData["StorageValue"].(string)
hashes := payloadData["ChangeStorageHashes"].([]interface{})
for _, singleHash := range hashes {
singleTorrent := Storage.FetchTorrentFromStorage(db, singleHash.(string))
oldPath := singleTorrent.StoragePath
newStorageLocationAbs, err := filepath.Abs(filepath.ToSlash(newStorageLocation))
if err != nil {
Logger.WithFields(logrus.Fields{"patherr": err, "path": newStorageLocation}).Warn("Unable to create absolute path for storage location, using default")
singleTorrent.StoragePath = Config.TorrentConfig.DataDir
} else {
singleTorrent.StoragePath = newStorageLocationAbs
}
Storage.UpdateStorageTick(db, singleTorrent) //push torrent to storage
if singleTorrent.TorrentMoved == true { //If torrent has already been moved and I change path then move it again... TODO, does this work with symlinks?
Logger.WithFields(logrus.Fields{"message": msg}).Info("Change Storage Value called")
Engine.MoveAndLeaveSymlink(Config, singleHash.(string), db, true, oldPath)
}
}
case "settingsFileRequest":
Logger.WithFields(logrus.Fields{"message": msg}).Info("Client Requested Settings File")
clientSettingsFile := Engine.SettingsFile{MessageType: "settingsFile", Config: Config}
conn.WriteJSON(clientSettingsFile)
case "rssFeedRequest":
Logger.WithFields(logrus.Fields{"message": msg}).Info("Client Requested RSS Update")
RSSList := Storage.FetchRSSFeeds(db)
RSSJSONFeed := Engine.RSSJSONList{MessageType: "rssList", TotalRSSFeeds: len(RSSList.RSSFeeds)}
RSSsingleFeed := Engine.RSSFeedsNames{}
for _, singleFeed := range RSSList.RSSFeeds {
RSSsingleFeed.RSSName = singleFeed.Name
RSSsingleFeed.RSSFeedURL = singleFeed.URL
RSSJSONFeed.RSSFeeds = append(RSSJSONFeed.RSSFeeds, RSSsingleFeed)
}
conn.WriteJSON(RSSJSONFeed)
case "addRSSFeed":
newRSSFeed := payloadData["RSSURL"].(string)
Logger.WithFields(logrus.Fields{"message": newRSSFeed}).Info("Client Added RSS Feed")
fullRSSFeeds := Storage.FetchRSSFeeds(db)
Logger.WithFields(logrus.Fields{"RSSFeeds": fullRSSFeeds}).Info("Pulled Full RSS Feeds")
for _, singleFeed := range fullRSSFeeds.RSSFeeds {
if newRSSFeed == singleFeed.URL || newRSSFeed == "" {
Logger.WithFields(logrus.Fields{"RSSFeed": newRSSFeed}).Warn("Empty URL or Duplicate RSS URL to one already in database! Rejecting submission")
Engine.CreateServerPushMessage(Engine.ServerPushMessage{MessageType: "serverPushMessage", MessageLevel: "error", Payload: "Empty URL or Duplicate RSS URL to one already in database!"}, conn)
continue MessageLoop
}
}
fp := gofeed.NewParser()
feed, err := fp.ParseURL(newRSSFeed)
if err != nil {
Logger.WithFields(logrus.Fields{"RSSFeed": newRSSFeed}).Warn("Unable to parse the URL as valid RSS.. cannot add RSS...")
Engine.CreateServerPushMessage(Engine.ServerPushMessage{MessageType: "serverPushMessage", MessageLevel: "error", Payload: "Unable to parse the URL as valid RSS.. cannot add RSS..."}, conn)
continue MessageLoop
}
Logger.WithFields(logrus.Fields{"RSSFeedTitle": feed.Title}).Info("Have feed from URL...")
Engine.CreateServerPushMessage(Engine.ServerPushMessage{MessageType: "serverPushMessage", MessageLevel: "success", Payload: "Added RSS Feed"}, conn)
newRSSFeedFull := Storage.SingleRSSFeed{}
newRSSFeedFull.Name = feed.Title
newRSSFeedFull.URL = newRSSFeed
fullRSSFeeds.RSSFeeds = append(fullRSSFeeds.RSSFeeds, newRSSFeedFull) // add the new RSS feed to the stack
Engine.CreateServerPushMessage(Engine.ServerPushMessage{MessageType: "serverPushMessage", MessageLevel: "info", Payload: "Adding RSS feed..."}, conn)
Engine.ForceRSSRefresh(db, fullRSSFeeds)
//forcing an RSS refresh to fully populate all rss feeds
case "deleteRSSFeed":
deleteRSSFeed := payloadData["RSSURL"].(string)
Logger.WithFields(logrus.Fields{"message": deleteRSSFeed}).Info("Deleting RSS Feed")
Storage.DeleteRSSFeed(db, deleteRSSFeed)
fullRSSFeeds := Storage.FetchRSSFeeds(db)
Engine.CreateServerPushMessage(Engine.ServerPushMessage{MessageType: "serverPushMessage", MessageLevel: "info", Payload: "Deleting RSS feed..."}, conn)
Engine.ForceRSSRefresh(db, fullRSSFeeds)
case "rssTorrentsRequest":
RSSFeedURL := payloadData["RSSURL"].(string)
Logger.WithFields(logrus.Fields{"RSSFeed": RSSFeedURL}).Info("Requesting torrentList for feed..")
UpdatedRSSFeed := Engine.RefreshSingleRSSFeed(db, Storage.FetchSpecificRSSFeed(db, RSSFeedURL))
TorrentRSSList := Engine.SingleRSSFeedMessage{MessageType: "rssTorrentList", URL: RSSFeedURL, Name: UpdatedRSSFeed.Name, TotalTorrents: len(UpdatedRSSFeed.Torrents), Torrents: UpdatedRSSFeed.Torrents}
Logger.WithFields(logrus.Fields{"TorrentRSSList": TorrentRSSList}).Info("Returning Torrent list from RSSFeed to client")
conn.WriteJSON(TorrentRSSList)
case "magnetLinkSubmit": //if we detect a magnet link we will be adding a magnet torrent
storageValue, ok := payloadData["StorageValue"].(string)
if storageValue == "" || ok == false {
storageValue, err = filepath.Abs(filepath.ToSlash(Config.DefaultMoveFolder))
if err != nil {
Logger.WithFields(logrus.Fields{"err": err, "MagnetLink": Config.DefaultMoveFolder}).Error("Unable to add default Storage Path")
}
} else {
storageValue, err = filepath.Abs(filepath.ToSlash(storageValue))
if err != nil {
Logger.WithFields(logrus.Fields{"err": err, "MagnetLink": storageValue}).Error("Unable to add Storage Path")
Engine.CreateServerPushMessage(Engine.ServerPushMessage{MessageType: "serverPushMessage", MessageLevel: "error", Payload: "Unable to add Storage path..."}, conn)
storageValue, _ = filepath.Abs(filepath.ToSlash(Config.DefaultMoveFolder))
}
}
labelValue, ok := payloadData["Label"].(string)
if labelValue == "" || ok == false {
labelValue = "None"
}
magnetLinks := payloadData["MagnetLinks"].([]interface{})
for _, magnetLink := range magnetLinks {
clientTorrent, err := tclient.AddMagnet(magnetLink.(string)) //reading the payload into the torrent client
if err != nil {
Logger.WithFields(logrus.Fields{"err": err, "MagnetLink": magnetLink}).Error("Unable to add magnetlink to client!")
Engine.CreateServerPushMessage(Engine.ServerPushMessage{MessageType: "serverPushMessage", MessageLevel: "error", Payload: "Unable to add magnetlink to client!"}, conn)
continue MessageLoop //continue out of the loop entirely for this message since we hit an error
}
Logger.WithFields(logrus.Fields{"clientTorrent": clientTorrent, "magnetLink": magnetLink}).Info("Adding torrent to client!")
Engine.CreateServerPushMessage(Engine.ServerPushMessage{MessageType: "serverPushMessage", MessageLevel: "info", Payload: "Received MagnetLink"}, conn)
if len(torrentQueues.ActiveTorrents) > Config.MaxActiveTorrents {
Logger.WithFields(logrus.Fields{"Name: ": clientTorrent.Name()}).Info("Adding New torrent to active, pushing other torrent to queue")
removeTorrent := torrentQueues.ActiveTorrents[:1]
for _, singleTorrent := range runningTorrents {
if singleTorrent.InfoHash().String() == removeTorrent[0] {
oldTorrentInfo := Storage.FetchTorrentFromStorage(db, singleTorrent.InfoHash().String())
Engine.RemoveTorrentFromActive(&oldTorrentInfo, singleTorrent, db)
Storage.UpdateStorageTick(db, oldTorrentInfo)
}
}
}
go Engine.AddTorrent(clientTorrent, torrentLocalStorage, db, "magnet", "", storageValue, labelValue, Config) //starting the torrent and creating local DB entry
}
case "torrentFileSubmit":
base64encoded := payloadData["FileData"].(string)
fileName := payloadData["FileName"].(string)
storageValue, ok := payloadData["StorageValue"].(string)
if storageValue == "" || ok == false {
storageValue, err = filepath.Abs(filepath.ToSlash(Config.DefaultMoveFolder))
if err != nil {
Logger.WithFields(logrus.Fields{"err": err, "MagnetLink": Config.DefaultMoveFolder}).Error("Unable to add Storage Path")
Engine.CreateServerPushMessage(Engine.ServerPushMessage{MessageType: "serverPushMessage", MessageLevel: "error", Payload: "Unable to add default Storage Path"}, conn)
} else {
storageValue, err = filepath.Abs(filepath.ToSlash(storageValue))
if err != nil {
Logger.WithFields(logrus.Fields{"err": err, "MagnetLink": storageValue}).Error("Unable to add Storage Path")
Engine.CreateServerPushMessage(Engine.ServerPushMessage{MessageType: "serverPushMessage", MessageLevel: "error", Payload: "Unable to add Storage Path"}, conn)
storageValue, _ = filepath.Abs(filepath.ToSlash(Config.DefaultMoveFolder))
}
}
}
labelValue, ok := payloadData["Label"].(string)
if labelValue == "" || ok == false {
labelValue = "None"
}
base64file := strings.Split(base64encoded, ",") //Mozilla and Chrome have different payloads, but both start the file after the comma
file, err := base64.StdEncoding.DecodeString(base64file[1]) //grabbing the second half of the string after the split
if err != nil {
Logger.WithFields(logrus.Fields{"Error": err, "file": file}).Info("Unable to decode base64 string to file")
Engine.CreateServerPushMessage(Engine.ServerPushMessage{MessageType: "serverPushMessage", MessageLevel: "error", Payload: "Unable to decode base64 string to file"}, conn)
}
filePath := filepath.Join(Config.TFileUploadFolder, fileName)
filePathAbs, err := filepath.Abs(filePath) //creating a full filepath to store the .torrent files
err = ioutil.WriteFile(filePathAbs, file, 0755) //Dumping our received file into the filename
if err != nil {
Logger.WithFields(logrus.Fields{"filepath": filePathAbs, "file Name": fileName, "Error": err}).Error("Unable to write torrent data to file")
Engine.CreateServerPushMessage(Engine.ServerPushMessage{MessageType: "serverPushMessage", MessageLevel: "error", Payload: "Unable to write torrent data to file"}, conn)
}
clientTorrent, err := tclient.AddTorrentFromFile(filePathAbs)
if err != nil {
Logger.WithFields(logrus.Fields{"filepath": filePathAbs, "Error": err}).Error("Unable to add Torrent to torrent server")
Engine.CreateServerPushMessage(Engine.ServerPushMessage{MessageType: "serverPushMessage", MessageLevel: "error", Payload: "Unable to add Torrent to torrent server"}, conn)
}
Logger.WithFields(logrus.Fields{"clienttorrent": clientTorrent.Name(), "filename": filePathAbs}).Info("Added torrent")
if len(torrentQueues.ActiveTorrents) >= Config.MaxActiveTorrents {
Logger.WithFields(logrus.Fields{"Name: ": clientTorrent.Name()}).Info("Adding New torrent to active, pushing other torrent to queue")
removeTorrent := torrentQueues.ActiveTorrents[:1]
for _, singleTorrent := range runningTorrents {
if singleTorrent.InfoHash().String() == removeTorrent[0] {
oldTorrentInfo := Storage.FetchTorrentFromStorage(db, singleTorrent.InfoHash().String())
Engine.RemoveTorrentFromActive(&oldTorrentInfo, singleTorrent, db)
Storage.UpdateStorageTick(db, oldTorrentInfo)
}
}
}
go Engine.AddTorrent(clientTorrent, torrentLocalStorage, db, "file", filePathAbs, storageValue, labelValue, Config) //starting the torrent and creating local DB entry
case "stopTorrents":
torrentHashes := payloadData["TorrentHashes"].([]interface{})
Engine.CreateServerPushMessage(Engine.ServerPushMessage{MessageType: "serverPushMessage", MessageLevel: "info", Payload: "Received Stop Request"}, conn)
for _, singleTorrent := range tclient.Torrents() {
for _, singleSelection := range torrentHashes {
if singleTorrent.InfoHash().String() == singleSelection {
Logger.WithFields(logrus.Fields{"selection": singleSelection}).Info("Matched for stopping torrents")
oldTorrentInfo := Storage.FetchTorrentFromStorage(db, singleTorrent.InfoHash().String())
Engine.StopTorrent(singleTorrent, &oldTorrentInfo, db)
if len(torrentQueues.QueuedTorrents) > 1 {
addTorrent := torrentQueues.QueuedTorrents[:1]
for _, singleTorrent := range runningTorrents {
if singleTorrent.InfoHash().String() == addTorrent[0] {
Engine.AddTorrentToActive(&torrentLocalStorage, singleTorrent, db)
}
}
}
}
}
}
case "deleteTorrents":
torrentHashes := payloadData["TorrentHashes"].([]interface{})
withData := payloadData["WithData"].(bool) //Checking if torrents should be deleted with data
Engine.CreateServerPushMessage(Engine.ServerPushMessage{MessageType: "serverPushMessage", MessageLevel: "info", Payload: "Received Delete Request"}, conn)
Logger.WithFields(logrus.Fields{"deleteTorrentsPayload": msg.Payload, "torrentlist": msg.Payload, "deleteWithData?": withData}).Info("message for deleting torrents")
for _, singleTorrent := range runningTorrents {
for _, singleSelection := range torrentHashes {
if singleTorrent.InfoHash().String() == singleSelection {
oldTorrentInfo := Storage.FetchTorrentFromStorage(db, singleTorrent.InfoHash().String())
torrentQueues = Storage.FetchQueues(db)
for index, activeTorrentHash := range torrentQueues.ActiveTorrents { //If torrent is in the active slice, pull it
if singleTorrent.InfoHash().String() == activeTorrentHash {
singleTorrent.SetMaxEstablishedConns(0)
torrentQueues.ActiveTorrents = append(torrentQueues.ActiveTorrents[:index], torrentQueues.ActiveTorrents[index+1:]...)
}
}
for index, queuedTorrentHash := range torrentQueues.QueuedTorrents { //If torrent is in the queued slice, pull it
if singleTorrent.InfoHash().String() == queuedTorrentHash {
torrentQueues.QueuedTorrents = append(torrentQueues.QueuedTorrents[:index], torrentQueues.QueuedTorrents[index+1:]...)
}
}
Logger.WithFields(logrus.Fields{"selection": singleSelection}).Info("Matched for deleting torrents")
if withData {
oldTorrentInfo.TorrentStatus = "DroppedData" //Will be cleaned up the next engine loop since deleting a torrent mid loop can cause issues
} else {
oldTorrentInfo.TorrentStatus = "Dropped"
}
Storage.UpdateStorageTick(db, oldTorrentInfo)
Storage.UpdateQueues(db, torrentQueues)
}
}
}
case "startTorrents":
torrentHashes := payloadData["TorrentHashes"].([]interface{})
Logger.WithFields(logrus.Fields{"selection": msg.Payload}).Info("Matched for starting torrents")
Engine.CreateServerPushMessage(Engine.ServerPushMessage{MessageType: "serverPushMessage", MessageLevel: "info", Payload: "Received Start Request"}, conn)
for _, singleTorrent := range runningTorrents {
for _, singleSelection := range torrentHashes {
if singleTorrent.InfoHash().String() == singleSelection {
Logger.WithFields(logrus.Fields{"infoHash": singleTorrent.InfoHash().String()}).Info("Found matching torrent to start")
oldTorrentInfo := Storage.FetchTorrentFromStorage(db, singleTorrent.InfoHash().String())
Engine.AddTorrentToActive(&oldTorrentInfo, singleTorrent, db)
Logger.WithFields(logrus.Fields{"Torrent": oldTorrentInfo.TorrentName}).Info("Changing database to torrent running with 80 max connections")
Storage.UpdateStorageTick(db, oldTorrentInfo) //Updating the torrent status
}
torrentQueues = Storage.FetchQueues(db)
if len(torrentQueues.ActiveTorrents) > Config.MaxActiveTorrents { //Since we are starting a new torrent stop the first torrent in the que if running is full
//removeTorrent := torrentQueues.ActiveTorrents[len(torrentQueues.ActiveTorrents)-1]
removeTorrent := torrentQueues.ActiveTorrents[:1]
for _, singleTorrent := range runningTorrents {
if singleTorrent.InfoHash().String() == removeTorrent[0] {
oldTorrentInfo := Storage.FetchTorrentFromStorage(db, singleTorrent.InfoHash().String())
Engine.RemoveTorrentFromActive(&oldTorrentInfo, singleTorrent, db)
Storage.UpdateStorageTick(db, oldTorrentInfo)
}
}
}
}
}
case "forceUploadTorrents": //TODO allow force to override total limit of queued torrents?
torrentHashes := payloadData["TorrentHashes"].([]interface{})
Logger.WithFields(logrus.Fields{"selection": msg.Payload}).Info("Matched for force Uploading Torrents")
Engine.CreateServerPushMessage(Engine.ServerPushMessage{MessageType: "serverPushMessage", MessageLevel: "info", Payload: "Received Force Start Request"}, conn)
for _, singleTorrent := range runningTorrents {
for _, singleSelection := range torrentHashes {
if singleTorrent.InfoHash().String() == singleSelection {
Logger.WithFields(logrus.Fields{"infoHash": singleTorrent.InfoHash().String()}).Debug("Found matching torrent to force start")
oldTorrentInfo := Storage.FetchTorrentFromStorage(db, singleTorrent.InfoHash().String())
oldTorrentInfo.TorrentUploadLimit = false // no upload limit for this torrent
oldTorrentInfo.TorrentStatus = "Running"
oldTorrentInfo.MaxConnections = 80
Logger.WithFields(logrus.Fields{"NewMax": oldTorrentInfo.MaxConnections, "Torrent": oldTorrentInfo.TorrentName}).Info("Setting max connection from zero to 80")
Storage.UpdateStorageTick(db, oldTorrentInfo) //Updating the torrent status
}
}
}
case "setFilePriority": //TODO disable if the file is already at 100%?
priorityRequested := payloadData["FilePriority"].(string)
torrentHash := payloadData["TorrentHash"].(string)
fileList := payloadData["FilePaths"].([]interface{})
Logger.WithFields(logrus.Fields{"selection": torrentHash}).Info("Matched for setting file priority")
Engine.CreateServerPushMessage(Engine.ServerPushMessage{MessageType: "serverPushMessage", MessageLevel: "info", Payload: "Received Set Priority Request"}, conn)
Logger.WithFields(logrus.Fields{"filelist": fileList}).Debug("Full filelist for setting file priority")
for _, singleTorrent := range runningTorrents {
if singleTorrent.InfoHash().String() == torrentHash {
activeTorrentStruct := Storage.FetchTorrentFromStorage(db, torrentHash) //fetching all the data from the db to update certain fields then write it all back
Logger.WithFields(logrus.Fields{"singleTorrent": singleTorrent}).Debug("Matched for changing file prio torrents")
for _, file := range singleTorrent.Files() {
for _, sentFile := range fileList {
var priorityString string
if file.Path() == sentFile {
switch priorityRequested {
case "High":
priorityString = "High"
file.SetPriority(torrent.PiecePriorityHigh)
case "Normal":
priorityString = "Normal"
file.SetPriority(torrent.PiecePriorityNormal)
case "Cancel":
priorityString = "Cancel"
file.SetPriority(torrent.PiecePriorityNone)
default:
priorityString = "Normal"
file.SetPriority(torrent.PiecePriorityNormal)
}
for i, specificFile := range activeTorrentStruct.TorrentFilePriority { //searching for that specific file
if specificFile.TorrentFilePath == file.DisplayPath() {
activeTorrentStruct.TorrentFilePriority[i].TorrentFilePriority = priorityString //writing just that field to the current struct
}
}
Logger.WithFields(logrus.Fields{"singleTorrent": file.DisplayPath()}).Debug("Setting priority for ", priorityString)
Storage.UpdateStorageTick(db, activeTorrentStruct) //re-writing essentially that entire struct right back into the database
}
}
}
}
}
default:
//conn.Close()
Logger.WithFields(logrus.Fields{"message": msg}).Info("Unrecognized Message from client... ignoring")
return
}
}
})
if Config.UseProxy {
err := http.ListenAndServe(httpAddr, handlers.ProxyHeaders(router))
if err != nil {
Logger.WithFields(logrus.Fields{"error": err}).Fatal("Unable to listen on the http Server!")
}
} else {
err := http.ListenAndServe(httpAddr, nil) //Can't send proxy headers if not used since that can be a security issue
if err != nil {
Logger.WithFields(logrus.Fields{"error": err}).Fatal("Unable to listen on the http Server! (Maybe wrong IP in config, port already in use?) (Config: Not using proxy, see error for more details)")
}
}
}
| [
"\"APP_ID\""
] | [] | [
"APP_ID"
] | [] | ["APP_ID"] | go | 1 | 0 | |
service/s3/s3manager/upload_test.go | // +build go1.8
package s3manager_test
import (
"bytes"
"fmt"
"io"
"io/ioutil"
random "math/rand"
"net/http"
"net/http/httptest"
"os"
"reflect"
"regexp"
"sort"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/awstesting"
"github.com/aws/aws-sdk-go/awstesting/unit"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/internal/s3testing"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
)
var emptyList = []string{}
const respMsg = `<?xml version="1.0" encoding="UTF-8"?>
<CompleteMultipartUploadOutput>
<Location>mockValue</Location>
<Bucket>mockValue</Bucket>
<Key>mockValue</Key>
<ETag>mockValue</ETag>
</CompleteMultipartUploadOutput>`
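// val extracts the value at the awsutil path s from i, dereferencing pointers
// and returning io.Reader values untouched.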
func val(i interface{}, s string) interface{} {
v, err := awsutil.ValuesAtPath(i, s)
if err != nil || len(v) == 0 {
return nil
}
if _, ok := v[0].(io.Reader); ok {
return v[0]
}
if rv := reflect.ValueOf(v[0]); rv.Kind() == reflect.Ptr {
return rv.Elem().Interface()
}
return v[0]
}
func contains(src []string, s string) bool {
for _, v := range src {
if s == v {
return true
}
}
return false
}
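// loggingSvc returns a stubbed S3 client that records the name and parameters
// of every operation not listed in ignoreOps and answers each request with a
// canned multipart-upload style response.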
func loggingSvc(ignoreOps []string) (*s3.S3, *[]string, *[]interface{}) {
var m sync.Mutex
partNum := 0
names := []string{}
params := []interface{}{}
svc := s3.New(unit.Session)
svc.Handlers.Unmarshal.Clear()
svc.Handlers.UnmarshalMeta.Clear()
svc.Handlers.UnmarshalError.Clear()
svc.Handlers.Send.Clear()
svc.Handlers.Send.PushBack(func(r *request.Request) {
m.Lock()
defer m.Unlock()
if !contains(ignoreOps, r.Operation.Name) {
names = append(names, r.Operation.Name)
params = append(params, r.Params)
}
r.HTTPResponse = &http.Response{
StatusCode: 200,
Body: ioutil.NopCloser(bytes.NewReader([]byte(respMsg))),
}
switch data := r.Data.(type) {
case *s3.CreateMultipartUploadOutput:
data.UploadId = aws.String("UPLOAD-ID")
case *s3.UploadPartOutput:
partNum++
data.ETag = aws.String(fmt.Sprintf("ETAG%d", partNum))
case *s3.CompleteMultipartUploadOutput:
data.Location = aws.String("https://location")
data.VersionId = aws.String("VERSION-ID")
data.ETag = aws.String("ETAG")
case *s3.PutObjectOutput:
data.VersionId = aws.String("VERSION-ID")
data.ETag = aws.String("ETAG")
}
})
return svc, &names, ¶ms
}
func buflen(i interface{}) int {
r := i.(io.Reader)
b, _ := ioutil.ReadAll(r)
return len(b)
}
func TestUploadOrderMulti(t *testing.T) {
s, ops, args := loggingSvc(emptyList)
u := s3manager.NewUploaderWithClient(s)
resp, err := u.Upload(&s3manager.UploadInput{
Bucket: aws.String("Bucket"),
Key: aws.String("Key - value"),
Body: bytes.NewReader(buf12MB),
ServerSideEncryption: aws.String("aws:kms"),
SSEKMSKeyId: aws.String("KmsId"),
ContentType: aws.String("content/type"),
})
if err != nil {
t.Errorf("Expected no error but received %v", err)
}
expected := []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "UploadPart", "CompleteMultipartUpload"}
if !reflect.DeepEqual(expected, *ops) {
t.Errorf("Expected %v, but received %v", expected, *ops)
}
if e, a := `https://s3.mock-region.amazonaws.com/Bucket/Key%20-%20value`, resp.Location; e != a {
t.Errorf("Expected %q, but received %q", e, a)
}
if "UPLOAD-ID" != resp.UploadID {
t.Errorf("Expected %q, but received %q", "UPLOAD-ID", resp.UploadID)
}
if "VERSION-ID" != *resp.VersionID {
t.Errorf("Expected %q, but received %q", "VERSION-ID", *resp.VersionID)
}
if "ETAG" != *resp.ETag {
t.Errorf("Expected %q, but received %q", "ETAG", *resp.ETag)
}
// Validate input values
// UploadPart
for i := 1; i < 5; i++ {
v := val((*args)[i], "UploadId")
if "UPLOAD-ID" != v {
t.Errorf("Expected %q, but received %q", "UPLOAD-ID", v)
}
}
// CompleteMultipartUpload
v := val((*args)[4], "UploadId")
if "UPLOAD-ID" != v {
t.Errorf("Expected %q, but received %q", "UPLOAD-ID", v)
}
for i := 0; i < 3; i++ {
e := val((*args)[4], fmt.Sprintf("MultipartUpload.Parts[%d].PartNumber", i))
if int64(i+1) != e.(int64) {
t.Errorf("Expected %d, but received %d", i+1, e)
}
}
vals := []string{
val((*args)[4], "MultipartUpload.Parts[0].ETag").(string),
val((*args)[4], "MultipartUpload.Parts[1].ETag").(string),
val((*args)[4], "MultipartUpload.Parts[2].ETag").(string),
}
for _, a := range vals {
if matched, err := regexp.MatchString(`^ETAG\d+$`, a); !matched || err != nil {
t.Errorf("Failed regexp expression `^ETAG\\d+$`")
}
}
// Custom headers
e := val((*args)[0], "ServerSideEncryption")
if e != "aws:kms" {
t.Errorf("Expected %q, but received %q", "aws:kms", e)
}
e = val((*args)[0], "SSEKMSKeyId")
if e != "KmsId" {
t.Errorf("Expected %q, but received %q", "KmsId", e)
}
e = val((*args)[0], "ContentType")
if e != "content/type" {
t.Errorf("Expected %q, but received %q", "content/type", e)
}
}
func TestUploadOrderMultiDifferentPartSize(t *testing.T) {
s, ops, args := loggingSvc(emptyList)
mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) {
u.PartSize = 1024 * 1024 * 7
u.Concurrency = 1
})
_, err := mgr.Upload(&s3manager.UploadInput{
Bucket: aws.String("Bucket"),
Key: aws.String("Key"),
Body: bytes.NewReader(buf12MB),
})
if err != nil {
t.Errorf("Expected no error but received %v", err)
}
vals := []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "CompleteMultipartUpload"}
if !reflect.DeepEqual(vals, *ops) {
t.Errorf("Expected %v, but received %v", vals, *ops)
}
// Part lengths
if len := buflen(val((*args)[1], "Body")); 1024*1024*7 != len {
t.Errorf("Expected %d, but received %d", 1024*1024*7, len)
}
if len := buflen(val((*args)[2], "Body")); 1024*1024*5 != len {
t.Errorf("Expected %d, but received %d", 1024*1024*5, len)
}
}
func TestUploadIncreasePartSize(t *testing.T) {
s, ops, args := loggingSvc(emptyList)
mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) {
u.Concurrency = 1
u.MaxUploadParts = 2
})
_, err := mgr.Upload(&s3manager.UploadInput{
Bucket: aws.String("Bucket"),
Key: aws.String("Key"),
Body: bytes.NewReader(buf12MB),
})
if err != nil {
t.Errorf("Expected no error but received %v", err)
}
if int64(s3manager.DefaultDownloadPartSize) != mgr.PartSize {
t.Errorf("Expected %d, but received %d", s3manager.DefaultDownloadPartSize, mgr.PartSize)
}
vals := []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "CompleteMultipartUpload"}
if !reflect.DeepEqual(vals, *ops) {
t.Errorf("Expected %v, but received %v", vals, *ops)
}
// Part lengths
if len := buflen(val((*args)[1], "Body")); (1024*1024*6)+1 != len {
t.Errorf("Expected %d, but received %d", (1024*1024*6)+1, len)
}
if len := buflen(val((*args)[2], "Body")); (1024*1024*6)-1 != len {
t.Errorf("Expected %d, but received %d", (1024*1024*6)-1, len)
}
}
func TestUploadFailIfPartSizeTooSmall(t *testing.T) {
mgr := s3manager.NewUploader(unit.Session, func(u *s3manager.Uploader) {
u.PartSize = 5
})
resp, err := mgr.Upload(&s3manager.UploadInput{
Bucket: aws.String("Bucket"),
Key: aws.String("Key"),
Body: bytes.NewReader(buf12MB),
})
if resp != nil {
t.Errorf("Expected response to be nil, but received %v", resp)
}
if err == nil {
t.Errorf("Expected error, but received nil")
}
aerr := err.(awserr.Error)
if e, a := "ConfigError", aerr.Code(); e != a {
t.Errorf("Expected %q, but received %q", e, a)
}
if e, a := "part size must be at least", aerr.Message(); !strings.Contains(a, e) {
t.Errorf("expect %v to be in %v", e, a)
}
}
func TestUploadOrderSingle(t *testing.T) {
s, ops, args := loggingSvc(emptyList)
mgr := s3manager.NewUploaderWithClient(s)
resp, err := mgr.Upload(&s3manager.UploadInput{
Bucket: aws.String("Bucket"),
Key: aws.String("Key - value"),
Body: bytes.NewReader(buf2MB),
ServerSideEncryption: aws.String("aws:kms"),
SSEKMSKeyId: aws.String("KmsId"),
ContentType: aws.String("content/type"),
})
if err != nil {
t.Errorf("Expected no error but received %v", err)
}
if vals := []string{"PutObject"}; !reflect.DeepEqual(vals, *ops) {
t.Errorf("Expected %v, but received %v", vals, *ops)
}
if e, a := `https://s3.mock-region.amazonaws.com/Bucket/Key%20-%20value`, resp.Location; e != a {
t.Errorf("Expected %q, but received %q", e, a)
}
if e := "VERSION-ID"; e != *resp.VersionID {
t.Errorf("Expected %q, but received %q", e, *resp.VersionID)
}
if "ETAG" != *resp.ETag {
t.Errorf("Expected %q, but received %q", "ETAG", *resp.ETag)
}
if len(resp.UploadID) > 0 {
t.Errorf("Expected empty string, but received %q", resp.UploadID)
}
if e, a := "aws:kms", val((*args)[0], "ServerSideEncryption").(string); e != a {
t.Errorf("Expected %q, but received %q", e, a)
}
if e, a := "KmsId", val((*args)[0], "SSEKMSKeyId").(string); e != a {
t.Errorf("Expected %q, but received %q", e, a)
}
if e, a := "content/type", val((*args)[0], "ContentType").(string); e != a {
t.Errorf("Expected %q, but received %q", e, a)
}
}
func TestUploadOrderSingleFailure(t *testing.T) {
s, ops, _ := loggingSvc(emptyList)
s.Handlers.Send.PushBack(func(r *request.Request) {
r.HTTPResponse.StatusCode = 400
})
mgr := s3manager.NewUploaderWithClient(s)
resp, err := mgr.Upload(&s3manager.UploadInput{
Bucket: aws.String("Bucket"),
Key: aws.String("Key"),
Body: bytes.NewReader(buf2MB),
})
if err == nil {
t.Error("Expected error, but receievd nil")
}
if vals := []string{"PutObject"}; !reflect.DeepEqual(vals, *ops) {
t.Errorf("Expected %v, but received %v", vals, *ops)
}
if resp != nil {
t.Errorf("Expected response to be nil, but received %v", resp)
}
}
func TestUploadOrderZero(t *testing.T) {
s, ops, args := loggingSvc(emptyList)
mgr := s3manager.NewUploaderWithClient(s)
resp, err := mgr.Upload(&s3manager.UploadInput{
Bucket: aws.String("Bucket"),
Key: aws.String("Key"),
Body: bytes.NewReader(make([]byte, 0)),
})
if err != nil {
t.Errorf("Expected no error but received %v", err)
}
if vals := []string{"PutObject"}; !reflect.DeepEqual(vals, *ops) {
t.Errorf("Expected %v, but received %v", vals, *ops)
}
if len(resp.Location) == 0 {
t.Error("Expected Location to not be empty")
}
if len(resp.UploadID) > 0 {
t.Errorf("Expected empty string, but received %q", resp.UploadID)
}
if e, a := 0, buflen(val((*args)[0], "Body")); e != a {
t.Errorf("Expected %d, but received %d", e, a)
}
}
func TestUploadOrderMultiFailure(t *testing.T) {
s, ops, _ := loggingSvc(emptyList)
s.Handlers.Send.PushBack(func(r *request.Request) {
switch t := r.Data.(type) {
case *s3.UploadPartOutput:
if *t.ETag == "ETAG2" {
r.HTTPResponse.StatusCode = 400
}
}
})
mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) {
u.Concurrency = 1
})
_, err := mgr.Upload(&s3manager.UploadInput{
Bucket: aws.String("Bucket"),
Key: aws.String("Key"),
Body: bytes.NewReader(buf12MB),
})
if err == nil {
t.Error("Expected error, but receievd nil")
}
if e, a := []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "AbortMultipartUpload"}, *ops; !reflect.DeepEqual(e, a) {
t.Errorf("Expected %v, but received %v", e, a)
}
}
func TestUploadOrderMultiFailureOnComplete(t *testing.T) {
s, ops, _ := loggingSvc(emptyList)
s.Handlers.Send.PushBack(func(r *request.Request) {
switch r.Data.(type) {
case *s3.CompleteMultipartUploadOutput:
r.HTTPResponse.StatusCode = 400
}
})
mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) {
u.Concurrency = 1
})
_, err := mgr.Upload(&s3manager.UploadInput{
Bucket: aws.String("Bucket"),
Key: aws.String("Key"),
Body: bytes.NewReader(buf12MB),
})
if err == nil {
t.Error("Expected error, but receievd nil")
}
if e, a := []string{"CreateMultipartUpload", "UploadPart", "UploadPart",
"UploadPart", "CompleteMultipartUpload", "AbortMultipartUpload"}, *ops; !reflect.DeepEqual(e, a) {
t.Errorf("Expected %v, but received %v", e, a)
}
}
func TestUploadOrderMultiFailureOnCreate(t *testing.T) {
s, ops, _ := loggingSvc(emptyList)
s.Handlers.Send.PushBack(func(r *request.Request) {
switch r.Data.(type) {
case *s3.CreateMultipartUploadOutput:
r.HTTPResponse.StatusCode = 400
}
})
mgr := s3manager.NewUploaderWithClient(s)
_, err := mgr.Upload(&s3manager.UploadInput{
Bucket: aws.String("Bucket"),
Key: aws.String("Key"),
Body: bytes.NewReader(make([]byte, 1024*1024*12)),
})
if err == nil {
t.Error("Expected error, but receievd nil")
}
if e, a := []string{"CreateMultipartUpload"}, *ops; !reflect.DeepEqual(e, a) {
t.Errorf("Expected %v, but received %v", e, a)
}
}
func TestUploadOrderMultiFailureLeaveParts(t *testing.T) {
s, ops, _ := loggingSvc(emptyList)
s.Handlers.Send.PushBack(func(r *request.Request) {
switch data := r.Data.(type) {
case *s3.UploadPartOutput:
if *data.ETag == "ETAG2" {
r.HTTPResponse.StatusCode = 400
}
}
})
mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) {
u.Concurrency = 1
u.LeavePartsOnError = true
})
_, err := mgr.Upload(&s3manager.UploadInput{
Bucket: aws.String("Bucket"),
Key: aws.String("Key"),
Body: bytes.NewReader(make([]byte, 1024*1024*12)),
})
if err == nil {
t.Error("Expected error, but receievd nil")
}
if e, a := []string{"CreateMultipartUpload", "UploadPart", "UploadPart"}, *ops; !reflect.DeepEqual(e, a) {
t.Errorf("Expected %v, but received %v", e, a)
}
}
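// failreader is an io.Reader stub whose Read succeeds until it has been called
// `times` times; from that call onward it returns an error.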
type failreader struct {
times int
failCount int
}
func (f *failreader) Read(b []byte) (int, error) {
f.failCount++
if f.failCount >= f.times {
return 0, fmt.Errorf("random failure")
}
return len(b), nil
}
func TestUploadOrderReadFail1(t *testing.T) {
s, ops, _ := loggingSvc(emptyList)
mgr := s3manager.NewUploaderWithClient(s)
_, err := mgr.Upload(&s3manager.UploadInput{
Bucket: aws.String("Bucket"),
Key: aws.String("Key"),
Body: &failreader{times: 1},
})
if e, a := "ReadRequestBody", err.(awserr.Error).Code(); e != a {
t.Errorf("Expected %q, but received %q", e, a)
}
if e, a := err.(awserr.Error).OrigErr().Error(), "random failure"; e != a {
t.Errorf("Expected %q, but received %q", e, a)
}
if e, a := []string{}, *ops; !reflect.DeepEqual(e, a) {
t.Errorf("Expected %v, but received %v", e, a)
}
}
func TestUploadOrderReadFail2(t *testing.T) {
s, ops, _ := loggingSvc([]string{"UploadPart"})
mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) {
u.Concurrency = 1
})
_, err := mgr.Upload(&s3manager.UploadInput{
Bucket: aws.String("Bucket"),
Key: aws.String("Key"),
Body: &failreader{times: 2},
})
if e, a := "MultipartUpload", err.(awserr.Error).Code(); e != a {
t.Errorf("Expected %q, but received %q", e, a)
}
if e, a := "ReadRequestBody", err.(awserr.Error).OrigErr().(awserr.Error).Code(); e != a {
t.Errorf("Expected %q, but received %q", e, a)
}
if errStr := err.(awserr.Error).OrigErr().Error(); !strings.Contains(errStr, "random failure") {
t.Errorf("Expected error to contains 'random failure', but was %q", errStr)
}
if e, a := []string{"CreateMultipartUpload", "AbortMultipartUpload"}, *ops; !reflect.DeepEqual(e, a) {
t.Errorf("Expected %v, but receievd %v", e, a)
}
}
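// sizedReader is an io.Reader that serves `size` bytes of zeroed data and then
// returns `err` (io.EOF when err is unset).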
type sizedReader struct {
size int
cur int
err error
}
func (s *sizedReader) Read(p []byte) (n int, err error) {
if s.cur >= s.size {
if s.err == nil {
s.err = io.EOF
}
return 0, s.err
}
n = len(p)
s.cur += len(p)
if s.cur > s.size {
n -= s.cur - s.size
}
return n, err
}
func TestUploadOrderMultiBufferedReader(t *testing.T) {
s, ops, args := loggingSvc(emptyList)
mgr := s3manager.NewUploaderWithClient(s)
_, err := mgr.Upload(&s3manager.UploadInput{
Bucket: aws.String("Bucket"),
Key: aws.String("Key"),
Body: &sizedReader{size: 1024 * 1024 * 12},
})
if err != nil {
t.Errorf("Expected no error but received %v", err)
}
if e, a := []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "UploadPart", "CompleteMultipartUpload"}, *ops; !reflect.DeepEqual(e, a) {
t.Errorf("Expected %v, but receievd %v", e, a)
}
// Part lengths
parts := []int{
buflen(val((*args)[1], "Body")),
buflen(val((*args)[2], "Body")),
buflen(val((*args)[3], "Body")),
}
sort.Ints(parts)
if e, a := []int{1024 * 1024 * 2, 1024 * 1024 * 5, 1024 * 1024 * 5}, parts; !reflect.DeepEqual(e, a) {
t.Errorf("Expected %v, but receievd %v", e, a)
}
}
func TestUploadOrderMultiBufferedReaderPartial(t *testing.T) {
s, ops, args := loggingSvc(emptyList)
mgr := s3manager.NewUploaderWithClient(s)
_, err := mgr.Upload(&s3manager.UploadInput{
Bucket: aws.String("Bucket"),
Key: aws.String("Key"),
Body: &sizedReader{size: 1024 * 1024 * 12, err: io.EOF},
})
if err != nil {
t.Errorf("Expected no error but received %v", err)
}
if e, a := []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "UploadPart", "CompleteMultipartUpload"}, *ops; !reflect.DeepEqual(e, a) {
t.Errorf("Expected %v, but receievd %v", e, a)
}
// Part lengths
parts := []int{
buflen(val((*args)[1], "Body")),
buflen(val((*args)[2], "Body")),
buflen(val((*args)[3], "Body")),
}
sort.Ints(parts)
if e, a := []int{1024 * 1024 * 2, 1024 * 1024 * 5, 1024 * 1024 * 5}, parts; !reflect.DeepEqual(e, a) {
t.Errorf("Expected %v, but receievd %v", e, a)
}
}
// TestUploadOrderMultiBufferedReaderEOF tests the edge case where the
// file size is the same as part size.
func TestUploadOrderMultiBufferedReaderEOF(t *testing.T) {
s, ops, args := loggingSvc(emptyList)
mgr := s3manager.NewUploaderWithClient(s)
_, err := mgr.Upload(&s3manager.UploadInput{
Bucket: aws.String("Bucket"),
Key: aws.String("Key"),
Body: &sizedReader{size: 1024 * 1024 * 10, err: io.EOF},
})
if err != nil {
t.Errorf("Expected no error but received %v", err)
}
if e, a := []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "CompleteMultipartUpload"}, *ops; !reflect.DeepEqual(e, a) {
t.Errorf("Expected %v, but receievd %v", e, a)
}
// Part lengths
parts := []int{
buflen(val((*args)[1], "Body")),
buflen(val((*args)[2], "Body")),
}
sort.Ints(parts)
if e, a := []int{1024 * 1024 * 5, 1024 * 1024 * 5}, parts; !reflect.DeepEqual(e, a) {
t.Errorf("Expected %v, but receievd %v", e, a)
}
}
func TestUploadOrderMultiBufferedReaderExceedTotalParts(t *testing.T) {
s, ops, _ := loggingSvc([]string{"UploadPart"})
mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) {
u.Concurrency = 1
u.MaxUploadParts = 2
})
resp, err := mgr.Upload(&s3manager.UploadInput{
Bucket: aws.String("Bucket"),
Key: aws.String("Key"),
Body: &sizedReader{size: 1024 * 1024 * 12},
})
if err == nil {
t.Error("Expected an error, but received nil")
}
if resp != nil {
t.Errorf("Expected nil, but receievd %v", resp)
}
if e, a := []string{"CreateMultipartUpload", "AbortMultipartUpload"}, *ops; !reflect.DeepEqual(e, a) {
t.Errorf("Expected %v, but receievd %v", e, a)
}
aerr := err.(awserr.Error)
if e, a := "MultipartUpload", aerr.Code(); e != a {
t.Errorf("Expected %q, but received %q", e, a)
}
if e, a := "TotalPartsExceeded", aerr.OrigErr().(awserr.Error).Code(); e != a {
t.Errorf("Expected %q, but received %q", e, a)
}
if !strings.Contains(aerr.Error(), "configured MaxUploadParts (2)") {
t.Errorf("Expected error to contain 'configured MaxUploadParts (2)', but receievd %q", aerr.Error())
}
}
func TestUploadOrderSingleBufferedReader(t *testing.T) {
s, ops, _ := loggingSvc(emptyList)
mgr := s3manager.NewUploaderWithClient(s)
resp, err := mgr.Upload(&s3manager.UploadInput{
Bucket: aws.String("Bucket"),
Key: aws.String("Key"),
Body: &sizedReader{size: 1024 * 1024 * 2},
})
if err != nil {
t.Errorf("Expected no error but received %v", err)
}
if e, a := []string{"PutObject"}, *ops; !reflect.DeepEqual(e, a) {
t.Errorf("Expected %v, but received %v", e, a)
}
if len(resp.Location) == 0 {
t.Error("Expected a value in Location but received empty string")
}
if len(resp.UploadID) > 0 {
t.Errorf("Expected empty string but received %q", resp.UploadID)
}
}
func TestUploadZeroLenObject(t *testing.T) {
requestMade := false
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
requestMade = true
w.WriteHeader(http.StatusOK)
}))
defer server.Close()
mgr := s3manager.NewUploaderWithClient(s3.New(unit.Session, &aws.Config{
Endpoint: aws.String(server.URL),
}))
resp, err := mgr.Upload(&s3manager.UploadInput{
Bucket: aws.String("Bucket"),
Key: aws.String("Key"),
Body: strings.NewReader(""),
})
if err != nil {
t.Errorf("Expected no error but received %v", err)
}
if !requestMade {
t.Error("Expected request to have been made, but was not")
}
if len(resp.Location) == 0 {
t.Error("Expected a non-empty string value for Location")
}
if len(resp.UploadID) > 0 {
t.Errorf("Expected empty string, but received %q", resp.UploadID)
}
}
func TestUploadInputS3PutObjectInputPairity(t *testing.T) {
matchings := compareStructType(reflect.TypeOf(s3.PutObjectInput{}),
reflect.TypeOf(s3manager.UploadInput{}))
aOnly := []string{}
bOnly := []string{}
for k, c := range matchings {
if c == 1 && k != "ContentLength" {
aOnly = append(aOnly, k)
} else if c == 2 {
bOnly = append(bOnly, k)
}
}
if len(aOnly) > 0 {
t.Errorf("Expected empty array, but received %v", aOnly)
}
if len(bOnly) > 0 {
t.Errorf("Expected empty array, but received %v", bOnly)
}
}
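// testIncompleteReader reports successful reads until Size bytes have been
// consumed, then returns io.ErrUnexpectedEOF to simulate a truncated body.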
type testIncompleteReader struct {
Size int64
read int64
}
func (r *testIncompleteReader) Read(p []byte) (n int, err error) {
r.read += int64(len(p))
if r.read >= r.Size {
return int(r.read - r.Size), io.ErrUnexpectedEOF
}
return len(p), nil
}
func TestUploadUnexpectedEOF(t *testing.T) {
s, ops, _ := loggingSvc(emptyList)
mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) {
u.Concurrency = 1
u.PartSize = s3manager.MinUploadPartSize
})
_, err := mgr.Upload(&s3manager.UploadInput{
Bucket: aws.String("Bucket"),
Key: aws.String("Key"),
Body: &testIncompleteReader{
Size: int64(s3manager.MinUploadPartSize + 1),
},
})
if err == nil {
t.Error("Expected error, but received none")
}
// Ensure upload started.
if e, a := "CreateMultipartUpload", (*ops)[0]; e != a {
t.Errorf("Expected %q, but received %q", e, a)
}
// Part may or may not be sent because of timing of sending parts and
// reading next part in upload manager. Just check for the last abort.
if e, a := "AbortMultipartUpload", (*ops)[len(*ops)-1]; e != a {
t.Errorf("Expected %q, but received %q", e, a)
}
}
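// compareStructType maps each field name to a bitmask: 1 if the field exists
// only in a, 2 if only in b, 3 if it exists in both structs.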
func compareStructType(a, b reflect.Type) map[string]int {
if a.Kind() != reflect.Struct || b.Kind() != reflect.Struct {
panic(fmt.Sprintf("types must both be structs, got %v and %v", a.Kind(), b.Kind()))
}
aFields := enumFields(a)
bFields := enumFields(b)
matchings := map[string]int{}
for i := 0; i < len(aFields) || i < len(bFields); i++ {
if i < len(aFields) {
c := matchings[aFields[i].Name]
matchings[aFields[i].Name] = c + 1
}
if i < len(bFields) {
c := matchings[bFields[i].Name]
matchings[bFields[i].Name] = c + 2
}
}
return matchings
}
func enumFields(v reflect.Type) []reflect.StructField {
fields := []reflect.StructField{}
for i := 0; i < v.NumField(); i++ {
field := v.Field(i)
if field.PkgPath != "" {
// Skip unexported fields; PkgPath is only set for those.
continue
}
fields = append(fields, field)
}
return fields
}
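// fooReaderAt implements both io.Reader and io.ReaderAt, always reporting 12
// bytes and io.EOF, which lets TestReaderAt check that the uploader derives a
// Content-Length of 12 from a ReaderAt body.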
type fooReaderAt struct{}
func (r *fooReaderAt) Read(p []byte) (n int, err error) {
return 12, io.EOF
}
func (r *fooReaderAt) ReadAt(p []byte, off int64) (n int, err error) {
return 12, io.EOF
}
func TestReaderAt(t *testing.T) {
svc := s3.New(unit.Session)
svc.Handlers.Unmarshal.Clear()
svc.Handlers.UnmarshalMeta.Clear()
svc.Handlers.UnmarshalError.Clear()
svc.Handlers.Send.Clear()
contentLen := ""
svc.Handlers.Send.PushBack(func(r *request.Request) {
contentLen = r.HTTPRequest.Header.Get("Content-Length")
r.HTTPResponse = &http.Response{
StatusCode: 200,
Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
}
})
mgr := s3manager.NewUploaderWithClient(svc, func(u *s3manager.Uploader) {
u.Concurrency = 1
})
_, err := mgr.Upload(&s3manager.UploadInput{
Bucket: aws.String("Bucket"),
Key: aws.String("Key"),
Body: &fooReaderAt{},
})
if err != nil {
t.Errorf("Expected no error but received %v", err)
}
if e, a := "12", contentLen; e != a {
t.Errorf("Expected %q, but received %q", e, a)
}
}
func TestSSE(t *testing.T) {
svc := s3.New(unit.Session)
svc.Handlers.Unmarshal.Clear()
svc.Handlers.UnmarshalMeta.Clear()
svc.Handlers.UnmarshalError.Clear()
svc.Handlers.ValidateResponse.Clear()
svc.Handlers.Send.Clear()
partNum := 0
mutex := &sync.Mutex{}
svc.Handlers.Send.PushBack(func(r *request.Request) {
mutex.Lock()
defer mutex.Unlock()
r.HTTPResponse = &http.Response{
StatusCode: 200,
Body: ioutil.NopCloser(bytes.NewReader([]byte(respMsg))),
}
switch data := r.Data.(type) {
case *s3.CreateMultipartUploadOutput:
data.UploadId = aws.String("UPLOAD-ID")
case *s3.UploadPartOutput:
input := r.Params.(*s3.UploadPartInput)
if input.SSECustomerAlgorithm == nil {
t.Fatal("SSECustomerAlgoritm should not be nil")
}
if input.SSECustomerKey == nil {
t.Fatal("SSECustomerKey should not be nil")
}
partNum++
data.ETag = aws.String(fmt.Sprintf("ETAG%d", partNum))
case *s3.CompleteMultipartUploadOutput:
data.Location = aws.String("https://location")
data.VersionId = aws.String("VERSION-ID")
case *s3.PutObjectOutput:
data.VersionId = aws.String("VERSION-ID")
}
})
mgr := s3manager.NewUploaderWithClient(svc, func(u *s3manager.Uploader) {
u.Concurrency = 5
})
_, err := mgr.Upload(&s3manager.UploadInput{
Bucket: aws.String("Bucket"),
Key: aws.String("Key"),
SSECustomerAlgorithm: aws.String("AES256"),
SSECustomerKey: aws.String("foo"),
Body: bytes.NewBuffer(make([]byte, 1024*1024*10)),
})
if err != nil {
t.Fatal("Expected no error, but received" + err.Error())
}
}
func TestUploadWithContextCanceled(t *testing.T) {
u := s3manager.NewUploader(unit.Session)
params := s3manager.UploadInput{
Bucket: aws.String("Bucket"),
Key: aws.String("Key"),
Body: bytes.NewReader(make([]byte, 0)),
}
ctx := &awstesting.FakeContext{DoneCh: make(chan struct{})}
ctx.Error = fmt.Errorf("context canceled")
close(ctx.DoneCh)
_, err := u.UploadWithContext(ctx, ¶ms)
if err == nil {
t.Fatalf("expected error, did not get one")
}
aerr := err.(awserr.Error)
if e, a := request.CanceledErrorCode, aerr.Code(); e != a {
t.Errorf("expected error code %q, got %q", e, a)
}
if e, a := "canceled", aerr.Message(); !strings.Contains(a, e) {
t.Errorf("expected error message to contain %q, but did not %q", e, a)
}
}
// S3 Uploader incorrectly fails an upload if the content being uploaded
// has a size of MinPartSize * MaxUploadParts.
// Github: aws/aws-sdk-go#2557
func TestUploadMaxPartsEOF(t *testing.T) {
s, ops, _ := loggingSvc(emptyList)
mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) {
u.Concurrency = 1
u.PartSize = s3manager.DefaultUploadPartSize
u.MaxUploadParts = 2
})
f := bytes.NewReader(make([]byte, int(mgr.PartSize)*mgr.MaxUploadParts))
r1 := io.NewSectionReader(f, 0, s3manager.DefaultUploadPartSize)
r2 := io.NewSectionReader(f, s3manager.DefaultUploadPartSize, 2*s3manager.DefaultUploadPartSize)
body := io.MultiReader(r1, r2)
_, err := mgr.Upload(&s3manager.UploadInput{
Bucket: aws.String("Bucket"),
Key: aws.String("Key"),
Body: body,
})
if err != nil {
t.Fatalf("expect no error, got %v", err)
}
expectOps := []string{
"CreateMultipartUpload",
"UploadPart",
"UploadPart",
"CompleteMultipartUpload",
}
if e, a := expectOps, *ops; !reflect.DeepEqual(e, a) {
t.Errorf("expect %v ops, got %v", e, a)
}
}
func createTempFile(t *testing.T, size int64) (*os.File, func(*testing.T), error) {
file, err := ioutil.TempFile(os.TempDir(), aws.SDKName+t.Name())
if err != nil {
return nil, nil, err
}
filename := file.Name()
if err := file.Truncate(size); err != nil {
return nil, nil, err
}
return file,
func(t *testing.T) {
if err := file.Close(); err != nil {
t.Errorf("failed to close temp file, %s, %v", filename, err)
}
if err := os.Remove(filename); err != nil {
t.Errorf("failed to remove temp file, %s, %v", filename, err)
}
},
nil
}
func buildFailHandlers(tb testing.TB, parts, retry int) []http.Handler {
handlers := make([]http.Handler, parts)
for i := 0; i < len(handlers); i++ {
handlers[i] = &failPartHandler{
tb: tb,
failsRemaining: retry,
successHandler: successPartHandler{tb: tb},
}
}
return handlers
}
func TestUploadRetry(t *testing.T) {
const numParts, retries = 3, 10
testFile, testFileCleanup, err := createTempFile(t, s3manager.DefaultUploadPartSize*numParts)
if err != nil {
t.Fatalf("failed to create test file, %v", err)
}
defer testFileCleanup(t)
cases := map[string]struct {
Body io.Reader
PartHandlers func(testing.TB) []http.Handler
}{
"bytes.Buffer": {
Body: bytes.NewBuffer(make([]byte, s3manager.DefaultUploadPartSize*numParts)),
PartHandlers: func(tb testing.TB) []http.Handler {
return buildFailHandlers(tb, numParts, retries)
},
},
"bytes.Reader": {
Body: bytes.NewReader(make([]byte, s3manager.DefaultUploadPartSize*numParts)),
PartHandlers: func(tb testing.TB) []http.Handler {
return buildFailHandlers(tb, numParts, retries)
},
},
"os.File": {
Body: testFile,
PartHandlers: func(tb testing.TB) []http.Handler {
return buildFailHandlers(tb, numParts, retries)
},
},
}
for name, c := range cases {
t.Run(name, func(t *testing.T) {
mux := newMockS3UploadServer(t, c.PartHandlers(t))
server := httptest.NewServer(mux)
defer server.Close()
var logger aws.Logger
var logLevel *aws.LogLevelType
if v := os.Getenv("DEBUG_BODY"); len(v) != 0 {
logger = t
logLevel = aws.LogLevel(
aws.LogDebugWithRequestErrors | aws.LogDebugWithRequestRetries,
)
}
sess := unit.Session.Copy(&aws.Config{
Endpoint: aws.String(server.URL),
S3ForcePathStyle: aws.Bool(true),
DisableSSL: aws.Bool(true),
MaxRetries: aws.Int(retries + 1),
SleepDelay: func(time.Duration) {},
Logger: logger,
LogLevel: logLevel,
//Credentials: credentials.AnonymousCredentials,
})
uploader := s3manager.NewUploader(sess, func(u *s3manager.Uploader) {
// u.Concurrency = 1
})
_, err := uploader.Upload(&s3manager.UploadInput{
Bucket: aws.String("bucket"),
Key: aws.String("key"),
Body: c.Body,
})
if err != nil {
t.Fatalf("expect no error, got %v", err)
}
})
}
}
func TestUploadBufferStrategy(t *testing.T) {
cases := map[string]struct {
PartSize int64
Size int64
Strategy s3manager.ReadSeekerWriteToProvider
callbacks int
}{
"NoBuffer": {
PartSize: s3manager.DefaultUploadPartSize,
Strategy: nil,
},
"SinglePart": {
PartSize: s3manager.DefaultUploadPartSize,
Size: s3manager.DefaultUploadPartSize,
Strategy: &recordedBufferProvider{size: int(s3manager.DefaultUploadPartSize)},
callbacks: 1,
},
"MultiPart": {
PartSize: s3manager.DefaultUploadPartSize,
Size: s3manager.DefaultUploadPartSize * 2,
Strategy: &recordedBufferProvider{size: int(s3manager.DefaultUploadPartSize)},
callbacks: 2,
},
}
for name, tCase := range cases {
t.Run(name, func(t *testing.T) {
_ = tCase
sess := unit.Session.Copy()
svc := s3.New(sess)
svc.Handlers.Unmarshal.Clear()
svc.Handlers.UnmarshalMeta.Clear()
svc.Handlers.UnmarshalError.Clear()
svc.Handlers.Send.Clear()
svc.Handlers.Send.PushBack(func(r *request.Request) {
if r.Body != nil {
io.Copy(ioutil.Discard, r.Body)
}
r.HTTPResponse = &http.Response{
StatusCode: 200,
Body: ioutil.NopCloser(bytes.NewReader([]byte(respMsg))),
}
switch data := r.Data.(type) {
case *s3.CreateMultipartUploadOutput:
data.UploadId = aws.String("UPLOAD-ID")
case *s3.UploadPartOutput:
data.ETag = aws.String(fmt.Sprintf("ETAG%d", random.Int()))
case *s3.CompleteMultipartUploadOutput:
data.Location = aws.String("https://location")
data.VersionId = aws.String("VERSION-ID")
case *s3.PutObjectOutput:
data.VersionId = aws.String("VERSION-ID")
}
})
uploader := s3manager.NewUploaderWithClient(svc, func(u *s3manager.Uploader) {
u.PartSize = tCase.PartSize
u.BufferProvider = tCase.Strategy
u.Concurrency = 1
})
expected := s3testing.GetTestBytes(int(tCase.Size))
_, err := uploader.Upload(&s3manager.UploadInput{
Bucket: aws.String("bucket"),
Key: aws.String("key"),
Body: bytes.NewReader(expected),
})
if err != nil {
t.Fatalf("failed to upload file: %v", err)
}
switch strat := tCase.Strategy.(type) {
case *recordedBufferProvider:
if !bytes.Equal(expected, strat.content) {
t.Errorf("content buffered did not match expected")
}
if tCase.callbacks != strat.callbackCount {
t.Errorf("expected %v, got %v callbacks", tCase.callbacks, strat.callbackCount)
}
}
})
}
}
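// mockS3UploadServer emulates the S3 multipart-upload HTTP API: it answers
// CreateMultipartUpload, CompleteMultipartUpload and AbortMultipartUpload with
// canned XML and routes each UploadPart request to the handler registered for
// that part number.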
type mockS3UploadServer struct {
*http.ServeMux
tb testing.TB
partHandler []http.Handler
}
func newMockS3UploadServer(tb testing.TB, partHandler []http.Handler) *mockS3UploadServer {
s := &mockS3UploadServer{
ServeMux: http.NewServeMux(),
partHandler: partHandler,
tb: tb,
}
s.HandleFunc("/", s.handleRequest)
return s
}
func (s mockS3UploadServer) handleRequest(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
_, hasUploads := r.URL.Query()["uploads"]
switch {
case r.Method == "POST" && hasUploads:
// CreateMultipartUpload
w.Header().Set("Content-Length", strconv.Itoa(len(createUploadResp)))
w.Write([]byte(createUploadResp))
case r.Method == "PUT":
// UploadPart
partNumStr := r.URL.Query().Get("partNumber")
id, err := strconv.Atoi(partNumStr)
if err != nil {
failRequest(w, 400, "BadRequest",
fmt.Sprintf("unable to parse partNumber, %q, %v",
partNumStr, err))
return
}
id--
if id < 0 || id >= len(s.partHandler) {
failRequest(w, 400, "BadRequest",
fmt.Sprintf("invalid partNumber %v", id))
return
}
s.partHandler[id].ServeHTTP(w, r)
case r.Method == "POST":
// CompleteMultipartUpload
w.Header().Set("Content-Length", strconv.Itoa(len(completeUploadResp)))
w.Write([]byte(completeUploadResp))
case r.Method == "DELETE":
// AbortMultipartUpload
w.Header().Set("Content-Length", strconv.Itoa(len(abortUploadResp)))
w.WriteHeader(200)
w.Write([]byte(abortUploadResp))
default:
failRequest(w, 400, "BadRequest",
fmt.Sprintf("invalid request %v %v", r.Method, r.URL))
}
}
func failRequest(w http.ResponseWriter, status int, code, msg string) {
msg = fmt.Sprintf(baseRequestErrorResp, code, msg)
w.Header().Set("Content-Length", strconv.Itoa(len(msg)))
w.WriteHeader(status)
w.Write([]byte(msg))
}
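// successPartHandler drains the part body, checks that the bytes read match
// the Content-Length header, and replies with a canned UploadPart response.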
type successPartHandler struct {
tb testing.TB
}
func (h successPartHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
n, err := io.Copy(ioutil.Discard, r.Body)
if err != nil {
failRequest(w, 400, "BadRequest",
fmt.Sprintf("failed to read body, %v", err))
return
}
contLenStr := r.Header.Get("Content-Length")
expectLen, err := strconv.ParseInt(contLenStr, 10, 64)
if err != nil {
h.tb.Logf("expect content-length, got %q, %v", contLenStr, err)
failRequest(w, 400, "BadRequest",
fmt.Sprintf("unable to get content-length %v", err))
return
}
if e, a := expectLen, n; e != a {
h.tb.Logf("expect %v read, got %v", e, a)
failRequest(w, 400, "BadRequest",
fmt.Sprintf(
"content-length and body do not match, %v, %v", e, a))
return
}
w.Header().Set("Content-Length", strconv.Itoa(len(uploadPartResp)))
w.Write([]byte(uploadPartResp))
}
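// failPartHandler answers each UploadPart request with a 500 until
// failsRemaining reaches zero, then delegates to successHandler.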
type failPartHandler struct {
tb testing.TB
failsRemaining int
successHandler http.Handler
}
func (h *failPartHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
if h.failsRemaining == 0 && h.successHandler != nil {
h.successHandler.ServeHTTP(w, r)
return
}
io.Copy(ioutil.Discard, r.Body)
failRequest(w, 500, "InternalException",
fmt.Sprintf("mock error, partNumber %v", r.URL.Query().Get("partNumber")))
h.failsRemaining--
}
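// recordedBufferProvider implements s3manager.ReadSeekerWriteToProvider,
// recording the bytes buffered for each part and counting the cleanup callbacks.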
type recordedBufferProvider struct {
content []byte
size int
callbackCount int
}
func (r *recordedBufferProvider) GetWriteTo(seeker io.ReadSeeker) (s3manager.ReadSeekerWriteTo, func()) {
b := make([]byte, r.size)
w := &s3manager.BufferedReadSeekerWriteTo{BufferedReadSeeker: s3manager.NewBufferedReadSeeker(seeker, b)}
return w, func() {
r.content = append(r.content, b...)
r.callbackCount++
}
}
const createUploadResp = `
<CreateMultipartUploadResponse>
<Bucket>bucket</Bucket>
<Key>key</Key>
<UploadId>abc123</UploadId>
</CreateMultipartUploadResponse>
`
const uploadPartResp = `
<UploadPartResponse>
<ETag>key</ETag>
</UploadPartResponse>
`
const baseRequestErrorResp = `
<Error>
<Code>%s</Code>
<Message>%s</Message>
<RequestId>request-id</RequestId>
<HostId>host-id</HostId>
</Error>
`
const completeUploadResp = `
<CompleteMultipartUploadResponse>
<Bucket>bucket</Bucket>
<Key>key</Key>
<ETag>key</ETag>
<Location>https://bucket.us-west-2.amazonaws.com/key</Location>
<UploadId>abc123</UploadId>
</CompleteMultipartUploadResponse>
`
const abortUploadResp = `
<AbortMultipartUploadResponse>
</AbortMultipartUploadResponse>
`
| [
"\"DEBUG_BODY\""
] | [] | [
"DEBUG_BODY"
] | [] | ["DEBUG_BODY"] | go | 1 | 0 | |
runcommands/rc.go | package runcommands
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
yaml "gopkg.in/yaml.v2"
)
// Target is the struct for a copy-pasta target
type Target struct {
Name string `yaml:"name"`
Backend string `yaml:"backend"`
AccessKey string `yaml:"accesskey"`
SecretAccessKey string `yaml:"secretaccesskey"`
BucketName string `yaml:"bucketname"`
Endpoint string `yaml:"endpoint"`
Location string `yaml:"location"`
GistToken string `yaml:"gisttoken"`
GistID string `yaml:"gistID"`
}
// Config is the aggregation of current targets
type Config struct {
CurrentTarget *Target `yaml:"currenttarget"`
Targets map[string]*Target `yaml:"targets"`
}
// Update updates the config file
func Update(target, backend, accessKey, secretAccessKey, bucketName, endpoint, location, gistToken, gistID string) error {
var config *Config
var err error
config, err = Load()
if err != nil {
config = &Config{
CurrentTarget: &Target{},
Targets: make(map[string]*Target),
}
}
currentTarget := &Target{
Name: target,
Backend: backend,
AccessKey: accessKey,
SecretAccessKey: secretAccessKey,
BucketName: bucketName,
Endpoint: endpoint,
Location: location,
GistToken: gistToken,
GistID: gistID,
}
config.CurrentTarget = currentTarget
config.Targets[target] = currentTarget
configContents, err := yaml.Marshal(&config)
if err != nil {
// this err is not tested, but it should not happen either
return err
}
if err := ioutil.WriteFile(filepath.Join(os.Getenv("HOME"), ".copy-pastarc"), configContents, 0666); err != nil {
return fmt.Errorf("Unable to write ~/.copy-pastarc %s", err.Error())
}
return nil
}
// Load loads the config from a runcommands file
func Load() (*Config, error) {
var config *Config
byteContent, err := ioutil.ReadFile(filepath.Join(os.Getenv("HOME"), ".copy-pastarc"))
if err != nil {
return nil, fmt.Errorf("Unable to load the targets, please check if ~/.copy-pastarc exists %s", err.Error())
}
err = yaml.Unmarshal(byteContent, &config)
if err != nil {
return nil, fmt.Errorf("Parsing failed %s", err.Error())
}
return config, nil
}
| [
"\"HOME\"",
"\"HOME\""
] | [] | [
"HOME"
] | [] | ["HOME"] | go | 1 | 0 | |
code/worker/watermark/config.py | import os
import uuid
class Config:
CLIENT = str(uuid.uuid4())
NAME = "{username}-{queue}".format(
username=os.getenv('OS_USERNAME'),
queue='watermark')
OS_REGION_NAME = os.getenv('OS_REGION_NAME')
OS_AUTH_URL = os.getenv('OS_AUTH_URL')
OS_USERNAME = os.getenv('OS_USERNAME')
OS_PASSWORD = os.getenv('OS_PASSWORD')
API_SCHEME = os.getenv('WM_API_SCHEME')
API_ENDPOINT = os.getenv('WM_API_ENDPOINT')
API_URL = '{scheme}://{endpoint}'.format(
scheme=API_SCHEME,
endpoint=API_ENDPOINT
)
class DevelopmentConfig(Config):
DEBUG = True
API_SCHEME = "http"
API_ENDPOINT = "127.0.0.1:5000"
class TestingConfig(Config):
TESTING = True
class ProductionConfig(Config):
PRODUCTION = True
config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'production': ProductionConfig,
'default': DevelopmentConfig
}
| [] | [] | [
"OS_AUTH_URL",
"OS_REGION_NAME",
"OS_PASSWORD",
"OS_USERNAME",
"WM_API_ENDPOINT",
"WM_API_SCHEME"
] | [] | ["OS_AUTH_URL", "OS_REGION_NAME", "OS_PASSWORD", "OS_USERNAME", "WM_API_ENDPOINT", "WM_API_SCHEME"] | python | 6 | 0 | |
config/config_params.go | // Copyright (c) 2016-2019 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"errors"
"fmt"
"net"
"os"
"reflect"
"regexp"
"strconv"
"strings"
"time"
log "github.com/sirupsen/logrus"
"github.com/projectcalico/libcalico-go/lib/apiconfig"
"github.com/projectcalico/libcalico-go/lib/names"
"github.com/projectcalico/libcalico-go/lib/numorstring"
)
var (
// RegexpIfaceElemRegexp matches an individual element in the overall interface list;
// assumes the value represents a regular expression and is marked by '/' at the start
// and end and cannot have spaces
RegexpIfaceElemRegexp = regexp.MustCompile(`^\/[^\s]+\/$`)
// NonRegexpIfaceElemRegexp matches an individual element in the overall interface list;
// assumes the value is between 1-15 chars long and may only contain alphanumeric characters, '-' or '_'
NonRegexpIfaceElemRegexp = regexp.MustCompile(`^[a-zA-Z0-9_-]{1,15}$`)
IfaceListRegexp = regexp.MustCompile(`^[a-zA-Z0-9_-]{1,15}(,[a-zA-Z0-9_-]{1,15})*$`)
AuthorityRegexp = regexp.MustCompile(`^[^:/]+:\d+$`)
HostnameRegexp = regexp.MustCompile(`^[a-zA-Z0-9_.-]+$`)
StringRegexp = regexp.MustCompile(`^.*$`)
IfaceParamRegexp = regexp.MustCompile(`^[a-zA-Z0-9:._+-]{1,15}$`)
)
const (
maxUint = ^uint(0)
maxInt = int(maxUint >> 1)
minInt = -maxInt - 1
)
// Source of a config value. Values from higher-numbered sources override
// those from lower-numbered sources. Note: some parameters (such as those
// needed to connect to the datastore) can only be set from a local source.
type Source uint8
const (
Default = iota
DatastoreGlobal
DatastorePerHost
ConfigFile
EnvironmentVariable
)
var SourcesInDescendingOrder = []Source{EnvironmentVariable, ConfigFile, DatastorePerHost, DatastoreGlobal}
func (source Source) String() string {
switch source {
case Default:
return "<default>"
case DatastoreGlobal:
return "datastore (global)"
case DatastorePerHost:
return "datastore (per-host)"
case ConfigFile:
return "config file"
case EnvironmentVariable:
return "environment variable"
}
return fmt.Sprintf("<unknown(%v)>", uint8(source))
}
func (source Source) Local() bool {
switch source {
case Default, ConfigFile, EnvironmentVariable:
return true
default:
return false
}
}
// Config contains the best, parsed config values loaded from the various sources.
// We use tags to control the parsing and validation.
type Config struct {
// Configuration parameters.
UseInternalDataplaneDriver bool `config:"bool;true"`
DataplaneDriver string `config:"file(must-exist,executable);calico-iptables-plugin;non-zero,die-on-fail,skip-default-validation"`
DatastoreType string `config:"oneof(kubernetes,etcdv3);etcdv3;non-zero,die-on-fail,local"`
FelixHostname string `config:"hostname;;local,non-zero"`
EtcdAddr string `config:"authority;127.0.0.1:2379;local"`
EtcdScheme string `config:"oneof(http,https);http;local"`
EtcdKeyFile string `config:"file(must-exist);;local"`
EtcdCertFile string `config:"file(must-exist);;local"`
EtcdCaFile string `config:"file(must-exist);;local"`
EtcdEndpoints []string `config:"endpoint-list;;local"`
TyphaAddr string `config:"authority;;local"`
TyphaK8sServiceName string `config:"string;;local"`
TyphaK8sNamespace string `config:"string;kube-system;non-zero,local"`
TyphaReadTimeout time.Duration `config:"seconds;30;local"`
TyphaWriteTimeout time.Duration `config:"seconds;10;local"`
// Client-side TLS config for Felix's communication with Typha. If any of these are
// specified, they _all_ must be - except that either TyphaCN or TyphaURISAN may be left
// unset. Felix will then initiate a secure (TLS) connection to Typha. Typha must present
// a certificate signed by a CA in TyphaCAFile, and with CN matching TyphaCN or URI SAN
// matching TyphaURISAN.
TyphaKeyFile string `config:"file(must-exist);;local"`
TyphaCertFile string `config:"file(must-exist);;local"`
TyphaCAFile string `config:"file(must-exist);;local"`
TyphaCN string `config:"string;;local"`
TyphaURISAN string `config:"string;;local"`
Ipv6Support bool `config:"bool;true"`
IgnoreLooseRPF bool `config:"bool;false"`
RouteRefreshInterval time.Duration `config:"seconds;90"`
IptablesRefreshInterval time.Duration `config:"seconds;90"`
IptablesPostWriteCheckIntervalSecs time.Duration `config:"seconds;1"`
IptablesLockFilePath string `config:"file;/run/xtables.lock"`
IptablesLockTimeoutSecs time.Duration `config:"seconds;0"`
IptablesLockProbeIntervalMillis time.Duration `config:"millis;50"`
IpsetsRefreshInterval time.Duration `config:"seconds;10"`
MaxIpsetSize int `config:"int;1048576;non-zero"`
XDPRefreshInterval time.Duration `config:"seconds;90"`
PolicySyncPathPrefix string `config:"file;;"`
NetlinkTimeoutSecs time.Duration `config:"seconds;10"`
MetadataAddr string `config:"hostname;127.0.0.1;die-on-fail"`
MetadataPort int `config:"int(0,65535);8775;die-on-fail"`
OpenstackRegion string `config:"region;;die-on-fail"`
InterfacePrefix string `config:"iface-list;cali;non-zero,die-on-fail"`
InterfaceExclude []*regexp.Regexp `config:"iface-list-regexp;kube-ipvs0"`
ChainInsertMode string `config:"oneof(insert,append);insert;non-zero,die-on-fail"`
DefaultEndpointToHostAction string `config:"oneof(DROP,RETURN,ACCEPT);DROP;non-zero,die-on-fail"`
IptablesFilterAllowAction string `config:"oneof(ACCEPT,RETURN);ACCEPT;non-zero,die-on-fail"`
IptablesMangleAllowAction string `config:"oneof(ACCEPT,RETURN);ACCEPT;non-zero,die-on-fail"`
LogPrefix string `config:"string;calico-packet"`
LogFilePath string `config:"file;/var/log/calico/felix.log;die-on-fail"`
LogSeverityFile string `config:"oneof(DEBUG,INFO,WARNING,ERROR,FATAL);INFO"`
LogSeverityScreen string `config:"oneof(DEBUG,INFO,WARNING,ERROR,FATAL);INFO"`
LogSeveritySys string `config:"oneof(DEBUG,INFO,WARNING,ERROR,FATAL);INFO"`
VXLANEnabled bool `config:"bool;false"`
VXLANPort int `config:"int;4789"`
VXLANVNI int `config:"int;4096"`
VXLANMTU int `config:"int;1410;non-zero"`
IPv4VXLANTunnelAddr net.IP `config:"ipv4;"`
VXLANTunnelMACAddr string `config:"string;"`
IpInIpEnabled bool `config:"bool;false"`
IpInIpMtu int `config:"int;1440;non-zero"`
IpInIpTunnelAddr net.IP `config:"ipv4;"`
ReportingIntervalSecs time.Duration `config:"seconds;30"`
ReportingTTLSecs time.Duration `config:"seconds;90"`
EndpointReportingEnabled bool `config:"bool;false"`
EndpointReportingDelaySecs time.Duration `config:"seconds;1"`
IptablesMarkMask uint32 `config:"mark-bitmask;0xffff0000;non-zero,die-on-fail"`
DisableConntrackInvalidCheck bool `config:"bool;false"`
HealthEnabled bool `config:"bool;false"`
HealthPort int `config:"int(0,65535);9099"`
HealthHost string `config:"string;localhost"`
PrometheusMetricsEnabled bool `config:"bool;false"`
PrometheusMetricsPort int `config:"int(0,65535);9091"`
PrometheusGoMetricsEnabled bool `config:"bool;true"`
PrometheusProcessMetricsEnabled bool `config:"bool;true"`
FailsafeInboundHostPorts []ProtoPort `config:"port-list;tcp:22,udp:68,tcp:179,tcp:2379,tcp:2380,tcp:6666,tcp:6667;die-on-fail"`
FailsafeOutboundHostPorts []ProtoPort `config:"port-list;udp:53,udp:67,tcp:179,tcp:2379,tcp:2380,tcp:6666,tcp:6667;die-on-fail"`
KubeNodePortRanges []numorstring.Port `config:"portrange-list;30000:32767"`
NATPortRange numorstring.Port `config:"portrange;"`
NATOutgoingAddress net.IP `config:"ipv4;"`
UsageReportingEnabled bool `config:"bool;true"`
UsageReportingInitialDelaySecs time.Duration `config:"seconds;300"`
UsageReportingIntervalSecs time.Duration `config:"seconds;86400"`
ClusterGUID string `config:"string;baddecaf"`
ClusterType string `config:"string;"`
CalicoVersion string `config:"string;"`
ExternalNodesCIDRList []string `config:"cidr-list;;die-on-fail"`
DebugMemoryProfilePath string `config:"file;;"`
DebugCPUProfilePath string `config:"file;/tmp/felix-cpu-<timestamp>.pprof;"`
DebugDisableLogDropping bool `config:"bool;false"`
DebugSimulateCalcGraphHangAfter time.Duration `config:"seconds;0"`
DebugSimulateDataplaneHangAfter time.Duration `config:"seconds;0"`
// State tracking.
// nameToSource tracks where we loaded each config param from.
sourceToRawConfig map[Source]map[string]string
rawValues map[string]string
Err error
IptablesNATOutgoingInterfaceFilter string `config:"iface-param;"`
SidecarAccelerationEnabled bool `config:"bool;false"`
XDPEnabled bool `config:"bool;true"`
GenericXDPEnabled bool `config:"bool;false"`
}
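// ProtoPort is a protocol/port pair, the element type of port-list parameters
// such as FailsafeInboundHostPorts.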
type ProtoPort struct {
Protocol string
Port uint16
}
// Load parses and merges the rawData from one particular source into this config object.
// If there is a config value already loaded from a higher-priority source, then
// the new value will be ignored (after validation).
func (config *Config) UpdateFrom(rawData map[string]string, source Source) (changed bool, err error) {
log.Infof("Merging in config from %v: %v", source, rawData)
// Defensively take a copy of the raw data, in case we've been handed
// a mutable map by mistake.
rawDataCopy := make(map[string]string)
for k, v := range rawData {
if v == "" {
log.WithFields(log.Fields{
"name": k,
"source": source,
}).Info("Ignoring empty configuration parameter. Use value 'none' if " +
"your intention is to explicitly disable the default value.")
continue
}
rawDataCopy[k] = v
}
config.sourceToRawConfig[source] = rawDataCopy
changed, err = config.resolve()
return
}
func (c *Config) InterfacePrefixes() []string {
return strings.Split(c.InterfacePrefix, ",")
}
func (config *Config) OpenstackActive() bool {
if strings.Contains(strings.ToLower(config.ClusterType), "openstack") {
// OpenStack is explicitly known to be present. Newer versions of the OpenStack plugin
// set this flag.
log.Debug("Cluster type contains OpenStack")
return true
}
// If we get here, either OpenStack isn't present or we're running against an old version
// of the OpenStack plugin, which doesn't set the flag. Use heuristics based on the
// presence of the OpenStack-related parameters.
if config.MetadataAddr != "" && config.MetadataAddr != "127.0.0.1" {
log.Debug("OpenStack metadata IP set to non-default, assuming OpenStack active")
return true
}
if config.MetadataPort != 0 && config.MetadataPort != 8775 {
log.Debug("OpenStack metadata port set to non-default, assuming OpenStack active")
return true
}
for _, prefix := range config.InterfacePrefixes() {
if prefix == "tap" {
log.Debug("Interface prefix list contains 'tap', assuming OpenStack")
return true
}
}
log.Debug("No evidence this is an OpenStack deployment; disabling OpenStack special-cases")
return false
}
func (config *Config) resolve() (changed bool, err error) {
newRawValues := make(map[string]string)
nameToSource := make(map[string]Source)
for _, source := range SourcesInDescendingOrder {
valueLoop:
for rawName, rawValue := range config.sourceToRawConfig[source] {
currentSource := nameToSource[rawName]
param, ok := knownParams[strings.ToLower(rawName)]
if !ok {
if source >= currentSource {
// Stash the raw value in case it's useful for
// a plugin. Since we don't know the canonical
// name, use the raw name.
newRawValues[rawName] = rawValue
nameToSource[rawName] = source
}
log.WithField("raw name", rawName).Info(
"Ignoring unknown config param.")
continue valueLoop
}
metadata := param.GetMetadata()
name := metadata.Name
if metadata.Local && !source.Local() {
log.Warningf("Ignoring local-only configuration for %v from %v",
name, source)
continue valueLoop
}
log.Infof("Parsing value for %v: %v (from %v)",
name, rawValue, source)
var value interface{}
if strings.ToLower(rawValue) == "none" {
// Special case: we allow a value of "none" to force the value to
// the zero value for a field. The zero value often differs from
// the default value. Typically, the zero value means "turn off
// the feature".
if metadata.NonZero {
err = errors.New("Non-zero field cannot be set to none")
log.Errorf(
"Failed to parse value for %v: %v from source %v. %v",
name, rawValue, source, err)
config.Err = err
return
}
value = metadata.ZeroValue
log.Infof("Value set to 'none', replacing with zero-value: %#v.",
value)
} else {
value, err = param.Parse(rawValue)
if err != nil {
logCxt := log.WithError(err).WithField("source", source)
if metadata.DieOnParseFailure {
logCxt.Error("Invalid (required) config value.")
config.Err = err
return
} else {
logCxt.WithField("default", metadata.Default).Warn(
"Replacing invalid value with default")
value = metadata.Default
err = nil
}
}
}
log.Infof("Parsed value for %v: %v (from %v)",
name, value, source)
if source < currentSource {
log.Infof("Skipping config value for %v from %v; "+
"already have a value from %v", name,
source, currentSource)
continue
}
field := reflect.ValueOf(config).Elem().FieldByName(name)
field.Set(reflect.ValueOf(value))
newRawValues[name] = rawValue
nameToSource[name] = source
}
}
changed = !reflect.DeepEqual(newRawValues, config.rawValues)
config.rawValues = newRawValues
return
}
func (config *Config) setBy(name string, source Source) bool {
_, set := config.sourceToRawConfig[source][name]
return set
}
func (config *Config) setByConfigFileOrEnvironment(name string) bool {
return config.setBy(name, ConfigFile) || config.setBy(name, EnvironmentVariable)
}
func (config *Config) DatastoreConfig() apiconfig.CalicoAPIConfig {
// We want Felix's datastore connection to be fully configurable using the same
// CALICO_XXX_YYY (or just XXX_YYY) environment variables that work for any libcalico-go
// client - for both the etcdv3 and KDD cases. However, for the etcd case, Felix has for a
// long time supported FELIX_XXXYYY environment variables, and we want those to keep working
// too.
// To achieve that, first build a CalicoAPIConfig using libcalico-go's
// LoadClientConfigFromEnvironment - which means incorporating defaults and CALICO_XXX_YYY
// and XXX_YYY variables.
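// For example (hypothetical values): CALICO_ETCD_ENDPOINTS=https://etcd:2379 is picked up here
// by LoadClientConfigFromEnvironment, whereas a legacy FELIX_ETCDENDPOINTS value reaches this
// function through the EnvironmentVariable config source and is applied in the override block
// further down.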
cfg, err := apiconfig.LoadClientConfigFromEnvironment()
if err != nil {
log.WithError(err).Panic("Failed to create datastore config")
}
// Now allow FELIX_XXXYYY variables or XxxYyy config file settings to override that, in the
// etcd case.
if config.setByConfigFileOrEnvironment("DatastoreType") && config.DatastoreType == "etcdv3" {
cfg.Spec.DatastoreType = apiconfig.EtcdV3
// Endpoints.
if config.setByConfigFileOrEnvironment("EtcdEndpoints") && len(config.EtcdEndpoints) > 0 {
cfg.Spec.EtcdEndpoints = strings.Join(config.EtcdEndpoints, ",")
} else if config.setByConfigFileOrEnvironment("EtcdAddr") {
cfg.Spec.EtcdEndpoints = config.EtcdScheme + "://" + config.EtcdAddr
}
// TLS.
if config.setByConfigFileOrEnvironment("EtcdKeyFile") {
cfg.Spec.EtcdKeyFile = config.EtcdKeyFile
}
if config.setByConfigFileOrEnvironment("EtcdCertFile") {
cfg.Spec.EtcdCertFile = config.EtcdCertFile
}
if config.setByConfigFileOrEnvironment("EtcdCaFile") {
cfg.Spec.EtcdCACertFile = config.EtcdCaFile
}
}
if !config.IpInIpEnabled && !config.VXLANEnabled {
// Polling k8s for node updates is expensive (because we get many superfluous
// updates) so disable if we don't need it.
log.Info("Encap disabled, disabling node poll (if KDD is in use).")
cfg.Spec.K8sDisableNodePoll = true
}
return *cfg
}
// Validate() performs cross-field validation.
func (config *Config) Validate() (err error) {
if config.FelixHostname == "" {
err = errors.New("Failed to determine hostname")
}
if config.DatastoreType == "etcdv3" && len(config.EtcdEndpoints) == 0 {
if config.EtcdScheme == "" {
err = errors.New("EtcdEndpoints and EtcdScheme both missing")
}
if config.EtcdAddr == "" {
err = errors.New("EtcdEndpoints and EtcdAddr both missing")
}
}
// If any client-side TLS config parameters are specified, they _all_ must be - except that
// either TyphaCN or TyphaURISAN may be left unset.
if config.TyphaCAFile != "" ||
config.TyphaCertFile != "" ||
config.TyphaKeyFile != "" ||
config.TyphaCN != "" ||
config.TyphaURISAN != "" {
// Some TLS config specified.
if config.TyphaKeyFile == "" ||
config.TyphaCertFile == "" ||
config.TyphaCAFile == "" ||
(config.TyphaCN == "" && config.TyphaURISAN == "") {
err = errors.New("If any Felix-Typha TLS config parameters are specified," +
" they _all_ must be" +
" - except that either TyphaCN or TyphaURISAN may be left unset.")
}
}
if err != nil {
config.Err = err
}
return
}
var knownParams map[string]param
func loadParams() {
knownParams = make(map[string]param)
config := Config{}
kind := reflect.TypeOf(config)
metaRegexp := regexp.MustCompile(`^([^;(]+)(?:\(([^)]*)\))?;` +
`([^;]*)(?:;` +
`([^;]*))?$`)
for ii := 0; ii < kind.NumField(); ii++ {
field := kind.Field(ii)
tag := field.Tag.Get("config")
if tag == "" {
continue
}
captures := metaRegexp.FindStringSubmatch(tag)
if len(captures) == 0 {
log.Panicf("Failed to parse metadata for config param %v", field.Name)
}
log.Debugf("%v: metadata captures: %#v", field.Name, captures)
kind := captures[1] // Type: "int|oneof|bool|port-list|..."
kindParams := captures[2] // Parameters for the type: e.g. for oneof "http,https"
defaultStr := captures[3] // Default value e.g "1.0"
flags := captures[4]
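// For example, a (hypothetical) struct tag of `config:"int(0,100);80;non-zero,die-on-fail"`
// would parse as kind="int", kindParams="0,100", defaultStr="80" and flags="non-zero,die-on-fail".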
var param param
var err error
switch kind {
case "bool":
param = &BoolParam{}
case "int":
min := minInt
max := maxInt
if kindParams != "" {
minAndMax := strings.Split(kindParams, ",")
min, err = strconv.Atoi(minAndMax[0])
if err != nil {
log.Panicf("Failed to parse min value for %v", field.Name)
}
max, err = strconv.Atoi(minAndMax[1])
if err != nil {
log.Panicf("Failed to parse max value for %v", field.Name)
}
}
param = &IntParam{Min: min, Max: max}
case "int32":
param = &Int32Param{}
case "mark-bitmask":
param = &MarkBitmaskParam{}
case "float":
param = &FloatParam{}
case "seconds":
param = &SecondsParam{}
case "millis":
param = &MillisParam{}
case "iface-list":
param = &RegexpParam{Regexp: IfaceListRegexp,
Msg: "invalid Linux interface name"}
case "iface-list-regexp":
param = &RegexpPatternListParam{
NonRegexpElemRegexp: NonRegexpIfaceElemRegexp,
RegexpElemRegexp: RegexpIfaceElemRegexp,
Delimiter: ",",
Msg: "list contains invalid Linux interface name or regex pattern",
}
case "iface-param":
param = &RegexpParam{Regexp: IfaceParamRegexp,
Msg: "invalid Linux interface parameter"}
case "file":
param = &FileParam{
MustExist: strings.Contains(kindParams, "must-exist"),
Executable: strings.Contains(kindParams, "executable"),
}
case "authority":
param = &RegexpParam{Regexp: AuthorityRegexp,
Msg: "invalid URL authority"}
case "ipv4":
param = &Ipv4Param{}
case "endpoint-list":
param = &EndpointListParam{}
case "port-list":
param = &PortListParam{}
case "portrange":
param = &PortRangeParam{}
case "portrange-list":
param = &PortRangeListParam{}
case "hostname":
param = &RegexpParam{Regexp: HostnameRegexp,
Msg: "invalid hostname"}
case "region":
param = &RegionParam{}
case "oneof":
options := strings.Split(kindParams, ",")
lowerCaseToCanon := make(map[string]string)
for _, option := range options {
lowerCaseToCanon[strings.ToLower(option)] = option
}
param = &OneofListParam{
lowerCaseOptionsToCanonical: lowerCaseToCanon}
case "string":
param = &RegexpParam{Regexp: StringRegexp,
Msg: "invalid string"}
case "cidr-list":
param = &CIDRListParam{}
default:
log.Panicf("Unknown type of parameter: %v", kind)
}
metadata := param.GetMetadata()
metadata.Name = field.Name
metadata.ZeroValue = reflect.ValueOf(config).FieldByName(field.Name).Interface()
if strings.Index(flags, "non-zero") > -1 {
metadata.NonZero = true
}
if strings.Index(flags, "die-on-fail") > -1 {
metadata.DieOnParseFailure = true
}
if strings.Index(flags, "local") > -1 {
metadata.Local = true
}
if defaultStr != "" {
if strings.Index(flags, "skip-default-validation") > -1 {
metadata.Default = defaultStr
} else {
// Parse the default value and save it in the metadata. Doing
// that here ensures that we syntax-check the defaults now.
defaultVal, err := param.Parse(defaultStr)
if err != nil {
log.Panicf("Invalid default value: %v", err)
}
metadata.Default = defaultVal
}
} else {
metadata.Default = metadata.ZeroValue
}
knownParams[strings.ToLower(field.Name)] = param
}
}
func (config *Config) RawValues() map[string]string {
return config.rawValues
}
func New() *Config {
if knownParams == nil {
loadParams()
}
p := &Config{
rawValues: make(map[string]string),
sourceToRawConfig: make(map[Source]map[string]string),
}
for _, param := range knownParams {
param.setDefault(p)
}
hostname, err := names.Hostname()
if err != nil {
log.Warningf("Failed to get hostname from kernel, "+
"trying HOSTNAME variable: %v", err)
hostname = strings.ToLower(os.Getenv("HOSTNAME"))
}
p.FelixHostname = hostname
return p
}
type param interface {
GetMetadata() *Metadata
Parse(raw string) (result interface{}, err error)
setDefault(*Config)
}
| [
"\"HOSTNAME\""
] | [] | [
"HOSTNAME"
] | [] | ["HOSTNAME"] | go | 1 | 0 | |
orgbook-issuer-agent/issuer_controller/src/issuer.py | import json
import os
import threading
import time
import logging
import requests
from flask import jsonify
import config
AGENT_ADMIN_API_KEY = os.environ.get("AGENT_ADMIN_API_KEY")
ADMIN_REQUEST_HEADERS = {"Content-Type": "application/json"}
if AGENT_ADMIN_API_KEY is not None and 0 < len(AGENT_ADMIN_API_KEY):
ADMIN_REQUEST_HEADERS["x-api-key"] = AGENT_ADMIN_API_KEY
LOG_LEVEL = os.environ.get('LOG_LEVEL', 'WARNING').upper()
LOGGER = logging.getLogger(__name__)
TRACE_EVENTS = os.getenv("TRACE_EVENTS", "False").lower() == "true"
if TRACE_EVENTS:
LOGGER.setLevel(logging.INFO)
TOB_ADMIN_API_KEY = os.environ.get("TOB_ADMIN_API_KEY")
TOB_REQUEST_HEADERS = {}
if TOB_ADMIN_API_KEY is not None and 0 < len(TOB_ADMIN_API_KEY):
TOB_REQUEST_HEADERS = {"x-api-key": TOB_ADMIN_API_KEY}
# list of cred defs per schema name/version
app_config = {}
app_config["schemas"] = {}
synced = {}
MAX_RETRIES = 3
def agent_post_with_retry(url, payload, headers=None):
retries = 0
while True:
try:
# test code to test exception handling
# if retries < MAX_RETRIES:
# raise Exception("Fake exception!!!")
response = requests.post(url, payload, headers=headers)
response.raise_for_status()
return response
except Exception as e:
print("Error posting", url, e)
retries = retries + 1
if retries > MAX_RETRIES:
raise e
time.sleep(5)
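# Illustrative usage (mirrors the schema registration call made later in this module):
# response = agent_post_with_retry(agent_admin_url + "/schemas", json.dumps(schema_request),
# headers=ADMIN_REQUEST_HEADERS)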
def agent_schemas_cred_defs(agent_admin_url):
ret_schemas = {}
# get loaded cred defs and schemas
response = requests.get(
agent_admin_url + "/schemas/created", headers=ADMIN_REQUEST_HEADERS
)
response.raise_for_status()
schemas = response.json()["schema_ids"]
for schema_id in schemas:
response = requests.get(
agent_admin_url + "/schemas/" + schema_id, headers=ADMIN_REQUEST_HEADERS
)
response.raise_for_status()
schema = response.json()["schema"]
schema_key = schema["name"] + "::" + schema["version"]
ret_schemas[schema_key] = {"schema": schema, "schema_id": str(schema["seqNo"])}
response = requests.get(
agent_admin_url + "/credential-definitions/created",
headers=ADMIN_REQUEST_HEADERS,
)
response.raise_for_status()
cred_defs = response.json()["credential_definition_ids"]
for cred_def_id in cred_defs:
response = requests.get(
agent_admin_url + "/credential-definitions/" + cred_def_id,
headers=ADMIN_REQUEST_HEADERS,
)
response.raise_for_status()
cred_def = response.json()["credential_definition"]
for schema_key in ret_schemas:
if ret_schemas[schema_key]["schema_id"] == cred_def["schemaId"]:
ret_schemas[schema_key]["cred_def"] = cred_def
break
return ret_schemas
class StartupProcessingThread(threading.Thread):
global app_config
def __init__(self, ENV):
threading.Thread.__init__(self)
self.ENV = ENV
def run(self):
# read configuration files
config_root = self.ENV.get("CONFIG_ROOT", "../config")
config_schemas = config.load_config(config_root + "/schemas.yml", env=self.ENV)
config_services = config.load_config(
config_root + "/services.yml", env=self.ENV
)
# print("schemas.yml -->", json.dumps(config_schemas))
# print("services.yml -->", json.dumps(config_services))
agent_admin_url = self.ENV.get("AGENT_ADMIN_URL")
if not agent_admin_url:
raise RuntimeError(
"Error AGENT_ADMIN_URL is not specified, can't connect to Agent."
)
app_config["AGENT_ADMIN_URL"] = agent_admin_url
# get public DID from our agent
response = requests.get(
agent_admin_url + "/wallet/did/public", headers=ADMIN_REQUEST_HEADERS
)
result = response.json()
did = result["result"]
print("Fetched DID from agent: ", did)
app_config["DID"] = did["did"]
# determine pre-registered schemas and cred defs
existing_schemas = agent_schemas_cred_defs(agent_admin_url)
print("Existing schemas:", json.dumps(existing_schemas))
# register schemas and credential definitions
for schema in config_schemas:
schema_name = schema["name"]
schema_version = schema["version"]
schema_key = schema_name + "::" + schema_version
if schema_key not in existing_schemas:
schema_attrs = []
schema_descs = {}
if isinstance(schema["attributes"], dict):
# each element is a dict
for attr, desc in schema["attributes"].items():
schema_attrs.append(attr)
schema_descs[attr] = desc
else:
# assume it's an array
for attr in schema["attributes"]:
schema_attrs.append(attr)
# register our schema(s) and credential definition(s)
schema_request = {
"schema_name": schema_name,
"schema_version": schema_version,
"attributes": schema_attrs,
}
response = agent_post_with_retry(
agent_admin_url + "/schemas",
json.dumps(schema_request),
headers=ADMIN_REQUEST_HEADERS,
)
response.raise_for_status()
schema_id = response.json()
else:
schema_id = {"schema_id": existing_schemas[schema_key]["schema"]["id"]}
app_config["schemas"]["SCHEMA_" + schema_name] = schema
app_config["schemas"][
"SCHEMA_" + schema_name + "_" + schema_version
] = schema_id["schema_id"]
print("Registered schema: ", schema_id)
if (
schema_key not in existing_schemas
or "cred_def" not in existing_schemas[schema_key]
):
cred_def_request = {"schema_id": schema_id["schema_id"]}
response = agent_post_with_retry(
agent_admin_url + "/credential-definitions",
json.dumps(cred_def_request),
headers=ADMIN_REQUEST_HEADERS,
)
response.raise_for_status()
credential_definition_id = response.json()
else:
credential_definition_id = {
"credential_definition_id": existing_schemas[schema_key][
"cred_def"
]["id"]
}
app_config["schemas"][
"CRED_DEF_" + schema_name + "_" + schema_version
] = credential_definition_id["credential_definition_id"]
print("Registered credential definition: ", credential_definition_id)
# what is the TOB connection name?
tob_connection_params = config_services["verifiers"]["bctob"]
# check if we have a TOB connection
response = requests.get(
agent_admin_url + "/connections?alias=" + tob_connection_params["alias"],
headers=ADMIN_REQUEST_HEADERS,
)
response.raise_for_status()
connections = response.json()["results"]
tob_connection = None
for connection in connections:
# check for TOB connection
if connection["alias"] == tob_connection_params["alias"]:
tob_connection = connection
if not tob_connection:
# if no tob connection then establish one
tob_agent_admin_url = tob_connection_params["connection"]["agent_admin_url"]
if not tob_agent_admin_url:
raise RuntimeError(
"Error TOB_AGENT_ADMIN_URL is not specified, can't establish a TOB connection."
)
response = requests.post(
tob_agent_admin_url + "/connections/create-invitation",
headers=TOB_REQUEST_HEADERS,
)
response.raise_for_status()
invitation = response.json()
response = requests.post(
agent_admin_url
+ "/connections/receive-invitation?alias="
+ tob_connection_params["alias"],
json.dumps(invitation["invitation"]),
headers=ADMIN_REQUEST_HEADERS,
)
response.raise_for_status()
tob_connection = response.json()
print("Established tob connection: ", tob_connection)
time.sleep(5)
app_config["TOB_CONNECTION"] = tob_connection["connection_id"]
synced[tob_connection["connection_id"]] = False
for issuer_name, issuer_info in config_services["issuers"].items():
# register ourselves (issuer, schema(s), cred def(s)) with TOB
issuer_config = {
"name": issuer_name,
"did": app_config["DID"],
"config_root": config_root,
}
issuer_config.update(issuer_info)
issuer_spec = config.assemble_issuer_spec(issuer_config)
credential_types = []
for credential_type in issuer_info["credential_types"]:
schema_name = credential_type["schema"]
schema_info = app_config["schemas"]["SCHEMA_" + schema_name]
ctype_config = {
"schema_name": schema_name,
"schema_version": schema_info["version"],
"issuer_url": issuer_config["url"],
"config_root": config_root,
"credential_def_id": app_config["schemas"][
"CRED_DEF_" + schema_name + "_" + schema_info["version"]
],
}
credential_type['attributes'] = schema_info["attributes"]
ctype_config.update(credential_type)
ctype = config.assemble_credential_type_spec(ctype_config, schema_info.get("attributes"))
if ctype is not None:
credential_types.append(ctype)
issuer_request = {
"connection_id": app_config["TOB_CONNECTION"],
"issuer_registration": {
"credential_types": credential_types,
"issuer": issuer_spec,
},
}
print(json.dumps(issuer_request))
response = requests.post(
agent_admin_url + "/issuer_registration/send",
json.dumps(issuer_request),
headers=ADMIN_REQUEST_HEADERS,
)
response.raise_for_status()
response.json()
print("Registered issuer: ", issuer_name)
synced[tob_connection["connection_id"]] = True
print("Connection {} is synchronized".format(tob_connection))
def tob_connection_synced():
return (
("TOB_CONNECTION" in app_config)
and (app_config["TOB_CONNECTION"] in synced)
and (synced[app_config["TOB_CONNECTION"]])
)
def startup_init(ENV):
global app_config
thread = StartupProcessingThread(ENV)
thread.start()
credential_lock = threading.Lock()
credential_requests = {}
credential_responses = {}
credential_threads = {}
timing_lock = threading.Lock()
record_timings = True
timings = {}
def clear_stats():
global timings
timing_lock.acquire()
try:
timings = {}
finally:
timing_lock.release()
def get_stats():
timing_lock.acquire()
try:
return timings
finally:
timing_lock.release()
def log_timing_method(method, start_time, end_time, success, data=None):
if not record_timings:
return
timing_lock.acquire()
try:
elapsed_time = end_time - start_time
if not method in timings:
timings[method] = {
"total_count": 1,
"success_count": 1 if success else 0,
"fail_count": 0 if success else 1,
"min_time": elapsed_time,
"max_time": elapsed_time,
"total_time": elapsed_time,
"avg_time": elapsed_time,
"data": {},
}
else:
timings[method]["total_count"] = timings[method]["total_count"] + 1
if success:
timings[method]["success_count"] = timings[method]["success_count"] + 1
else:
timings[method]["fail_count"] = timings[method]["fail_count"] + 1
if elapsed_time > timings[method]["max_time"]:
timings[method]["max_time"] = elapsed_time
if elapsed_time < timings[method]["min_time"]:
timings[method]["min_time"] = elapsed_time
timings[method]["total_time"] = timings[method]["total_time"] + elapsed_time
timings[method]["avg_time"] = (
timings[method]["total_time"] / timings[method]["total_count"]
)
if data:
timings[method]["data"][str(timings[method]["total_count"])] = data
finally:
timing_lock.release()
def set_credential_thread_id(cred_exch_id, thread_id):
credential_lock.acquire()
try:
# add 2 records so we can x-ref
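# e.g. (hypothetical ids) credential_threads["thread-123"] -> "cred-exch-456" and
# credential_threads["cred-exch-456"] -> "thread-123", which lets handle_problem_report()
# map an agent thread id back to the originating credential exchange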
print("Set cred_exch_id, thread_id", cred_exch_id, thread_id)
credential_threads[thread_id] = cred_exch_id
credential_threads[cred_exch_id] = thread_id
finally:
credential_lock.release()
def add_credential_request(cred_exch_id):
credential_lock.acquire()
try:
# short circuit if we already have the response
if cred_exch_id in credential_responses:
return None
result_available = threading.Event()
credential_requests[cred_exch_id] = result_available
return result_available
finally:
credential_lock.release()
def add_credential_response(cred_exch_id, response):
credential_lock.acquire()
try:
credential_responses[cred_exch_id] = response
if cred_exch_id in credential_requests:
result_available = credential_requests[cred_exch_id]
result_available.set()
del credential_requests[cred_exch_id]
finally:
credential_lock.release()
def add_credential_problem_report(thread_id, response):
print("get problem report for thread", thread_id)
if thread_id in credential_threads:
cred_exch_id = credential_threads[thread_id]
add_credential_response(cred_exch_id, response)
else:
print("thread_id not found", thread_id)
# hack for now
if 1 == len(list(credential_requests.keys())):
cred_exch_id = list(credential_requests.keys())[0]
add_credential_response(cred_exch_id, response)
else:
print("darn, too many outstanding requests :-(")
print(credential_requests)
def add_credential_timeout_report(cred_exch_id):
print("add timeout report for cred", cred_exch_id)
response = {"success": False, "result": cred_exch_id + "::Error thread timeout"}
add_credential_response(cred_exch_id, response)
def add_credential_exception_report(cred_exch_id, exc):
print("add exception report for cred", cred_exch_id)
response = {"success": False, "result": cred_exch_id + "::" + str(exc)}
add_credential_response(cred_exch_id, response)
def get_credential_response(cred_exch_id):
credential_lock.acquire()
try:
if cred_exch_id in credential_responses:
response = credential_responses[cred_exch_id]
del credential_responses[cred_exch_id]
if cred_exch_id in credential_threads:
thread_id = credential_threads[cred_exch_id]
print("cleaning out cred_exch_id, thread_id", cred_exch_id, thread_id)
del credential_threads[cred_exch_id]
del credential_threads[thread_id]
return response
else:
return None
finally:
credential_lock.release()
TOPIC_CONNECTIONS = "connections"
TOPIC_CONNECTIONS_ACTIVITY = "connections_actvity"
TOPIC_CREDENTIALS = "issue_credential"
TOPIC_PRESENTATIONS = "presentations"
TOPIC_GET_ACTIVE_MENU = "get-active-menu"
TOPIC_PERFORM_MENU_ACTION = "perform-menu-action"
TOPIC_ISSUER_REGISTRATION = "issuer_registration"
TOPIC_PROBLEM_REPORT = "problem_report"
# max 45 second wait for a credential response (prevents blocking forever)
MAX_CRED_RESPONSE_TIMEOUT = 45
def handle_connections(state, message):
# TODO auto-accept?
print("handle_connections()", state)
return jsonify({"message": state})
def handle_credentials(state, message):
# TODO auto-respond to proof requests
print("handle_credentials()", state, message["credential_exchange_id"])
# TODO new "stored" state is being added by Nick
if "thread_id" in message:
set_credential_thread_id(
message["credential_exchange_id"], message["thread_id"]
)
if state == "credential_acked":
response = {"success": True, "result": message["credential_exchange_id"]}
add_credential_response(message["credential_exchange_id"], response)
return jsonify({"message": state})
def handle_presentations(state, message):
# TODO auto-respond to proof requests
print("handle_presentations()", state)
return jsonify({"message": state})
def handle_get_active_menu(message):
# TODO add/update issuer info?
print("handle_get_active_menu()", message)
return jsonify({})
def handle_perform_menu_action(message):
# TODO add/update issuer info?
print("handle_perform_menu_action()", message)
return jsonify({})
def handle_register_issuer(message):
# TODO add/update issuer info?
print("handle_register_issuer()")
return jsonify({})
def handle_problem_report(message):
print("handle_problem_report()", message)
msg = message["~thread"]["thid"] + "::" + message["explain-ltxt"]
response = {"success": False, "result": msg}
add_credential_problem_report(message["~thread"]["thid"], response)
return jsonify({})
class SendCredentialThread(threading.Thread):
def __init__(self, credential_definition_id, cred_offer, url, headers):
threading.Thread.__init__(self)
self.credential_definition_id = credential_definition_id
self.cred_offer = cred_offer
self.url = url
self.headers = headers
def run(self):
start_time = time.perf_counter()
method = "submit_credential.credential"
cred_data = None
try:
response = requests.post(
self.url, json.dumps(self.cred_offer), headers=self.headers
)
response.raise_for_status()
cred_data = response.json()
result_available = add_credential_request(
cred_data["credential_exchange_id"]
)
# print(
# "Sent offer",
# cred_data["credential_exchange_id"],
# cred_data["connection_id"],
# )
# wait for confirmation from the agent, which will include the credential exchange id
if result_available and not result_available.wait(
MAX_CRED_RESPONSE_TIMEOUT
):
add_credential_timeout_report(cred_data["credential_exchange_id"])
end_time = time.perf_counter()
print(
"Got credential TIMEOUT:",
cred_data["credential_exchange_id"],
cred_data["connection_id"],
)
log_timing_method(
method,
start_time,
end_time,
False,
data={
"thread_id": cred_data["thread_id"],
"credential_exchange_id": cred_data["credential_exchange_id"],
"Error": "Timeout",
"elapsed_time": (end_time - start_time),
},
)
else:
# print(
# "Got credential response:",
# cred_data["credential_exchange_id"],
# cred_data["connection_id"],
# )
end_time = time.perf_counter()
log_timing_method(method, start_time, end_time, True)
pass
except Exception as exc:
print(exc)
end_time = time.perf_counter()
# if cred_data is not set we don't have a credential to set status for
if cred_data:
add_credential_exception_report(
cred_data["credential_exchange_id"], exc
)
data = {
"thread_id": cred_data["thread_id"],
"credential_exchange_id": cred_data["credential_exchange_id"],
"Error": str(exc),
"elapsed_time": (end_time - start_time),
}
else:
data = {"Error": str(exc), "elapsed_time": (end_time - start_time)}
log_timing_method(method, start_time, end_time, False, data=data)
# don't re-raise; we want to log the exception as the credential error response
self.cred_response = get_credential_response(
cred_data["credential_exchange_id"]
)
processing_time = end_time - start_time
# print("Got response", self.cred_response, "time=", processing_time)
def handle_send_credential(cred_input):
"""
# other sample data
sample_credentials = [
{
"schema": "ian-registration.ian-ville",
"version": "1.0.0",
"attributes": {
"corp_num": "ABC12345",
"registration_date": "2018-01-01",
"entity_name": "Ima Permit",
"entity_name_effective": "2018-01-01",
"entity_status": "ACT",
"entity_status_effective": "2019-01-01",
"entity_type": "ABC",
"registered_jurisdiction": "BC",
"effective_date": "2019-01-01",
"expiry_date": ""
}
},
{
"schema": "ian-permit.ian-ville",
"version": "1.0.0",
"attributes": {
"permit_id": str(uuid.uuid4()),
"entity_name": "Ima Permit",
"corp_num": "ABC12345",
"permit_issued_date": "2018-01-01",
"permit_type": "ABC",
"permit_status": "OK",
"effective_date": "2019-01-01"
}
}
]
"""
# construct and send the credential
# print("Received credentials", cred_input)
global app_config
agent_admin_url = app_config["AGENT_ADMIN_URL"]
start_time = time.perf_counter()
processing_time = 0
processed_count = 0
# let's send a credential!
cred_responses = []
for credential in cred_input:
cred_def_key = "CRED_DEF_" + credential["schema"] + "_" + credential["version"]
credential_definition_id = app_config["schemas"][cred_def_key]
schema_name = credential["schema"]
schema_info = app_config["schemas"]["SCHEMA_" + schema_name]
schema_version = schema_info["version"]
schema_id = app_config["schemas"][
"SCHEMA_" + schema_name + "_" + schema_version
]
cred_req = {
"schema_issuer_did": app_config["DID"],
"issuer_did": app_config["DID"],
"schema_name": schema_name,
"cred_def_id": credential_definition_id,
"schema_version": schema_version,
"credential_proposal": {
"@type": "did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/issue-credential/1.0/credential-preview",
"attributes": [
{"name": attr_name, "value": attr_value}
for attr_name, attr_value in credential["attributes"].items()
],
},
"connection_id": app_config["TOB_CONNECTION"],
# "comment": "string",
"schema_id": schema_id,
}
if TRACE_EVENTS:
cred_req["trace"] = True
thread = SendCredentialThread(
credential_definition_id,
cred_req,
agent_admin_url + "/issue-credential/send",
ADMIN_REQUEST_HEADERS,
)
thread.start()
thread.join()
cred_responses.append(thread.cred_response)
processed_count = processed_count + 1
processing_time = time.perf_counter() - start_time
print(">>> Processed", processed_count, "credentials in", processing_time)
print(" ", processing_time / processed_count, "seconds per credential")
return jsonify(cred_responses) | [] | [] | [
"AGENT_ADMIN_API_KEY",
"TOB_ADMIN_API_KEY",
"TRACE_EVENTS",
"LOG_LEVEL"
] | [] | ["AGENT_ADMIN_API_KEY", "TOB_ADMIN_API_KEY", "TRACE_EVENTS", "LOG_LEVEL"] | python | 4 | 0 | |
test.py | import os
import cv2
import argparse
import numpy as np
import tensorflow as tf
import yolo.config as cfg
from yolo.yolo_net import YOLONet
from utils.timer import Timer
class Detector(object):
def __init__(self, net, weight_file):
self.net = net
self.weights_file = weight_file
self.classes = cfg.CLASSES
self.num_class = len(self.classes)
self.image_size = cfg.IMAGE_SIZE
self.cell_size = cfg.CELL_SIZE
self.boxes_per_cell = cfg.BOXES_PER_CELL
self.threshold = cfg.THRESHOLD
self.iou_threshold = cfg.IOU_THRESHOLD
self.boundary1 = self.cell_size * self.cell_size * self.num_class
self.boundary2 = self.boundary1 +\
self.cell_size * self.cell_size * self.boxes_per_cell
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
print('Restoring weights from: ' + self.weights_file)
self.saver = tf.train.Saver()
self.saver.restore(self.sess, self.weights_file)
def draw_result(self, img, result):
for i in range(len(result)):
x = int(result[i][1])
y = int(result[i][2])
w = int(result[i][3] / 2)
h = int(result[i][4] / 2)
cv2.rectangle(img, (x - w, y - h), (x + w, y + h), (0, 255, 0), 2)
cv2.rectangle(img, (x - w, y - h - 20),
(x + w, y - h), (125, 125, 125), -1)
lineType = cv2.LINE_AA if cv2.__version__ > '3' else cv2.CV_AA
cv2.putText(
img, result[i][0] + ' : %.2f' % result[i][5],
(x - w + 5, y - h - 7), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
(0, 0, 0), 1, lineType)
def detect(self, img):
img_h, img_w, _ = img.shape
inputs = cv2.resize(img, (self.image_size, self.image_size))
inputs = cv2.cvtColor(inputs, cv2.COLOR_BGR2RGB).astype(np.float32)
inputs = (inputs / 255.0) * 2.0 - 1.0
inputs = np.reshape(inputs, (1, self.image_size, self.image_size, 3))
result = self.detect_from_cvmat(inputs)[0]
for i in range(len(result)):
result[i][1] *= (1.0 * img_w / self.image_size)
result[i][2] *= (1.0 * img_h / self.image_size)
result[i][3] *= (1.0 * img_w / self.image_size)
result[i][4] *= (1.0 * img_h / self.image_size)
return result
def detect_from_cvmat(self, inputs):
net_output = self.sess.run(self.net.logits,
feed_dict={self.net.images: inputs})
results = []
for i in range(net_output.shape[0]):
results.append(self.interpret_output(net_output[i]))
return results
def interpret_output(self, output):
probs = np.zeros((self.cell_size, self.cell_size,
self.boxes_per_cell, self.num_class))
class_probs = np.reshape(
output[0:self.boundary1],
(self.cell_size, self.cell_size, self.num_class))
scales = np.reshape(
output[self.boundary1:self.boundary2],
(self.cell_size, self.cell_size, self.boxes_per_cell))
boxes = np.reshape(
output[self.boundary2:],
(self.cell_size, self.cell_size, self.boxes_per_cell, 4))
offset = np.array(
[np.arange(self.cell_size)] * self.cell_size * self.boxes_per_cell)
offset = np.transpose(
np.reshape(
offset,
[self.boxes_per_cell, self.cell_size, self.cell_size]),
(1, 2, 0))
boxes[:, :, :, 0] += offset
boxes[:, :, :, 1] += np.transpose(offset, (1, 0, 2))
boxes[:, :, :, :2] = 1.0 * boxes[:, :, :, 0:2] / self.cell_size
boxes[:, :, :, 2:] = np.square(boxes[:, :, :, 2:]) # undo the sqrt encoding to restore width and height
boxes *= self.image_size
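# At this point each box is (x_center, y_center, w, h) in pixels of the network input
# (self.image_size); detect() rescales these to the original image dimensions afterwards.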
for i in range(self.boxes_per_cell):
for j in range(self.num_class):
probs[:, :, i, j] = np.multiply(
class_probs[:, :, j], scales[:, :, i])
filter_mat_probs = np.array(probs >= self.threshold, dtype='bool')
filter_mat_boxes = np.nonzero(filter_mat_probs)
boxes_filtered = boxes[filter_mat_boxes[0],
filter_mat_boxes[1], filter_mat_boxes[2]]
probs_filtered = probs[filter_mat_probs]
classes_num_filtered = np.argmax(
filter_mat_probs, axis=3)[
filter_mat_boxes[0], filter_mat_boxes[1], filter_mat_boxes[2]]
argsort = np.array(np.argsort(probs_filtered))[::-1]
boxes_filtered = boxes_filtered[argsort]
probs_filtered = probs_filtered[argsort]
classes_num_filtered = classes_num_filtered[argsort]
for i in range(len(boxes_filtered)):
if probs_filtered[i] == 0:
continue
for j in range(i + 1, len(boxes_filtered)):
if self.iou(boxes_filtered[i], boxes_filtered[j]) > self.iou_threshold:
probs_filtered[j] = 0.0
filter_iou = np.array(probs_filtered > 0.0, dtype='bool')
boxes_filtered = boxes_filtered[filter_iou]
probs_filtered = probs_filtered[filter_iou]
classes_num_filtered = classes_num_filtered[filter_iou]
result = []
for i in range(len(boxes_filtered)):
result.append(
[self.classes[classes_num_filtered[i]],
boxes_filtered[i][0],
boxes_filtered[i][1],
boxes_filtered[i][2],
boxes_filtered[i][3],
probs_filtered[i]])
return result
def iou(self, box1, box2):
tb = min(box1[0] + 0.5 * box1[2], box2[0] + 0.5 * box2[2]) - \
max(box1[0] - 0.5 * box1[2], box2[0] - 0.5 * box2[2])
lr = min(box1[1] + 0.5 * box1[3], box2[1] + 0.5 * box2[3]) - \
max(box1[1] - 0.5 * box1[3], box2[1] - 0.5 * box2[3])
inter = 0 if tb < 0 or lr < 0 else tb * lr
return inter / (box1[2] * box1[3] + box2[2] * box2[3] - inter)
def camera_detector(self, cap, wait=10):
detect_timer = Timer()
ret, _ = cap.read()
while ret:
ret, frame = cap.read()
detect_timer.tic()
result = self.detect(frame)
detect_timer.toc()
print('Average detecting time: {:.3f}s'.format(
detect_timer.average_time))
self.draw_result(frame, result)
cv2.imshow('Camera', frame)
cv2.waitKey(wait)
ret, frame = cap.read()
def image_detector(self, imname, wait=0):
detect_timer = Timer()
image = cv2.imread(imname)
detect_timer.tic()
result = self.detect(image)
detect_timer.toc()
print('Average detecting time: {:.3f}s'.format(
detect_timer.average_time))
self.draw_result(image, result)
cv2.imshow('Image', image)
cv2.waitKey(wait)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--weights', default="YOLO_small.ckpt", type=str)
parser.add_argument('--weight_dir', default='weights', type=str)
parser.add_argument('--data_dir', default="data", type=str)
parser.add_argument('--gpu', default='', type=str)
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
yolo = YOLONet(False)
weight_file = os.path.join(args.data_dir, args.weight_dir, args.weights)
detector = Detector(yolo, weight_file)
# detect from camera
# cap = cv2.VideoCapture(-1)
# detector.camera_detector(cap)
# detect from image file
imname = 'test/person.jpg'
detector.image_detector(imname)
if __name__ == '__main__':
main()
| [] | [] | [
"CUDA_VISIBLE_DEVICES"
] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
vendor/gopkg.in/h2non/bimg.v1/vips.go | package bimg
/*
#cgo pkg-config: vips
#include "vips.h"
*/
import "C"
import (
"errors"
"fmt"
"math"
"os"
"runtime"
"strings"
"sync"
"unsafe"
d "github.com/tj/go-debug"
)
// debug is internally used to emit debug log messages for the bimg package
var debug = d.Debug("bimg")
// VipsVersion exposes the current libvips semantic version
const VipsVersion = string(C.VIPS_VERSION)
// VipsMajorVersion exposes the current libvips major version number
const VipsMajorVersion = int(C.VIPS_MAJOR_VERSION)
// VipsMinorVersion exposes the current libvips minor version number
const VipsMinorVersion = int(C.VIPS_MINOR_VERSION)
const (
maxCacheMem = 100 * 1024 * 1024
maxCacheSize = 500
)
var (
m sync.Mutex
initialized bool
)
// VipsMemoryInfo represents the memory stats provided by libvips.
type VipsMemoryInfo struct {
Memory int64
MemoryHighwater int64
Allocations int64
}
// vipsSaveOptions represents the internal option used to talk with libvips.
type vipsSaveOptions struct {
Quality int
Compression int
Type ImageType
Interlace bool
NoProfile bool
StripMetadata bool
OutputICC string // Absolute path to the output ICC profile
Interpretation Interpretation
}
type vipsWatermarkOptions struct {
Width C.int
DPI C.int
Margin C.int
NoReplicate C.int
Opacity C.float
Background [3]C.double
}
type vipsWatermarkImageOptions struct {
Left C.int
Top C.int
Opacity C.float
}
type vipsWatermarkTextOptions struct {
Text *C.char
Font *C.char
}
func init() {
Initialize()
}
// Initialize is used to explicitly start libvips in thread-safe way.
// Only call this function if you have previously turned off libvips.
func Initialize() {
if C.VIPS_MAJOR_VERSION <= 7 && C.VIPS_MINOR_VERSION < 40 {
panic("unsupported libvips version!")
}
m.Lock()
runtime.LockOSThread()
defer m.Unlock()
defer runtime.UnlockOSThread()
err := C.vips_init(C.CString("bimg"))
if err != 0 {
panic("unable to start vips!")
}
// Set libvips cache params
C.vips_cache_set_max_mem(maxCacheMem)
C.vips_cache_set_max(maxCacheSize)
// Define a custom thread concurrency limit in libvips (this may generate thread-unsafe issues)
// See: https://github.com/jcupitt/libvips/issues/261#issuecomment-92850414
if os.Getenv("VIPS_CONCURRENCY") == "" {
C.vips_concurrency_set(1)
}
// Enable libvips cache tracing
if os.Getenv("VIPS_TRACE") != "" {
C.vips_enable_cache_set_trace()
}
initialized = true
}
// Shutdown is used to shutdown libvips in a thread-safe way.
// You can call this to drop caches as well.
// If libvips was not initialized, this function is a no-op.
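// A typical (illustrative) sequence when cycling libvips manually is:
//   bimg.Shutdown()   // stop libvips and drop its caches
//   bimg.Initialize() // restart before issuing further image operations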
func Shutdown() {
m.Lock()
defer m.Unlock()
if initialized {
C.vips_shutdown()
initialized = false
}
}
// VipsCacheSetMaxMem Sets the maximum amount of tracked memory allowed before the vips operation cache
// begins to drop entries.
func VipsCacheSetMaxMem(maxCacheMem int) {
C.vips_cache_set_max_mem(C.size_t(maxCacheMem))
}
// VipsCacheSetMax sets the maximum number of operations to keep in the vips operation cache.
func VipsCacheSetMax(maxCacheSize int) {
C.vips_cache_set_max(C.int(maxCacheSize))
}
// VipsCacheDropAll drops the vips operation cache, freeing the allocated memory.
func VipsCacheDropAll() {
C.vips_cache_drop_all()
}
// VipsDebugInfo writes libvips' collected internal data to stdout. Useful for debugging.
func VipsDebugInfo() {
C.im__print_all()
}
// VipsMemory gets memory info stats from libvips (cache size, memory allocs...)
func VipsMemory() VipsMemoryInfo {
return VipsMemoryInfo{
Memory: int64(C.vips_tracked_get_mem()),
MemoryHighwater: int64(C.vips_tracked_get_mem_highwater()),
Allocations: int64(C.vips_tracked_get_allocs()),
}
}
// VipsIsTypeSupported returns true if the given image type
// is supported by the current libvips compilation.
func VipsIsTypeSupported(t ImageType) bool {
if t == JPEG {
return int(C.vips_type_find_bridge(C.JPEG)) != 0
}
if t == WEBP {
return int(C.vips_type_find_bridge(C.WEBP)) != 0
}
if t == PNG {
return int(C.vips_type_find_bridge(C.PNG)) != 0
}
if t == GIF {
return int(C.vips_type_find_bridge(C.GIF)) != 0
}
if t == PDF {
return int(C.vips_type_find_bridge(C.PDF)) != 0
}
if t == SVG {
return int(C.vips_type_find_bridge(C.SVG)) != 0
}
if t == TIFF {
return int(C.vips_type_find_bridge(C.TIFF)) != 0
}
if t == MAGICK {
return int(C.vips_type_find_bridge(C.MAGICK)) != 0
}
return false
}
// VipsIsTypeSupportedSave returns true if the given image type
// is supported by the current libvips compilation for the
// save operation.
func VipsIsTypeSupportedSave(t ImageType) bool {
if t == JPEG {
return int(C.vips_type_find_save_bridge(C.JPEG)) != 0
}
if t == WEBP {
return int(C.vips_type_find_save_bridge(C.WEBP)) != 0
}
if t == PNG {
return int(C.vips_type_find_save_bridge(C.PNG)) != 0
}
if t == TIFF {
return int(C.vips_type_find_save_bridge(C.TIFF)) != 0
}
return false
}
func vipsExifOrientation(image *C.VipsImage) int {
return int(C.vips_exif_orientation(image))
}
func vipsHasAlpha(image *C.VipsImage) bool {
return int(C.has_alpha_channel(image)) > 0
}
func vipsHasProfile(image *C.VipsImage) bool {
return int(C.has_profile_embed(image)) > 0
}
func vipsWindowSize(name string) float64 {
cname := C.CString(name)
defer C.free(unsafe.Pointer(cname))
return float64(C.interpolator_window_size(cname))
}
func vipsSpace(image *C.VipsImage) string {
return C.GoString(C.vips_enum_nick_bridge(image))
}
func vipsRotate(image *C.VipsImage, angle Angle) (*C.VipsImage, error) {
var out *C.VipsImage
defer C.g_object_unref(C.gpointer(image))
err := C.vips_rotate(image, &out, C.int(angle))
if err != 0 {
return nil, catchVipsError()
}
return out, nil
}
func vipsFlip(image *C.VipsImage, direction Direction) (*C.VipsImage, error) {
var out *C.VipsImage
defer C.g_object_unref(C.gpointer(image))
err := C.vips_flip_bridge(image, &out, C.int(direction))
if err != 0 {
return nil, catchVipsError()
}
return out, nil
}
func vipsZoom(image *C.VipsImage, zoom int) (*C.VipsImage, error) {
var out *C.VipsImage
defer C.g_object_unref(C.gpointer(image))
err := C.vips_zoom_bridge(image, &out, C.int(zoom), C.int(zoom))
if err != 0 {
return nil, catchVipsError()
}
return out, nil
}
func vipsWatermark(image *C.VipsImage, w Watermark) (*C.VipsImage, error) {
var out *C.VipsImage
// Defaults
noReplicate := 0
if w.NoReplicate {
noReplicate = 1
}
text := C.CString(w.Text)
font := C.CString(w.Font)
background := [3]C.double{C.double(w.Background.R), C.double(w.Background.G), C.double(w.Background.B)}
textOpts := vipsWatermarkTextOptions{text, font}
opts := vipsWatermarkOptions{C.int(w.Width), C.int(w.DPI), C.int(w.Margin), C.int(noReplicate), C.float(w.Opacity), background}
defer C.free(unsafe.Pointer(text))
defer C.free(unsafe.Pointer(font))
err := C.vips_watermark(image, &out, (*C.WatermarkTextOptions)(unsafe.Pointer(&textOpts)), (*C.WatermarkOptions)(unsafe.Pointer(&opts)))
if err != 0 {
return nil, catchVipsError()
}
return out, nil
}
func vipsRead(buf []byte) (*C.VipsImage, ImageType, error) {
var image *C.VipsImage
imageType := vipsImageType(buf)
if imageType == UNKNOWN {
return nil, UNKNOWN, errors.New("Unsupported image format")
}
length := C.size_t(len(buf))
imageBuf := unsafe.Pointer(&buf[0])
err := C.vips_init_image(imageBuf, length, C.int(imageType), &image)
if err != 0 {
return nil, UNKNOWN, catchVipsError()
}
return image, imageType, nil
}
func vipsColourspaceIsSupportedBuffer(buf []byte) (bool, error) {
image, _, err := vipsRead(buf)
if err != nil {
return false, err
}
C.g_object_unref(C.gpointer(image))
return vipsColourspaceIsSupported(image), nil
}
func vipsColourspaceIsSupported(image *C.VipsImage) bool {
return int(C.vips_colourspace_issupported_bridge(image)) == 1
}
func vipsInterpretationBuffer(buf []byte) (Interpretation, error) {
image, _, err := vipsRead(buf)
if err != nil {
return InterpretationError, err
}
C.g_object_unref(C.gpointer(image))
return vipsInterpretation(image), nil
}
func vipsInterpretation(image *C.VipsImage) Interpretation {
return Interpretation(C.vips_image_guess_interpretation_bridge(image))
}
func vipsFlattenBackground(image *C.VipsImage, background Color) (*C.VipsImage, error) {
var outImage *C.VipsImage
backgroundC := [3]C.double{
C.double(background.R),
C.double(background.G),
C.double(background.B),
}
if vipsHasAlpha(image) {
err := C.vips_flatten_background_brigde(image, &outImage,
backgroundC[0], backgroundC[1], backgroundC[2])
if int(err) != 0 {
return nil, catchVipsError()
}
C.g_object_unref(C.gpointer(image))
image = outImage
}
return image, nil
}
func vipsPreSave(image *C.VipsImage, o *vipsSaveOptions) (*C.VipsImage, error) {
var outImage *C.VipsImage
// Remove ICC profile metadata
if o.NoProfile {
C.remove_profile(image)
}
// Use a default interpretation and cast it to C type
if o.Interpretation == 0 {
o.Interpretation = InterpretationSRGB
}
interpretation := C.VipsInterpretation(o.Interpretation)
// Apply the proper colour space
if vipsColourspaceIsSupported(image) {
err := C.vips_colourspace_bridge(image, &outImage, interpretation)
if int(err) != 0 {
return nil, catchVipsError()
}
image = outImage
}
if o.OutputICC != "" && vipsHasProfile(image) {
debug("Embedded ICC profile found, trying to convert to %s", o.OutputICC)
outputIccPath := C.CString(o.OutputICC)
defer C.free(unsafe.Pointer(outputIccPath))
err := C.vips_icc_transform_bridge(image, &outImage, outputIccPath)
if int(err) != 0 {
return nil, catchVipsError()
}
image = outImage
}
return image, nil
}
func vipsSave(image *C.VipsImage, o vipsSaveOptions) ([]byte, error) {
defer C.g_object_unref(C.gpointer(image))
tmpImage, err := vipsPreSave(image, &o)
if err != nil {
return nil, err
}
// When an image has an unsupported color space, vipsPreSave
// returns the pointer of the image passed to it unmodified.
// When this occurs, we must take care to not dereference the
// original image a second time; we may otherwise erroneously
// free the object twice.
if tmpImage != image {
defer C.g_object_unref(C.gpointer(tmpImage))
}
length := C.size_t(0)
saveErr := C.int(0)
interlace := C.int(boolToInt(o.Interlace))
quality := C.int(o.Quality)
strip := C.int(boolToInt(o.StripMetadata))
if o.Type != 0 && !IsTypeSupportedSave(o.Type) {
return nil, fmt.Errorf("VIPS cannot save to %#v", ImageTypes[o.Type])
}
var ptr unsafe.Pointer
switch o.Type {
case WEBP:
saveErr = C.vips_webpsave_bridge(tmpImage, &ptr, &length, strip, quality)
case PNG:
saveErr = C.vips_pngsave_bridge(tmpImage, &ptr, &length, strip, C.int(o.Compression), quality, interlace)
case TIFF:
saveErr = C.vips_tiffsave_bridge(tmpImage, &ptr, &length)
default:
saveErr = C.vips_jpegsave_bridge(tmpImage, &ptr, &length, strip, quality, interlace)
}
if int(saveErr) != 0 {
return nil, catchVipsError()
}
buf := C.GoBytes(ptr, C.int(length))
// Clean up
C.g_free(C.gpointer(ptr))
C.vips_error_clear()
return buf, nil
}
func getImageBuffer(image *C.VipsImage) ([]byte, error) {
var ptr unsafe.Pointer
length := C.size_t(0)
interlace := C.int(0)
quality := C.int(100)
err := C.int(0)
err = C.vips_jpegsave_bridge(image, &ptr, &length, 1, quality, interlace)
if int(err) != 0 {
return nil, catchVipsError()
}
defer C.g_free(C.gpointer(ptr))
defer C.vips_error_clear()
return C.GoBytes(ptr, C.int(length)), nil
}
func vipsExtract(image *C.VipsImage, left, top, width, height int) (*C.VipsImage, error) {
var buf *C.VipsImage
defer C.g_object_unref(C.gpointer(image))
if width > MaxSize || height > MaxSize {
return nil, errors.New("Maximum image size exceeded")
}
top, left = max(top), max(left)
err := C.vips_extract_area_bridge(image, &buf, C.int(left), C.int(top), C.int(width), C.int(height))
if err != 0 {
return nil, catchVipsError()
}
return buf, nil
}
func vipsSmartCrop(image *C.VipsImage, width, height int) (*C.VipsImage, error) {
var buf *C.VipsImage
defer C.g_object_unref(C.gpointer(image))
if width > MaxSize || height > MaxSize {
return nil, errors.New("Maximum image size exceeded")
}
err := C.vips_smartcrop_bridge(image, &buf, C.int(width), C.int(height))
if err != 0 {
return nil, catchVipsError()
}
return buf, nil
}
func vipsTrim(image *C.VipsImage) (int, int, int, int, error) {
defer C.g_object_unref(C.gpointer(image))
top := C.int(0)
left := C.int(0)
width := C.int(0)
height := C.int(0)
err := C.vips_find_trim_bridge(image, &top, &left, &width, &height)
if err != 0 {
return 0, 0, 0, 0, catchVipsError()
}
return int(top), int(left), int(width), int(height), nil
}
func vipsShrinkJpeg(buf []byte, input *C.VipsImage, shrink int) (*C.VipsImage, error) {
var image *C.VipsImage
var ptr = unsafe.Pointer(&buf[0])
defer C.g_object_unref(C.gpointer(input))
err := C.vips_jpegload_buffer_shrink(ptr, C.size_t(len(buf)), &image, C.int(shrink))
if err != 0 {
return nil, catchVipsError()
}
return image, nil
}
func vipsShrink(input *C.VipsImage, shrink int) (*C.VipsImage, error) {
var image *C.VipsImage
defer C.g_object_unref(C.gpointer(input))
err := C.vips_shrink_bridge(input, &image, C.double(float64(shrink)), C.double(float64(shrink)))
if err != 0 {
return nil, catchVipsError()
}
return image, nil
}
func vipsReduce(input *C.VipsImage, xshrink float64, yshrink float64) (*C.VipsImage, error) {
var image *C.VipsImage
defer C.g_object_unref(C.gpointer(input))
err := C.vips_reduce_bridge(input, &image, C.double(xshrink), C.double(yshrink))
if err != 0 {
return nil, catchVipsError()
}
return image, nil
}
func vipsEmbed(input *C.VipsImage, left, top, width, height int, extend Extend, background Color) (*C.VipsImage, error) {
var image *C.VipsImage
// Max extend value, see: http://www.vips.ecs.soton.ac.uk/supported/8.4/doc/html/libvips/libvips-conversion.html#VipsExtend
if extend > 5 {
extend = ExtendBackground
}
defer C.g_object_unref(C.gpointer(input))
err := C.vips_embed_bridge(input, &image, C.int(left), C.int(top), C.int(width),
C.int(height), C.int(extend), C.double(background.R), C.double(background.G), C.double(background.B))
if err != 0 {
return nil, catchVipsError()
}
return image, nil
}
func vipsAffine(input *C.VipsImage, residualx, residualy float64, i Interpolator) (*C.VipsImage, error) {
var image *C.VipsImage
cstring := C.CString(i.String())
interpolator := C.vips_interpolate_new(cstring)
defer C.free(unsafe.Pointer(cstring))
defer C.g_object_unref(C.gpointer(input))
defer C.g_object_unref(C.gpointer(interpolator))
err := C.vips_affine_interpolator(input, &image, C.double(residualx), 0, 0, C.double(residualy), interpolator)
if err != 0 {
return nil, catchVipsError()
}
return image, nil
}
func vipsImageType(buf []byte) ImageType {
if len(buf) < 12 {
return UNKNOWN
}
if buf[0] == 0xFF && buf[1] == 0xD8 && buf[2] == 0xFF {
return JPEG
}
if IsTypeSupported(GIF) && buf[0] == 0x47 && buf[1] == 0x49 && buf[2] == 0x46 {
return GIF
}
if buf[0] == 0x89 && buf[1] == 0x50 && buf[2] == 0x4E && buf[3] == 0x47 {
return PNG
}
if IsTypeSupported(TIFF) &&
((buf[0] == 0x49 && buf[1] == 0x49 && buf[2] == 0x2A && buf[3] == 0x0) ||
(buf[0] == 0x4D && buf[1] == 0x4D && buf[2] == 0x0 && buf[3] == 0x2A)) {
return TIFF
}
if IsTypeSupported(PDF) && buf[0] == 0x25 && buf[1] == 0x50 && buf[2] == 0x44 && buf[3] == 0x46 {
return PDF
}
if IsTypeSupported(WEBP) && buf[8] == 0x57 && buf[9] == 0x45 && buf[10] == 0x42 && buf[11] == 0x50 {
return WEBP
}
if IsTypeSupported(SVG) && IsSVGImage(buf) {
return SVG
}
if IsTypeSupported(MAGICK) && strings.HasSuffix(readImageType(buf), "MagickBuffer") {
return MAGICK
}
return UNKNOWN
}
func readImageType(buf []byte) string {
length := C.size_t(len(buf))
imageBuf := unsafe.Pointer(&buf[0])
load := C.vips_foreign_find_load_buffer(imageBuf, length)
return C.GoString(load)
}
func catchVipsError() error {
s := C.GoString(C.vips_error_buffer())
C.vips_error_clear()
C.vips_thread_shutdown()
return errors.New(s)
}
func boolToInt(b bool) int {
if b {
return 1
}
return 0
}
func vipsGaussianBlur(image *C.VipsImage, o GaussianBlur) (*C.VipsImage, error) {
var out *C.VipsImage
defer C.g_object_unref(C.gpointer(image))
err := C.vips_gaussblur_bridge(image, &out, C.double(o.Sigma), C.double(o.MinAmpl))
if err != 0 {
return nil, catchVipsError()
}
return out, nil
}
func vipsSharpen(image *C.VipsImage, o Sharpen) (*C.VipsImage, error) {
var out *C.VipsImage
defer C.g_object_unref(C.gpointer(image))
err := C.vips_sharpen_bridge(image, &out, C.int(o.Radius), C.double(o.X1), C.double(o.Y2), C.double(o.Y3), C.double(o.M1), C.double(o.M2))
if err != 0 {
return nil, catchVipsError()
}
return out, nil
}
func max(x int) int {
return int(math.Max(float64(x), 0))
}
func vipsDrawWatermark(image *C.VipsImage, o WatermarkImage) (*C.VipsImage, error) {
var out *C.VipsImage
watermark, _, e := vipsRead(o.Buf)
if e != nil {
return nil, e
}
opts := vipsWatermarkImageOptions{C.int(o.Left), C.int(o.Top), C.float(o.Opacity)}
err := C.vips_watermark_image(image, watermark, &out, (*C.WatermarkImageOptions)(unsafe.Pointer(&opts)))
if err != 0 {
return nil, catchVipsError()
}
return out, nil
}
| [
"\"VIPS_CONCURRENCY\"",
"\"VIPS_TRACE\""
] | [] | [
"VIPS_TRACE",
"VIPS_CONCURRENCY"
] | [] | ["VIPS_TRACE", "VIPS_CONCURRENCY"] | go | 2 | 0 | |
tests/models/test_amp.py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from unittest import mock
import pytest
import torch
from torch import optim
from torch.utils.data import DataLoader
import tests.helpers.utils as tutils
from pytorch_lightning import Trainer
from pytorch_lightning.plugins.environments import SLURMEnvironment
from pytorch_lightning.utilities import _TORCH_GREATER_EQUAL_1_10
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.helpers import BoringModel, RandomDataset
from tests.helpers.runif import RunIf
class AMPTestModel(BoringModel):
def _step(self, batch, batch_idx):
assert torch.is_autocast_enabled()
output = self(batch)
bfloat16 = self.trainer.precision_plugin.is_bfloat16
assert output.dtype == (torch.bfloat16 if bfloat16 else torch.float16)
loss = self.loss(batch, output)
return loss
def training_step(self, batch, batch_idx):
output = self._step(batch, batch_idx)
return {"loss": output}
def validation_step(self, batch, batch_idx):
output = self._step(batch, batch_idx)
return {"x": output}
def test_step(self, batch, batch_idx):
output = self._step(batch, batch_idx)
return {"y": output}
def predict(self, batch, batch_idx, dataloader_idx=None):
assert torch.is_autocast_enabled()
output = self(batch)
bfloat16 = self.trainer.precision_plugin.is_bfloat16
assert output.dtype == (torch.bfloat16 if bfloat16 else torch.float16)
return output
@RunIf(min_gpus=2)
@pytest.mark.parametrize(
"accelerator",
[
pytest.param("dp", marks=pytest.mark.skip("dp + amp not supported currently")), # TODO
"ddp_spawn",
],
)
@pytest.mark.parametrize(
"precision",
[
16,
pytest.param(
"bf16",
marks=pytest.mark.skipif(not _TORCH_GREATER_EQUAL_1_10, reason="torch.bfloat16 not available"),
),
],
)
@pytest.mark.parametrize("gpus", [1, 2])
def test_amp_gpus(tmpdir, accelerator, precision, gpus):
"""Make sure combinations of AMP and training types work if supported."""
tutils.reset_seed()
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, gpus=gpus, accelerator=accelerator, precision=precision)
model = AMPTestModel()
# tutils.run_model_test(trainer_options, model)
trainer.fit(model)
trainer.test(model)
trainer.predict(model, DataLoader(RandomDataset(32, 64)))
assert trainer.state.finished, f"Training failed with {trainer.state}"
@RunIf(min_gpus=2)
@mock.patch.dict(
os.environ,
{
"SLURM_NTASKS": "1",
"SLURM_JOB_NAME": "SOME_NAME",
"SLURM_NODEID": "0",
"LOCAL_RANK": "0",
"SLURM_LOCALID": "0",
"SLURM_PROCID": "0",
},
)
def test_amp_gpu_ddp_slurm_managed(tmpdir):
"""Make sure DDP + AMP work."""
# simulate setting slurm flags
tutils.set_random_master_port()
model = AMPTestModel()
# exp file to get meta
logger = tutils.get_default_logger(tmpdir)
# exp file to get weights
checkpoint = tutils.init_checkpoint_callback(logger)
# fit model
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
gpus=[0],
accelerator="ddp_spawn",
precision=16,
callbacks=[checkpoint],
logger=logger,
)
trainer.fit(model)
# correct result and ok accuracy
assert trainer.state.finished, "amp + ddp model failed to complete"
# test root model address
assert isinstance(trainer.training_type_plugin.cluster_environment, SLURMEnvironment)
assert trainer.training_type_plugin.cluster_environment.resolve_root_node_address("abc") == "abc"
assert trainer.training_type_plugin.cluster_environment.resolve_root_node_address("abc[23]") == "abc23"
assert trainer.training_type_plugin.cluster_environment.resolve_root_node_address("abc[23-24]") == "abc23"
generated = trainer.training_type_plugin.cluster_environment.resolve_root_node_address("abc[23-24, 45-40, 40]")
assert generated == "abc23"
@pytest.mark.skipif(torch.cuda.is_available(), reason="test is restricted only on CPU")
def test_cpu_model_with_amp(tmpdir):
"""Make sure model trains on CPU."""
with pytest.raises(MisconfigurationException, match="AMP is only available on GPU"):
Trainer(precision=16)
@mock.patch("pytorch_lightning.plugins.precision.apex_amp.ApexMixedPrecisionPlugin.backward")
def test_amp_without_apex(bwd_mock, tmpdir):
"""Check that even with apex amp type without requesting precision=16 the amp backend is void."""
model = BoringModel()
trainer = Trainer(default_root_dir=tmpdir, amp_backend="native")
assert trainer.amp_backend is None
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, amp_backend="apex")
assert trainer.amp_backend is None
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert not bwd_mock.called
@RunIf(min_gpus=1, amp_apex=True)
@mock.patch("pytorch_lightning.plugins.precision.apex_amp.ApexMixedPrecisionPlugin.backward")
def test_amp_with_apex(bwd_mock, tmpdir):
"""Check calling apex scaling in training."""
class CustomModel(BoringModel):
def training_step(self, batch, batch_idx, optimizer_idx):
return super().training_step(batch, batch_idx)
def configure_optimizers(self):
optimizer1 = optim.Adam(self.parameters(), lr=0.01)
optimizer2 = optim.SGD(self.parameters(), lr=0.01)
lr_scheduler1 = optim.lr_scheduler.StepLR(optimizer1, 1, gamma=0.1)
lr_scheduler2 = optim.lr_scheduler.StepLR(optimizer2, 1, gamma=0.1)
return [optimizer1, optimizer2], [lr_scheduler1, lr_scheduler2]
model = CustomModel()
model.training_epoch_end = None
trainer = Trainer(default_root_dir=tmpdir, max_steps=5, precision=16, amp_backend="apex", gpus=1)
assert str(trainer.amp_backend) == "AMPType.APEX"
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert bwd_mock.call_count == 10
assert isinstance(trainer.lr_schedulers[0]["scheduler"].optimizer, optim.Adam)
assert isinstance(trainer.lr_schedulers[1]["scheduler"].optimizer, optim.SGD)
| [] | [] | [] | [] | [] | python | 0 | 0 | |
src/testcases/CWE190_Integer_Overflow/s06/CWE190_Integer_Overflow__int_Environment_postinc_11.java | /* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE190_Integer_Overflow__int_Environment_postinc_11.java
Label Definition File: CWE190_Integer_Overflow__int.label.xml
Template File: sources-sinks-11.tmpl.java
*/
/*
* @description
* CWE: 190 Integer Overflow
* BadSource: Environment Read data from an environment variable
* GoodSource: A hardcoded non-zero, non-min, non-max, even number
* Sinks: increment
* GoodSink: Ensure there will not be an overflow before incrementing data
* BadSink : Increment data, which can cause an overflow
* Flow Variant: 11 Control flow: if(IO.staticReturnsTrue()) and if(IO.staticReturnsFalse())
*
* */
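/* Note (illustrative, not produced by the template generator): because the bad source
 * reads the ADD environment variable and the bad sink performs an unchecked data++,
 * running main() with ADD=2147483647 (Integer.MAX_VALUE) makes the increment wrap
 * around to Integer.MIN_VALUE -- the overflow this test case is meant to exhibit. */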
package testcases.CWE190_Integer_Overflow.s06;
import testcasesupport.*;
import javax.servlet.http.*;
import java.util.logging.Level;
public class CWE190_Integer_Overflow__int_Environment_postinc_11 extends AbstractTestCase
{
public void bad() throws Throwable
{
int data;
if (IO.staticReturnsTrue())
{
data = Integer.MIN_VALUE; /* Initialize data */
/* get environment variable ADD */
/* POTENTIAL FLAW: Read data from an environment variable */
{
String stringNumber = System.getenv("ADD");
if (stringNumber != null) // avoid NPD incidental warnings
{
try
{
data = Integer.parseInt(stringNumber.trim());
}
catch(NumberFormatException exceptNumberFormat)
{
IO.logger.log(Level.WARNING, "Number format exception parsing data from string", exceptNumberFormat);
}
}
}
}
else
{
/* INCIDENTAL: CWE 561 Dead Code, the code below will never run
* but ensure data is initialized before the Sink to avoid compiler errors */
data = 0;
}
if(IO.staticReturnsTrue())
{
/* POTENTIAL FLAW: if data == Integer.MAX_VALUE, this will overflow */
data++;
int result = (int)(data);
IO.writeLine("result: " + result);
}
}
/* goodG2B1() - use goodsource and badsink by changing first IO.staticReturnsTrue() to IO.staticReturnsFalse() */
private void goodG2B1() throws Throwable
{
int data;
if (IO.staticReturnsFalse())
{
/* INCIDENTAL: CWE 561 Dead Code, the code below will never run
* but ensure data is initialized before the Sink to avoid compiler errors */
data = 0;
}
else
{
/* FIX: Use a hardcoded number that won't cause underflow, overflow, divide by zero, or loss-of-precision issues */
data = 2;
}
if (IO.staticReturnsTrue())
{
/* POTENTIAL FLAW: if data == Integer.MAX_VALUE, this will overflow */
data++;
int result = (int)(data);
IO.writeLine("result: " + result);
}
}
/* goodG2B2() - use goodsource and badsink by reversing statements in first if */
private void goodG2B2() throws Throwable
{
int data;
if (IO.staticReturnsTrue())
{
/* FIX: Use a hardcoded number that won't cause underflow, overflow, divide by zero, or loss-of-precision issues */
data = 2;
}
else
{
/* INCIDENTAL: CWE 561 Dead Code, the code below will never run
* but ensure data is initialized before the Sink to avoid compiler errors */
data = 0;
}
if (IO.staticReturnsTrue())
{
/* POTENTIAL FLAW: if data == Integer.MAX_VALUE, this will overflow */
data++;
int result = (int)(data);
IO.writeLine("result: " + result);
}
}
/* goodB2G1() - use badsource and goodsink by changing second IO.staticReturnsTrue() to IO.staticReturnsFalse() */
private void goodB2G1() throws Throwable
{
int data;
if (IO.staticReturnsTrue())
{
data = Integer.MIN_VALUE; /* Initialize data */
/* get environment variable ADD */
/* POTENTIAL FLAW: Read data from an environment variable */
{
String stringNumber = System.getenv("ADD");
if (stringNumber != null) // avoid NPD incidental warnings
{
try
{
data = Integer.parseInt(stringNumber.trim());
}
catch(NumberFormatException exceptNumberFormat)
{
IO.logger.log(Level.WARNING, "Number format exception parsing data from string", exceptNumberFormat);
}
}
}
}
else
{
/* INCIDENTAL: CWE 561 Dead Code, the code below will never run
* but ensure data is initialized before the Sink to avoid compiler errors */
data = 0;
}
if (IO.staticReturnsFalse())
{
/* INCIDENTAL: CWE 561 Dead Code, the code below will never run */
IO.writeLine("Benign, fixed string");
}
else
{
/* FIX: Add a check to prevent an overflow from occurring */
if (data < Integer.MAX_VALUE)
{
data++;
int result = (int)(data);
IO.writeLine("result: " + result);
}
else
{
IO.writeLine("data value is too large to increment.");
}
}
}
/* goodB2G2() - use badsource and goodsink by reversing statements in second if */
private void goodB2G2() throws Throwable
{
int data;
if (IO.staticReturnsTrue())
{
data = Integer.MIN_VALUE; /* Initialize data */
/* get environment variable ADD */
/* POTENTIAL FLAW: Read data from an environment variable */
{
String stringNumber = System.getenv("ADD");
if (stringNumber != null) // avoid NPD incidental warnings
{
try
{
data = Integer.parseInt(stringNumber.trim());
}
catch(NumberFormatException exceptNumberFormat)
{
IO.logger.log(Level.WARNING, "Number format exception parsing data from string", exceptNumberFormat);
}
}
}
}
else
{
/* INCIDENTAL: CWE 561 Dead Code, the code below will never run
* but ensure data is initialized before the Sink to avoid compiler errors */
data = 0;
}
if (IO.staticReturnsTrue())
{
/* FIX: Add a check to prevent an overflow from occurring */
if (data < Integer.MAX_VALUE)
{
data++;
int result = (int)(data);
IO.writeLine("result: " + result);
}
else
{
IO.writeLine("data value is too large to increment.");
}
}
}
public void good() throws Throwable
{
goodG2B1();
goodG2B2();
goodB2G1();
goodB2G2();
}
/* Below is the main(). It is only used when building this testcase on
* its own for testing or for building a binary to use in testing binary
* analysis tools. It is not used when compiling all the testcases as one
* application, which is how source code analysis tools are tested.
*/
public static void main(String[] args) throws ClassNotFoundException,
InstantiationException, IllegalAccessException
{
mainFromParent(args);
}
}
| [
"\"ADD\"",
"\"ADD\"",
"\"ADD\""
] | [] | [
"ADD"
] | [] | ["ADD"] | java | 1 | 0 | |
kilo.go | package kilo
import (
"bufio"
"bytes"
"fmt"
"io"
"log"
"os"
"strings"
"syscall"
"time"
"unicode"
"unsafe"
)
/*** defines ***/
const KILO_VERSION = "0.0.1"
const KILO_TAB_STOP = 8
const KILO_QUIT_TIMES = 3
const (
BACKSPACE = 127
ARROW_LEFT = 1000 + iota
ARROW_RIGHT = 1000 + iota
ARROW_UP = 1000 + iota
ARROW_DOWN = 1000 + iota
DEL_KEY = 1000 + iota
HOME_KEY = 1000 + iota
END_KEY = 1000 + iota
PAGE_UP = 1000 + iota
PAGE_DOWN = 1000 + iota
)
const (
HL_NORMAL = 0
HL_COMMENT = iota
HL_MLCOMMENT = iota
HL_KEYWORD1 = iota
HL_KEYWORD2 = iota
HL_STRING = iota
HL_NUMBER = iota
HL_MATCH = iota
)
const (
HL_HIGHLIGHT_NUMBERS = 1 << 0
HL_HIGHLIGHT_STRINGS = 1 << iota
)
/*** data ***/
type editorSyntax struct {
filetype string
filematch []string
keywords []string
singleLineCommentStart []byte
multiLineCommentStart []byte
multiLineCommentEnd []byte
flags int
}
type Termios struct {
Iflag uint32
Oflag uint32
Cflag uint32
Lflag uint32
Cc [20]byte
Ispeed uint32
Ospeed uint32
}
type erow struct {
idx int
size int
rsize int
chars []byte
render []byte
hl []byte
hlOpenComment bool
}
type editorConfig struct {
cx int
cy int
rx int
rowoff int
coloff int
screenRows int
screenCols int
numRows int
rows []erow
dirty bool
filename string
statusmsg string
statusmsg_time time.Time
syntax *editorSyntax
origTermios *Termios
}
type WinSize struct {
Row uint16
Col uint16
Xpixel uint16
Ypixel uint16
}
var E editorConfig
/*** filetypes ***/
var HLDB []editorSyntax = []editorSyntax{
editorSyntax{
filetype: "c",
filematch: []string{".c", ".h", ".cpp"},
keywords: []string{"switch", "if", "while", "for",
"break", "continue", "return", "else", "struct",
"union", "typedef", "static", "enum", "class", "case",
"int|", "long|", "double|", "float|", "char|",
"unsigned|", "signed|", "void|",
},
singleLineCommentStart: []byte{'/', '/'},
multiLineCommentStart: []byte{'/', '*'},
multiLineCommentEnd: []byte{'*', '/'},
flags: HL_HIGHLIGHT_NUMBERS | HL_HIGHLIGHT_STRINGS,
},
}
/*** terminal ***/
func die(err error) {
DisableRawMode()
io.WriteString(os.Stdout, "\x1b[2J")
io.WriteString(os.Stdout, "\x1b[H")
log.Fatal(err)
}
func TcSetAttr(fd uintptr, termios *Termios) error {
// TCSETS+1 == TCSETSW, because TCSAFLUSH doesn't exist
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TCSETS+1), uintptr(unsafe.Pointer(termios))); err != 0 {
return err
}
return nil
}
func TcGetAttr(fd uintptr) *Termios {
var termios = &Termios{}
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, syscall.TCGETS, uintptr(unsafe.Pointer(termios))); err != 0 {
log.Fatalf("Problem getting terminal attributes: %s\n", err)
}
return termios
}
func EnableRawMode() {
E.origTermios = TcGetAttr(os.Stdin.Fd())
var raw Termios
raw = *E.origTermios
raw.Iflag &^= syscall.BRKINT | syscall.ICRNL | syscall.INPCK | syscall.ISTRIP | syscall.IXON
raw.Oflag &^= syscall.OPOST
raw.Cflag |= syscall.CS8
raw.Lflag &^= syscall.ECHO | syscall.ICANON | syscall.IEXTEN | syscall.ISIG
raw.Cc[syscall.VMIN+1] = 0
raw.Cc[syscall.VTIME+1] = 1
if e := TcSetAttr(os.Stdin.Fd(), &raw); e != nil {
log.Fatalf("Problem enabling raw mode: %s\n", e)
}
}
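// Illustrative note (an assumption, not verified on every platform): with VMIN=0 and
// VTIME=1 a terminal read() returns after at most ~100ms even when no byte arrived,
// which is what lets editorReadKey poll without blocking forever. The "+1" offsets
// into Cc likely compensate for the c_line byte that the Linux termios struct carries
// before c_cc but this Termios struct omits.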
func DisableRawMode() {
if e := TcSetAttr(os.Stdin.Fd(), E.origTermios); e != nil {
log.Fatalf("Problem disabling raw mode: %s\n", e)
}
}
func editorReadKey() int {
var buffer [1]byte
var cc int
var err error
for cc, err = os.Stdin.Read(buffer[:]); cc != 1; cc, err = os.Stdin.Read(buffer[:]) {
}
if err != nil {
die(err)
}
if buffer[0] == '\x1b' {
var seq [2]byte
if cc, _ = os.Stdin.Read(seq[:]); cc != 2 {
return '\x1b'
}
if seq[0] == '[' {
if seq[1] >= '0' && seq[1] <= '9' {
if cc, err = os.Stdin.Read(buffer[:]); cc != 1 {
return '\x1b'
}
if buffer[0] == '~' {
switch seq[1] {
case '1':
return HOME_KEY
case '3':
return DEL_KEY
case '4':
return END_KEY
case '5':
return PAGE_UP
case '6':
return PAGE_DOWN
case '7':
return HOME_KEY
case '8':
return END_KEY
}
}
// XXX - what happens here?
} else {
switch seq[1] {
case 'A':
return ARROW_UP
case 'B':
return ARROW_DOWN
case 'C':
return ARROW_RIGHT
case 'D':
return ARROW_LEFT
case 'H':
return HOME_KEY
case 'F':
return END_KEY
}
}
} else if seq[0] == 'O' {
switch seq[1] {
case 'H':
return HOME_KEY
case 'F':
return END_KEY
}
}
return '\x1b'
}
return int(buffer[0])
}
func getCursorPosition(rows *int, cols *int) int {
io.WriteString(os.Stdout, "\x1b[6n")
var buffer [1]byte
var buf []byte
var cc int
for cc, _ = os.Stdin.Read(buffer[:]); cc == 1; cc, _ = os.Stdin.Read(buffer[:]) {
if buffer[0] == 'R' {
break
}
buf = append(buf, buffer[0])
}
if string(buf[0:2]) != "\x1b[" {
log.Printf("Failed to read rows;cols from tty\n")
return -1
}
if n, e := fmt.Sscanf(string(buf[2:]), "%d;%d", rows, cols); n != 2 || e != nil {
if e != nil {
log.Printf("getCursorPosition: fmt.Sscanf() failed: %s\n", e)
}
if n != 2 {
log.Printf("getCursorPosition: got %d items, wanted 2\n", n)
}
return -1
}
return 0
}
func getWindowSize(rows *int, cols *int) int {
var w WinSize
_, _, err := syscall.Syscall(syscall.SYS_IOCTL,
os.Stdout.Fd(),
syscall.TIOCGWINSZ,
uintptr(unsafe.Pointer(&w)),
)
if err != 0 { // type syscall.Errno
io.WriteString(os.Stdout, "\x1b[999C\x1b[999B")
return getCursorPosition(rows, cols)
}
*rows = int(w.Row)
*cols = int(w.Col)
return 0
}
/*** syntax highlighting ***/
var separators []byte = []byte(",.()+-/*=~%<>[]; \t\n\r")
func isSeparator(c byte) bool {
if bytes.IndexByte(separators, c) >= 0 {
return true
}
return false
}
func editorUpdateSyntax(row *erow) {
row.hl = make([]byte, row.rsize)
if E.syntax == nil {
return
}
keywords := E.syntax.keywords[:]
scs := E.syntax.singleLineCommentStart
mcs := E.syntax.multiLineCommentStart
mce := E.syntax.multiLineCommentEnd
prevSep := true
inComment := row.idx > 0 && E.rows[row.idx-1].hlOpenComment
var inString byte = 0
var skip = 0
for i, c := range row.render {
if skip > 0 {
skip--
continue
}
if inString == 0 && len(scs) > 0 && !inComment {
if bytes.HasPrefix(row.render[i:], scs) {
for j := i; j < row.rsize; j++ {
row.hl[j] = HL_COMMENT
}
break
}
}
if inString == 0 && len(mcs) > 0 && len(mce) > 0 {
if inComment {
row.hl[i] = HL_MLCOMMENT
if bytes.HasPrefix(row.render[i:], mce) {
for l := i; l < i+len(mce); l++ {
row.hl[l] = HL_MLCOMMENT
}
skip = len(mce)
inComment = false
prevSep = true
}
continue
} else if bytes.HasPrefix(row.render[i:], mcs) {
for l := i; l < i+len(mcs); l++ {
row.hl[l] = HL_MLCOMMENT
}
inComment = true
skip = len(mcs)
}
}
var prevHl byte = HL_NORMAL
if i > 0 {
prevHl = row.hl[i-1]
}
if (E.syntax.flags & HL_HIGHLIGHT_STRINGS) == HL_HIGHLIGHT_STRINGS {
if inString != 0 {
row.hl[i] = HL_STRING
if c == '\\' && i+1 < row.rsize {
row.hl[i+1] = HL_STRING
skip = 1
continue
}
if c == inString {
inString = 0
}
prevSep = true
continue
} else {
if c == '"' || c == '\'' {
inString = c
row.hl[i] = HL_STRING
continue
}
}
}
if (E.syntax.flags & HL_HIGHLIGHT_NUMBERS) == HL_HIGHLIGHT_NUMBERS {
if unicode.IsDigit(rune(c)) &&
(prevSep || prevHl == HL_NUMBER) ||
(c == '.' && prevHl == HL_NUMBER) {
row.hl[i] = HL_NUMBER
prevSep = false
continue
}
}
if prevSep {
var j int
var skw string
for j, skw = range keywords {
kw := []byte(skw)
var color byte = HL_KEYWORD1
idx := bytes.LastIndexByte(kw, '|')
if idx > 0 {
kw = kw[:idx]
color = HL_KEYWORD2
}
klen := len(kw)
if bytes.HasPrefix(row.render[i:], kw) &&
(len(row.render[i:]) == klen ||
isSeparator(row.render[i+klen])) {
for l := i; l < i+klen; l++ {
row.hl[l] = color
}
skip = klen - 1
break
}
}
if j < len(keywords)-1 {
prevSep = false
continue
}
}
prevSep = isSeparator(c)
}
changed := row.hlOpenComment != inComment
row.hlOpenComment = inComment
if changed && row.idx+1 < E.numRows {
editorUpdateSyntax(&E.rows[row.idx+1])
}
}
func editorSyntaxToColor(hl byte) int {
switch hl {
case HL_COMMENT, HL_MLCOMMENT:
return 36
case HL_KEYWORD1:
return 32
case HL_KEYWORD2:
return 33
case HL_STRING:
return 35
case HL_NUMBER:
return 31
case HL_MATCH:
return 34
}
return 37
}
func editorSelectSyntaxHighlight() {
if E.filename == "" {
return
}
for _, s := range HLDB {
for _, suffix := range s.filematch {
if strings.HasSuffix(E.filename, suffix) {
E.syntax = &s
return
}
}
}
}
/*** row operations ***/
func editorRowCxToRx(row *erow, cx int) int {
rx := 0
for j := 0; j < row.size && j < cx; j++ {
if row.chars[j] == '\t' {
rx += ((KILO_TAB_STOP - 1) - (rx % KILO_TAB_STOP))
}
rx++
}
return rx
}
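// Worked example (illustration only): with KILO_TAB_STOP = 8 and a row whose chars
// are "a\tb", the 'b' sits at cx = 2 but renders at rx = 8, because the tab advances
// the render index to the next multiple of KILO_TAB_STOP.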
func editorRowRxToCx(row *erow, rx int) int {
curRx := 0
var cx int
for cx = 0; cx < row.size; cx++ {
if row.chars[cx] == '\t' {
curRx += (KILO_TAB_STOP - 1) - (curRx % KILO_TAB_STOP)
}
curRx++
if curRx > rx {
break
}
}
return cx
}
func editorUpdateRow(row *erow) {
tabs := 0
for _, c := range row.chars {
if c == '\t' {
tabs++
}
}
row.render = make([]byte, row.size+tabs*(KILO_TAB_STOP-1))
idx := 0
for _, c := range row.chars {
if c == '\t' {
row.render[idx] = ' '
idx++
for (idx % KILO_TAB_STOP) != 0 {
row.render[idx] = ' '
idx++
}
} else {
row.render[idx] = c
idx++
}
}
row.rsize = idx
editorUpdateSyntax(row)
}
func editorInsertRow(at int, s []byte) {
if at < 0 || at > E.numRows {
return
}
var r erow
r.chars = s
r.size = len(s)
r.idx = at
if at == 0 {
t := make([]erow, 1)
t[0] = r
E.rows = append(t, E.rows...)
} else if at == E.numRows {
E.rows = append(E.rows, r)
} else {
t := make([]erow, 1)
t[0] = r
E.rows = append(E.rows[:at], append(t, E.rows[at:]...)...)
}
for j := at + 1; j <= E.numRows; j++ {
E.rows[j].idx++
}
editorUpdateRow(&E.rows[at])
E.numRows++
E.dirty = true
}
func editorDelRow(at int) {
if at < 0 || at > E.numRows {
return
}
E.rows = append(E.rows[:at], E.rows[at+1:]...)
E.numRows--
E.dirty = true
for j := at; j < E.numRows; j++ {
E.rows[j].idx--
}
}
func editorRowInsertChar(row *erow, at int, c byte) {
if at < 0 || at > row.size {
row.chars = append(row.chars, c)
} else if at == 0 {
t := make([]byte, row.size+1)
t[0] = c
copy(t[1:], row.chars)
row.chars = t
} else {
row.chars = append(
row.chars[:at],
append(append(make([]byte, 0), c), row.chars[at:]...)...,
)
}
row.size = len(row.chars)
editorUpdateRow(row)
E.dirty = true
}
func editorRowAppendString(row *erow, s []byte) {
row.chars = append(row.chars, s...)
row.size = len(row.chars)
editorUpdateRow(row)
E.dirty = true
}
func editorRowDelChar(row *erow, at int) {
if at < 0 || at > row.size {
return
}
row.chars = append(row.chars[:at], row.chars[at+1:]...)
row.size--
E.dirty = true
editorUpdateRow(row)
}
/*** editor operations ***/
func editorInsertChar(c byte) {
if E.cy == E.numRows {
var emptyRow []byte
editorInsertRow(E.numRows, emptyRow)
}
editorRowInsertChar(&E.rows[E.cy], E.cx, c)
E.cx++
}
func editorInsertNewLine() {
if E.cx == 0 {
editorInsertRow(E.cy, make([]byte, 0))
} else {
editorInsertRow(E.cy+1, E.rows[E.cy].chars[E.cx:])
E.rows[E.cy].chars = E.rows[E.cy].chars[:E.cx]
E.rows[E.cy].size = len(E.rows[E.cy].chars)
editorUpdateRow(&E.rows[E.cy])
}
E.cy++
E.cx = 0
}
func editorDelChar() {
if E.cy == E.numRows {
return
}
if E.cx == 0 && E.cy == 0 {
return
}
if E.cx > 0 {
editorRowDelChar(&E.rows[E.cy], E.cx-1)
E.cx--
} else {
E.cx = E.rows[E.cy-1].size
editorRowAppendString(&E.rows[E.cy-1], E.rows[E.cy].chars)
editorDelRow(E.cy)
E.cy--
}
}
/*** file I/O ***/
func editorRowsToString() (string, int) {
totlen := 0
buf := ""
for _, row := range E.rows {
totlen += row.size + 1
buf += string(row.chars) + "\n"
}
return buf, totlen
}
func EditorOpen(filename string) {
E.filename = filename
editorSelectSyntaxHighlight()
fd, err := os.Open(filename)
if err != nil {
die(err)
}
defer fd.Close()
fp := bufio.NewReader(fd)
for {
line, err := fp.ReadBytes('\n')
if len(line) > 0 {
// Trim trailing newlines and carriage returns, then store the row
for len(line) > 0 && (line[len(line)-1] == '\n' || line[len(line)-1] == '\r') {
line = line[:len(line)-1]
}
editorInsertRow(E.numRows, line)
}
if err == io.EOF {
break
}
if err != nil {
die(err)
}
}
E.dirty = false
}
func editorSave() {
if E.filename == "" {
E.filename = editorPrompt("Save as: %q", nil)
if E.filename == "" {
EditorSetStatusMessage("Save aborted")
return
}
editorSelectSyntaxHighlight()
}
buf, len := editorRowsToString()
fp, e := os.OpenFile(E.filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
if e != nil {
EditorSetStatusMessage("Can't save! file open error %s", e)
return
}
defer fp.Close()
n, err := io.WriteString(fp, buf)
if err == nil {
if n == len {
E.dirty = false
EditorSetStatusMessage("%d bytes written to disk", len)
} else {
EditorSetStatusMessage(fmt.Sprintf("wanted to write %d bytes to file, wrote %d", len, n))
}
return
}
EditorSetStatusMessage("Can't save! I/O error %s", err)
}
/*** find ***/
var lastMatch int = -1
var direction int = 1
var savedHlLine int
var savedHl []byte
func editorFindCallback(qry []byte, key int) {
if savedHlLine > 0 {
copy(E.rows[savedHlLine].hl, savedHl)
savedHlLine = 0
savedHl = nil
}
if key == '\r' || key == '\x1b' {
lastMatch = -1
direction = 1
return
} else if key == ARROW_RIGHT || key == ARROW_DOWN {
direction = 1
} else if key == ARROW_LEFT || key == ARROW_UP {
direction = -1
} else {
lastMatch = -1
direction = 1
}
if lastMatch == -1 {
direction = 1
}
current := lastMatch
for range E.rows {
current += direction
if current == -1 {
current = E.numRows - 1
} else if current == E.numRows {
current = 0
}
row := &E.rows[current]
x := bytes.Index(row.render, qry)
if x > -1 {
lastMatch = current
E.cy = current
E.cx = editorRowRxToCx(row, x)
E.rowoff = E.numRows
savedHlLine = current
savedHl = make([]byte, row.rsize)
copy(savedHl, row.hl)
max := x + len(qry)
for i := x; i < max; i++ {
row.hl[i] = HL_MATCH
}
break
}
}
}
func editorFind() {
savedCx := E.cx
savedCy := E.cy
savedColoff := E.coloff
savedRowoff := E.rowoff
query := editorPrompt("Search: %s (ESC/Arrows/Enter)",
editorFindCallback)
if query == "" {
E.cx = savedCx
E.cy = savedCy
E.coloff = savedColoff
E.rowoff = savedRowoff
}
}
/*** input ***/
func editorPrompt(prompt string, callback func([]byte, int)) string {
var buf []byte
for {
EditorSetStatusMessage(prompt, buf)
EditorRefreshScreen()
c := editorReadKey()
if c == DEL_KEY || c == ('h'&0x1f) || c == BACKSPACE {
if len(buf) > 0 {
buf = buf[:len(buf)-1]
}
} else if c == '\x1b' {
EditorSetStatusMessage("")
if callback != nil {
callback(buf, c)
}
return ""
} else if c == '\r' {
if len(buf) != 0 {
EditorSetStatusMessage("")
if callback != nil {
callback(buf, c)
}
return string(buf)
}
} else {
if unicode.IsPrint(rune(c)) {
buf = append(buf, byte(c))
}
}
if callback != nil {
callback(buf, c)
}
}
}
func editorMoveCursor(key int) {
switch key {
case ARROW_LEFT:
if E.cx != 0 {
E.cx--
} else if E.cy > 0 {
E.cy--
E.cx = E.rows[E.cy].size
}
case ARROW_RIGHT:
if E.cy < E.numRows {
if E.cx < E.rows[E.cy].size {
E.cx++
} else if E.cx == E.rows[E.cy].size {
E.cy++
E.cx = 0
}
}
case ARROW_UP:
if E.cy != 0 {
E.cy--
}
case ARROW_DOWN:
if E.cy < E.numRows {
E.cy++
}
}
rowlen := 0
if E.cy < E.numRows {
rowlen = E.rows[E.cy].size
}
if E.cx > rowlen {
E.cx = rowlen
}
}
var quitTimes int = KILO_QUIT_TIMES
func EditorProcessKeypress() {
c := editorReadKey()
switch c {
case '\r':
editorInsertNewLine()
break
case ('q' & 0x1f):
if E.dirty && quitTimes > 0 {
EditorSetStatusMessage("Warning!!! File has unsaved changes. Press Ctrl-Q %d more times to quit.", quitTimes)
quitTimes--
return
}
io.WriteString(os.Stdout, "\x1b[2J")
io.WriteString(os.Stdout, "\x1b[H")
DisableRawMode()
os.Exit(0)
case ('s' & 0x1f):
editorSave()
case HOME_KEY:
E.cx = 0
case END_KEY:
if E.cy < E.numRows {
E.cx = E.rows[E.cy].size
}
case ('f' & 0x1f):
editorFind()
case ('h' & 0x1f), BACKSPACE, DEL_KEY:
if c == DEL_KEY {
editorMoveCursor(ARROW_RIGHT)
}
editorDelChar()
break
case PAGE_UP, PAGE_DOWN:
dir := ARROW_DOWN
if c == PAGE_UP {
E.cy = E.rowoff
dir = ARROW_UP
} else {
E.cy = E.rowoff + E.screenRows - 1
if E.cy > E.numRows {
E.cy = E.numRows
}
}
for times := E.screenRows; times > 0; times-- {
editorMoveCursor(dir)
}
case ARROW_UP, ARROW_DOWN, ARROW_LEFT, ARROW_RIGHT:
editorMoveCursor(c)
case ('l' & 0x1f):
break
case '\x1b':
break
default:
editorInsertChar(byte(c))
}
quitTimes = KILO_QUIT_TIMES
}
/*** output ***/
func editorScroll() {
E.rx = 0
if E.cy < E.numRows {
E.rx = editorRowCxToRx(&(E.rows[E.cy]), E.cx)
}
if E.cy < E.rowoff {
E.rowoff = E.cy
}
if E.cy >= E.rowoff+E.screenRows {
E.rowoff = E.cy - E.screenRows + 1
}
if E.rx < E.coloff {
E.coloff = E.rx
}
if E.rx >= E.coloff+E.screenCols {
E.coloff = E.rx - E.screenCols + 1
}
}
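// Worked example (illustration only): with screenRows = 24 and rowoff = 0, moving the
// cursor down to cy = 30 sets rowoff = 30 - 24 + 1 = 7, so rows 7..30 are shown and
// the cursor stays on the last visible line. The same arithmetic keeps rx inside
// [coloff, coloff+screenCols) horizontally.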
func EditorRefreshScreen() {
editorScroll()
ab := bytes.NewBufferString("\x1b[?25l")
ab.WriteString("\x1b[H")
editorDrawRows(ab)
editorDrawStatusBar(ab)
editorDrawMessageBar(ab)
ab.WriteString(fmt.Sprintf("\x1b[%d;%dH", (E.cy-E.rowoff)+1, (E.rx-E.coloff)+1))
ab.WriteString("\x1b[?25h")
_, e := ab.WriteTo(os.Stdout)
if e != nil {
log.Fatal(e)
}
}
func editorDrawRows(ab *bytes.Buffer) {
for y := 0; y < E.screenRows; y++ {
filerow := y + E.rowoff
if filerow >= E.numRows {
if E.numRows == 0 && y == E.screenRows/3 {
w := fmt.Sprintf("Kilo editor -- version %s", KILO_VERSION)
if len(w) > E.screenCols {
w = w[0:E.screenCols]
}
pad := "~ "
for padding := (E.screenCols - len(w)) / 2; padding > 0; padding-- {
ab.WriteString(pad)
pad = " "
}
ab.WriteString(w)
} else {
ab.WriteString("~")
}
} else {
len := E.rows[filerow].rsize - E.coloff
if len < 0 {
len = 0
}
if len > 0 {
if len > E.screenCols {
len = E.screenCols
}
rindex := E.coloff + len
hl := E.rows[filerow].hl[E.coloff:rindex]
currentColor := -1
for j, c := range E.rows[filerow].render[E.coloff:rindex] {
if unicode.IsControl(rune(c)) {
ab.WriteString("\x1b[7m")
if c < 26 {
ab.WriteString("@")
} else {
ab.WriteString("?")
}
ab.WriteString("\x1b[m")
if currentColor != -1 {
ab.WriteString(fmt.Sprintf("\x1b[%dm", currentColor))
}
} else if hl[j] == HL_NORMAL {
if currentColor != -1 {
ab.WriteString("\x1b[39m")
currentColor = -1
}
ab.WriteByte(c)
} else {
color := editorSyntaxToColor(hl[j])
if color != currentColor {
currentColor = color
buf := fmt.Sprintf("\x1b[%dm", color)
ab.WriteString(buf)
}
ab.WriteByte(c)
}
}
ab.WriteString("\x1b[39m")
}
}
ab.WriteString("\x1b[K")
ab.WriteString("\r\n")
}
}
func editorDrawStatusBar(ab *bytes.Buffer) {
ab.WriteString("\x1b[7m")
fname := E.filename
if fname == "" {
fname = "[No Name]"
}
modified := ""
if E.dirty {
modified = "(modified)"
}
status := fmt.Sprintf("%.20s - %d lines %s", fname, E.numRows, modified)
ln := len(status)
if ln > E.screenCols {
ln = E.screenCols
}
filetype := "no ft"
if E.syntax != nil {
filetype = E.syntax.filetype
}
rstatus := fmt.Sprintf("%s | %d/%d", filetype, E.cy+1, E.numRows)
rlen := len(rstatus)
ab.WriteString(status[:ln])
for ln < E.screenCols {
if E.screenCols-ln == rlen {
ab.WriteString(rstatus)
break
} else {
ab.WriteString(" ")
ln++
}
}
ab.WriteString("\x1b[m")
ab.WriteString("\r\n")
}
func editorDrawMessageBar(ab *bytes.Buffer) {
ab.WriteString("\x1b[K")
msglen := len(E.statusmsg)
if msglen > E.screenCols {
msglen = E.screenCols
}
if msglen > 0 && (time.Now().Sub(E.statusmsg_time) < 5*time.Second) {
ab.WriteString(E.statusmsg)
}
}
func EditorSetStatusMessage(args ...interface{}) {
E.statusmsg = fmt.Sprintf(args[0].(string), args[1:]...)
E.statusmsg_time = time.Now()
}
/*** init ***/
func InitEditor() {
// Initialization a la C not necessary.
if getWindowSize(&E.screenRows, &E.screenCols) == -1 {
die(fmt.Errorf("couldn't get screen size"))
}
E.screenRows -= 2
}
| [] | [] | [] | [] | [] | go | null | null | null |
cmd/chartsvc/main.go | /*
Copyright (c) 2017 Bitnami
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"net/http"
"os"
"github.com/gorilla/mux"
"github.com/heptiolabs/healthcheck"
"github.com/kubeapps/common/datastore"
log "github.com/sirupsen/logrus"
"github.com/urfave/negroni"
)
const pathPrefix = "/v1"
var dbSession datastore.Session
func setupRoutes() http.Handler {
r := mux.NewRouter()
// Healthcheck
health := healthcheck.NewHandler()
r.Handle("/live", health)
r.Handle("/ready", health)
// Routes
apiv1 := r.PathPrefix(pathPrefix).Subrouter()
apiv1.Methods("GET").Path("/charts").HandlerFunc(listCharts)
apiv1.Methods("GET").Path("/charts/{repo}").Handler(WithParams(listRepoCharts))
apiv1.Methods("GET").Path("/charts/{repo}/{chartName}").Handler(WithParams(getChart))
apiv1.Methods("GET").Path("/charts/{repo}/{chartName}/versions").Handler(WithParams(listChartVersions))
apiv1.Methods("GET").Path("/charts/{repo}/{chartName}/versions/{version}").Handler(WithParams(getChartVersion))
apiv1.Methods("GET").Path("/assets/{repo}/{chartName}/logo-160x160-fit.png").Handler(WithParams(getChartIcon))
apiv1.Methods("GET").Path("/assets/{repo}/{chartName}/versions/{version}/README.md").Handler(WithParams(getChartVersionReadme))
apiv1.Methods("GET").Path("/assets/{repo}/{chartName}/versions/{version}/values.yaml").Handler(WithParams(getChartVersionValues))
n := negroni.Classic()
n.UseHandler(r)
return n
}
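// Example requests handled by the routes above (repo and chart names are illustrative,
// not defaults shipped with the service):
//   GET /v1/charts/stable/wordpress                       -> getChart
//   GET /v1/charts/stable/wordpress/versions              -> listChartVersions
//   GET /v1/assets/stable/wordpress/logo-160x160-fit.png  -> getChartIcon
// The /live and /ready endpoints serve the healthcheck handler without the /v1 prefix.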
func main() {
dbURL := flag.String("mongo-url", "localhost", "MongoDB URL (see https://godoc.org/labix.org/v2/mgo#Dial for format)")
dbName := flag.String("mongo-database", "charts", "MongoDB database")
dbUsername := flag.String("mongo-user", "", "MongoDB user")
dbPassword := os.Getenv("MONGO_PASSWORD")
flag.Parse()
mongoConfig := datastore.Config{URL: *dbURL, Database: *dbName, Username: *dbUsername, Password: dbPassword}
var err error
dbSession, err = datastore.NewSession(mongoConfig)
if err != nil {
log.WithFields(log.Fields{"host": *dbURL}).Fatal(err)
}
n := setupRoutes()
port := os.Getenv("PORT")
if port == "" {
port = "8080"
}
addr := ":" + port
log.WithFields(log.Fields{"addr": addr}).Info("Started RateSvc")
http.ListenAndServe(addr, n)
}
| [
"\"MONGO_PASSWORD\"",
"\"PORT\""
] | [] | [
"PORT",
"MONGO_PASSWORD"
] | [] | ["PORT", "MONGO_PASSWORD"] | go | 2 | 0 | |
AlarmeDeAcoes/AlarmeDeAcoes/wsgi.py | """
WSGI config for AlarmeDeAcoes project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'AlarmeDeAcoes.settings')
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
k8s-crd/sonar-operator/vendor/github.com/metral/memhog-operator/pkg/operator/controller.go | package operator
import (
"context"
"fmt"
"os"
"reflect"
"time"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/pkg/api"
"k8s.io/client-go/pkg/api/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"github.com/golang/glog"
"github.com/metral/memhog-operator/pkg/operator/crd"
"github.com/metral/memhog-operator/pkg/utils"
prometheusClient "github.com/prometheus/client_golang/api"
prometheus "github.com/prometheus/client_golang/api/prometheus/v1"
"github.com/prometheus/common/model"
apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
)
// Implements an AppMonitor's controller loop in a particular namespace.
// The controller makes use of an Informer resource to locally cache resources
// managed, and handle events on the resources.
type AppMonitorController struct {
// Baseline kubeconfig to use when communicating with the API.
kubecfg *rest.Config
// Clientset that has a REST client for each k8s API group.
clientSet kubernetes.Interface
// APIExtensions Clientset that has a REST client for each k8s API group.
apiextensionsClientSet apiextensionsclient.Interface
// REST client for the AppMonitor resource k8s API group (since its not an
// official resource, there is no existing Clientset for it in k8s).
restClient rest.Interface
// Informer for all resources being watched by the operator.
informer *AppMonitorControllerInformer
// The namespace where the operator is running.
namespace string
// The address of the Prometheus service
// e.g. "http://prometheus.tectonic-system:9090"
prometheusAddr string
}
// Implements an Informer for the resources being operated on: Pods &
// AppMonitors.
type AppMonitorControllerInformer struct {
// Store & controller for Pod resources
podStore cache.Store
podController cache.Controller
// Store & controller for AppMonitor resources
appMonitorStore cache.Store
appMonitorController cache.Controller
}
// Create a new Controller for the AppMonitor operator
func NewAppMonitorController(kubeconfig, namespace, prometheusAddr string) (
*AppMonitorController, error) {
// Create the client config for use in creating the k8s API client
// Use kubeconfig if given, otherwise use in-cluster
kubecfg, err := utils.BuildKubeConfig(kubeconfig)
if err != nil {
return nil, err
}
// Create a new k8s API client from the kubeconfig
clientSet, err := kubernetes.NewForConfig(kubecfg)
if err != nil {
return nil, err
}
// Create a new k8s API client for API Extenstions from the kubeconfig
apiextensionsClientSet, err := apiextensionsclient.NewForConfig(kubecfg)
if err != nil {
return nil, err
}
// Create & register the AppMonitor resource as a CRD in the cluster, if it
// doesn't exist
kind := reflect.TypeOf(AppMonitor{}).Name()
glog.V(2).Infof("Registering CRD: %s.%s | version: %s", CRDName, Domain, Version)
_, err = crd.CreateCustomResourceDefinition(
apiextensionsClientSet,
CRDName,
Domain,
kind,
ResourceNamePlural,
Version,
)
if err != nil {
return nil, err
}
// Discover or set the namespace in which this controller is running in
if namespace == "" {
if namespace = os.Getenv("POD_NAMESPACE"); namespace == "" {
namespace = "default"
}
}
// Create a new k8s REST API client for AppMonitors
restClient, err := newAppMonitorClient(kubecfg, namespace)
if err != nil {
return nil, err
}
// Create new AppMonitorController
amc := &AppMonitorController{
kubecfg: kubecfg,
clientSet: clientSet,
apiextensionsClientSet: apiextensionsClientSet,
restClient: restClient,
namespace: namespace,
prometheusAddr: prometheusAddr,
}
// Create a new Informer for the AppMonitorController
amc.informer = amc.newAppMonitorControllerInformer()
return amc, nil
}
// Start the AppMonitorController until stopped.
func (amc *AppMonitorController) Start(stop <-chan struct{}) {
// Don't let panics crash the process
defer utilruntime.HandleCrash()
glog.V(2).Infof("Starting AppMonitor controller...")
amc.start(stop)
// Block until stopped
<-stop
}
// Start the controllers with the stop chan as required by Informers.
func (amc *AppMonitorController) start(stop <-chan struct{}) {
glog.V(2).Infof("Namespace: %s", amc.namespace)
// Run controller for Pod Informer and handle events via callbacks
go amc.informer.podController.Run(stop)
// Run controller for AppMonitor Informer and handle events via callbacks
go amc.informer.appMonitorController.Run(stop)
// Run the AppMonitorController
go amc.Run(stop)
}
// Informers are a combination of a local cache store to buffer the state of a
// given resource locally, and a controller to handle events through callbacks.
//
// Informers sync the APIServer's state of a resource with the local cache
// store.
// Creates a new Informer for the AppMonitorController.
// An AppMonitorController uses a set of Informers to watch and operate on
// Pods and AppMonitor resources in its control loop.
func (amc *AppMonitorController) newAppMonitorControllerInformer() *AppMonitorControllerInformer {
podStore, podController := amc.newPodInformer()
appMonitorStore, appMonitorController := amc.newAppMonitorInformer()
return &AppMonitorControllerInformer{
podStore: podStore,
podController: podController,
appMonitorStore: appMonitorStore,
appMonitorController: appMonitorController,
}
}
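// Note (illustrative): each (store, controller) pair returned above is a standard
// client-go Informer. The controller keeps the store in sync with the APIServer, and
// the control loop below reads the cached state with calls such as
// amc.informer.podStore.List() and amc.informer.appMonitorStore.List() instead of
// hitting the API on every iteration.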
// Create a new Informer on the Pod resources in the cluster to track them.
func (amc *AppMonitorController) newPodInformer() (cache.Store, cache.Controller) {
return cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
return amc.clientSet.CoreV1().Pods(amc.namespace).List(options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
return amc.clientSet.CoreV1().Pods(amc.namespace).Watch(options)
},
},
// The resource that the informer returns
&v1.Pod{},
// The sync interval of the informer
5*time.Second,
// Callback functions for add, delete & update events
cache.ResourceEventHandlerFuncs{
// AddFunc: func(o interface{}) {}
UpdateFunc: amc.handlePodsUpdate,
// DeleteFunc: func(o interface{}) {}
},
)
}
//TODO: Run controller loop
// - Watch for Pods that contain the 'app-monitor.kubedemo.com: true'
//   annotation to indicate that an AppMonitor should operate on it.
// - Watch for any new AppMonitor resources in the current namespace.
// - Poll annotated Pod's heap size rate for an interval of time from Prometheus
// - Compare Pod heap size to Pod memory limit using the memThresholdPercent
// Callback for updates to a Pod Informer
func (amc *AppMonitorController) handlePodsUpdate(oldObj, newObj interface{}) {
// Make a copy of the object, to not mutate the original object from the local
// cache store
pod, err := utils.CopyObjToPod(newObj)
if err != nil {
glog.Errorf("Failed to copy Pod object: %v", err)
return
}
if _, ok := pod.Annotations[AppMonitorAnnotation]; !ok {
return
}
glog.V(2).Infof("Received update for annotated Pod: %s | Annotations: %s", pod.Name, pod.Annotations)
}
// Create a new Informer on the AppMonitor resources in the cluster to
// track them.
func (amc *AppMonitorController) newAppMonitorInformer() (cache.Store, cache.Controller) {
return cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
// Retrieve an AppMonitorList from the API
result := &AppMonitorList{}
err := amc.restClient.Get().
Resource(ResourceNamePlural).
VersionedParams(&options, api.ParameterCodec).
Do().
Into(result)
return result, err
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
// Watch the AppMonitors in the API
return amc.restClient.Get().
Prefix("watch").
Namespace(amc.namespace).
Resource(ResourceNamePlural).
VersionedParams(&options, api.ParameterCodec).
Watch()
},
},
// The resource that the informer returns
&AppMonitor{},
// The sync interval of the informer
5*time.Second,
// Callback functions for add, delete & update events
cache.ResourceEventHandlerFuncs{
// AddFunc: func(o interface{}) {}
UpdateFunc: amc.handleAppMonitorsUpdate,
// DeleteFunc: func(o interface{}) {}
},
)
}
// Callback for updates to an AppMonitor Informer
func (amc *AppMonitorController) handleAppMonitorsUpdate(oldObj, newObj interface{}) {
// Make a copy of the object, to not mutate the original object from the local
// cache store
am, err := CopyObjToAppMonitor(newObj)
if err != nil {
glog.Errorf("Failed to copy AppMonitor object: %v", err)
return
}
glog.V(2).Infof("Received update for AppMonitor: %s | "+
"memThresholdPercent=%.2f | memMultiplier=%.2f",
am.Metadata.Name,
am.Spec.MemThresholdPercent,
am.Spec.MemMultiplier,
)
}
// Run begins the AppMonitorController.
func (amc *AppMonitorController) Run(stop <-chan struct{}) {
for {
select {
case <-stop:
glog.V(2).Infof("Shutting down AppMonitor controller...")
return
default:
amc.run()
time.Sleep(2 * time.Second)
}
}
}
func (amc *AppMonitorController) run() {
glog.V(2).Infof("In AppMonitorController loop...")
// #########################################################################
// Select the Pods annotated to be managed by an AppMonitor
// #########################################################################
// List the pods as a []interface{} from the local cache store, representing
// a PodList.
podListObj := amc.informer.podStore.List()
var err error
// Make a copy of the object, to not mutate the original object from the local
// cache store.
pods, err := utils.CopyObjToPods(podListObj)
if err != nil {
glog.Errorf("Failed to copy object into Pods: %v", err)
return
}
// Select Pods that are annotated to be monitored by an AppMonitor.
var annotatedPods []v1.Pod
annotatedPods, err = utils.SelectAnnotatedPods(pods, AppMonitorAnnotation)
if err != nil {
glog.Errorf("Failed to select annotated pods: %v", err)
return
}
// #########################################################################
// Select the AppMonitor for the Namespace
// #########################################################################
// Use the first AppMonitor found as the operator currently
// only supports a single AppMonitor per Namespace.
// TODO (fix): This arbitrarily selects the first AppMonitor from the
// list in the store. Order is not guaranteed.
amListObj := amc.informer.appMonitorStore.List()
ams, err := CopyObjToAppMonitors(amListObj)
if err != nil {
glog.Errorf("Failed to copy object into AppMonitors: %v", err)
return
} else if len(ams) != 1 {
glog.Errorf("No AppMonitors to list.")
return
}
am := ams[0]
// #########################################################################
// Iterate on the annotated Pods, searching for Pods in need of vertical
// scaling / redeployment.
// #########################################################################
for _, pod := range annotatedPods {
glog.V(2).Infof("Iterating on Annotated Pod from Store PodList: %s", pod.Name)
glog.V(2).Infof("Iterating on AppMonitor from Store AppMonitorList: %s",
am.Metadata.Name)
// Pull metrics off Pod & AppMonitor to determine if Pod needs a redeploy.
container := pod.Spec.Containers[0]
podLimits, _ := container.Resources.Limits["memory"]
podLimitsBytes, _ := podLimits.AsInt64()
// Query Prometheus for the current bytes for the Pod
// Create a new Prometheus Client
c, err := prometheusClient.NewClient(
prometheusClient.Config{
Address: amc.prometheusAddr,
},
)
if err != nil {
glog.Errorf("Failed to create client to Prometheus API: %v", err)
return
}
promClient := prometheus.NewAPI(c)
queryString := `container_memory_usage_bytes
{
namespace="default",
pod_name="%s",
container_name="memhog"
}
`
query := fmt.Sprintf(queryString, pod.Name)
glog.V(2).Infof("Prometheus Query: %s", query)
rawResults, err := queryPrometheus(promClient, query)
if err != nil {
glog.Errorf("Failed to query the Prometheus API: %v", err)
return
}
glog.V(2).Infof("Prometheus query raw results: %s", rawResults)
results := getMatrixValuesFromResults(rawResults)
if len(results) == 0 {
glog.V(2).Infof("Prometheus query results: empty")
return
}
// Arbitrarily choose the first metric values returned in a
// possible series of metrics
r := results[0]
// Retrieve the latest sample in the (time, value) series; skip this Pod if the
// query returned no samples rather than waiting forever on an empty result.
if len(r.Values) == 0 {
glog.V(2).Infof("Prometheus query returned no samples for Pod: %s", pod.Name)
continue
}
val := r.Values[len(r.Values)-1].Value
currentBytes := int(val)
thresholdBytes := int(am.Spec.MemThresholdPercent) * int(podLimitsBytes) / 100
// Check if the Pod needs redeployment, else continue onto the next Pod.
if !needsRedeploy(currentBytes, thresholdBytes) {
glog.V(2).Infof("Pod is operating normally: "+
"%s | currentBytes: %d | thresholdBytes: %d",
pod.Name, currentBytes, thresholdBytes)
continue
}
// Redeploy the Pod with AppMonitor settings, if a redeploy is not already
// in progress.
// This operates a vertical autoscaling of the Pod.
if !redeployInProgress(&pod) {
glog.V(2).Infof("-------------------------------------------------------")
glog.V(2).Infof("Pod *needs* redeployment: "+
"%s | currentBytes: %d | thresholdBytes: %d",
pod.Name, currentBytes, thresholdBytes)
glog.V(2).Infof("-------------------------------------------------------")
err := amc.redeployPodWithAppMonitor(&pod, &am)
if err != nil {
glog.V(2).Infof("Failed to vertically autoscale Pod: %s | "+
"Error autoscaling: %v", pod.Name, err)
continue
}
} else {
glog.V(2).Infof("Redeploy in Progress for Pod: %s", pod.Name)
}
}
}
func needsRedeploy(currentBytes, thresholdBytes int) bool {
if currentBytes >= thresholdBytes {
return true
}
return false
}
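// Worked example (illustration only): for a container memory limit of 100Mi
// (104857600 bytes) and MemThresholdPercent = 80, thresholdBytes works out to
// 80 * 104857600 / 100 = 83886080, so the Pod is redeployed once its
// container_memory_usage_bytes reaches roughly 80Mi.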
func redeployInProgress(pod *v1.Pod) bool {
if _, exists := pod.Annotations[AppMonitorAnnotationRedeployInProgress]; !exists {
return false
}
return true
}
func (amc *AppMonitorController) redeployPodWithAppMonitor(pod *v1.Pod, am *AppMonitor) error {
// Annotate the Pod in the cluster
pod.Annotations[AppMonitorAnnotationRedeployInProgress] = "true"
_, err := amc.clientSet.CoreV1().Pods(amc.namespace).Update(pod)
if err != nil {
return err
}
glog.V(2).Infof("Pod has been annotated: %s | Annotations: %s", pod.Name, pod.ObjectMeta.Annotations)
// Create a new Pod in the cluster
newPod := amc.newPodFromPod(pod, am)
_, err = amc.clientSet.CoreV1().Pods(amc.namespace).Create(newPod)
if err != nil {
return err
}
glog.V(2).Infof("-----------------------------------------------------------")
glog.V(2).Infof("AppMonitor autoscaled Pod: '%s' to '%s'", pod.Name, newPod.Name)
glog.V(2).Infof("-----------------------------------------------------------")
// Terminate Pod that crossed the threshold
err = amc.clientSet.CoreV1().Pods(amc.namespace).Delete(pod.Name, nil)
if err != nil {
return err
}
glog.V(2).Infof("-----------------------------------------------------------")
glog.V(2).Infof("Pod has been terminated: %s", pod.Name)
glog.V(2).Infof("-----------------------------------------------------------")
return nil
}
// Creates a new Pod for redeployment, based on the Pod being monitored
func (amc *AppMonitorController) newPodFromPod(pod *v1.Pod, am *AppMonitor) *v1.Pod {
// Copy spec of the first container in the Pod
newContainer := pod.Spec.Containers[0]
// Reset VolumeMounts
newContainer.VolumeMounts = nil
// Set new resource limits based on the AppMonitor MemMultiplier
podLimits, _ := newContainer.Resources.Limits["memory"]
podLimitsBytes, _ := podLimits.AsInt64()
newLimitsBytes := podLimitsBytes * int64(am.Spec.MemMultiplier)
podRequests, _ := newContainer.Resources.Requests["memory"]
podRequestsBytes, _ := podRequests.AsInt64()
newRequestsBytes := podRequestsBytes * int64(am.Spec.MemMultiplier)
newContainer.Resources.Limits = v1.ResourceList{
v1.ResourceMemory: *resource.NewQuantity(
newLimitsBytes,
resource.BinarySI),
}
newContainer.Resources.Requests = v1.ResourceList{
v1.ResourceMemory: *resource.NewQuantity(
newRequestsBytes,
resource.BinarySI),
}
// Create and return new Pod with AppMonitor settings applied
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-autoscaled", pod.Name),
Namespace: pod.Namespace,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
newContainer,
},
},
}
}
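// Worked example (illustration only): with MemMultiplier = 2, a monitored Pod whose
// container requests 64Mi and is limited to 128Mi is replaced by a Pod named
// "<original>-autoscaled" requesting 128Mi with a 256Mi limit.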
// Query Prometheus over a range of time
func queryPrometheus(client prometheus.API, query string) (
model.Value, error) {
now := time.Now()
start := now.Add(-1 * time.Second)
results, err := client.QueryRange(
context.Background(),
query,
prometheus.Range{
Start: start,
End: now,
Step: time.Second,
},
)
if err != nil {
return nil, err
}
return results, nil
}
// Extract the Values of a Prometheus query for results of
//type prometheus.Matrix
func getMatrixValuesFromResults(results model.Value) []*model.SampleStream {
// Type assert the interface to a model.Matrix
matrix := results.(model.Matrix)
// Type convert the matrix to a model.SampleStream to extract its stream of
// values holding the data
ss := []*model.SampleStream(matrix)
return ss
}
| [
"\"POD_NAMESPACE\""
] | [] | [
"POD_NAMESPACE"
] | [] | ["POD_NAMESPACE"] | go | 1 | 0 | |
vendor/github.com/ksonnet/kubecfg/cmd/root.go | // Copyright 2017 The kubecfg authors
//
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"bytes"
"encoding/json"
goflag "flag"
"fmt"
"io"
"os"
"path"
"path/filepath"
"reflect"
"strings"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"golang.org/x/crypto/ssh/terminal"
"k8s.io/client-go/discovery"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/tools/clientcmd"
"github.com/ksonnet/kubecfg/metadata"
"github.com/ksonnet/kubecfg/template"
"github.com/ksonnet/kubecfg/utils"
// Register auth plugins
_ "k8s.io/client-go/plugin/pkg/client/auth"
)
const (
flagVerbose = "verbose"
flagJpath = "jpath"
flagExtVar = "ext-str"
flagExtVarFile = "ext-str-file"
flagTlaVar = "tla-str"
flagTlaVarFile = "tla-str-file"
flagResolver = "resolve-images"
flagResolvFail = "resolve-images-error"
flagAPISpec = "api-spec"
// For use in the commands (e.g., diff, apply, delete) that require either an
// environment or the -f flag.
flagFile = "file"
flagFileShort = "f"
componentsExtCodeKey = "__ksonnet/components"
)
var clientConfig clientcmd.ClientConfig
var overrides clientcmd.ConfigOverrides
var loadingRules clientcmd.ClientConfigLoadingRules
func init() {
RootCmd.PersistentFlags().CountP(flagVerbose, "v", "Increase verbosity. May be given multiple times.")
// The "usual" clientcmd/kubectl flags
loadingRules = *clientcmd.NewDefaultClientConfigLoadingRules()
loadingRules.DefaultClientConfig = &clientcmd.DefaultClientConfig
clientConfig = clientcmd.NewInteractiveDeferredLoadingClientConfig(&loadingRules, &overrides, os.Stdin)
RootCmd.PersistentFlags().Set("logtostderr", "true")
}
func bindJsonnetFlags(cmd *cobra.Command) {
cmd.PersistentFlags().StringSliceP(flagJpath, "J", nil, "Additional jsonnet library search path")
cmd.PersistentFlags().StringSliceP(flagExtVar, "V", nil, "Values of external variables")
cmd.PersistentFlags().StringSlice(flagExtVarFile, nil, "Read external variable from a file")
cmd.PersistentFlags().StringSliceP(flagTlaVar, "A", nil, "Values of top level arguments")
cmd.PersistentFlags().StringSlice(flagTlaVarFile, nil, "Read top level argument from a file")
cmd.PersistentFlags().String(flagResolver, "noop", "Change implementation of resolveImage native function. One of: noop, registry")
cmd.PersistentFlags().String(flagResolvFail, "warn", "Action when resolveImage fails. One of ignore,warn,error")
}
func bindClientGoFlags(cmd *cobra.Command) {
kflags := clientcmd.RecommendedConfigOverrideFlags("")
ep := &loadingRules.ExplicitPath
cmd.PersistentFlags().StringVar(ep, "kubeconfig", "", "Path to a kube config. Only required if out-of-cluster")
clientcmd.BindOverrideFlags(&overrides, cmd.PersistentFlags(), kflags)
}
// RootCmd is the root of cobra subcommand tree
var RootCmd = &cobra.Command{
Use: "kubecfg",
Short: "Synchronise Kubernetes resources with config files",
SilenceErrors: true,
SilenceUsage: true,
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
goflag.CommandLine.Parse([]string{})
flags := cmd.Flags()
out := cmd.OutOrStderr()
log.SetOutput(out)
logFmt := NewLogFormatter(out)
log.SetFormatter(logFmt)
verbosity, err := flags.GetCount(flagVerbose)
if err != nil {
return err
}
log.SetLevel(logLevel(verbosity))
return nil
},
}
// clientConfig.Namespace() is broken in client-go 3.0:
// namespace in config erroneously overrides explicit --namespace
func defaultNamespace() (string, error) {
if overrides.Context.Namespace != "" {
return overrides.Context.Namespace, nil
}
ns, _, err := clientConfig.Namespace()
return ns, err
}
func logLevel(verbosity int) log.Level {
switch verbosity {
case 0:
return log.InfoLevel
default:
return log.DebugLevel
}
}
type logFormatter struct {
escapes *terminal.EscapeCodes
colorise bool
}
// NewLogFormatter creates a new log.Formatter customised for writer
func NewLogFormatter(out io.Writer) log.Formatter {
var ret = logFormatter{}
if f, ok := out.(*os.File); ok {
ret.colorise = terminal.IsTerminal(int(f.Fd()))
ret.escapes = terminal.NewTerminal(f, "").Escape
}
return &ret
}
func (f *logFormatter) levelEsc(level log.Level) []byte {
switch level {
case log.DebugLevel:
return []byte{}
case log.WarnLevel:
return f.escapes.Yellow
case log.ErrorLevel, log.FatalLevel, log.PanicLevel:
return f.escapes.Red
default:
return f.escapes.Blue
}
}
func (f *logFormatter) Format(e *log.Entry) ([]byte, error) {
buf := bytes.Buffer{}
if f.colorise {
buf.Write(f.levelEsc(e.Level))
fmt.Fprintf(&buf, "%-5s ", strings.ToUpper(e.Level.String()))
buf.Write(f.escapes.Reset)
}
buf.WriteString(strings.TrimSpace(e.Message))
buf.WriteString("\n")
return buf.Bytes(), nil
}
func newExpander(cmd *cobra.Command) (*template.Expander, error) {
flags := cmd.Flags()
spec := template.Expander{}
var err error
spec.EnvJPath = filepath.SplitList(os.Getenv("KUBECFG_JPATH"))
spec.FlagJpath, err = flags.GetStringSlice(flagJpath)
if err != nil {
return nil, err
}
spec.ExtVars, err = flags.GetStringSlice(flagExtVar)
if err != nil {
return nil, err
}
spec.ExtVarFiles, err = flags.GetStringSlice(flagExtVarFile)
if err != nil {
return nil, err
}
spec.TlaVars, err = flags.GetStringSlice(flagTlaVar)
if err != nil {
return nil, err
}
spec.TlaVarFiles, err = flags.GetStringSlice(flagTlaVarFile)
if err != nil {
return nil, err
}
spec.Resolver, err = flags.GetString(flagResolver)
if err != nil {
return nil, err
}
spec.FailAction, err = flags.GetString(flagResolvFail)
if err != nil {
return nil, err
}
return &spec, nil
}
// For debugging
func dumpJSON(v interface{}) string {
buf := bytes.NewBuffer(nil)
enc := json.NewEncoder(buf)
enc.SetIndent("", " ")
if err := enc.Encode(v); err != nil {
return err.Error()
}
return buf.String()
}
func restClientPool(cmd *cobra.Command, envName *string) (dynamic.ClientPool, discovery.DiscoveryInterface, error) {
if envName != nil {
err := overrideCluster(*envName)
if err != nil {
return nil, nil, err
}
}
conf, err := clientConfig.ClientConfig()
if err != nil {
return nil, nil, err
}
disco, err := discovery.NewDiscoveryClientForConfig(conf)
if err != nil {
return nil, nil, err
}
discoCache := utils.NewMemcachedDiscoveryClient(disco)
mapper := discovery.NewDeferredDiscoveryRESTMapper(discoCache, dynamic.VersionInterfaces)
pathresolver := dynamic.LegacyAPIPathResolverFunc
pool := dynamic.NewClientPool(conf, mapper, pathresolver)
return pool, discoCache, nil
}
type envSpec struct {
env *string
files []string
}
// addEnvCmdFlags adds the flags that are common to the family of commands
// whose form is `[<env>|-f <file-name>]`, e.g., `apply` and `delete`.
func addEnvCmdFlags(cmd *cobra.Command) {
cmd.PersistentFlags().StringArrayP(flagFile, flagFileShort, nil, "Filename or directory that contains the configuration to apply (accepts YAML, JSON, and Jsonnet)")
}
// parseEnvCmd parses the family of commands that come in the form `[<env>|-f
// <file-name>]`, e.g., `apply` and `delete`.
func parseEnvCmd(cmd *cobra.Command, args []string) (*envSpec, error) {
flags := cmd.Flags()
files, err := flags.GetStringArray(flagFile)
if err != nil {
return nil, err
}
var env *string
if len(args) == 1 {
env = &args[0]
}
return &envSpec{env: env, files: files}, nil
}
// overrideCluster ensures that the cluster URI specified in the environment is
// associated in the user's kubeconfig file during deployment to a ksonnet
// environment. We will error out if it is not.
//
// If the environment URI the user is attempting to deploy to is not the current
// kubeconfig context, we must manually override the client-go --cluster flag
// to ensure we are deploying to the correct cluster.
func overrideCluster(envName string) error {
cwd, err := os.Getwd()
if err != nil {
return err
}
wd := metadata.AbsPath(cwd)
metadataManager, err := metadata.Find(wd)
if err != nil {
return err
}
rawConfig, err := clientConfig.RawConfig()
if err != nil {
return err
}
var clusterURIs = make(map[string]string)
for name, cluster := range rawConfig.Clusters {
clusterURIs[cluster.Server] = name
}
//
// check to ensure that the environment we are trying to deploy to is
// created, and that the environment URI is located in kubeconfig.
//
log.Debugf("Validating deployment at '%s' with cluster URIs '%v'", envName, reflect.ValueOf(clusterURIs).MapKeys())
env, err := metadataManager.GetEnvironment(envName)
if err != nil {
return err
}
if _, ok := clusterURIs[env.URI]; ok {
clusterName := clusterURIs[env.URI]
log.Debugf("Overwriting --cluster flag with '%s'", clusterName)
overrides.Context.Cluster = clusterName
return nil
}
return fmt.Errorf("Attempting to deploy to environment '%s' at %s, but there are no clusters with that URI", envName, env.URI)
}
// expandEnvCmdObjs finds and expands templates for the family of commands of
// the form `[<env>|-f <file-name>]`, e.g., `apply` and `delete`. That is, if
// the user passes a list of files, we will expand all templates in those files,
// while if a user passes an environment name, we will expand all component
// files using that environment.
func expandEnvCmdObjs(cmd *cobra.Command, envSpec *envSpec, cwd metadata.AbsPath) ([]*unstructured.Unstructured, error) {
expander, err := newExpander(cmd)
if err != nil {
return nil, err
}
//
// Get all filenames that contain templates to expand. Importantly, we need to
// enforce the form `[<env-name>|-f <file-name>]`; that is, we need to make
// sure that the user either passed an environment name or a `-f` flag.
//
envPresent := envSpec.env != nil
filesPresent := len(envSpec.files) > 0
if !envPresent && !filesPresent {
return nil, fmt.Errorf("Must specify either an environment or a file list, or both")
}
fileNames := envSpec.files
if envPresent {
manager, err := metadata.Find(cwd)
if err != nil {
return nil, err
}
libPath, envLibPath := manager.LibPaths(*envSpec.env)
expander.FlagJpath = append([]string{string(libPath), string(envLibPath)}, expander.FlagJpath...)
if !filesPresent {
fileNames, err = manager.ComponentPaths()
if err != nil {
return nil, err
}
baseObjExtCode := fmt.Sprintf("%s=%s", componentsExtCodeKey, constructBaseObj(fileNames))
expander.ExtCodes = append([]string{baseObjExtCode}, expander.ExtCodes...)
}
}
//
// Expand templates.
//
return expander.Expand(fileNames)
}
// constructBaseObj constructs the base Jsonnet object that represents k-v
// pairs of component name -> component imports. For example,
//
// {
// foo: import "components/foo.jsonnet"
// }
func constructBaseObj(paths []string) string {
var obj bytes.Buffer
obj.WriteString("{\n")
for _, p := range paths {
ext := path.Ext(p)
if ext != ".jsonnet" {
continue
}
name := strings.TrimSuffix(path.Base(p), ext)
fmt.Fprintf(&obj, " %s: import \"%s\",\n", name, p)
}
obj.WriteString("}\n")
return obj.String()
}
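// Usage sketch (illustration only): constructBaseObj([]string{
//   "components/foo.jsonnet", "components/bar.yaml"}) yields
//
//   {
//     foo: import "components/foo.jsonnet",
//   }
//
// because non-.jsonnet components such as bar.yaml are filtered out.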
| [
"\"KUBECFG_JPATH\""
] | [] | [
"KUBECFG_JPATH"
] | [] | ["KUBECFG_JPATH"] | go | 1 | 0 | |
commands/create.go | package commands
import (
"bytes"
"context"
"encoding/csv"
"fmt"
"net/url"
"os"
"strings"
"time"
"github.com/docker/buildx/driver"
"github.com/docker/buildx/store"
"github.com/docker/buildx/store/storeutil"
"github.com/docker/buildx/util/cobrautil"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/google/shlex"
"github.com/moby/buildkit/util/appcontext"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
type createOptions struct {
name string
driver string
nodeName string
platform []string
actionAppend bool
actionLeave bool
use bool
flags string
configFile string
driverOpts []string
bootstrap bool
// upgrade bool // perform upgrade of the driver
}
func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
ctx := appcontext.Context()
if in.name == "default" {
return errors.Errorf("default is a reserved name and cannot be used to identify builder instance")
}
if in.actionLeave {
if in.name == "" {
return errors.Errorf("leave requires instance name")
}
if in.nodeName == "" {
return errors.Errorf("leave requires node name but --node not set")
}
}
if in.actionAppend {
if in.name == "" {
logrus.Warnf("append used without name, creating a new instance instead")
}
}
driverName := in.driver
if driverName == "" {
f, err := driver.GetDefaultFactory(ctx, dockerCli.Client(), true)
if err != nil {
return err
}
if f == nil {
return errors.Errorf("no valid drivers found")
}
driverName = f.Name()
}
if driver.GetFactory(driverName, true) == nil {
return errors.Errorf("failed to find driver %q", in.driver)
}
txn, release, err := storeutil.GetStore(dockerCli)
if err != nil {
return err
}
defer release()
name := in.name
if name == "" {
name, err = store.GenerateName(txn)
if err != nil {
return err
}
}
ng, err := txn.NodeGroupByName(name)
if err != nil {
if os.IsNotExist(errors.Cause(err)) {
if in.actionAppend && in.name != "" {
logrus.Warnf("failed to find %q for append, creating a new instance instead", in.name)
}
if in.actionLeave {
return errors.Errorf("failed to find instance %q for leave", name)
}
} else {
return err
}
}
if ng != nil {
if in.nodeName == "" && !in.actionAppend {
return errors.Errorf("existing instance for %s but no append mode, specify --node to make changes for existing instances", name)
}
}
if ng == nil {
ng = &store.NodeGroup{
Name: name,
}
}
if ng.Driver == "" || in.driver != "" {
ng.Driver = driverName
}
var flags []string
if in.flags != "" {
flags, err = shlex.Split(in.flags)
if err != nil {
return errors.Wrap(err, "failed to parse buildkit flags")
}
}
var ep string
if in.actionLeave {
if err := ng.Leave(in.nodeName); err != nil {
return err
}
} else {
if len(args) > 0 {
ep, err = validateEndpoint(dockerCli, args[0])
if err != nil {
return err
}
} else {
if dockerCli.CurrentContext() == "default" && dockerCli.DockerEndpoint().TLSData != nil {
return errors.Errorf("could not create a builder instance with TLS data loaded from environment. Please use `docker context create <context-name>` to create a context for current environment and then create a builder instance with `docker buildx create <context-name>`")
}
ep, err = storeutil.GetCurrentEndpoint(dockerCli)
if err != nil {
return err
}
}
if in.driver == "kubernetes" {
// naming the endpoint so that --append works
ep = (&url.URL{
Scheme: in.driver,
Path: "/" + in.name,
RawQuery: (&url.Values{
"deployment": {in.nodeName},
"kubeconfig": {os.Getenv("KUBECONFIG")},
}).Encode(),
}).String()
}
m, err := csvToMap(in.driverOpts)
if err != nil {
return err
}
if err := ng.Update(in.nodeName, ep, in.platform, len(args) > 0, in.actionAppend, flags, in.configFile, m); err != nil {
return err
}
}
if err := txn.Save(ng); err != nil {
return err
}
if in.use && ep != "" {
current, err := storeutil.GetCurrentEndpoint(dockerCli)
if err != nil {
return err
}
if err := txn.SetCurrent(current, ng.Name, false, false); err != nil {
return err
}
}
ngi := &nginfo{ng: ng}
timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
defer cancel()
if err = loadNodeGroupData(timeoutCtx, dockerCli, ngi); err != nil {
return err
}
if in.bootstrap {
if _, err = boot(ctx, ngi); err != nil {
return err
}
}
fmt.Printf("%s\n", ng.Name)
return nil
}
func createCmd(dockerCli command.Cli) *cobra.Command {
var options createOptions
var drivers bytes.Buffer
for _, d := range driver.GetFactories() {
if len(drivers.String()) > 0 {
drivers.WriteString(", ")
}
drivers.WriteString(fmt.Sprintf(`"%s"`, d.Name()))
}
cmd := &cobra.Command{
Use: "create [OPTIONS] [CONTEXT|ENDPOINT]",
Short: "Create a new builder instance",
Args: cli.RequiresMaxArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
return runCreate(dockerCli, options, args)
},
}
flags := cmd.Flags()
flags.StringVar(&options.name, "name", "", "Builder instance name")
flags.StringVar(&options.driver, "driver", "", fmt.Sprintf("Driver to use (available: %s)", drivers.String()))
flags.StringVar(&options.nodeName, "node", "", "Create/modify node with given name")
flags.StringVar(&options.flags, "buildkitd-flags", "", "Flags for buildkitd daemon")
flags.StringVar(&options.configFile, "config", "", "BuildKit config file")
flags.StringArrayVar(&options.platform, "platform", []string{}, "Fixed platforms for current node")
flags.StringArrayVar(&options.driverOpts, "driver-opt", []string{}, "Options for the driver")
flags.BoolVar(&options.bootstrap, "bootstrap", false, "Boot builder after creation")
flags.BoolVar(&options.actionAppend, "append", false, "Append a node to builder instead of changing it")
flags.BoolVar(&options.actionLeave, "leave", false, "Remove a node from builder instead of changing it")
flags.BoolVar(&options.use, "use", false, "Set the current builder instance")
// hide builder persistent flag for this command
cobrautil.HideInheritedFlags(cmd, "builder")
return cmd
}
func csvToMap(in []string) (map[string]string, error) {
m := make(map[string]string, len(in))
for _, s := range in {
csvReader := csv.NewReader(strings.NewReader(s))
fields, err := csvReader.Read()
if err != nil {
return nil, err
}
for _, v := range fields {
p := strings.SplitN(v, "=", 2)
if len(p) != 2 {
return nil, errors.Errorf("invalid value %q, expecting k=v", v)
}
m[p[0]] = p[1]
}
}
return m, nil
}
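// exampleCSVToMap is an illustrative sketch added for clarity and is not part of
// the original source: it shows how csvToMap parses the comma-separated
// "--driver-opt" values accepted above. The option names used here are hypothetical.
func exampleCSVToMap() (map[string]string, error) {
    // Returns map[string]string{"network": "host", "image": "moby/buildkit:master"}.
    return csvToMap([]string{"network=host,image=moby/buildkit:master"})
}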
| ["\"KUBECONFIG\""] | [] | ["KUBECONFIG"] | [] | ["KUBECONFIG"] | go | 1 | 0 | |
sales_forecast/scoring/score.py | import numpy
import os
import math
from azureml.core.model import Model
from azureml.core.dataset import Dataset
from inference_schema.schema_decorators \
import input_schema, output_schema
from inference_schema.parameter_types.numpy_parameter_type \
import NumpyParameterType
import keras
from keras.models import load_model
from sklearn.preprocessing import MinMaxScaler
from azureml.core.run import Run
from azureml.core import Dataset, Datastore, Workspace
import argparse
import json
import pandas as pd
import numpy as np
from azureml.core.authentication import ServicePrincipalAuthentication
# from azureml.core.authentication import InteractiveLoginAuthentication
def tts(data):
data['date'] = pd.to_datetime(data['date'])
data['date'] = (data['date'] - pd.Timestamp("1970-01-01")) // pd.Timedelta('1s')
(train, test) = data[0:-2000].values, data[-2000:].values
return (train, test)
def scale_data(train_set, test_set):
# apply Min Max Scaler
scaler = MinMaxScaler(feature_range=(-1, 1))
scaler = scaler.fit(train_set[:, :4])
# reshape training set
train_set = train_set.reshape(train_set.shape[0], train_set.shape[1])
train_set_scaled = scaler.transform(train_set[:, :4])
# reshape test set
test_set = test_set.reshape(test_set.shape[0], test_set.shape[1])
test_set_scaled = scaler.transform(test_set[:, :4])
X_train, y_train = train_set[:, :4], train_set[:, 4:].ravel()
X_test, y_test = test_set[:, :4], test_set[:, 4:].ravel()
return X_train, y_train, X_test, y_test, scaler
def init():
# load the model from file into a global object
global model
model_path = Model.get_model_path(
os.getenv("AZUREML_MODEL_DIR").split('/')[-2])
print ("model path", model_path)
# try:
# print ("try")
# dataset = pd.read_csv('/var/azureml-app/train.csv')
# original_df = dataset.to_pandas_dataframe()
# except:
# print ("except")
# train_dataset = original_df.to_csv('train.csv', index=False)
# interactive_auth = InteractiveLoginAuthentication(tenant_id="def44f5f-0783-4b05-8f2f-dd615c5dfec4")
# ws = Workspace(subscription_id="6542067a-127a-43ff-b7f2-007fe21a37f0",
# resource_group="sales-mlops-rg",
# workspace_name="sales-mlops-ws",
# auth=interactive_auth)
# ws.get_details()
# print(original_df)
model = keras.models.load_model(model_path)
print("Current directory:", os.getcwd())
print("Model is loaded")
# date = '6/25/2020'
# store = 3
# item = 105
# price = 990
# date = pd.to_datetime(date)
# date = (date - pd.Timestamp("1970-01-01")) // pd.Timedelta('1s')
# input_sample = numpy.array([[date, store, item, price]])
# output_sample = numpy.array([4])
input_sample = numpy.array([[1591833600,34,759,690]])
output_sample = numpy.array([10])
@input_schema('data', NumpyParameterType(input_sample))
@output_schema(NumpyParameterType(output_sample))
def run(data, request_headers):
global original_df
sp = ServicePrincipalAuthentication(tenant_id="def44f5f-0783-4b05-8f2f-dd615c5dfec4", service_principal_id="add8f304-2d88-45e3-94fa-ac6cf335d5df", service_principal_password="If2-.7Wlno57NW6v9~nE~xNIj~naD-DL5f")
ws = Workspace.get(name="sales-mlops-ws", auth = sp, subscription_id="6542067a-127a-43ff-b7f2-007fe21a37f0")
ws.get_details()
dataset = ws.datasets['salesforecast_ds']
original_df = dataset.to_pandas_dataframe()
# date = '6/25/2020'
# store = 34
# item = 759
# price = 690
# date = pd.to_datetime(date)
# date = (date - pd.Timestamp("1970-01-01")) // pd.Timedelta('1s')
date = data[0][0]
prev_sales = []
(train, test) = tts(original_df)
X_train, y_train, X_test, y_test, scaler_object = scale_data(train, test)
first_date = original_df["date"][0]
for x in original_df.index:
last_date = original_df["date"][x]
print("last date", last_date)
days_diff = (int(date) - int(last_date)) / (60 * 60 * 24)
total_data_days = (int(last_date) - int(first_date)) / (60 * 60 * 24)
print("days:", days_diff)
print("total_data_days:", total_data_days)
for i in original_df.index:
if (original_df["item"][i] == data[0][2] and original_df["store"][i] == data[0][1]):
prev_sales.append(original_df["sales"][i])
prev_sales_avg = 0
prev_sales_avg = (sum(prev_sales)) / total_data_days
forecast_result_array = []
test_set = data
test_set_scaled = scaler_object.transform(test_set)
X_test = test_set_scaled[:, :4]
X_test = X_test.reshape(X_test.shape[0], 1, X_test.shape[1])
y_pred = model.predict(X_test)
print("y_pred:",y_pred)
result = y_pred[0][0][0]
result = round(result)
print("result:",result)
prev_sales_avg = round (prev_sales_avg)
next_day_prediction = math.ceil(result + prev_sales_avg)
prev_sales.append(next_day_prediction)
forecast_result_array.append(next_day_prediction)
if days_diff > 1:
for day in range(round(days_diff)):
total_data_days += 1
prev_sales_avg = sum(prev_sales) / total_data_days
prev_sales_avg = round(prev_sales_avg)
prev_sales.append(prev_sales_avg)
forecast_result_array.append(prev_sales_avg)
end_result = sum(forecast_result_array)
print("end result: ", end_result)
print(('{{"RequestId":"{0}", '
'"TraceParent":"{1}", '
'"NumberOfPredictions":{2}}}'
).format(
request_headers.get("X-Ms-Request-Id", ""),
request_headers.get("Traceparent", ""),
end_result
))
return {"result": end_result}
if __name__ == "__main__":
    init()
    date = '6/25/2020'
    store = 34
    item = 759
    price = 690
    date = pd.to_datetime(date)
    date = (date - pd.Timestamp("1970-01-01")) // pd.Timedelta('1s')
    test = numpy.array([[date, store, item, price]])
    # print("test:", test)
    # test = numpy.array([[1591833600, 34, 759, 690]])
    prediction = run(test, {})
    print("Test result: ", prediction)
| [] | [] | ["AZUREML_MODEL_DIR"] | [] | ["AZUREML_MODEL_DIR"] | python | 1 | 0 | |
examples/get_cluster_logs.go | /*
Copyright (c) 2019 Red Hat, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This example shows how to retrieve the contents of the logs of a cluster.
package main
import (
"context"
"fmt"
"log"
"os"
"github.com/openshift-online/uhc-sdk-go"
cmv1 "github.com/openshift-online/uhc-sdk-go/clustersmgmt/v1"
)
func main() {
// Create a context:
ctx := context.Background()
// Create a logger that has the debug level enabled:
logger, err := sdk.NewGoLoggerBuilder().
Debug(true).
Build()
if err != nil {
log.Fatalf("Can't build logger: %v", err)
}
// Create the connection, and remember to close it:
token := os.Getenv("UHC_TOKEN")
connection, err := sdk.NewConnectionBuilder().
Logger(logger).
Tokens(token).
BuildContext(ctx)
if err != nil {
fmt.Fprintf(os.Stderr, "Can't build connection: %v\n", err)
os.Exit(1)
}
defer connection.Close()
// Get the client for the resource that manages the collection of clusters:
clustersResource := connection.ClustersMgmt().V1().Clusters()
// Get the client for the resource that manages the collection of logs for the cluster that
// we are looking for. Note that this will not send any request to the server yet, so it
// will succeed even if that cluster doesn't exist.
logsCollection := clustersResource.Cluster("1Jam7Ejgpm7AbZshbgaA9TsM1SQ").Logs()
// Send the request to retrieve the collection of logs:
listResponse, err := logsCollection.List().SendContext(ctx)
if err != nil {
fmt.Fprintf(os.Stderr, "Can't retrieve list of logs: %v\n", err)
os.Exit(1)
}
// The response obtained from the above list operation will contain the identifier of each
// log, but not the content. To obtain the content it is necessary to send a request for
// that specific log.
listResponse.Items().Each(func(log *cmv1.Log) bool {
logID := log.ID()
logResource := logsCollection.Log(logID)
getResponse, err := logResource.Get().SendContext(ctx)
if err != nil {
fmt.Fprintf(
os.Stderr,
"Can't retrive details of log '%s': %v\n",
logID, err,
)
os.Exit(1)
}
log = getResponse.Body()
logContent := log.Content()
fmt.Printf("%s:\n%s\n", logID, logContent)
return true
})
}
| ["\"UHC_TOKEN\""] | [] | ["UHC_TOKEN"] | [] | ["UHC_TOKEN"] | go | 1 | 0 | |
vendor/github.com/OpenPlatformSDN/client-go/tools/cache/mutation_detector.go | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"fmt"
"os"
"reflect"
"strconv"
"sync"
"time"
"github.com/OpenPlatformSDN/client-go/pkg/api"
"github.com/OpenPlatformSDN/client-go/pkg/runtime"
"github.com/OpenPlatformSDN/client-go/pkg/util/diff"
)
var mutationDetectionEnabled = false
func init() {
mutationDetectionEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_CACHE_MUTATION_DETECTOR"))
}
type CacheMutationDetector interface {
AddObject(obj interface{})
Run(stopCh <-chan struct{})
}
func NewCacheMutationDetector(name string) CacheMutationDetector {
if !mutationDetectionEnabled {
return dummyMutationDetector{}
}
return &defaultCacheMutationDetector{name: name, period: 1 * time.Second}
}
type dummyMutationDetector struct{}
func (dummyMutationDetector) Run(stopCh <-chan struct{}) {
}
func (dummyMutationDetector) AddObject(obj interface{}) {
}
// defaultCacheMutationDetector gives a way to detect if a cached object has been mutated
// It has a list of cached objects and their copies. I haven't thought of a way
// to see WHO is mutating it, just that it's getting mutated.
type defaultCacheMutationDetector struct {
name string
period time.Duration
lock sync.Mutex
cachedObjs []cacheObj
// failureFunc is injectable for unit testing. If you don't have it, the process will panic.
// This panic is intentional, since turning on this detection indicates you want a strong
// failure signal. This failure is effectively a p0 bug and you can't trust process results
// after a mutation anyway.
failureFunc func(message string)
}
// cacheObj holds the actual object and a copy
type cacheObj struct {
cached interface{}
copied interface{}
}
func (d *defaultCacheMutationDetector) Run(stopCh <-chan struct{}) {
// we DON'T want protection from panics. If we're running this code, we want to die
go func() {
for {
d.CompareObjects()
select {
case <-stopCh:
return
case <-time.After(d.period):
}
}
}()
}
// AddObject makes a deep copy of the object for later comparison. It only works on runtime.Object
// but that covers the vast majority of our cached objects
func (d *defaultCacheMutationDetector) AddObject(obj interface{}) {
if _, ok := obj.(DeletedFinalStateUnknown); ok {
return
}
if _, ok := obj.(runtime.Object); !ok {
return
}
copiedObj, err := api.Scheme.Copy(obj.(runtime.Object))
if err != nil {
return
}
d.lock.Lock()
defer d.lock.Unlock()
d.cachedObjs = append(d.cachedObjs, cacheObj{cached: obj, copied: copiedObj})
}
func (d *defaultCacheMutationDetector) CompareObjects() {
d.lock.Lock()
defer d.lock.Unlock()
altered := false
for i, obj := range d.cachedObjs {
if !reflect.DeepEqual(obj.cached, obj.copied) {
fmt.Printf("CACHE %s[%d] ALTERED!\n%v\n", d.name, i, diff.ObjectDiff(obj.cached, obj.copied))
altered = true
}
}
if altered {
msg := fmt.Sprintf("cache %s modified", d.name)
if d.failureFunc != nil {
d.failureFunc(msg)
return
}
panic(msg)
}
}
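// exampleMutationDetector is an illustrative sketch added for clarity and is not
// part of the original source: with KUBE_CACHE_MUTATION_DETECTOR=true in the
// environment, a caller (normally an informer) creates a detector, registers
// cached objects, and starts the periodic comparison loop. The name and arguments
// used here are hypothetical.
func exampleMutationDetector(obj interface{}, stopCh <-chan struct{}) {
    detector := NewCacheMutationDetector("example-informer")
    detector.AddObject(obj) // deep-copies runtime.Objects for later comparison
    detector.Run(stopCh)    // with detection enabled, panics if a registered object is later mutated
}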
| ["\"KUBE_CACHE_MUTATION_DETECTOR\""] | [] | ["KUBE_CACHE_MUTATION_DETECTOR"] | [] | ["KUBE_CACHE_MUTATION_DETECTOR"] | go | 1 | 0 | |
vendor/github.com/containers/storage/store.go | package storage
import (
"encoding/base64"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"reflect"
"strings"
"sync"
"time"
// register all of the built-in drivers
_ "github.com/containers/storage/drivers/register"
"github.com/BurntSushi/toml"
drivers "github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/config"
"github.com/containers/storage/pkg/directory"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/parsers"
"github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/stringutils"
digest "github.com/opencontainers/go-digest"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
)
var (
// DefaultStoreOptions is a reasonable default set of options.
defaultStoreOptions StoreOptions
stores []*store
storesLock sync.Mutex
)
// ROFileBasedStore wraps up the methods of the various types of file-based
// data stores that we implement which are needed for both read-only and
// read-write files.
type ROFileBasedStore interface {
Locker
// Load reloads the contents of the store from disk. It should be called
// with the lock held.
Load() error
}
// RWFileBasedStore wraps up the methods of various types of file-based data
// stores that we implement using read-write files.
type RWFileBasedStore interface {
// Save saves the contents of the store to disk. It should be called with
// the lock held, and Touch() should be called afterward before releasing the
// lock.
Save() error
}
// FileBasedStore wraps up the common methods of various types of file-based
// data stores that we implement.
type FileBasedStore interface {
ROFileBasedStore
RWFileBasedStore
}
// ROMetadataStore wraps a method for reading metadata associated with an ID.
type ROMetadataStore interface {
// Metadata reads metadata associated with an item with the specified ID.
Metadata(id string) (string, error)
}
// RWMetadataStore wraps a method for setting metadata associated with an ID.
type RWMetadataStore interface {
// SetMetadata updates the metadata associated with the item with the specified ID.
SetMetadata(id, metadata string) error
}
// MetadataStore wraps up methods for getting and setting metadata associated with IDs.
type MetadataStore interface {
ROMetadataStore
RWMetadataStore
}
// An ROBigDataStore wraps up the read-only big-data related methods of the
// various types of file-based lookaside stores that we implement.
type ROBigDataStore interface {
// BigData retrieves a (potentially large) piece of data associated with
// this ID, if it has previously been set.
BigData(id, key string) ([]byte, error)
// BigDataSize retrieves the size of a (potentially large) piece of
// data associated with this ID, if it has previously been set.
BigDataSize(id, key string) (int64, error)
// BigDataDigest retrieves the digest of a (potentially large) piece of
// data associated with this ID, if it has previously been set.
BigDataDigest(id, key string) (digest.Digest, error)
// BigDataNames() returns a list of the names of previously-stored pieces of
// data.
BigDataNames(id string) ([]string, error)
}
// A RWImageBigDataStore wraps up how we store big-data associated with images.
type RWImageBigDataStore interface {
// SetBigData stores a (potentially large) piece of data associated
// with this ID.
// Pass github.com/containers/image/manifest.Digest as digestManifest
// to allow ByDigest to find images by their correct digests.
SetBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error
}
// A ContainerBigDataStore wraps up how we store big-data associated with containers.
type ContainerBigDataStore interface {
ROBigDataStore
// SetBigData stores a (potentially large) piece of data associated
// with this ID.
SetBigData(id, key string, data []byte) error
}
// A FlaggableStore can have flags set and cleared on items which it manages.
type FlaggableStore interface {
// ClearFlag removes a named flag from an item in the store.
ClearFlag(id string, flag string) error
// SetFlag sets a named flag and its value on an item in the store.
SetFlag(id string, flag string, value interface{}) error
}
// StoreOptions is used for passing initialization options to GetStore(), for
// initializing a Store object and the underlying storage that it controls.
type StoreOptions struct {
// RunRoot is the filesystem path under which we can store run-time
// information, such as the locations of active mount points, that we
// want to lose if the host is rebooted.
RunRoot string `json:"runroot,omitempty"`
// GraphRoot is the filesystem path under which we will store the
// contents of layers, images, and containers.
GraphRoot string `json:"root,omitempty"`
// GraphDriverName is the underlying storage driver that we'll be
// using. It only needs to be specified the first time a Store is
// initialized for a given RunRoot and GraphRoot.
GraphDriverName string `json:"driver,omitempty"`
// GraphDriverOptions are driver-specific options.
GraphDriverOptions []string `json:"driver-options,omitempty"`
// UIDMap and GIDMap are used for setting up a container's root filesystem
// for use inside of a user namespace where UID mapping is being used.
UIDMap []idtools.IDMap `json:"uidmap,omitempty"`
GIDMap []idtools.IDMap `json:"gidmap,omitempty"`
}
// Store wraps up the various types of file-based stores that we use into a
// singleton object that initializes and manages them all together.
type Store interface {
// RunRoot, GraphRoot, GraphDriverName, and GraphOptions retrieve
// settings that were passed to GetStore() when the object was created.
RunRoot() string
GraphRoot() string
GraphDriverName() string
GraphOptions() []string
UIDMap() []idtools.IDMap
GIDMap() []idtools.IDMap
// GraphDriver obtains and returns a handle to the graph Driver object used
// by the Store.
GraphDriver() (drivers.Driver, error)
// CreateLayer creates a new layer in the underlying storage driver,
// optionally having the specified ID (one will be assigned if none is
// specified), with the specified layer (or no layer) as its parent,
// and with optional names. (The writeable flag is ignored.)
CreateLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions) (*Layer, error)
// PutLayer combines the functions of CreateLayer and ApplyDiff,
// marking the layer for automatic removal if applying the diff fails
// for any reason.
//
// Note that we do some of this work in a child process. The calling
// process's main() function needs to import our pkg/reexec package and
// should begin with something like this in order to allow us to
// properly start that child process:
// if reexec.Init() {
// return
// }
PutLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions, diff io.Reader) (*Layer, int64, error)
// CreateImage creates a new image, optionally with the specified ID
// (one will be assigned if none is specified), with optional names,
// referring to a specified image, and with optional metadata. An
// image is a record which associates the ID of a layer with
// additional bookkeeping information which the library stores for the
// convenience of its caller.
CreateImage(id string, names []string, layer, metadata string, options *ImageOptions) (*Image, error)
// CreateContainer creates a new container, optionally with the
// specified ID (one will be assigned if none is specified), with
// optional names, using the specified image's top layer as the basis
// for the container's layer, and assigning the specified ID to that
// layer (one will be created if none is specified). A container is a
// layer which is associated with additional bookkeeping information
// which the library stores for the convenience of its caller.
CreateContainer(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error)
// Metadata retrieves the metadata which is associated with a layer,
// image, or container (whichever the passed-in ID refers to).
Metadata(id string) (string, error)
// SetMetadata updates the metadata which is associated with a layer,
// image, or container (whichever the passed-in ID refers to) to match
// the specified value. The metadata value can be retrieved at any
// time using Metadata, or using Layer, Image, or Container and reading
// the object directly.
SetMetadata(id, metadata string) error
// Exists checks if there is a layer, image, or container which has the
// passed-in ID or name.
Exists(id string) bool
// Status asks for a status report, in the form of key-value pairs,
// from the underlying storage driver. The contents vary from driver
// to driver.
Status() ([][2]string, error)
// Delete removes the layer, image, or container which has the
// passed-in ID or name. Note that no safety checks are performed, so
// this can leave images with references to layers which do not exist,
// and layers with references to parents which no longer exist.
Delete(id string) error
// DeleteLayer attempts to remove the specified layer. If the layer is the
// parent of any other layer, or is referred to by any images, it will return
// an error.
DeleteLayer(id string) error
// DeleteImage removes the specified image if it is not referred to by
// any containers. If its top layer is then no longer referred to by
// any other images and is not the parent of any other layers, its top
// layer will be removed. If that layer's parent is no longer referred
// to by any other images and is not the parent of any other layers,
// then it, too, will be removed. This procedure will be repeated
// until a layer which should not be removed, or the base layer, is
// reached, at which point the list of removed layers is returned. If
// the commit argument is false, the image and layers are not removed,
// but the list of layers which would be removed is still returned.
DeleteImage(id string, commit bool) (layers []string, err error)
// DeleteContainer removes the specified container and its layer. If
// there is no matching container, or if the container exists but its
// layer does not, an error will be returned.
DeleteContainer(id string) error
// Wipe removes all known layers, images, and containers.
Wipe() error
// Mount attempts to mount a layer, image, or container for access, and
// returns the pathname if it succeeds.
// Note if the mountLabel == "", the default label for the container
// will be used.
//
// Note that we do some of this work in a child process. The calling
// process's main() function needs to import our pkg/reexec package and
// should begin with something like this in order to allow us to
// properly start that child process:
// if reexec.Init() {
// return
// }
Mount(id, mountLabel string) (string, error)
// Unmount attempts to unmount a layer, image, or container, given an ID, a
// name, or a mount path. Returns whether or not the layer is still mounted.
Unmount(id string, force bool) (bool, error)
// Mounted returns number of times the layer has been mounted.
Mounted(id string) (int, error)
// Changes returns a summary of the changes which would need to be made
// to one layer to make its contents the same as a second layer. If
// the first layer is not specified, the second layer's parent is
// assumed. Each Change structure contains a Path relative to the
// layer's root directory, and a Kind which is either ChangeAdd,
// ChangeModify, or ChangeDelete.
Changes(from, to string) ([]archive.Change, error)
// DiffSize returns a count of the size of the tarstream which would
// specify the changes returned by Changes.
DiffSize(from, to string) (int64, error)
// Diff returns the tarstream which would specify the changes returned
// by Changes. If options are passed in, they can override default
// behaviors.
Diff(from, to string, options *DiffOptions) (io.ReadCloser, error)
// ApplyDiff applies a tarstream to a layer. Information about the
// tarstream is cached with the layer. Typically, a layer which is
// populated using a tarstream will be expected to not be modified in
// any other way, either before or after the diff is applied.
//
// Note that we do some of this work in a child process. The calling
// process's main() function needs to import our pkg/reexec package and
// should begin with something like this in order to allow us to
// properly start that child process:
// if reexec.Init() {
// return
// }
ApplyDiff(to string, diff io.Reader) (int64, error)
// LayersByCompressedDigest returns a slice of the layers with the
// specified compressed digest value recorded for them.
LayersByCompressedDigest(d digest.Digest) ([]Layer, error)
// LayersByUncompressedDigest returns a slice of the layers with the
// specified uncompressed digest value recorded for them.
LayersByUncompressedDigest(d digest.Digest) ([]Layer, error)
// LayerSize returns a cached approximation of the layer's size, or -1
// if we don't have a value on hand.
LayerSize(id string) (int64, error)
// LayerParentOwners returns the UIDs and GIDs of owners of parents of
// the layer's mountpoint for which the layer's UID and GID maps (if
// any are defined) don't contain corresponding IDs.
LayerParentOwners(id string) ([]int, []int, error)
// Layers returns a list of the currently known layers.
Layers() ([]Layer, error)
// Images returns a list of the currently known images.
Images() ([]Image, error)
// Containers returns a list of the currently known containers.
Containers() ([]Container, error)
// Names returns the list of names for a layer, image, or container.
Names(id string) ([]string, error)
// SetNames changes the list of names for a layer, image, or container.
// Duplicate names are removed from the list automatically.
SetNames(id string, names []string) error
// ListImageBigData retrieves a list of the (possibly large) chunks of
// named data associated with an image.
ListImageBigData(id string) ([]string, error)
// ImageBigData retrieves a (possibly large) chunk of named data
// associated with an image.
ImageBigData(id, key string) ([]byte, error)
// ImageBigDataSize retrieves the size of a (possibly large) chunk
// of named data associated with an image.
ImageBigDataSize(id, key string) (int64, error)
// ImageBigDataDigest retrieves the digest of a (possibly large) chunk
// of named data associated with an image.
ImageBigDataDigest(id, key string) (digest.Digest, error)
// SetImageBigData stores a (possibly large) chunk of named data
// associated with an image. Pass
// github.com/containers/image/manifest.Digest as digestManifest to
// allow ImagesByDigest to find images by their correct digests.
SetImageBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error
// ImageSize computes the size of the image's layers and ancillary data.
ImageSize(id string) (int64, error)
// ListContainerBigData retrieves a list of the (possibly large) chunks of
// named data associated with a container.
ListContainerBigData(id string) ([]string, error)
// ContainerBigData retrieves a (possibly large) chunk of named data
// associated with a container.
ContainerBigData(id, key string) ([]byte, error)
// ContainerBigDataSize retrieves the size of a (possibly large)
// chunk of named data associated with a container.
ContainerBigDataSize(id, key string) (int64, error)
// ContainerBigDataDigest retrieves the digest of a (possibly large)
// chunk of named data associated with a container.
ContainerBigDataDigest(id, key string) (digest.Digest, error)
// SetContainerBigData stores a (possibly large) chunk of named data
// associated with a container.
SetContainerBigData(id, key string, data []byte) error
// ContainerSize computes the size of the container's layer and ancillary
// data. Warning: this is a potentially expensive operation.
ContainerSize(id string) (int64, error)
// Layer returns a specific layer.
Layer(id string) (*Layer, error)
// Image returns a specific image.
Image(id string) (*Image, error)
// ImagesByTopLayer returns a list of images which reference the specified
// layer as their top layer. They will have different IDs and names
// and may have different metadata, big data items, and flags.
ImagesByTopLayer(id string) ([]*Image, error)
// ImagesByDigest returns a list of images which contain a big data item
// named ImageDigestBigDataKey whose contents have the specified digest.
ImagesByDigest(d digest.Digest) ([]*Image, error)
// Container returns a specific container.
Container(id string) (*Container, error)
// ContainerByLayer returns a specific container based on its layer ID or
// name.
ContainerByLayer(id string) (*Container, error)
// ContainerDirectory returns a path of a directory which the caller
// can use to store data, specific to the container, which the library
// does not directly manage. The directory will be deleted when the
// container is deleted.
ContainerDirectory(id string) (string, error)
// SetContainerDirectoryFile is a convenience function which stores
// a piece of data in the specified file relative to the container's
// directory.
SetContainerDirectoryFile(id, file string, data []byte) error
// FromContainerDirectory is a convenience function which reads
// the contents of the specified file relative to the container's
// directory.
FromContainerDirectory(id, file string) ([]byte, error)
// ContainerRunDirectory returns a path of a directory which the
// caller can use to store data, specific to the container, which the
// library does not directly manage. The directory will be deleted
// when the host system is restarted.
ContainerRunDirectory(id string) (string, error)
// SetContainerRunDirectoryFile is a convenience function which stores
// a piece of data in the specified file relative to the container's
// run directory.
SetContainerRunDirectoryFile(id, file string, data []byte) error
// FromContainerRunDirectory is a convenience function which reads
// the contents of the specified file relative to the container's run
// directory.
FromContainerRunDirectory(id, file string) ([]byte, error)
// ContainerParentOwners returns the UIDs and GIDs of owners of parents
// of the container's layer's mountpoint for which the layer's UID and
// GID maps (if any are defined) don't contain corresponding IDs.
ContainerParentOwners(id string) ([]int, []int, error)
// Lookup returns the ID of a layer, image, or container with the specified
// name or ID.
Lookup(name string) (string, error)
// Shutdown attempts to free any kernel resources which are being used
// by the underlying driver. If "force" is true, any mounted (i.e., in
// use) layers are unmounted beforehand. If "force" is not true, then
// layers being in use is considered to be an error condition. A list
// of still-mounted layers is returned along with possible errors.
Shutdown(force bool) (layers []string, err error)
// Version returns version information, in the form of key-value pairs, from
// the storage package.
Version() ([][2]string, error)
// GetDigestLock returns digest-specific Locker.
GetDigestLock(digest.Digest) (Locker, error)
}
// IDMappingOptions are used for specifying how ID mapping should be set up for
// a layer or container.
type IDMappingOptions struct {
// UIDMap and GIDMap are used for setting up a layer's root filesystem
// for use inside of a user namespace where ID mapping is being used.
// If HostUIDMapping/HostGIDMapping is true, no mapping of the
// respective type will be used. Otherwise, if UIDMap and/or GIDMap
// contain at least one mapping, one or both will be used. By default,
// if neither of those conditions apply, if the layer has a parent
// layer, the parent layer's mapping will be used, and if it does not
// have a parent layer, the mapping which was passed to the Store
// object when it was initialized will be used.
HostUIDMapping bool
HostGIDMapping bool
UIDMap []idtools.IDMap
GIDMap []idtools.IDMap
}
// LayerOptions is used for passing options to a Store's CreateLayer() and PutLayer() methods.
type LayerOptions struct {
// IDMappingOptions specifies the type of ID mapping which should be
// used for this layer. If nothing is specified, the layer will
// inherit settings from its parent layer or, if it has no parent
// layer, the Store object.
IDMappingOptions
// TemplateLayer is the ID of a layer whose contents will be used to
// initialize this layer. If set, it should be a child of the layer
// which we want to use as the parent of the new layer.
TemplateLayer string
}
// ImageOptions is used for passing options to a Store's CreateImage() method.
type ImageOptions struct {
// CreationDate, if not zero, will override the default behavior of marking the image as having been
// created when CreateImage() was called, recording CreationDate instead.
CreationDate time.Time
// Digest is a hard-coded digest value that we can use to look up the image. It is optional.
Digest digest.Digest
}
// ContainerOptions is used for passing options to a Store's CreateContainer() method.
type ContainerOptions struct {
// IDMappingOptions specifies the type of ID mapping which should be
// used for this container's layer. If nothing is specified, the
// container's layer will inherit settings from the image's top layer
// or, if it is not being created based on an image, the Store object.
IDMappingOptions
LabelOpts []string
Flags map[string]interface{}
MountOpts []string
}
type store struct {
lastLoaded time.Time
runRoot string
graphLock Locker
graphRoot string
graphDriverName string
graphOptions []string
uidMap []idtools.IDMap
gidMap []idtools.IDMap
graphDriver drivers.Driver
layerStore LayerStore
roLayerStores []ROLayerStore
imageStore ImageStore
roImageStores []ROImageStore
containerStore ContainerStore
digestLockRoot string
}
// GetStore attempts to find an already-created Store object matching the
// specified location and graph driver, and if it can't, it creates and
// initializes a new Store object, and the underlying storage that it controls.
//
// If StoreOptions `options` haven't been fully populated, then DefaultStoreOptions are used.
//
// These defaults observe environment variables:
// * `STORAGE_DRIVER` for the name of the storage driver to attempt to use
// * `STORAGE_OPTS` for the string of options to pass to the driver
//
// Note that we do some of this work in a child process. The calling process's
// main() function needs to import our pkg/reexec package and should begin with
// something like this in order to allow us to properly start that child
// process:
// if reexec.Init() {
// return
// }
func GetStore(options StoreOptions) (Store, error) {
if options.RunRoot == "" && options.GraphRoot == "" && options.GraphDriverName == "" && len(options.GraphDriverOptions) == 0 {
options = defaultStoreOptions
}
if options.GraphRoot != "" {
dir, err := filepath.Abs(options.GraphRoot)
if err != nil {
return nil, errors.Wrapf(err, "error deriving an absolute path from %q", options.GraphRoot)
}
options.GraphRoot = dir
}
if options.RunRoot != "" {
dir, err := filepath.Abs(options.RunRoot)
if err != nil {
return nil, errors.Wrapf(err, "error deriving an absolute path from %q", options.RunRoot)
}
options.RunRoot = dir
}
storesLock.Lock()
defer storesLock.Unlock()
for _, s := range stores {
if s.graphRoot == options.GraphRoot && (options.GraphDriverName == "" || s.graphDriverName == options.GraphDriverName) {
return s, nil
}
}
if options.GraphRoot == "" {
return nil, errors.Wrap(ErrIncompleteOptions, "no storage root specified")
}
if options.RunRoot == "" {
return nil, errors.Wrap(ErrIncompleteOptions, "no storage runroot specified")
}
if err := os.MkdirAll(options.RunRoot, 0700); err != nil && !os.IsExist(err) {
return nil, err
}
if err := os.MkdirAll(options.GraphRoot, 0700); err != nil && !os.IsExist(err) {
return nil, err
}
for _, subdir := range []string{"mounts", "tmp", options.GraphDriverName} {
if err := os.MkdirAll(filepath.Join(options.GraphRoot, subdir), 0700); err != nil && !os.IsExist(err) {
return nil, err
}
}
graphLock, err := GetLockfile(filepath.Join(options.GraphRoot, "storage.lock"))
if err != nil {
return nil, err
}
s := &store{
runRoot: options.RunRoot,
graphLock: graphLock,
graphRoot: options.GraphRoot,
graphDriverName: options.GraphDriverName,
graphOptions: options.GraphDriverOptions,
uidMap: copyIDMap(options.UIDMap),
gidMap: copyIDMap(options.GIDMap),
}
if err := s.load(); err != nil {
return nil, err
}
stores = append(stores, s)
return s, nil
}
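// exampleGetStore is an illustrative sketch added for clarity and is not part of
// the original source: it shows a minimal way a caller might obtain a Store. The
// paths are hypothetical; real callers usually start from the package defaults or
// their storage configuration.
func exampleGetStore() (Store, error) {
    return GetStore(StoreOptions{
        RunRoot:   "/var/run/containers/storage",
        GraphRoot: "/var/lib/containers/storage",
    })
}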
func copyIDMap(idmap []idtools.IDMap) []idtools.IDMap {
m := []idtools.IDMap{}
if idmap != nil {
m = make([]idtools.IDMap, len(idmap))
copy(m, idmap)
}
if len(m) > 0 {
return m[:]
}
return nil
}
func (s *store) RunRoot() string {
return s.runRoot
}
func (s *store) GraphDriverName() string {
return s.graphDriverName
}
func (s *store) GraphRoot() string {
return s.graphRoot
}
func (s *store) GraphOptions() []string {
return s.graphOptions
}
func (s *store) UIDMap() []idtools.IDMap {
return copyIDMap(s.uidMap)
}
func (s *store) GIDMap() []idtools.IDMap {
return copyIDMap(s.gidMap)
}
func (s *store) load() error {
driver, err := s.GraphDriver()
if err != nil {
return err
}
s.graphDriver = driver
s.graphDriverName = driver.String()
driverPrefix := s.graphDriverName + "-"
rls, err := s.LayerStore()
if err != nil {
return err
}
s.layerStore = rls
if _, err := s.ROLayerStores(); err != nil {
return err
}
gipath := filepath.Join(s.graphRoot, driverPrefix+"images")
if err := os.MkdirAll(gipath, 0700); err != nil {
return err
}
ris, err := newImageStore(gipath)
if err != nil {
return err
}
s.imageStore = ris
if _, err := s.ROImageStores(); err != nil {
return err
}
gcpath := filepath.Join(s.graphRoot, driverPrefix+"containers")
if err := os.MkdirAll(gcpath, 0700); err != nil {
return err
}
rcs, err := newContainerStore(gcpath)
if err != nil {
return err
}
rcpath := filepath.Join(s.runRoot, driverPrefix+"containers")
if err := os.MkdirAll(rcpath, 0700); err != nil {
return err
}
s.containerStore = rcs
s.digestLockRoot = filepath.Join(s.runRoot, driverPrefix+"locks")
if err := os.MkdirAll(s.digestLockRoot, 0700); err != nil {
return err
}
return nil
}
// GetDigestLock returns a digest-specific Locker.
func (s *store) GetDigestLock(d digest.Digest) (Locker, error) {
return GetLockfile(filepath.Join(s.digestLockRoot, d.String()))
}
func (s *store) getGraphDriver() (drivers.Driver, error) {
if s.graphDriver != nil {
return s.graphDriver, nil
}
config := drivers.Options{
Root: s.graphRoot,
RunRoot: s.runRoot,
DriverOptions: s.graphOptions,
UIDMaps: s.uidMap,
GIDMaps: s.gidMap,
}
driver, err := drivers.New(s.graphDriverName, config)
if err != nil {
return nil, err
}
s.graphDriver = driver
s.graphDriverName = driver.String()
return driver, nil
}
func (s *store) GraphDriver() (drivers.Driver, error) {
s.graphLock.Lock()
defer s.graphLock.Unlock()
if s.graphLock.TouchedSince(s.lastLoaded) {
s.graphDriver = nil
s.layerStore = nil
s.lastLoaded = time.Now()
}
return s.getGraphDriver()
}
// LayerStore obtains and returns a handle to the writeable layer store object
// used by the Store. Accessing this store directly will bypass locking and
// synchronization, so it is not a part of the exported Store interface.
func (s *store) LayerStore() (LayerStore, error) {
s.graphLock.Lock()
defer s.graphLock.Unlock()
if s.graphLock.TouchedSince(s.lastLoaded) {
s.graphDriver = nil
s.layerStore = nil
s.lastLoaded = time.Now()
}
if s.layerStore != nil {
return s.layerStore, nil
}
driver, err := s.getGraphDriver()
if err != nil {
return nil, err
}
driverPrefix := s.graphDriverName + "-"
rlpath := filepath.Join(s.runRoot, driverPrefix+"layers")
if err := os.MkdirAll(rlpath, 0700); err != nil {
return nil, err
}
glpath := filepath.Join(s.graphRoot, driverPrefix+"layers")
if err := os.MkdirAll(glpath, 0700); err != nil {
return nil, err
}
rls, err := newLayerStore(rlpath, glpath, driver, s.uidMap, s.gidMap)
if err != nil {
return nil, err
}
s.layerStore = rls
return s.layerStore, nil
}
// ROLayerStores obtains additional read/only layer store objects used by the
// Store. Accessing these stores directly will bypass locking and
// synchronization, so it is not part of the exported Store interface.
func (s *store) ROLayerStores() ([]ROLayerStore, error) {
s.graphLock.Lock()
defer s.graphLock.Unlock()
if s.roLayerStores != nil {
return s.roLayerStores, nil
}
driver, err := s.getGraphDriver()
if err != nil {
return nil, err
}
driverPrefix := s.graphDriverName + "-"
rlpath := filepath.Join(s.runRoot, driverPrefix+"layers")
if err := os.MkdirAll(rlpath, 0700); err != nil {
return nil, err
}
for _, store := range driver.AdditionalImageStores() {
glpath := filepath.Join(store, driverPrefix+"layers")
rls, err := newROLayerStore(rlpath, glpath, driver)
if err != nil {
return nil, err
}
s.roLayerStores = append(s.roLayerStores, rls)
}
return s.roLayerStores, nil
}
// ImageStore obtains and returns a handle to the writable image store object
// used by the Store. Accessing this store directly will bypass locking and
// synchronization, so it is not a part of the exported Store interface.
func (s *store) ImageStore() (ImageStore, error) {
if s.imageStore != nil {
return s.imageStore, nil
}
return nil, ErrLoadError
}
// ROImageStores obtains additional read/only image store objects used by the
// Store. Accessing these stores directly will bypass locking and
// synchronization, so it is not a part of the exported Store interface.
func (s *store) ROImageStores() ([]ROImageStore, error) {
if len(s.roImageStores) != 0 {
return s.roImageStores, nil
}
driver, err := s.getGraphDriver()
if err != nil {
return nil, err
}
driverPrefix := s.graphDriverName + "-"
for _, store := range driver.AdditionalImageStores() {
gipath := filepath.Join(store, driverPrefix+"images")
ris, err := newROImageStore(gipath)
if err != nil {
return nil, err
}
s.roImageStores = append(s.roImageStores, ris)
}
return s.roImageStores, nil
}
// ContainerStore obtains and returns a handle to the container store object
// used by the Store. Accessing this store directly will bypass locking and
// synchronization, so it is not a part of the exported Store interface.
func (s *store) ContainerStore() (ContainerStore, error) {
if s.containerStore != nil {
return s.containerStore, nil
}
return nil, ErrLoadError
}
func (s *store) PutLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions, diff io.Reader) (*Layer, int64, error) {
var parentLayer *Layer
rlstore, err := s.LayerStore()
if err != nil {
return nil, -1, err
}
rlstores, err := s.ROLayerStores()
if err != nil {
return nil, -1, err
}
rcstore, err := s.ContainerStore()
if err != nil {
return nil, -1, err
}
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
if err = rlstore.Load(); err != nil {
return nil, -1, err
}
}
rcstore.Lock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return nil, -1, err
}
}
if id == "" {
id = stringid.GenerateRandomID()
}
if options == nil {
options = &LayerOptions{}
}
if options.HostUIDMapping {
options.UIDMap = nil
}
if options.HostGIDMapping {
options.GIDMap = nil
}
uidMap := options.UIDMap
gidMap := options.GIDMap
if parent != "" {
var ilayer *Layer
for _, l := range append([]ROLayerStore{rlstore}, rlstores...) {
lstore := l
if lstore != rlstore {
lstore.Lock()
defer lstore.Unlock()
if modified, err := lstore.Modified(); modified || err != nil {
if err = lstore.Load(); err != nil {
return nil, -1, err
}
}
}
if l, err := lstore.Get(parent); err == nil && l != nil {
ilayer = l
parent = ilayer.ID
break
}
}
if ilayer == nil {
return nil, -1, ErrLayerUnknown
}
parentLayer = ilayer
containers, err := rcstore.Containers()
if err != nil {
return nil, -1, err
}
for _, container := range containers {
if container.LayerID == parent {
return nil, -1, ErrParentIsContainer
}
}
if !options.HostUIDMapping && len(options.UIDMap) == 0 {
uidMap = ilayer.UIDMap
}
if !options.HostGIDMapping && len(options.GIDMap) == 0 {
gidMap = ilayer.GIDMap
}
} else {
if !options.HostUIDMapping && len(options.UIDMap) == 0 {
uidMap = s.uidMap
}
if !options.HostGIDMapping && len(options.GIDMap) == 0 {
gidMap = s.gidMap
}
}
var layerOptions *LayerOptions
if s.graphDriver.SupportsShifting() {
layerOptions = &LayerOptions{IDMappingOptions: IDMappingOptions{HostUIDMapping: true, HostGIDMapping: true, UIDMap: nil, GIDMap: nil}}
} else {
layerOptions = &LayerOptions{
IDMappingOptions: IDMappingOptions{
HostUIDMapping: options.HostUIDMapping,
HostGIDMapping: options.HostGIDMapping,
UIDMap: copyIDMap(uidMap),
GIDMap: copyIDMap(gidMap),
},
}
}
return rlstore.Put(id, parentLayer, names, mountLabel, nil, layerOptions, writeable, nil, diff)
}
func (s *store) CreateLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions) (*Layer, error) {
layer, _, err := s.PutLayer(id, parent, names, mountLabel, writeable, options, nil)
return layer, err
}
func (s *store) CreateImage(id string, names []string, layer, metadata string, options *ImageOptions) (*Image, error) {
if id == "" {
id = stringid.GenerateRandomID()
}
if layer != "" {
lstore, err := s.LayerStore()
if err != nil {
return nil, err
}
lstores, err := s.ROLayerStores()
if err != nil {
return nil, err
}
var ilayer *Layer
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
store.Lock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return nil, err
}
}
ilayer, err = store.Get(layer)
if err == nil {
break
}
}
if ilayer == nil {
return nil, ErrLayerUnknown
}
layer = ilayer.ID
}
ristore, err := s.ImageStore()
if err != nil {
return nil, err
}
ristore.Lock()
defer ristore.Unlock()
if modified, err := ristore.Modified(); modified || err != nil {
if err = ristore.Load(); err != nil {
return nil, err
}
}
creationDate := time.Now().UTC()
if options != nil && !options.CreationDate.IsZero() {
creationDate = options.CreationDate
}
return ristore.Create(id, names, layer, metadata, creationDate, options.Digest)
}
func (s *store) imageTopLayerForMapping(image *Image, ristore ROImageStore, createMappedLayer bool, rlstore LayerStore, lstores []ROLayerStore, options IDMappingOptions) (*Layer, error) {
layerMatchesMappingOptions := func(layer *Layer, options IDMappingOptions) bool {
// If the driver supports shifting and the layer has no mappings, we can use it.
if s.graphDriver.SupportsShifting() && len(layer.UIDMap) == 0 && len(layer.GIDMap) == 0 {
return true
}
// If we want host mapping, and the layer uses mappings, it's not the best match.
if options.HostUIDMapping && len(layer.UIDMap) != 0 {
return false
}
if options.HostGIDMapping && len(layer.GIDMap) != 0 {
return false
}
// If we don't care about the mapping, it's fine.
if len(options.UIDMap) == 0 && len(options.GIDMap) == 0 {
return true
}
// Compare the maps.
return reflect.DeepEqual(layer.UIDMap, options.UIDMap) && reflect.DeepEqual(layer.GIDMap, options.GIDMap)
}
var layer, parentLayer *Layer
allStores := append([]ROLayerStore{rlstore}, lstores...)
// Locate the image's top layer and its parent, if it has one.
for _, s := range allStores {
store := s
if store != rlstore {
store.Lock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return nil, err
}
}
}
// Walk the top layer list.
for _, candidate := range append([]string{image.TopLayer}, image.MappedTopLayers...) {
if cLayer, err := store.Get(candidate); err == nil {
// We want the layer's parent, too, if it has one.
var cParentLayer *Layer
if cLayer.Parent != "" {
// Its parent should be in one of the stores, somewhere.
for _, ps := range allStores {
if cParentLayer, err = ps.Get(cLayer.Parent); err == nil {
break
}
}
if cParentLayer == nil {
continue
}
}
// If the layer matches the desired mappings, it's a perfect match,
// so we're actually done here.
if layerMatchesMappingOptions(cLayer, options) {
return cLayer, nil
}
// Record the first one that we found, even if it's not ideal, so that
// we have a starting point.
if layer == nil {
layer = cLayer
parentLayer = cParentLayer
}
}
}
}
if layer == nil {
return nil, ErrLayerUnknown
}
// The top layer's mappings don't match the ones we want, but it's in a read-only
// image store, so we can't create and add a mapped copy of the layer to the image.
// We'll have to do the mapping for the container itself, elsewhere.
if !createMappedLayer {
return layer, nil
}
// The top layer's mappings don't match the ones we want, and it's in an image store
// that lets us edit image metadata...
if istore, ok := ristore.(*imageStore); ok {
// ... so create a duplicate of the layer with the desired mappings, and
// register it as an alternate top layer in the image.
var layerOptions LayerOptions
if s.graphDriver.SupportsShifting() {
layerOptions = LayerOptions{
IDMappingOptions: IDMappingOptions{
HostUIDMapping: true,
HostGIDMapping: true,
UIDMap: nil,
GIDMap: nil,
},
}
} else {
layerOptions = LayerOptions{
IDMappingOptions: IDMappingOptions{
HostUIDMapping: options.HostUIDMapping,
HostGIDMapping: options.HostGIDMapping,
UIDMap: copyIDMap(options.UIDMap),
GIDMap: copyIDMap(options.GIDMap),
},
}
}
layerOptions.TemplateLayer = layer.ID
mappedLayer, _, err := rlstore.Put("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil, nil)
if err != nil {
return nil, errors.Wrapf(err, "error creating an ID-mapped copy of layer %q", layer.ID)
}
if err = istore.addMappedTopLayer(image.ID, mappedLayer.ID); err != nil {
if err2 := rlstore.Delete(mappedLayer.ID); err2 != nil {
err = errors.WithMessage(err, fmt.Sprintf("error deleting layer %q: %v", mappedLayer.ID, err2))
}
return nil, errors.Wrapf(err, "error registering ID-mapped layer with image %q", image.ID)
}
layer = mappedLayer
}
return layer, nil
}
func (s *store) CreateContainer(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error) {
if options == nil {
options = &ContainerOptions{}
}
if options.HostUIDMapping {
options.UIDMap = nil
}
if options.HostGIDMapping {
options.GIDMap = nil
}
rlstore, err := s.LayerStore()
if err != nil {
return nil, err
}
if id == "" {
id = stringid.GenerateRandomID()
}
var imageTopLayer *Layer
imageID := ""
uidMap := options.UIDMap
gidMap := options.GIDMap
idMappingsOptions := options.IDMappingOptions
if image != "" {
var imageHomeStore ROImageStore
lstores, err := s.ROLayerStores()
if err != nil {
return nil, err
}
istore, err := s.ImageStore()
if err != nil {
return nil, err
}
istores, err := s.ROImageStores()
if err != nil {
return nil, err
}
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
if err = rlstore.Load(); err != nil {
return nil, err
}
}
var cimage *Image
for _, s := range append([]ROImageStore{istore}, istores...) {
store := s
store.Lock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return nil, err
}
}
cimage, err = store.Get(image)
if err == nil {
imageHomeStore = store
break
}
}
if cimage == nil {
return nil, ErrImageUnknown
}
imageID = cimage.ID
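		// If the image has a top layer, locate (or create) a copy of it whose ID
		// mappings fit this container, and inherit those mappings when the caller
		// did not supply any of its own.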
if cimage.TopLayer != "" {
createMappedLayer := imageHomeStore == istore
ilayer, err := s.imageTopLayerForMapping(cimage, imageHomeStore, createMappedLayer, rlstore, lstores, idMappingsOptions)
if err != nil {
return nil, err
}
imageTopLayer = ilayer
if !options.HostUIDMapping && len(options.UIDMap) == 0 {
uidMap = ilayer.UIDMap
}
if !options.HostGIDMapping && len(options.GIDMap) == 0 {
gidMap = ilayer.GIDMap
}
}
} else {
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
if err = rlstore.Load(); err != nil {
return nil, err
}
}
if !options.HostUIDMapping && len(options.UIDMap) == 0 {
uidMap = s.uidMap
}
if !options.HostGIDMapping && len(options.GIDMap) == 0 {
gidMap = s.gidMap
}
}
var layerOptions *LayerOptions
if s.graphDriver.SupportsShifting() {
layerOptions = &LayerOptions{
IDMappingOptions: IDMappingOptions{
HostUIDMapping: true,
HostGIDMapping: true,
UIDMap: nil,
GIDMap: nil,
},
}
} else {
layerOptions = &LayerOptions{
IDMappingOptions: IDMappingOptions{
HostUIDMapping: idMappingsOptions.HostUIDMapping,
HostGIDMapping: idMappingsOptions.HostGIDMapping,
UIDMap: copyIDMap(uidMap),
GIDMap: copyIDMap(gidMap),
},
}
}
if options.Flags == nil {
options.Flags = make(map[string]interface{})
}
plabel, _ := options.Flags["ProcessLabel"].(string)
mlabel, _ := options.Flags["MountLabel"].(string)
if (plabel == "" && mlabel != "") ||
(plabel != "" && mlabel == "") {
return nil, errors.Errorf("ProcessLabel and Mountlabel must either not be specified or both specified")
}
if plabel == "" {
processLabel, mountLabel, err := label.InitLabels(options.LabelOpts)
if err != nil {
return nil, err
}
options.Flags["ProcessLabel"] = processLabel
options.Flags["MountLabel"] = mountLabel
}
clayer, err := rlstore.Create(layer, imageTopLayer, nil, options.Flags["MountLabel"].(string), nil, layerOptions, true)
if err != nil {
return nil, err
}
layer = clayer.ID
rcstore, err := s.ContainerStore()
if err != nil {
return nil, err
}
rcstore.Lock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return nil, err
}
}
options.IDMappingOptions = IDMappingOptions{
HostUIDMapping: len(options.UIDMap) == 0,
HostGIDMapping: len(options.GIDMap) == 0,
UIDMap: copyIDMap(options.UIDMap),
GIDMap: copyIDMap(options.GIDMap),
}
container, err := rcstore.Create(id, names, imageID, layer, metadata, options)
if err != nil || container == nil {
rlstore.Delete(layer)
}
return container, err
}
func (s *store) SetMetadata(id, metadata string) error {
rlstore, err := s.LayerStore()
if err != nil {
return err
}
ristore, err := s.ImageStore()
if err != nil {
return err
}
rcstore, err := s.ContainerStore()
if err != nil {
return err
}
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
if err = rlstore.Load(); err != nil {
return err
}
}
ristore.Lock()
defer ristore.Unlock()
if modified, err := ristore.Modified(); modified || err != nil {
if err := ristore.Load(); err != nil {
return err
}
}
rcstore.Lock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return err
}
}
if rlstore.Exists(id) {
return rlstore.SetMetadata(id, metadata)
}
if ristore.Exists(id) {
return ristore.SetMetadata(id, metadata)
}
if rcstore.Exists(id) {
return rcstore.SetMetadata(id, metadata)
}
return ErrNotAnID
}
func (s *store) Metadata(id string) (string, error) {
lstore, err := s.LayerStore()
if err != nil {
return "", err
}
lstores, err := s.ROLayerStores()
if err != nil {
return "", err
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return "", err
}
}
if store.Exists(id) {
return store.Metadata(id)
}
}
istore, err := s.ImageStore()
if err != nil {
return "", err
}
istores, err := s.ROImageStores()
if err != nil {
return "", err
}
for _, s := range append([]ROImageStore{istore}, istores...) {
store := s
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return "", err
}
}
if store.Exists(id) {
return store.Metadata(id)
}
}
cstore, err := s.ContainerStore()
if err != nil {
return "", err
}
cstore.RLock()
defer cstore.Unlock()
if modified, err := cstore.Modified(); modified || err != nil {
if err = cstore.Load(); err != nil {
return "", err
}
}
if cstore.Exists(id) {
return cstore.Metadata(id)
}
return "", ErrNotAnID
}
func (s *store) ListImageBigData(id string) ([]string, error) {
istore, err := s.ImageStore()
if err != nil {
return nil, err
}
istores, err := s.ROImageStores()
if err != nil {
return nil, err
}
for _, s := range append([]ROImageStore{istore}, istores...) {
store := s
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return nil, err
}
}
bigDataNames, err := store.BigDataNames(id)
if err == nil {
return bigDataNames, err
}
}
return nil, ErrImageUnknown
}
func (s *store) ImageBigDataSize(id, key string) (int64, error) {
istore, err := s.ImageStore()
if err != nil {
return -1, err
}
istores, err := s.ROImageStores()
if err != nil {
return -1, err
}
for _, s := range append([]ROImageStore{istore}, istores...) {
store := s
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return -1, err
}
}
size, err := store.BigDataSize(id, key)
if err == nil {
return size, nil
}
}
return -1, ErrSizeUnknown
}
func (s *store) ImageBigDataDigest(id, key string) (digest.Digest, error) {
ristore, err := s.ImageStore()
if err != nil {
return "", err
}
stores, err := s.ROImageStores()
if err != nil {
return "", err
}
stores = append([]ROImageStore{ristore}, stores...)
for _, r := range stores {
ristore := r
ristore.RLock()
defer ristore.Unlock()
if modified, err := ristore.Modified(); modified || err != nil {
if err = ristore.Load(); err != nil {
return "", nil
}
}
d, err := ristore.BigDataDigest(id, key)
if err == nil && d.Validate() == nil {
return d, nil
}
}
return "", ErrDigestUnknown
}
func (s *store) ImageBigData(id, key string) ([]byte, error) {
istore, err := s.ImageStore()
if err != nil {
return nil, err
}
istores, err := s.ROImageStores()
if err != nil {
return nil, err
}
for _, s := range append([]ROImageStore{istore}, istores...) {
store := s
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return nil, err
}
}
data, err := store.BigData(id, key)
if err == nil {
return data, nil
}
}
return nil, ErrImageUnknown
}
func (s *store) SetImageBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error {
ristore, err := s.ImageStore()
if err != nil {
return err
}
ristore.Lock()
defer ristore.Unlock()
if modified, err := ristore.Modified(); modified || err != nil {
if err = ristore.Load(); err != nil {
return err
}
}
return ristore.SetBigData(id, key, data, digestManifest)
}
func (s *store) ImageSize(id string) (int64, error) {
var image *Image
lstore, err := s.LayerStore()
if err != nil {
return -1, errors.Wrapf(err, "error loading primary layer store data")
}
lstores, err := s.ROLayerStores()
if err != nil {
return -1, errors.Wrapf(err, "error loading additional layer stores")
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return -1, err
}
}
}
var imageStore ROBigDataStore
istore, err := s.ImageStore()
if err != nil {
return -1, errors.Wrapf(err, "error loading primary image store data")
}
istores, err := s.ROImageStores()
if err != nil {
return -1, errors.Wrapf(err, "error loading additional image stores")
}
// Look for the image's record.
for _, s := range append([]ROImageStore{istore}, istores...) {
store := s
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return -1, err
}
}
if image, err = store.Get(id); err == nil {
imageStore = store
break
}
}
if image == nil {
return -1, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
}
// Start with a list of the image's top layers.
queue := make(map[string]struct{})
for _, layerID := range append([]string{image.TopLayer}, image.MappedTopLayers...) {
queue[layerID] = struct{}{}
}
visited := make(map[string]struct{})
// Walk all of the layers.
var size int64
for len(visited) < len(queue) {
for layerID := range queue {
// Visit each layer only once.
if _, ok := visited[layerID]; ok {
continue
}
visited[layerID] = struct{}{}
// Look for the layer and the store that knows about it.
var layerStore ROLayerStore
var layer *Layer
for _, store := range append([]ROLayerStore{lstore}, lstores...) {
if layer, err = store.Get(layerID); err == nil {
layerStore = store
break
}
}
if layer == nil {
return -1, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", layerID)
}
// The UncompressedSize is only valid if there's a digest to go with it.
n := layer.UncompressedSize
if layer.UncompressedDigest == "" {
// Compute the size.
n, err = layerStore.DiffSize("", layer.ID)
if err != nil {
return -1, errors.Wrapf(err, "size/digest of layer with ID %q could not be calculated", layerID)
}
}
// Count this layer.
size += n
// Make a note to visit the layer's parent if we haven't already.
if layer.Parent != "" {
queue[layer.Parent] = struct{}{}
}
}
}
// Count big data items.
names, err := imageStore.BigDataNames(id)
if err != nil {
return -1, errors.Wrapf(err, "error reading list of big data items for image %q", id)
}
for _, name := range names {
n, err := imageStore.BigDataSize(id, name)
if err != nil {
return -1, errors.Wrapf(err, "error reading size of big data item %q for image %q", name, id)
}
size += n
}
return size, nil
}
func (s *store) ContainerSize(id string) (int64, error) {
lstore, err := s.LayerStore()
if err != nil {
return -1, err
}
lstores, err := s.ROLayerStores()
if err != nil {
return -1, err
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return -1, err
}
}
}
// Get the location of the container directory and container run directory.
// Do it before we lock the container store because they do, too.
cdir, err := s.ContainerDirectory(id)
if err != nil {
return -1, err
}
rdir, err := s.ContainerRunDirectory(id)
if err != nil {
return -1, err
}
rcstore, err := s.ContainerStore()
if err != nil {
return -1, err
}
rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return -1, err
}
}
// Read the container record.
container, err := rcstore.Get(id)
if err != nil {
return -1, err
}
// Read the container's layer's size.
var layer *Layer
var size int64
for _, store := range append([]ROLayerStore{lstore}, lstores...) {
if layer, err = store.Get(container.LayerID); err == nil {
size, err = store.DiffSize("", layer.ID)
if err != nil {
return -1, errors.Wrapf(err, "error determining size of layer with ID %q", layer.ID)
}
break
}
}
if layer == nil {
return -1, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", container.LayerID)
}
// Count big data items.
names, err := rcstore.BigDataNames(id)
if err != nil {
return -1, errors.Wrapf(err, "error reading list of big data items for container %q", container.ID)
}
for _, name := range names {
n, err := rcstore.BigDataSize(id, name)
if err != nil {
return -1, errors.Wrapf(err, "error reading size of big data item %q for container %q", name, id)
}
size += n
}
// Count the size of our container directory and container run directory.
n, err := directory.Size(cdir)
if err != nil {
return -1, err
}
size += n
n, err = directory.Size(rdir)
if err != nil {
return -1, err
}
size += n
return size, nil
}
func (s *store) ListContainerBigData(id string) ([]string, error) {
rcstore, err := s.ContainerStore()
if err != nil {
return nil, err
}
rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return nil, err
}
}
return rcstore.BigDataNames(id)
}
func (s *store) ContainerBigDataSize(id, key string) (int64, error) {
rcstore, err := s.ContainerStore()
if err != nil {
return -1, err
}
rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return -1, err
}
}
return rcstore.BigDataSize(id, key)
}
func (s *store) ContainerBigDataDigest(id, key string) (digest.Digest, error) {
rcstore, err := s.ContainerStore()
if err != nil {
return "", err
}
rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return "", err
}
}
return rcstore.BigDataDigest(id, key)
}
func (s *store) ContainerBigData(id, key string) ([]byte, error) {
rcstore, err := s.ContainerStore()
if err != nil {
return nil, err
}
rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return nil, err
}
}
return rcstore.BigData(id, key)
}
func (s *store) SetContainerBigData(id, key string, data []byte) error {
rcstore, err := s.ContainerStore()
if err != nil {
return err
}
rcstore.Lock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return err
}
}
return rcstore.SetBigData(id, key, data)
}
func (s *store) Exists(id string) bool {
lstore, err := s.LayerStore()
if err != nil {
return false
}
lstores, err := s.ROLayerStores()
if err != nil {
return false
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return false
}
}
if store.Exists(id) {
return true
}
}
istore, err := s.ImageStore()
if err != nil {
return false
}
istores, err := s.ROImageStores()
if err != nil {
return false
}
for _, s := range append([]ROImageStore{istore}, istores...) {
store := s
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return false
}
}
if store.Exists(id) {
return true
}
}
rcstore, err := s.ContainerStore()
if err != nil {
return false
}
rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return false
}
}
if rcstore.Exists(id) {
return true
}
return false
}
func dedupeNames(names []string) []string {
seen := make(map[string]bool)
deduped := make([]string, 0, len(names))
for _, name := range names {
if _, wasSeen := seen[name]; !wasSeen {
seen[name] = true
deduped = append(deduped, name)
}
}
return deduped
}
func (s *store) SetNames(id string, names []string) error {
deduped := dedupeNames(names)
rlstore, err := s.LayerStore()
if err != nil {
return err
}
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
if err = rlstore.Load(); err != nil {
return err
}
}
if rlstore.Exists(id) {
return rlstore.SetNames(id, deduped)
}
ristore, err := s.ImageStore()
if err != nil {
return err
}
ristore.Lock()
defer ristore.Unlock()
if modified, err := ristore.Modified(); modified || err != nil {
if err = ristore.Load(); err != nil {
return err
}
}
if ristore.Exists(id) {
return ristore.SetNames(id, deduped)
}
rcstore, err := s.ContainerStore()
if err != nil {
return err
}
rcstore.Lock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return err
}
}
if rcstore.Exists(id) {
return rcstore.SetNames(id, deduped)
}
return ErrLayerUnknown
}
func (s *store) Names(id string) ([]string, error) {
lstore, err := s.LayerStore()
if err != nil {
return nil, err
}
lstores, err := s.ROLayerStores()
if err != nil {
return nil, err
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return nil, err
}
}
if l, err := store.Get(id); l != nil && err == nil {
return l.Names, nil
}
}
istore, err := s.ImageStore()
if err != nil {
return nil, err
}
istores, err := s.ROImageStores()
if err != nil {
return nil, err
}
for _, s := range append([]ROImageStore{istore}, istores...) {
store := s
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return nil, err
}
}
if i, err := store.Get(id); i != nil && err == nil {
return i.Names, nil
}
}
rcstore, err := s.ContainerStore()
if err != nil {
return nil, err
}
rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return nil, err
}
}
if c, err := rcstore.Get(id); c != nil && err == nil {
return c.Names, nil
}
return nil, ErrLayerUnknown
}
func (s *store) Lookup(name string) (string, error) {
lstore, err := s.LayerStore()
if err != nil {
return "", err
}
lstores, err := s.ROLayerStores()
if err != nil {
return "", err
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return "", err
}
}
if l, err := store.Get(name); l != nil && err == nil {
return l.ID, nil
}
}
istore, err := s.ImageStore()
if err != nil {
return "", err
}
istores, err := s.ROImageStores()
if err != nil {
return "", err
}
for _, s := range append([]ROImageStore{istore}, istores...) {
store := s
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return "", err
}
}
if i, err := store.Get(name); i != nil && err == nil {
return i.ID, nil
}
}
cstore, err := s.ContainerStore()
if err != nil {
return "", err
}
cstore.RLock()
defer cstore.Unlock()
if modified, err := cstore.Modified(); modified || err != nil {
if err = cstore.Load(); err != nil {
return "", err
}
}
if c, err := cstore.Get(name); c != nil && err == nil {
return c.ID, nil
}
return "", ErrLayerUnknown
}
func (s *store) DeleteLayer(id string) error {
rlstore, err := s.LayerStore()
if err != nil {
return err
}
ristore, err := s.ImageStore()
if err != nil {
return err
}
rcstore, err := s.ContainerStore()
if err != nil {
return err
}
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
if err = rlstore.Load(); err != nil {
return err
}
}
ristore.Lock()
defer ristore.Unlock()
if modified, err := ristore.Modified(); modified || err != nil {
if err = ristore.Load(); err != nil {
return err
}
}
rcstore.Lock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return err
}
}
if rlstore.Exists(id) {
if l, err := rlstore.Get(id); err == nil {
id = l.ID
}
layers, err := rlstore.Layers()
if err != nil {
return err
}
for _, layer := range layers {
if layer.Parent == id {
return ErrLayerHasChildren
}
}
images, err := ristore.Images()
if err != nil {
return err
}
for _, image := range images {
if image.TopLayer == id || stringutils.InSlice(image.MappedTopLayers, id) {
return errors.Wrapf(ErrLayerUsedByImage, "Layer %v used by image %v", id, image.ID)
}
}
containers, err := rcstore.Containers()
if err != nil {
return err
}
for _, container := range containers {
if container.LayerID == id {
return errors.Wrapf(ErrLayerUsedByContainer, "Layer %v used by container %v", id, container.ID)
}
}
return rlstore.Delete(id)
}
return ErrNotALayer
}
func (s *store) DeleteImage(id string, commit bool) (layers []string, err error) {
rlstore, err := s.LayerStore()
if err != nil {
return nil, err
}
ristore, err := s.ImageStore()
if err != nil {
return nil, err
}
rcstore, err := s.ContainerStore()
if err != nil {
return nil, err
}
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
if err = rlstore.Load(); err != nil {
return nil, err
}
}
ristore.Lock()
defer ristore.Unlock()
if modified, err := ristore.Modified(); modified || err != nil {
if err = ristore.Load(); err != nil {
return nil, err
}
}
rcstore.Lock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return nil, err
}
}
layersToRemove := []string{}
if ristore.Exists(id) {
image, err := ristore.Get(id)
if err != nil {
return nil, err
}
id = image.ID
containers, err := rcstore.Containers()
if err != nil {
return nil, err
}
aContainerByImage := make(map[string]string)
for _, container := range containers {
aContainerByImage[container.ImageID] = container.ID
}
if container, ok := aContainerByImage[id]; ok {
return nil, errors.Wrapf(ErrImageUsedByContainer, "Image used by %v", container)
}
images, err := ristore.Images()
if err != nil {
return nil, err
}
layers, err := rlstore.Layers()
if err != nil {
return nil, err
}
childrenByParent := make(map[string]*[]string)
for _, layer := range layers {
parent := layer.Parent
if list, ok := childrenByParent[parent]; ok {
newList := append(*list, layer.ID)
childrenByParent[parent] = &newList
} else {
childrenByParent[parent] = &([]string{layer.ID})
}
}
otherImagesByTopLayer := make(map[string]string)
for _, img := range images {
if img.ID != id {
otherImagesByTopLayer[img.TopLayer] = img.ID
for _, layerID := range img.MappedTopLayers {
otherImagesByTopLayer[layerID] = img.ID
}
}
}
if commit {
if err = ristore.Delete(id); err != nil {
return nil, err
}
}
layer := image.TopLayer
lastRemoved := ""
for layer != "" {
if rcstore.Exists(layer) {
break
}
if _, ok := otherImagesByTopLayer[layer]; ok {
break
}
parent := ""
if l, err := rlstore.Get(layer); err == nil {
parent = l.Parent
}
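			// hasOtherRefs reports whether this layer (or, when it is the image's top
			// layer, any of its mapped copies) still has child layers other than the
			// one removed in the previous iteration, in which case it must be kept.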
hasOtherRefs := func() bool {
layersToCheck := []string{layer}
if layer == image.TopLayer {
layersToCheck = append(layersToCheck, image.MappedTopLayers...)
}
for _, layer := range layersToCheck {
if childList, ok := childrenByParent[layer]; ok && childList != nil {
children := *childList
for _, child := range children {
if child != lastRemoved {
return true
}
}
}
}
return false
}
if hasOtherRefs() {
break
}
lastRemoved = layer
if layer == image.TopLayer {
layersToRemove = append(layersToRemove, image.MappedTopLayers...)
}
layersToRemove = append(layersToRemove, lastRemoved)
layer = parent
}
} else {
return nil, ErrNotAnImage
}
if commit {
for _, layer := range layersToRemove {
if err = rlstore.Delete(layer); err != nil {
return nil, err
}
}
}
return layersToRemove, nil
}
func (s *store) DeleteContainer(id string) error {
rlstore, err := s.LayerStore()
if err != nil {
return err
}
ristore, err := s.ImageStore()
if err != nil {
return err
}
rcstore, err := s.ContainerStore()
if err != nil {
return err
}
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
if err = rlstore.Load(); err != nil {
return err
}
}
ristore.Lock()
defer ristore.Unlock()
if modified, err := ristore.Modified(); modified || err != nil {
if err = ristore.Load(); err != nil {
return err
}
}
rcstore.Lock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return err
}
}
if rcstore.Exists(id) {
if container, err := rcstore.Get(id); err == nil {
if rlstore.Exists(container.LayerID) {
if err = rlstore.Delete(container.LayerID); err != nil {
return err
}
}
if err = rcstore.Delete(id); err != nil {
return err
}
middleDir := s.graphDriverName + "-containers"
gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID)
if err = os.RemoveAll(gcpath); err != nil {
return err
}
rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID)
if err = os.RemoveAll(rcpath); err != nil {
return err
}
return nil
}
}
return ErrNotAContainer
}
func (s *store) Delete(id string) error {
rlstore, err := s.LayerStore()
if err != nil {
return err
}
ristore, err := s.ImageStore()
if err != nil {
return err
}
rcstore, err := s.ContainerStore()
if err != nil {
return err
}
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
if err = rlstore.Load(); err != nil {
return err
}
}
ristore.Lock()
defer ristore.Unlock()
if modified, err := ristore.Modified(); modified || err != nil {
if err := ristore.Load(); err != nil {
return err
}
}
rcstore.Lock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return err
}
}
if rcstore.Exists(id) {
if container, err := rcstore.Get(id); err == nil {
if rlstore.Exists(container.LayerID) {
if err = rlstore.Delete(container.LayerID); err != nil {
return err
}
if err = rcstore.Delete(id); err != nil {
return err
}
middleDir := s.graphDriverName + "-containers"
gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID, "userdata")
if err = os.RemoveAll(gcpath); err != nil {
return err
}
rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID, "userdata")
if err = os.RemoveAll(rcpath); err != nil {
return err
}
return nil
}
return ErrNotALayer
}
}
if ristore.Exists(id) {
return ristore.Delete(id)
}
if rlstore.Exists(id) {
return rlstore.Delete(id)
}
return ErrLayerUnknown
}
func (s *store) Wipe() error {
rcstore, err := s.ContainerStore()
if err != nil {
return err
}
ristore, err := s.ImageStore()
if err != nil {
return err
}
rlstore, err := s.LayerStore()
if err != nil {
return err
}
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
if err = rlstore.Load(); err != nil {
return err
}
}
ristore.Lock()
defer ristore.Unlock()
if modified, err := ristore.Modified(); modified || err != nil {
if err = ristore.Load(); err != nil {
return err
}
}
rcstore.Lock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return err
}
}
if err = rcstore.Wipe(); err != nil {
return err
}
if err = ristore.Wipe(); err != nil {
return err
}
return rlstore.Wipe()
}
func (s *store) Status() ([][2]string, error) {
rlstore, err := s.LayerStore()
if err != nil {
return nil, err
}
return rlstore.Status()
}
func (s *store) Version() ([][2]string, error) {
return [][2]string{}, nil
}
func (s *store) Mount(id, mountLabel string) (string, error) {
container, err := s.Container(id)
var (
uidMap, gidMap []idtools.IDMap
mountOpts []string
)
if err == nil {
uidMap, gidMap = container.UIDMap, container.GIDMap
id = container.LayerID
mountOpts = container.MountOpts()
}
rlstore, err := s.LayerStore()
if err != nil {
return "", err
}
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
if err = rlstore.Load(); err != nil {
return "", err
}
}
if rlstore.Exists(id) {
options := drivers.MountOpts{
MountLabel: mountLabel,
UidMaps: uidMap,
GidMaps: gidMap,
Options: mountOpts,
}
return rlstore.Mount(id, options)
}
return "", ErrLayerUnknown
}
func (s *store) Mounted(id string) (int, error) {
if layerID, err := s.ContainerLayerID(id); err == nil {
id = layerID
}
rlstore, err := s.LayerStore()
if err != nil {
return 0, err
}
rlstore.RLock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
if err = rlstore.Load(); err != nil {
return 0, err
}
}
return rlstore.Mounted(id)
}
func (s *store) Unmount(id string, force bool) (bool, error) {
if layerID, err := s.ContainerLayerID(id); err == nil {
id = layerID
}
rlstore, err := s.LayerStore()
if err != nil {
return false, err
}
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
if err = rlstore.Load(); err != nil {
return false, err
}
}
if rlstore.Exists(id) {
return rlstore.Unmount(id, force)
}
return false, ErrLayerUnknown
}
func (s *store) Changes(from, to string) ([]archive.Change, error) {
lstore, err := s.LayerStore()
if err != nil {
return nil, err
}
lstores, err := s.ROLayerStores()
if err != nil {
return nil, err
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return nil, err
}
}
if store.Exists(to) {
return store.Changes(from, to)
}
}
return nil, ErrLayerUnknown
}
func (s *store) DiffSize(from, to string) (int64, error) {
lstore, err := s.LayerStore()
if err != nil {
return -1, err
}
lstores, err := s.ROLayerStores()
if err != nil {
return -1, err
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return -1, err
}
}
if store.Exists(to) {
return store.DiffSize(from, to)
}
}
return -1, ErrLayerUnknown
}
func (s *store) Diff(from, to string, options *DiffOptions) (io.ReadCloser, error) {
lstore, err := s.LayerStore()
if err != nil {
return nil, err
}
lstores, err := s.ROLayerStores()
if err != nil {
return nil, err
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
store.RLock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return nil, err
}
}
if store.Exists(to) {
rc, err := store.Diff(from, to, options)
if rc != nil && err == nil {
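				// Hold this store's read lock until the caller closes the diff stream;
				// the wrapper below releases it on Close.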
wrapped := ioutils.NewReadCloserWrapper(rc, func() error {
err := rc.Close()
store.Unlock()
return err
})
return wrapped, nil
}
store.Unlock()
return rc, err
}
store.Unlock()
}
return nil, ErrLayerUnknown
}
func (s *store) ApplyDiff(to string, diff io.Reader) (int64, error) {
rlstore, err := s.LayerStore()
if err != nil {
return -1, err
}
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
if err = rlstore.Load(); err != nil {
return -1, err
}
}
if rlstore.Exists(to) {
return rlstore.ApplyDiff(to, diff)
}
return -1, ErrLayerUnknown
}
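// layersByMappedDigest runs the supplied per-store lookup against every layer
// store and gathers the matches; stores that simply do not know the digest are
// skipped rather than treated as errors.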
func (s *store) layersByMappedDigest(m func(ROLayerStore, digest.Digest) ([]Layer, error), d digest.Digest) ([]Layer, error) {
var layers []Layer
lstore, err := s.LayerStore()
if err != nil {
return nil, err
}
lstores, err := s.ROLayerStores()
if err != nil {
return nil, err
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return nil, err
}
}
storeLayers, err := m(store, d)
if err != nil {
if errors.Cause(err) != ErrLayerUnknown {
return nil, err
}
continue
}
layers = append(layers, storeLayers...)
}
if len(layers) == 0 {
return nil, ErrLayerUnknown
}
return layers, nil
}
func (s *store) LayersByCompressedDigest(d digest.Digest) ([]Layer, error) {
if err := d.Validate(); err != nil {
return nil, errors.Wrapf(err, "error looking for compressed layers matching digest %q", d)
}
return s.layersByMappedDigest(func(r ROLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByCompressedDigest(d) }, d)
}
func (s *store) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) {
if err := d.Validate(); err != nil {
return nil, errors.Wrapf(err, "error looking for layers matching digest %q", d)
}
return s.layersByMappedDigest(func(r ROLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByUncompressedDigest(d) }, d)
}
func (s *store) LayerSize(id string) (int64, error) {
lstore, err := s.LayerStore()
if err != nil {
return -1, err
}
lstores, err := s.ROLayerStores()
if err != nil {
return -1, err
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return -1, err
}
}
if store.Exists(id) {
return store.Size(id)
}
}
return -1, ErrLayerUnknown
}
func (s *store) LayerParentOwners(id string) ([]int, []int, error) {
rlstore, err := s.LayerStore()
if err != nil {
return nil, nil, err
}
rlstore.RLock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
if err = rlstore.Load(); err != nil {
return nil, nil, err
}
}
if rlstore.Exists(id) {
return rlstore.ParentOwners(id)
}
return nil, nil, ErrLayerUnknown
}
func (s *store) ContainerParentOwners(id string) ([]int, []int, error) {
rlstore, err := s.LayerStore()
if err != nil {
return nil, nil, err
}
rcstore, err := s.ContainerStore()
if err != nil {
return nil, nil, err
}
rlstore.RLock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
if err = rlstore.Load(); err != nil {
return nil, nil, err
}
}
rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return nil, nil, err
}
}
container, err := rcstore.Get(id)
if err != nil {
return nil, nil, err
}
if rlstore.Exists(container.LayerID) {
return rlstore.ParentOwners(container.LayerID)
}
return nil, nil, ErrLayerUnknown
}
func (s *store) Layers() ([]Layer, error) {
var layers []Layer
lstore, err := s.LayerStore()
if err != nil {
return nil, err
}
lstores, err := s.ROLayerStores()
if err != nil {
return nil, err
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return nil, err
}
}
storeLayers, err := store.Layers()
if err != nil {
return nil, err
}
layers = append(layers, storeLayers...)
}
return layers, nil
}
func (s *store) Images() ([]Image, error) {
var images []Image
istore, err := s.ImageStore()
if err != nil {
return nil, err
}
istores, err := s.ROImageStores()
if err != nil {
return nil, err
}
for _, s := range append([]ROImageStore{istore}, istores...) {
store := s
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return nil, err
}
}
storeImages, err := store.Images()
if err != nil {
return nil, err
}
images = append(images, storeImages...)
}
return images, nil
}
func (s *store) Containers() ([]Container, error) {
rcstore, err := s.ContainerStore()
if err != nil {
return nil, err
}
rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return nil, err
}
}
return rcstore.Containers()
}
func (s *store) Layer(id string) (*Layer, error) {
lstore, err := s.LayerStore()
if err != nil {
return nil, err
}
lstores, err := s.ROLayerStores()
if err != nil {
return nil, err
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return nil, err
}
}
layer, err := store.Get(id)
if err == nil {
return layer, nil
}
}
return nil, ErrLayerUnknown
}
func (s *store) Image(id string) (*Image, error) {
istore, err := s.ImageStore()
if err != nil {
return nil, err
}
istores, err := s.ROImageStores()
if err != nil {
return nil, err
}
for _, s := range append([]ROImageStore{istore}, istores...) {
store := s
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return nil, err
}
}
image, err := store.Get(id)
if err == nil {
return image, nil
}
}
return nil, ErrImageUnknown
}
func (s *store) ImagesByTopLayer(id string) ([]*Image, error) {
images := []*Image{}
layer, err := s.Layer(id)
if err != nil {
return nil, err
}
istore, err := s.ImageStore()
if err != nil {
return nil, err
}
istores, err := s.ROImageStores()
if err != nil {
return nil, err
}
for _, s := range append([]ROImageStore{istore}, istores...) {
store := s
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return nil, err
}
}
imageList, err := store.Images()
if err != nil {
return nil, err
}
for _, image := range imageList {
if image.TopLayer == layer.ID || stringutils.InSlice(image.MappedTopLayers, layer.ID) {
				image := image // copy, so the appended pointer does not track the loop variable
				images = append(images, &image)
}
}
}
return images, nil
}
func (s *store) ImagesByDigest(d digest.Digest) ([]*Image, error) {
images := []*Image{}
istore, err := s.ImageStore()
if err != nil {
return nil, err
}
istores, err := s.ROImageStores()
if err != nil {
return nil, err
}
for _, store := range append([]ROImageStore{istore}, istores...) {
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return nil, err
}
}
imageList, err := store.ByDigest(d)
if err != nil && err != ErrImageUnknown {
return nil, err
}
images = append(images, imageList...)
}
return images, nil
}
func (s *store) Container(id string) (*Container, error) {
rcstore, err := s.ContainerStore()
if err != nil {
return nil, err
}
rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return nil, err
}
}
return rcstore.Get(id)
}
func (s *store) ContainerLayerID(id string) (string, error) {
rcstore, err := s.ContainerStore()
if err != nil {
return "", err
}
rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return "", err
}
}
container, err := rcstore.Get(id)
if err != nil {
return "", err
}
return container.LayerID, nil
}
func (s *store) ContainerByLayer(id string) (*Container, error) {
layer, err := s.Layer(id)
if err != nil {
return nil, err
}
rcstore, err := s.ContainerStore()
if err != nil {
return nil, err
}
rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return nil, err
}
}
containerList, err := rcstore.Containers()
if err != nil {
return nil, err
}
for _, container := range containerList {
if container.LayerID == layer.ID {
return &container, nil
}
}
return nil, ErrContainerUnknown
}
func (s *store) ContainerDirectory(id string) (string, error) {
rcstore, err := s.ContainerStore()
if err != nil {
return "", err
}
rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return "", err
}
}
id, err = rcstore.Lookup(id)
if err != nil {
return "", err
}
middleDir := s.graphDriverName + "-containers"
gcpath := filepath.Join(s.GraphRoot(), middleDir, id, "userdata")
if err := os.MkdirAll(gcpath, 0700); err != nil {
return "", err
}
return gcpath, nil
}
func (s *store) ContainerRunDirectory(id string) (string, error) {
rcstore, err := s.ContainerStore()
if err != nil {
return "", err
}
rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return "", err
}
}
id, err = rcstore.Lookup(id)
if err != nil {
return "", err
}
middleDir := s.graphDriverName + "-containers"
rcpath := filepath.Join(s.RunRoot(), middleDir, id, "userdata")
if err := os.MkdirAll(rcpath, 0700); err != nil {
return "", err
}
return rcpath, nil
}
func (s *store) SetContainerDirectoryFile(id, file string, data []byte) error {
dir, err := s.ContainerDirectory(id)
if err != nil {
return err
}
err = os.MkdirAll(filepath.Dir(filepath.Join(dir, file)), 0700)
if err != nil {
return err
}
return ioutils.AtomicWriteFile(filepath.Join(dir, file), data, 0600)
}
func (s *store) FromContainerDirectory(id, file string) ([]byte, error) {
dir, err := s.ContainerDirectory(id)
if err != nil {
return nil, err
}
return ioutil.ReadFile(filepath.Join(dir, file))
}
func (s *store) SetContainerRunDirectoryFile(id, file string, data []byte) error {
dir, err := s.ContainerRunDirectory(id)
if err != nil {
return err
}
err = os.MkdirAll(filepath.Dir(filepath.Join(dir, file)), 0700)
if err != nil {
return err
}
return ioutils.AtomicWriteFile(filepath.Join(dir, file), data, 0600)
}
func (s *store) FromContainerRunDirectory(id, file string) ([]byte, error) {
dir, err := s.ContainerRunDirectory(id)
if err != nil {
return nil, err
}
return ioutil.ReadFile(filepath.Join(dir, file))
}
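// Shutdown attempts to unmount any layers that are still mounted (when force is
// true) and, if none were mounted, runs the graph driver's cleanup; it returns
// the IDs of layers that were still mounted when it was called.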
func (s *store) Shutdown(force bool) ([]string, error) {
mounted := []string{}
modified := false
rlstore, err := s.LayerStore()
if err != nil {
return mounted, err
}
s.graphLock.Lock()
defer s.graphLock.Unlock()
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
if err = rlstore.Load(); err != nil {
return nil, err
}
}
layers, err := rlstore.Layers()
if err != nil {
return mounted, err
}
for _, layer := range layers {
if layer.MountCount == 0 {
continue
}
mounted = append(mounted, layer.ID)
if force {
for layer.MountCount > 0 {
_, err2 := rlstore.Unmount(layer.ID, force)
if err2 != nil {
if err == nil {
err = err2
}
break
}
modified = true
}
}
}
if len(mounted) > 0 && err == nil {
err = errors.Wrap(ErrLayerUsedByContainer, "A layer is mounted")
}
if err == nil {
err = s.graphDriver.Cleanup()
s.graphLock.Touch()
modified = true
}
if modified {
rlstore.Touch()
}
return mounted, err
}
// Convert a BigData key name into an acceptable file name.
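// Keys consisting only of '.', '0'-'9' and 'a'-'z' are used as-is; for example,
// a digest-style key such as "sha256:abcd" is stored as "=" plus its base64
// encoding, i.e. "=c2hhMjU2OmFiY2Q=".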
func makeBigDataBaseName(key string) string {
reader := strings.NewReader(key)
for reader.Len() > 0 {
ch, size, err := reader.ReadRune()
if err != nil || size != 1 {
break
}
if ch != '.' && !(ch >= '0' && ch <= '9') && !(ch >= 'a' && ch <= 'z') {
break
}
}
if reader.Len() > 0 {
return "=" + base64.StdEncoding.EncodeToString([]byte(key))
}
return key
}
func stringSliceWithoutValue(slice []string, value string) []string {
modified := make([]string, 0, len(slice))
for _, v := range slice {
if v == value {
continue
}
modified = append(modified, v)
}
return modified
}
func copyStringSlice(slice []string) []string {
if len(slice) == 0 {
return nil
}
ret := make([]string, len(slice))
copy(ret, slice)
return ret
}
func copyStringInt64Map(m map[string]int64) map[string]int64 {
ret := make(map[string]int64, len(m))
for k, v := range m {
ret[k] = v
}
return ret
}
func copyStringDigestMap(m map[string]digest.Digest) map[string]digest.Digest {
ret := make(map[string]digest.Digest, len(m))
for k, v := range m {
ret[k] = v
}
return ret
}
func copyDigestSlice(slice []digest.Digest) []digest.Digest {
if len(slice) == 0 {
return nil
}
ret := make([]digest.Digest, len(slice))
copy(ret, slice)
return ret
}
// copyStringInterfaceMap still forces us to assume that the interface{} is
// a non-pointer scalar value
func copyStringInterfaceMap(m map[string]interface{}) map[string]interface{} {
ret := make(map[string]interface{}, len(m))
for k, v := range m {
ret[k] = v
}
return ret
}
// defaultConfigFile path to the system wide storage.conf file
const defaultConfigFile = "/etc/containers/storage.conf"
// DefaultConfigFile returns the path to the storage config file used
func DefaultConfigFile(rootless bool) (string, error) {
if rootless {
home, err := homeDir()
if err != nil {
return "", errors.Wrapf(err, "cannot determine users homedir")
}
return filepath.Join(home, ".config/containers/storage.conf"), nil
}
return defaultConfigFile, nil
}
// TOML-friendly explicit tables used for conversions.
type tomlConfig struct {
Storage struct {
Driver string `toml:"driver"`
RunRoot string `toml:"runroot"`
GraphRoot string `toml:"graphroot"`
Options struct{ config.OptionsConfig } `toml:"options"`
} `toml:"storage"`
}
// ReloadConfigurationFile parses the specified configuration file and overrides
// the configuration in storeOptions.
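//
// An illustrative (not exhaustive) configuration file: the keys under
// [storage] correspond to the tomlConfig struct above; the driver-specific
// options table is omitted here.
//
//	[storage]
//	driver = "overlay"
//	runroot = "/var/run/containers/storage"
//	graphroot = "/var/lib/containers/storage"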
func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
data, err := ioutil.ReadFile(configFile)
if err != nil {
if !os.IsNotExist(err) {
fmt.Printf("Failed to read %s %v\n", configFile, err.Error())
return
}
}
config := new(tomlConfig)
if _, err := toml.Decode(string(data), config); err != nil {
fmt.Printf("Failed to parse %s %v\n", configFile, err.Error())
return
}
if config.Storage.Driver != "" {
storeOptions.GraphDriverName = config.Storage.Driver
}
if config.Storage.RunRoot != "" {
storeOptions.RunRoot = config.Storage.RunRoot
}
if config.Storage.GraphRoot != "" {
storeOptions.GraphRoot = config.Storage.GraphRoot
}
if config.Storage.Options.Thinpool.AutoExtendPercent != "" {
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.thinp_autoextend_percent=%s", config.Storage.Options.Thinpool.AutoExtendPercent))
}
if config.Storage.Options.Thinpool.AutoExtendThreshold != "" {
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.thinp_autoextend_threshold=%s", config.Storage.Options.Thinpool.AutoExtendThreshold))
}
if config.Storage.Options.Thinpool.BaseSize != "" {
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.basesize=%s", config.Storage.Options.Thinpool.BaseSize))
}
if config.Storage.Options.Thinpool.BlockSize != "" {
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.blocksize=%s", config.Storage.Options.Thinpool.BlockSize))
}
if config.Storage.Options.Thinpool.DirectLvmDevice != "" {
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.directlvm_device=%s", config.Storage.Options.Thinpool.DirectLvmDevice))
}
if config.Storage.Options.Thinpool.DirectLvmDeviceForce != "" {
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.directlvm_device_force=%s", config.Storage.Options.Thinpool.DirectLvmDeviceForce))
}
if config.Storage.Options.Thinpool.Fs != "" {
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.fs=%s", config.Storage.Options.Thinpool.Fs))
}
if config.Storage.Options.Thinpool.LogLevel != "" {
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.libdm_log_level=%s", config.Storage.Options.Thinpool.LogLevel))
}
if config.Storage.Options.Thinpool.MinFreeSpace != "" {
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.min_free_space=%s", config.Storage.Options.Thinpool.MinFreeSpace))
}
if config.Storage.Options.Thinpool.MkfsArg != "" {
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.mkfsarg=%s", config.Storage.Options.Thinpool.MkfsArg))
}
if config.Storage.Options.Thinpool.MountOpt != "" {
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.mountopt=%s", config.Storage.Driver, config.Storage.Options.Thinpool.MountOpt))
}
if config.Storage.Options.Thinpool.UseDeferredDeletion != "" {
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.use_deferred_deletion=%s", config.Storage.Options.Thinpool.UseDeferredDeletion))
}
if config.Storage.Options.Thinpool.UseDeferredRemoval != "" {
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.use_deferred_removal=%s", config.Storage.Options.Thinpool.UseDeferredRemoval))
}
if config.Storage.Options.Thinpool.XfsNoSpaceMaxRetries != "" {
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.xfs_nospace_max_retries=%s", config.Storage.Options.Thinpool.XfsNoSpaceMaxRetries))
}
for _, s := range config.Storage.Options.AdditionalImageStores {
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.imagestore=%s", config.Storage.Driver, s))
}
if config.Storage.Options.Size != "" {
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.size=%s", config.Storage.Driver, config.Storage.Options.Size))
}
if config.Storage.Options.OstreeRepo != "" {
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.ostree_repo=%s", config.Storage.Driver, config.Storage.Options.OstreeRepo))
}
if config.Storage.Options.SkipMountHome != "" {
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.skip_mount_home=%s", config.Storage.Driver, config.Storage.Options.SkipMountHome))
}
if config.Storage.Options.MountProgram != "" {
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.mount_program=%s", config.Storage.Driver, config.Storage.Options.MountProgram))
}
if config.Storage.Options.MountOpt != "" {
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.mountopt=%s", config.Storage.Driver, config.Storage.Options.MountOpt))
}
if config.Storage.Options.RemapUser != "" && config.Storage.Options.RemapGroup == "" {
config.Storage.Options.RemapGroup = config.Storage.Options.RemapUser
}
if config.Storage.Options.RemapGroup != "" && config.Storage.Options.RemapUser == "" {
config.Storage.Options.RemapUser = config.Storage.Options.RemapGroup
}
if config.Storage.Options.RemapUser != "" && config.Storage.Options.RemapGroup != "" {
mappings, err := idtools.NewIDMappings(config.Storage.Options.RemapUser, config.Storage.Options.RemapGroup)
if err != nil {
fmt.Printf("Error initializing ID mappings for %s:%s %v\n", config.Storage.Options.RemapUser, config.Storage.Options.RemapGroup, err)
return
}
storeOptions.UIDMap = mappings.UIDs()
storeOptions.GIDMap = mappings.GIDs()
}
uidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapUIDs}, "remap-uids")
if err != nil {
fmt.Print(err)
} else {
storeOptions.UIDMap = append(storeOptions.UIDMap, uidmap...)
}
gidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapGIDs}, "remap-gids")
if err != nil {
fmt.Print(err)
} else {
storeOptions.GIDMap = append(storeOptions.GIDMap, gidmap...)
}
if os.Getenv("STORAGE_DRIVER") != "" {
storeOptions.GraphDriverName = os.Getenv("STORAGE_DRIVER")
}
if os.Getenv("STORAGE_OPTS") != "" {
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, strings.Split(os.Getenv("STORAGE_OPTS"), ",")...)
}
if len(storeOptions.GraphDriverOptions) == 1 && storeOptions.GraphDriverOptions[0] == "" {
storeOptions.GraphDriverOptions = nil
}
}
func init() {
defaultStoreOptions.RunRoot = "/var/run/containers/storage"
defaultStoreOptions.GraphRoot = "/var/lib/containers/storage"
defaultStoreOptions.GraphDriverName = ""
ReloadConfigurationFile(defaultConfigFile, &defaultStoreOptions)
}
// GetDefaultMountOptions returns the default mountoptions defined in container/storage
func GetDefaultMountOptions() ([]string, error) {
return GetMountOptions(defaultStoreOptions.GraphDriverName, defaultStoreOptions.GraphDriverOptions)
}
// GetMountOptions returns the mountoptions for the specified driver and graphDriverOptions
func GetMountOptions(driver string, graphDriverOptions []string) ([]string, error) {
mountOpts := []string{
".mountopt",
fmt.Sprintf("%s.mountopt", driver),
}
for _, option := range graphDriverOptions {
key, val, err := parsers.ParseKeyValueOpt(option)
if err != nil {
return nil, err
}
key = strings.ToLower(key)
for _, m := range mountOpts {
if m == key {
return strings.Split(val, ","), nil
}
}
}
return nil, nil
}
| [
"\"STORAGE_DRIVER\"",
"\"STORAGE_DRIVER\"",
"\"STORAGE_OPTS\"",
"\"STORAGE_OPTS\""
] | [] | [
"STORAGE_OPTS",
"STORAGE_DRIVER"
] | [] | ["STORAGE_OPTS", "STORAGE_DRIVER"] | go | 2 | 0 | |
src/github.com/getlantern/autoupdate-server/server/github_test.go | package server
import (
"fmt"
"github.com/blang/semver"
"os"
"path"
"testing"
)
var testClient *ReleaseManager
var (
ghAccountOwner = "getlantern"
ghAccountRepository = "lantern"
)
func init() {
if v := os.Getenv("GH_ACCOUNT_OWNER"); v != "" {
ghAccountOwner = v
}
if v := os.Getenv("GH_ACCOUNT_REPOSITORY"); v != "" {
ghAccountRepository = v
}
}
func TestSplitUpdateAsset(t *testing.T) {
var err error
var info *AssetInfo
if info, err = getAssetInfo("update_darwin_386.dmg"); err != nil {
t.Fatal(fmt.Errorf("Failed to get asset info: %q", err))
}
if info.OS != OS.Darwin || info.Arch != Arch.X86 {
t.Fatal("Failed to identify update asset.")
}
if info, err = getAssetInfo("update_darwin_amd64.v1"); err != nil {
t.Fatal(fmt.Errorf("Failed to get asset info: %q", err))
}
if info.OS != OS.Darwin || info.Arch != Arch.X64 {
t.Fatal("Failed to identify update asset.")
}
if info, err = getAssetInfo("update_linux_arm"); err != nil {
t.Fatal(fmt.Errorf("Failed to get asset info: %q", err))
}
if info.OS != OS.Linux || info.Arch != Arch.ARM {
t.Fatal("Failed to identify update asset.")
}
if info, err = getAssetInfo("update_windows_386"); err != nil {
t.Fatal(fmt.Errorf("Failed to get asset info: %q", err))
}
if info.OS != OS.Windows || info.Arch != Arch.X86 {
t.Fatal("Failed to identify update asset.")
}
if _, err = getAssetInfo("update_osx_386"); err == nil {
t.Fatalf("Should have ignored the release, \"osx\" is not a valid OS value.")
}
}
func TestNewClient(t *testing.T) {
testClient = NewReleaseManager(ghAccountOwner, ghAccountRepository)
if testClient == nil {
t.Fatal("Failed to create new client.")
}
}
func TestListReleases(t *testing.T) {
if _, err := testClient.getReleases(); err != nil {
t.Fatal(fmt.Errorf("Failed to pull releases: %q", err))
}
}
func TestUpdateAssetsMap(t *testing.T) {
if err := testClient.UpdateAssetsMap(); err != nil {
t.Fatal(fmt.Errorf("Failed to update assets map: %q", err))
}
if testClient.updateAssetsMap == nil {
t.Fatal("Assets map should not be nil at this point.")
}
if len(testClient.updateAssetsMap) == 0 {
t.Fatal("Assets map is empty.")
}
if testClient.latestAssetsMap == nil {
t.Fatal("Assets map should not be nil at this point.")
}
if len(testClient.latestAssetsMap) == 0 {
t.Fatal("Assets map is empty.")
}
}
func TestDownloadOldestVersionAndUpgradeIt(t *testing.T) {
if len(testClient.updateAssetsMap) == 0 {
t.Fatal("Assets map is empty.")
}
oldestVersionMap := make(map[string]map[string]*Asset)
// Using the updateAssetsMap to look for the oldest version of each release.
for os := range testClient.updateAssetsMap {
for arch := range testClient.updateAssetsMap[os] {
var oldestAsset *Asset
for i := range testClient.updateAssetsMap[os][arch] {
asset := testClient.updateAssetsMap[os][arch][i]
if oldestAsset == nil {
oldestAsset = asset
} else {
if asset.v.LT(oldestAsset.v) {
oldestAsset = asset
}
}
}
if oldestAsset != nil {
if oldestVersionMap[os] == nil {
oldestVersionMap[os] = make(map[string]*Asset)
}
oldestVersionMap[os][arch] = oldestAsset
}
}
}
// Let's download each one of the oldest versions.
var err error
var p *Patch
if len(oldestVersionMap) == 0 {
t.Fatal("No older software versions to test with.")
}
tests := 0
for os := range oldestVersionMap {
for arch := range oldestVersionMap[os] {
asset := oldestVersionMap[os][arch]
newAsset := testClient.latestAssetsMap[os][arch]
t.Logf("Upgrading %v to %v (%s/%s)", asset.v, newAsset.v, os, arch)
if asset == newAsset {
t.Logf("Skipping version %s %s %s", os, arch, asset.v)
// Skipping
continue
}
// Generate a binary diff of the two assets.
if p, err = generatePatch(asset.URL, newAsset.URL); err != nil {
t.Fatal(fmt.Errorf("Unable to generate patch: %q", err))
}
// Apply patch.
var oldAssetFile string
if oldAssetFile, err = downloadAsset(asset.URL); err != nil {
t.Fatal(err)
}
var newAssetFile string
if newAssetFile, err = downloadAsset(newAsset.URL); err != nil {
t.Fatal(err)
}
patchedFile := "_tests/" + path.Base(asset.URL)
if err = bspatch(oldAssetFile, patchedFile, p.File); err != nil {
t.Fatal(fmt.Sprintf("Failed to apply binary diff: %q", err))
}
// Compare the two versions.
if fileHash(oldAssetFile) == fileHash(newAssetFile) {
t.Fatal("Nothing to update, probably not a good test case.")
}
if fileHash(patchedFile) != fileHash(newAssetFile) {
t.Fatal("File hashes after patch must be equal.")
}
var cs string
if cs, err = checksumForFile(patchedFile); err != nil {
t.Fatal("Could not get checksum for %s: %q", patchedFile, err)
}
if cs == asset.Checksum {
t.Fatal("Computed checksum for patchedFile must be different than the stored older asset checksum.")
}
if cs != newAsset.Checksum {
t.Fatal("Computed checksum for patchedFile must be equal to the stored newer asset checksum.")
}
var ss string
if ss, err = signatureForFile(patchedFile); err != nil {
				t.Fatalf("Could not get signature for %s: %q", patchedFile, err)
}
if ss == asset.Signature {
t.Fatal("Computed signature for patchedFile must be different than the stored older asset signature.")
}
if ss != newAsset.Signature {
t.Fatal("Computed signature for patchedFile must be equal to the stored newer asset signature.")
}
tests++
}
}
if tests == 0 {
t.Fatal("Seems like there is not any newer software version to test with.")
}
// Let's walk over the array again but using CheckForUpdate instead.
for os := range oldestVersionMap {
for arch := range oldestVersionMap[os] {
asset := oldestVersionMap[os][arch]
params := Params{
AppVersion: asset.v.String(),
OS: asset.OS,
Arch: asset.Arch,
Checksum: asset.Checksum,
}
// fmt.Printf("params: %s", params)
r, err := testClient.CheckForUpdate(¶ms)
			if err != nil {
				if err == ErrNoUpdateAvailable {
					// That's OK, let's make sure no update really was expected.
					newAsset := testClient.latestAssetsMap[os][arch]
					if asset != newAsset {
						t.Fatal("CheckForUpdate said no update was available!")
					}
					// r is nil when no update is available, so skip the checks below.
					continue
				} else {
					t.Fatal("CheckForUpdate: ", err)
				}
			}
			if r.PatchType != PATCHTYPE_BSDIFF {
				t.Fatal("Expecting a bsdiff patch.")
			}
			if r.Version != testClient.latestAssetsMap[os][arch].v.String() {
				t.Fatalf("Expecting %v, got %v.", testClient.latestAssetsMap[os][arch].v, r.Version)
			}
}
}
// Let's walk again using an odd checksum.
for os := range oldestVersionMap {
for arch := range oldestVersionMap[os] {
asset := oldestVersionMap[os][arch]
params := Params{
AppVersion: asset.v.String(),
OS: asset.OS,
Arch: asset.Arch,
Checksum: "?",
}
r, err := testClient.CheckForUpdate(¶ms)
			if err != nil {
				if err == ErrNoUpdateAvailable {
					// That's OK, let's make sure.
					newAsset := testClient.latestAssetsMap[os][arch]
					if asset != newAsset {
						t.Fatal("CheckForUpdate said no update was available!")
					}
					// r is nil when no update is available, so skip the checks below.
					continue
				} else {
					t.Fatal("CheckForUpdate: ", err)
				}
			}
			if r.PatchType != PATCHTYPE_NONE {
				t.Fatal("Expecting no patch.")
			}
			if r.Version != testClient.latestAssetsMap[os][arch].v.String() {
				t.Fatalf("Expecting %v, got %v.", testClient.latestAssetsMap[os][arch].v, r.Version)
			}
}
}
}
func TestDownloadManotoBetaAndUpgradeIt(t *testing.T) {
if r := semver.MustParse("2.0.0+manoto").Compare(semver.MustParse("2.0.0+stable")); r != 0 {
t.Fatalf("Expecting 2.0.0+manoto to be equal to 2.0.0+stable, got: %d", r)
}
if r := semver.MustParse("2.0.0+manoto").Compare(semver.MustParse("2.0.1")); r != -1 {
t.Fatalf("Expecting 2.0.0+manoto to be lower than 2.0.1, got: %d", r)
}
if r := semver.MustParse("2.0.0+stable").Compare(semver.MustParse("9999.99.99")); r != -1 {
		t.Fatalf("Expecting 2.0.0+stable to be lower than 9999.99.99, got: %d", r)
}
if len(testClient.updateAssetsMap) == 0 {
t.Fatal("Assets map is empty.")
}
oldestVersionMap := make(map[string]map[string]*Asset)
// Using the updateAssetsMap to look for the oldest version of each release.
for os := range testClient.updateAssetsMap {
for arch := range testClient.updateAssetsMap[os] {
var oldestAsset *Asset
for i := range testClient.updateAssetsMap[os][arch] {
asset := testClient.updateAssetsMap[os][arch][i]
if asset.v.String() == semver.MustParse(manotoBeta8).String() {
if !buildStringContainsManoto(asset.v) {
t.Fatal(`Build string must contain the word "manoto"`)
}
oldestAsset = asset
}
}
if oldestAsset != nil {
if oldestVersionMap[os] == nil {
oldestVersionMap[os] = make(map[string]*Asset)
}
oldestVersionMap[os][arch] = oldestAsset
}
}
}
// Let's download each one of the oldest versions.
if len(oldestVersionMap) == 0 {
t.Fatal("No older software versions to test with.")
}
// Let's walk over the array again but using CheckForUpdate instead.
for os := range oldestVersionMap {
for arch := range oldestVersionMap[os] {
asset := oldestVersionMap[os][arch]
params := Params{
AppVersion: asset.v.String(),
OS: asset.OS,
Arch: asset.Arch,
Checksum: asset.Checksum,
}
if params.AppVersion != manotoBeta8 {
t.Fatal("Expecting Manoto beta8.")
}
r, err := testClient.CheckForUpdate(¶ms)
if err != nil {
t.Fatal("CheckForUpdate: ", err)
}
t.Logf("Upgrading %v to %v (%s/%s)", asset.v, r.Version, os, arch)
if r.Version != manotoBeta8Upgrade {
				t.Fatalf("Expecting %s, got %s.", manotoBeta8Upgrade, r.Version)
}
}
}
}
| [
"\"GH_ACCOUNT_OWNER\"",
"\"GH_ACCOUNT_REPOSITORY\""
] | [] | [
"GH_ACCOUNT_REPOSITORY",
"GH_ACCOUNT_OWNER"
] | [] | ["GH_ACCOUNT_REPOSITORY", "GH_ACCOUNT_OWNER"] | go | 2 | 0 | |
main.go | package main
import (
"flag"
"fmt"
"log"
"os"
"strings"
_ "github.com/vbatts/git-validation/rules/danglingwhitespace"
_ "github.com/vbatts/git-validation/rules/dco"
_ "github.com/vbatts/git-validation/rules/messageregexp"
_ "github.com/vbatts/git-validation/rules/shortsubject"
"github.com/vbatts/git-validation/validate"
)
var (
flCommitRange = flag.String("range", "", "use this commit range instead (implies -no-travis)")
flListRules = flag.Bool("list-rules", false, "list the rules registered")
flRun = flag.String("run", "", "comma delimited list of rules to run. Defaults to all.")
flVerbose = flag.Bool("v", false, "verbose")
flDebug = flag.Bool("D", false, "debug output")
flQuiet = flag.Bool("q", false, "less output")
flDir = flag.String("d", ".", "git directory to validate from")
flNoGithub = flag.Bool("no-github", false, "disables Github Actions environment checks (when env GITHUB_ACTIONS=true is set)")
flNoTravis = flag.Bool("no-travis", false, "disables travis environment checks (when env TRAVIS=true is set)")
flTravisPROnly = flag.Bool("travis-pr-only", true, "when on travis, only run validations if the CI-Build is checking pull-request build")
)
func main() {
flag.Parse()
if *flDebug {
os.Setenv("DEBUG", "1")
}
if *flQuiet {
os.Setenv("QUIET", "1")
}
if *flListRules {
for _, r := range validate.RegisteredRules {
fmt.Printf("%q -- %s\n", r.Name, r.Description)
}
return
}
if *flTravisPROnly && strings.ToLower(os.Getenv("TRAVIS_PULL_REQUEST")) == "false" {
		fmt.Printf("only checking travis PR builds and this is not a PR build. yielding.\n")
return
}
// rules to be used
var rules []validate.Rule
for _, r := range validate.RegisteredRules {
// only those that are Default
if r.Default {
rules = append(rules, r)
}
}
// or reduce the set being run to what the user provided
if *flRun != "" {
rules = validate.FilterRules(validate.RegisteredRules, validate.SanitizeFilters(*flRun))
}
if os.Getenv("DEBUG") != "" {
log.Printf("%#v", rules) // XXX maybe reduce this list
}
var commitRange = *flCommitRange
if commitRange == "" {
if strings.ToLower(os.Getenv("TRAVIS")) == "true" && !*flNoTravis {
if os.Getenv("TRAVIS_COMMIT_RANGE") != "" {
commitRange = strings.Replace(os.Getenv("TRAVIS_COMMIT_RANGE"), "...", "..", 1)
} else if os.Getenv("TRAVIS_COMMIT") != "" {
commitRange = os.Getenv("TRAVIS_COMMIT")
}
}
// https://docs.github.com/en/actions/reference/environment-variables
if strings.ToLower(os.Getenv("GITHUB_ACTIONS")) == "true" && !*flNoGithub {
commitRange = fmt.Sprintf("%s..%s", os.Getenv("GITHUB_SHA"), "HEAD")
}
}
runner, err := validate.NewRunner(*flDir, rules, commitRange, *flVerbose)
if err != nil {
log.Fatal(err)
}
if err := runner.Run(); err != nil {
log.Fatal(err)
}
_, fail := runner.Results.PassFail()
if fail > 0 {
fmt.Printf("%d commits to fix\n", fail)
os.Exit(1)
}
}
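
// Illustrative invocations (sketch only; the rule names below are placeholders,
// run -list-rules to see the names actually registered by the imported packages):
//
//	git-validation -list-rules
//	git-validation -run dco,short-subject -range origin/master..HEAD -v
//	git-validation -q -d /path/to/repo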
| [
"\"TRAVIS_PULL_REQUEST\"",
"\"DEBUG\"",
"\"TRAVIS\"",
"\"TRAVIS_COMMIT_RANGE\"",
"\"TRAVIS_COMMIT_RANGE\"",
"\"TRAVIS_COMMIT\"",
"\"TRAVIS_COMMIT\"",
"\"GITHUB_ACTIONS\"",
"\"GITHUB_SHA\""
] | [] | [
"TRAVIS_PULL_REQUEST",
"GITHUB_ACTIONS",
"TRAVIS",
"TRAVIS_COMMIT",
"GITHUB_SHA",
"DEBUG",
"TRAVIS_COMMIT_RANGE"
] | [] | ["TRAVIS_PULL_REQUEST", "GITHUB_ACTIONS", "TRAVIS", "TRAVIS_COMMIT", "GITHUB_SHA", "DEBUG", "TRAVIS_COMMIT_RANGE"] | go | 7 | 0 | |
corp104/cmd/url_implicit.go | package cmd
import (
"crypto/ecdsa"
"encoding/json"
"fmt"
"github.com/lestrrat-go/jwx/jwa"
"github.com/lestrrat-go/jwx/jws"
"github.com/ory/go-convenience/stringslice"
"github.com/ory/hydra/pkg"
"github.com/ory/hydra/rand/sequence"
"github.com/spf13/cobra"
"net/url"
"os"
"strconv"
"strings"
"time"
"github.com/lestrrat-go/jwx/jwk"
"github.com/lestrrat-go/jwx/jwt"
goauth2 "golang.org/x/oauth2"
)
// urlImplicitCmd represents the "url implicit" command
var urlImplicitCmd = &cobra.Command{
Use: "implicit",
Short: "Initiates OIDC implicit requests",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
scopes, _ := cmd.Flags().GetStringSlice("scope")
prompt, _ := cmd.Flags().GetStringSlice("prompt")
maxAge, _ := cmd.Flags().GetInt("max-age")
redirectUrl, _ := cmd.Flags().GetString("redirect")
authURL, _ := cmd.Flags().GetString("auth-url")
clientID, _ := cmd.Flags().GetString("client-id")
if clientID == "" {
fmt.Print(cmd.UsageString())
			fmt.Println("Please provide a Client ID using the --client-id flag or the OAUTH2_CLIENT_ID environment variable.")
return
}
signingJwk, _ := cmd.Flags().GetString("signing-jwk")
if !stringslice.Has(scopes, "openid") {
scopes = append(scopes, "openid")
}
config := goauth2.Config{
ClientID: clientID,
Endpoint: goauth2.Endpoint{
TokenURL: "",
AuthURL: authURL,
},
RedirectURL: redirectUrl,
Scopes: scopes,
}
state, err := sequence.RuneSequence(24, sequence.AlphaLower)
pkg.Must(err, "Could not generate random state: %s", err)
nonce, err := sequence.RuneSequence(24, sequence.AlphaLower)
		pkg.Must(err, "Could not generate random nonce: %s", err)
authCodeURL := config.AuthCodeURL(string(state)) + "&nonce=" + string(nonce) + "&prompt=" + strings.Join(prompt, "+") + "&max_age=" + strconv.Itoa(maxAge)
authCodeURL = strings.Replace(authCodeURL, "response_type=code", "response_type=id_token", 1)
u, err := url.Parse(c.GetClusterURLWithoutTailingSlashOrFail(cmd))
if err != nil {
panic(err.Error())
}
t := jwt.New()
t.Set(jwt.IssuerKey, clientID)
t.Set(jwt.AudienceKey, u.String())
t.Set(jwt.IssuedAtKey, time.Now())
t.Set(jwt.ExpirationKey, time.Now().Add(time.Duration(30) * time.Minute))
t.Set("client_id", clientID)
t.Set("redirect_uri", redirectUrl)
t.Set("scope", "openid")
t.Set("state", string(state))
t.Set("nonce", string(nonce))
t.Set("max_age", strconv.Itoa(maxAge))
t.Set("response_type", "id_token")
t.Set("prompt", strings.Join(prompt, "+"))
buf, err := json.MarshalIndent(t, "", " ")
if err != nil {
panic(err.Error())
}
kid, privateKey := extractECDSAPrivateKey([]byte(signingJwk))
header := buildHeader(kid)
signedBuf, err := jws.Sign(buf, jwa.ES256, privateKey, jws.WithHeaders(&header))
if err != nil {
panic(err.Error())
}
authCodeURL = authCodeURL + "&request=" + string(signedBuf)
fmt.Printf("Copy the following url to browser: \n%s\n", authCodeURL)
},
}
func init() {
urlCmd.AddCommand(urlImplicitCmd)
urlImplicitCmd.Flags().StringSlice("scope", []string{"openid"}, "Request OAuth2 scope")
urlImplicitCmd.Flags().StringSlice("prompt", []string{}, "Set the OpenID Connect prompt parameter")
urlImplicitCmd.Flags().Int("max-age", 0, "Set the OpenID Connect max_age parameter")
urlImplicitCmd.Flags().String("client-id", os.Getenv("OAUTH2_CLIENT_ID"), "Use the provided OAuth 2.0 Client ID, defaults to environment variable OAUTH2_CLIENT_ID")
urlImplicitCmd.Flags().String("signing-jwk", "", "Client's JSON Web Key document representing the client's private key used to sign the software statement")
urlImplicitCmd.Flags().String("redirect", "", "Force a redirect url")
urlImplicitCmd.Flags().String("auth-url", "", "Usually it is enough to specify the `endpoint` flag, but if you want to force the authorization url, use this flag")
urlImplicitCmd.PersistentFlags().String("endpoint", os.Getenv("HYDRA_URL"), "Set the URL where ORY Hydra is hosted, defaults to environment variable HYDRA_URL")
}
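
// Illustrative invocation (sketch only: the parent command name, endpoint, URLs and
// key file below are assumptions, not values taken from this file):
//
//	hydra url implicit \
//	    --endpoint https://hydra.example.com \
//	    --client-id my-client \
//	    --redirect https://app.example.com/callback \
//	    --signing-jwk "$(cat client.private.jwks.json)"
//
// The command prints an authorize URL with response_type=id_token whose "request"
// parameter is the request object signed above with ES256.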
func extractECDSAPrivateKey(jwks []byte) (string, *ecdsa.PrivateKey) {
j, err := jwk.Parse(jwks)
if err != nil {
panic(err.Error())
}
for _, keys := range j.Keys {
key, err := keys.Materialize()
if err != nil {
panic(err.Error())
}
if v, ok := key.(*ecdsa.PrivateKey); ok {
return keys.KeyID(), v
}
}
return "", nil
}
func buildHeader(kid string) jws.StandardHeaders {
var header jws.StandardHeaders
header.Set(`typ`, "JWT")
header.Set("kid", strings.Replace(kid, "private", "public", 1))
return header
}
| [
"\"OAUTH2_CLIENT_ID\"",
"\"HYDRA_URL\""
] | [] | [
"OAUTH2_CLIENT_ID",
"HYDRA_URL"
] | [] | ["OAUTH2_CLIENT_ID", "HYDRA_URL"] | go | 2 | 0 | |
cmd/overdone/main.go | package main
import (
"context"
"encoding/json"
"flag"
"log"
"net/http"
"os"
"os/signal"
"strconv"
"time"
"github.com/gorilla/mux"
"github.com/scnewma/overdone/inmem"
httplogging "github.com/scnewma/overdone/pkg/http/logging"
"github.com/scnewma/overdone/pkg/tasks"
"github.com/scnewma/overdone/pkg/utils"
)
const (
defaultPort = "8080"
)
func main() {
port := os.Getenv("PORT")
if utils.IsBlank(port) {
port = defaultPort
}
httpAddr := flag.String("http-addr", ":"+port, "HTTP Listen Address")
	timeout := flag.Duration("graceful-timeout", time.Second*15, "The duration the server will wait for existing connections to finish before shutting down")
flag.Parse()
tr := inmem.NewRepository()
ts := tasks.NewService(tr)
a := App{EnableLogging: true}
a.Initialize(ts)
a.Run(*httpAddr, *timeout)
}
// App bridges the gap between the business logic and the web server by
// listening for HTTP requests and calling the correct application service
type App struct {
Router *mux.Router
Service tasks.Service
EnableLogging bool
}
// Initialize sets up the routes for the web server
func (a *App) Initialize(ts tasks.Service) {
a.Service = ts
a.Router = mux.NewRouter()
a.initializeRoutes()
a.initializeMiddleware()
}
// Run starts the web server on the given address
func (a *App) Run(addr string, timeout time.Duration) {
srv := &http.Server{
Addr: addr,
WriteTimeout: time.Second * 15,
ReadTimeout: time.Second * 15,
IdleTimeout: time.Second * 60,
Handler: a.Router,
}
go func() {
log.Printf("transport=http address=%s message=listening", addr)
if err := srv.ListenAndServe(); err != nil {
log.Println(err)
}
}()
// accept graceful shutdowns via INTERRUPT
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt)
<-c // block until signal received
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
	// block up to the timeout period for connections to close
srv.Shutdown(ctx)
log.Println("shutting down")
os.Exit(0)
}
func (a *App) initializeRoutes() {
a.Router.HandleFunc("/tasks", a.getTasks).Methods("GET")
a.Router.HandleFunc("/tasks", a.createTask).Methods("POST")
a.Router.HandleFunc("/tasks/{id:[0-9]+}", a.getTask).Methods("GET")
a.Router.HandleFunc("/tasks/{id:[0-9]+}/complete", a.completeTask).Methods("PUT")
}
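
// Summary of the JSON API registered above (descriptive only):
//
//	GET  /tasks                list all tasks
//	POST /tasks                create a task from {"content": "..."}
//	GET  /tasks/{id}           fetch a single task
//	PUT  /tasks/{id}/complete  mark a task as completed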
func (a *App) initializeMiddleware() {
if a.EnableLogging {
a.Router.Use(loggingMiddleware)
}
}
func loggingMiddleware(next http.Handler) http.Handler {
return httplogging.NewApacheLoggingHandler(next, os.Stdout)
}
func (a *App) getTasks(w http.ResponseWriter, r *http.Request) {
tasks, err := a.Service.LoadAll()
if err != nil {
respondWithError(w, http.StatusInternalServerError, err.Error())
return
}
respondWithJSON(w, http.StatusOK, tasks)
}
func (a *App) getTask(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
id, err := strconv.Atoi(vars["id"])
if err != nil {
respondWithError(w, http.StatusBadRequest, "Invalid user ID")
return
}
t, err := a.Service.LoadByID(id)
if err != nil {
switch err {
case tasks.ErrNotFound:
respondWithError(w, http.StatusNotFound, "Task not found")
default:
respondWithError(w, http.StatusInternalServerError, err.Error())
}
return
}
respondWithJSON(w, http.StatusOK, t)
}
func (a *App) createTask(w http.ResponseWriter, r *http.Request) {
var body struct {
Content string `json:"content"`
}
if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
respondWithError(w, http.StatusBadRequest, "Invalid request payload")
return
}
defer r.Body.Close()
task, err := a.Service.Create(body.Content)
if err != nil {
respondWithError(w, http.StatusInternalServerError, err.Error())
}
respondWithJSON(w, http.StatusCreated, task)
}
func (a *App) completeTask(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
id, err := strconv.Atoi(vars["id"])
if err != nil {
respondWithError(w, http.StatusBadRequest, "Invalid user ID")
return
}
t, err := a.Service.MarkComplete(id)
if err != nil {
switch err {
case tasks.ErrNotFound:
respondWithError(w, http.StatusNotFound, "Task not found")
default:
respondWithError(w, http.StatusInternalServerError, err.Error())
}
return
}
respondWithJSON(w, http.StatusOK, t)
}
func respondWithError(w http.ResponseWriter, code int, message string) {
respondWithJSON(w, code, map[string]string{"error": message})
}
func respondWithJSON(w http.ResponseWriter, code int, payload interface{}) {
response, _ := json.Marshal(payload)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(code)
w.Write(response)
}
| [
"\"PORT\""
] | [] | [
"PORT"
] | [] | ["PORT"] | go | 1 | 0 | |
airflow/providers/google/marketing_platform/example_dags/example_analytics.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that shows how to use Google Analytics 360.
"""
import os
from airflow import models
from airflow.providers.google.marketing_platform.operators.analytics import (
GoogleAnalyticsDataImportUploadOperator, GoogleAnalyticsDeletePreviousDataUploadsOperator,
GoogleAnalyticsGetAdsLinkOperator, GoogleAnalyticsListAccountsOperator,
GoogleAnalyticsModifyFileHeadersDataImportOperator, GoogleAnalyticsRetrieveAdsLinksListOperator,
)
from airflow.utils import dates
ACCOUNT_ID = os.environ.get("GA_ACCOUNT_ID", "123456789")
BUCKET = os.environ.get("GMP_ANALYTICS_BUCKET", "test-airflow-analytics-bucket")
BUCKET_FILENAME = "data.csv"
WEB_PROPERTY_ID = os.environ.get("GA_WEB_PROPERTY", "UA-12345678-1")
WEB_PROPERTY_AD_WORDS_LINK_ID = os.environ.get(
"GA_WEB_PROPERTY_AD_WORDS_LINK_ID", "rQafFTPOQdmkx4U-fxUfhj"
)
DATA_ID = "kjdDu3_tQa6n8Q1kXFtSmg"
with models.DAG(
"example_google_analytics",
schedule_interval=None, # Override to match your needs,
start_date=dates.days_ago(1),
) as dag:
# [START howto_marketing_platform_list_accounts_operator]
list_account = GoogleAnalyticsListAccountsOperator(task_id="list_account")
# [END howto_marketing_platform_list_accounts_operator]
# [START howto_marketing_platform_get_ads_link_operator]
get_ad_words_link = GoogleAnalyticsGetAdsLinkOperator(
web_property_ad_words_link_id=WEB_PROPERTY_AD_WORDS_LINK_ID,
web_property_id=WEB_PROPERTY_ID,
account_id=ACCOUNT_ID,
task_id="get_ad_words_link",
)
# [END howto_marketing_platform_get_ads_link_operator]
# [START howto_marketing_platform_retrieve_ads_links_list_operator]
list_ad_words_link = GoogleAnalyticsRetrieveAdsLinksListOperator(
task_id="list_ad_link", account_id=ACCOUNT_ID, web_property_id=WEB_PROPERTY_ID
)
# [END howto_marketing_platform_retrieve_ads_links_list_operator]
upload = GoogleAnalyticsDataImportUploadOperator(
task_id="upload",
storage_bucket=BUCKET,
storage_name_object=BUCKET_FILENAME,
account_id=ACCOUNT_ID,
web_property_id=WEB_PROPERTY_ID,
custom_data_source_id=DATA_ID,
)
delete = GoogleAnalyticsDeletePreviousDataUploadsOperator(
task_id="delete",
account_id=ACCOUNT_ID,
web_property_id=WEB_PROPERTY_ID,
custom_data_source_id=DATA_ID,
)
transform = GoogleAnalyticsModifyFileHeadersDataImportOperator(
task_id="transform",
storage_bucket=BUCKET,
storage_name_object=BUCKET_FILENAME,
)
upload >> [delete, transform]
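# Dependency chain declared above: "upload" must succeed before "delete" and
# "transform" run (in parallel); the remaining operators are independent tasks.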
| [] | [] | [
"GA_WEB_PROPERTY_AD_WORDS_LINK_ID",
"GA_ACCOUNT_ID",
"GMP_ANALYTICS_BUCKET",
"GA_WEB_PROPERTY"
] | [] | ["GA_WEB_PROPERTY_AD_WORDS_LINK_ID", "GA_ACCOUNT_ID", "GMP_ANALYTICS_BUCKET", "GA_WEB_PROPERTY"] | python | 4 | 0 | |
test/test_system/test_requests.py | from requests import Session, exceptions
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from unittest import TestCase
from datetime import datetime
import logging
log = logging.getLogger(__name__)
class TestRequests(TestCase):
"""
(Not testing code in this project)
This is just testing my understanding of retry and exceptions in
requests / urllib3. It was not easy to work out what exceptions to
expect so I'm keeping this code as a reminder.
"""
def test_retries_500(self):
retries = 5
timeout = 2
session = Session()
start = datetime.now()
result = session.get("https://httpbin.org/status/500", timeout=timeout)
self.assertEqual(result.status_code, 500)
elapsed = datetime.now() - start
retry = Retry(
total=retries,
backoff_factor=0.1,
status_forcelist=[500, 502, 503, 504],
method_whitelist=frozenset(["GET", "POST"]),
raise_on_status=False,
)
session.mount("https://", HTTPAdapter(max_retries=retry))
start = datetime.now()
result = session.get("https://httpbin.org/status/500", timeout=timeout)
elapsed2 = datetime.now() - start
self.assertEqual(result.status_code, 500)
self.assertGreater(elapsed2, elapsed * (retries - 1))
def test_retries_timeout(self):
retries = 3
timeout = 1
retry_error = False
session = Session()
retry = Retry(
total=retries,
backoff_factor=0.1,
status_forcelist=[500, 502, 503, 504],
method_whitelist=frozenset(["GET", "POST"]),
raise_on_status=False,
)
session.mount("https://", HTTPAdapter(max_retries=retry))
start = datetime.now()
try:
_ = session.get("https://httpbin.org/delay/5", timeout=timeout)
except exceptions.ConnectionError as e:
retry_error = True
print(e)
elapsed = datetime.now() - start
self.assertEqual(retry_error, True)
self.assertGreater(elapsed.seconds, retries * timeout)
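

# A minimal extra sketch (not part of the original tests, run manually only):
# with raise_on_status=True (urllib3's default) exhausting the status-based
# retries surfaces as requests.exceptions.RetryError rather than a response
# object. Assumes https://httpbin.org is reachable.
if __name__ == "__main__":
    session = Session()
    retry = Retry(total=2, backoff_factor=0.1, status_forcelist=[500])
    session.mount("https://", HTTPAdapter(max_retries=retry))
    try:
        session.get("https://httpbin.org/status/500", timeout=2)
    except exceptions.RetryError as error:
        print("retries exhausted:", error)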
| [] | [] | [] | [] | [] | python | null | null | null |
tests/apps/perf/actorjava/src/main/java/io/dapr/apps/actor/actors/DemoActorTimerImpl.java | /*
* Copyright (c) Microsoft Corporation and Dapr Contributors.
* Licensed under the MIT License.
*/
package io.dapr.apps.actor.actors;
import static org.apache.commons.lang3.math.NumberUtils.createDouble;
import static org.apache.commons.lang3.math.NumberUtils.isParsable;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.time.Duration;
import java.util.Calendar;
import java.util.Random;
import java.util.TimeZone;
import io.dapr.actors.ActorId;
import io.dapr.actors.runtime.AbstractActor;
import io.dapr.actors.runtime.ActorRuntimeContext;
import lombok.extern.slf4j.Slf4j;
/**
* Implementation of the DemoActor for the server side.
*/
@Slf4j
public class DemoActorTimerImpl extends AbstractActor implements DemoActorTimer {
private static final Random RANDOM = new Random(37);
/**
* Format to output date and time.
*/
private final DateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
/**
     * Fraction of messages that get logged; default is 1.0 (100%).
*/
private final double logRatio;
/**
* This is the constructor of an actor implementation.
*
* @param runtimeContext The runtime context object which contains objects such as the state provider.
* @param id The id of this actor.
*/
public DemoActorTimerImpl(ActorRuntimeContext runtimeContext, ActorId id) {
super(runtimeContext, id);
String logRatioString = System.getenv("LOG_RATIO");
double logRatioTemp = isParsable(logRatioString) ? createDouble(logRatioString) : 1;
logRatioTemp = logRatioTemp < 0 ? 0 : logRatioTemp;
logRatioTemp = logRatioTemp > 1 ? 1 : logRatioTemp;
this.logRatio = logRatioTemp;
}
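
    // Note: LOG_RATIO is read once per actor activation and clamped to [0, 1].
    // For example, deploying with LOG_RATIO=0.25 makes say() log roughly a quarter
    // of its calls (see the RANDOM.nextDouble() check below); unset or unparsable
    // values fall back to 1.0, i.e. every call is logged.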
@Override
public void registerDemoActorTimer() {
super.registerActorTimer(
"sayTimer",
"say",
"Hello World!",
Duration.ofSeconds(10),
Duration.ofSeconds(10))
.subscribe(
avoid -> log.info("timer registered successfully"),
error -> log.warn(error.getMessage()),
() -> log.info("registerTimer completed"));
}
/**
* Prints a message and appends the timestamp.
*
* @param something Something to be said.
* @return Timestamp.
*/
@Override
public String say(String something) {
Calendar utcNow = Calendar.getInstance(TimeZone.getTimeZone("GMT"));
String utcNowAsString = dateFormat.format(utcNow.getTime());
String timestampedMessage = something == null ? "" : something + " @ " + utcNowAsString;
// Handles the request by printing message.
if (RANDOM.nextDouble() < logRatio) {
log.info("Server say method for actor " + super.getId() + ": " + timestampedMessage);
}
// Now respond with current timestamp.
return utcNowAsString;
}
@Override
public void noOp() {
// No-op to test performance without app logic impacting numbers.
}
}
| [
"\"LOG_RATIO\""
] | [] | [
"LOG_RATIO"
] | [] | ["LOG_RATIO"] | java | 1 | 0 | |
internal/pkg/server/unified-logging/handler_it_test.go | /*
* Copyright 2020 Nalej
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
RUN_INTEGRATION_TEST=true
IT_SM_ADDRESS=localhost:8800
IT_UL_COORD_ADDRESS=localhost:8323
IT_ORGMGR_ADDRESS=localhost:8950
LOG_DOWNLOAD_ADDRESS=<log-download-manager address, also required>
*/
package unified_logging
import (
"fmt"
"github.com/nalej/authx/pkg/interceptor"
"github.com/nalej/grpc-application-manager-go"
"github.com/nalej/grpc-authx-go"
"github.com/nalej/grpc-log-download-manager-go"
"github.com/nalej/grpc-organization-manager-go"
"github.com/nalej/grpc-public-api-go"
"github.com/nalej/grpc-utils/pkg/test"
"github.com/nalej/public-api/internal/pkg/server/ithelpers"
"github.com/nalej/public-api/internal/pkg/utils"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
"github.com/rs/zerolog/log"
"google.golang.org/grpc"
"google.golang.org/grpc/test/bufconn"
"os"
"time"
)
var _ = ginkgo.Describe("Unified Logging", func() {
if !utils.RunIntegrationTests() {
log.Warn().Msg("Integration tests are skipped")
return
}
var (
systemModelAddress = os.Getenv("IT_SM_ADDRESS")
unifiedLoggingAddress = os.Getenv("IT_UL_COORD_ADDRESS")
logManagerAddress = os.Getenv("LOG_DOWNLOAD_ADDRESS")
orgManagerAddress = os.Getenv("IT_ORGMGR_ADDRESS")
)
if systemModelAddress == "" || unifiedLoggingAddress == "" || logManagerAddress == "" || orgManagerAddress == "" {
ginkgo.Fail("missing environment variables")
}
// gRPC server
var server *grpc.Server
// grpc test listener
var listener *bufconn.Listener
// client
var ulClient grpc_application_manager_go.UnifiedLoggingClient
var ulConn *grpc.ClientConn
var orgClient grpc_organization_manager_go.OrganizationsClient
var smConn *grpc.ClientConn
var orgConn *grpc.ClientConn
var client grpc_public_api_go.UnifiedLoggingClient
var lmClient grpc_log_download_manager_go.LogDownloadManagerClient
var lmConn *grpc.ClientConn
var organization, appInstance, sgInstance string
var token string
var devToken string
var operToken string
var from, to int64
ginkgo.BeforeSuite(func() {
listener = test.GetDefaultListener()
authConfig := ithelpers.GetAllAuthConfig()
server = grpc.NewServer(interceptor.WithServerAuthxInterceptor(interceptor.NewConfig(authConfig, "secret", ithelpers.AuthHeader)))
smConn = utils.GetConnection(systemModelAddress)
orgConn = utils.GetConnection(orgManagerAddress)
orgClient = grpc_organization_manager_go.NewOrganizationsClient(orgConn)
ulConn = utils.GetConnection(unifiedLoggingAddress)
ulClient = grpc_application_manager_go.NewUnifiedLoggingClient(ulConn)
lmConn = utils.GetConnection(logManagerAddress)
lmClient = grpc_log_download_manager_go.NewLogDownloadManagerClient(lmConn)
conn, err := test.GetConn(*listener)
gomega.Expect(err).To(gomega.Succeed())
manager := NewManager(ulClient, lmClient)
handler := NewHandler(manager)
grpc_public_api_go.RegisterUnifiedLoggingServer(server, handler)
test.LaunchServer(server, listener)
client = grpc_public_api_go.NewUnifiedLoggingClient(conn)
// Need organization, application descriptor, application instance, service group instance
organization = ithelpers.CreateOrganization(fmt.Sprintf("testOrg-%d", ginkgo.GinkgoRandomSeed()), orgClient).GetOrganizationId()
// Instances don't have to exist, we search for them anyway and get empty result
appInstance = fmt.Sprintf("testAppInstance-%d", ginkgo.GinkgoRandomSeed())
sgInstance = fmt.Sprintf("testSGInstance-%d", ginkgo.GinkgoRandomSeed())
from = 0
to = time.Now().UnixNano()
token = ithelpers.GenerateToken("[email protected]",
organization, "Owner", "secret",
[]grpc_authx_go.AccessPrimitive{grpc_authx_go.AccessPrimitive_ORG})
devToken = ithelpers.GenerateToken("[email protected]", organization, "Developer", "secret",
[]grpc_authx_go.AccessPrimitive{grpc_authx_go.AccessPrimitive_PROFILE, grpc_authx_go.AccessPrimitive_APPS})
operToken = ithelpers.GenerateToken("[email protected]", organization, "Operator", "secret",
[]grpc_authx_go.AccessPrimitive{grpc_authx_go.AccessPrimitive_PROFILE, grpc_authx_go.AccessPrimitive_RESOURCES})
})
ginkgo.AfterSuite(func() {
testCleaner := ithelpers.NewTestCleaner(smConn)
testCleaner.DeleteOrganizationDescriptors(organization)
server.Stop()
listener.Close()
smConn.Close()
ulConn.Close()
orgConn.Close()
})
ginkgo.Context("search", func() {
ginkgo.It("should be able to search logs of an application instance", func() {
tests := make([]utils.TestResult, 0)
tests = append(tests, utils.TestResult{Token: token, Success: true, Msg: "Owner"})
tests = append(tests, utils.TestResult{Token: devToken, Success: true, Msg: "Developer"})
tests = append(tests, utils.TestResult{Token: operToken, Success: false, Msg: "Operator"})
request := &grpc_public_api_go.SearchRequest{
OrganizationId: organization,
AppInstanceId: appInstance,
}
for _, test := range tests {
ginkgo.By(test.Msg)
ctx, cancel := ithelpers.GetContext(test.Token)
defer cancel()
result, err := client.Search(ctx, request)
if test.Success {
gomega.Expect(err).To(gomega.Succeed())
gomega.Expect(result.OrganizationId).Should(gomega.Equal(organization))
gomega.Expect(result.From).Should(gomega.BeNil())
gomega.Expect(result.To).Should(gomega.BeNil())
} else {
gomega.Expect(err).NotTo(gomega.Succeed())
}
}
})
ginkgo.It("should be able to search logs of a service group instance", func() {
tests := make([]utils.TestResult, 0)
tests = append(tests, utils.TestResult{Token: token, Success: true, Msg: "Owner"})
tests = append(tests, utils.TestResult{Token: devToken, Success: true, Msg: "Developer"})
tests = append(tests, utils.TestResult{Token: operToken, Success: false, Msg: "Operator"})
request := &grpc_public_api_go.SearchRequest{
OrganizationId: organization,
AppInstanceId: appInstance,
ServiceGroupInstanceId: sgInstance,
}
for _, test := range tests {
ginkgo.By(test.Msg)
ctx, cancel := ithelpers.GetContext(test.Token)
defer cancel()
result, err := client.Search(ctx, request)
if test.Success {
gomega.Expect(err).To(gomega.Succeed())
gomega.Expect(result.OrganizationId).Should(gomega.Equal(organization))
gomega.Expect(result.From).Should(gomega.BeNil())
gomega.Expect(result.To).Should(gomega.BeNil())
} else {
gomega.Expect(err).NotTo(gomega.Succeed())
}
}
})
ginkgo.It("should be able to search logs with a message filter", func() {
tests := make([]utils.TestResult, 0)
tests = append(tests, utils.TestResult{Token: token, Success: true, Msg: "Owner"})
tests = append(tests, utils.TestResult{Token: devToken, Success: true, Msg: "Developer"})
tests = append(tests, utils.TestResult{Token: operToken, Success: false, Msg: "Operator"})
request := &grpc_public_api_go.SearchRequest{
OrganizationId: organization,
AppInstanceId: appInstance,
ServiceGroupInstanceId: sgInstance,
MsgQueryFilter: "message filter",
}
for _, test := range tests {
ginkgo.By(test.Msg)
ctx, cancel := ithelpers.GetContext(test.Token)
defer cancel()
result, err := client.Search(ctx, request)
if test.Success {
gomega.Expect(err).To(gomega.Succeed())
gomega.Expect(result.OrganizationId).Should(gomega.Equal(organization))
gomega.Expect(result.From).Should(gomega.BeNil())
gomega.Expect(result.To).Should(gomega.BeNil())
} else {
gomega.Expect(err).NotTo(gomega.Succeed())
}
}
})
ginkgo.It("should be able to search logs with a time constraint", func() {
tests := make([]utils.TestResult, 0)
tests = append(tests, utils.TestResult{Token: token, Success: true, Msg: "Owner"})
tests = append(tests, utils.TestResult{Token: devToken, Success: true, Msg: "Developer"})
tests = append(tests, utils.TestResult{Token: operToken, Success: false, Msg: "Operator"})
request := &grpc_public_api_go.SearchRequest{
OrganizationId: organization,
AppInstanceId: appInstance,
From: from,
To: to,
}
for _, test := range tests {
ginkgo.By(test.Msg)
ctx, cancel := ithelpers.GetContext(test.Token)
defer cancel()
result, err := client.Search(ctx, request)
if test.Success {
gomega.Expect(err).To(gomega.Succeed())
gomega.Expect(result.OrganizationId).Should(gomega.Equal(organization))
// We don't check from/to, as we're dealing with empty data in this
// test. This means there are no real minimum and maximum timestamps
// and from/to are nil.
gomega.Expect(result.GetFrom()).Should(gomega.BeNil())
gomega.Expect(result.GetTo()).Should(gomega.BeNil())
} else {
gomega.Expect(err).NotTo(gomega.Succeed())
}
}
})
ginkgo.It("should be able to retrieve logs in descending order", func() {
tests := make([]utils.TestResult, 0)
tests = append(tests, utils.TestResult{Token: token, Success: true, Msg: "Owner"})
tests = append(tests, utils.TestResult{Token: devToken, Success: true, Msg: "Developer"})
tests = append(tests, utils.TestResult{Token: operToken, Success: false, Msg: "Operator"})
request := &grpc_public_api_go.SearchRequest{
OrganizationId: organization,
AppInstanceId: appInstance,
Order: &grpc_public_api_go.OrderOptions{Order: grpc_public_api_go.Order_ASC, Field: "timestamp"},
}
for _, test := range tests {
ginkgo.By(test.Msg)
ctx, cancel := ithelpers.GetContext(test.Token)
defer cancel()
result, err := client.Search(ctx, request)
if test.Success {
gomega.Expect(err).To(gomega.Succeed())
gomega.Expect(result.OrganizationId).Should(gomega.Equal(organization))
} else {
gomega.Expect(err).NotTo(gomega.Succeed())
}
}
})
ginkgo.It("should not accept an empty search request", func() {
request := &grpc_public_api_go.SearchRequest{}
ctx, cancel := ithelpers.GetContext(token)
defer cancel()
_, err := client.Search(ctx, request)
gomega.Expect(err).NotTo(gomega.Succeed())
})
ginkgo.It("should not accept a search request without application instance", func() {
request := &grpc_public_api_go.SearchRequest{
OrganizationId: organization,
}
ctx, cancel := ithelpers.GetContext(token)
defer cancel()
_, err := client.Search(ctx, request)
gomega.Expect(err).NotTo(gomega.Succeed())
})
})
})
| [
"\"IT_SM_ADDRESS\"",
"\"IT_UL_COORD_ADDRESS\"",
"\"LOG_DOWNLOAD_ADDRESS\"",
"\"IT_ORGMGR_ADDRESS\""
] | [] | [
"IT_SM_ADDRESS",
"IT_ORGMGR_ADDRESS",
"LOG_DOWNLOAD_ADDRESS",
"IT_UL_COORD_ADDRESS"
] | [] | ["IT_SM_ADDRESS", "IT_ORGMGR_ADDRESS", "LOG_DOWNLOAD_ADDRESS", "IT_UL_COORD_ADDRESS"] | go | 4 | 0 | |
djangovirtualpos/models.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import base64
import json
import re
import cgi
###########################################
# Debugging system
from bs4 import BeautifulSoup
from debug import dlprint
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.conf import settings
from django.core.validators import MinLengthValidator, MaxLengthValidator, RegexValidator
from django.db.models import Sum
from django.http import HttpResponse
from django.utils import timezone
from django.shortcuts import redirect
from django.core.urlresolvers import reverse
import random
import urllib2, urllib
import urlparse
import hashlib
from django.utils import translation
from Crypto.Cipher import DES3
from Crypto.Hash import SHA256, HMAC
from lxml import etree
import datetime
import time
from decimal import Decimal
from djangovirtualpos.util import dictlist, localize_datetime
from django.utils.translation import ugettext_lazy as _
import requests
VPOS_TYPES = (
("ceca", _("TPV Virtual - Confederación Española de Cajas de Ahorros (CECA)")),
("paypal", _("Paypal")),
("redsys", _("TPV Redsys")),
("santanderelavon", _("TPV Santander Elavon")),
("bitpay", _("TPV Bitpay")),
)
## Mapping between virtual POS types and their delegated classes
VPOS_CLASSES = {
"ceca": "VPOSCeca",
"redsys": "VPOSRedsys",
"paypal": "VPOSPaypal",
"santanderelavon": "VPOSSantanderElavon",
"bitpay": "VPOSBitpay",
}
########################################################################
## Gets the delegated class from the virtual POS type.
## The delegated class must be defined in the
## VPOS_CLASSES dictionary in vpos.models.
def get_delegated_class(virtualpos_type):
try:
        # __name__ is the name of the current module, that is,
        # a str with the content "vpos.models".
        # __import__(__name__) is the "vpos" module object.
        # __import__(__name__, globals(), locals(), ["models"])
        # loads the "vpos.models" module object.
mdl = __import__(__name__, globals(), locals(), ["models"])
        # getattr fetches an attribute from an object, so we obtain the class
        # object from its name and the "vpos.models" module object.
cls = getattr(mdl, VPOS_CLASSES[virtualpos_type])
return cls
except KeyError:
raise ValueError(_(u"The virtual point of sale {0} does not exist").format(virtualpos_type))
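
# Illustrative use of the helper above ("redsys" is one of the keys defined in
# VPOS_CLASSES; an unknown key raises the ValueError handled in the function):
#
#     delegated_cls = get_delegated_class("redsys")        # -> the VPOSRedsys model class
#     delegated = delegated_cls.objects.get(parent_id=vpos_id)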
####################################################################
## Choices for the STATUS field
## STATUS: state the payment operation is currently in
VPOS_STATUS_CHOICES = (
("pending", _(u"Pending")),
("completed", _(u"Completed")),
("failed", _(u"Failed")),
("partially_refunded", _(u"Partially Refunded")),
("completely_refunded", _(u"Completely Refunded")),
)
VPOS_REFUND_STATUS_CHOICES = (
("pending", _(u"Pending")),
("completed", _(u"Completed")),
("failed", _(u"Failed")),
)
####################################################################
## Virtual POS environment types
VIRTUALPOS_STATE_TYPES = (
("testing", "Pruebas"),
("production", "Producción")
)
####################################################################
## Virtual POS payment operation
class VPOSPaymentOperation(models.Model):
"""
    Payment configuration for a virtual POS
"""
amount = models.DecimalField(max_digits=6, decimal_places=2, null=False, blank=False,
verbose_name=u"Coste de la operación")
description = models.CharField(max_length=512, null=False, blank=False, verbose_name=u"Descripción de la venta")
url_ok = models.CharField(max_length=255, null=False, blank=False, verbose_name=u"URL de OK",
help_text=u"URL a la que redirige la pasarela bancaria cuando la compra ha sido un éxito")
url_nok = models.CharField(max_length=255, null=False, blank=False, verbose_name=u"URL de NOK",
help_text=u"URL a la que redirige la pasarela bancaria cuando la compra ha fallado")
operation_number = models.CharField(max_length=255, null=False, blank=False, verbose_name=u"Número de operación")
confirmation_code = models.CharField(max_length=255, null=True, blank=False,
verbose_name="Código de confirmación enviado por el banco.")
confirmation_data = models.TextField(null=True, blank=False,
verbose_name="POST enviado por la pasarela bancaria al confirmar la compra.")
sale_code = models.CharField(max_length=512, null=False, blank=False, verbose_name=u"Código de la venta",
help_text=u"Código de la venta según la aplicación.")
status = models.CharField(max_length=64, choices=VPOS_STATUS_CHOICES, null=False, blank=False,
verbose_name=u"Estado del pago")
response_code = models.CharField(max_length=255, null=True, blank=False,
verbose_name=u"Código de respuesta con estado de aceptación o denegación de la operación.")
creation_datetime = models.DateTimeField(verbose_name="Fecha de creación del objeto")
last_update_datetime = models.DateTimeField(verbose_name="Fecha de última actualización del objeto")
type = models.CharField(max_length=16, choices=VPOS_TYPES, default="", verbose_name="Tipo de TPV")
virtual_point_of_sale = models.ForeignKey("VirtualPointOfSale", parent_link=True, related_name="payment_operations", null=False)
environment = models.CharField(max_length=255, choices=VIRTUALPOS_STATE_TYPES, default="", blank=True, verbose_name="Entorno del TPV")
@property
def vpos(self):
return self.virtual_point_of_sale
@property
def total_amount_refunded(self):
return self.refund_operations.filter(status='completed').aggregate(Sum('amount'))['amount__sum']
    # Checks whether a payment has been fully refunded and updates the status accordingly.
def compute_payment_refunded_status(self):
if self.total_amount_refunded == self.amount:
self.status = "completely_refunded"
elif self.total_amount_refunded < self.amount:
dlprint('Devolución parcial de pago.')
self.status = "partially_refunded"
elif self.total_amount_refunded > self.amount:
raise ValueError(u'ERROR. Este caso es imposible, no se puede reembolsar una cantidad superior al pago.')
self.save()
    ## Saves the object to the DB; in practice all it does is update the datetimes
def save(self, *args, **kwargs):
"""
        Saves the object to the DB; in practice all it does is update the datetimes.
        The update datetime is always refreshed; the creation datetime only when saving for the first time.
"""
        # Datetime with the current moment in UTC
now_datetime = datetime.datetime.now()
        # If it has not been saved yet, the creation datetime is the current date
if not self.id:
self.creation_datetime = localize_datetime(now_datetime)
        # The update datetime is the current date
self.last_update_datetime = localize_datetime(now_datetime)
        # Call to the parent class save() method
super(VPOSPaymentOperation, self).save(*args, **kwargs)
####################################################################
####################################################################
# Exception indicating that the charge operation returned an incorrect or failure response
class VPOSCantCharge(Exception): pass
# Exception indicating that an operation is not implemented for a particular virtual POS type.
class VPOSOperationNotImplemented(Exception): pass
# Raised when an error occurs while performing a specific operation.
class VPOSOperationException(Exception): pass
# The operation was already confirmed previously through another received notification
class VPOSOperationAlreadyConfirmed(Exception): pass
####################################################################
## Class that contains the payment operations in a generic way
## and acts as a facade, so that the rest of the software does not need
## to know the specifics of each payment gateway
class VirtualPointOfSale(models.Model):
"""
    Class that acts as the base class of the specialisation relationship.
    Each specialised class will be related to this one through a
    one-to-one relationship.
    This class cannot have a field to keep the relationships with the
    specialised classes, since each one lives in a different table.
    The table for this model is created automatically with syncdb.
"""
    ## Unique name of the virtual POS
name = models.CharField(max_length=128, null=False, blank=False, verbose_name="Nombre")
    ## Unique name of the bank this virtual POS is associated with
bank_name = models.CharField(max_length=128, null=False, blank=False, verbose_name="Nombre de la entidad bancaria")
    ## Virtual POS type. Indicates the nature of the virtual POS.
type = models.CharField(max_length=16, choices=VPOS_TYPES, default="", verbose_name="Tipo de TPV")
    ## Name of the plan distributor
distributor_name = models.CharField(null=False, blank=True, max_length=512,
verbose_name="Razón social del distribuidor",
help_text="Razón social del distribuidor.")
    ## CIF (tax ID) of the plan organiser
distributor_cif = models.CharField(null=False, blank=True, max_length=150,
verbose_name="CIF del distribuidor",
help_text="C.I.F. del distribuidor.")
    ## Virtual POS environment: whether it is in testing or in production
environment = models.CharField(max_length=16, null=False, blank=False, choices=VIRTUALPOS_STATE_TYPES,
default="testing",
verbose_name="Entorno de ejecución del TPV",
help_text="Entorno de ejecución en el que se encuentra el TPV. Una vez que el TPV esté en entorno de 'producción' no cambie a entorno de 'pruebas' a no ser que esté seguro de lo que hace.")
    ## Allows partial refunds
has_partial_refunds = models.BooleanField(default=False, verbose_name="Indica si tiene devoluciones parciales.",
help_text="Indica si se pueden realizar devoluciones por un importe menor que el total de la venta (por ejemplo, para devolver tickets individuales).")
    ## Allows total refunds
has_total_refunds = models.BooleanField(default=False, verbose_name="Indica si tiene devoluciones totales.",
help_text="Indica si se pueden realizar devoluciones por un importe igual al total de la venta.")
    ## Logical deletion flag
is_erased = models.BooleanField(default=False, verbose_name="Indica si el TPV está eliminado.",
help_text="Indica si el TPV está eliminado de forma lógica.")
    ## Payment operation configuration (it also acts as the record of the payment operation)
operation = None
    ## Object the gateway-specific calls are delegated to, depending on the virtual POS type
delegated = None
class Meta:
ordering = ['name']
verbose_name = "virtual point of sale"
verbose_name_plural = "virtual points of sale"
permissions = (
("view_virtualpointofsale", "View Virtual Points of Sale"),
)
def __unicode__(self):
return self.name
    def meta(self):
        """Gets the meta-information of the objects of this model."""
return self._meta
@property
def operation_prefix(self):
"""
        Operation prefix associated with this virtual POS.
        The delegated object is queried to obtain it.
:return: string | None
"""
self._init_delegated()
        # Some virtual POS types do not have this attribute (PayPal)
if hasattr(self.delegated, "operation_number_prefix"):
prefix = getattr(self.delegated, "operation_number_prefix")
else:
prefix = "n/a"
return prefix
####################################################################
    ## Logically deletes the object
def erase(self):
self.is_erased = True
self.save()
####################################################################
    ## Gets the help text for the virtual POS type
def get_type_help(self):
return dict(VPOS_TYPES)[self.type]
####################################################################
    ## Returns the specific virtual POS
@property
def specific_vpos(self):
delegated_class = get_delegated_class(self.type)
try:
return delegated_class.objects.get(parent_id=self.id)
except delegated_class.DoesNotExist as e:
raise ValueError(u" No existe ningún vpos del tipo {0} con el identificador {1}".format(self.type, self.id))
####################################################################
    ## Constructor-like helper: initialises the virtual POS object
def _init_delegated(self):
"""
        Returns the virtual POS configuration as an instance of the
        associated child model.
        Since, depending on the virtual POS, the configuration is stored in
        different tables and therefore each one has its own model,
        the result returned by this function will be an instance of the
        corresponding model.
        This method will have to be updated every time a new virtual POS
        type is added.
"""
self.delegated = None
delegated_class = get_delegated_class(self.type)
try:
self.delegated = delegated_class.objects.get(parent_id=self.id)
except delegated_class.DoesNotExist as e:
raise ValueError(
unicode(e) + u" No existe ningún vpos del tipo {0} con el identificador {1}".format(self.type, self.id))
        # We need the dynamic data of the parent, which is an object of the
        # Tpv (VirtualPointOfSale) class; if we accessed self.parent directly from
        # the delegated object it would fetch that object's data from the DB again,
        # and we want its data plus its dynamic attributes.
self.delegated.parent = self
return self.delegated
####################################################################
    ## Gets a virtual POS object from a set of filters
@staticmethod
def get(**kwargs):
vpos = VirtualPointOfSale.objects.get(**kwargs)
vpos._init_delegated()
return vpos
####################################################################
    ## Step 1.1. Payment configuration
def configurePayment(self, amount, description, url_ok, url_nok, sale_code):
"""
        Configures the payment through the virtual POS.
        Prepares the virtual POS object to
        - Pay a specific amount
        - Set a description for the payment
        - Set the OK and NOK URLs
        - Store the sale code of the operation
"""
if type(amount) == int or type(amount) == Decimal:
amount = float(amount)
if type(amount) != float or amount < 0.0:
raise ValueError(u"La cantidad debe ser un flotante positivo")
if sale_code is None or sale_code == "":
raise ValueError(u"El código de venta no puede estar vacío")
if description is None or description == "":
raise ValueError(u"La descripción de la venta no puede estar vacía")
        # These two checks should be removed
        # if some virtual POS does not use url_ok and url_nok
if url_ok is None or type(url_ok) != str or url_ok == "":
raise ValueError(u"La url_ok no puede estar vacía. Ha de ser un str.")
if url_nok is None or type(url_nok) != str or url_nok == "":
raise ValueError(u"La url_nok no puede estar vacía. Ha de ser un str.")
        # Creation of the operation
        # (it is saved once the operation number is available)
self.operation = VPOSPaymentOperation(
amount=amount, description=description, url_ok=url_ok, url_nok=url_nok,
sale_code=sale_code, status="pending",
virtual_point_of_sale=self, type=self.type, environment=self.environment
)
        # Specific configuration (requires self.operation to exist)
self.delegated.configurePayment()
####################################################################
    ## Step 1.2. Virtual POS preparation and operation number generation
def setupPayment(self):
"""
        Prepares the virtual POS.
        Generates the operation number and prepares the payment process.
"""
if self.operation is None:
raise Exception(u"No se ha configurado la operación, ¿ha llamado a vpos.configurePayment antes?")
        # Check whether there is already another virtual POS operation number
        # for the same sale code.
        # If there is, return the existing operation number.
stored_operations = VPOSPaymentOperation.objects.filter(
sale_code=self.operation.sale_code,
status="pending",
virtual_point_of_sale_id=self.operation.virtual_point_of_sale_id
)
if stored_operations.count() >= 1:
self.operation = stored_operations[0]
return self.delegated.setupPayment(operation_number=self.operation.operation_number)
        # There is no previous virtual POS operation number for this sale code,
        # so we generate a new operation number.
        # Check that the operation number generated by the delegated object
        # is unique in the VPOSPaymentOperation table.
operation_number = None
while operation_number is None or VPOSPaymentOperation.objects.filter(
operation_number=operation_number).count() > 0:
operation_number = self.delegated.setupPayment()
dlprint("entra al delegado para configurar el operation number:{0}".format(operation_number))
        # Assign the unique operation number
self.operation.operation_number = operation_number
self.operation.save()
dlprint("Operation {0} creada en BD".format(operation_number))
return self.operation.operation_number
####################################################################
    ## Step 1.3. Gets the payment data.
    ## This is the method that generates the payment form fields,
    ## which will be filled in from the client side (via Javascript).
def getPaymentFormData(self, *args, **kwargs):
if self.operation.operation_number is None:
raise Exception(u"No se ha generado el número de operación, ¿ha llamado a vpos.setupPayment antes?")
data = self.delegated.getPaymentFormData(*args, **kwargs)
data["type"] = self.type
return data
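
    # Illustrative end-to-end use of steps 1.1-1.3 above (sketch only; the sale
    # code, amount and URLs are made-up values and the Django view glue is omitted):
    #
    #     vpos = VirtualPointOfSale.get(id=vpos_id)
    #     vpos.configurePayment(amount=10.0, description="Test sale",
    #                           url_ok="https://example.com/ok",
    #                           url_nok="https://example.com/nok",
    #                           sale_code="sale-123")
    #     vpos.setupPayment()
    #     form_data = vpos.getPaymentFormData()  # fields for the client-side payment form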
####################################################################
    ## Step 2. Sending of the transaction data (including "amount")
    ## to the bank gateway, based on the operation number.
    ## TODO: not implemented until it becomes necessary.
def getPaymentDetails(self):
pass
####################################################################
    ## Step 3.1. Gets the operation number and the data sent to us by
    ## the payment gateway, in order to perform the verification afterwards.
@staticmethod
def receiveConfirmation(request, virtualpos_type):
delegated_class = get_delegated_class(virtualpos_type)
delegated = delegated_class.receiveConfirmation(request)
if delegated:
vpos = delegated.parent
return vpos
return False
####################################################################
    ## Step 3.2. Verifies the data sent by the payment gateway,
    ## to check whether the payment must be marked
    ## as paid.
def verifyConfirmation(self):
dlprint("vpos.verifyConfirmation")
return self.delegated.verifyConfirmation()
####################################################################
    ## Step 3.3 Send the response to the virtual POS;
    ## the response can be one of the following:
####################################################################
    ## Step 3.3a Complete the payment.
    ## Last communication with the virtual POS.
    ## The actual communication only happens for PayPal and Santander Elavon, since CECA
    ## and others just have a verification step and an "OK" response.
    ## In any case, the application must call this
    ## method to finish the process correctly.
def charge(self):
        # Block other transactions
VPOSPaymentOperation.objects.select_for_update().filter(id=self.operation.id)
        # Perform the charge
response = self.delegated.charge()
        # Change the operation status
self.operation.status = "completed"
self.operation.save()
dlprint("Operation {0} actualizada en charge()".format(self.operation.operation_number))
        # Return the charge response
return response
####################################################################
    ## Step 3.3b1. Verification error.
    ## The virtual POS instance could not be recovered from the bank's
    ## response. A NOK response specific to the virtual POS type is returned.
@staticmethod
def staticResponseNok(vpos_type):
dlprint("vpos.staticResponseNok")
delegated_class = get_delegated_class(vpos_type)
dummy_delegated = delegated_class()
return dummy_delegated.responseNok()
####################################################################
    ## Step 3.3b2. Verification error.
    ## If there has been an error in the verification, a negative response
    ## must be given to the bank gateway.
def responseNok(self, extended_status=""):
dlprint("vpos.responseNok")
self.operation.status = "failed"
if extended_status:
self.operation.status = u"{0}. {1}".format(self.operation.status, extended_status)
self.operation.save()
return self.delegated.responseNok()
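
    # Illustrative confirmation flow covering steps 3.1-3.3 above (sketch only;
    # "redsys" and the surrounding Django view are assumptions):
    #
    #     vpos = VirtualPointOfSale.receiveConfirmation(request, virtualpos_type="redsys")
    #     if not vpos:
    #         return VirtualPointOfSale.staticResponseNok("redsys")
    #     if vpos.verifyConfirmation():
    #         return vpos.charge()                         # step 3.3a
    #     return vpos.responseNok("verification failed")   # step 3.3b2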
####################################################################
    ## Step R1 (Refund). Configures the virtual POS in refund mode and executes the operation
    ## TODO: only implemented for Redsys
def refund(self, operation_sale_code, refund_amount, description):
"""
        1. Performs the necessary checks to determine whether the operation is allowed
        (otherwise the corresponding exceptions are raised).
        2. Creates a VPOSRefundOperation object (with pending status).
        3. Calls the delegated object, which implements the specifics of the communication with the concrete virtual POS.
        4. Updates the payment status, depending on whether it is 'partially refunded' or 'completely refunded'.
        5. Updates the refund status to 'completed' or 'failed'.
        @param operation_sale_code: Code of the payment we intend to refund.
        @param refund_amount: Amount of the payment being refunded.
        @param description: Description of the reason why the refund is made.
"""
try:
            # Load the operation on which we are going to perform the refund.
payment_operation = VPOSPaymentOperation.objects.get(sale_code=operation_sale_code, status='completed')
except ObjectDoesNotExist:
raise Exception(u"No se puede cargar una operación anterior completada con el código"
u" {0}".format(operation_sale_code))
if (not self.has_total_refunds) and (not self.has_partial_refunds):
raise Exception(u"El TPV no admite devoluciones, ni totales, ni parciales")
if refund_amount > payment_operation.amount:
raise Exception(u"Imposible reembolsar una cantidad superior a la del pago")
if (refund_amount < payment_operation.amount) and (not self.has_partial_refunds):
raise Exception(u"Configuración del TPV no permite realizar devoluciones parciales")
if (refund_amount == payment_operation.amount) and (not self.has_total_refunds):
raise Exception(u"Configuración del TPV no permite realizar devoluciones totales")
        # Create the operation, marking it as pending.
self.operation = VPOSRefundOperation(amount=refund_amount,
description=description,
operation_number=payment_operation.operation_number,
status='pending',
payment=payment_operation)
self.operation.save()
# Llamamos al delegado que implementa la funcionalidad en particular.
refund_response = self.delegated.refund(operation_sale_code, refund_amount, description)
if refund_response:
refund_status = 'completed'
else:
refund_status = 'failed'
self.operation.status = refund_status
self.operation.save()
# Calcula el nuevo estado del pago, en función de la suma de las devoluciones,
# (pudiéndolo marcar como "completely_refunded" o "partially_refunded").
payment_operation.compute_payment_refunded_status()
return refund_response
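## Illustrative usage sketch (commented out): how application code might invoke the refund
## flow above. The variable names and the surrounding error handling are hypothetical;
## "vpos" stands for a VirtualPointOfSale instance loaded elsewhere.
#
#   from decimal import Decimal
#   try:
#       ok = vpos.refund(operation_sale_code="SALE-0001",
#                        refund_amount=Decimal("10.00"),
#                        description=u"Devolución parcial solicitada por el cliente")
#   except Exception as e:
#       # El pago original no existe o este tipo de devolución no está permitido
#       dlprint(unicode(e))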
####################################################################
## Paso R2.a. Respuesta positiva a confirmación asíncrona de refund
def refund_response_ok(self, extended_status=""):
dlprint("vpos.refund_response_ok")
return self.delegated.refund_response_ok()
####################################################################
## Paso R2.b. Respuesta negativa a confirmación asíncrona de refund
def refund_response_nok(self, extended_status=""):
dlprint("vpos.refund_response_nok")
return self.delegated.refund_response_nok()
########################################################################################################################
class VPOSRefundOperation(models.Model):
"""
Entidad que gestiona las devoluciones de pagos realizados.
Las devoluciones pueden ser totales o parciales, por tanto un "pago" tiene una relación uno a muchos con "devoluciones".
"""
amount = models.DecimalField(max_digits=6, decimal_places=2, null=False, blank=False, verbose_name=u"Cantidad de la devolución")
description = models.CharField(max_length=512, null=False, blank=False, verbose_name=u"Descripción de la devolución")
operation_number = models.CharField(max_length=255, null=False, blank=False, verbose_name=u"Número de operación")
status = models.CharField(max_length=64, choices=VPOS_REFUND_STATUS_CHOICES, null=False, blank=False, verbose_name=u"Estado de la devolución")
creation_datetime = models.DateTimeField(verbose_name="Fecha de creación del objeto")
last_update_datetime = models.DateTimeField(verbose_name="Fecha de última actualización del objeto")
payment = models.ForeignKey(VPOSPaymentOperation, on_delete=models.PROTECT, related_name="refund_operations")
@property
def virtual_point_of_sale(self):
return self.payment.virtual_point_of_sale
## Guarda el objeto en BD, en realidad lo único que hace es actualizar los datetimes
def save(self, *args, **kwargs):
"""
Guarda el objeto en BD, en realidad lo único que hace es actualizar los datetimes.
El datetime de actualización se actualiza siempre, el de creación sólo al guardar de nuevas.
"""
# Datetime con el momento actual (se localiza después mediante localize_datetime)
now_datetime = datetime.datetime.now()
# Si no se ha guardado aún, el datetime de creación es la fecha actual
if not self.id:
self.creation_datetime = localize_datetime(now_datetime)
# El datetime de actualización es la fecha actual
self.last_update_datetime = localize_datetime(now_datetime)
# Llamada al constructor del padre
super(VPOSRefundOperation, self).save(*args, **kwargs)
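## Illustrative sketch (commented out) of how the amount refunded so far can be aggregated
## through the "refund_operations" reverse relation declared above; presumably
## compute_payment_refunded_status() performs a comparison along these lines.
#
#   from django.db.models import Sum
#   refunded = (payment.refund_operations.filter(status="completed")
#               .aggregate(total=Sum("amount"))["total"]) or 0
#   if refunded >= payment.amount:
#       payment_status = "completely_refunded"
#   elif refunded > 0:
#       payment_status = "partially_refunded"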
########################################################################################################################
########################################################################################################################
####################################################### TPV Ceca #######################################################
########################################################################################################################
########################################################################################################################
class VPOSCeca(VirtualPointOfSale):
"""Información de configuración del TPV Virtual CECA"""
regex_number = re.compile(r"^\d*$")
regex_operation_number_prefix = re.compile(r"^[A-Za-z0-9]*$")
# Relación con el padre (TPV).
# Al poner el signo "+" como "related_name" evitamos que desde el padre
# se pueda seguir la relación hasta aquí (ya que cada uno de las clases
# que heredan de ella estará en una tabla y sería un lío).
parent = models.OneToOneField(VirtualPointOfSale, parent_link=True, related_name="+", null=False, db_column="vpos_id")
# Identifica al comercio, será facilitado por la caja en el proceso de alta
merchant_id = models.CharField(max_length=9, null=False, blank=False, verbose_name="MerchantID",
validators=[MinLengthValidator(9), MaxLengthValidator(9),
RegexValidator(regex=regex_number,
message="Asegúrese de que todos los caracteres son números")])
# Identifica la caja, será facilitado por la caja en el proceso de alta
acquirer_bin = models.CharField(max_length=10, null=False, blank=False, verbose_name="AcquirerBIN",
validators=[MinLengthValidator(10), MaxLengthValidator(10),
RegexValidator(regex=regex_number,
message="Asegúrese de que todos los caracteres son números")])
# Identifica el terminal, será facilitado por la caja en el proceso de alta
terminal_id = models.CharField(max_length=8, null=False, blank=False, verbose_name="TerminalID",
validators=[MinLengthValidator(8), MaxLengthValidator(8),
RegexValidator(regex=regex_number,
message="Asegúrese de que todos los caracteres son números")])
# Clave de cifrado para el entorno de pruebas
encryption_key_testing = models.CharField(max_length=10, null=False, blank=False,
verbose_name="Encryption Key para el entorno de pruebas",
validators=[MinLengthValidator(8), MaxLengthValidator(10)])
# Clave de cifrado para el entorno de producción
encryption_key_production = models.CharField(max_length=10, null=False, blank=True,
verbose_name="Encryption Key para el entorno de producción",
validators=[MinLengthValidator(8), MaxLengthValidator(10)])
# Prefijo del número de operación usado para identificar al servidor desde el que se realiza la petición
operation_number_prefix = models.CharField(max_length=20, null=False, blank=True,
verbose_name="Prefijo del número de operación",
validators=[MinLengthValidator(0), MaxLengthValidator(20),
RegexValidator(regex=regex_operation_number_prefix,
message="Asegúrese de sólo use caracteres alfanuméricos")])
# Clave de cifrado según el entorno
encryption_key = None
# El TPV de CECA consta de dos entornos en funcionamiento, uno para pruebas y otro para producción
CECA_URL = {
"production": "https://pgw.ceca.es/cgi-bin/tpv",
"testing": "http://tpv.ceca.es:8000/cgi-bin/tpv"
}
# Los códigos de idioma a utilizar son los siguientes
IDIOMAS = {"es": "1", "en": "6", "fr": "7", "de": "8", "pt": "9", "it": "10"}
# URL de pago que variará según el entorno
url = None
# Identifica el importe de la venta, siempre será un número entero y donde los dos últimos dígitos representan los decimales
importe = None
# Tipo de pago que soporta
pago_soportado = "SSL"
# Cifrado que será usado en la generación de la firma
cifrado = "SHA1"
# Campo específico para realizar el pago, actualmente será 2
exponente = "2"
# Identifica el tipo de moneda
tipo_moneda = "978"
# Idioma por defecto a usar. Español
idioma = "1"
# marca de tiempo de recepción de la notificación de pago OK. Nuestro sistema debe dar una respuesta de
# OK o NOK antes de 30 segundos. Transcurrido este periodo, CECA anula la operación de forma automática
# y no notifica de nada (!)
confirmation_timestamp = None
####################################################################
## Inicia el valor de la clave de cifrado en función del entorno
def __init_encryption_key__(self):
# Clave de cifrado según el entorno
if self.parent.environment == "testing":
self.encryption_key = self.encryption_key_testing
elif self.parent.environment == "production":
self.encryption_key = self.encryption_key_production
else:
raise ValueError(u"Entorno {0} no válido")
####################################################################
## Constructor del TPV CECA
def __init__(self, *args, **kwargs):
super(VPOSCeca, self).__init__(*args, **kwargs)
def __unicode__(self):
return self.name
@classmethod
def form(cls):
from forms import VPOSCecaForm
return VPOSCecaForm
####################################################################
## Paso 1.1. Configuración del pago
def configurePayment(self, **kwargs):
# URL de pago según el entorno
self.url = self.CECA_URL[self.parent.environment]
# Formato para Importe: según ceca, ha de tener un formato de entero positivo
self.importe = "{0:.2f}".format(float(self.parent.operation.amount)).replace(".", "")
# Idioma de la pasarela, por defecto es español, tomamos
# el idioma actual y le asignamos éste
self.idioma = self.IDIOMAS["es"]
lang = translation.get_language()
if lang in self.IDIOMAS:
self.idioma = self.IDIOMAS[lang]
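## Worked example (commented out) of the amount formatting used above: the resulting string
## is an integer where the last two digits are the decimals expected by CECA.
#
#   "{0:.2f}".format(float("12.5")).replace(".", "")   # -> "1250"  (12,50 EUR)
#   "{0:.2f}".format(float("7")).replace(".", "")      # -> "700"   (7,00 EUR)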
####################################################################
## Paso 1.2. Preparación del TPV y Generación del número de operación
def setupPayment(self, operation_number=None, code_len=40):
"""
Inicializa el número de operación si no se indica uno
explícitamente en los argumentos.
"""
if operation_number:
return operation_number
operation_number = ''
for i in range(code_len):
operation_number += random.choice('ABCDEFGHJKLMNPQRSTUWXYZ23456789')
# Si en settings tenemos un prefijo del número de operación
# se lo añadimos delante, con carácter "-" entre medias
if self.operation_number_prefix:
operation_number = self.operation_number_prefix + "-" + operation_number
return operation_number[0:code_len]
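## Illustrative example (commented out) of the values produced by setupPayment(). With a
## hypothetical operation_number_prefix "WEB1" and the default code_len of 40, the result is
## the prefix, a dash and a random alphanumeric string, truncated to 40 characters in total:
#
#   "WEB1-K7N3ZQ8M2XW9PBHRT4CA6DEFGJLSU5Y2N3P"
#
## Note that the truncation is applied after prepending the prefix, so the random part becomes
## slightly shorter whenever a prefix is configured.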
####################################################################
## Paso 1.3. Obtiene los datos de pago
## Este método será el que genere los campos del formulario de pago
## que se rellenarán desde el cliente (por Javascript)
def getPaymentFormData(self):
data = {
# Identifica al comercio, será facilitado por la caja
"MerchantID": self.merchant_id,
# Identifica a la caja, será facilitado por la caja
"AcquirerBIN": self.acquirer_bin,
# Identifica al terminal, será facilitado por la caja
"TerminalID": self.terminal_id,
# URL determinada por el comercio a la que CECA devolverá el control en caso de que la operación finalice correctamente
"URL_OK": self.parent.operation.url_ok,
# URL determinada por el comercio a la que CECA devolverá el control en caso de que la operación NO finalice correctamente
"URL_NOK": self.parent.operation.url_nok,
# Cadena de caracteres calculada por el comercio
"Firma": self._sending_signature(),
# Tipo de cifrado que se usará para el cifrado de la firma
"Cifrado": self.cifrado,
# Identifica el número de pedido, factura, albarán, etc
"Num_operacion": self.parent.operation.operation_number,
# Importe de la operación sin formatear. Siempre será entero con los dos últimos dígitos usados para los centimos
"Importe": self.importe,
# Codigo ISO-4217 correspondiente a la moneda en la que se efectúa el pago
"TipoMoneda": self.tipo_moneda,
# Actualmente siempre será 2
"Exponente": self.exponente,
# Valor fijo: SSL
"Pago_soportado": self.pago_soportado,
# Código de idioma
"Idioma": self.idioma,
# Opcional. Campo reservado para mostrar información en la página de pago
"Descripcion": self.parent.operation.description
}
form_data = {
"data": data,
"action": self.url,
"enctype": "application/x-www-form-urlencoded",
"method": "post"
}
return form_data
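## Sketch (commented out) of how the structure returned above is typically rendered as an
## auto-submitting form on the client side. This Django template fragment is an illustration
## only and is not part of this module.
#
#   <form method="{{ form_data.method }}" action="{{ form_data.action }}"
#         enctype="{{ form_data.enctype }}" id="tpv_form">
#     {% for name, value in form_data.data.items %}
#       <input type="hidden" name="{{ name }}" value="{{ value }}"/>
#     {% endfor %}
#   </form>
#   <script>document.getElementById("tpv_form").submit();</script>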
####################################################################
## Paso 3.1. Obtiene el número de operación y los datos que nos
## envíe la pasarela de pago.
@staticmethod
def receiveConfirmation(request, **kwargs):
# Almacén de operaciones
try:
operation = VPOSPaymentOperation.objects.get(operation_number=request.POST.get("Num_operacion"))
operation.confirmation_data = {"GET": request.GET.dict(), "POST": request.POST.dict()}
operation.confirmation_code = request.POST.get("Referencia")
operation.save()
dlprint("Operation {0} actualizada en receiveConfirmation()".format(operation.operation_number))
vpos = operation.virtual_point_of_sale
except VPOSPaymentOperation.DoesNotExist:
# Si no existe la operación, están intentando
# cargar una operación inexistente
return False
# Iniciamos el delegado y la operación, esto es fundamental
# para luego calcular la firma
vpos._init_delegated()
vpos.operation = operation
# Marca de tiempo de recepción de la notificación. Debemos completar todo el proceso (es decir,
# invocar charge()) antes de 30 segundos o CECA anula la operación. Como margen de seguridad,
# charge() comprueba esta marca de tiempo y, si se ha superado el margen establecido, lanza
# una excepción y la operación queda anulada.
vpos.delegated.confirmation_timestamp = time.time()
# Iniciamos los valores recibidos en el delegado
# Identifica al comercio
vpos.delegated.merchant_id = request.POST.get("MerchantID")
# Identifica a la caja
vpos.delegated.acquirer_bin = request.POST.get("AcquirerBIN")
# Identifica al terminal
vpos.delegated.terminal_id = request.POST.get("TerminalID")
# Identifica el número de pedido, factura, albarán, etc
vpos.delegated.num_operacion = request.POST.get("Num_operacion")
# Importe de la operación sin formatear
vpos.delegated.importe = request.POST.get("Importe")
# Corresponde a la moneda en la que se efectúa el pago
vpos.delegated.tipo_moneda = request.POST.get("TipoMoneda")
# Actualmente siempre será 2
vpos.delegated.exponente = request.POST.get("Exponente")
# Idioma de la operación
vpos.delegated.idioma = request.POST.get("Idioma")
# Código ISO del país de la tarjeta que ha realizado la operación
vpos.delegated.pais = request.POST.get("Pais")
# Los 200 primeros caracteres de la operación
vpos.delegated.descripcion = request.POST.get("Descripcion")
# Valor único devuelto por la pasarela. Imprescindible para realizar cualquier tipo de reclamación y/o anulación
vpos.delegated.referencia = request.POST.get("Referencia")
# Valor asignado por la entidad emisora a la hora de autorizar una operación
vpos.delegated.num_aut = request.POST.get("Num_aut")
# Es una cadena de caracteres calculada por CECA firmada por SHA1
vpos.delegated.firma = request.POST.get("Firma")
dlprint(u"Lo que recibimos de CECA: ")
dlprint(request.POST)
return vpos.delegated
####################################################################
## Paso 3.2. Verifica que los datos enviados desde
## la pasarela de pago identifiquen a una operación de compra.
def verifyConfirmation(self):
# Comprueba si el envío es correcto
firma_calculada = self._verification_signature()
dlprint("Firma recibida " + self.firma)
dlprint("Firma calculada " + firma_calculada)
verified = (self.firma == firma_calculada)
return verified
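## Optional hardening note (commented out): the comparison above uses "==", which is what the
## original code does; a constant-time comparison avoids leaking timing information about the
## expected signature. hmac.compare_digest is available from Python 2.7.7 / 3.3 onwards.
#
#   import hmac
#   verified = hmac.compare_digest(str(self.firma), str(firma_calculada))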
####################################################################
## Paso 3.3a. Realiza el cobro y genera la respuesta que se devuelve a la pasarela de pago
## para que ésta marque la operación como pagada. Este mecanismo sólo se usa en CECA: la
## pasarela determina si todo ha funcionado correctamente a partir de la respuesta que
## recibe del comercio.
def charge(self):
dlprint("responseOk")
# Si han transcurrido más de 20 segundos anulamos la operación debido
# a que CECA la anulará si pasan más de 30 sin dar la respuesta. Nosotros
# nos quedamos en 20 como margen de seguridad por el overhead de otras
# operaciones.
elapsed = time.time() - self.confirmation_timestamp
if elapsed > 12:
dlprint(
u"AVISO: se ha superado el margen de tiempo para devolver la respuesta: {0}s. Lanzando excepción.".format(
elapsed))
raise Exception(u"Se ha superado el margen de tiempo en generar la respuesta.")
operation = self.parent.operation
dlprint(u"antes de save")
operation.confirmation_data = u"{0}\n\n{1}".format(operation.confirmation_data, u"XXXXXXXXXXXXXXXXXXXXXXXXXX")
operation.save()
dlprint(u"después de save")
return HttpResponse("$*$OKY$*$")
####################################################################
## Paso 3.3b. Si ha habido un error en el pago, se ha de dar una
## respuesta negativa a la pasarela bancaria.
def responseNok(self, **kwargs):
dlprint("responseNok")
return HttpResponse("")
####################################################################
## Paso R1. (Refund) Configura el TPV en modo devolución
## TODO: No implementado
def refund(self, operation_sale_code, refund_amount, description):
raise VPOSOperationNotImplemented(u"No se ha implementado la operación de devolución particular para CECA.")
####################################################################
## Paso R2.a. Respuesta positiva a confirmación asíncrona de refund
def refund_response_ok(self, extended_status=""):
raise VPOSOperationNotImplemented(u"No se ha implementado la operación de devolución particular para CECA.")
####################################################################
## Paso R2.b. Respuesta negativa a confirmación asíncrona de refund
def refund_response_nok(self, extended_status=""):
raise VPOSOperationNotImplemented(u"No se ha implementado la operación de devolución particular para CECA.")
####################################################################
## Generador de firma para el envío
def _sending_signature(self):
"""Calcula la firma a incorporar en el formulario de pago"""
self.__init_encryption_key__()
dlprint("Clave de cifrado es {0}".format(self.encryption_key))
signature = "{encryption_key}{merchant_id}{acquirer_bin}{terminal_id}{num_operacion}{importe}{tipo_moneda}{exponente}SHA1{url_ok}{url_nok}".format(
encryption_key=self.encryption_key,
merchant_id=self.merchant_id,
acquirer_bin=self.acquirer_bin,
terminal_id=self.terminal_id,
num_operacion=self.parent.operation.operation_number,
importe=self.importe,
tipo_moneda=self.tipo_moneda,
exponente=self.exponente,
url_ok=self.parent.operation.url_ok,
url_nok=self.parent.operation.url_nok
)
dlprint("\tencryption_key {0}".format(self.encryption_key))
dlprint("\tmerchant_id {0}".format(self.merchant_id))
dlprint("\tacquirer_bin {0}".format(self.acquirer_bin))
dlprint("\tterminal_id {0}".format(self.terminal_id))
dlprint("\tnum_operacion {0}".format(self.parent.operation.operation_number))
dlprint("\timporte {0}".format(self.importe))
dlprint("\ttipo_moneda {0}".format(self.tipo_moneda))
dlprint("\texponente {0}".format(self.exponente))
dlprint("\turl_ok {0}".format(self.parent.operation.url_ok))
dlprint("\turl_nok {0}".format(self.parent.operation.url_nok))
dlprint("FIRMA {0}".format(signature))
return hashlib.sha1(signature).hexdigest()
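## Worked example (commented out) of the SHA-1 signature computed above, with made-up test
## values; only hashlib is needed and the concatenation order must match _sending_signature().
#
#   import hashlib
#   plain = ("CLAVE1234" "123456789" "1234567890" "00000003"     # clave, MerchantID, AcquirerBIN, TerminalID
#            "WEB1-K7N3ZQ8M2X" "1250" "978" "2" "SHA1"            # Num_operacion, Importe, TipoMoneda, Exponente, Cifrado
#            "https://example.com/ok" "https://example.com/nok")  # URL_OK, URL_NOK
#   firma = hashlib.sha1(plain).hexdigest()   # en Python 3 habría que codificar la cadena a bytes primero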
####################################################################
## Generador de firma para la verificación
def _verification_signature(self):
"""Calcula la firma de verificación"""
self.__init_encryption_key__()
dlprint("Clave de cifrado es {0}".format(self.encryption_key))
signature = "{encryption_key}{merchant_id}{acquirer_bin}{terminal_id}{num_operacion}{importe}{tipo_moneda}{exponente}{referencia}".format(
encryption_key=self.encryption_key,
merchant_id=self.merchant_id,
acquirer_bin=self.acquirer_bin,
terminal_id=self.terminal_id,
num_operacion=self.parent.operation.operation_number,
importe=self.importe,
tipo_moneda=self.tipo_moneda,
exponente=self.exponente,
referencia=self.parent.operation.confirmation_code,
)
dlprint("\tencryption_key {0}".format(self.encryption_key))
dlprint("\tmerchant_id {0}".format(self.merchant_id))
dlprint("\tacquirer_bin {0}".format(self.acquirer_bin))
dlprint("\tterminal_id {0}".format(self.terminal_id))
dlprint("\tnum_operacion {0}".format(self.parent.operation.operation_number))
dlprint("\timporte {0}".format(self.importe))
dlprint("\ttipo_moneda {0}".format(self.tipo_moneda))
dlprint("\texponente {0}".format(self.exponente))
dlprint("\treferencia {0}".format(self.parent.operation.confirmation_code))
dlprint("FIRMA {0}".format(signature))
return hashlib.sha1(signature).hexdigest()
########################################################################################################################
########################################################################################################################
###################################################### TPV Redsys ######################################################
########################################################################################################################
########################################################################################################################
AUTHORIZATION_TYPE = "authorization"
PREAUTHORIZATION_TYPE = "pre-authorization"
OPERATIVE_TYPES = (
(AUTHORIZATION_TYPE, u"autorización"),
(PREAUTHORIZATION_TYPE, u"pre-autorización"),
)
class VPOSRedsys(VirtualPointOfSale):
"""Información de configuración del TPV Virtual Redsys"""
## Todo TPV tiene una relación con los datos generales del TPV
parent = models.OneToOneField(VirtualPointOfSale, parent_link=True, related_name="+", null=False, db_column="vpos_id")
# Expresión regular usada en la identificación del servidor
regex_number = re.compile(r"^\d*$")
regex_operation_number_prefix = re.compile(r"^\d+$")
# Código FUC asignado al comercio
merchant_code = models.CharField(max_length=9, null=False, blank=False, verbose_name="MerchantCode")
# Confirmation URL that will be used by the virtual POS
merchant_response_url = models.URLField(max_length=64, null=False, blank=False, verbose_name="MerchantURL",
help_text=u"Confirmation URL that will be used by the virtual POS")
# Número de terminal que le asignará su banco
terminal_id = models.CharField(max_length=3, null=False, blank=False, verbose_name="TerminalID")
# Habilita mecanismo de preautorización + confirmación o anulación.
operative_type = models.CharField(max_length=512, choices=OPERATIVE_TYPES, default=AUTHORIZATION_TYPE, verbose_name=u"Tipo de operativa")
# Clave de cifrado SHA-256 para el entorno de prueba
encryption_key_testing_sha256 = models.CharField(max_length=64, null=True, default=None,
verbose_name="Encryption Key SHA-256 para el entorno de pruebas")
# Clave de cifrado SHA-256 para el entorno de producción
encryption_key_production_sha256 = models.CharField(max_length=64, null=True, default=None,
verbose_name="Encryption Key SHA-256 para el entorno de producción")
# Prefijo del número de operación usado para identificar al servidor desde el que se realiza la petición, el tamaño máximo será de 3 caracteres numéricos
operation_number_prefix = models.CharField(max_length=3, null=False, blank=True,
verbose_name="Prefijo del número de operación",
validators=[MinLengthValidator(0), MaxLengthValidator(3),
RegexValidator(regex=regex_operation_number_prefix,
message="Asegúrese de sólo use caracteres numéricos")])
# Clave que se va usar para esta operación
encryption_key = None
# Códigos de respuesta
DS_RESPONSE_CODES = {
"0101": u"Tarjeta Caducada.",
"0102": u"Tarjeta en excepción transitoria o bajo sospecha de fraude.",
"0104": u"Operación no permitida para esa tarjeta o terminal.",
"0106": u"Intentos de PIN excedidos.",
"0116": u"Disponible Insuficiente.",
"0118": u"Tarjeta no Registrada.",
"0125": u"Tarjeta no efectiva.",
"0129": u"Código de seguridad (CVV2/CVC2) incorrecto.",
"0180": u"Tarjeta ajena al servicio.",
"0184": u"Error en la autenticación del titular.",
"0190": u"Denegación sin especificar motivo.",
"0191": u"Fecha de caducidad errónea.",
"0202": u"Tarjeta en excepción transitoria o bajo sospecha de fraude con retirada de tarjeta.",
"0904": u"Comercio no registrado en FUC.",
"0909": u"Error de sistema.",
"0912": u"Emisor no disponible.",
"0913": u"Pedido repetido.",
"0944": u"Sesión Incorrecta.",
"0950": u"Operación de devolución no permitida.",
"9064": u"Número de posiciones de la tarjeta incorrecto.",
"9078": u"No existe método de pago válido para esa tarjeta.",
"9093": u"Tarjeta no existente.",
"9094": u"Rechazo servidores internacionales.",
"9104": u"Comercio con “titular seguro” y titular sin clave de compra segura.",
"9218": u"El comercio no permite op. seguras por entrada /operaciones.",
"9253": u"Tarjeta no cumple el check-digit.",
"9256": u"El comercio no puede realizar preautorizaciones.",
"9257": u"Esta tarjeta no permite operativa de preautorizaciones.",
"9261": u"Operación detenida por superar el control de restricciones en la entrada al SIS.",
"9912": u"Emisor no disponible.",
"9913": u"Error en la confirmación que el comercio envía al TPV Virtual (solo aplicable en la opción de sincronización SOAP).",
"9914": u"Confirmación “KO” del comercio (solo aplicable en la opción de sincronización SOAP).",
"9915": u"A petición del usuario se ha cancelado el pago.",
"9928": u"Anulación de autorización en diferido realizada por el SIS (proceso batch).",
"9929": u"Anulación de autorización en diferido realizada por el comercio.",
"9997": u"Se está procesando otra transacción en SIS con la misma tarjeta.",
"9998": u"Operación en proceso de solicitud de datos de tarjeta.",
"9999": u"Operación que ha sido redirigida al emisor a autenticar.",
}
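## Hedged helper sketch (commented out): Redsys Ds_Response values in the 0000-0099 range
## conventionally mean the payment was authorised; other values can be looked up in the
## dictionary above. This function is illustrative and not part of the original module.
#
#   def describe_ds_response(ds_response):
#       code = str(ds_response).zfill(4)
#       if code.isdigit() and int(code) < 100:
#           return u"Operación autorizada ({0})".format(code)
#       return VPOSRedsys.DS_RESPONSE_CODES.get(code, u"Código de respuesta desconocido: {0}".format(code))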
# Códigos de error SISxxxx
DS_ERROR_CODES = {
'SIS0001': u'Error en la generación de HTML',
'SIS0002': u'Error al generar el XML de la clase de datos',
'SIS0003': u'Error al crear el gestor de mensajes price',
'SIS0004': u'Error al montar el mensaje para pago móvil',
'SIS0005': u'Error al desmontar la respuesta de un pago móvil',
'SIS0006': u'Error al provocar un ROLLBACK de una transacción',
'SIS0007': u'Error al desmontar XML',
'SIS0008': u'Error falta Ds_Merchant_MerchantCode ',
'SIS0009': u'Error de formato en Ds_Merchant_MerchantCode',
'SIS0010': u'Error falta Ds_Merchant_Terminal',
'SIS0011': u'Error de formato en Ds_Merchant_Terminal',
'SIS0012': u'Error, no se pudo crear el componente de conexión con Stratus',
'SIS0013': u'Error, no se pudo cerrar el componente de conexión con Stratus',
'SIS0014': u'Error de formato en Ds_Merchant_Order',
'SIS0015': u'Error falta Ds_Merchant_Currency',
'SIS0016': u'Error de formato en Ds_Merchant_Currency',
'SIS0017': u'Error no se admiten operaciones en pesetas -- DEPRECATED !!!!',
'SIS0018': u'Error falta Ds_Merchant_Amount',
'SIS0019': u'Error de formato en Ds_Merchant_Amount',
'SIS0020': u'Error falta Ds_Merchant_MerchantSignature',
'SIS0021': u'Error la Ds_Merchant_MerchantSignature viene vacía',
'SIS0022': u'Error de formato en Ds_Merchant_TransactionType',
'SIS0023': u'Error Ds_Merchant_TransactionType desconocido. Pago Adicional: Si no se permite pago Adicional (porque el comercio no es de la Entidad o no hay pago adicional en métodos de pago -> SIS0023 Transation type invalido)',
'SIS0024': u'Error Ds_Merchant_ConsumerLanguage tiene mas de 3 posiciones',
'SIS0025': u'Error de formato en Ds_Merchant_ConsumerLanguage',
'SIS0026': u'Error No existe el comercio / terminal enviado en TZF',
'SIS0027': u'Error Moneda enviada por el comercio es diferente a la de la TZF',
'SIS0028': u'Error Comercio / terminal está dado de baja',
'SIS0029': u'Error al montar el mensaje para pago con tarjeta',
'SIS0030': u'Error en un pago con tarjeta ha llegado un tipo de operación que no es ni pago ni preautorización',
'SIS0031': u'Método de pago no definido',
'SIS0032': u'Error al montar el mensaje para una devolución',
'SIS0033': u'Error en un pago con móvil ha llegado un tipo de operación que no es ni pago ni preautorización',
'SIS0034': u'Error de acceso a la base de datos',
'SIS0035': u'Error al recuperar los datos de la sesión desde un XML',
'SIS0036': u'Error al tomar los datos para Pago Móvil desde el XML',
'SIS0037': u'El número de teléfono no es válido',
'SIS0038': u'Error en java (errores varios)',
'SIS0039': u'Error al tomar los datos para Pago Tarjeta desde el XML',
'SIS0040': u'Error el comercio / terminal no tiene ningún método de pago asignado',
'SIS0041': u'Error en el cálculo de la HASH de datos del comercio.',
'SIS0042': u'La firma enviada no es correcta',
'SIS0043': u'Error al realizar la notificación on-line',
'SIS0044': u'Error al tomar los datos para Pago Finanet desde el XML',
'SIS0045': u'Error al montar el mensaje para pago Finanet',
'SIS0046': u'El bin de la tarjeta no está dado de alta en FINANET',
'SIS0047': u'Error al montar el mensaje para preautorización móvil',
'SIS0048': u'Error al montar el mensaje para preautorización tarjeta',
'SIS0049': u'Error al montar un mensaje de anulación',
'SIS0050': u'Error al montar un mensaje de repetición de anulación',
'SIS0051': u'Error número de pedido repetido',
'SIS0052': u'Error al montar el mensaje para una confirmación',
'SIS0053': u'Error al montar el mensaje para una preautenticación por referencia',
'SIS0054': u'Error no existe operación sobre la que realizar la devolución',
'SIS0055': u'Error existe más de un pago con el mismo número de pedido',
'SIS0056': u'La operación sobre la que se desea devolver no está autorizada',
'SIS0057': u'El importe a devolver supera el permitido',
'SIS0058': u'Inconsistencia de datos, en la validación de una confirmación ',
'SIS0059': u'Error no existe operación sobre la que realizar la confirmación',
'SIS0060': u'Ya existe una confirmación asociada a la preautorización',
'SIS0061': u'La preautorización sobre la que se desea confirmar no está autorizada',
'SIS0062': u'El importe a confirmar supera el permitido',
'SIS0063': u'Error. Número de tarjeta no disponible',
'SIS0064': u'Error. Número de posiciones de la tarjeta incorrecto',
'SIS0065': u'Error. El número de tarjeta no es numérico',
'SIS0066': u'Error. Mes de caducidad no disponible',
'SIS0067': u'Error. El mes de la caducidad no es numérico',
'SIS0068': u'Error. El mes de la caducidad no es válido',
'SIS0069': u'Error. Año de caducidad no disponible',
'SIS0070': u'Error. El Año de la caducidad no es numérico',
'SIS0071': u'Tarjeta caducada',
'SIS0072': u'Operación no anulable',
'SIS0073': u'Error al analizar la respuesta de una anulación',
'SIS0074': u'Error falta Ds_Merchant_Order',
'SIS0075': u'Error el Ds_Merchant_Order tiene menos de 4 posiciones o más de 12 (Para algunas operativas el límite es 10 en lugar de 12)',
'SIS0076': u'Error el Ds_Merchant_Order no tiene las cuatro primeras posiciones numéricas',
'SIS0077': u'Error de formato en Ds_Merchant_Order',
'SIS0078': u'Método de pago no disponible',
'SIS0079': u'Error en realizar pago tarjeta',
'SIS0080': u'Error al tomar los datos para Pago tarjeta desde el XML',
'SIS0081': u'La sesión es nueva, se han perdido los datos almacenados',
'SIS0082': u'Error procesando operaciones pendientes en el arranque',
'SIS0083': u'El sistema no está arrancado (Se está arrancado)',
'SIS0084': u'El valor de Ds_Merchant_Conciliation es nulo',
'SIS0085': u'El valor de Ds_Merchant_Conciliation no es numérico',
'SIS0086': u'El valor de Ds_Merchant_Conciliation no ocupa 6 posiciones',
'SIS0087': u'El valor de Ds_Merchant_Session es nulo',
'SIS0088': u'El valor de Ds_Merchant_Session no es numérico',
'SIS0089': u'El valor de caducidad no ocupa 4 posiciones',
'SIS0090': u'El valor del ciers representado de BBVA es nulo',
'SIS0091': u'El valor del ciers representado de BBVA no es numérico',
'SIS0092': u'El valor de caducidad es nulo',
'SIS0093': u'Tarjeta no encontrada en la tabla de rangos',
'SIS0094': u'La tarjeta no fue autenticada como 3D Secure',
'SIS0095': u'Error al intentar validar la tarjeta como 3DSecure',
'SIS0096': u'El formato utilizado para los datos 3DSecure es incorrecto',
'SIS0097': u'Valor del campo Ds_Merchant_CComercio no válido',
'SIS0098': u'Valor del campo Ds_Merchant_CVentana no válido',
'SIS0099': u'Error al desmontar los datos para Pago 3D Secure desde el XML',
'SIS0100': u'Error al desmontar los datos para PagoPIN desde el XML',
'SIS0101': u'Error al desmontar los datos para PantallaPIN desde el XML',
'SIS0102': u'Error No se recibió el resultado de la autenticación',
'SIS0103': u'Error Mandando SisMpiTransactionRequestMessage al Merchant Plugin',
'SIS0104': u'Error calculando el bloque de PIN',
'SIS0105': u'Error, la referencia es nula o vacía',
'SIS0106': u'Error al montar los datos para RSisPantallaSPAUCAF.xsl',
'SIS0107': u'Error al desmontar los datos para PantallaSPAUCAF desde el XML',
'SIS0108': u'Error al desmontar los datos para pagoSPAUCAF desde el XML',
'SIS0109': u'Error El número de tarjeta no se corresponde con el seleccionado originalmente ',
'SIS0110': u'Error La fecha de caducidad de la tarjeta no se corresponde con el seleccionado originalmente',
'SIS0111': u'Error El campo Ucaf_Authentication_Data no tiene la longitud requerida',
'SIS0112': u'Error El tipo de transacción especificado en Ds_Merchant_Transaction_Type no está permitido',
'SIS0113': u'Excepción producida en el servlet de operaciones',
'SIS0114': u'Error, se ha llamado con un GET al servlet de operaciones',
'SIS0115': u'Error no existe operación sobre la que realizar el pago de la cuota',
'SIS0116': u'La operación sobre la que se desea pagar una cuota no es una operación válida',
'SIS0117': u'La operación sobre la que se desea pagar una cuota no está autorizada',
'SIS0118': u'Se ha excedido el importe total de las cuotas',
'SIS0119': u'Valor del campo Ds_Merchant_DateFrecuency no válido',
'SIS0120': u'Valor del campo Ds_Merchant_ChargeExpiryDate no válido',
'SIS0121': u'Valor del campo Ds_Merchant_SumTotal no válido',
'SIS0122': u'Error en formato numérico. Antiguo Valor del campo Ds_Merchant_DateFrecuency o no Ds_Merchant_SumTotal tiene formato incorrecto',
'SIS0123': u'Se ha excedido la fecha tope para realizar transacciones',
'SIS0124': u'No ha transcurrido la frecuencia mínima en un pago recurrente sucesivo',
'SIS0125': u'Error en código java validando cuota',
'SIS0126': u'Error la operación no se puede marcar como pendiente',
'SIS0127': u'Error la generando datos Url OK CANCEL',
'SIS0128': u'Error se quiere generar una anulación sin p2',
'SIS0129': u'Error, se ha detectado un intento masivo de peticiones desde la ip',
'SIS0130': u'Error al regenerar el mensaje',
'SIS0131': u'Error en la firma de los datos del SAS',
'SIS0132': u'La fecha de Confirmación de Autorización no puede superar en más de 7 días a la de Preautorización.',
'SIS0133': u'La fecha de Confirmación de Autenticación no puede superar en más de 45 días a la de Autenticación Previa.',
'SIS0134': u'El valor del Ds_MerchantCiers enviado por BBVA no es válido',
'SIS0135': u'Error generando un nuevo valor para el IDETRA',
'SIS0136': u'Error al montar el mensaje de notificación',
'SIS0137': u'Error al intentar validar la tarjeta como 3DSecure NACIONAL',
'SIS0138': u'Error debido a que existe una Regla del ficheros de reglas que evita que se produzca la Autorización',
'SIS0139': u'Error el pago recurrente inicial está duplicado',
'SIS0140': u'Error al interpretar la respuesta de Stratus para una preautenticación por referencia',
'SIS0141': u'Error formato no correcto para 3DSecure',
'SIS0142': u'Tiempo excedido para el pago',
'SIS0143': u'No viene el campo laOpcion en el formulario enviado',
'SIS0144': u'El campo laOpcion recibido del formulario tiene un valor desconocido para el servlet',
'SIS0145': u'Error al montar el mensaje para P2P',
'SIS0146': u'Transacción P2P no reconocida',
'SIS0147': u'Error al tomar los datos para Pago P2P desde el XML',
'SIS0148': u'Método de pago no disponible o no válido para P2P',
'SIS0149': u'Error al obtener la referencia para operación P2P',
'SIS0150': u'Error al obtener la clave para operación P2P',
'SIS0151': u'Error al generar un objeto desde el XML',
'SIS0152': u'Error en operación P2P. Se carece de datos',
'SIS0153': u'Error, el número de días de operación P2P no es correcto',
'SIS0154': u'Error el mail o el teléfono de T2 son obligatorios (operación P2P)',
'SIS0155': u'Error obteniendo datos de operación P2P',
'SIS0156': u'Error la operación no es P2P Tipo 3',
'SIS0157': u'Error no se encuentra la operación P2P original',
'SIS0158': u'Error, la operación P2P original no está en el estado correcto',
'SIS0159': u'Error, la clave de control de operación P2P no es válida ',
'SIS0160': u'Error al tomar los datos para una operación P2P tipo 3',
'SIS0161': u'Error en el envío de notificación P2P',
'SIS0162': u'Error tarjeta de carga micropago no tiene pool asociado',
'SIS0163': u'Error tarjeta de carga micropago no autenticable',
'SIS0164': u'Error la recarga para micropagos sólo permite euros',
'SIS0165': u'Error la T1 de la consulta no coincide con la de la operación P2P original',
'SIS0166': u'Error el nombre del titular de T1 es obligatorio',
'SIS0167': u'Error la operación está bloqueada por superar el número de intentos fallidosde introducción del código por parte de T2',
'SIS0168': u'No existe terminal AMEX asociada',
'SIS0169': u'Valor PUCE Ds_Merchant_MatchingData no válido',
'SIS0170': u'Valor PUCE Ds_Acquirer_Identifier no válido',
'SIS0171': u'Valor PUCE Ds_Merchant_Csb no válido',
'SIS0172': u'Valor PUCE Ds_Merchant_MerchantCode no válido',
'SIS0173': u'Valor PUCE Ds_Merchant_UrlOK no válido',
'SIS0174': u'Error calculando el resultado PUCE',
'SIS0175': u'Error al montar el mensaje PUCE',
'SIS0176': u'Error al tratar el mensaje de petición P2P procedente de Stratus.',
'SIS0177': u'Error al descomponer el mensaje de Envío de fondos en una operación P2P iniciada por Stratus.',
'SIS0178': u'Error al montar el XML con los datos de envío para una operación P2P',
'SIS0179': u'Error P2P Móvil, el teléfono no tiene asociada tarjeta',
'SIS0180': u'El telecode es nulo o vacía para operación P2P',
'SIS0181': u'Error al montar el XML con los datos recibidos',
'SIS0182': u'Error al montar el mensaje PRICE / Error al tratar el mensaje de petición Cobro de Recibo',
'SIS0183': u'Error al montar el XML de respuesta',
'SIS0184': u'Error al tratar el XML de Recibo',
'SIS0186': u'Error en entrada Banco Sabadell. Faltan datos',
'SIS0187': u'Error al montar el mensaje de respuesta a Stratus (Error Formato)',
'SIS0188': u'Error al desmontar el mensaje price en una petición P2P procedente de Stratus',
'SIS0190': u'Error al intentar mandar el mensaje SMS',
'SIS0191': u'Error, El mail del beneficiario no coincide con el indicado en la recepción P2P',
'SIS0192': u'Error, La clave de mail del beneficiario no es correcta en la recepción P2P',
'SIS0193': u'Error comprobando monedas para DCC',
'SIS0194': u'Error problemas con la aplicación del cambio y el mostrado al titular',
'SIS0195': u'Error en pago PIN. No llegan los datos',
'SIS0196': u'Error las tarjetas de operación P2P no son del mismo procesador',
'SIS0197': u'Error al obtener los datos de cesta de la compra en operación tipo pasarela',
'SIS0198': u'Error el importe supera el límite permitido para el comercio',
'SIS0199': u'Error el número de operaciones supera el límite permitido para el comercio',
'SIS0200': u'Error el importe acumulado supera el límite permitido para el comercio',
'SIS0201': u'Se ha producido un error inesperado al realizar la conexión con el VDS',
'SIS0202': u'Se ha producido un error en el envío del mensaje',
'SIS0203': u'No existe ningún método definido para el envío del mensaje',
'SIS0204': u'No se ha definido una URL válida para el envío de mensajes',
'SIS0205': u'Error al generar la firma, es posible que el mensaje no sea válido o esté incompleto',
'SIS0206': u'No existe una clave asociada al BID especificado',
'SIS0207': u'La consulta no ha devuelto ningún resultado',
'SIS0208': u'La operación devuelta por el SIS no coincide con la petición',
'SIS0209': u'No se han definido parámetros para realizar la consulta',
'SIS0210': u'Error al validar el mensaje, faltan datos: BID',
'SIS0211': u'Error en la validación de la firma ',
'SIS0212': u'La respuesta recibida no se corresponde con la petición. Referencias de mensaje distintas',
'SIS0213': u'Errores devueltos por el VDS',
'SIS0214': u'El comercio no permite devoluciones. Se requiere usar firma ampliada.',
'SIS0215': u'Operación no permitida para TPV’s virtuales de esta entidad.',
'SIS0216': u'Error Ds_Merchant_CVV2 tiene más de 3 posiciones',
'SIS0217': u'Error de formato en Ds_Merchant_CVV2',
'SIS0218': u'El comercio no permite operaciones seguras por entrada XML',
'SIS0219': u'Error el número de operaciones de la tarjeta supera el límite permitido para el comercio',
'SIS0220': u'Error el importe acumulado de la tarjeta supera el límite permitido para el comercio',
'SIS0221': u'Error el CVV2 es obligatorio',
'SIS0222': u'Ya existe una anulación asociada a la preautorización',
'SIS0223': u'La preautorización que se desea anular no está autorizada',
'SIS0224': u'El comercio no permite anulaciones por no tener firma ampliada',
'SIS0225': u'Error no existe operación sobre la que realizar la anulación',
'SIS0226': u'Inconsistencia de datos, en la validación de una anulación',
'SIS0227': u'Valor del campo Ds_Merchant_TransactionDate no válido',
'SIS0228': u'Sólo se puede hacer pago aplazado con tarjeta de crédito On-us',
'SIS0229': u'No existe el código de pago aplazado solicitado',
'SIS0230': u'El comercio no permite pago fraccionado',
'SIS0231': u'No hay forma de pago aplicable para el cliente',
'SIS0232': u'Error. Forma de pago no disponible',
'SIS0233': u'Error. Forma de pago desconocida',
'SIS0234': u'Error. Nombre del titular de la cuenta no disponible',
'SIS0235': u'Error. Campo Sis_Numero_Entidad no disponible',
'SIS0236': u'Error. El campo Sis_Numero_Entidad no tiene la longitud requerida',
'SIS0237': u'Error. El campo Sis_Numero_Entidad no es numérico',
'SIS0238': u'Error. Campo Sis_Numero_Oficina no disponible',
'SIS0239': u'Error. El campo Sis_Numero_Oficina no tiene la longitud requerida',
'SIS0240': u'Error. El campo Sis_Numero_Oficina no es numérico',
'SIS0241': u'Error. Campo Sis_Numero_DC no disponible',
'SIS0242': u'Error. El campo Sis_Numero_DC no tiene la longitud requerida',
'SIS0243': u'Error. El campo Sis_Numero_DC no es numérico',
'SIS0244': u'Error. Campo Sis_Numero_Cuenta no disponible',
'SIS0245': u'Error. El campo Sis_Numero_Cuenta no tiene la longitud requerida',
'SIS0246': u'Error. El campo Sis_Numero_Cuenta no es numérico',
'SIS0247': u'Dígito de Control de Cuenta Cliente no válido',
'SIS0248': u'El comercio no permite pago por domiciliación',
'SIS0249': u'Error al realizar pago por domiciliación',
'SIS0250': u'Error al tomar los datos del XML para realizar Pago por Transferencia',
'SIS0251': u'El comercio no permite pago por transferencia',
'SIS0252': u'El comercio no permite el envío de tarjeta',
'SIS0253': u'Tarjeta no cumple check-digit',
'SIS0254': u'El número de operaciones de la IP supera el límite permitido por el comercio',
'SIS0255': u'El importe acumulado por la IP supera el límite permitido por el comercio',
'SIS0256': u'El comercio no puede realizar preautorizaciones',
'SIS0257': u'Esta tarjeta no permite operativa de preautorizaciones',
'SIS0258': u'Inconsistencia de datos, en la validación de una confirmación',
'SIS0259': u'No existe la operación original para notificar o consultar',
'SIS0260': u'Entrada incorrecta al SIS',
'SIS0261': u'Operación detenida por superar el control de restricciones en la entrada al SIS',
'SIS0262': u'Moneda no permitida para operación de transferencia o domiciliación ',
'SIS0263': u'Error calculando datos para procesar operación en su banca online',
'SIS0264': u'Error procesando datos de respuesta recibidos desde su banca online',
'SIS0265': u'Error de firma en los datos recibidos desde su banca online',
'SIS0266': u'No se pueden recuperar los datos de la operación recibida desde su banca online',
'SIS0267': u'La operación no se puede procesar por no existir Código Cuenta Cliente',
'SIS0268': u'La operación no se puede procesar por este canal',
'SIS0269': u'No se pueden realizar devoluciones de operaciones de domiciliación no descargadas',
'SIS0270': u'El comercio no puede realizar preautorizaciones en diferido',
'SIS0271': u'Error realizando pago-autenticación por WebService',
'SIS0272': u'La operación a autorizar por WebService no se puede encontrar',
'SIS0273': u'La operación a autorizar por WebService está en un estado incorrecto',
'SIS0274': u'Tipo de operación desconocida o no permitida por esta entrada al SIS',
'SIS0275': u'Error Premio: Premio sin IdPremio',
'SIS0276': u'Error Premio: Unidades del Premio a redimir no numéricas.',
'SIS0277': u'Error Premio: Error general en el proceso.',
'SIS0278': u'Error Premio: Error en el proceso de consulta de premios',
'SIS0279': u'Error Premio: El comercio no tiene activada la operativa de fidelización',
'SIS0280': u'Reglas V3.0 : excepción por regla con Nivel de gestión usuario Interno.',
'SIS0281': u'Reglas V3.0 : excepción por regla con Nivel de gestión usuario Entidad.',
'SIS0282': u'Reglas V3.0 : excepción por regla con Nivel de gestión usuario Comercio/MultiComercio de una entidad.',
'SIS0283': u'Reglas V3.0 : excepción por regla con Nivel de gestión usuario Comercio-Terminal.',
'SIS0284': u'Pago Adicional: error no existe operación sobre la que realizar el PagoAdicional',
'SIS0285': u'Pago Adicional: error tiene más de una operación sobre la que realizar el Pago Adicional',
'SIS0286': u'Pago Adicional: La operación sobre la que se quiere hacer la operación adicional no está Aceptada',
'SIS0287': u'Pago Adicional: la Operación ha sobrepasado el importe para el Pago Adicional.',
'SIS0288': u'Pago Adicional: No se puede realizar otro pago Adicional. Se ha superado el número de pagos adicionales permitidos sobre la operación.',
'SIS0289': u'Pago Adicional: El importe del pago Adicional supera el máximo días permitido.',
'SIS0290': u'Control de Fraude: Bloqueo por control de Seguridad',
'SIS0291': u'Control de Fraude: Bloqueo por lista Negra control de IP',
'SIS0292': u'Control de Fraude: Bloqueo por lista Negra control de Tarjeta',
'SIS0293': u'Control de Fraude: Bloqueo por Lista negra evaluación de Regla',
'SIS0294': u'Tarjetas Privadas BBVA: La tarjeta no es Privada de BBVA (uno-e). No seadmite el envío de DS_MERCHANT_PAY_TYPE.',
'SIS0295': u'Error de duplicidad de operación. Se puede intentar de nuevo',
'SIS0296': u'Error al validar los datos de la Operación de Tarjeta en Archivo Inicial',
'SIS0297': u'Número de operaciones sucesivas de Tarjeta en Archivo superado',
'SIS0298': u'El comercio no permite realizar operaciones de Tarjeta en Archivo',
'SIS0299': u'Error en la llamada a PayPal',
'SIS0300': u'Error en los datos recibidos de PayPal',
'SIS0301': u'Error en pago con PayPal',
'SIS0302': u'Moneda no válida para pago con PayPal',
'SIS0303': u'Esquema de la entidad es 4B',
'SIS0304': u'No se permite pago fraccionado si la tarjeta no es de FINCONSUM',
'SIS0305': u'No se permite pago fraccionado FINCONSUM en moneda diferente de euro',
'SIS0306': u'Valor de Ds_Merchant_PrepaidCard no válido',
'SIS0307': u'Operativa de tarjeta regalo no permitida',
'SIS0308': u'Tiempo límite para recarga de tarjeta regalo superado',
'SIS0309': u'Error faltan datos adicionales para realizar la recarga de tarjeta prepago',
'SIS0310': u'Valor de Ds_Merchant_Prepaid_Expiry no válido',
'SIS0311': u'Error al montar el mensaje para consulta de comisión en recarga de tarjeta prepago ',
'SIS0312': u'Error en petición StartCheckoutSession con V.me',
'SIS0313': u'Petición de compra mediante V.me no permitida',
'SIS0314': u'Error en pago V.me',
'SIS0315': u'Error analizando petición de autorización de V.me',
'SIS0316': u'Error en petición de autorización de V.me',
'SIS0317': u'Error montando respuesta a autorización de V.me',
'SIS0318': u'Error en retorno del pago desde V.me',
'SIS0319': u'El comercio no pertenece al grupo especificado en Ds_Merchant_Group',
'SIS0321': u'El identificador indicado en Ds_Merchant_Identifier no está asociado al comercio',
'SIS0322': u'Error de formato en Ds_Merchant_Group',
'SIS0323': u'Para tipo de operación F es necesario el campo Ds_Merchant_Customer_Mobile o Ds_Merchant_Customer_Mail',
'SIS0324': u'Para tipo de operación F. Imposible enviar link al titular',
'SIS0325': u'Se ha pedido no mostrar pantallas pero no se ha enviado ningún identificador de tarjeta',
'SIS0326': u'Se han enviado datos de tarjeta en fase primera de un pago con dos fases',
'SIS0327': u'No se ha enviado ni móvil ni email en fase primera de un pago con dos fases',
'SIS0328': u'Token de pago en dos fases inválido',
'SIS0329': u'No se puede recuperar el registro en la tabla temporal de pago en dos fases',
'SIS0330': u'Fechas incorrectas de pago dos fases',
'SIS0331': u'La operación no tiene un estado válido o no existe.',
'SIS0332': u'El importe de la operación original y de la devolución debe ser idéntico',
'SIS0333': u'Error en una petición a MasterPass Wallet',
'SIS0334': u'Bloqueo regla operativa grupos definidos por la entidad',
'SIS0335': u'Ds_Merchant_Recharge_Commission no válido',
'SIS0336': u'Error realizando petición de redirección a Oasys',
'SIS0337': u'Error calculando datos de firma para redirección a Oasys',
'SIS0338': u'No se encuentra la operación Oasys en la BD',
'SIS0339': u'El comercio no dispone de pago Oasys',
'SIS0340': u'Respuesta recibida desde Oasys no válida',
'SIS0341': u'Error en la firma recibida desde Oasys',
'SIS0342': u'El comercio no permite realizar operaciones de pago de tributos',
'SIS0343': u'El parámetro Ds_Merchant_Tax_Reference falta o es incorrecto',
'SIS0344': u'El usuario ha elegido aplazar el pago, pero no ha aceptado las condiciones de las cuotas',
'SIS0345': u'El usuario ha elegido un número de plazos incorrecto',
'SIS0346': u'Error de formato en parámetro DS_MERCHANT_PAY_TYPE',
'SIS0347': u'El comercio no está configurado para realizar la consulta de BIN.',
'SIS0348': u'El BIN indicado en la consulta no se reconoce',
'SIS0349': u'Los datos de importe y DCC enviados no coinciden con los registrados en SIS',
'SIS0350': u'No hay datos DCC registrados en SIS para este número de pedido',
'SIS0351': u'Autenticación prepago incorrecta',
'SIS0352': u'El tipo de firma del comercio no permite esta operativa',
'SIS0353': u'El comercio no tiene definida una clave 3DES válida',
'SIS0354': u'Error descifrando petición al SIS',
'SIS0355': u'El comercio-terminal enviado en los datos cifrados no coincide con el enviado en la petición',
'SIS0356': u'Existen datos de entrada para control de fraude y el comercio no tiene activo control de fraude',
'SIS0357': u'Error en parametros enviados. El comercio tiene activo control de fraude y no existe campo ds_merchant_merchantscf',
'SIS0358': u'La entidad no dispone de pago Oasys',
'SIS0370': u'Error en formato Scf_Merchant_Nif. Longitud máxima 16',
'SIS0371': u'Error en formato Scf_Merchant_Name. Longitud máxima 30',
'SIS0372': u'Error en formato Scf_Merchant_First_Name. Longitud máxima 30 ',
'SIS0373': u'Error en formato Scf_Merchant_Last_Name. Longitud máxima 30',
'SIS0374': u'Error en formato Scf_Merchant_User. Longitud máxima 45',
'SIS0375': u'Error en formato Scf_Affinity_Card. Valores posibles \'S\' o \'N\'. Longitud máxima 1',
'SIS0376': u'Error en formato Scf_Payment_Financed. Valores posibles \'S\' o \'N\'. Longitud máxima 1',
'SIS0377': u'Error en formato Scf_Ticket_Departure_Point. Longitud máxima 30',
'SIS0378': u'Error en formato Scf_Ticket_Destination. Longitud máxima 30',
'SIS0379': u'Error en formato Scf_Ticket_Departure_Date. Debe tener formato yyyyMMddHHmmss.',
'SIS0380': u'Error en formato Scf_Ticket_Num_Passengers. Longitud máxima 1.',
'SIS0381': u'Error en formato Scf_Passenger_Dni. Longitud máxima 16.',
'SIS0382': u'Error en formato Scf_Passenger_Name. Longitud máxima 30.',
'SIS0383': u'Error en formato Scf_Passenger_First_Name. Longitud máxima 30.',
'SIS0384': u'Error en formato Scf_Passenger_Last_Name. Longitud máxima 30.',
'SIS0385': u'Error en formato Scf_Passenger_Check_Luggage. Valores posibles \'S\' o \'N\'. Longitud máxima 1.',
'SIS0386': u'Error en formato Scf_Passenger_Special_luggage. Valores posibles \'S\' o \'N\'. Longitud máxima 1.',
'SIS0387': u'Error en formato Scf_Passenger_Insurance_Trip. Valores posibles \'S\' o \'N\'. Longitud máxima 1.',
'SIS0388': u'Error en formato Scf_Passenger_Type_Trip. Valores posibles \'N\' o \'I\'. Longitud máxima 1.',
'SIS0389': u'Error en formato Scf_Passenger_Pet. Valores posibles \'S\' o \'N\'. Longitud máxima 1.',
'SIS0390': u'Error en formato Scf_Order_Channel. Valores posibles \'M\'(móvil), \'P\'(PC) o \'T\'(Tablet)',
'SIS0391': u'Error en formato Scf_Order_Total_Products. Debe tener formato numérico y longitud máxima de 3.',
'SIS0392': u'Error en formato Scf_Order_Different_Products. Debe tener formato numérico y longitud máxima de 3.',
'SIS0393': u'Error en formato Scf_Order_Amount. Debe tener formato numérico y longitud máxima de 19.',
'SIS0394': u'Error en formato Scf_Order_Max_Amount. Debe tener formato numérico y longitud máxima de 19.',
'SIS0395': u'Error en formato Scf_Order_Coupon. Valores posibles \'S\' o \'N\'',
'SIS0396': u'Error en formato Scf_Order_Show_Type. Debe longitud máxima de 30.',
'SIS0397': u'Error en formato Scf_Wallet_Identifier',
'SIS0398': u'Error en formato Scf_Wallet_Client_Identifier',
'SIS0399': u'Error en formato Scf_Merchant_Ip_Address',
'SIS0400': u'Error en formato Scf_Merchant_Proxy',
'SIS0401': u'Error en formato Ds_Merchant_Mail_Phone_Number. Debe ser numérico y de longitud máxima 19',
'SIS0402': u'Error en llamada a SafetyPay para solicitar token url',
'SIS0403': u'Error en proceso de solicitud de token url a SafetyPay',
'SIS0404': u'Error en una petición a SafetyPay',
'SIS0405': u'Solicitud de token url denegada',
'SIS0406': u'El sector del comercio no está permitido para realizar un pago de premio de apuesta',
'SIS0407': u'El importe de la operación supera el máximo permitido para realizar un pago de premio de apuesta',
'SIS0408': u'La tarjeta debe de haber operado durante el último año para poder realizar un pago de premio de apuesta',
'SIS0409': u'La tarjeta debe ser una Visa o MasterCard nacional para realizar un pago de premio de apuesta',
'SIS0410': u'Bloqueo por Operación con Tarjeta Privada del Cajamar, en comercio que no es de Cajamar',
'SIS0411': u'No existe el comercio en la tabla de datos adicionales de RSI Directo',
'SIS0412': u'La firma enviada por RSI Directo no es correcta',
'SIS0413': u'La operación ha sido denegada por Lynx',
'SIS0414': u'El plan de ventas no es correcto',
'SIS0415': u'El tipo de producto no es correcto',
'SIS0416': u'Importe no permitido en devolución ',
'SIS0417': u'Fecha de devolución no permitida',
'SIS0418': u'No existe plan de ventas vigente',
'SIS0419': u'Tipo de cuenta no permitida',
'SIS0420': u'El comercio no dispone de formas de pago para esta operación',
'SIS0421': u'Tarjeta no permitida. No es producto Agro',
'SIS0422': u'Faltan datos para operación Agro',
'SIS0423': u'CNPJ del comercio incorrecto',
'SIS0424': u'No se ha encontrado el establecimiento',
'SIS0425': u'No se ha encontrado la tarjeta',
'SIS0426': u'Enrutamiento no valido para comercio Corte Ingles.',
'SIS0427': u'La conexión con CECA no ha sido posible para el comercio Corte Ingles.',
'SIS0428': u'Operación debito no segura',
'SIS0429': u'Error en la versión enviada por el comercio (Ds_SignatureVersion)',
'SIS0430': u'Error al decodificar el parámetro Ds_MerchantParameters',
'SIS0431': u'Error del objeto JSON que se envía codificado en el parámetro Ds_MerchantParameters',
'SIS0432': u'Error FUC del comercio erróneo',
'SIS0433': u'Error Terminal del comercio erróneo',
'SIS0434': u'Error ausencia de número de pedido en la op. del comercio',
'SIS0435': u'Error en el cálculo de la firma',
'SIS0436': u'Error en la construcción del elemento padre <REQUEST>',
'SIS0437': u'Error en la construcción del elemento <DS_SIGNATUREVERSION>',
'SIS0438': u'Error en la construcción del elemento <DATOSENTRADA>',
'SIS0439': u'Error en la construcción del elemento <DS_SIGNATURE>',
'SIS0440': u'Error al crear pantalla MyBank',
'SIS0441': u'Error no tenemos bancos para Mybank',
'SIS0442': u'Error al realizar el pago Mybank',
'SIS0443': u'No se permite pago en terminales ONEY con tarjetas ajenas',
'SIS0445': u'Error gestionando referencias con Stratus',
'SIS0444': u'Se está intentando acceder usando firmas antiguas y el comercio está configurado como HMAC SHA256',
'SIS0446': u'Para terminales Oney es obligatorio indicar la forma de pago',
'SIS0447': u'Error, se está utilizando una referencia que se generó con un adquirente distinto al adquirente que la utiliza.',
'SIS0448': u'Error, la tarjeta de la operación es DINERS y el comercio no tiene el método de pago "Pago DINERS"',
'SIS0449': u'Error, el tipo de pago de la operación es Tradicional(A), la tarjeta de la operación no es DINERS ni JCB ni AMEX y el comercio tiene el método de pago "Prohibir Pago A"',
'SIS0450': u'Error, el tipo de pago de la operación es Tradicional(A), la tarjeta de la operación es AMEX y el comercio tiene los métodos de pago "Pago Amex y Prohibir Pago A AMEX"',
'SIS0451': u'Error, la operación es Host to Host con tipo de pago Tradicional(A), la tarjeta de la operación no es DINERS ni JCB ni AMEX y el comercio tiene el método de pago "Prohibir Pago A"',
'SIS0452': u'Error, la tarjeta de la operación es 4B y el comercio no tiene el método de pago "Tarjeta 4B"',
'SIS0453': u'Error, la tarjeta de la operación es JCB y el comercio no tiene el método de pago "Pago JCB"',
'SIS0454': u'Error, la tarjeta de la operación es AMEX y el comercio no tiene el método de pago "Pago Amex"',
'SIS0455': u'Error, el comercio no tiene el método de pago "Tarjetas Propias" y la tarjeta no está registrada como propia. ',
'SIS0456': u'Error, se aplica el método de pago "Verified By Visa" con Respuesta [VEReq, VERes] = U y el comercio no tiene los métodos de pago "Pago U y Pago U Nacional"',
'SIS0457': u'Error, se aplica el método de pago "MasterCard SecureCode" con Respuesta [VEReq, VERes] = N con tarjeta MasterCard Comercial y el comercio no tiene el método de pago "MasterCard Comercial"',
'SIS0458': u'Error, se aplica el método de pago "MasterCard SecureCode" con Respuesta [VEReq, VERes] = U con tarjeta MasterCard Comercial y el comercio no tiene el método de pago "MasterCard Comercial"',
'SIS0459': u'Error, se aplica el método de pago "JCB Secure" con Respuesta [VEReq, VERes]= U y el comercio no tiene el método de pago "Pago JCB"',
'SIS0460': u'Error, se aplica el método de pago "AMEX SafeKey" con Respuesta [VEReq, VERes] = N y el comercio no tiene el método de pago "Pago AMEX"',
'SIS0461': u'Error, se aplica el método de pago "AMEX SafeKey" con Respuesta [VEReq, VERes] = U y el comercio no tiene el método de pago "Pago AMEX"',
'SIS0462': u'Error, se aplica el método de pago "Verified By Visa","MasterCard SecureCode","JCB Secure" o "AMEX SafeKey" y la operación es Host To Host',
'SIS0463': u'Error, se selecciona un método de pago que no está entre los permitidos por el SIS para ser ejecutado',
'SIS0464': u'Error, el resultado de la autenticación 3DSecure es "NO_3DSECURE" con tarjeta MasterCard Comercial y el comercio no tiene el método de pago "MasterCard Comercial"',
'SIS0465': u'Error, el resultado de la autenticación 3DSecure es "NO_3DSECURE", la tarjeta no es Visa, ni Amex, ni JCB, ni Master y el comercio no tiene el método de pago "Tradicional Mundial" ',
}
ALLOW_PAYMENT_BY_REFERENCE = True
# El TPV de RedSys consta de dos entornos en funcionamiento, uno para pruebas y otro para producción
REDSYS_URL = {
"production": "https://sis.redsys.es/sis/realizarPago",
"testing": "https://sis-t.redsys.es:25443/sis/realizarPago"
}
# Idiomas soportados por RedSys
IDIOMAS = {"es": "001", "en": "002", "ca": "003", "fr": "004", "de": "005", "pt": "009", "it": "007"}
# URL de pago que variará según el entorno
url = None
# Importe de la venta
importe = None
# Tipo de cifrado usado en la generación de la firma
cifrado = "SHA1"
# Tipo de moneda usada en la operación, en este caso sera Euros
tipo_moneda = "978"
# Indica qué tipo de transacción se utiliza, en función del parámetro enable_preauth-policy puede ser:
# 0 - Autorización
# 1 - Preautorización
transaction_type = None
# Idioma por defecto a usar. Español
idioma = "001"
# En modo SOAP, string con "<Request>...</Request>" completo. Es necesario para calcular la firma
soap_request = None
## Inicia el valor de la clave de cifrado en función del entorno
def __init_encryption_key__(self):
# Clave de cifrado según el entorno
if self.parent.environment == "testing":
self.encryption_key = self.encryption_key_testing_sha256
elif self.parent.environment == "production":
self.encryption_key = self.encryption_key_production_sha256
else:
raise ValueError(u"Entorno {0} no válido".format(self.parent.environment))
if not self.encryption_key:
raise ValueError(u"La clave de cifrado para {0} no es válida".format(self.parent.environment))
# Algunos métodos utilizados más adelante necesitan que sea un str
self.encryption_key = str(self.encryption_key)
####################################################################
## Constructor del TPV REDSYS
def __init__(self, *args, **kwargs):
super(VPOSRedsys, self).__init__(*args, **kwargs)
def __unicode__(self):
return self.name
@classmethod
def form(cls):
from forms import VPOSRedsysForm
return VPOSRedsysForm
####################################################################
## Paso 1.1. Configuración del pago
def configurePayment(self, **kwargs):
# URL de pago según el entorno
self.url = self.REDSYS_URL[self.parent.environment]
# Configurar el tipo de transacción que se utiliza, en función del parámetro enable_preauth-policy.
if self.operative_type == PREAUTHORIZATION_TYPE:
dlprint(u"Configuracion TPV en modo Pre-Autorizacion")
self.transaction_type = "1"
elif self.operative_type == AUTHORIZATION_TYPE:
dlprint(u"Configuracion TPV en modo Autorizacion")
self.transaction_type = "0"
# Formato para Importe: según redsys, ha de tener un formato de entero positivo, con las dos últimas posiciones
# ocupadas por los decimales
self.importe = "{0:.2f}".format(float(self.parent.operation.amount)).replace(".", "")
if self.importe == "000":
self.importe = "0"
# Idioma de la pasarela, por defecto es español, tomamos
# el idioma actual y le asignamos éste
self.idioma = self.IDIOMAS["es"]
lang = translation.get_language()
if lang in self.IDIOMAS:
self.idioma = self.IDIOMAS[lang]
####################################################################
## Paso 1.2. Preparación del TPV y Generación del número de operación
def setupPayment(self, operation_number=None, code_len=12):
"""
Devuelve un número de operación para los pagos al TPV Redsys.
Nótese que los 4 primeros caracteres son dígitos, el resto
pueden ser dígitos o caracteres alfabéticos.
"""
if operation_number:
return operation_number
operation_number = ''
if self.operation_number_prefix:
operation_number = self.operation_number_prefix
# Los 4 primeros dígitos deben ser numéricos, forzosamente
for i in range(4 - len(operation_number)):
operation_number += random.choice('23456789')
# El resto de los dígitos pueden ser alfanuméricos
for i in range(code_len - 4):
operation_number += random.choice('ABCDEFGHJKLMNPQRSTUWXYZ23456789')
return operation_number
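# Nota ilustrativa (no forma parte del código original): con operation_number_prefix = "OP",
# un número generado podría ser, por ejemplo, "OP57K3N8WQ2Z": el prefijo más dígitos
# aleatorios hasta completar las 4 primeras posiciones numéricas y, después, caracteres
# alfanuméricos hasta alcanzar code_len (12).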
####################################################################
## Paso 1.3. Obtiene los datos de pago
## Este método será el que genere los campos del formulario de pago
## que se rellenarán desde el cliente (por Javascript)
def getPaymentFormData(self, reference_number=False):
order_data = {
# Indica el importe de la venta
"DS_MERCHANT_AMOUNT": self.importe,
# Indica el número de operacion
"DS_MERCHANT_ORDER": self.parent.operation.operation_number,
# Código FUC asignado al comercio
"DS_MERCHANT_MERCHANTCODE": self.merchant_code,
# Indica el tipo de moneda a usar
"DS_MERCHANT_CURRENCY": self.tipo_moneda,
# Indica que tipo de transacción se utiliza
"DS_MERCHANT_TRANSACTIONTYPE": self.transaction_type,
# Indica el terminal
"DS_MERCHANT_TERMINAL": self.terminal_id,
# Obligatorio si se tiene confirmación online.
"DS_MERCHANT_MERCHANTURL": self.merchant_response_url,
# URL a la que se redirige al usuario en caso de que la venta haya sido satisfactoria
"DS_MERCHANT_URLOK": self.parent.operation.url_ok,
# URL a la que se redirige al usuario en caso de que la venta NO haya sido satisfactoria
"DS_MERCHANT_URLKO": self.parent.operation.url_nok,
# Se mostrará al titular en la pantalla de confirmación de la compra
"DS_MERCHANT_PRODUCTDESCRIPTION": self.parent.operation.description,
# Indica el valor del idioma
"DS_MERCHANT_CONSUMERLANGUAGE": self.idioma,
# Representa la suma total de los importes de las cuotas
"DS_MERCHANT_SUMTOTAL": self.importe,
}
# En caso de que tenga referencia
if reference_number:
# Puede ser una petición de referencia
if reference_number.lower() == "request":
order_data["DS_MERCHANT_IDENTIFIER"] = "REQUIRED"
if "?" in order_data["DS_MERCHANT_MERCHANTURL"]:
order_data["DS_MERCHANT_MERCHANTURL"] += "&request_reference=1"
else:
order_data["DS_MERCHANT_MERCHANTURL"] += "?request_reference=1"
# o en cambio puede ser el envío de una referencia obtenida antes
else:
order_data["DS_MERCHANT_IDENTIFIER"] = reference_number
json_order_data = json.dumps(order_data)
packed_order_data = base64.b64encode(json_order_data)
data = {
"Ds_SignatureVersion": "HMAC_SHA256_V1",
"Ds_MerchantParameters": packed_order_data,
"Ds_Signature": self._redsys_hmac_sha256_signature(packed_order_data)
}
form_data = {
"data": data,
"action": self.url,
"enctype": "application/x-www-form-urlencoded",
"method": "post"
}
return form_data
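# Ejemplo ilustrativo del form_data resultante (valores inventados):
#
#   Ds_MerchantParameters = base64(json.dumps({"DS_MERCHANT_AMOUNT": "1050",
#                                              "DS_MERCHANT_ORDER": "1234ABCD5678", ...}))
#   data = {
#       "Ds_SignatureVersion": "HMAC_SHA256_V1",
#       "Ds_MerchantParameters": "<JSON anterior en Base64>",
#       "Ds_Signature": "<firma HMAC-SHA256 de la cadena Base64>"
#   }
#
# El formulario del cliente hace POST de estos tres campos a self.url (la pasarela Redsys).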
####################################################################
## Paso 3.1. Obtiene el número de operación y los datos que nos
## envíe la pasarela de pago.
@classmethod
def receiveConfirmation(cls, request):
# Es una respuesta HTTP POST "normal"
if 'Ds_MerchantParameters' in request.POST:
return cls._receiveConfirmationHTTPPOST(request)
# Es una respuesta SOAP
body = request.body
if "procesaNotificacionSIS" in body and "SOAP" in body:
return cls._receiveConfirmationSOAP(request)
raise Exception(u"No se reconoce la petición ni como HTTP POST ni como SOAP")
####################################################################
## Paso 3.1.a Procesar notificación HTTP POST
@staticmethod
def _receiveConfirmationHTTPPOST(request):
dlprint(u"Notificación Redsys HTTP POST:")
dlprint(request.POST)
# Almacén de operaciones
try:
operation_data = json.loads(base64.b64decode(request.POST.get("Ds_MerchantParameters")))
dlprint(operation_data)
# Operation number
operation_number = operation_data.get("Ds_Order")
ds_transactiontype = operation_data.get("Ds_TransactionType")
if ds_transactiontype == "3":
# Operación de reembolso
operation = VPOSRefundOperation.objects.get(operation_number=operation_number)
else:
# Operación de confirmación de venta
operation = VPOSPaymentOperation.objects.get(operation_number=operation_number)
# Comprobar que no se trata de una operación de confirmación de compra anteriormente confirmada
if operation.status != "pending":
raise VPOSOperationAlreadyConfirmed(u"Operación ya confirmada")
operation.confirmation_data = {"GET": request.GET.dict(), "POST": request.POST.dict()}
operation.confirmation_code = operation_number
ds_errorcode = operation_data.get("Ds_ErrorCode")
if ds_errorcode:
errormsg = u' // ' + VPOSRedsys._format_ds_error_code(operation_data.get("Ds_ErrorCode"))
else:
errormsg = u''
operation.response_code = VPOSRedsys._format_ds_response_code(operation_data.get("Ds_Response")) + errormsg
operation.save()
dlprint("Operation {0} actualizada en _receiveConfirmationHTTPPOST()".format(operation.operation_number))
dlprint(u"Ds_Response={0} Ds_ErrorCode={1}".format(operation_data.get("Ds_Response"), operation_data.get("Ds_ErrorCode")))
except VPOSPaymentOperation.DoesNotExist:
# Si no existe la operación, están intentando
# cargar una operación inexistente
return False
except VPOSRefundOperation.DoesNotExist:
# Si no existe la operación, están intentando
# cargar una operación inexistente
return False
# Iniciamos el delegado y la operación, esto es fundamental para luego calcular la firma
vpos = operation.virtual_point_of_sale
vpos._init_delegated()
vpos.operation = operation
# Iniciamos los valores recibidos en el delegado
# Datos de la operación al completo
# Usado para recuperar los datos de la referencia
vpos.delegated.ds_merchantparameters = operation_data
## Datos que llegan por POST
# Firma enviada por RedSys, que más tarde compararemos con la generada por el comercio
vpos.delegated.firma = request.POST.get("Ds_Signature")
# Versión del método de firma utilizado
vpos.delegated.signature_version = request.POST.get("Ds_SignatureVersion")
# Parámetros de la operación (en base64 + JSON)
vpos.delegated.merchant_parameters = request.POST.get("Ds_MerchantParameters")
## Datos decodificados de Ds_MerchantParameters
# Respuesta de la pasarela de pagos. Indica si la operación se autoriza o no
vpos.delegated.ds_response = operation_data.get("Ds_Response")
return vpos.delegated
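# Ejemplo ilustrativo (valores inventados) del JSON decodificado de Ds_MerchantParameters
# con el que trabaja este método:
#
#   {"Ds_Order": "1234ABCD5678", "Ds_TransactionType": "0",
#    "Ds_Response": "0000", "Ds_ErrorCode": ""}
#
# Ds_TransactionType == "3" selecciona una VPOSRefundOperation; cualquier otro valor
# selecciona una VPOSPaymentOperation.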
####################################################################
## Paso 3.1.b Procesar notificación SOAP
@staticmethod
def _receiveConfirmationSOAP(request):
dlprint(u"Notificación Redsys SOAP:")
body = request.body
dlprint(body)
root = etree.fromstring(body)
tree = etree.ElementTree(root)
soapdict = dictlist(tree.getroot())
# Aquí tendremos toda la cadena <Message>...</Message>
xml_content = soapdict['{http://schemas.xmlsoap.org/soap/envelope/}Envelope']['value'][0][
'{http://schemas.xmlsoap.org/soap/envelope/}Body']['value'][0]['{InotificacionSIS}procesaNotificacionSIS'][
'value'][0]['XML']['value']
# procesar <Message>...</Message>
dlprint(u"Mensaje XML completo:" + xml_content)
root = etree.fromstring(xml_content)
# Almacén de operaciones
try:
ds_order = root.xpath("//Message/Request/Ds_Order/text()")[0]
ds_response = root.xpath("//Message/Request/Ds_Response/text()")[0]
ds_transactiontype = root.xpath("//Message/Request/Ds_TransactionType/text()")[0]
try:
ds_authorisationcode = root.xpath("//Message/Request/Ds_AuthorisationCode/text()")[0]
except IndexError:
dlprint(u"Ds_Order {0} sin Ds_AuthorisationCode (Ds_response={1})".format(ds_order, ds_response))
ds_authorisationcode = ""
try:
ds_errorcode = root.xpath("//Message/Request/Ds_ErrorCode/text()")[0]
errormsg = u' // ' + VPOSRedsys._format_ds_error_code(ds_errorcode)
except IndexError:
ds_errorcode = None
errormsg = u''
if ds_transactiontype == "3":
# Operación de reembolso
operation = VPOSRefundOperation.objects.get(operation_number=ds_order)
else:
# Operación de confirmación de venta
operation = VPOSPaymentOperation.objects.get(operation_number=ds_order)
if operation.status != "pending":
raise VPOSOperationAlreadyConfirmed(u"Operación ya confirmada")
operation.confirmation_data = {"GET": "", "POST": xml_content}
operation.confirmation_code = ds_order
operation.response_code = VPOSRedsys._format_ds_response_code(ds_response) + errormsg
operation.save()
dlprint("Operation {0} actualizada en _receiveConfirmationSOAP()".format(operation.operation_number))
dlprint(u"Ds_Response={0} Ds_ErrorCode={1}".format(ds_response, ds_errorcode))
except VPOSPaymentOperation.DoesNotExist:
# Si no existe la operación, están intentando
# cargar una operación inexistente
return False
except VPOSRefundOperation.DoesNotExist:
# Si no existe la operación, están intentando
# cargar una operación inexistente
return False
# Iniciamos el delegado y la operación, esto es fundamental
# para luego calcular la firma
vpos = operation.virtual_point_of_sale
vpos._init_delegated()
vpos.operation = operation
## Iniciamos los valores recibidos en el delegado
# Contenido completo de <Request>...</Request>, necesario posteriormente para cálculo de firma
#soap_request = etree.tostring(root.xpath("//Message/Request")[0])
# corrige autocierre de etiqueta y entrecomillado de atributos. Para la comprobación de la firma,
# la etiqueta debe tener apertura y cierre y el atributo va entre comilla simple
# soap_request = soap_request\
# .replace("<Ds_MerchantData/>", "<Ds_MerchantData></Ds_MerchantData>", 1)\
# .replace('"',"'")
regex = r"<Request.+</Request>"
matches = re.search(regex, xml_content, re.MULTILINE)
soap_request = matches.group(0)
vpos.delegated.soap_request = soap_request
dlprint(u"Request:" + vpos.delegated.soap_request)
# Firma enviada por RedSys, que más tarde compararemos con la generada por el comercio
vpos.delegated.firma = root.xpath("//Message/Signature/text()")[0]
dlprint(u"Signature:" + vpos.delegated.firma)
# Respuesta de la pasarela de pagos. Indica si la operación se autoriza o no
vpos.delegated.ds_response = root.xpath("//Message/Request/Ds_Response/text()")[0]
# Usado para recuperar los datos de la referencia
vpos.delegated.ds_merchantparameters = {}
try:
vpos.delegated.ds_merchantparameters["Ds_Merchant_Identifier"] = root.xpath("//Message/Request/Ds_Merchant_Identifier/text()")[0]
vpos.delegated.ds_merchantparameters["Ds_ExpiryDate"] = root.xpath("//Message/Request/Ds_ExpiryDate/text()")[0]
# Aquí la idea es incluir más parámetros que nos puedan servir en el llamador de este módulo
except IndexError:
pass
return vpos.delegated
####################################################################
## Paso 3.2. Verifica que los datos enviados desde
## la pasarela de pago identifiquen a una operación de compra y un
## pago autorizado.
def verifyConfirmation(self):
firma_calculada = self._verification_signature()
dlprint("Firma calculada " + firma_calculada)
dlprint("Firma recibida " + self.firma)
# Traducir caracteres de la firma recibida '-' y '_' al alfabeto base64
firma_traducida = self.firma.replace("-", "+").replace("_", "/")
if self.firma != firma_traducida:
dlprint("Firma traducida " + firma_traducida)
# Comprueba si el envío es correcto
if firma_traducida != firma_calculada:
dlprint("Las firmas no coinciden")
return False
else:
dlprint("Firma verificada correctamente")
# Comprobar que el resultado se corresponde a un pago autorizado
# por RedSys. Los pagos autorizados son todos los Ds_Response entre
# 0000 y 0099 [manual TPV Virtual SIS v1.0, pág. 31]
if len(self.ds_response) != 4 or not self.ds_response.isdigit():
dlprint(u"Transacción no autorizada por RedSys. Ds_Response es {0} (no está entre 0000-0099)".format(
self.ds_response))
return False
elif self.ds_response[:2] != "00":
dlprint(u"Transacción no autorizada por RedSys. Ds_Response es {0} (no está entre 0000-0099)".format(
self.ds_response))
return False
return True
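# Resultados ilustrativos de la comprobación de Ds_Response anterior (valores de ejemplo):
#   "0000" .. "0099"        -> pago autorizado, verifyConfirmation() devuelve True
#   "0101" (u otro fuera del rango 0000-0099) -> devuelve False
#   cadena vacía o no numérica de 4 dígitos   -> devuelve False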
####################################################################
## Paso 3.3a. Realiza el cobro y genera la respuesta a la pasarela y
## comunicamos con la pasarela de pago para que marque la operación
## como pagada. Sólo se usa en CECA
def charge(self):
# En caso de tener habilitada la preautorización
# no nos importa el tipo de confirmación.
if self.operative_type == PREAUTHORIZATION_TYPE:
# Cuando se tiene habilitada política de preautorización.
dlprint("Confirmar mediante política de preautorizacion")
if self._confirm_preauthorization():
return HttpResponse("OK")
else:
return self.responseNok()
# En otro caso la confirmación continua haciendose como antes.
# Sin cambiar nada.
elif self.soap_request:
dlprint("responseOk SOAP")
# Respuesta a notificación HTTP SOAP
response = '<Response Ds_Version="0.0"><Ds_Response_Merchant>OK</Ds_Response_Merchant></Response>'
dlprint("FIRMAR RESPUESTA {response} CON CLAVE DE CIFRADO {key}".format(response=response,
key=self.encryption_key))
signature = self._redsys_hmac_sha256_signature(response)
message = "<Message>{response}<Signature>{signature}</Signature></Message>".format(response=response,
signature=signature)
dlprint("MENSAJE RESPUESTA CON FIRMA {0}".format(message))
# El siguiente mensaje NO debe tener espacios en blanco ni saltos de línea entre las marcas XML
out = "<?xml version='1.0' encoding='UTF-8'?><SOAP-ENV:Envelope xmlns:SOAP-ENV=\"http://schemas.xmlsoap.org/soap/envelope/\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\"><SOAP-ENV:Body><ns1:procesaNotificacionSISResponse xmlns:ns1=\"InotificacionSIS\" SOAP-ENV:encodingStyle=\"http://schemas.xmlsoap.org/soap/encoding/\"><result xsi:type=\"xsd:string\">{0}</result></ns1:procesaNotificacionSISResponse></SOAP-ENV:Body></SOAP-ENV:Envelope>"
out = out.format(cgi.escape(message))
dlprint("RESPUESTA SOAP:" + out)
return HttpResponse(out, "text/xml")
else:
dlprint(u"responseOk HTTP POST (respuesta vacía)")
# Respuesta a notificación HTTP POST
# En RedSys no se exige una respuesta, por parte del comercio, para verificar
# la operación, pasamos una respuesta vacia
return HttpResponse("")
####################################################################
## Paso 3.3b. Si ha habido un error en el pago, se ha de dar una
## respuesta negativa a la pasarela bancaria.
def responseNok(self, **kwargs):
if self.operative_type == PREAUTHORIZATION_TYPE:
# Cuando se tiene habilitada política de preautorización.
dlprint("Enviar mensaje para cancelar una preautorizacion")
self._cancel_preauthorization()
return HttpResponse("")
elif self.soap_request:
dlprint("responseNok SOAP")
# Respuesta a notificación HTTP SOAP
response = '<Response Ds_Version="0.0"><Ds_Response_Merchant>KO</Ds_Response_Merchant></Response>'
dlprint("FIRMAR RESPUESTA {response} CON CLAVE DE CIFRADO {key}".format(response=response,
key=self.encryption_key))
signature = self._redsys_hmac_sha256_signature(response)
message = "<Message>{response}<Signature>{signature}</Signature></Message>".format(response=response,
signature=signature)
dlprint("MENSAJE RESPUESTA CON FIRMA {0}".format(message))
# El siguiente mensaje NO debe tener espacios en blanco ni saltos de línea entre las marcas XML
out = "<?xml version='1.0' encoding='UTF-8'?><SOAP-ENV:Envelope xmlns:SOAP-ENV=\"http://schemas.xmlsoap.org/soap/envelope/\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\"><SOAP-ENV:Body><ns1:procesaNotificacionSISResponse xmlns:ns1=\"InotificacionSIS\" SOAP-ENV:encodingStyle=\"http://schemas.xmlsoap.org/soap/encoding/\"><result xsi:type=\"xsd:string\">{0}</result></ns1:procesaNotificacionSISResponse></SOAP-ENV:Body></SOAP-ENV:Envelope>"
out = out.format(cgi.escape(message))
dlprint("RESPUESTA SOAP:" + out)
return HttpResponse(out, "text/xml")
else:
dlprint(u"responseNok HTTP POST (respuesta vacía)")
# Respuesta a notificación HTTP POST
# En RedSys no se exige una respuesta, por parte del comercio, para verificar
# que la operación ha sido negativa, pasamos una respuesta vacia
return HttpResponse("")
####################################################################
## Paso R1 (Refund) Configura el TPV en modo devolución y ejecuta la operación
def refund(self, operation_sale_code, refund_amount, description):
"""
Implementación particular del método de devolución para el TPV de Redsys.
Se ocupa de preparar un mensaje HTTP con los parámetros adecuados,
realizar la comunicación con los parámetros dados y la codificación necesaria,
e interpretar la respuesta HTML, buscando etiquetas DOM que informen de si la operación
se realiza correctamente o con error.
NOTA IMPORTANTE: La búsqueda de etiquetas en el árbol DOM es sensible a posibles cambios en la plataforma Redsys,
por lo tanto, en caso de no encontrar ninguna de las etiquetas esperadas
(noSePuedeRealizarOperacion u operacionAceptada), se lanza una excepción del tipo 'VPOSOperationException'.
Es responsabilidad del programador gestionar adecuadamente esta excepción desde la vista
y, en caso de que se produzca, avisar a los desarrolladores responsables del módulo 'DjangoVirtualPost'
para su actualización.
:param operation_sale_code: Código de la venta original sobre la que se realiza la devolución.
:param refund_amount: Cantidad de la devolución.
:param description: Motivo o comentario de la devolución.
:return: True | False según se complete la operación con éxito.
"""
# Modificamos el tipo de operación para indicar que la transacción
# es de tipo devolución automática.
# URL de pago según el entorno.
self.url = self.REDSYS_URL[self.parent.environment]
# IMPORTANTE: Este es el código de operación para hacer devoluciones.
self.transaction_type = 3
# Formato para Importe: según redsys, ha de tener un formato de entero positivo, con las dos últimas posiciones
# ocupadas por los decimales
self.importe = "{0:.2f}".format(float(refund_amount)).replace(".", "")
# Idioma de la pasarela, por defecto es español, tomamos
# el idioma actual y le asignamos éste
self.idioma = self.IDIOMAS["es"]
lang = translation.get_language()
if lang in self.IDIOMAS:
self.idioma = self.IDIOMAS[lang]
order_data = {
# Indica el importe de la venta
"DS_MERCHANT_AMOUNT": self.importe,
# Indica el número de operacion
"DS_MERCHANT_ORDER": self.parent.operation.operation_number,
# Código FUC asignado al comercio
"DS_MERCHANT_MERCHANTCODE": self.merchant_code,
# Indica el tipo de moneda a usar
"DS_MERCHANT_CURRENCY": self.tipo_moneda,
# Indica que tipo de transacción se utiliza
"DS_MERCHANT_TRANSACTIONTYPE": self.transaction_type,
# Indica el terminal
"DS_MERCHANT_TERMINAL": self.terminal_id,
# Obligatorio si se tiene confirmación online.
"DS_MERCHANT_MERCHANTURL": self.merchant_response_url,
# URL a la que se redirige al usuario en caso de que la venta haya sido satisfactoria
"DS_MERCHANT_URLOK": self.parent.operation.payment.url_ok,
# URL a la que se redirige al usuario en caso de que la venta NO haya sido satisfactoria
"DS_MERCHANT_URLKO": self.parent.operation.payment.url_nok,
# Se mostrará al titular en la pantalla de confirmación de la compra
"DS_MERCHANT_PRODUCTDESCRIPTION": description,
# Indica el valor del idioma
"DS_MERCHANT_CONSUMERLANGUAGE": self.idioma,
# Representa la suma total de los importes de las cuotas
"DS_MERCHANT_SUMTOTAL": self.importe,
}
json_order_data = json.dumps(order_data)
packed_order_data = base64.b64encode(json_order_data)
data = {
"Ds_SignatureVersion": "HMAC_SHA256_V1",
"Ds_MerchantParameters": packed_order_data,
"Ds_Signature": self._redsys_hmac_sha256_signature(packed_order_data)
}
headers = {'enctype': 'application/x-www-form-urlencoded'}
# Realizamos petición POST con los datos de la operación y las cabeceras necesarias.
refund_html_request = requests.post(self.url, data=data, headers=headers)
# En caso de tener una respuesta 200
if refund_html_request.status_code == 200:
# Iniciamos un objeto BeautifulSoup (para poder leer los elementos del DOM del HTML recibido).
html = BeautifulSoup(refund_html_request.text, "html.parser")
# Buscamos elementos significativos del DOM que nos indiquen si la operación se ha realizado correctamente o no.
refund_message_error = html.find('text', {'lngid': 'noSePuedeRealizarOperacion'})
refund_message_ok = html.find('text', {'lngid': 'operacionAceptada'})
# Cuando en el DOM del documento HTML aparece un mensaje de error.
if refund_message_error:
dlprint(refund_message_error)
dlprint(u'Error realizando la operación')
status = False
# Cuando en el DOM del documento HTML aparece un mensaje de ok.
elif refund_message_ok:
dlprint(u'Operación realizada correctamente')
dlprint(refund_message_ok)
status = True
# No aparece mensaje de error ni de ok
else:
raise VPOSOperationException("La resupuesta HTML con la pantalla de devolución "
"no muestra mensaje informado de forma expícita "
"si la operación se produce con éxito o error. Revisar método 'VPOSRedsys.refund'.")
# Respuesta HTTP diferente a 200
else:
status = False
return status
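# Esbozo de uso mínimo (nombres hipotéticos, no forma parte del código original): suponiendo
# que `vpos` es un VirtualPointOfSale ya inicializado cuyo delegado es este VPOSRedsys y cuyo
# atributo `operation` apunta a la venta que se devuelve:
#
#   ok = vpos.delegated.refund(operation_sale_code="SALE-0001",
#                              refund_amount=Decimal("10.50"),
#                              description=u"Devolución parcial")
#
# `ok` será True cuando la pantalla HTML de Redsys muestre "operacionAceptada" y False cuando
# muestre "noSePuedeRealizarOperacion"; cualquier otra respuesta lanza VPOSOperationException.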
####################################################################
## Paso R2.a. Respuesta positiva a confirmación asíncrona de refund
def refund_response_ok(self, extended_status=""):
if self.soap_request:
dlprint("refund_response_ok SOAP")
# Respuesta a notificación HTTP SOAP
response = '<Response Ds_Version="0.0"><Ds_Response_Merchant>OK</Ds_Response_Merchant></Response>'
dlprint("FIRMAR RESPUESTA {response} CON CLAVE DE CIFRADO {key}".format(response=response,
key=self.encryption_key))
signature = self._redsys_hmac_sha256_signature(response)
message = "<Message>{response}<Signature>{signature}</Signature></Message>".format(response=response,
signature=signature)
dlprint("MENSAJE RESPUESTA CON FIRMA {0}".format(message))
# El siguiente mensaje NO debe tener espacios en blanco ni saltos de línea entre las marcas XML
out = "<?xml version='1.0' encoding='UTF-8'?><SOAP-ENV:Envelope xmlns:SOAP-ENV=\"http://schemas.xmlsoap.org/soap/envelope/\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\"><SOAP-ENV:Body><ns1:procesaNotificacionSISResponse xmlns:ns1=\"InotificacionSIS\" SOAP-ENV:encodingStyle=\"http://schemas.xmlsoap.org/soap/encoding/\"><result xsi:type=\"xsd:string\">{0}</result></ns1:procesaNotificacionSISResponse></SOAP-ENV:Body></SOAP-ENV:Envelope>"
out = out.format(cgi.escape(message))
dlprint("RESPUESTA SOAP:" + out)
return HttpResponse(out, "text/xml")
else:
dlprint(u"refund_response_ok HTTP POST (respuesta vacía)")
# Respuesta a notificación HTTP POST
# En RedSys no se exige una respuesta, por parte del comercio, para verificar
# la operación, pasamos una respuesta vacia
return HttpResponse("")
####################################################################
## Paso R2.b. Respuesta negativa a confirmación asíncrona de refund
def refund_response_nok(self, extended_status=""):
if self.soap_request:
dlprint("refund_response_nok SOAP")
# Respuesta a notificación HTTP SOAP
response = '<Response Ds_Version="0.0"><Ds_Response_Merchant>KO</Ds_Response_Merchant></Response>'
dlprint("FIRMAR RESPUESTA {response} CON CLAVE DE CIFRADO {key}".format(response=response,
key=self.encryption_key))
signature = self._redsys_hmac_sha256_signature(response)
message = "<Message>{response}<Signature>{signature}</Signature></Message>".format(response=response,
signature=signature)
dlprint("MENSAJE RESPUESTA CON FIRMA {0}".format(message))
# El siguiente mensaje NO debe tener espacios en blanco ni saltos de línea entre las marcas XML
out = "<?xml version='1.0' encoding='UTF-8'?><SOAP-ENV:Envelope xmlns:SOAP-ENV=\"http://schemas.xmlsoap.org/soap/envelope/\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\"><SOAP-ENV:Body><ns1:procesaNotificacionSISResponse xmlns:ns1=\"InotificacionSIS\" SOAP-ENV:encodingStyle=\"http://schemas.xmlsoap.org/soap/encoding/\"><result xsi:type=\"xsd:string\">{0}</result></ns1:procesaNotificacionSISResponse></SOAP-ENV:Body></SOAP-ENV:Envelope>"
out = out.format(cgi.escape(message))
dlprint("RESPUESTA SOAP:" + out)
return HttpResponse(out, "text/xml")
else:
dlprint(u"refund_response_nok HTTP POST (respuesta vacía)")
# Respuesta a notificación HTTP POST
# En RedSys no se exige una respuesta, por parte del comercio, para verificar
# que la operación ha sido negativa, pasamos una respuesta vacia
return HttpResponse("")
def _confirm_preauthorization(self):
"""
Realiza petición HTTP POST con los parámetros adecuados para
confirmar una operación de pre-autorización.
NOTA: La respuesta de esta petición es un HTML; aplicamos scraping
para asegurarnos de que corresponde a una pantalla de éxito.
NOTA2: Si el HTML anterior no proporciona información de éxito o error, lanza una excepción.
:return: status: Bool
"""
dlprint("Entra en confirmacion de pre-autorizacion")
# URL de pago según el entorno
self.url = self.REDSYS_URL[self.parent.environment]
# IMPORTANTE: Este es el código de operación para hacer confirmación de preautorizacon.
self.transaction_type = 2
# Idioma de la pasarela, por defecto es español, tomamos
# el idioma actual y le asignamos éste
self.idioma = self.IDIOMAS["es"]
lang = translation.get_language()
if lang in self.IDIOMAS:
self.idioma = self.IDIOMAS[lang]
self.importe = "{0:.2f}".format(float(self.parent.operation.amount)).replace(".", "")
if self.importe == "000":
self.importe = "0"
order_data = {
# Indica el importe de la venta
"DS_MERCHANT_AMOUNT": self.importe,
# Indica el número de operacion
"DS_MERCHANT_ORDER": self.parent.operation.operation_number,
# Código FUC asignado al comercio
"DS_MERCHANT_MERCHANTCODE": self.merchant_code,
# Indica el tipo de moneda a usar
"DS_MERCHANT_CURRENCY": self.tipo_moneda,
# Indica que tipo de transacción se utiliza
"DS_MERCHANT_TRANSACTIONTYPE": self.transaction_type,
# Indica el terminal
"DS_MERCHANT_TERMINAL": self.terminal_id,
# Obligatorio si se tiene confirmación online.
"DS_MERCHANT_MERCHANTURL": self.merchant_response_url,
# URL a la que se redirige al usuario en caso de que la venta haya sido satisfactoria
"DS_MERCHANT_URLOK": self.parent.operation.url_ok,
# URL a la que se redirige al usuario en caso de que la venta NO haya sido satisfactoria
"DS_MERCHANT_URLKO": self.parent.operation.url_nok,
# Se mostrará al titular en la pantalla de confirmación de la compra
"DS_MERCHANT_PRODUCTDESCRIPTION": self.parent.operation.description,
# Indica el valor del idioma
"DS_MERCHANT_CONSUMERLANGUAGE": self.idioma,
# Representa la suma total de los importes de las cuotas
"DS_MERCHANT_SUMTOTAL": self.importe,
}
json_order_data = json.dumps(order_data)
packed_order_data = base64.b64encode(json_order_data)
dlprint(json_order_data)
data = {
"Ds_SignatureVersion": "HMAC_SHA256_V1",
"Ds_MerchantParameters": packed_order_data,
"Ds_Signature": self._redsys_hmac_sha256_signature(packed_order_data)
}
headers = {'enctype': 'application/x-www-form-urlencoded'}
# Realizamos petición POST con los datos de la operación y las cabeceras necesarias.
confirmpreauth_html_request = requests.post(self.url, data=data, headers=headers)
if confirmpreauth_html_request.status_code == 200:
dlprint("_confirm_preauthorization status_code 200")
# Iniciamos un objeto BeautifulSoup (para poder leer los elementos del DOM del HTML recibido).
html = BeautifulSoup(confirmpreauth_html_request.text, "html.parser")
# Buscamos elementos significativos del DOM que nos indiquen si la operación se ha realizado correctamente o no.
confirmpreauth_message_error = html.find('text', {'lngid': 'noSePuedeRealizarOperacion'})
confirmpreauth_message_ok = html.find('text', {'lngid': 'operacionAceptada'})
# Cuando en el DOM del documento HTML aparece un mensaje de error.
if confirmpreauth_message_error:
dlprint(confirmpreauth_message_error)
dlprint(u'Error realizando la operación')
status = False
# Cuando en el DOM del documento HTML aparece un mensaje de ok.
elif confirmpreauth_message_ok:
dlprint(u'Operación realizada correctamente')
dlprint(confirmpreauth_message_ok)
status = True
# No aparece mensaje de error ni de ok
else:
raise VPOSOperationException(
"La respuesta HTML con la pantalla de confirmación no muestra un mensaje que informe de forma explícita "
"de si la operación se produce con éxito o error (revisar método 'VPOSRedsys._confirm_preauthorization').")
# Respuesta HTTP diferente a 200
else:
status = False
return status
def _cancel_preauthorization(self):
"""
Realiza petición HTTP POST con los parámetros adecuados para
anular una operación de pre-autorización.
NOTA: La respuesta de esta petición es un HTML; aplicamos scraping
para asegurarnos de que corresponde a una pantalla de éxito.
NOTA2: Si el HTML anterior no proporciona información de éxito o error, lanza una excepción.
:return: status: Bool
"""
dlprint("Entra en cancelacion de pre-autorizacion")
# URL de pago según el entorno
self.url = self.REDSYS_URL[self.parent.environment]
# IMPORTANTE: Este es el código de operación para hacer cancelación de preautorizacon.
self.transaction_type = 9
# Idioma de la pasarela, por defecto es español, tomamos
# el idioma actual y le asignamos éste
self.idioma = self.IDIOMAS["es"]
lang = translation.get_language()
if lang in self.IDIOMAS:
self.idioma = self.IDIOMAS[lang]
self.importe = "{0:.2f}".format(float(self.parent.operation.amount)).replace(".", "")
if self.importe == "000":
self.importe = "0"
order_data = {
# Indica el importe de la venta
"DS_MERCHANT_AMOUNT": self.importe,
# Indica el número de operacion
"DS_MERCHANT_ORDER": self.parent.operation.operation_number,
# Código FUC asignado al comercio
"DS_MERCHANT_MERCHANTCODE": self.merchant_code,
# Indica el tipo de moneda a usar
"DS_MERCHANT_CURRENCY": self.tipo_moneda,
# Indica que tipo de transacción se utiliza
"DS_MERCHANT_TRANSACTIONTYPE": self.transaction_type,
# Indica el terminal
"DS_MERCHANT_TERMINAL": self.terminal_id,
# Obligatorio si se tiene confirmación online.
"DS_MERCHANT_MERCHANTURL": self.merchant_response_url,
# URL a la que se redirige al usuario en caso de que la venta haya sido satisfactoria
"DS_MERCHANT_URLOK": self.parent.operation.url_ok,
# URL a la que se redirige al usuario en caso de que la venta NO haya sido satisfactoria
"DS_MERCHANT_URLKO": self.parent.operation.url_nok,
# Se mostrará al titular en la pantalla de confirmación de la compra
"DS_MERCHANT_PRODUCTDESCRIPTION": self.parent.operation.description,
# Indica el valor del idioma
"DS_MERCHANT_CONSUMERLANGUAGE": self.idioma,
# Representa la suma total de los importes de las cuotas
"DS_MERCHANT_SUMTOTAL": self.importe
}
json_order_data = json.dumps(order_data)
dlprint(json_order_data)
packed_order_data = base64.b64encode(json_order_data)
data = {
"Ds_SignatureVersion": "HMAC_SHA256_V1",
"Ds_MerchantParameters": packed_order_data,
"Ds_Signature": self._redsys_hmac_sha256_signature(packed_order_data)
}
headers = {'enctype': 'application/x-www-form-urlencoded'}
# Realizamos petición POST con los datos de la operación y las cabeceras necesarias.
confirmpreauth_html_request = requests.post(self.url, data=data, headers=headers)
if confirmpreauth_html_request.status_code == 200:
# Iniciamos un objeto BeautifulSoup (para poder leer los elementos del DOM del HTML recibido).
html = BeautifulSoup(confirmpreauth_html_request.text, "html.parser")
# Buscamos elementos significativos del DOM que nos indiquen si la operación se ha realizado correctamente o no.
confirmpreauth_message_error = html.find('text', {'lngid': 'noSePuedeRealizarOperacion'})
confirmpreauth_message_ok = html.find('text', {'lngid': 'operacionAceptada'})
# Cuando en el DOM del documento HTML aparece un mensaje de error.
if confirmpreauth_message_error:
dlprint(confirmpreauth_message_error)
dlprint(u'Error realizando la operación')
status = False
# Cuando en el DOM del documento HTML aparece un mensaje de ok.
elif confirmpreauth_message_ok:
dlprint(u'Operación realizada correctamente')
dlprint(confirmpreauth_message_ok)
status = True
# No aparece mensaje de error ni de ok
else:
raise VPOSOperationException(
"La respuesta HTML con la pantalla de cancelación no muestra un mensaje que informe de forma explícita "
"de si la operación se produce con éxito o error (revisar método 'VPOSRedsys._cancel_preauthorization').")
# Respuesta HTTP diferente a 200
else:
status = False
return status
####################################################################
## Generador de firma de mensajes
def _redsys_hmac_sha256_signature(self, data):
"""
Firma la cadena de texto recibida usando 3DES y HMAC SHA-256
Calcula la firma a incorporar en el formulario de pago
:type data: str cadena de texto que se va a firmar
:return: str cadena de texto con la firma
"""
# Obtener encryption key para el entorno actual (almacenada en self.encryption_key)
self.__init_encryption_key__()
dlprint("_redsys_hmac_sha256_signature: encryption key {0}".format(self.encryption_key))
# Decodificar firma
encryption_key = base64.b64decode(self.encryption_key)
# operation_number = bytes(self.parent.operation.operation_number)
operation_number = bytes(self.parent.operation.operation_number)
dlprint("_redsys_hmac_sha256_signature: operation_number {0}".format(operation_number))
# Rellenar cadena hasta múltiplo de 8 bytes
if len(operation_number) % 8 != 0:
dlprint(
"_redsys_hmac_sha256_signature: la longitud del operation number es {0} y necesita relleno para 3DES".format(
len(operation_number)))
operation_number += bytes("\x00") * (8 - len(self.parent.operation.operation_number) % 8)
dlprint("_redsys_hmac_sha256_signature: la longitud de la cadena rellenada para 3DES es de {0}".format(
len(operation_number)))
# Generar clave de firma con 3DES y IV igual a ocho bytes con cero
des3_obj = DES3.new(encryption_key, DES3.MODE_CBC, b"\x00" * 8)
signature_key = des3_obj.encrypt(operation_number)
# Generar firma HMAC SHA-256 del mensaje.
hash_obj = HMAC.new(key=signature_key, msg=data, digestmod=SHA256)
digest = hash_obj.digest()
# Devolver firma codificada en Base64
signature = base64.b64encode(digest)
dlprint("Firma: {0}".format(signature))
return signature
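# Esbozo paso a paso de la firma calculada arriba (usando los mismos módulos que ya importa
# este fichero: base64, DES3, HMAC y SHA256; `data` es la cadena Base64 de
# Ds_MerchantParameters, o bien el <Request>/<Response> SOAP completo):
#
#   key        = base64.b64decode(encryption_key)                     # clave secreta del comercio
#   order      = operation_number + b"\x00" * ((8 - len(operation_number) % 8) % 8)
#   order_key  = DES3.new(key, DES3.MODE_CBC, b"\x00" * 8).encrypt(order)
#   digest     = HMAC.new(key=order_key, msg=data, digestmod=SHA256).digest()
#   signature  = base64.b64encode(digest)
#
# Es decir, la clave por operación es el cifrado 3DES-CBC del número de pedido, y Ds_Signature
# es el HMAC-SHA256 (en Base64) del mensaje a firmar.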
####################################################################
## Generador de firma para la verificación
def _verification_signature(self):
"""
Calcula la firma de verificación, tanto para peticiones SOAP como para peticiones HTTP POST
:rtype : str
:return: str firma calculada
"""
self.__init_encryption_key__()
# El método de comprobación de firma difiere según se esté procesando una notificación
# SOAP o HTTP POST
if self.soap_request:
## Cálculo de firma para confirmación SOAP:
dlprint(u"Comprobación de firma para SOAP con clave de cifrado " + self.encryption_key)
signature = self._redsys_hmac_sha256_signature(self.soap_request)
else:
## Cálculo de firma para confirmación HTTP POST:
dlprint(u"Comprobación de firma para HTTP POST con clave de cifrado " + self.encryption_key)
signature = self._redsys_hmac_sha256_signature(self.merchant_parameters)
dlprint("FIRMA {0}".format(signature))
return signature
@staticmethod
def _format_ds_response_code(ds_response):
"""
Formatea el mensaje asociado a un Ds_Response
:param ds_response: str código Ds_Response
:return: unicode mensaje formateado
"""
if not ds_response:
return None
if len(ds_response) == 4 and ds_response.isdigit() and ds_response[:2] == "00":
message = u"Transacción autorizada para pagos y preautorizaciones."
else:
message = VPOSRedsys.DS_RESPONSE_CODES.get(ds_response, u"código de respuesta Ds_Response desconocido")
out = u"{0}. {1}".format(ds_response, message)
return out
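# Salidas de ejemplo (ilustrativas):
#   _format_ds_response_code("0000") -> u"0000. Transacción autorizada para pagos y preautorizaciones."
#   _format_ds_response_code("ZZZZ") -> u"ZZZZ. código de respuesta Ds_Response desconocido"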
@staticmethod
def _format_ds_error_code(ds_errorcode):
"""
Formatea el mensaje asociado a un Ds_ErrorCode
:param ds_errorcode: str código Ds_ErrorCode
:return: unicode mensaje formateado
"""
if not ds_errorcode:
return ''
message = VPOSRedsys.DS_ERROR_CODES.get(ds_errorcode, u'Código de respuesta Ds_ErrorCode desconocido')
out = u"{0}. {1}".format(ds_errorcode, message)
return out
########################################################################################################################
########################################################################################################################
###################################################### TPV PayPal ######################################################
########################################################################################################################
########################################################################################################################
class VPOSPaypal(VirtualPointOfSale):
"""Información de configuración del TPV Virtual PayPal """
## Todo TPV tiene una relación con los datos generales del TPV
parent = models.OneToOneField(VirtualPointOfSale, parent_link=True, related_name="+", null=False, db_column="vpos_id")
# nombre de usuario para la API de Paypal
API_username = models.CharField(max_length=60, null=False, blank=False, verbose_name="API_username")
# contraseña para la API de Paypal
API_password = models.CharField(max_length=60, null=False, blank=False, verbose_name="API_password")
# firma para la API de Paypal
API_signature = models.CharField(max_length=60, null=False, blank=False, verbose_name="API_signature")
# versión de la API de Paypal
Version = models.CharField(max_length=3, null=False, blank=False, verbose_name="Version")
Return_url = {
"production": "http://" + settings.ALLOWED_HOSTS[0] + "/payment/confirm/paypal",
"testing": "http://" + settings.ALLOWED_HOSTS[0] + "/payment/confirm/paypal"
}
Cancel_url = {
"production": "http://" + settings.ALLOWED_HOSTS[0] + "/es/payment/cancel/",
"testing": "http://" + settings.ALLOWED_HOSTS[0] + "/es/payment/cancel/"
}
paypal_url = {
"production": {
"api": "https://api-3t.paypal.com/nvp",
"payment": "https://www.paypal.com/cgi-bin/webscr",
},
"testing": {
"api": "https://api-3t.sandbox.paypal.com/nvp",
"payment": "https://www.sandbox.paypal.com/cgi-bin/webscr",
}
}
# URL de pago que variará según el entorno
url = None
# Importe de la venta
importe = None
# Indica el número de operación
operation_number = None
# estado que indica si estamos en api o payment
endpoint = "api"
# Tipo de moneda usada en la operación, en este caso sera Euros
tipo_moneda = "978"
# Método de envío de formulario
method = "SetExpressCheckout"
# Versión de API de PayPal
version = "95"
# ID de la moneda
PaymentRequest_0_CurrencyCode = "EUR"
# Será siempre este valor fijo
PaymentRequest_0_PaymentAction = "Sale"
# Controla si se ha recibido la confirmación de pago del TPV y si esta es correcta.
is_verified = False
# Token devuelto por Paypal
valor_token = None
# ID del comprador devuelta por Paypal
valor_payerID = None
## Constructor del TPV PayPal
def __init__(self, *args, **kwargs):
super(VPOSPaypal, self).__init__(*args, **kwargs)
def __unicode__(self):
return u"API_username: {0}".format(self.API_username)
@classmethod
def form(cls):
from forms import VPOSPaypalForm
return VPOSPaypalForm
####################################################################
## Paso 1.1. Configuración del pago
def configurePayment(self, **kwargs):
# URL de pago según el entorno
self.url = self.paypal_url[self.parent.environment]
# Formato para Importe: según paypal, ha de tener un formato con un punto decimal con exactamente
# dos dígitos a la derecha que representa los céntimos
self.importe = "{0:.2f}".format(float(self.parent.operation.amount))
####################################################################
## Paso 1.2. Preparación del TPV y Generación del número de operación (token)
def setupPayment(self, operation_number=None, code_len=12):
"""
Obtiene el número de operación que, en el caso de Paypal, será el token
devuelto por la llamada SetExpressCheckout de la API.
"""
dlprint("Paypal.setupPayment")
if operation_number:
self.token = operation_number
dlprint("Rescato el operation number para esta venta {0}".format(self.token))
return self.token
dlprint("El operation number no existía")
token_url = self.paypal_url[self.parent.environment][self.endpoint]
dlprint("Attribute paypal_url " + unicode(self.paypal_url))
dlprint("Endpoint {0}".format(self.endpoint))
dlprint("Enviroment {0}".format(self.parent.environment))
dlprint("URL de envío {0}".format(token_url))
# Preparamos los campos del formulario
query_args = {
# Indica el método a usar
"METHOD": self.method,
# Indica la versión
"VERSION": self.version,
# Indica el usuario registrado como buyer en paypal
"USER": self.API_username,
# Indica la contraseña del usuario registrado como buyer en paypal
"PWD": self.API_password,
# Indica la firma del usuario registrado como buyer en paypal
"SIGNATURE": self.API_signature,
# Importe de la venta
"PAYMENTREQUEST_0_AMT": self.importe,
# ID de la moneda a utilizar
"PAYMENTREQUEST_0_CURRENCYCODE": self.PaymentRequest_0_CurrencyCode,
# URL donde Paypal redirige al usuario comprador después de logearse en Paypal
"RETURNURL": self.Return_url[self.parent.environment],
# URL a la que Paypal redirige al comprador si el comprador no aprueba el pago
"CANCELURL": self.parent.operation.url_nok,
# Especifíca la acción
"PAYMENTREQUEST_0_PAYMENTACTION": self.PaymentRequest_0_PaymentAction,
# Especifica la descripción de la venta
"L_PAYMENTREQUEST_0_NAME0": unicode(self.parent.operation.description).encode('utf-8'),
# Especifica el importe final de la venta
"L_PAYMENTREQUEST_0_AMT0": self.parent.operation.amount
}
dlprint(u"Petición por POST")
dlprint(query_args)
# Recogemos los datos
data = urllib.urlencode(query_args)
dlprint("Recogemos los datos")
dlprint(data)
# Enviamos la petición HTTP POST
request = urllib2.Request(token_url, data)
# Recogemos la respuesta dada, que vendrá en texto plano
response = urllib2.urlopen(request)
res_string = response.read()
dlprint("Paypal responde")
dlprint("Respuesta PayPal: " + res_string)
res = urlparse.parse_qs(res_string)
# Comprobamos que exista un ACK y que este no contenga el valor "Failure"
if "ACK" in res and res["ACK"][0] == "Failure":
raise ValueError(u"ERROR. La respuesta ha sido incorrecta.")
# Si no devuelve un Token, habrá un error en la venta
if not "TOKEN" in res:
raise ValueError(u"ERROR. La respuesta no contiene token.")
# Si hay más de un token, habrá un error
if len(res["TOKEN"]) != 1:
raise ValueError(u"ERROR. El token no tiene un único elemento.")
self.token = res["TOKEN"][0]
dlprint("Todo OK el token es: " + self.token)
return self.token
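# Intercambio NVP ilustrativo de lo que hace este método (valores inventados):
#
#   POST https://api-3t.sandbox.paypal.com/nvp
#        METHOD=SetExpressCheckout&VERSION=95&USER=...&PWD=...&SIGNATURE=...
#        &PAYMENTREQUEST_0_AMT=10.50&PAYMENTREQUEST_0_CURRENCYCODE=EUR&RETURNURL=...&...
#
#   Respuesta (url-encoded): TOKEN=EC-1AB23456CD789012E&ACK=Success&...
#
# El valor de TOKEN pasa a ser el número de operación usado en el resto del flujo.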
####################################################################
## Paso 1.3. Obtiene los datos de pago
## Este método enviará un formulario por GET con el token dado anteriormente
def getPaymentFormData(self):
data = {
"cmd": "_express-checkout",
"token": self.token
}
form_data = {
"data": data,
"action": self.paypal_url[self.parent.environment]["payment"],
"method": "get"
}
return form_data
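# Redirección ilustrativa construida con el form_data anterior (entorno sandbox, token inventado):
#   GET https://www.sandbox.paypal.com/cgi-bin/webscr?cmd=_express-checkout&token=EC-1AB23456CD789012E
# El comprador se identifica en PayPal y vuelve a RETURNURL con los parámetros `token` y `PayerID`.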
####################################################################
## Paso 3.1. Obtiene el número de operación(token) y los datos que nos
## envíe la pasarela de pago.
@staticmethod
def receiveConfirmation(request, **kwargs):
# Almacén de operaciones
try:
operation = VPOSPaymentOperation.objects.get(operation_number=request.GET.get("token"))
operation.confirmation_data = {"GET": request.GET.dict(), "POST": request.POST.dict()}
operation.confirmation_code = request.POST.get("token")
operation.save()
dlprint("Operation {0} actualizada en receiveConfirmation()".format(operation.operation_number))
vpos = operation.virtual_point_of_sale
except VPOSPaymentOperation.DoesNotExist:
# Si no existe la operación, están intentando
# cargar una operación inexistente
return False
# Iniciamos el delegado y la operación
vpos._init_delegated()
vpos.operation = operation
# Iniciamos los valores recibidos en el delegado
# ID del comprador
vpos.delegated.payer_id = request.GET.get("PayerID")
# Token
vpos.delegated.token = request.GET.get("token")
dlprint(u"Lo que recibimos de Paypal: ")
dlprint(request.GET)
return vpos.delegated
####################################################################
## Paso 3.2. Verifica que los datos enviados desde
## la pasarela de pago identifiquen a una operación de compra.
def verifyConfirmation(self):
# Comprueba si el envío es correcto
# Para esto, comprobamos si hay alguna operación que tenga el mismo
# número de operación
self.valor_token = self.token
self.operation_number = self.token
# Almacenamos el valor del ID del comprador, para más tarde usarlo
self.valor_payerID = self.payer_id
operation = VPOSPaymentOperation.objects.filter(operation_number=self.valor_token)
if len(operation):
return True
return False
####################################################################
## Paso 3.3. Realiza el cobro y genera un formulario, para comunicarnos
## con PayPal
def charge(self):
# Prepara los campos del formulario
query_args = {
'METHOD': "DoExpressCheckoutPayment",
'USER': self.API_username,
'PWD': self.API_password,
'SIGNATURE': self.API_signature,
'VERSION': self.Version,
'TOKEN': self.operation_number,
'PAYERID': self.valor_payerID,
'PAYMENTREQUEST_0_CURRENCYCODE': self.PaymentRequest_0_CurrencyCode,
'PAYMENTREQUEST_0_PAYMENTACTION': self.PaymentRequest_0_PaymentAction,
'PAYMENTREQUEST_0_AMT': self.parent.operation.amount,
}
data = urllib.urlencode(query_args)
# Realizamos una petición HTTP POST
api_url = self.paypal_url[self.parent.environment]["api"]
request = urllib2.Request(api_url, data)
# Almacenamos la respuesta dada por PayPal
response = urllib2.urlopen(request)
res_string = response.read()
res = urlparse.parse_qs(res_string)
# Comprobamos que haya un ACK y que no tenga el valor de "Failure"
if "ACK" in res and res["ACK"][0] == "Failure":
raise ValueError(u"ERROR. La respuesta ha sido incorrecta.")
# Si no hay un token, entonces habrá un error
if not "TOKEN" in res:
raise ValueError(u"ERROR. La respuesta no contiene token.")
# Si hay más de un token, habrá un error
if len(res["TOKEN"]) != 1:
raise ValueError(u"ERROR. El token no tiene un único elemento.")
token = res["TOKEN"][0]
dlprint(u"El token es {0} y el número de operación era ".format(token, self.parent.operation.sale_code))
# Si llegamos aquí, es que ha ido bien la operación, asi que redireccionamos a la url de payment_ok
return redirect(reverse("payment_ok_url", kwargs={"sale_code": self.parent.operation.sale_code}))
####################################################################
## Paso 3.3b. Si ha habido un error en el pago, redirigimos a la url correcta
def responseNok(self, **kwargs):
dlprint("responseNok")
# En Paypal no se exige una respuesta, por parte del comercio, para verificar
# que la operación ha sido negativa, redireccionamos a la url de cancelación
return redirect(reverse("payment_cancel_url", kwargs={"sale_code": self.parent.operation.sale_code}))
####################################################################
## Paso R. (Refund) Configura el TPV en modo devolución
## TODO: No implementado
def refund(self, operation_sale_code, refund_amount, description):
raise VPOSOperationNotImplemented(u"No se ha implementado la operación de devolución particular para Paypal.")
####################################################################
## Paso R2.a. Respuesta positiva a confirmación asíncrona de refund
def refund_response_ok(self, extended_status=""):
raise VPOSOperationNotImplemented(u"No se ha implementado la operación de devolución particular para Paypal.")
####################################################################
## Paso R2.b. Respuesta negativa a confirmación asíncrona de refund
def refund_response_nok(self, extended_status=""):
raise VPOSOperationNotImplemented(u"No se ha implementado la operación de devolución particular para Paypal.")
########################################################################################################################
########################################################################################################################
################################################# TPV Santander Elavon #################################################
########################################################################################################################
########################################################################################################################
class VPOSSantanderElavon(VirtualPointOfSale):
"""Información de configuración del TPV Virtual CECA"""
regex_clientid = re.compile("^[a-zA-Z0-9]*$")
regex_account = re.compile("^[a-zA-Z0-9.]*$")
regex_number = re.compile("^\d*$")
regex_operation_number_prefix = re.compile("^[A-Za-z0-9]*$")
# Relación con el padre (TPV).
# Al poner el signo "+" como "related_name" evitamos que desde el padre
# se pueda seguir la relación hasta aquí (ya que cada uno de las clases
# que heredan de ella estará en una tabla y sería un lío).
parent = models.OneToOneField(VirtualPointOfSale, parent_link=True, related_name="+", null=False, db_column="vpos_id")
# Identifica al comercio, será facilitado por la caja en el proceso de alta
merchant_id = models.CharField(max_length=50, null=False, blank=False, verbose_name="MerchantID",
validators=[MinLengthValidator(1), MaxLengthValidator(50),
RegexValidator(regex=regex_clientid,
message="Asegúrese de que todos los caracteres son alfanuméricos")])
# Confirmation URL that will be used by the virtual POS
merchant_response_url = models.URLField(max_length=64, null=False, blank=False, verbose_name="MerchantURL",
help_text=u"Confirmation URL that will be used by the virtual POS")
# Identifica la caja, será facilitado por la caja en el proceso de alta
account = models.CharField(max_length=30, null=False, blank=False, verbose_name="Account",
validators=[MinLengthValidator(0), MaxLengthValidator(30),
RegexValidator(regex=regex_account,
message="Asegúrese de que todos los caracteres son alfanuméricos")])
# Clave de cifrado
encryption_key = models.CharField(max_length=64, null=False, blank=False, verbose_name="Clave secreta de cifrado",
validators=[MinLengthValidator(8), MaxLengthValidator(10)])
# Prefijo del número de operación usado para identificar al servidor desde el que se realiza la petición
operation_number_prefix = models.CharField(max_length=20, null=False, blank=True,
verbose_name="Prefijo del número de operación",
validators=[MinLengthValidator(0), MaxLengthValidator(20),
RegexValidator(regex=regex_operation_number_prefix,
message="Asegúrese de sólo use caracteres alfanuméricos")])
# El TPV de Santander Elavon utiliza dos protocolos, "Redirect" y "Remote". Cada uno de ellos tiene dos entornos,
# uno para pruebas y otro para producción
REDIRECT_SERVICE_URL = {
"production": "https://hpp.santanderelavontpvvirtual.es/pay",
"testing": "https://hpp.prueba.santanderelavontpvvirtual.es/pay"
}
REMOTE_SERVICE_URL = {
"production": "https://remote.santanderelavontpvvirtual.es/remote",
"testing": "https://remote.prueba.santanderelavontpvvirtual.es/remote"
}
# URL de pago que variará según el entorno
url = None
# Identifica el importe de la venta, siempre será un número entero y donde los dos últimos dígitos representan los decimales
amount = None
# Tipo de moneda (forzado a Euro (EUR))
currency = "EUR"
# Timestamp requerido entre los datos POST enviados al servidor
timestamp = None
####################################################################
## Inicia el valor de la clave de cifrado en función del entorno
def __init_encryption_key__(self):
# Este modelo de TPV utiliza una única clave de cifrado tanto para el entorno de pruebas como para el de
# producción, por lo que no es necesario hacer nada especial
pass
####################################################################
## Constructor del TPV Santader Elavon
def __init__(self, *args, **kwargs):
super(VPOSSantanderElavon, self).__init__(*args, **kwargs)
def __unicode__(self):
return self.name
@classmethod
def form(cls):
from forms import VPOSSantanderElavonForm
return VPOSSantanderElavonForm
####################################################################
## Paso 1.1. Configuración del pago
def configurePayment(self, **kwargs):
# URL de pago según el entorno
self.url = {
"redirect": self.REDIRECT_SERVICE_URL[self.parent.environment],
"remote": self.REMOTE_SERVICE_URL[self.parent.environment]
}
# Amount format: according to the specifications, it must be formatted as a positive integer
self.amount = "{0:.2f}".format(float(self.parent.operation.amount)).replace(".", "")
# Timestamp with the local time required by the server, in YYYYMMDDHHMMSS format
self.timestamp = timezone.now().strftime("%Y%m%d%H%M%S")
####################################################################
## Step 1.2. POS preparation and operation number generation
def setupPayment(self, operation_number=None, code_len=40):
"""
Initialises the operation number unless one is given
explicitly in the arguments.
"""
if operation_number:
return operation_number
operation_number = ''
for i in range(code_len):
operation_number += random.choice('ABCDEFGHJKLMNPQRSTUWXYZ23456789')
# If settings provide an operation number prefix,
# prepend it with a "-" character in between
if self.operation_number_prefix:
operation_number = self.operation_number_prefix + "-" + operation_number
return operation_number[0:code_len]
####################################################################
## Step 1.3. Gets the payment data
## This method generates the payment form fields
## that will be filled in on the client side (via Javascript)
def getPaymentFormData(self):
data = {
# Identifies the merchant; provided by the entity
"MERCHANT_ID": self.merchant_id,
# Identifies the terminal; provided by the entity
"ACCOUNT": self.account,
# Identifies the order, invoice, delivery note number, etc.
"ORDER_ID": self.parent.operation.operation_number,
# Unformatted operation amount. Always an integer whose last two digits are the cents
"AMOUNT": self.amount,
"CURRENCY": self.currency,
# Transaction timestamp
"TIMESTAMP": self.timestamp,
# Character string computed by the merchant
"SHA1HASH": self._post_signature(),
# Do not settle the amount automatically (AUTO_SETTLE_FLAG=0). The charge() method must call an
# XML web service with the appropriate data for the payment to actually go through.
"AUTO_SETTLE_FLAG": "0",
# Confirmation URL. If one is given, it overrides the value configured on the POS account
"MERCHANT_RESPONSE_URL": self.merchant_response_url
}
form_data = {
"data": data,
"action": self.url['redirect'],
"enctype": "application/x-www-form-urlencoded",
"method": "post"
}
dlprint(u"Datos para formulario Santander Elavon: {0}".format(form_data))
return form_data
####################################################################
## Step 3.1. Gets the operation number and the data sent
## to us by the payment gateway.
@staticmethod
def receiveConfirmation(request, **kwargs):
dlprint(u"receiveConfirmation. Encoding:{0}".format(request.encoding))
# Operation store
try:
operation = VPOSPaymentOperation.objects.get(operation_number=request.POST.get("ORDER_ID"))
operation.confirmation_data = {"GET": request.GET.dict(), "POST": request.POST.dict()}
# charge() will need both the AUTHCODE and the PASREF, so both are stored in the
# operation.confirmation_code field, separated by the ":" character
operation.confirmation_code = "{pasref}:{authcode}".format(
pasref=request.POST.get("PASREF"),
authcode=request.POST.get("AUTHCODE")
)
operation.save()
dlprint(u"Operation {0} actualizada en receiveConfirmation()".format(operation.operation_number))
vpos = operation.virtual_point_of_sale
except VPOSPaymentOperation.DoesNotExist:
# If the operation does not exist, someone is trying
# to charge a non-existent operation
return False
# Initialise the delegate and the operation; this is essential
# for computing the signature later
vpos._init_delegated()
vpos.operation = operation
# Load the received values into the delegate, for the signature calculation
# Timestamp of the request sent to the gateway
vpos.delegated.timestamp = request.POST.get("TIMESTAMP")
# Identifies the merchant
vpos.delegated.merchant_id = request.POST.get("MERCHANT_ID")
# Identifies the order, invoice, delivery note number, etc.
vpos.delegated.order_id = request.POST.get("ORDER_ID")
# Operation result
vpos.delegated.result = request.POST.get("RESULT")
# Textual message describing the operation result
vpos.delegated.message = request.POST.get("MESSAGE", "")
dlprint("type(message): {0}".format(type(vpos.delegated.message)))
# Reference assigned by the POS
vpos.delegated.pasref = request.POST.get("PASREF")
# Authorisation code of the operation
vpos.delegated.authcode = request.POST.get("AUTHCODE")
# Signature sent by the payment gateway
vpos.delegated.sha1hash = request.POST.get("SHA1HASH")
# URLs for charge()
vpos.delegated.url = {
"redirect": VPOSSantanderElavon.REDIRECT_SERVICE_URL[vpos.environment],
"remote": VPOSSantanderElavon.REMOTE_SERVICE_URL[vpos.environment]
}
dlprint(u"Response Santander Elavon redirect: ")
dlprint(request.POST)
return vpos.delegated
####################################################################
## Step 3.2. Verifies that the data sent by the
## payment gateway identifies a purchase operation.
def verifyConfirmation(self):
# Check the response signature
firma_calculada = self._verification_signature()
dlprint(u"Firma recibida " + self.sha1hash)
dlprint(u"Firma calculada " + firma_calculada)
if self.sha1hash != firma_calculada:
return False
# Check the response code. All codes other than 00
# mean the gateway did not accept the operation.
#
# All the possible response codes are detailed below
#
# Code   Description
# ------ --------------------------------------------------------------------------------------------------
# 00     Operation completed successfully: the transaction has been processed and you can continue with the
#        sale.
#
# 1xx    A declined transaction. You can treat any 1xx code as a declined transaction and
#        inform the customer that they should retry the payment or try a different method.
#        If you wish, you can provide alternative flows based on specific codes such as the
#        ones listed below:
#        101 Declined by the bank: usually caused by a lack of funds or by an
#            incorrect expiry date.
#        102 Bank referral (treat as declined in an automated system, for example, on the
#            Internet)
#        103 Lost or stolen card
#        107 The anti-fraud checks have blocked the transaction.
#        1xx Another infrequent reason. Treat as declined, the same as code 101.
#
# 2xx    Error in the banking systems: normally you can ask the customer to try again
#        later. The resolution time depends on the problem.
#
# 3xx    Error in the Santander Elavon virtual POS system: normally you can ask the customer
#        to try again later. The resolution time depends on the problem.
#
# 5xx    Incorrect content or formatting of the XML messages. These are development errors,
#        configuration errors or customer errors. A full list follows, but
#        broadly:
#        508 Development problem: check the message and fix your integration.
#        509 Customer problem: check the message and ask the customer to confirm the payment
#            details and try again.
#        5xx Configuration problem: check the message. Contact the Santander Elavon virtual POS
#            support team to resolve these problems.
#
# 666    Customer disabled: your Santander Elavon virtual POS account has been suspended. Contact
#        the Santander Elavon virtual POS support team for more
#        information.
if self.result != u"00":
return False
return True
####################################################################
## Step 3.3a. Performs the charge, generates the response to the gateway
## and tells the payment gateway to mark the operation
## as paid.
def charge(self):
dlprint(u"responseOk")
# Send a "settle" operation to the POS, using the Santander Elavon "Remote" protocol
dlprint(u"confirmation_code almacenado: {0}".format(self.parent.operation.confirmation_code))
self.pasref, self.authcode = self.parent.operation.confirmation_code.split(":", 1)
xml_string = u'<request timestamp="{timestamp}" type="settle"><merchantid>{merchant_id}</merchantid><account>{account}</account><orderid>{order_id}</orderid><pasref>{pasref}</pasref><authcode>{authcode}</authcode><sha1hash>{sha1hash}</sha1hash></request>'.format(
timestamp=self.timestamp,
merchant_id=self.merchant_id,
account=self.account,
order_id=self.parent.operation.operation_number,
pasref=self.pasref,
authcode=self.authcode,
sha1hash=self._settle_signature()
)
# Send the HTTP POST request
dlprint(u"Request SETTLE: {0}".format(xml_string))
request = urllib2.Request(self.url['remote'], xml_string, headers={"Content-Type": "application/xml"})
# Read the returned response, which comes as plain text
response = urllib2.urlopen(request)
response_string = response.read().decode("utf8")
dlprint(u"Response SETTLE: {0}".format(response_string))
# Store the response in the operation data
extended_confirmation_data = u"{0}\n\nRespuesta settle:\n{1}".format(self.parent.operation.confirmation_data,
response_string)
self.parent.operation.confirmation_data = extended_confirmation_data
self.parent.operation.save()
dlprint(u"Operation {0} actualizada en charge()".format(self.parent.operation.operation_number))
# Check that the charge went through correctly by parsing the response XML
try:
dlprint(u"Antes de parser BeautifulSoup")
soup = BeautifulSoup(response_string, "html.parser")
dlprint(u"Después de parser BeautifulSoup")
if soup.response.result.string != u"00":
dlprint(u"Response SETTLE operación no autorizada")
raise VPOSCantCharge(u"Cargo denegado (código TPV {0})".format(soup.response.result.string))
else:
dlprint(u"Response SETTLE operación autorizada")
except Exception as e:
dlprint(u"EXCEPCIÓN: {0}".format(e))
raise
# The Santander Elavon "Redirect" payment gateway expects to receive an HTML template that will be shown
# to the customer.
# Since that POS does not redirect the customer's browser to any URL, the redirect to "url_ok"
# is done via Javascript.
return HttpResponse(u"""
<html>
<head>
<title>Operación realizada</title>
<script type="text/javascript">
window.location.assign("{0}");
</script>
</head>
<body>
<p><strong>Operación realizada con éxito</strong></p>
<p>Pulse <a href="{0}">este enlace</a> si su navegador no le redirige automáticamente</p>
</body>
</html>
""".format(self.parent.operation.url_ok))
####################################################################
## Step 3.3b. If there was an error in the payment, a negative
## response must be given to the banking gateway.
def responseNok(self, **kwargs):
# Send a "void" operation using the Santander Elavon "Remote" protocol
dlprint(u"confirmation_code almacenado: {0}".format(self.parent.operation.confirmation_code))
self.pasref, self.authcode = self.parent.operation.confirmation_code.split(":", 1)
xml_string = u'<request timestamp="{timestamp}" type="void"><merchantid>{merchant_id}</merchantid><account>{account}</account><orderid>{order_id}</orderid><pasref>{pasref}</pasref><authcode>{authcode}</authcode><sha1hash>{sha1hash}</sha1hash></request>'.format(
timestamp=self.timestamp,
merchant_id=self.merchant_id,
account=self.account,
order_id=self.parent.operation.operation_number,
pasref=self.pasref,
authcode=self.authcode,
sha1hash=self._void_signature()
)
# Send the HTTP POST request
dlprint(u"Request VOID: {0}".format(xml_string))
request = urllib2.Request(self.url['remote'], xml_string, headers={"Content-Type": "application/xml"})
# Read the returned response, which comes as plain text
response = urllib2.urlopen(request)
response_string = response.read().decode("utf8")
dlprint(u"Response VOID: {0}".format(response_string))
# Store the response in the operation data
extended_confirmation_data = u"{0}\n\nRespuesta void:\n{1}".format(self.parent.operation.confirmation_data,
response_string)
self.parent.operation.confirmation_data = extended_confirmation_data
self.parent.operation.save()
dlprint(u"Operation {0} actualizada en responseNok()".format(self.parent.operation.operation_number))
# The Santander Elavon "Redirect" payment gateway does not expect any special value here.
dlprint(u"responseNok")
# The Santander Elavon "Redirect" payment gateway expects to receive an HTML template that will be shown
# to the customer.
# Since that POS does not redirect the customer's browser to any URL, the redirect to "url_nok"
# is done via Javascript.
return HttpResponse(u"""
<html>
<head>
<title>Operación cancelada</title>
<script type="text/javascript">
window.location.assign("{0}");
</script>
</head>
<body>
<p><strong>Operación cancelada</strong></p>
<p>Pulse <a href="{0}">este enlace</a> si su navegador no le redirige automáticamente</p>
</body>
</html>
""".format(self.parent.operation.url_nok))
####################################################################
## Step R. (Refund) Sets the POS to refund mode
## TODO: Not implemented
def refund(self, operation_sale_code, refund_amount, description):
raise VPOSOperationNotImplemented(u"No se ha implementado la operación de devolución particular para Santander-Elavon.")
####################################################################
## Step R2.a. Positive response to the asynchronous refund confirmation
def refund_response_ok(self, extended_status=""):
raise VPOSOperationNotImplemented(u"No se ha implementado la operación de devolución particular para Santander-Elavon.")
####################################################################
## Step R2.b. Negative response to the asynchronous refund confirmation
def refund_response_nok(self, extended_status=""):
raise VPOSOperationNotImplemented(u"No se ha implementado la operación de devolución particular para Santander-Elavon.")
####################################################################
## Signature generator for the POST submission to the "Redirect" service
def _post_signature(self):
"""Calcula la firma a incorporar en el formulario de pago"""
self.__init_encryption_key__()
dlprint(u"Clave de cifrado es " + self.encryption_key)
amount = "{0:.2f}".format(float(self.parent.operation.amount)).replace(".", "")
signature1 = u"{timestamp}.{merchant_id}.{order_id}.{amount}.{currency}".format(
merchant_id=self.merchant_id,
order_id=self.parent.operation.operation_number,
amount=amount,
currency=self.currency,
timestamp=self.timestamp
)
firma1 = hashlib.sha1(signature1).hexdigest()
dlprint(u"FIRMA1 datos: {0}".format(signature1))
dlprint(u"FIRMA1 hash: {0}".format(firma1))
signature2 = u"{firma1}.{secret}".format(firma1=firma1, secret=self.encryption_key)
firma2 = hashlib.sha1(signature2).hexdigest()
dlprint(u"FIRMA2 datos: {0}".format(signature2))
dlprint(u"FIRMA2 hash: {0}".format(firma2))
return firma2
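# Illustrative sketch of the chained hash above (hypothetical values, not real gateway data):
#   signature1 = "20140101120000.mymerchant.ORDER-1.1000.EUR"  -> firma1 = sha1(signature1).hexdigest()
#   signature2 = firma1 + "." + encryption_key                 -> firma2 = sha1(signature2).hexdigest()
# firma2 is the value sent in the SHA1HASH form field.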
####################################################################
## Signature generator for the XML POST submission to the "settle"/"void" service ("Remote" protocol)
def _settle_void_signature(self, label=None):
"""Calcula la firma a incorporar en el en la petición XML 'settle' o 'void'"""
self.__init_encryption_key__()
dlprint(u"Calcular firma para {0}. La clave de cifrado es {1}".format(label, self.encryption_key))
signature1 = u"{timestamp}.{merchant_id}.{order_id}...".format(
merchant_id=self.merchant_id,
order_id=self.parent.operation.operation_number,
timestamp=self.timestamp
)
firma1 = hashlib.sha1(signature1).hexdigest()
dlprint(u"FIRMA1 datos: {0}".format(signature1))
dlprint(u"FIRMA1 hash: {0}".format(firma1))
signature2 = u"{firma1}.{secret}".format(firma1=firma1, secret=self.encryption_key)
firma2 = hashlib.sha1(signature2).hexdigest()
dlprint(u"FIRMA2 datos: {0}".format(signature2))
dlprint(u"FIRMA2 hash: {0}".format(firma2))
return firma2
####################################################################
## Signature generator for the XML POST submission to the "settle" service ("Remote" protocol)
def _settle_signature(self):
"""Calcula la firma a incorporar en el en la petición XML 'void'"""
return self._settle_void_signature(label="SETTLE")
####################################################################
## Signature generator for the XML POST submission to the "void" service ("Remote" protocol)
def _void_signature(self):
"""Calcula la firma a incorporar en el en la petición XML 'void'"""
return self._settle_void_signature(label="VOID")
####################################################################
## Signature generator for verification
def _verification_signature(self):
""" Calcula la firma de verificación de una respuesta de la pasarela de pagos """
self.__init_encryption_key__()
dlprint(u"Clave de cifrado es " + self.encryption_key)
signature1 = u"{timestamp}.{merchant_id}.{order_id}.{result}.{message}.{pasref}.{authcode}".format(
timestamp=self.timestamp,
merchant_id=self.merchant_id,
order_id=self.parent.operation.operation_number,
result=self.result,
message=self.message,
pasref=self.pasref,
authcode=self.authcode
)
firma1 = hashlib.sha1(signature1.encode("utf-8")).hexdigest()
dlprint(u"FIRMA1 datos: {0}".format(signature1))
dlprint(u"FIRMA1 hash: {0}".format(firma1))
signature2 = "{firma1}.{secret}".format(firma1=firma1, secret=self.encryption_key)
firma2 = hashlib.sha1(signature2).hexdigest()
dlprint(u"FIRMA2 datos: {0}".format(signature2))
dlprint(u"FIRMA2 hash: {0}".format(firma2))
return firma2
class VPOSBitpay(VirtualPointOfSale):
"""
Cryptocurrency payment using the bitpay.com platform
Following the documentation: https://bitpay.com/api
"""
CURRENCIES = (
('EUR', 'Euro'),
('USD', 'Dolares'),
('BTC', 'Bitcoin'),
)
# Operation speed as a function of the strength of the blockchain confirmation.
TRANSACTION_SPEED = (
('high', 'Alta'),  # Assumed confirmed the moment it is executed.
('medium', 'Media'),  # Assumed confirmed once 1 block is verified. (~10 min)
('low', 'Baja'),  # Assumed confirmed once 6 blocks are verified (~1 hour)
)
# Relationship with the parent (POS).
# Setting "+" as the "related_name" prevents the relation from being
# followed back here from the parent (since each class that inherits
# from it lives in its own table and that would get messy).
parent = models.OneToOneField(VirtualPointOfSale, parent_link=True, related_name="+", null=False, db_column="vpos_id")
testing_api_key = models.CharField(max_length=512, null=True, blank=True, verbose_name="API Key de Bitpay para entorno de test")
production_api_key = models.CharField(max_length=512, null=False, blank=False, verbose_name="API Key de Bitpay para entorno de producción")
currency = models.CharField(max_length=3, choices=CURRENCIES, default='EUR', null=False, blank=False, verbose_name="Moneda (EUR, USD, BTC)")
transaction_speed = models.CharField(max_length=10, choices=TRANSACTION_SPEED, default='medium', null=False, blank=False, verbose_name="Velocidad de la operación")
notification_url = models.URLField(verbose_name="Url notificaciones actualización estados (https)", null=False, blank=False)
# Prefix used to identify the server the request is made from, when using a POS proxy.
operation_number_prefix = models.CharField(max_length=20, null=True, blank=True, verbose_name="Prefijo del número de operación")
bitpay_url = {
"production": {
"api": "https://bitpay.com/api/",
"create_invoice": "https://bitpay.com/api/invoice",
"payment": "https://bitpay.com/invoice/"
},
"testing": {
"api": "https://test.bitpay.com/api/",
"create_invoice": "https://test.bitpay.com/api/invoice",
"payment": "https://test.bitpay.com/invoice/"
}
}
def configurePayment(self, **kwargs):
self.api_key = self.testing_api_key
if self.parent.environment == "production":
self.api_key = self.production_api_key
self.importe = self.parent.operation.amount
def setupPayment(self, operation_number=None, code_len=40):
"""
Initialises the payment
Gets the operation number, which in BitPay's case is the given invoice id
:param operation_number:
:param code_len:
:return:
"""
dlprint("BitPay.setupPayment")
if operation_number:
self.bitpay_id = operation_number
dlprint("Rescato el operation number para esta venta {0}".format(self.bitpay_id))
return self.bitpay_id
params = {
'price': self.importe,
'currency': self.currency,
'redirectURL': self.parent.operation.url_ok,
'itemDesc': self.parent.operation.description,
'notificationURL': self.notification_url,
# Free-form fields for the developer; you can include any useful information.
# In our case, the operation prefix, which helps the POS proxy identify the server
# the operation was executed from.
'posData': '{"operation_number_prefix": "' + str(self.operation_number_prefix) + '"}',
'fullNotifications': True
}
# Payment URL according to the environment
url = self.bitpay_url[self.parent.environment]["create_invoice"]
post = json.dumps(params)
req = urllib2.Request(url)
base64string = base64.encodestring(self.api_key).replace('\n', '')
req.add_header("Authorization", "Basic %s" % base64string)
req.add_header("Content-Type", "application/json")
req.add_header("Content-Length", len(post))
json_response = urllib2.urlopen(req, post)
response = json.load(json_response)
dlprint(u"Parametros que enviamos a Bitpay para crear la operación")
dlprint(params)
dlprint(u"Respuesta de Bitpay")
dlprint(response)
if response.get("error"):
error = response.get("error")
message = error.get("message")
error_type = error.get("type")
raise ValueError(u"ERROR. {0} - {1}".format(message, error_type))
if not response.get("id"):
raise ValueError(u"ERROR. La respuesta no contiene id de invoice.")
self.bitpay_id = response.get("id")
return self.bitpay_id
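# For illustration only (hypothetical, abridged): the invoice-creation response is expected to look like
#   {"id": "<invoice_id>", "status": "new", ...}  on success, or
#   {"error": {"message": "...", "type": "..."}}  on failure;
# only the "id" and "error" fields are read above.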
def getPaymentFormData(self):
"""
Generates the form (in this case it prepares a submit to the bitpay page).
"""
url = self.bitpay_url[self.parent.environment]["payment"]
data = {"id": self.bitpay_id}
form_data = {
"data": data,
"action": url,
"method": "get"
}
return form_data
@staticmethod
def receiveConfirmation(request, **kwargs):
confirmation_body_param = json.loads(request.body)
# Operation store
try:
operation = VPOSPaymentOperation.objects.get(operation_number=confirmation_body_param.get("id"))
if operation.status != "pending":
raise VPOSOperationAlreadyConfirmed(u"Operación ya confirmada")
operation.confirmation_data = {"GET": request.GET.dict(), "POST": request.POST.dict(), "BODY": confirmation_body_param}
operation.save()
dlprint("Operation {0} actualizada en receiveConfirmation()".format(operation.operation_number))
vpos = operation.virtual_point_of_sale
except VPOSPaymentOperation.DoesNotExist:
# If the operation does not exist, someone is trying
# to charge a non-existent operation
return False
# Initialise the delegate and the operation
vpos._init_delegated()
vpos.operation = operation
vpos.delegated.bitpay_id = operation.confirmation_data["BODY"].get("id")
vpos.delegated.status = operation.confirmation_data["BODY"].get("status")
dlprint(u"Lo que recibimos de BitPay: ")
dlprint(operation.confirmation_data["BODY"])
return vpos.delegated
def verifyConfirmation(self):
# Check whether the submission is valid
# To do so, check whether there is any operation with the same
# operation number
operation = VPOSPaymentOperation.objects.filter(operation_number=self.bitpay_id, status='pending')
if operation:
# If a confirmed status is received
# NOTE: Bitpay has the following possible statuses:
# new, paid, confirmed, complete, expired, invalid.
if self.status == "paid":
dlprint(u"La operación es confirmada")
return True
return False
def charge(self):
dlprint(u"Marca la operacion como pagada")
return HttpResponse("OK")
def responseNok(self, extended_status=""):
dlprint("responseNok")
return HttpResponse("NOK")
####################################################################
## Step R1 (Refund) Sets the POS to refund mode and executes the operation
def refund(self, operation_sale_code, refund_amount, description):
raise VPOSOperationNotImplemented(u"No se ha implementado la operación de devolución particular para Bitpay.")
####################################################################
## Step R2.a. Positive response to the asynchronous refund confirmation
def refund_response_ok(self, extended_status=""):
raise VPOSOperationNotImplemented(u"No se ha implementado la operación de devolución particular para Bitpay.")
####################################################################
## Step R2.b. Negative response to the asynchronous refund confirmation
def refund_response_nok(self, extended_status=""):
raise VPOSOperationNotImplemented(u"No se ha implementado la operación de devolución particular para Bitpay.")
| [] | [] | [] | [] | [] | python | 0 | 0 | |
cryptic_contest/wsgi.py | """
WSGI config for cryptic_contest project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cryptic_contest.settings")
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
app/authorization/auth_impl.py | import os
from werkzeug.exceptions import Unauthorized
from werkzeug.security import check_password_hash, generate_password_hash
from app.authorization import auth
__usuario = os.environ.get('HTTPAUTH_USER')
__senha = os.environ.get('HTTPAUTH_PASS')
@auth.verify_password
def verify_password(username, password):
if username == __usuario:
return check_password_hash(generate_password_hash(__senha), password)
# raise Unauthorized
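# Illustrative usage sketch (hedged): assuming `auth` is a flask_httpauth HTTPBasicAuth instance,
# a view can be protected with the standard decorator. The route and view name below are hypothetical.
#
# from app.authorization import auth
#
# @app.route('/admin')
# @auth.login_required
# def admin_view():
#     return 'ok'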
| [] | [] | [
"HTTPAUTH_USER",
"HTTPAUTH_PASS"
] | [] | ["HTTPAUTH_USER", "HTTPAUTH_PASS"] | python | 2 | 0 | |
manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'vuln_app.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
modules/core/python/farm_ng/core/blobstore.py | import logging
import os
import pathlib
import google.protobuf.json_format as json_format
from farm_ng.core.resource_pb2 import Bucket
logger = logging.getLogger('blobstore')
logger.setLevel(logging.INFO)
class InvalidBucketException(Exception):
pass
class Blobstore:
def __init__(self):
self.root = os.getenv('BLOBSTORE_ROOT')
if (not self.root):
raise Exception('BLOBSTORE_ROOT not set.')
def read_protobuf_from_json_file(self, path, message):
self._check_valid_path(path)
with open(os.path.join(self.root, path)) as f:
json_format.Parse(f.read(), message)
def read_protobuf_from_binary_file(self, path, message):
self._check_valid_path(path)
with open(os.path.join(self.root, path), 'rb') as f:
message.ParseFromString(f.read())
def bucket_relative_path(self, bucket_id):
name = Bucket.Name(bucket_id)
return name[len('BUCKET_'):].lower()
def _check_valid_path(self, path):
valid_buckets = [self.bucket_relative_path(id) for id in Bucket.values()]
target_bucket = pathlib.Path(path).parts[0]
if target_bucket not in valid_buckets:
raise InvalidBucketException(f'Invalid bucket: {target_bucket}')
def _write_protobuf_to_json_file(self, path, message):
raise NotImplementedError()
def _write_protobuf_to_binary_file(self, path, message):
raise NotImplementedError()
def write_protobuf_as_resource(self, path, message, serialization='json'):
raise NotImplementedError()
def read_protobuf_from_resource(self, resource):
raise NotImplementedError()
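# Illustrative usage sketch (hedged): the bucket name and message type below are hypothetical and
# depend on the Bucket enum values and on the protobuf expected at that path.
#
# blobstore = Blobstore()  # requires the BLOBSTORE_ROOT environment variable
# msg = SomeMessage()      # whatever protobuf message type you expect at that path
# blobstore.read_protobuf_from_json_file('logs/capture01/event.json', msg)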
| [] | [] | [
"BLOBSTORE_ROOT"
] | [] | ["BLOBSTORE_ROOT"] | python | 1 | 0 | |
common/db.go | package common
import (
"context"
"github.com/Venafi/vcert/pkg/endpoint"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/aws/external"
"github.com/aws/aws-sdk-go-v2/service/dynamodb"
"github.com/aws/aws-sdk-go-v2/service/dynamodb/dynamodbattribute"
"os"
)
var tableName string
const primaryKey = "PolicyID"
type venafiError string
func (e venafiError) Error() string {
return string(e)
}
const PolicyNotFound venafiError = "policy not found"
const PolicyFoundButEmpty venafiError = "policy found but empty"
func init() {
tableName = os.Getenv("DYNAMODB_ZONES_TABLE")
if tableName == "" {
tableName = "VenafiCertPolicy"
}
cfg, err := external.LoadDefaultAWSConfig()
if err != nil {
panic("unable to load SDK config, " + err.Error())
}
db = dynamodb.New(cfg)
}
var db *dynamodb.Client
func GetPolicy(name string) (p endpoint.Policy, err error) {
input := &dynamodb.GetItemInput{
TableName: aws.String(tableName),
Key: map[string]dynamodb.AttributeValue{
primaryKey: {
S: aws.String(name),
},
},
}
result, err := db.GetItemRequest(input).Send(context.Background())
if err != nil {
return
}
if result.Item == nil {
err = PolicyNotFound
return
}
if len(result.Item) == 1 {
err = PolicyFoundButEmpty
return
}
err = dynamodbattribute.UnmarshalMap(result.Item, &p)
if err != nil {
return
}
return
}
func CreateEmptyPolicy(name string) error {
av := make(map[string]dynamodb.AttributeValue)
av[primaryKey] = dynamodb.AttributeValue{S: aws.String(name)}
input := &dynamodb.PutItemInput{
Item: av,
TableName: aws.String(tableName),
}
_, err := db.PutItemRequest(input).Send(context.Background())
return err
}
func SavePolicy(name string, p endpoint.Policy) error {
av, err := dynamodbattribute.MarshalMap(p)
if err != nil {
return err
}
av[primaryKey] = dynamodb.AttributeValue{S: aws.String(name)}
input := &dynamodb.PutItemInput{
Item: av,
TableName: aws.String(tableName),
}
_, err = db.PutItemRequest(input).Send(context.Background())
return err
}
func GetAllPoliciesNames() (names []string, err error) {
var t = db
result, err := t.ScanRequest(&dynamodb.ScanInput{TableName: &tableName}).Send(context.Background())
if err != nil {
return
}
names = make([]string, 0, len(result.Items))
for _, v := range result.Items {
name := *v[primaryKey].S
names = append(names, name)
}
return
}
func DeletePolicy(name string) error {
input := &dynamodb.DeleteItemInput{
TableName: aws.String(tableName),
Key: map[string]dynamodb.AttributeValue{
primaryKey: {
S: aws.String(name),
},
},
}
_, err := db.DeleteItemRequest(input).Send(context.Background())
if err != nil {
return err
}
return nil
}
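// Illustrative usage sketch (the zone name is hypothetical, error handling abridged):
//
//	p, err := GetPolicy("Default")
//	if err == PolicyNotFound {
//		// register an empty placeholder so the policy can be filled in later
//		err = CreateEmptyPolicy("Default")
//	}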
| [
"\"DYNAMODB_ZONES_TABLE\""
] | [] | [
"DYNAMODB_ZONES_TABLE"
] | [] | ["DYNAMODB_ZONES_TABLE"] | go | 1 | 0 | |
main.go | package main
import (
"context"
"flag"
"fmt"
"io/ioutil"
"log"
"net"
"os"
"os/signal"
"syscall"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
)
var (
kubeconfig = flag.String("kubeconfig", os.Getenv("KUBECONFIG"), "path to the kube-config")
master = flag.String("master", "", "Master if not using the default master")
keyPath = flag.String("key-path", "wg.key", "path to the private key")
cfgPath = flag.String("config", "/etc/wireguard/kwg.cfg", "WireGuard configuration to write")
nodeName = flag.String("node-name", func() string {
s, _ := os.Hostname()
return s
}(), "node name")
ifName = "kwg"
k *kubernetes.Clientset
ctx, cancel = context.WithCancel(context.Background())
stopCh = make(chan struct{}, 1)
)
func main() {
flag.Parse()
log.Print("knet-wg starting")
err := connect()
if err != nil {
log.Fatal(err)
}
var key wgtypes.Key
{ // ensure we have a key
keyData, err := ioutil.ReadFile(*keyPath)
if err == nil {
key, err = wgtypes.ParseKey(string(keyData))
if err != nil {
log.Fatal(err)
}
} else if os.IsNotExist(err) {
key, err = wgtypes.GeneratePrivateKey()
if err != nil {
log.Fatal(err)
}
err = ioutil.WriteFile(*keyPath, []byte(key.String()), 0600)
if err != nil {
log.Fatal(err)
}
} else {
log.Fatal(err)
}
}
// ------------------------------------------------------------------------
node, err := k.CoreV1().Nodes().Get(ctx, *nodeName, metav1.GetOptions{})
if err != nil {
log.Fatal("failed to get node ", *nodeName, ": ", err)
}
{ // ensure the node has published its key
if node.Annotations[pubkeyAnnotation] != key.PublicKey().String() {
log.Print("setting our pubkey annotation to node")
node.Annotations[pubkeyAnnotation] = key.PublicKey().String()
_, err = k.CoreV1().Nodes().Update(ctx, node, metav1.UpdateOptions{})
if err != nil {
log.Fatal(err)
}
}
}
// ------------------------------------------------------------------------
iface, _ := net.InterfaceByName(ifName)
{ // create the kwg interface
// err is not usable here, only an internal value is set in OpError.Err
if iface == nil {
log.Print("creating interface ", ifName)
_ = run("ip", "link", "add", ifName, "type", "wireguard")
iface, _ = net.InterfaceByName(ifName)
}
err = run("ip", "link", "set", ifName, "up")
if err != nil {
log.Fatal("failed to set link up: ", err)
}
syncNodeIPs(node.Spec.PodCIDRs, iface)
}
// ------------------------------------------------------------------------
writeCNIConfig(node, iface)
// ------------------------------------------------------------------------
factory := informers.NewSharedInformerFactory(k, time.Second*30)
factory.Start(stopCh)
coreFactory := factory.Core().V1()
nodesInformer := coreFactory.Nodes().Informer()
nodesInformer.AddEventHandler(&nodeEventHandler{
hs: map[string]uint64{},
nodes: map[string]nodeInfo{},
})
nodesInformer.Run(stopCh)
}
func connect() (err error) {
cfg, err := clientcmd.BuildConfigFromFlags(*master, *kubeconfig)
if err != nil {
err = fmt.Errorf("failed to build Kubernetes config: %w", err)
return
}
c, err := kubernetes.NewForConfig(cfg)
if err != nil {
err = fmt.Errorf("failed to build Kubernetes client: %w", err)
return
}
go func() {
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt, syscall.SIGTERM, syscall.SIGINT)
<-c
log.Print("interrupted, cancelling operations...")
cancel()
close(stopCh)
<-c
log.Print("second interrupt, exiting now.")
os.Exit(1)
}()
k = c
return
}
| [
"\"KUBECONFIG\""
] | [] | [
"KUBECONFIG"
] | [] | ["KUBECONFIG"] | go | 1 | 0 | |
databuilder/sample_snowflake_data_loader_test.py | # Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
"""
This is a example script which demo how to load data into neo4j without using Airflow DAG.
"""
import textwrap
import logging
import os
import sys
import uuid
from elasticsearch.client import Elasticsearch
from pyhocon import ConfigFactory
from databuilder.extractor.neo4j_extractor import Neo4jExtractor
from databuilder.extractor.neo4j_search_data_extractor import Neo4jSearchDataExtractor
from databuilder.extractor.snowflake_metadata_extractor import SnowflakeMetadataExtractor
from databuilder.extractor.sql_alchemy_extractor import SQLAlchemyExtractor
from databuilder.job.job import DefaultJob
from databuilder.loader.file_system_elasticsearch_json_loader import FSElasticsearchJSONLoader
from databuilder.loader.file_system_neo4j_csv_loader import FsNeo4jCSVLoader
from databuilder.publisher import neo4j_csv_publisher
from databuilder.publisher.elasticsearch_publisher import ElasticsearchPublisher
from databuilder.publisher.neo4j_csv_publisher import Neo4jCsvPublisher
from databuilder.task.task import DefaultTask
from databuilder.transformer.base_transformer import NoopTransformer
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.INFO)
# Disable snowflake logging
logging.getLogger("snowflake.connector.network").disabled = True
# Database to extract metadata from
SNOWFLAKE_DATABASE_KEY = 'LA_HAUS_DEV'
# set env NEO4J_HOST to override localhost
# NEO4J_ENDPOINT = f'bolt://{os.getenv("NEO4J_HOST", "localhost")}:7687'
NEO4J_ENDPOINT = 'bolt://localhost:7687'
neo4j_endpoint = NEO4J_ENDPOINT
neo4j_user = 'neo4j'
neo4j_password = 'test'
IGNORED_SCHEMAS = ['\'DVCORE\'', '\'INFORMATION_SCHEMA\'', '\'STAGE_ORACLE\'']
SUPPORTED_SCHEMAS = ['public']
SUPPORTED_SCHEMA_SQL_IN_CLAUSE = "('{schemas}')".format(schemas="', '".join(SUPPORTED_SCHEMAS))
es = Elasticsearch([
{'host': 'localhost'},
])
# todo: connection string needs to change
def connection_string():
# Refer this doc: https://docs.snowflake.com/en/user-guide/sqlalchemy.html#connection-parameters
# for supported connection parameters and configurations
user = os.environ['SF_DB_USER']
password = os.environ['SF_DB_PASSWORD']
account = os.environ['SF_DB_ACCOUNT']
# specify a warehouse to connect to.
warehouse = os.environ['SF_DB_WAREHOUSE']
role = os.environ['SF_DB_ROLE']
return f'snowflake://{user}:{password}@{account}/{SNOWFLAKE_DATABASE_KEY}?warehouse={warehouse}&role={role}'
def create_sample_snowflake_job():
# where_clause = f"WHERE c.TABLE_SCHEMA not in ({','.join(IGNORED_SCHEMAS)}) \
# AND c.TABLE_SCHEMA not like 'STAGE_%' \
# AND c.TABLE_SCHEMA not like 'HIST_%' \
# AND c.TABLE_SCHEMA not like 'SNAP_%' \
# AND lower(c.COLUMN_NAME) not like 'dw_%';"
where_clause_suffix = textwrap.dedent("""
WHERE c.TABLE_SCHEMA IN {schemas}
AND lower(c.COLUMN_NAME) not like 'dw_%';
""").format(schemas=SUPPORTED_SCHEMA_SQL_IN_CLAUSE)
print(where_clause_suffix)
tmp_folder = '/var/tmp/amundsen/tables'
node_files_folder = f'{tmp_folder}/nodes'
relationship_files_folder = f'{tmp_folder}/relationships'
sql_extractor = SnowflakeMetadataExtractor()
csv_loader = FsNeo4jCSVLoader()
task = DefaultTask(extractor=sql_extractor,
loader=csv_loader)
job_config = ConfigFactory.from_dict({
f'extractor.snowflake.extractor.sqlalchemy.{SQLAlchemyExtractor.CONN_STRING}': connection_string(),
f'extractor.snowflake.{SnowflakeMetadataExtractor.SNOWFLAKE_DATABASE_KEY}': SNOWFLAKE_DATABASE_KEY,
f'extractor.snowflake.{SnowflakeMetadataExtractor.WHERE_CLAUSE_SUFFIX_KEY}': where_clause_suffix,
f'loader.filesystem_csv_neo4j.{FsNeo4jCSVLoader.NODE_DIR_PATH}': node_files_folder,
f'loader.filesystem_csv_neo4j.{FsNeo4jCSVLoader.RELATION_DIR_PATH}': relationship_files_folder,
f'loader.filesystem_csv_neo4j.{FsNeo4jCSVLoader.SHOULD_DELETE_CREATED_DIR}': True,
f'loader.filesystem_csv_neo4j.{FsNeo4jCSVLoader.FORCE_CREATE_DIR}': True,
f'publisher.neo4j.{neo4j_csv_publisher.NODE_FILES_DIR}': node_files_folder,
f'publisher.neo4j.{neo4j_csv_publisher.RELATION_FILES_DIR}': relationship_files_folder,
f'publisher.neo4j.{neo4j_csv_publisher.NEO4J_END_POINT_KEY}': neo4j_endpoint,
f'publisher.neo4j.{neo4j_csv_publisher.NEO4J_USER}': neo4j_user,
f'publisher.neo4j.{neo4j_csv_publisher.NEO4J_PASSWORD}': neo4j_password,
f'publisher.neo4j.{neo4j_csv_publisher.JOB_PUBLISH_TAG}': 'unique_tag'
})
job = DefaultJob(conf=job_config,
task=task,
publisher=Neo4jCsvPublisher())
return job.launch()
def create_es_publisher_sample_job(elasticsearch_index_alias='table_search_index',
elasticsearch_doc_type_key='table',
model_name='databuilder.models.table_elasticsearch_document.TableESDocument',
cypher_query=None,
elasticsearch_mapping=None):
"""
:param elasticsearch_index_alias: alias for Elasticsearch used in
amundsensearchlibrary/search_service/config.py as an index
:param elasticsearch_doc_type_key: name the ElasticSearch index is prepended with. Defaults to `table` resulting in
`table_search_index`
:param model_name: the Databuilder model class used in transporting between Extractor and Loader
:param cypher_query: Query handed to the `Neo4jSearchDataExtractor` class, if None is given (default)
it uses the `Table` query baked into the Extractor
:param elasticsearch_mapping: Elasticsearch field mapping "DDL" handed to the `ElasticsearchPublisher` class,
if None is given (default) it uses the `Table` query baked into the Publisher
"""
# loader saves data to this location and publisher reads it from here
extracted_search_data_path = '/var/tmp/amundsen/search_data.json'
task = DefaultTask(loader=FSElasticsearchJSONLoader(),
extractor=Neo4jSearchDataExtractor(),
transformer=NoopTransformer())
# elastic search client instance
elasticsearch_client = es
# unique name of new index in Elasticsearch
elasticsearch_new_index_key = 'tables' + str(uuid.uuid4())
job_config = ConfigFactory.from_dict({
f'extractor.search_data.extractor.neo4j.{Neo4jExtractor.GRAPH_URL_CONFIG_KEY}': neo4j_endpoint,
f'extractor.search_data.extractor.neo4j.{Neo4jExtractor.MODEL_CLASS_CONFIG_KEY}': model_name,
f'extractor.search_data.extractor.neo4j.{Neo4jExtractor.NEO4J_AUTH_USER}': neo4j_user,
f'extractor.search_data.extractor.neo4j.{Neo4jExtractor.NEO4J_AUTH_PW}': neo4j_password,
f'loader.filesystem.elasticsearch.{FSElasticsearchJSONLoader.FILE_PATH_CONFIG_KEY}': extracted_search_data_path,
f'loader.filesystem.elasticsearch.{FSElasticsearchJSONLoader.FILE_MODE_CONFIG_KEY}': 'w',
f'publisher.elasticsearch.{ElasticsearchPublisher.FILE_PATH_CONFIG_KEY}': extracted_search_data_path,
f'publisher.elasticsearch.{ElasticsearchPublisher.FILE_MODE_CONFIG_KEY}': 'r',
f'publisher.elasticsearch.{ElasticsearchPublisher.ELASTICSEARCH_CLIENT_CONFIG_KEY}':
elasticsearch_client,
f'publisher.elasticsearch.{ElasticsearchPublisher.ELASTICSEARCH_NEW_INDEX_CONFIG_KEY}':
elasticsearch_new_index_key,
f'publisher.elasticsearch.{ElasticsearchPublisher.ELASTICSEARCH_DOC_TYPE_CONFIG_KEY}':
elasticsearch_doc_type_key,
f'publisher.elasticsearch.{ElasticsearchPublisher.ELASTICSEARCH_ALIAS_CONFIG_KEY}':
elasticsearch_index_alias,
})
# only optionally add these keys, so need to dynamically `put` them
if cypher_query:
job_config.put(f'extractor.search_data.{Neo4jSearchDataExtractor.CYPHER_QUERY_CONFIG_KEY}',
cypher_query)
if elasticsearch_mapping:
job_config.put(f'publisher.elasticsearch.{ElasticsearchPublisher.ELASTICSEARCH_MAPPING_CONFIG_KEY}',
elasticsearch_mapping)
job = DefaultJob(conf=job_config,
task=task,
publisher=ElasticsearchPublisher())
return job.launch()
if __name__ == "__main__":
create_sample_snowflake_job()
create_es_publisher_sample_job() | [] | [] | [
"SF_DB_PASSWORD",
"SF_DB_ACCOUNT",
"SF_DB_USER",
"SF_DB_WAREHOUSE",
"SF_DB_ROLE",
"NEO4J_HOST"
] | [] | ["SF_DB_PASSWORD", "SF_DB_ACCOUNT", "SF_DB_USER", "SF_DB_WAREHOUSE", "SF_DB_ROLE", "NEO4J_HOST"] | python | 6 | 0 | |
dnwcamp/database.go | package dnwcamp
// Copyright 2014 - Tom Lennon. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
//
// This package manages database connections for DNW Camps
//
// Original created May 2014
import (
"fmt"
"labix.org/v2/mgo"
"os"
)
const (
// MONGODBSERVER = "mongodb://dnwUser:[email protected]:51459/campmaster" // Database URL
DATABASE = "campmaster" // Database name containing the app registration collection
COL_CAMPS = "Camps" // Name of camp configuration collection
COL_REG = "Registrations" // All camp registrations
)
func BuildDBSession() (*mgo.Session, error) {
mongoURI := os.Getenv("MONGOURI")
if mongoURI == "" {
fmt.Println("Database connection string was not found in the environment - Quitting")
os.Exit(12)
}
session, err := mgo.Dial(mongoURI)
if err != nil {
fmt.Println("Dialing the database presented the following error..", err)
os.Exit(10)
}
session.SetMode(mgo.Monotonic, true)
return session, nil
}
func ConnectCollection(s *mgo.Session, db string, collection string) (*mgo.Collection) {
c := s.DB(db).C(collection)
return c
}
func CloseConnection(s *mgo.Session) {
defer s.Close()
}
func OpenCampCollection() (*mgo.Session, *mgo.Collection, error) {
sess, err := BuildDBSession()
if err != nil {
return nil, nil, err
}
col := ConnectCollection(sess, DATABASE, COL_CAMPS)
// defer sess.Close()
return sess, col, err
}
func DeleteDNWTennisDB() error {
// TODO - Check for database before dropping
sess, err := BuildDBSession()
if err == nil {
err = sess.DB(DATABASE).DropDatabase()
}
sess.Close()
return err
}
func OpenRegistrationCollection() (*mgo.Session, *mgo.Collection, error) {
sess, err := BuildDBSession()
if err != nil {
return nil, nil, err
}
col := ConnectCollection(sess, DATABASE, COL_REG)
// defer sess.Close()
return sess, col, err
}
func DeleteRegistrationCollection() error {
sess, col, err := OpenRegistrationCollection()
if err == nil {
// TODO Add some code here to check that the collection exists prior to dropping
err = col.DropCollection()
}
sess.Close()
return err
} | [
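// Illustrative usage sketch (error handling abridged):
//
//	sess, col, err := OpenRegistrationCollection()
//	if err != nil { /* handle error */ }
//	defer CloseConnection(sess)
//	count, err := col.Count() // e.g. count stored registrations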
"\"MONGOURI\""
] | [] | [
"MONGOURI"
] | [] | ["MONGOURI"] | go | 1 | 0 | |
src/core/main.go | // Copyright 2018 Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"encoding/gob"
"fmt"
"net/url"
"os"
"os/signal"
"strconv"
"strings"
"syscall"
"time"
"github.com/astaxie/beego"
_ "github.com/astaxie/beego/session/redis"
_ "github.com/astaxie/beego/session/redis_sentinel"
"github.com/goharbor/harbor/src/common/dao"
common_http "github.com/goharbor/harbor/src/common/http"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/utils"
_ "github.com/goharbor/harbor/src/controller/event/handler"
"github.com/goharbor/harbor/src/core/api"
_ "github.com/goharbor/harbor/src/core/auth/authproxy"
_ "github.com/goharbor/harbor/src/core/auth/db"
_ "github.com/goharbor/harbor/src/core/auth/ldap"
_ "github.com/goharbor/harbor/src/core/auth/oidc"
_ "github.com/goharbor/harbor/src/core/auth/uaa"
"github.com/goharbor/harbor/src/core/config"
"github.com/goharbor/harbor/src/core/middlewares"
"github.com/goharbor/harbor/src/core/service/token"
"github.com/goharbor/harbor/src/lib/log"
"github.com/goharbor/harbor/src/migration"
"github.com/goharbor/harbor/src/pkg/notification"
_ "github.com/goharbor/harbor/src/pkg/notifier/topic"
"github.com/goharbor/harbor/src/pkg/scan"
"github.com/goharbor/harbor/src/pkg/scan/dao/scanner"
"github.com/goharbor/harbor/src/pkg/version"
"github.com/goharbor/harbor/src/replication"
"github.com/goharbor/harbor/src/server"
)
const (
adminUserID = 1
)
func updateInitPassword(userID int, password string) error {
queryUser := models.User{UserID: userID}
user, err := dao.GetUser(queryUser)
if err != nil {
return fmt.Errorf("Failed to get user, userID: %d %v", userID, err)
}
if user == nil {
return fmt.Errorf("user id: %d does not exist", userID)
}
if user.Salt == "" {
salt := utils.GenerateRandomString()
user.Salt = salt
user.Password = password
err = dao.ChangeUserPassword(*user)
if err != nil {
return fmt.Errorf("Failed to update user encrypted password, userID: %d, err: %v", userID, err)
}
log.Infof("User id: %d updated its encrypted password successfully.", userID)
} else {
log.Infof("User id: %d already has its encrypted password.", userID)
}
return nil
}
func gracefulShutdown(closing, done chan struct{}) {
signals := make(chan os.Signal, 1)
signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
log.Infof("capture system signal %s, to close \"closing\" channel", <-signals)
close(closing)
select {
case <-done:
log.Infof("Goroutines exited normally")
case <-time.After(time.Second * 3):
log.Infof("Timeout waiting goroutines to exit")
}
os.Exit(0)
}
func main() {
beego.BConfig.WebConfig.Session.SessionOn = true
beego.BConfig.WebConfig.Session.SessionName = config.SessionCookieName
redisURL := os.Getenv("_REDIS_URL_CORE")
if len(redisURL) > 0 {
u, err := url.Parse(redisURL)
if err != nil {
panic("bad _REDIS_URL:" + redisURL)
}
gob.Register(models.User{})
if u.Scheme == "redis+sentinel" {
ps := strings.Split(u.Path, "/")
if len(ps) < 2 {
panic("bad redis sentinel url: no master name")
}
ss := make([]string, 5)
ss[0] = strings.Join(strings.Split(u.Host, ","), ";") // host
ss[1] = "100" // pool
if u.User != nil {
password, isSet := u.User.Password()
if isSet {
ss[2] = password
}
}
if len(ps) > 2 {
db, err := strconv.Atoi(ps[2])
if err != nil {
panic("bad redis sentinel url: bad db")
}
if db != 0 {
ss[3] = ps[2]
}
}
ss[4] = ps[1] // monitor name
beego.BConfig.WebConfig.Session.SessionProvider = "redis_sentinel"
beego.BConfig.WebConfig.Session.SessionProviderConfig = strings.Join(ss, ",")
} else {
ss := make([]string, 5)
ss[0] = u.Host // host
ss[1] = "100" // pool
if u.User != nil {
password, isSet := u.User.Password()
if isSet {
ss[2] = password
}
}
if len(u.Path) > 1 {
if _, err := strconv.Atoi(u.Path[1:]); err != nil {
panic("bad redis url: bad db")
}
ss[3] = u.Path[1:]
}
ss[4] = u.Query().Get("idle_timeout_seconds")
beego.BConfig.WebConfig.Session.SessionProvider = "redis"
beego.BConfig.WebConfig.Session.SessionProviderConfig = strings.Join(ss, ",")
}
}
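// For illustration (hypothetical values): _REDIS_URL_CORE=redis://:pass@redis:6379/0?idle_timeout_seconds=30
// yields the "redis" provider with config "redis:6379,100,pass,0,30", while
// _REDIS_URL_CORE=redis+sentinel://:pass@s1:26379,s2:26379/mymaster/1 yields the "redis_sentinel"
// provider with config "s1:26379;s2:26379,100,pass,1,mymaster".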
beego.AddTemplateExt("htm")
log.Info("initializing configurations...")
config.Init()
log.Info("configurations initialization completed")
token.InitCreators()
database, err := config.Database()
if err != nil {
log.Fatalf("failed to get database configuration: %v", err)
}
if err := dao.InitDatabase(database); err != nil {
log.Fatalf("failed to initialize database: %v", err)
}
if err = migration.Migrate(database); err != nil {
log.Fatalf("failed to migrate: %v", err)
}
if err := config.Load(); err != nil {
log.Fatalf("failed to load config: %v", err)
}
password, err := config.InitialAdminPassword()
if err != nil {
log.Fatalf("failed to get admin's initial password: %v", err)
}
if err := updateInitPassword(adminUserID, password); err != nil {
log.Error(err)
}
// Init API handler
if err := api.Init(); err != nil {
log.Fatalf("Failed to initialize API handlers with error: %s", err.Error())
}
registerScanners()
closing := make(chan struct{})
done := make(chan struct{})
go gracefulShutdown(closing, done)
if err := replication.Init(closing, done); err != nil {
log.Fatalf("failed to init for replication: %v", err)
}
log.Info("initializing notification...")
notification.Init()
server.RegisterRoutes()
if common_http.InternalTLSEnabled() {
log.Info("internal TLS enabled, Init TLS ...")
iTLSKeyPath := os.Getenv("INTERNAL_TLS_KEY_PATH")
iTLSCertPath := os.Getenv("INTERNAL_TLS_CERT_PATH")
log.Infof("load client key: %s client cert: %s", iTLSKeyPath, iTLSCertPath)
beego.BConfig.Listen.EnableHTTP = false
beego.BConfig.Listen.EnableHTTPS = true
beego.BConfig.Listen.HTTPSPort = 8443
beego.BConfig.Listen.HTTPSKeyFile = iTLSKeyPath
beego.BConfig.Listen.HTTPSCertFile = iTLSCertPath
beego.BeeApp.Server.TLSConfig = common_http.NewServerTLSConfig()
}
log.Infof("Version: %s, Git commit: %s", version.ReleaseVersion, version.GitCommit)
beego.RunWithMiddleWares("", middlewares.MiddleWares()...)
}
const (
clairScanner = "Clair"
trivyScanner = "Trivy"
)
func registerScanners() {
wantedScanners := make([]scanner.Registration, 0)
uninstallScannerNames := make([]string, 0)
if config.WithTrivy() {
log.Info("Registering Trivy scanner")
wantedScanners = append(wantedScanners, scanner.Registration{
Name: trivyScanner,
Description: "The Trivy scanner adapter",
URL: config.TrivyAdapterURL(),
UseInternalAddr: true,
Immutable: true,
})
} else {
log.Info("Removing Trivy scanner")
uninstallScannerNames = append(uninstallScannerNames, trivyScanner)
}
if config.WithClair() {
log.Info("Registering Clair scanner")
wantedScanners = append(wantedScanners, scanner.Registration{
Name: clairScanner,
Description: "The Clair scanner adapter",
URL: config.ClairAdapterEndpoint(),
UseInternalAddr: true,
Immutable: true,
})
} else {
log.Info("Removing Clair scanner")
uninstallScannerNames = append(uninstallScannerNames, clairScanner)
}
if err := scan.RemoveImmutableScanners(uninstallScannerNames); err != nil {
log.Warningf("failed to remove scanners: %v", err)
}
if err := scan.EnsureScanners(wantedScanners); err != nil {
log.Fatalf("failed to register scanners: %v", err)
}
if defaultScannerName := getDefaultScannerName(); defaultScannerName != "" {
log.Infof("Setting %s as default scanner", defaultScannerName)
if err := scan.EnsureDefaultScanner(defaultScannerName); err != nil {
log.Fatalf("failed to set default scanner: %v", err)
}
}
}
func getDefaultScannerName() string {
if config.WithTrivy() {
return trivyScanner
}
if config.WithClair() {
return clairScanner
}
return ""
}
| [
"\"_REDIS_URL_CORE\"",
"\"INTERNAL_TLS_KEY_PATH\"",
"\"INTERNAL_TLS_CERT_PATH\""
] | [] | [
"INTERNAL_TLS_CERT_PATH",
"INTERNAL_TLS_KEY_PATH",
"_REDIS_URL_CORE"
] | [] | ["INTERNAL_TLS_CERT_PATH", "INTERNAL_TLS_KEY_PATH", "_REDIS_URL_CORE"] | go | 3 | 0 | |
auth.go | package main
import (
"context"
"errors"
"os"
"github.com/google/go-github/github"
"golang.org/x/oauth2"
)
func githubClient() (*github.Client, error) {
token := os.Getenv("GITHUB_AUTH_TOKEN")
if token == "" {
return nil, errors.New("authorization token not set")
}
ctx := context.Background()
ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: token})
tc := oauth2.NewClient(ctx, ts)
return github.NewClient(tc), nil
}
| [
"\"GITHUB_AUTH_TOKEN\""
] | [] | [
"GITHUB_AUTH_TOKEN"
] | [] | ["GITHUB_AUTH_TOKEN"] | go | 1 | 0 | |
CNN_Model/run_3d_cnn.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import numpy as np
import cv2
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto(allow_soft_placement=True, device_count = {'CPU' : 1, 'GPU' : 1})
config.gpu_options.allow_growth = True
set_session(tf.Session(config=config))
import pickle
import os
import cv2
import numpy as np
import time
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from keras.models import Sequential
from keras.layers import Flatten, Activation, Dense, Dropout, MaxPooling3D, Conv3D
from keras import optimizers
from keras import regularizers
from keras.layers.normalization import BatchNormalization
from keras.layers.noise import AlphaDropout
from keras import callbacks
from sklearn.externals import joblib
import matplotlib.pyplot as plt
from data_utils.data_processor import load_dataset
from model.models import build_3d_cnn
from model_test_utils.metrics import mean_absolute_relative_error
from model_test_utils.metrics import coefficient_of_determination
from keras.layers.advanced_activations import ELU
os.environ["CUDA_VISIBLE_DEVICES"] = ""
from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())
def main(*args, **kwargs):
if kwargs['n_jump'] == 0:
kwargs['n_jump'] = kwargs['n_stacked']
saved_file_name = './keras_3dcnn_{}stacked_{}jumps_{}depth.hdf5'.format(
kwargs['n_stacked'], kwargs['n_jump'], kwargs['depth'])
data_path = os.path.join(
os.path.dirname(os.path.abspath(os.path.dirname(__file__))),
'dataset'
)
img_path = os.path.join(kwargs['img_path'])
out_path = os.path.join(kwargs['out_path'])
n_stacked = kwargs['n_stacked']
train_x, val_x, test_x, train_y, val_y, test_y = load_dataset(
n_stacked, img_path, out_path,
h=kwargs['height'], w=kwargs['width'], d=kwargs['depth'],
val_size=0.04, test_size=0.04,
n_jump=kwargs['n_jump']
)
print("number of train images:", train_x.shape)
print("number of validation images:", val_x.shape)
print("number of test images:", test_x.shape)
print("number of train output sets:", train_y.shape)
print("number of validation output sets:", val_y.shape)
print("number of test output sets:", test_y.shape)
with tf.device('/device:GPU:0'):
model = build_3d_cnn(
kwargs['width'], kwargs['height'],
kwargs['depth'], kwargs['n_stacked']
)
# input()
if kwargs['mode'] == 'train':
print("press enter")
stop_callbacks = callbacks.EarlyStopping(
monitor='val_loss', patience=30, verbose=0, mode='min', min_delta=0
)
checkpoint = callbacks.ModelCheckpoint(
saved_file_name, monitor='val_loss',
verbose=1, save_best_only=True, mode='min'
)
history = model.fit(
train_x, train_y,
batch_size=kwargs['batch_size'], epochs=kwargs['epochs'],
callbacks=[stop_callbacks,checkpoint],
validation_data=(val_x, val_y), shuffle=True
)
# test always
print("Start test....")
model.load_weights(saved_file_name)
model_y_val = model.predict(val_x, batch_size=None, verbose=0)
model_y = model.predict(test_x, batch_size=None, verbose=0)
# train result
if kwargs['mode'] == 'train':
print(history.history.keys())
# summarize history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
# val result
attrs = ['steering', 'throttle']
for i in range(2):
mare = mean_absolute_relative_error(val_y[:,i], model_y_val[:,i])
print(attrs[i] +' mare: ' + str(mare))
R2_val = coefficient_of_determination(val_y[:,i], model_y_val[:,i])
print(attrs[i] + ' R^2: ' + str(R2_val))
csvdata = pd.DataFrame(val_y, columns=attrs)
csvdata['model_steering'] = model_y_val[:,0]
csvdata['model_throttle'] = model_y_val[:,1]
result_file_name = './result_val_3dcnn_{}stacked_{}jumps_{}depth.csv'.format(
kwargs['n_stacked'], kwargs['n_jump'], kwargs['depth'])
csvdata.to_csv(result_file_name)
print('val result saved')
# test result
attrs = ['steering', 'throttle']
for i in range(2):
mare = mean_absolute_relative_error(test_y[:,i], model_y[:,i])
print(attrs[i] +' mare: ' + str(mare))
R2_val = coefficient_of_determination(test_y[:,i], model_y[:,i])
print(attrs[i] + ' R^2: ' + str(R2_val))
print("maximum test accuracy was " + str(max(test_y[:,i])))
csvdata = pd.DataFrame(test_y, columns=attrs)
csvdata['model_steering'] = model_y[:,0]
csvdata['model_throttle'] = model_y[:,1]
result_file_name = './result_3dcnn_{}stacked_{}jumps_{}depth.csv'.format(
kwargs['n_stacked'], kwargs['n_jump'], kwargs['depth'])
csvdata.to_csv(result_file_name)
print('test result saved')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
"mode", help="train/test",
type=str, choices=["train", "test"]
)
parser.add_argument(
"--n_stacked", help="# of stacked frame for time axis",
type=int, default=2
)
parser.add_argument(
"--n_jump", help="time interval to get input, 0 for n_jump=n_stacked",
type=int, default=1
)
parser.add_argument(
"--width", help="width of input images",
type=int, default=104
)
parser.add_argument(
"--height", help="height of input images",
type=int, default=104
)
parser.add_argument(
"--depth", help="the number of channels of input images",
type=int, default=3
)
parser.add_argument(
"--img_path", help="image directory",
type=str, default='/home/jesse/Desktop/imagefiles/image_set'
)
parser.add_argument(
"--out_path", help="target csv filename",
type=str, default='/home/jesse/Desktop/training_dataset.csv'
)
parser.add_argument(
"--epochs", help="total number of training epochs",
type=int, default=50000
)
parser.add_argument(
"--batch_size", help="batch_size",
type=int, default=32
)
args = parser.parse_args()
main(**vars(args))
| [] | [] | [
"CUDA_VISIBLE_DEVICES"
] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
test/e2e/tests/e2e_test.go | package e2e_test
import (
"context"
"os"
"sync"
"testing"
"github.com/stretchr/testify/require"
rpchttp "github.com/tendermint/tendermint/rpc/client/http"
rpctypes "github.com/tendermint/tendermint/rpc/core/types"
e2e "github.com/tendermint/tendermint/test/e2e/pkg"
"github.com/tendermint/tendermint/types"
)
func init() {
// This can be used to manually specify a testnet manifest and/or node to
// run tests against. The testnet must have been started by the runner first.
// os.Setenv("E2E_MANIFEST", "networks/ci.toml")
// os.Setenv("E2E_NODE", "validator01")
}
var (
ctx = context.Background()
testnetCache = map[string]e2e.Testnet{}
testnetCacheMtx = sync.Mutex{}
blocksCache = map[string][]*types.Block{}
blocksCacheMtx = sync.Mutex{}
)
// testNode runs tests for testnet nodes. The callback function is
// given a single stateful node to test, running as a subtest in
// parallel with other subtests.
//
// The testnet manifest must be given as the envvar E2E_MANIFEST. If not set,
// these tests are skipped so that they're not picked up during normal unit
// test runs. If E2E_NODE is also set, only the specified node is tested,
// otherwise all nodes are tested.
func testNode(t *testing.T, testFunc func(*testing.T, e2e.Node)) {
t.Helper()
testnet := loadTestnet(t)
nodes := testnet.Nodes
if name := os.Getenv("E2E_NODE"); name != "" {
node := testnet.LookupNode(name)
require.NotNil(t, node, "node %q not found in testnet %q", name, testnet.Name)
nodes = []*e2e.Node{node}
}
for _, node := range nodes {
node := *node
if node.Stateless() {
continue
}
t.Run(node.Name, func(t *testing.T) {
t.Parallel()
testFunc(t, node)
})
}
}
// loadTestnet loads the testnet based on the E2E_MANIFEST envvar.
func loadTestnet(t *testing.T) e2e.Testnet {
t.Helper()
manifest := os.Getenv("E2E_MANIFEST")
if manifest == "" {
t.Skip("E2E_MANIFEST not set, not an end-to-end test run")
}
testnetCacheMtx.Lock()
defer testnetCacheMtx.Unlock()
if testnet, ok := testnetCache[manifest]; ok {
return testnet
}
testnet, err := e2e.LoadTestnet(manifest)
require.NoError(t, err)
testnetCache[manifest] = *testnet
return *testnet
}
// fetchBlockChain fetches a complete, up-to-date block history from
// the freshest testnet archive node.
func fetchBlockChain(t *testing.T) []*types.Block {
t.Helper()
testnet := loadTestnet(t)
// Find the freshest archive node
var (
client *rpchttp.HTTP
status *rpctypes.ResultStatus
)
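	// Poll every archive node and keep the client reporting the greatest latest block height.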
for _, node := range testnet.ArchiveNodes() {
c, err := node.Client()
require.NoError(t, err)
s, err := c.Status(ctx)
require.NoError(t, err)
if status == nil || s.SyncInfo.LatestBlockHeight > status.SyncInfo.LatestBlockHeight {
client = c
status = s
}
}
require.NotNil(t, client, "couldn't find an archive node")
// Fetch blocks. Look for existing block history in the block cache, and
// extend it with any new blocks that have been produced.
blocksCacheMtx.Lock()
defer blocksCacheMtx.Unlock()
from := status.SyncInfo.EarliestBlockHeight
to := status.SyncInfo.LatestBlockHeight
blocks, ok := blocksCache[testnet.Name]
if !ok {
blocks = make([]*types.Block, 0, to-from+1)
}
if len(blocks) > 0 {
from = blocks[len(blocks)-1].Height + 1
}
for h := from; h <= to; h++ {
resp, err := client.Block(ctx, &h)
require.NoError(t, err)
require.NotNil(t, resp.Block)
require.Equal(t, h, resp.Block.Height, "unexpected block height %v", resp.Block.Height)
blocks = append(blocks, resp.Block)
}
require.NotEmpty(t, blocks, "blockchain does not contain any blocks")
blocksCache[testnet.Name] = blocks
return blocks
}
| [
"\"E2E_NODE\"",
"\"E2E_MANIFEST\""
] | [] | [
"E2E_NODE",
"E2E_MANIFEST"
] | [] | ["E2E_NODE", "E2E_MANIFEST"] | go | 2 | 0 | |
cmd/ledgerfsck/main.go | package main
import (
"bytes"
"flag"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/golang/protobuf/proto"
"github.com/hyperledger/fabric/common/channelconfig"
"github.com/hyperledger/fabric/common/flogging"
"github.com/hyperledger/fabric/common/metadata"
"github.com/hyperledger/fabric/common/policies"
"github.com/hyperledger/fabric/core/chaincode/lifecycle"
"github.com/hyperledger/fabric/core/chaincode/persistence"
"github.com/hyperledger/fabric/core/chaincode/platforms"
"github.com/hyperledger/fabric/core/common/privdata"
coreconfig "github.com/hyperledger/fabric/core/config"
"github.com/hyperledger/fabric/core/ledger"
"github.com/hyperledger/fabric/core/ledger/ledgermgmt"
"github.com/hyperledger/fabric/core/operations"
"github.com/hyperledger/fabric/core/peer"
"github.com/hyperledger/fabric/core/scc/lscc"
gossipCommon "github.com/hyperledger/fabric/gossip/common"
"github.com/hyperledger/fabric/internal/peer/common"
"github.com/hyperledger/fabric/internal/peer/gossip"
"github.com/hyperledger/fabric/msp"
"github.com/hyperledger/fabric/msp/mgmt"
pb "github.com/hyperledger/fabric/protos/common"
"github.com/hyperledger/fabric/protoutil"
"github.com/pkg/errors"
"github.com/spf13/viper"
)
var logger = flogging.MustGetLogger("ledgerfsck")
type ledgerFsck struct {
channelName string
mspConfigPath string
mspID string
mspType string
ledger ledger.PeerLedger
bundle *channelconfig.Bundle
}
func (fsck *ledgerFsck) Manager(channelID string) (policies.Manager, bool) {
return fsck.bundle.PolicyManager(), true
}
// Initialize
func (fsck *ledgerFsck) Initialize() error {
loggingSpec := os.Getenv("FABRIC_LOGGING_SPEC")
if loggingSpec == "" {
loggingSpec = "ledgerfsck=debug:fatal"
}
loggingFormat := os.Getenv("FABRIC_LOGGING_FORMAT")
if loggingFormat == "" {
loggingFormat = "%{color}%{time:2006-01-02 15:04:05.000 MST} [%{module}] %{shortfunc} -> %{level:.4s} %{id:03x}%{color:reset} %{message}"
}
flogging.Init(flogging.Config{
Format: loggingFormat,
Writer: os.Stdout,
LogSpec: loggingSpec,
})
// Initialize viper configuration
viper.SetEnvPrefix("core")
viper.AutomaticEnv()
viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
err := common.InitConfig("core")
if err != nil {
logger.Errorf("failed to initialize configuration, because of %s", err)
return err
}
return nil
}
// ReadConfiguration read configuration parameters
func (fsck *ledgerFsck) ReadConfiguration() error {
// Read configuration parameters
flag.StringVar(&fsck.channelName, "channelName", "testChannel", "channel name to check the integrity")
flag.StringVar(&fsck.mspConfigPath, "mspPath", "", "path to the msp folder")
flag.StringVar(&fsck.mspID, "mspID", "", "the MSP identity of the organization")
flag.StringVar(&fsck.mspType, "mspType", "bccsp", "the type of the MSP provider, default bccsp")
flag.Parse()
if fsck.mspConfigPath == "" {
errMsg := "MSP folder not configured"
logger.Error(errMsg)
return errors.New(errMsg)
}
if fsck.mspID == "" {
errMsg := "MSPID was not provided"
logger.Error(errMsg)
return errors.New(errMsg)
}
logger.Debugf("channel name = %s", fsck.channelName)
logger.Debugf("MSP folder path = %s", fsck.mspConfigPath)
logger.Debugf("MSPID = %s", fsck.mspID)
logger.Debugf("MSP type = %s", fsck.mspType)
return nil
}
// InitCrypto
func (fsck *ledgerFsck) InitCrypto() error {
// Next need to init MSP
err := common.InitCrypto(fsck.mspConfigPath, fsck.mspID, fsck.mspType)
if err != nil {
logger.Errorf("failed to initialize MSP related configuration, failure %s", err)
return err
}
return nil
}
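// createSelfSignedData signs a throwaway message with the local signing identity; the resulting
// SignedData is handed to the membership info provider when the ledger is opened.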
func createSelfSignedData() protoutil.SignedData {
sId := mgmt.GetLocalSigningIdentityOrPanic()
msg := make([]byte, 32)
sig, err := sId.Sign(msg)
if err != nil {
logger.Panicf("Failed creating self signed data because message signing failed: %v", err)
}
peerIdentity, err := sId.Serialize()
if err != nil {
logger.Panicf("Failed creating self signed data because peer identity couldn't be serialized: %v", err)
}
return protoutil.SignedData{
Data: msg,
Signature: sig,
Identity: peerIdentity,
}
}
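// newOperationsSystem builds the peer operations subsystem (metrics provider, optional TLS) from viper configuration.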
func newOperationsSystem() *operations.System {
return operations.NewSystem(operations.Options{
Logger: flogging.MustGetLogger("peer.operations"),
ListenAddress: viper.GetString("operations.listenAddress"),
Metrics: operations.MetricsOptions{
Provider: viper.GetString("metrics.provider"),
Statsd: &operations.Statsd{
Network: viper.GetString("metrics.statsd.network"),
Address: viper.GetString("metrics.statsd.address"),
WriteInterval: viper.GetDuration("metrics.statsd.writeInterval"),
Prefix: viper.GetString("metrics.statsd.prefix"),
},
},
TLS: operations.TLS{
Enabled: viper.GetBool("operations.tls.enabled"),
CertFile: viper.GetString("operations.tls.cert.file"),
KeyFile: viper.GetString("operations.tls.key.file"),
ClientCertRequired: viper.GetBool("operations.tls.clientAuthRequired"),
ClientCACertFiles: viper.GetStringSlice("operations.tls.clientRootCAs.files"),
},
Version: metadata.Version,
})
}
// OpenLedger
func (fsck *ledgerFsck) OpenLedger() error {
chaincodeInstallPath := filepath.Join(coreconfig.GetPath("peer.fileSystemPath"), "chaincodes")
ccPackageParser := &persistence.ChaincodePackageParser{}
ccStore := &persistence.Store{
Path: chaincodeInstallPath,
ReadWriter: &persistence.FilesystemIO{},
}
lifecycleResources := &lifecycle.Resources{
Serializer: &lifecycle.Serializer{},
ChannelConfigSource: peer.Default,
ChaincodeStore: ccStore,
PackageParser: ccPackageParser,
}
lifecycleValidatorCommitter := &lifecycle.ValidatorCommitter{
Resources: lifecycleResources,
LegacyDeployedCCInfoProvider: &lscc.DeployedCCInfoProvider{},
}
mspID := viper.GetString("peer.localMspId")
lifecycleCache := lifecycle.NewCache(lifecycleResources, mspID)
identityDeserializerFactory := func(chainID string) msp.IdentityDeserializer {
return mgmt.GetManagerForChain(chainID)
}
membershipInfoProvider := privdata.NewMembershipInfoProvider(createSelfSignedData(), identityDeserializerFactory)
opsSystem := newOperationsSystem()
err := opsSystem.Start()
if err != nil {
return errors.WithMessage(err, "failed to initialize operations subystems")
}
defer opsSystem.Stop()
metricsProvider := opsSystem.Provider
// Initialize ledger management
pr := platforms.NewRegistry(platforms.SupportedPlatforms...)
ledgermgmt.Initialize(&ledgermgmt.Initializer{
CustomTxProcessors: peer.ConfigTxProcessors,
PlatformRegistry: pr,
DeployedChaincodeInfoProvider: lifecycleValidatorCommitter,
MembershipInfoProvider: membershipInfoProvider,
MetricsProvider: metricsProvider,
StateListeners: []ledger.StateListener{lifecycleCache},
})
ledgerIds, err := ledgermgmt.GetLedgerIDs()
if err != nil {
errMsg := fmt.Sprintf("failed to read ledger, because of %s", err)
logger.Errorf(errMsg)
return errors.New(errMsg)
}
// Check whether the provided channel name has a corresponding ledger
var found = false
for _, name := range ledgerIds {
if name == fsck.channelName {
found = true
}
}
if !found {
errMsg := fmt.Sprintf("there is no ledger corresponding to the provided channel name %s. Exiting...", fsck.channelName)
logger.Errorf(errMsg)
return errors.New(errMsg)
}
if fsck.ledger, err = ledgermgmt.OpenLedger(fsck.channelName); err != nil {
errMsg := fmt.Sprintf("failed to open ledger %s, because of the %s", fsck.channelName, err)
logger.Errorf(errMsg)
return errors.New(errMsg)
}
return nil
}
// GetLatestChannelConfigBundle
func (fsck *ledgerFsck) GetLatestChannelConfigBundle() error {
var cb *pb.Block
var err error
if cb, err = getCurrConfigBlockFromLedger(fsck.ledger); err != nil {
logger.Warningf("Failed to find config block on ledger %s(%s)", fsck.channelName, err)
return err
}
qe, err := fsck.ledger.NewQueryExecutor()
if err != nil {
logger.Errorf("failed to obtain query executor, error is %s", err)
return err
}
defer qe.Done()
logger.Debug("reading configuration from state DB")
confBytes, err := qe.GetState("", "resourcesconfigtx.CHANNEL_CONFIG_KEY")
if err != nil {
logger.Errorf("failed to read channel config, error %s", err)
return err
}
conf := &pb.Config{}
err = proto.Unmarshal(confBytes, conf)
if err != nil {
logger.Errorf("could not read configuration, due to %s", err)
return err
}
if conf != nil {
logger.Debug("initialize channel config bundle")
fsck.bundle, err = channelconfig.NewBundle(fsck.channelName, conf)
if err != nil {
return err
}
} else {
// Config was only stored in the statedb starting with v1.1 binaries
// so if the config is not found there, extract it manually from the config block
logger.Debug("configuration wasn't stored in state DB retrieving config envelope from ledger")
envelopeConfig, err := protoutil.ExtractEnvelope(cb, 0)
if err != nil {
return err
}
logger.Debug("initialize channel config bundle from config transaction")
fsck.bundle, err = channelconfig.NewBundleFromEnvelope(envelopeConfig)
if err != nil {
return err
}
}
capabilitiesSupportedOrPanic(fsck.bundle)
channelconfig.LogSanityChecks(fsck.bundle)
return nil
}
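// Verify walks the chain from block 1 up to the ledger height, checking that each header's
// PreviousHash matches the hash of the preceding block and that every block passes the
// gossip MCS signature/policy check.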
func (fsck *ledgerFsck) Verify() {
blockchainInfo, err := fsck.ledger.GetBlockchainInfo()
if err != nil {
logger.Debugf("could not obtain blockchain information "+
"channel name %s, due to %s", fsck.channelName, err)
logger.Infof("FAIL")
os.Exit(-1)
}
logger.Debugf("ledger height of channel %s, is %d\n", fsck.channelName, blockchainInfo.Height)
signer := mgmt.GetLocalSigningIdentityOrPanic()
mcs := gossip.NewMCS(
fsck,
signer,
mgmt.NewDeserializersManager())
block, err := fsck.ledger.GetBlockByNumber(uint64(0))
if err != nil {
logger.Debugf("failed to read genesis block number, with error", err)
logger.Infof("FAIL")
os.Exit(-1)
}
// Get hash of genesis block
prevHash := protoutil.BlockHeaderHash(block.Header)
// complete full scan and check over ledger blocks
for blockIndex := uint64(1); blockIndex < blockchainInfo.Height; blockIndex++ {
block, err := fsck.ledger.GetBlockByNumber(blockIndex)
if err != nil {
logger.Debugf("failed to read block number %d from ledger, with error", blockIndex, err)
logger.Infof("FAIL")
os.Exit(-1)
}
if !bytes.Equal(prevHash, block.Header.PreviousHash) {
logger.Debugf("block number [%d]: hash comparison has failed, previous block hash %x doesn't"+
" equal to hash claimed within block header %x", blockIndex, prevHash, block.Header.PreviousHash)
logger.Infof("FAIL")
os.Exit(-1)
} else {
logger.Debugf("block number [%d]: previous hash matched", blockIndex)
}
signedBlock, err := proto.Marshal(block)
if err != nil {
logger.Debugf("failed marshaling block, due to", err)
logger.Infof("FAIL")
os.Exit(-1)
}
if err := mcs.VerifyBlock(gossipCommon.ChainID(fsck.channelName), block.Header.Number, signedBlock); err != nil {
logger.Debugf("failed to verify block with sequence number %d. %s", blockIndex, err)
logger.Infof("FAIL")
os.Exit(-1)
} else {
logger.Debugf("Block [seq = %d], hash = [%x], previous hash = [%x], VERIFICATION PASSED",
blockIndex, protoutil.BlockHeaderHash(block.Header), block.Header.PreviousHash)
}
prevHash = protoutil.BlockHeaderHash(block.Header)
}
logger.Infof("PASS")
}
func main() {
fsck := &ledgerFsck{}
// Initialize configuration
if err := fsck.Initialize(); err != nil {
os.Exit(-1)
}
// Read configuration parameters
if err := fsck.ReadConfiguration(); err != nil {
os.Exit(-1)
}
// Init crypto & MSP
if err := fsck.InitCrypto(); err != nil {
os.Exit(-1)
}
// OpenLedger
if err := fsck.OpenLedger(); err != nil {
os.Exit(-1)
}
// GetLatestChannelConfigBundle
if err := fsck.GetLatestChannelConfigBundle(); err != nil {
os.Exit(-1)
}
fsck.Verify()
}
// getCurrConfigBlockFromLedger reads the latest configuration block from the ledger
func getCurrConfigBlockFromLedger(ledger ledger.PeerLedger) (*pb.Block, error) {
logger.Debugf("Getting config block")
// get last block. Last block number is Height-1
blockchainInfo, err := ledger.GetBlockchainInfo()
if err != nil {
return nil, err
}
lastBlock, err := ledger.GetBlockByNumber(blockchainInfo.Height - 1)
if err != nil {
return nil, err
}
// get most recent config block location from last block metadata
configBlockIndex, err := protoutil.GetLastConfigIndexFromBlock(lastBlock)
if err != nil {
return nil, err
}
// get most recent config block
configBlock, err := ledger.GetBlockByNumber(configBlockIndex)
if err != nil {
return nil, err
}
logger.Debugf("Got config block[%d]", configBlockIndex)
return configBlock, nil
}
func capabilitiesSupportedOrPanic(res channelconfig.Resources) {
ac, ok := res.ApplicationConfig()
if !ok {
logger.Panicf("[channel %s] does not have application config so is incompatible", res.ConfigtxValidator().ChainID())
}
if err := ac.Capabilities().Supported(); err != nil {
logger.Panicf("[channel %s] incompatible %s", res.ConfigtxValidator(), err)
}
if err := res.ChannelConfig().Capabilities().Supported(); err != nil {
logger.Panicf("[channel %s] incompatible %s", res.ConfigtxValidator(), err)
}
}
| [
"\"FABRIC_LOGGING_SPEC\"",
"\"FABRIC_LOGGING_FORMAT\""
] | [] | [
"FABRIC_LOGGING_SPEC",
"FABRIC_LOGGING_FORMAT"
] | [] | ["FABRIC_LOGGING_SPEC", "FABRIC_LOGGING_FORMAT"] | go | 2 | 0 | |
pygeotools/lib/iolib.py | #! /usr/bin/env python
"""
Functions for IO, mostly wrapped around GDAL
Note: This was all written before RasterIO existed, which might be a better choice.
"""
import os
import subprocess
import numpy as np
from osgeo import gdal, gdal_array, osr
#Define drivers
mem_drv = gdal.GetDriverByName('MEM')
gtif_drv = gdal.GetDriverByName('GTiff')
vrt_drv = gdal.GetDriverByName("VRT")
#Default GDAL creation options
gdal_opt = ['COMPRESS=LZW', 'TILED=YES', 'BIGTIFF=IF_SAFER']
#gdal_opt += ['BLOCKXSIZE=1024', 'BLOCKYSIZE=1024']
#List that can be used for building commands
gdal_opt_co = []
[gdal_opt_co.extend(('-co', i)) for i in gdal_opt]
#Add methods to load ma from OpenCV, PIL, etc.
#These formats should be directly readable as np arrays
#Note: want to modify to import all bands as separate arrays in ndarray
#Unless the user requests a single band, or range of bands
#Check for file existence
def fn_check(fn):
"""Wrapper to check for file existence
Parameters
----------
fn : str
Input filename string.
Returns
-------
bool
True if file exists, False otherwise.
"""
return os.path.exists(fn)
def fn_check_full(fn):
"""Check for file existence
Avoids race condition, but slower than os.path.exists.
Parameters
----------
fn : str
Input filename string.
Returns
-------
status
True if file exists, False otherwise.
"""
status = True
if not os.path.isfile(fn):
status = False
else:
try:
open(fn)
except IOError:
status = False
return status
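#Check that every filename in a list exists; returns False (and prints the missing paths) if any are absent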
def fn_list_check(fn_list):
status = True
for fn in fn_list:
if not fn_check(fn):
print('Unable to find: %s' % fn)
status = False
return status
def fn_list_valid(fn_list):
print('%i input fn' % len(fn_list))
out_list = []
for fn in fn_list:
if not fn_check(fn):
print('Unable to find: %s' % fn)
else:
out_list.append(fn)
print('%i output fn' % len(out_list))
return out_list
#Wrapper around gdal.Open
def fn_getds(fn):
"""Wrapper around gdal.Open()
"""
ds = None
if fn_check(fn):
ds = gdal.Open(fn, gdal.GA_ReadOnly)
else:
print("Unable to find %s" % fn)
return ds
def fn_getma(fn, bnum=1):
"""Get masked array from input filename
Parameters
----------
fn : str
Input filename string
bnum : int, optional
Band number
Returns
-------
np.ma.array
Masked array containing raster values
"""
#Add check for filename existence
ds = fn_getds(fn)
return ds_getma(ds, bnum=bnum)
#Given input dataset, return a masked array for the input band
def ds_getma(ds, bnum=1):
"""Get masked array from input GDAL Dataset
Parameters
----------
ds : gdal.Dataset
Input GDAL Dataset
bnum : int, optional
Band number
Returns
-------
np.ma.array
Masked array containing raster values
"""
b = ds.GetRasterBand(bnum)
return b_getma(b)
#Given input band, return a masked array
def b_getma(b):
"""Get masked array from input GDAL Band
Parameters
----------
b : gdal.Band
Input GDAL Band
Returns
-------
np.ma.array
Masked array containing raster values
"""
b_ndv = get_ndv_b(b)
#bma = np.ma.masked_equal(b.ReadAsArray(), b_ndv)
#This is more appropriate for float, handles precision issues
bma = np.ma.masked_values(b.ReadAsArray(), b_ndv)
return bma
def get_sub_dim(src_ds, scale=None, maxdim=1024):
"""Compute dimensions of subsampled dataset
Parameters
----------
src_ds : gdal.Dataset
Input GDAL Dataset
scale : int, optional
Scaling factor
maxdim : int, optional
Maximum dimension along either axis, in pixels
Returns
-------
ns
Number of samples in subsampled output
nl
Number of lines in subsampled output
scale
Final scaling factor
"""
ns = src_ds.RasterXSize
nl = src_ds.RasterYSize
maxdim = float(maxdim)
if scale is None:
scale_ns = ns/maxdim
scale_nl = nl/maxdim
scale = max(scale_ns, scale_nl)
#Need to check to make sure scale is positive real
if scale > 1:
ns = int(round(ns/scale))
nl = int(round(nl/scale))
return ns, nl, scale
def fn_getma_sub(fn, bnum=1, scale=None, maxdim=1024., return_ds=False):
ds = gdal.Open(fn)
return ds_getma_sub(ds, bnum=bnum, scale=scale, maxdim=maxdim, return_ds=return_ds)
#Load a subsampled array
#Can specify scale factor or max dimension
#No need to load the entire dataset for stats computation
def ds_getma_sub(src_ds, bnum=1, scale=None, maxdim=1024., return_ds=False):
"""Load a subsampled array, rather than full resolution
This is useful when working with large rasters
Uses buf_xsize and buf_ysize options from GDAL ReadAsArray method.
Parameters
----------
src_ds : gdal.Dataset
Input GDAL Dataset
bnum : int, optional
Band number
scale : int, optional
Scaling factor
maxdim : int, optional
Maximum dimension along either axis, in pixels
Returns
-------
np.ma.array
Masked array containing raster values
"""
#print src_ds.GetFileList()[0]
b = src_ds.GetRasterBand(bnum)
b_ndv = get_ndv_b(b)
ns, nl, scale = get_sub_dim(src_ds, scale, maxdim)
#The buf_size parameters determine the final array dimensions
b_array = b.ReadAsArray(buf_xsize=ns, buf_ysize=nl)
bma = np.ma.masked_values(b_array, b_ndv)
out = bma
if return_ds:
dtype = src_ds.GetRasterBand(1).DataType
src_ds_sub = gdal.GetDriverByName('MEM').Create('', ns, nl, 1, dtype)
gt = np.array(src_ds.GetGeoTransform())
gt[[1,5]] = gt[[1,5]]*scale
src_ds_sub.SetGeoTransform(list(gt))
src_ds_sub.SetProjection(src_ds.GetProjection())
b = src_ds_sub.GetRasterBand(1)
b.WriteArray(bma)
b.SetNoDataValue(b_ndv)
out = (bma, src_ds_sub)
return out
#Note: need to consolidate with warplib.writeout (takes ds, not ma)
#Add option to build overviews when writing GTiff
#Input proj must be WKT
def writeGTiff(a, dst_fn, src_ds=None, bnum=1, ndv=None, gt=None, proj=None, create=False, sparse=False):
"""Write input array to disk as GeoTiff
Parameters
----------
a : np.array or np.ma.array
Input array
dst_fn : str
Output filename
src_ds: GDAL Dataset, optional
Source Dataset to use for creating copy
bnum : int, optional
Output band
ndv : float, optional
Output NoData Value
gt : list, optional
Output GeoTransform
proj : str, optional
Output Projection (OGC WKT or PROJ.4 format)
create : bool, optional
Create new dataset
sparse : bool, optional
Output should be created with sparse options
"""
#If input is not np.ma, this creates a new ma, which has a default fill_value of 1E20
#Must manually override with ndv
#Also consumes a lot of memory
#Should bypass if input is bool
from pygeotools.lib.malib import checkma
a = checkma(a, fix=False)
#Want to preserve fill_value if already specified
if ndv is not None:
a.set_fill_value(ndv)
driver = gtif_drv
#Currently only support writing singleband rasters
#if a.ndim > 2:
# np_nbands = a.shape[2]
# if src_ds.RasterCount np_nbands:
# for bnum in np_nbands:
nbands = 1
np_dt = a.dtype.name
if src_ds is not None:
#If this is a fn, get a ds
#Note: this saves a lot of unnecessary iolib.fn_getds calls
if isinstance(src_ds, str):
src_ds = fn_getds(src_ds)
#if isinstance(src_ds, gdal.Dataset):
src_dt = gdal.GetDataTypeName(src_ds.GetRasterBand(bnum).DataType)
src_gt = src_ds.GetGeoTransform()
#This is WKT
src_proj = src_ds.GetProjection()
#src_srs = osr.SpatialReference()
#src_srs.ImportFromWkt(src_ds.GetProjectionRef())
#Probably a cleaner way to handle this
if gt is None:
gt = src_gt
if proj is None:
proj = src_proj
#Need to create a new copy of the default options
opt = list(gdal_opt)
#Note: packbits is better for sparse data
if sparse:
opt.remove('COMPRESS=LZW')
opt.append('COMPRESS=PACKBITS')
#Not sure if VW can handle sparse tif
#opt.append('SPARSE_OK=TRUE')
#Use predictor=3 for floating point data
if 'float' in np_dt.lower() and 'COMPRESS=LZW' in opt:
opt.append('PREDICTOR=3')
#If input ma is same as src_ds, write out array using CreateCopy from existing dataset
#if not create and (src_ds is not None) and ((a.shape[0] == src_ds.RasterYSize) and (a.shape[1] == src_ds.RasterXSize) and (np_dt.lower() == src_dt.lower())):
#Should compare srs.IsSame(src_srs)
if not create and (src_ds is not None) and ((a.shape[0] == src_ds.RasterYSize) and (a.shape[1] == src_ds.RasterXSize) and (np_dt.lower() == src_dt.lower())) and (src_gt == gt) and (src_proj == proj):
#Note: third option is strict flag, set to false
dst_ds = driver.CreateCopy(dst_fn, src_ds, 0, options=opt)
#Otherwise, use Create
else:
a_dtype = a.dtype
gdal_dtype = np2gdal_dtype(a_dtype)
if a_dtype.name == 'bool':
#Set ndv to 0
a.fill_value = False
opt.remove('COMPRESS=LZW')
opt.append('COMPRESS=DEFLATE')
#opt.append('NBITS=1')
#Create(fn, nx, ny, nbands, dtype, opt)
dst_ds = driver.Create(dst_fn, a.shape[1], a.shape[0], nbands, gdal_dtype, options=opt)
#Note: Need GeoMA here to make this work, or accept gt as argument
#Could also do ds creation in calling script
if gt is not None:
dst_ds.SetGeoTransform(gt)
if proj is not None:
dst_ds.SetProjection(proj)
dst_ds.GetRasterBand(bnum).WriteArray(a.filled())
dst_ds.GetRasterBand(bnum).SetNoDataValue(float(a.fill_value))
dst_ds = None
def writevrt(out_csv,srs='EPSG:4326',x='field_1',y='field_2'):
"""
Write out a vrt to accompany a csv of points
"""
out_vrt = os.path.splitext(out_csv)[0]+'.vrt'
out_csv = os.path.split(out_csv)[-1]
f = open(out_vrt, 'w')
f.write('<OGRVRTDataSource>\n')
f.write(' <OGRVRTLayer name="%s">\n' % os.path.splitext(out_csv)[0])
f.write(' <SrcDataSource>%s</SrcDataSource>\n' % out_csv)
f.write(' <GeometryType>wkbPoint</GeometryType>\n')
f.write(' <LayerSRS>%s</LayerSRS>\n' % srs)
f.write(' <GeometryField encoding="PointFromColumns" x="%s" y="%s"/>\n' % (x, y))
f.write(' </OGRVRTLayer>\n')
f.write('</OGRVRTDataSource>\n')
f.close()
#Move to geolib?
#Look up equivalent GDAL data type
def np2gdal_dtype(d):
"""
Get GDAL RasterBand datatype that corresponds with NumPy datatype
Input should be numpy array or numpy dtype
"""
dt_dict = gdal_array.codes
if isinstance(d, (np.ndarray, np.generic)):
d = d.dtype
#This creates dtype from another built-in type
#d = np.dtype(d)
if isinstance(d, np.dtype):
if d.name == 'int8':
gdal_dt = 1
elif d.name == 'bool':
#Write out as Byte
gdal_dt = 1
else:
gdal_dt = list(dt_dict.keys())[list(dt_dict.values()).index(d)]
else:
print("Input must be NumPy array or NumPy dtype")
gdal_dt = None
return gdal_dt
def gdal2np_dtype(b):
"""
Get NumPy datatype that corresponds with GDAL RasterBand datatype
Input can be filename, GDAL Dataset, GDAL RasterBand, or GDAL integer dtype
"""
dt_dict = gdal_array.codes
if isinstance(b, str):
b = gdal.Open(b)
if isinstance(b, gdal.Dataset):
b = b.GetRasterBand(1)
if isinstance(b, gdal.Band):
b = b.DataType
if isinstance(b, int):
np_dtype = dt_dict[b]
else:
np_dtype = None
print("Input must be GDAL Dataset or RasterBand object")
return np_dtype
#Replace nodata value in GDAL band
def replace_ndv(b, new_ndv):
b_ndv = get_ndv_b(b)
bma = np.ma.masked_values(b.ReadAsArray(), b_ndv)
bma.set_fill_value(new_ndv)
b.WriteArray(bma.filled())
b.SetNoDataValue(new_ndv)
return b
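#Set a nodata value on every band of an existing file (opened in update mode)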
def set_ndv(dst_fn, ndv):
dst_ds = gdal.Open(dst_fn, gdal.GA_Update)
for n in range(1, dst_ds.RasterCount+1):
b = dst_ds.GetRasterBand(n)
b.SetNoDataValue(ndv)
dst_ds = None
#Should overload these functions to handle fn, ds, or b
#Perhaps abstract, as many functions will need this functionality
def get_ndv_fn(fn):
ds = gdal.Open(fn, gdal.GA_ReadOnly)
return get_ndv_ds(ds)
#Want to modify to handle multi-band images and return list of ndv
def get_ndv_ds(ds, bnum=1):
b = ds.GetRasterBand(bnum)
return get_ndv_b(b)
#Return nodata value for GDAL band
def get_ndv_b(b):
"""Get NoData value for GDAL band.
If NoDataValue is not set in the band,
extract upper left and lower right pixel values.
Otherwise assume NoDataValue is 0.
Parameters
----------
b : GDALRasterBand object
This is the input band.
Returns
-------
b_ndv : float
NoData value
"""
b_ndv = b.GetNoDataValue()
if b_ndv is None:
#Check ul pixel for ndv
ns = b.XSize
nl = b.YSize
ul = float(b.ReadAsArray(0, 0, 1, 1))
#ur = float(b.ReadAsArray(ns-1, 0, 1, 1))
lr = float(b.ReadAsArray(ns-1, nl-1, 1, 1))
#ll = float(b.ReadAsArray(0, nl-1, 1, 1))
#Probably better to use 3/4 corner criterion
#if ul == ur == lr == ll:
if np.isnan(ul) or ul == lr:
b_ndv = ul
else:
#Assume ndv is 0
b_ndv = 0
elif np.isnan(b_ndv):
b_dt = gdal.GetDataTypeName(b.DataType)
if 'Float' in b_dt:
b_ndv = np.nan
else:
b_ndv = 0
return b_ndv
#Write out a recarray as a csv
def write_recarray(outfn, ra):
with open(outfn,'w') as f:
f.write(','.join([str(item) for item in ra.dtype.names])+'\n')
for row in ra:
f.write(','.join([str(item) for item in row])+'\n')
#Check to make sure image doesn't contain errors
def image_check(fn):
ds = gdal.Open(fn)
status = True
for i in range(ds.RasterCount):
ds.GetRasterBand(i+1).Checksum()
if gdal.GetLastErrorType() != 0:
status = False
return status
#Return number of CPUs
#Logical is "virtual" cpu count with hyperthreading
#Set to False for physical cpu count
def cpu_count(logical=True):
"""Return system CPU count
"""
if logical:
from multiprocessing import cpu_count
ncpu=cpu_count()
else:
import psutil
ncpu=psutil.cpu_count(logical=False)
return ncpu
def setstripe(dir, threads=cpu_count()):
#import socket
#if 'nasa' in socket.getfqdn():
#Better to use 'df -T' to determine filesystem of directory
#Can do this with psutil Python lib, but need to also find mount point of file
if dir is not None:
if 'lustre' in str(subprocess.check_output(['df','-T'])):
if os.path.exists(dir):
if threads is None:
threads = cpu_count()
cmd = ['lfs', 'setstripe', dir, '-c', str(threads)]
print(' '.join(cmd))
subprocess.call(cmd)
#This is a shared directory for files like LULC, used by multiple tools
#Default location is $HOME/data
#Can specify in ~/.bashrc or ~/.profile
#export DATADIR=$HOME/data
def get_datadir():
default_datadir = os.path.join(os.path.expanduser('~'), 'data')
datadir = os.environ.get('DATADIR', default_datadir)
if not os.path.exists(datadir):
os.makedirs(datadir)
return datadir
#Function to get files using urllib
#This works with ftp
def getfile(url, outdir=None):
"""Function to fetch files using urllib
Works with ftp
"""
fn = os.path.split(url)[-1]
if outdir is not None:
fn = os.path.join(outdir, fn)
if not os.path.exists(fn):
#Find appropriate urlretrieve for Python 2 and 3
try:
from urllib.request import urlretrieve
except ImportError:
from urllib import urlretrieve
print("Retrieving: %s" % url)
#Add progress bar
urlretrieve(url, fn)
return fn
#Function to get files using requests
#Works with https authentication
def getfile2(url, auth=None, outdir=None):
"""Function to fetch files using requests
Works with https authentication
"""
import requests
print("Retrieving: %s" % url)
fn = os.path.split(url)[-1]
if outdir is not None:
fn = os.path.join(outdir, fn)
if auth is not None:
r = requests.get(url, stream=True, auth=auth)
else:
r = requests.get(url, stream=True)
chunk_size = 1000000
with open(fn, 'wb') as fd:
for chunk in r.iter_content(chunk_size):
fd.write(chunk)
#Get necessary credentials to access MODSCAG products - hopefully this will soon be archived with NSIDC
def get_auth():
"""Get authorization token for https
"""
import getpass
from requests.auth import HTTPDigestAuth
#This binds raw_input to input for Python 2
input_func = input
try:
input_func = raw_input
except NameError:
pass
uname = input_func("MODSCAG Username:")
pw = getpass.getpass("MODSCAG Password:")
auth = HTTPDigestAuth(uname, pw)
#wget -A'h8v4*snow_fraction.tif' --user=uname --password=pw
return auth
def readcsv(fn):
"""
Wrapper to read arbitrary csv, check for header
Needs some work to be more robust, quickly added for demcoreg sampling
"""
import csv
#Check first line for header
with open(fn, 'r') as f:
reader = csv.DictReader(f)
hdr = reader.fieldnames
#Assume there is a header on first line, check
skiprows = 1
if all(f.isdigit() for f in hdr):
hdr = None
skiprows = 0
#Check header for lat/lon/z or x/y/z tags
#Should probably do genfromtxt here if header exists and dtype of cols is variable
pts = np.loadtxt(fn, delimiter=',', skiprows=skiprows, dtype=None)
return pts
| [] | [] | [
"DATADIR"
] | [] | ["DATADIR"] | python | 1 | 0 | |
test/binary_test.go | package test
import (
"fmt"
"testing"
)
func Test_binary(t *testing.T) {
for i := 110; i > 40; i-- {
raw := []int64{100, 90, 80, 70, 70, 60, 50, 50}
low, mid := binarySearch(raw, int64(i))
ire := []int64{int64(i)}
if mid == 0 {
newRe := append(ire, raw[low:]...)
raw = append(raw[:low], newRe...)
} else {
newRe := append(ire, raw[mid:]...)
raw = append(raw[:mid], newRe...)
}
println(fmt.Sprintf("new raw:%+v", raw))
}
}
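// binarySearch looks up salience in a descending-sorted slice and returns (low, mid). Note the
// inner "mid :=" shadows the outer mid, so when the value is absent the function falls through
// and returns mid == 0, which the insertion logic above relies on.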
func binarySearch(re []int64, salience int64) (int, int) {
low := 0
high := len(re) - 1
mid := 0
for low <= high {
mid := (low + high) / 2
if re[mid] == salience {
return low, mid
}
if re[mid] < salience {
high = mid - 1
} else {
low = mid + 1
}
}
return low, mid
}
| [] | [] | [] | [] | [] | go | null | null | null |
services/core/main.go | package main
import (
"errors"
"fmt"
"google.golang.org/grpc"
"log"
"net"
"os"
"os/signal"
"runtime/debug"
"syscall"
"telegram_boxes/services/core/app"
"telegram_boxes/services/core/app/admin"
"telegram_boxes/services/core/app/box"
"telegram_boxes/services/core/app/db"
slog "telegram_boxes/services/core/app/log"
"telegram_boxes/services/core/protobuf"
)
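// main wires up the logger, Mongo connection, admin client and box clients, then serves the
// Servers and Tasks gRPC APIs on CORE_PORT.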
func main() {
logger, errLogger := slog.CreateLogger(os.Getenv("LOGS_HOST"), os.Getenv("LOGS_PORT"))
if errLogger != nil {
log.Fatal(errLogger)
return
}
dbConnect, err := db.InitDatabaseConnect(
os.Getenv("MONGO_HOST"), os.Getenv("MONGO_PORT"),
os.Getenv("MONGO_USERNAME"), os.Getenv("MONGO_PASSWORD"),
os.Getenv("MONGO_DATABASE"), os.Getenv("MONGO_MECHANISM"),
)
if err != nil {
_ = logger.System(err.Error())
return
}
defer dbConnect.Close()
adminClient, errAdmin := admin.CreateClient(os.Getenv("ADMIN_HOST"), os.Getenv("ADMIN_PORT"))
if errAdmin != nil {
_ = logger.System(errAdmin.Error())
return
}
//Create new server
s := protobuf.CreateServer(dbConnect, logger, adminClient, box.CreateClients(dbConnect))
//
defer recovery(s.Log())
go waitForShutdown(s)
lis, errCreateConn := net.Listen("tcp", fmt.Sprintf(":%s", os.Getenv("CORE_PORT")))
if errCreateConn != nil {
_ = logger.System(fmt.Sprintf("failed to listen: %v", err))
return
}
GRPCServer := grpc.NewServer(
grpc.UnaryInterceptor(logger.Interceptor),
)
protobuf.RegisterServersServer(GRPCServer, s)
protobuf.RegisterTasksServer(GRPCServer, s)
_ = logger.System(fmt.Sprintf("Protobuf CORE started on :%s", os.Getenv("CORE_PORT")))
err = GRPCServer.Serve(lis)
if err != nil {
_ = logger.System(fmt.Sprintf("failed to serve: %s" + err.Error()))
}
return
}
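// waitForShutdown blocks until SIGINT/SIGTERM, marks every bot as fatal in the database,
// notifies the admin service, and exits.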
func waitForShutdown(b protobuf.MainServer) {
interruptChan := make(chan os.Signal, 1)
signal.Notify(interruptChan, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
// Block until we receive our signal.
<-interruptChan
session := b.DB().GetMainSession().Clone()
defer session.Close()
servers, _ := b.DB().Models().Bots().GetAll(session)
for _, s := range servers {
s.Status = app.StatusFatal.String()
b.DB().Models().Bots().UpdateBot(s, session)
_ = b.Admin().SendError(s.Status, s.UserName, "Core is shutting down")
}
os.Exit(0)
}
//Recovery application out of panic
func recovery(l slog.Client) {
var err error
r := recover()
if r != nil {
switch t := r.(type) {
case string:
err = errors.New(t)
case error:
err = t
default:
err = errors.New("Unknown error ")
}
_ = l.System("RECOVERY :" + err.Error() + "\n" + string(debug.Stack()))
}
}
| [
"\"LOGS_HOST\"",
"\"LOGS_PORT\"",
"\"MONGO_HOST\"",
"\"MONGO_PORT\"",
"\"MONGO_USERNAME\"",
"\"MONGO_PASSWORD\"",
"\"MONGO_DATABASE\"",
"\"MONGO_MECHANISM\"",
"\"ADMIN_HOST\"",
"\"ADMIN_PORT\"",
"\"CORE_PORT\"",
"\"CORE_PORT\""
] | [] | [
"MONGO_HOST",
"MONGO_DATABASE",
"LOGS_HOST",
"MONGO_USERNAME",
"CORE_PORT",
"ADMIN_HOST",
"MONGO_MECHANISM",
"MONGO_PORT",
"LOGS_PORT",
"MONGO_PASSWORD",
"ADMIN_PORT"
] | [] | ["MONGO_HOST", "MONGO_DATABASE", "LOGS_HOST", "MONGO_USERNAME", "CORE_PORT", "ADMIN_HOST", "MONGO_MECHANISM", "MONGO_PORT", "LOGS_PORT", "MONGO_PASSWORD", "ADMIN_PORT"] | go | 11 | 0 | |
tests/upgrade_latest_k8s_test.go | package mos_test
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"strings"
"time"
"github.com/c3os-io/c3os/tests/machine"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
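// sucYAML renders a system-upgrade-controller Plan that upgrades every node to the given
// container image and version.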
func sucYAML(image, version string) string {
return `
---
apiVersion: upgrade.cattle.io/v1
kind: Plan
metadata:
name: os-upgrade
namespace: system-upgrade
labels:
k3s-upgrade: server
spec:
concurrency: 1
version: "` + version + `"
nodeSelector:
matchExpressions:
- {key: kubernetes.io/hostname, operator: Exists}
serviceAccountName: system-upgrade
cordon: false
upgrade:
image: "` + image + `"
command:
- "/usr/sbin/suc-upgrade"
`
}
var _ = Describe("k3s upgrade test from k8s", Label("upgrade-latest-with-kubernetes"), func() {
containerImage := os.Getenv("CONTAINER_IMAGE")
BeforeEach(func() {
machine.EventuallyConnects()
})
AfterEach(func() {
if CurrentGinkgoTestDescription().Failed {
gatherLogs()
}
})
Context("live cd", func() {
It("has default service active", func() {
if containerImage == "" {
Fail("CONTAINER_IMAGE needs to be set")
}
if os.Getenv("FLAVOR") == "alpine" {
out, _ := machine.Sudo("rc-status")
Expect(out).Should(ContainSubstring("c3os"))
Expect(out).Should(ContainSubstring("c3os-agent"))
} else {
// Eventually(func() string {
// out, _ := machine.SSHCommand("sudo systemctl status c3os-agent")
// return out
// }, 30*time.Second, 10*time.Second).Should(ContainSubstring("no network token"))
out, _ := machine.Sudo("systemctl status c3os")
Expect(out).Should(ContainSubstring("loaded (/etc/systemd/system/c3os.service; enabled; vendor preset: disabled)"))
}
})
})
Context("install", func() {
It("to disk with custom config", func() {
err := machine.SendFile("assets/single.yaml", "/tmp/config.yaml", "0770")
Expect(err).ToNot(HaveOccurred())
out, _ := machine.Sudo("elemental install --cloud-init /tmp/config.yaml /dev/sda")
Expect(out).Should(ContainSubstring("Running after-install hook"))
fmt.Println(out)
machine.Sudo("sync")
machine.DetachCD()
machine.Restart()
})
})
Context("first-boot", func() {
It("has default services on", func() {
if os.Getenv("FLAVOR") == "alpine" {
out, _ := machine.Sudo("rc-status")
Expect(out).Should(ContainSubstring("c3os"))
Expect(out).Should(ContainSubstring("c3os-agent"))
} else {
// Eventually(func() string {
// out, _ := machine.SSHCommand("sudo systemctl status c3os-agent")
// return out
// }, 30*time.Second, 10*time.Second).Should(ContainSubstring("no network token"))
out, _ := machine.Sudo("systemctl status c3os-agent")
Expect(out).Should(ContainSubstring("loaded (/etc/systemd/system/c3os-agent.service; enabled; vendor preset: disabled)"))
out, _ = machine.Sudo("systemctl status systemd-timesyncd")
Expect(out).Should(ContainSubstring("loaded (/usr/lib/systemd/system/systemd-timesyncd.service; enabled; vendor preset: disabled)"))
}
})
It("upgrades from kubernetes", func() {
Eventually(func() string {
var out string
if os.Getenv("FLAVOR") == "alpine" {
out, _ = machine.Sudo("cat /var/log/c3os-agent.log")
} else {
out, _ = machine.Sudo("systemctl status c3os-agent")
}
return out
}, 900*time.Second, 10*time.Second).Should(ContainSubstring("One time bootstrap starting"))
Eventually(func() string {
out, _ := machine.Sudo("cat /etc/rancher/k3s/k3s.yaml")
return out
}, 900*time.Second, 10*time.Second).Should(ContainSubstring("https:"))
kubectl := func(s string) (string, error) {
return machine.Sudo("k3s kubectl " + s)
}
currentVersion, err := machine.SSHCommand("source /etc/os-release; echo $VERSION")
Expect(err).ToNot(HaveOccurred())
Expect(currentVersion).To(ContainSubstring("c3OS"))
By("installing system-upgrade-controller", func() {
resp, err := http.Get("https://github.com/rancher/system-upgrade-controller/releases/download/v0.9.1/system-upgrade-controller.yaml")
Expect(err).ToNot(HaveOccurred())
defer resp.Body.Close()
data := bytes.NewBuffer([]byte{})
_, err = io.Copy(data, resp.Body)
Expect(err).ToNot(HaveOccurred())
temp, err := ioutil.TempFile("", "temp")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(temp.Name())
err = ioutil.WriteFile(temp.Name(), data.Bytes(), os.ModePerm)
Expect(err).ToNot(HaveOccurred())
err = machine.SendFile(temp.Name(), "/tmp/kubectl.yaml", "0770")
Expect(err).ToNot(HaveOccurred())
Eventually(func() string {
out, _ := kubectl("apply -f /tmp/kubectl.yaml")
return out
}, 900*time.Second, 10*time.Second).Should(ContainSubstring("unchanged"))
})
By("triggering an upgrade", func() {
suc := sucYAML(strings.ReplaceAll(containerImage, ":8h", ""), "8h")
err := ioutil.WriteFile("assets/generated.yaml", []byte(suc), os.ModePerm)
Expect(err).ToNot(HaveOccurred())
err = machine.SendFile("assets/generated.yaml", "./suc.yaml", "0770")
Expect(err).ToNot(HaveOccurred())
fmt.Println(suc)
Eventually(func() string {
out, _ := kubectl("apply -f suc.yaml")
fmt.Println(out)
return out
}, 900*time.Second, 10*time.Second).Should(ContainSubstring("created"))
Eventually(func() string {
out, _ := kubectl("get pods -A")
fmt.Println(out)
return out
}, 900*time.Second, 10*time.Second).Should(ContainSubstring("apply-os-upgrade-on-"))
Eventually(func() string {
out, _ := kubectl("get pods -A")
fmt.Println(out)
version, err := machine.SSHCommand("source /etc/os-release; echo $VERSION")
if err != nil || !strings.Contains(version, "c3OS") {
// If we met error, keep going with the Eventually
return currentVersion
}
return version
}, 20*time.Minute, 10*time.Second).ShouldNot(Equal(currentVersion))
})
})
})
})
| [
"\"CONTAINER_IMAGE\"",
"\"FLAVOR\"",
"\"FLAVOR\"",
"\"FLAVOR\""
] | [] | [
"CONTAINER_IMAGE",
"FLAVOR"
] | [] | ["CONTAINER_IMAGE", "FLAVOR"] | go | 2 | 0 | |
test/lib/ufe/testChildFilter.py | #!/usr/bin/env python
#
# Copyright 2020 Autodesk
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import maya.cmds as cmds
import mayaUtils
import ufe
import unittest
@unittest.skipIf(os.getenv('UFE_PREVIEW_VERSION_NUM', '0000') < '2022', 'ChildFilterTestCase is only available in Maya with UFE preview version 0.2.22 and greater')
class ChildFilterTestCase(unittest.TestCase):
'''Verify the ChildFilter USD implementation.
'''
pluginsLoaded = False
@classmethod
def setUpClass(cls):
if not cls.pluginsLoaded:
cls.pluginsLoaded = mayaUtils.isMayaUsdPluginLoaded()
def setUp(self):
''' Called initially to set up the Maya test environment '''
# Load plugins
self.assertTrue(self.pluginsLoaded)
# Open ballset.ma scene in testSamples
mayaUtils.openGroupBallsScene()
# Clear selection to start off
cmds.select(clear=True)
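# Deactivating a prim removes it from children(); enabling the USD "InactivePrims" child filter makes it visible again via filteredChildren().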
def testFilteredChildren(self):
# Check that we have six balls
propsPath = ufe.PathString.path('|transform1|proxyShape1,/Ball_set/Props')
propsItem = ufe.Hierarchy.createItem(propsPath)
propsHier = ufe.Hierarchy.hierarchy(propsItem)
self.assertEqual(6, len(propsHier.children()))
# Deactivate Ball_3 (which will remove it from children)
ball3Path = ufe.PathString.path('|transform1|proxyShape1,/Ball_set/Props/Ball_3')
ball3Hier = ufe.Hierarchy.createItem(ball3Path)
cmds.delete('|transform1|proxyShape1,/Ball_set/Props/Ball_3')
# Props should now have 5 children and ball3 should not be one of them.
children = propsHier.children()
self.assertEqual(5, len(children))
self.assertNotIn(ball3Hier, children)
# Ensure we have one USD child filter
rid = ufe.RunTimeMgr.instance().getId('USD')
usdHierHndlr = ufe.RunTimeMgr.instance().hierarchyHandler(rid)
cf = usdHierHndlr.childFilter()
self.assertEqual(1, len(cf))
# Make sure the USD hierarchy handler has an inactive prims filter
self.assertEqual('InactivePrims', cf[0].name)
# Toggle "Inactive Prims" on and get the filtered children
# (with inactive prims) and verify ball3 is one of them.
cf[0].value = True
children = propsHier.filteredChildren(cf)
self.assertEqual(6, len(children))
self.assertIn(ball3Hier, children)
| [] | [] | [
"UFE_PREVIEW_VERSION_NUM"
] | [] | ["UFE_PREVIEW_VERSION_NUM"] | python | 1 | 0 | |
lib/dispatchcloud/dispatcher_test.go | // Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0
package dispatchcloud
import (
"context"
"encoding/json"
"io/ioutil"
"math/rand"
"net/http"
"net/http/httptest"
"os"
"sync"
"time"
"git.arvados.org/arvados.git/lib/dispatchcloud/test"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/arvadostest"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/crypto/ssh"
check "gopkg.in/check.v1"
)
var _ = check.Suite(&DispatcherSuite{})
type DispatcherSuite struct {
ctx context.Context
cancel context.CancelFunc
cluster *arvados.Cluster
stubDriver *test.StubDriver
disp *dispatcher
}
func (s *DispatcherSuite) SetUpTest(c *check.C) {
s.ctx, s.cancel = context.WithCancel(context.Background())
s.ctx = ctxlog.Context(s.ctx, ctxlog.TestLogger(c))
dispatchpub, _ := test.LoadTestKey(c, "test/sshkey_dispatch")
dispatchprivraw, err := ioutil.ReadFile("test/sshkey_dispatch")
c.Assert(err, check.IsNil)
_, hostpriv := test.LoadTestKey(c, "test/sshkey_vm")
s.stubDriver = &test.StubDriver{
HostKey: hostpriv,
AuthorizedKeys: []ssh.PublicKey{dispatchpub},
ErrorRateDestroy: 0.1,
MinTimeBetweenCreateCalls: time.Millisecond,
}
s.cluster = &arvados.Cluster{
ManagementToken: "test-management-token",
Containers: arvados.ContainersConfig{
DispatchPrivateKey: string(dispatchprivraw),
StaleLockTimeout: arvados.Duration(5 * time.Millisecond),
CloudVMs: arvados.CloudVMsConfig{
Driver: "test",
SyncInterval: arvados.Duration(10 * time.Millisecond),
TimeoutIdle: arvados.Duration(150 * time.Millisecond),
TimeoutBooting: arvados.Duration(150 * time.Millisecond),
TimeoutProbe: arvados.Duration(15 * time.Millisecond),
TimeoutShutdown: arvados.Duration(5 * time.Millisecond),
MaxCloudOpsPerSecond: 500,
PollInterval: arvados.Duration(5 * time.Millisecond),
ProbeInterval: arvados.Duration(5 * time.Millisecond),
MaxProbesPerSecond: 1000,
TimeoutSignal: arvados.Duration(3 * time.Millisecond),
TimeoutStaleRunLock: arvados.Duration(3 * time.Millisecond),
TimeoutTERM: arvados.Duration(20 * time.Millisecond),
ResourceTags: map[string]string{"testtag": "test value"},
TagKeyPrefix: "test:",
},
},
InstanceTypes: arvados.InstanceTypeMap{
test.InstanceType(1).Name: test.InstanceType(1),
test.InstanceType(2).Name: test.InstanceType(2),
test.InstanceType(3).Name: test.InstanceType(3),
test.InstanceType(4).Name: test.InstanceType(4),
test.InstanceType(6).Name: test.InstanceType(6),
test.InstanceType(8).Name: test.InstanceType(8),
test.InstanceType(16).Name: test.InstanceType(16),
},
}
arvadostest.SetServiceURL(&s.cluster.Services.DispatchCloud, "http://localhost:/")
arvadostest.SetServiceURL(&s.cluster.Services.Controller, "https://"+os.Getenv("ARVADOS_API_HOST")+"/")
arvClient, err := arvados.NewClientFromConfig(s.cluster)
c.Check(err, check.IsNil)
s.disp = &dispatcher{
Cluster: s.cluster,
Context: s.ctx,
ArvClient: arvClient,
AuthToken: arvadostest.AdminToken,
Registry: prometheus.NewRegistry(),
}
// Test cases can modify s.cluster before calling
// initialize(), and then modify private state before calling
// go run().
}
func (s *DispatcherSuite) TearDownTest(c *check.C) {
s.cancel()
s.disp.Close()
}
// DispatchToStubDriver checks that the dispatcher wires everything
// together effectively. It uses a real scheduler and worker pool with
// a fake queue and cloud driver. The fake cloud driver injects
// artificial errors in order to exercise a variety of code paths.
func (s *DispatcherSuite) TestDispatchToStubDriver(c *check.C) {
Drivers["test"] = s.stubDriver
s.disp.setupOnce.Do(s.disp.initialize)
queue := &test.Queue{
ChooseType: func(ctr *arvados.Container) (arvados.InstanceType, error) {
return ChooseInstanceType(s.cluster, ctr)
},
Logger: ctxlog.TestLogger(c),
}
for i := 0; i < 200; i++ {
queue.Containers = append(queue.Containers, arvados.Container{
UUID: test.ContainerUUID(i + 1),
State: arvados.ContainerStateQueued,
Priority: int64(i%20 + 1),
RuntimeConstraints: arvados.RuntimeConstraints{
RAM: int64(i%3+1) << 30,
VCPUs: i%8 + 1,
},
})
}
s.disp.queue = queue
var mtx sync.Mutex
done := make(chan struct{})
waiting := map[string]struct{}{}
for _, ctr := range queue.Containers {
waiting[ctr.UUID] = struct{}{}
}
finishContainer := func(ctr arvados.Container) {
mtx.Lock()
defer mtx.Unlock()
if _, ok := waiting[ctr.UUID]; !ok {
c.Errorf("container completed twice: %s", ctr.UUID)
return
}
delete(waiting, ctr.UUID)
if len(waiting) == 0 {
close(done)
}
}
executeContainer := func(ctr arvados.Container) int {
finishContainer(ctr)
return int(rand.Uint32() & 0x3)
}
n := 0
s.stubDriver.Queue = queue
s.stubDriver.SetupVM = func(stubvm *test.StubVM) {
n++
stubvm.Boot = time.Now().Add(time.Duration(rand.Int63n(int64(5 * time.Millisecond))))
stubvm.CrunchRunDetachDelay = time.Duration(rand.Int63n(int64(10 * time.Millisecond)))
stubvm.ExecuteContainer = executeContainer
stubvm.CrashRunningContainer = finishContainer
switch n % 7 {
case 0:
stubvm.Broken = time.Now().Add(time.Duration(rand.Int63n(90)) * time.Millisecond)
case 1:
stubvm.CrunchRunMissing = true
case 2:
stubvm.ReportBroken = time.Now().Add(time.Duration(rand.Int63n(200)) * time.Millisecond)
default:
stubvm.CrunchRunCrashRate = 0.1
stubvm.ArvMountDeadlockRate = 0.1
}
}
s.stubDriver.Bugf = c.Errorf
start := time.Now()
go s.disp.run()
err := s.disp.CheckHealth()
c.Check(err, check.IsNil)
select {
case <-done:
c.Logf("containers finished (%s), waiting for instances to shutdown and queue to clear", time.Since(start))
case <-time.After(10 * time.Second):
c.Fatalf("timed out; still waiting for %d containers: %q", len(waiting), waiting)
}
deadline := time.Now().Add(5 * time.Second)
for range time.NewTicker(10 * time.Millisecond).C {
insts, err := s.stubDriver.InstanceSets()[0].Instances(nil)
c.Check(err, check.IsNil)
queue.Update()
ents, _ := queue.Entries()
if len(ents) == 0 && len(insts) == 0 {
break
}
if time.Now().After(deadline) {
c.Fatalf("timed out with %d containers (%v), %d instances (%+v)", len(ents), ents, len(insts), insts)
}
}
req := httptest.NewRequest("GET", "/metrics", nil)
req.Header.Set("Authorization", "Bearer "+s.cluster.ManagementToken)
resp := httptest.NewRecorder()
s.disp.ServeHTTP(resp, req)
c.Check(resp.Code, check.Equals, http.StatusOK)
c.Check(resp.Body.String(), check.Matches, `(?ms).*driver_operations{error="0",operation="Create"} [^0].*`)
c.Check(resp.Body.String(), check.Matches, `(?ms).*driver_operations{error="0",operation="List"} [^0].*`)
c.Check(resp.Body.String(), check.Matches, `(?ms).*driver_operations{error="0",operation="Destroy"} [^0].*`)
c.Check(resp.Body.String(), check.Matches, `(?ms).*driver_operations{error="1",operation="Create"} [^0].*`)
c.Check(resp.Body.String(), check.Matches, `(?ms).*driver_operations{error="1",operation="List"} 0\n.*`)
c.Check(resp.Body.String(), check.Matches, `(?ms).*boot_outcomes{outcome="aborted"} 0.*`)
c.Check(resp.Body.String(), check.Matches, `(?ms).*boot_outcomes{outcome="disappeared"} [^0].*`)
c.Check(resp.Body.String(), check.Matches, `(?ms).*boot_outcomes{outcome="failure"} [^0].*`)
c.Check(resp.Body.String(), check.Matches, `(?ms).*boot_outcomes{outcome="success"} [^0].*`)
c.Check(resp.Body.String(), check.Matches, `(?ms).*instances_disappeared{state="shutdown"} [^0].*`)
c.Check(resp.Body.String(), check.Matches, `(?ms).*instances_disappeared{state="unknown"} 0\n.*`)
c.Check(resp.Body.String(), check.Matches, `(?ms).*time_to_ssh_seconds{quantile="0.95"} [0-9.]*`)
c.Check(resp.Body.String(), check.Matches, `(?ms).*time_to_ssh_seconds_count [0-9]*`)
c.Check(resp.Body.String(), check.Matches, `(?ms).*time_to_ssh_seconds_sum [0-9.]*`)
c.Check(resp.Body.String(), check.Matches, `(?ms).*time_to_ready_for_container_seconds{quantile="0.95"} [0-9.]*`)
c.Check(resp.Body.String(), check.Matches, `(?ms).*time_to_ready_for_container_seconds_count [0-9]*`)
c.Check(resp.Body.String(), check.Matches, `(?ms).*time_to_ready_for_container_seconds_sum [0-9.]*`)
c.Check(resp.Body.String(), check.Matches, `(?ms).*time_from_shutdown_request_to_disappearance_seconds_count [0-9]*`)
c.Check(resp.Body.String(), check.Matches, `(?ms).*time_from_shutdown_request_to_disappearance_seconds_sum [0-9.]*`)
c.Check(resp.Body.String(), check.Matches, `(?ms).*time_from_queue_to_crunch_run_seconds_count [0-9]*`)
c.Check(resp.Body.String(), check.Matches, `(?ms).*time_from_queue_to_crunch_run_seconds_sum [0-9e+.]*`)
c.Check(resp.Body.String(), check.Matches, `(?ms).*run_probe_duration_seconds_count{outcome="success"} [0-9]*`)
c.Check(resp.Body.String(), check.Matches, `(?ms).*run_probe_duration_seconds_sum{outcome="success"} [0-9e+.]*`)
c.Check(resp.Body.String(), check.Matches, `(?ms).*run_probe_duration_seconds_count{outcome="fail"} [0-9]*`)
c.Check(resp.Body.String(), check.Matches, `(?ms).*run_probe_duration_seconds_sum{outcome="fail"} [0-9e+.]*`)
}
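// TestAPIPermissions checks that management API requests are rejected with 401/403 when the
// token is missing or wrong.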
func (s *DispatcherSuite) TestAPIPermissions(c *check.C) {
s.cluster.ManagementToken = "abcdefgh"
Drivers["test"] = s.stubDriver
s.disp.setupOnce.Do(s.disp.initialize)
s.disp.queue = &test.Queue{}
go s.disp.run()
for _, token := range []string{"abc", ""} {
req := httptest.NewRequest("GET", "/arvados/v1/dispatch/instances", nil)
if token != "" {
req.Header.Set("Authorization", "Bearer "+token)
}
resp := httptest.NewRecorder()
s.disp.ServeHTTP(resp, req)
if token == "" {
c.Check(resp.Code, check.Equals, http.StatusUnauthorized)
} else {
c.Check(resp.Code, check.Equals, http.StatusForbidden)
}
}
}
func (s *DispatcherSuite) TestAPIDisabled(c *check.C) {
s.cluster.ManagementToken = ""
Drivers["test"] = s.stubDriver
s.disp.setupOnce.Do(s.disp.initialize)
s.disp.queue = &test.Queue{}
go s.disp.run()
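	// With no ManagementToken configured, the management API is disabled
	// and every request is rejected with 403, with or without a token.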
for _, token := range []string{"abc", ""} {
req := httptest.NewRequest("GET", "/arvados/v1/dispatch/instances", nil)
if token != "" {
req.Header.Set("Authorization", "Bearer "+token)
}
resp := httptest.NewRecorder()
s.disp.ServeHTTP(resp, req)
c.Check(resp.Code, check.Equals, http.StatusForbidden)
}
}
func (s *DispatcherSuite) TestInstancesAPI(c *check.C) {
s.cluster.ManagementToken = "abcdefgh"
s.cluster.Containers.CloudVMs.TimeoutBooting = arvados.Duration(time.Second)
Drivers["test"] = s.stubDriver
s.disp.setupOnce.Do(s.disp.initialize)
s.disp.queue = &test.Queue{}
go s.disp.run()
type instance struct {
Instance string
WorkerState string `json:"worker_state"`
Price float64
LastContainerUUID string `json:"last_container_uuid"`
ArvadosInstanceType string `json:"arvados_instance_type"`
ProviderInstanceType string `json:"provider_instance_type"`
}
type instancesResponse struct {
Items []instance
}
getInstances := func() instancesResponse {
req := httptest.NewRequest("GET", "/arvados/v1/dispatch/instances", nil)
req.Header.Set("Authorization", "Bearer abcdefgh")
resp := httptest.NewRecorder()
s.disp.ServeHTTP(resp, req)
var sr instancesResponse
c.Check(resp.Code, check.Equals, http.StatusOK)
err := json.Unmarshal(resp.Body.Bytes(), &sr)
c.Check(err, check.IsNil)
return sr
}
sr := getInstances()
c.Check(len(sr.Items), check.Equals, 0)
ch := s.disp.pool.Subscribe()
defer s.disp.pool.Unsubscribe(ch)
ok := s.disp.pool.Create(test.InstanceType(1))
c.Check(ok, check.Equals, true)
<-ch
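	// Poll the instances endpoint for up to a second until the newly
	// created instance appears in the response.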
for deadline := time.Now().Add(time.Second); time.Now().Before(deadline); {
sr = getInstances()
if len(sr.Items) > 0 {
break
}
time.Sleep(time.Millisecond)
}
c.Assert(len(sr.Items), check.Equals, 1)
c.Check(sr.Items[0].Instance, check.Matches, "inst.*")
c.Check(sr.Items[0].WorkerState, check.Equals, "booting")
c.Check(sr.Items[0].Price, check.Equals, 0.123)
c.Check(sr.Items[0].LastContainerUUID, check.Equals, "")
c.Check(sr.Items[0].ProviderInstanceType, check.Equals, test.InstanceType(1).ProviderType)
c.Check(sr.Items[0].ArvadosInstanceType, check.Equals, test.InstanceType(1).Name)
}
| ["\"ARVADOS_API_HOST\""] | [] | ["ARVADOS_API_HOST"] | [] | ["ARVADOS_API_HOST"] | go | 1 | 0 | |
test/unit/common/test_utils.py | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for swift.common.utils"""
from __future__ import print_function
from test.unit import temptree, debug_logger, make_timestamp_iter
import ctypes
import contextlib
import errno
import eventlet
import eventlet.event
import functools
import grp
import logging
import platform
import os
import mock
import random
import re
import socket
import sys
import json
import math
import six
from six import BytesIO, StringIO
from six.moves.queue import Queue, Empty
from six.moves import range
from textwrap import dedent
import tempfile
import time
import unittest
import fcntl
import shutil
from getpass import getuser
from shutil import rmtree
from functools import partial
from tempfile import TemporaryFile, NamedTemporaryFile, mkdtemp
from netifaces import AF_INET6
from mock import MagicMock, patch
from nose import SkipTest
from six.moves.configparser import NoSectionError, NoOptionError
from uuid import uuid4
from swift.common.exceptions import Timeout, MessageTimeout, \
ConnectionTimeout, LockTimeout, ReplicationLockTimeout, \
MimeInvalid
from swift.common import utils
from swift.common.utils import is_valid_ip, is_valid_ipv4, is_valid_ipv6
from swift.common.container_sync_realms import ContainerSyncRealms
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.swob import Request, Response
from test.unit import FakeLogger, requires_o_tmpfile_support
threading = eventlet.patcher.original('threading')
class MockOs(object):
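    """Stub replacement for the os module that records which functions were
    called and can be configured to raise OSError from selected functions."""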
def __init__(self, pass_funcs=None, called_funcs=None, raise_funcs=None):
if pass_funcs is None:
pass_funcs = []
if called_funcs is None:
called_funcs = []
if raise_funcs is None:
raise_funcs = []
self.closed_fds = []
for func in pass_funcs:
setattr(self, func, self.pass_func)
self.called_funcs = {}
for func in called_funcs:
c_func = partial(self.called_func, func)
setattr(self, func, c_func)
for func in raise_funcs:
r_func = partial(self.raise_func, func)
setattr(self, func, r_func)
def pass_func(self, *args, **kwargs):
pass
setgroups = chdir = setsid = setgid = setuid = umask = pass_func
def called_func(self, name, *args, **kwargs):
self.called_funcs[name] = True
def raise_func(self, name, *args, **kwargs):
self.called_funcs[name] = True
raise OSError()
def dup2(self, source, target):
self.closed_fds.append(target)
def geteuid(self):
'''Pretend we are running as root.'''
return 0
def __getattr__(self, name):
        # I only override portions of the os module
try:
return object.__getattr__(self, name)
except AttributeError:
return getattr(os, name)
class MockUdpSocket(object):
def __init__(self, sendto_errno=None):
self.sent = []
self.sendto_errno = sendto_errno
def sendto(self, data, target):
if self.sendto_errno:
raise socket.error(self.sendto_errno,
'test errno %s' % self.sendto_errno)
self.sent.append((data, target))
def close(self):
pass
class MockSys(object):
def __init__(self):
self.stdin = TemporaryFile('w')
self.stdout = TemporaryFile('r')
self.stderr = TemporaryFile('r')
self.__stderr__ = self.stderr
self.stdio_fds = [self.stdin.fileno(), self.stdout.fileno(),
self.stderr.fileno()]
def reset_loggers():
if hasattr(utils.get_logger, 'handler4logger'):
for logger, handler in utils.get_logger.handler4logger.items():
logger.removeHandler(handler)
delattr(utils.get_logger, 'handler4logger')
if hasattr(utils.get_logger, 'console_handler4logger'):
for logger, h in utils.get_logger.console_handler4logger.items():
logger.removeHandler(h)
delattr(utils.get_logger, 'console_handler4logger')
# Reset the LogAdapter class thread local state. Use get_logger() here
# to fetch a LogAdapter instance because the items from
# get_logger.handler4logger above are the underlying logger instances,
# not the LogAdapter.
utils.get_logger(None).thread_locals = (None, None)
def reset_logger_state(f):
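    """Decorator that resets global logger handlers and thread-local logger
    state before and after the wrapped test method runs."""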
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
reset_loggers()
try:
return f(self, *args, **kwargs)
finally:
reset_loggers()
return wrapper
class TestTimestamp(unittest.TestCase):
"""Tests for swift.common.utils.Timestamp"""
def test_invalid_input(self):
self.assertRaises(ValueError, utils.Timestamp, time.time(), offset=-1)
def test_invalid_string_conversion(self):
t = utils.Timestamp(time.time())
self.assertRaises(TypeError, str, t)
def test_offset_limit(self):
t = 1417462430.78693
        # can't have an offset above MAX_OFFSET
self.assertRaises(ValueError, utils.Timestamp, t,
offset=utils.MAX_OFFSET + 1)
# exactly max offset is fine
ts = utils.Timestamp(t, offset=utils.MAX_OFFSET)
self.assertEqual(ts.internal, '1417462430.78693_ffffffffffffffff')
# but you can't offset it further
self.assertRaises(ValueError, utils.Timestamp, ts.internal, offset=1)
# unless you start below it
ts = utils.Timestamp(t, offset=utils.MAX_OFFSET - 1)
self.assertEqual(utils.Timestamp(ts.internal, offset=1),
'1417462430.78693_ffffffffffffffff')
def test_normal_format_no_offset(self):
expected = '1402436408.91203'
test_values = (
'1402436408.91203',
'1402436408.91203_00000000',
'1402436408.912030000',
'1402436408.912030000_0000000000000',
'000001402436408.912030000',
'000001402436408.912030000_0000000000',
1402436408.91203,
1402436408.912029,
1402436408.9120300000000000,
1402436408.91202999999999999,
utils.Timestamp(1402436408.91203),
utils.Timestamp(1402436408.91203, offset=0),
utils.Timestamp(1402436408.912029),
utils.Timestamp(1402436408.912029, offset=0),
utils.Timestamp('1402436408.91203'),
utils.Timestamp('1402436408.91203', offset=0),
utils.Timestamp('1402436408.91203_00000000'),
utils.Timestamp('1402436408.91203_00000000', offset=0),
)
for value in test_values:
timestamp = utils.Timestamp(value)
self.assertEqual(timestamp.normal, expected)
# timestamp instance can also compare to string or float
self.assertEqual(timestamp, expected)
self.assertEqual(timestamp, float(expected))
self.assertEqual(timestamp, utils.normalize_timestamp(expected))
def test_isoformat(self):
expected = '2014-06-10T22:47:32.054580'
test_values = (
'1402440452.05458',
'1402440452.054579',
'1402440452.05458_00000000',
'1402440452.054579_00000000',
'1402440452.054580000',
'1402440452.054579999',
'1402440452.054580000_0000000000000',
'1402440452.054579999_0000ff00',
'000001402440452.054580000',
'000001402440452.0545799',
'000001402440452.054580000_0000000000',
'000001402440452.054579999999_00000fffff',
1402440452.05458,
1402440452.054579,
1402440452.0545800000000000,
1402440452.054579999,
utils.Timestamp(1402440452.05458),
utils.Timestamp(1402440452.0545799),
utils.Timestamp(1402440452.05458, offset=0),
utils.Timestamp(1402440452.05457999999, offset=0),
utils.Timestamp(1402440452.05458, offset=100),
utils.Timestamp(1402440452.054579, offset=100),
utils.Timestamp('1402440452.05458'),
utils.Timestamp('1402440452.054579999'),
utils.Timestamp('1402440452.05458', offset=0),
utils.Timestamp('1402440452.054579', offset=0),
utils.Timestamp('1402440452.05458', offset=300),
utils.Timestamp('1402440452.05457999', offset=300),
utils.Timestamp('1402440452.05458_00000000'),
utils.Timestamp('1402440452.05457999_00000000'),
utils.Timestamp('1402440452.05458_00000000', offset=0),
utils.Timestamp('1402440452.05457999_00000aaa', offset=0),
utils.Timestamp('1402440452.05458_00000000', offset=400),
utils.Timestamp('1402440452.054579_0a', offset=400),
)
for value in test_values:
self.assertEqual(utils.Timestamp(value).isoformat, expected)
expected = '1970-01-01T00:00:00.000000'
test_values = (
'0',
'0000000000.00000',
'0000000000.00000_ffffffffffff',
0,
0.0,
)
for value in test_values:
self.assertEqual(utils.Timestamp(value).isoformat, expected)
def test_not_equal(self):
ts = '1402436408.91203_0000000000000001'
test_values = (
utils.Timestamp('1402436408.91203_0000000000000002'),
utils.Timestamp('1402436408.91203'),
utils.Timestamp(1402436408.91203),
utils.Timestamp(1402436408.91204),
utils.Timestamp(1402436408.91203, offset=0),
utils.Timestamp(1402436408.91203, offset=2),
)
for value in test_values:
self.assertTrue(value != ts)
self.assertIs(True, utils.Timestamp(ts) == ts) # sanity
self.assertIs(False, utils.Timestamp(ts) != utils.Timestamp(ts))
self.assertIs(False, utils.Timestamp(ts) != ts)
self.assertIs(False, utils.Timestamp(ts) is None)
self.assertIs(True, utils.Timestamp(ts) is not None)
def test_no_force_internal_no_offset(self):
"""Test that internal is the same as normal with no offset"""
with mock.patch('swift.common.utils.FORCE_INTERNAL', new=False):
self.assertEqual(utils.Timestamp(0).internal, '0000000000.00000')
self.assertEqual(utils.Timestamp(1402437380.58186).internal,
'1402437380.58186')
self.assertEqual(utils.Timestamp(1402437380.581859).internal,
'1402437380.58186')
self.assertEqual(utils.Timestamp(0).internal,
utils.normalize_timestamp(0))
def test_no_force_internal_with_offset(self):
"""Test that internal always includes the offset if significant"""
with mock.patch('swift.common.utils.FORCE_INTERNAL', new=False):
self.assertEqual(utils.Timestamp(0, offset=1).internal,
'0000000000.00000_0000000000000001')
self.assertEqual(
utils.Timestamp(1402437380.58186, offset=16).internal,
'1402437380.58186_0000000000000010')
self.assertEqual(
utils.Timestamp(1402437380.581859, offset=240).internal,
'1402437380.58186_00000000000000f0')
self.assertEqual(
utils.Timestamp('1402437380.581859_00000001',
offset=240).internal,
'1402437380.58186_00000000000000f1')
def test_force_internal(self):
"""Test that internal always includes the offset if forced"""
with mock.patch('swift.common.utils.FORCE_INTERNAL', new=True):
self.assertEqual(utils.Timestamp(0).internal,
'0000000000.00000_0000000000000000')
self.assertEqual(utils.Timestamp(1402437380.58186).internal,
'1402437380.58186_0000000000000000')
self.assertEqual(utils.Timestamp(1402437380.581859).internal,
'1402437380.58186_0000000000000000')
self.assertEqual(utils.Timestamp(0, offset=1).internal,
'0000000000.00000_0000000000000001')
self.assertEqual(
utils.Timestamp(1402437380.58186, offset=16).internal,
'1402437380.58186_0000000000000010')
self.assertEqual(
utils.Timestamp(1402437380.581859, offset=16).internal,
'1402437380.58186_0000000000000010')
def test_internal_format_no_offset(self):
expected = '1402436408.91203_0000000000000000'
test_values = (
'1402436408.91203',
'1402436408.91203_00000000',
'1402436408.912030000',
'1402436408.912030000_0000000000000',
'000001402436408.912030000',
'000001402436408.912030000_0000000000',
1402436408.91203,
1402436408.9120300000000000,
1402436408.912029,
1402436408.912029999999999999,
utils.Timestamp(1402436408.91203),
utils.Timestamp(1402436408.91203, offset=0),
utils.Timestamp(1402436408.912029),
utils.Timestamp(1402436408.91202999999999999, offset=0),
utils.Timestamp('1402436408.91203'),
utils.Timestamp('1402436408.91203', offset=0),
utils.Timestamp('1402436408.912029'),
utils.Timestamp('1402436408.912029', offset=0),
utils.Timestamp('1402436408.912029999999999'),
utils.Timestamp('1402436408.912029999999999', offset=0),
)
for value in test_values:
# timestamp instance is always equivalent
self.assertEqual(utils.Timestamp(value), expected)
if utils.FORCE_INTERNAL:
# the FORCE_INTERNAL flag makes the internal format always
# include the offset portion of the timestamp even when it's
# not significant and would be bad during upgrades
self.assertEqual(utils.Timestamp(value).internal, expected)
else:
# unless we FORCE_INTERNAL, when there's no offset the
# internal format is equivalent to the normalized format
self.assertEqual(utils.Timestamp(value).internal,
'1402436408.91203')
def test_internal_format_with_offset(self):
expected = '1402436408.91203_00000000000000f0'
test_values = (
'1402436408.91203_000000f0',
'1402436408.912030000_0000000000f0',
'1402436408.912029_000000f0',
'1402436408.91202999999_0000000000f0',
'000001402436408.912030000_000000000f0',
'000001402436408.9120299999_000000000f0',
utils.Timestamp(1402436408.91203, offset=240),
utils.Timestamp(1402436408.912029, offset=240),
utils.Timestamp('1402436408.91203', offset=240),
utils.Timestamp('1402436408.91203_00000000', offset=240),
utils.Timestamp('1402436408.91203_0000000f', offset=225),
utils.Timestamp('1402436408.9120299999', offset=240),
utils.Timestamp('1402436408.9120299999_00000000', offset=240),
utils.Timestamp('1402436408.9120299999_00000010', offset=224),
)
for value in test_values:
timestamp = utils.Timestamp(value)
self.assertEqual(timestamp.internal, expected)
# can compare with offset if the string is internalized
self.assertEqual(timestamp, expected)
# if comparison value only includes the normalized portion and the
# timestamp includes an offset, it is considered greater
normal = utils.Timestamp(expected).normal
self.assertTrue(timestamp > normal,
'%r is not bigger than %r given %r' % (
timestamp, normal, value))
self.assertTrue(timestamp > float(normal),
'%r is not bigger than %f given %r' % (
timestamp, float(normal), value))
def test_short_format_with_offset(self):
expected = '1402436408.91203_f0'
timestamp = utils.Timestamp(1402436408.91203, 0xf0)
self.assertEqual(expected, timestamp.short)
expected = '1402436408.91203'
timestamp = utils.Timestamp(1402436408.91203)
self.assertEqual(expected, timestamp.short)
def test_raw(self):
expected = 140243640891203
timestamp = utils.Timestamp(1402436408.91203)
self.assertEqual(expected, timestamp.raw)
# 'raw' does not include offset
timestamp = utils.Timestamp(1402436408.91203, 0xf0)
self.assertEqual(expected, timestamp.raw)
def test_delta(self):
def _assertWithinBounds(expected, timestamp):
tolerance = 0.00001
minimum = expected - tolerance
maximum = expected + tolerance
self.assertTrue(float(timestamp) > minimum)
self.assertTrue(float(timestamp) < maximum)
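        # delta is applied in the timestamp's raw (1e-05 second) resolution,
        # so delta=100 shifts the timestamp by 0.001 seconds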
timestamp = utils.Timestamp(1402436408.91203, delta=100)
_assertWithinBounds(1402436408.91303, timestamp)
self.assertEqual(140243640891303, timestamp.raw)
timestamp = utils.Timestamp(1402436408.91203, delta=-100)
_assertWithinBounds(1402436408.91103, timestamp)
self.assertEqual(140243640891103, timestamp.raw)
timestamp = utils.Timestamp(1402436408.91203, delta=0)
_assertWithinBounds(1402436408.91203, timestamp)
self.assertEqual(140243640891203, timestamp.raw)
# delta is independent of offset
timestamp = utils.Timestamp(1402436408.91203, offset=42, delta=100)
self.assertEqual(140243640891303, timestamp.raw)
self.assertEqual(42, timestamp.offset)
# cannot go negative
self.assertRaises(ValueError, utils.Timestamp, 1402436408.91203,
delta=-140243640891203)
def test_int(self):
expected = 1402437965
test_values = (
'1402437965.91203',
'1402437965.91203_00000000',
'1402437965.912030000',
'1402437965.912030000_0000000000000',
'000001402437965.912030000',
'000001402437965.912030000_0000000000',
1402437965.91203,
1402437965.9120300000000000,
1402437965.912029,
1402437965.912029999999999999,
utils.Timestamp(1402437965.91203),
utils.Timestamp(1402437965.91203, offset=0),
utils.Timestamp(1402437965.91203, offset=500),
utils.Timestamp(1402437965.912029),
utils.Timestamp(1402437965.91202999999999999, offset=0),
utils.Timestamp(1402437965.91202999999999999, offset=300),
utils.Timestamp('1402437965.91203'),
utils.Timestamp('1402437965.91203', offset=0),
utils.Timestamp('1402437965.91203', offset=400),
utils.Timestamp('1402437965.912029'),
utils.Timestamp('1402437965.912029', offset=0),
utils.Timestamp('1402437965.912029', offset=200),
utils.Timestamp('1402437965.912029999999999'),
utils.Timestamp('1402437965.912029999999999', offset=0),
utils.Timestamp('1402437965.912029999999999', offset=100),
)
for value in test_values:
timestamp = utils.Timestamp(value)
self.assertEqual(int(timestamp), expected)
self.assertTrue(timestamp > expected)
def test_float(self):
expected = 1402438115.91203
test_values = (
'1402438115.91203',
'1402438115.91203_00000000',
'1402438115.912030000',
'1402438115.912030000_0000000000000',
'000001402438115.912030000',
'000001402438115.912030000_0000000000',
1402438115.91203,
1402438115.9120300000000000,
1402438115.912029,
1402438115.912029999999999999,
utils.Timestamp(1402438115.91203),
utils.Timestamp(1402438115.91203, offset=0),
utils.Timestamp(1402438115.91203, offset=500),
utils.Timestamp(1402438115.912029),
utils.Timestamp(1402438115.91202999999999999, offset=0),
utils.Timestamp(1402438115.91202999999999999, offset=300),
utils.Timestamp('1402438115.91203'),
utils.Timestamp('1402438115.91203', offset=0),
utils.Timestamp('1402438115.91203', offset=400),
utils.Timestamp('1402438115.912029'),
utils.Timestamp('1402438115.912029', offset=0),
utils.Timestamp('1402438115.912029', offset=200),
utils.Timestamp('1402438115.912029999999999'),
utils.Timestamp('1402438115.912029999999999', offset=0),
utils.Timestamp('1402438115.912029999999999', offset=100),
)
tolerance = 0.00001
minimum = expected - tolerance
maximum = expected + tolerance
for value in test_values:
timestamp = utils.Timestamp(value)
self.assertTrue(float(timestamp) > minimum,
'%f is not bigger than %f given %r' % (
timestamp, minimum, value))
self.assertTrue(float(timestamp) < maximum,
'%f is not smaller than %f given %r' % (
timestamp, maximum, value))
# direct comparison of timestamp works too
self.assertTrue(timestamp > minimum,
'%s is not bigger than %f given %r' % (
timestamp.normal, minimum, value))
self.assertTrue(timestamp < maximum,
'%s is not smaller than %f given %r' % (
timestamp.normal, maximum, value))
# ... even against strings
self.assertTrue(timestamp > '%f' % minimum,
'%s is not bigger than %s given %r' % (
timestamp.normal, minimum, value))
self.assertTrue(timestamp < '%f' % maximum,
'%s is not smaller than %s given %r' % (
timestamp.normal, maximum, value))
def test_false(self):
self.assertFalse(utils.Timestamp(0))
self.assertFalse(utils.Timestamp(0, offset=0))
self.assertFalse(utils.Timestamp('0'))
self.assertFalse(utils.Timestamp('0', offset=0))
self.assertFalse(utils.Timestamp(0.0))
self.assertFalse(utils.Timestamp(0.0, offset=0))
self.assertFalse(utils.Timestamp('0.0'))
self.assertFalse(utils.Timestamp('0.0', offset=0))
self.assertFalse(utils.Timestamp(00000000.00000000))
self.assertFalse(utils.Timestamp(00000000.00000000, offset=0))
self.assertFalse(utils.Timestamp('00000000.00000000'))
self.assertFalse(utils.Timestamp('00000000.00000000', offset=0))
def test_true(self):
self.assertTrue(utils.Timestamp(1))
self.assertTrue(utils.Timestamp(1, offset=1))
self.assertTrue(utils.Timestamp(0, offset=1))
self.assertTrue(utils.Timestamp('1'))
self.assertTrue(utils.Timestamp('1', offset=1))
self.assertTrue(utils.Timestamp('0', offset=1))
self.assertTrue(utils.Timestamp(1.1))
self.assertTrue(utils.Timestamp(1.1, offset=1))
self.assertTrue(utils.Timestamp(0.0, offset=1))
self.assertTrue(utils.Timestamp('1.1'))
self.assertTrue(utils.Timestamp('1.1', offset=1))
self.assertTrue(utils.Timestamp('0.0', offset=1))
self.assertTrue(utils.Timestamp(11111111.11111111))
self.assertTrue(utils.Timestamp(11111111.11111111, offset=1))
self.assertTrue(utils.Timestamp(00000000.00000000, offset=1))
self.assertTrue(utils.Timestamp('11111111.11111111'))
self.assertTrue(utils.Timestamp('11111111.11111111', offset=1))
self.assertTrue(utils.Timestamp('00000000.00000000', offset=1))
def test_greater_no_offset(self):
now = time.time()
older = now - 1
timestamp = utils.Timestamp(now)
test_values = (
0, '0', 0.0, '0.0', '0000.0000', '000.000_000',
1, '1', 1.1, '1.1', '1111.1111', '111.111_111',
1402443112.213252, '1402443112.213252', '1402443112.213252_ffff',
older, '%f' % older, '%f_0000ffff' % older,
)
for value in test_values:
other = utils.Timestamp(value)
self.assertNotEqual(timestamp, other) # sanity
self.assertTrue(timestamp > value,
'%r is not greater than %r given %r' % (
timestamp, value, value))
self.assertTrue(timestamp > other,
'%r is not greater than %r given %r' % (
timestamp, other, value))
self.assertTrue(timestamp > other.normal,
'%r is not greater than %r given %r' % (
timestamp, other.normal, value))
self.assertTrue(timestamp > other.internal,
'%r is not greater than %r given %r' % (
timestamp, other.internal, value))
self.assertTrue(timestamp > float(other),
'%r is not greater than %r given %r' % (
timestamp, float(other), value))
self.assertTrue(timestamp > int(other),
'%r is not greater than %r given %r' % (
timestamp, int(other), value))
def test_greater_with_offset(self):
now = time.time()
older = now - 1
test_values = (
0, '0', 0.0, '0.0', '0000.0000', '000.000_000',
1, '1', 1.1, '1.1', '1111.1111', '111.111_111',
1402443346.935174, '1402443346.93517', '1402443346.935169_ffff',
older, '%f' % older, '%f_0000ffff' % older,
now, '%f' % now, '%f_00000000' % now,
)
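        # any non-zero offset makes the timestamp sort after all of the
        # values above, including the same wall-clock time with no offset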
for offset in range(1, 1000, 100):
timestamp = utils.Timestamp(now, offset=offset)
for value in test_values:
other = utils.Timestamp(value)
self.assertNotEqual(timestamp, other) # sanity
self.assertTrue(timestamp > value,
'%r is not greater than %r given %r' % (
timestamp, value, value))
self.assertTrue(timestamp > other,
'%r is not greater than %r given %r' % (
timestamp, other, value))
self.assertTrue(timestamp > other.normal,
'%r is not greater than %r given %r' % (
timestamp, other.normal, value))
self.assertTrue(timestamp > other.internal,
'%r is not greater than %r given %r' % (
timestamp, other.internal, value))
self.assertTrue(timestamp > float(other),
'%r is not greater than %r given %r' % (
timestamp, float(other), value))
self.assertTrue(timestamp > int(other),
'%r is not greater than %r given %r' % (
timestamp, int(other), value))
def test_smaller_no_offset(self):
now = time.time()
newer = now + 1
timestamp = utils.Timestamp(now)
test_values = (
9999999999.99999, '9999999999.99999', '9999999999.99999_ffff',
newer, '%f' % newer, '%f_0000ffff' % newer,
)
for value in test_values:
other = utils.Timestamp(value)
self.assertNotEqual(timestamp, other) # sanity
self.assertTrue(timestamp < value,
'%r is not smaller than %r given %r' % (
timestamp, value, value))
self.assertTrue(timestamp < other,
'%r is not smaller than %r given %r' % (
timestamp, other, value))
self.assertTrue(timestamp < other.normal,
'%r is not smaller than %r given %r' % (
timestamp, other.normal, value))
self.assertTrue(timestamp < other.internal,
'%r is not smaller than %r given %r' % (
timestamp, other.internal, value))
self.assertTrue(timestamp < float(other),
'%r is not smaller than %r given %r' % (
timestamp, float(other), value))
self.assertTrue(timestamp < int(other),
'%r is not smaller than %r given %r' % (
timestamp, int(other), value))
def test_smaller_with_offset(self):
now = time.time()
newer = now + 1
test_values = (
9999999999.99999, '9999999999.99999', '9999999999.99999_ffff',
newer, '%f' % newer, '%f_0000ffff' % newer,
)
for offset in range(1, 1000, 100):
timestamp = utils.Timestamp(now, offset=offset)
for value in test_values:
other = utils.Timestamp(value)
self.assertNotEqual(timestamp, other) # sanity
self.assertTrue(timestamp < value,
'%r is not smaller than %r given %r' % (
timestamp, value, value))
self.assertTrue(timestamp < other,
'%r is not smaller than %r given %r' % (
timestamp, other, value))
self.assertTrue(timestamp < other.normal,
'%r is not smaller than %r given %r' % (
timestamp, other.normal, value))
self.assertTrue(timestamp < other.internal,
'%r is not smaller than %r given %r' % (
timestamp, other.internal, value))
self.assertTrue(timestamp < float(other),
'%r is not smaller than %r given %r' % (
timestamp, float(other), value))
self.assertTrue(timestamp < int(other),
'%r is not smaller than %r given %r' % (
timestamp, int(other), value))
def test_cmp_with_none(self):
self.assertGreater(utils.Timestamp(0), None)
self.assertGreater(utils.Timestamp(1.0), None)
self.assertGreater(utils.Timestamp(1.0, 42), None)
def test_ordering(self):
given = [
'1402444820.62590_000000000000000a',
'1402444820.62589_0000000000000001',
'1402444821.52589_0000000000000004',
'1402444920.62589_0000000000000004',
'1402444821.62589_000000000000000a',
'1402444821.72589_000000000000000a',
'1402444920.62589_0000000000000002',
'1402444820.62589_0000000000000002',
'1402444820.62589_000000000000000a',
'1402444820.62590_0000000000000004',
'1402444920.62589_000000000000000a',
'1402444820.62590_0000000000000002',
'1402444821.52589_0000000000000002',
'1402444821.52589_0000000000000000',
'1402444920.62589',
'1402444821.62589_0000000000000004',
'1402444821.72589_0000000000000001',
'1402444820.62590',
'1402444820.62590_0000000000000001',
'1402444820.62589_0000000000000004',
'1402444821.72589_0000000000000000',
'1402444821.52589_000000000000000a',
'1402444821.72589_0000000000000004',
'1402444821.62589',
'1402444821.52589_0000000000000001',
'1402444821.62589_0000000000000001',
'1402444821.62589_0000000000000002',
'1402444821.72589_0000000000000002',
'1402444820.62589',
'1402444920.62589_0000000000000001']
expected = [
'1402444820.62589',
'1402444820.62589_0000000000000001',
'1402444820.62589_0000000000000002',
'1402444820.62589_0000000000000004',
'1402444820.62589_000000000000000a',
'1402444820.62590',
'1402444820.62590_0000000000000001',
'1402444820.62590_0000000000000002',
'1402444820.62590_0000000000000004',
'1402444820.62590_000000000000000a',
'1402444821.52589',
'1402444821.52589_0000000000000001',
'1402444821.52589_0000000000000002',
'1402444821.52589_0000000000000004',
'1402444821.52589_000000000000000a',
'1402444821.62589',
'1402444821.62589_0000000000000001',
'1402444821.62589_0000000000000002',
'1402444821.62589_0000000000000004',
'1402444821.62589_000000000000000a',
'1402444821.72589',
'1402444821.72589_0000000000000001',
'1402444821.72589_0000000000000002',
'1402444821.72589_0000000000000004',
'1402444821.72589_000000000000000a',
'1402444920.62589',
'1402444920.62589_0000000000000001',
'1402444920.62589_0000000000000002',
'1402444920.62589_0000000000000004',
'1402444920.62589_000000000000000a',
]
# less visual version
"""
now = time.time()
given = [
utils.Timestamp(now + i, offset=offset).internal
for i in (0, 0.00001, 0.9, 1.0, 1.1, 100.0)
for offset in (0, 1, 2, 4, 10)
]
expected = [t for t in given]
random.shuffle(given)
"""
self.assertEqual(len(given), len(expected)) # sanity
timestamps = [utils.Timestamp(t) for t in given]
# our expected values don't include insignificant offsets
with mock.patch('swift.common.utils.FORCE_INTERNAL', new=False):
self.assertEqual(
[t.internal for t in sorted(timestamps)], expected)
# string sorting works as well
self.assertEqual(
sorted([t.internal for t in timestamps]), expected)
def test_hashable(self):
ts_0 = utils.Timestamp('1402444821.72589')
ts_0_also = utils.Timestamp('1402444821.72589')
self.assertEqual(ts_0, ts_0_also) # sanity
self.assertEqual(hash(ts_0), hash(ts_0_also))
d = {ts_0: 'whatever'}
self.assertIn(ts_0, d) # sanity
self.assertIn(ts_0_also, d)
class TestTimestampEncoding(unittest.TestCase):
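    """Tests for utils.encode_timestamps and utils.decode_timestamps."""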
def setUp(self):
t0 = utils.Timestamp(0.0)
t1 = utils.Timestamp(997.9996)
t2 = utils.Timestamp(999)
t3 = utils.Timestamp(1000, 24)
t4 = utils.Timestamp(1001)
t5 = utils.Timestamp(1002.00040)
# encodings that are expected when explicit = False
self.non_explicit_encodings = (
('0000001000.00000_18', (t3, t3, t3)),
('0000001000.00000_18', (t3, t3, None)),
)
# mappings that are expected when explicit = True
self.explicit_encodings = (
('0000001000.00000_18+0+0', (t3, t3, t3)),
('0000001000.00000_18+0', (t3, t3, None)),
)
# mappings that are expected when explicit = True or False
self.encodings = (
('0000001000.00000_18+0+186a0', (t3, t3, t4)),
('0000001000.00000_18+186a0+186c8', (t3, t4, t5)),
('0000001000.00000_18-186a0+0', (t3, t2, t2)),
('0000001000.00000_18+0-186a0', (t3, t3, t2)),
('0000001000.00000_18-186a0-186c8', (t3, t2, t1)),
('0000001000.00000_18', (t3, None, None)),
('0000001000.00000_18+186a0', (t3, t4, None)),
('0000001000.00000_18-186a0', (t3, t2, None)),
('0000001000.00000_18', (t3, None, t1)),
('0000001000.00000_18-5f5e100', (t3, t0, None)),
('0000001000.00000_18+0-5f5e100', (t3, t3, t0)),
('0000001000.00000_18-5f5e100+5f45a60', (t3, t0, t2)),
)
# decodings that are expected when explicit = False
self.non_explicit_decodings = (
('0000001000.00000_18', (t3, t3, t3)),
('0000001000.00000_18+186a0', (t3, t4, t4)),
('0000001000.00000_18-186a0', (t3, t2, t2)),
('0000001000.00000_18+186a0', (t3, t4, t4)),
('0000001000.00000_18-186a0', (t3, t2, t2)),
('0000001000.00000_18-5f5e100', (t3, t0, t0)),
)
# decodings that are expected when explicit = True
self.explicit_decodings = (
('0000001000.00000_18+0+0', (t3, t3, t3)),
('0000001000.00000_18+0', (t3, t3, None)),
('0000001000.00000_18', (t3, None, None)),
('0000001000.00000_18+186a0', (t3, t4, None)),
('0000001000.00000_18-186a0', (t3, t2, None)),
('0000001000.00000_18-5f5e100', (t3, t0, None)),
)
# decodings that are expected when explicit = True or False
self.decodings = (
('0000001000.00000_18+0+186a0', (t3, t3, t4)),
('0000001000.00000_18+186a0+186c8', (t3, t4, t5)),
('0000001000.00000_18-186a0+0', (t3, t2, t2)),
('0000001000.00000_18+0-186a0', (t3, t3, t2)),
('0000001000.00000_18-186a0-186c8', (t3, t2, t1)),
('0000001000.00000_18-5f5e100+5f45a60', (t3, t0, t2)),
)
def _assertEqual(self, expected, actual, test):
self.assertEqual(expected, actual,
'Got %s but expected %s for parameters %s'
% (actual, expected, test))
def test_encoding(self):
for test in self.explicit_encodings:
actual = utils.encode_timestamps(test[1][0], test[1][1],
test[1][2], True)
self._assertEqual(test[0], actual, test[1])
for test in self.non_explicit_encodings:
actual = utils.encode_timestamps(test[1][0], test[1][1],
test[1][2], False)
self._assertEqual(test[0], actual, test[1])
for explicit in (True, False):
for test in self.encodings:
actual = utils.encode_timestamps(test[1][0], test[1][1],
test[1][2], explicit)
self._assertEqual(test[0], actual, test[1])
def test_decoding(self):
for test in self.explicit_decodings:
actual = utils.decode_timestamps(test[0], True)
self._assertEqual(test[1], actual, test[0])
for test in self.non_explicit_decodings:
actual = utils.decode_timestamps(test[0], False)
self._assertEqual(test[1], actual, test[0])
for explicit in (True, False):
for test in self.decodings:
actual = utils.decode_timestamps(test[0], explicit)
self._assertEqual(test[1], actual, test[0])
class TestUtils(unittest.TestCase):
"""Tests for swift.common.utils """
def setUp(self):
utils.HASH_PATH_SUFFIX = 'endcap'
utils.HASH_PATH_PREFIX = 'startcap'
def test_lock_path(self):
tmpdir = mkdtemp()
try:
with utils.lock_path(tmpdir, 0.1):
exc = None
success = False
try:
with utils.lock_path(tmpdir, 0.1):
success = True
except LockTimeout as err:
exc = err
self.assertTrue(exc is not None)
self.assertTrue(not success)
finally:
shutil.rmtree(tmpdir)
def test_lock_path_num_sleeps(self):
tmpdir = mkdtemp()
num_short_calls = [0]
exception_raised = [False]
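        # stub out sleep() so we can count the short 0.01s retry naps and
        # fail loudly if lock_path ever sleeps for a different interval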
def my_sleep(to_sleep):
if to_sleep == 0.01:
num_short_calls[0] += 1
else:
raise Exception('sleep time changed: %s' % to_sleep)
try:
with mock.patch('swift.common.utils.sleep', my_sleep):
with utils.lock_path(tmpdir):
with utils.lock_path(tmpdir):
pass
except Exception as e:
exception_raised[0] = True
self.assertTrue('sleep time changed' in str(e))
finally:
shutil.rmtree(tmpdir)
self.assertEqual(num_short_calls[0], 11)
self.assertTrue(exception_raised[0])
def test_lock_path_class(self):
tmpdir = mkdtemp()
try:
with utils.lock_path(tmpdir, 0.1, ReplicationLockTimeout):
exc = None
exc2 = None
success = False
try:
with utils.lock_path(tmpdir, 0.1, ReplicationLockTimeout):
success = True
except ReplicationLockTimeout as err:
exc = err
except LockTimeout as err:
exc2 = err
self.assertTrue(exc is not None)
self.assertTrue(exc2 is None)
self.assertTrue(not success)
exc = None
exc2 = None
success = False
try:
with utils.lock_path(tmpdir, 0.1):
success = True
except ReplicationLockTimeout as err:
exc = err
except LockTimeout as err:
exc2 = err
self.assertTrue(exc is None)
self.assertTrue(exc2 is not None)
self.assertTrue(not success)
finally:
shutil.rmtree(tmpdir)
def test_normalize_timestamp(self):
# Test swift.common.utils.normalize_timestamp
self.assertEqual(utils.normalize_timestamp('1253327593.48174'),
"1253327593.48174")
self.assertEqual(utils.normalize_timestamp(1253327593.48174),
"1253327593.48174")
self.assertEqual(utils.normalize_timestamp('1253327593.48'),
"1253327593.48000")
self.assertEqual(utils.normalize_timestamp(1253327593.48),
"1253327593.48000")
self.assertEqual(utils.normalize_timestamp('253327593.48'),
"0253327593.48000")
self.assertEqual(utils.normalize_timestamp(253327593.48),
"0253327593.48000")
self.assertEqual(utils.normalize_timestamp('1253327593'),
"1253327593.00000")
self.assertEqual(utils.normalize_timestamp(1253327593),
"1253327593.00000")
self.assertRaises(ValueError, utils.normalize_timestamp, '')
self.assertRaises(ValueError, utils.normalize_timestamp, 'abc')
def test_normalize_delete_at_timestamp(self):
self.assertEqual(
utils.normalize_delete_at_timestamp(1253327593),
'1253327593')
self.assertEqual(
utils.normalize_delete_at_timestamp(1253327593.67890),
'1253327593')
self.assertEqual(
utils.normalize_delete_at_timestamp('1253327593'),
'1253327593')
self.assertEqual(
utils.normalize_delete_at_timestamp('1253327593.67890'),
'1253327593')
self.assertEqual(
utils.normalize_delete_at_timestamp(-1253327593),
'0000000000')
self.assertEqual(
utils.normalize_delete_at_timestamp(-1253327593.67890),
'0000000000')
self.assertEqual(
utils.normalize_delete_at_timestamp('-1253327593'),
'0000000000')
self.assertEqual(
utils.normalize_delete_at_timestamp('-1253327593.67890'),
'0000000000')
self.assertEqual(
utils.normalize_delete_at_timestamp(71253327593),
'9999999999')
self.assertEqual(
utils.normalize_delete_at_timestamp(71253327593.67890),
'9999999999')
self.assertEqual(
utils.normalize_delete_at_timestamp('71253327593'),
'9999999999')
self.assertEqual(
utils.normalize_delete_at_timestamp('71253327593.67890'),
'9999999999')
self.assertRaises(ValueError, utils.normalize_timestamp, '')
self.assertRaises(ValueError, utils.normalize_timestamp, 'abc')
def test_last_modified_date_to_timestamp(self):
expectations = {
'1970-01-01T00:00:00.000000': 0.0,
'2014-02-28T23:22:36.698390': 1393629756.698390,
'2011-03-19T04:03:00.604554': 1300507380.604554,
}
for last_modified, ts in expectations.items():
real = utils.last_modified_date_to_timestamp(last_modified)
self.assertEqual(real, ts, "failed for %s" % last_modified)
def test_last_modified_date_to_timestamp_when_system_not_UTC(self):
try:
old_tz = os.environ.get('TZ')
# Western Argentina Summer Time. Found in glibc manual; this
# timezone always has a non-zero offset from UTC, so this test is
# always meaningful.
os.environ['TZ'] = 'WART4WARST,J1/0,J365/25'
self.assertEqual(utils.last_modified_date_to_timestamp(
'1970-01-01T00:00:00.000000'),
0.0)
finally:
if old_tz is not None:
os.environ['TZ'] = old_tz
else:
os.environ.pop('TZ')
def test_backwards(self):
# Test swift.common.utils.backward
# The lines are designed so that the function would encounter
# all of the boundary conditions and typical conditions.
# Block boundaries are marked with '<>' characters
blocksize = 25
lines = [b'123456789x12345678><123456789\n', # block larger than rest
b'123456789x123>\n', # block ends just before \n character
b'123423456789\n',
b'123456789x\n', # block ends at the end of line
b'<123456789x123456789x123\n',
b'<6789x123\n', # block ends at the beginning of the line
b'6789x1234\n',
b'1234><234\n', # block ends typically in the middle of line
b'123456789x123456789\n']
with TemporaryFile() as f:
for line in lines:
f.write(line)
count = len(lines) - 1
for line in utils.backward(f, blocksize):
self.assertEqual(line, lines[count].split(b'\n')[0])
count -= 1
# Empty file case
with TemporaryFile('r') as f:
self.assertEqual([], list(utils.backward(f)))
def test_mkdirs(self):
testdir_base = mkdtemp()
testroot = os.path.join(testdir_base, 'mkdirs')
try:
self.assertTrue(not os.path.exists(testroot))
utils.mkdirs(testroot)
self.assertTrue(os.path.exists(testroot))
utils.mkdirs(testroot)
self.assertTrue(os.path.exists(testroot))
rmtree(testroot, ignore_errors=1)
testdir = os.path.join(testroot, 'one/two/three')
self.assertTrue(not os.path.exists(testdir))
utils.mkdirs(testdir)
self.assertTrue(os.path.exists(testdir))
utils.mkdirs(testdir)
self.assertTrue(os.path.exists(testdir))
rmtree(testroot, ignore_errors=1)
open(testroot, 'wb').close()
self.assertTrue(not os.path.exists(testdir))
self.assertRaises(OSError, utils.mkdirs, testdir)
os.unlink(testroot)
finally:
rmtree(testdir_base)
def test_split_path(self):
        # Test swift.common.utils.split_path
self.assertRaises(ValueError, utils.split_path, '')
self.assertRaises(ValueError, utils.split_path, '/')
self.assertRaises(ValueError, utils.split_path, '//')
self.assertEqual(utils.split_path('/a'), ['a'])
self.assertRaises(ValueError, utils.split_path, '//a')
self.assertEqual(utils.split_path('/a/'), ['a'])
self.assertRaises(ValueError, utils.split_path, '/a/c')
self.assertRaises(ValueError, utils.split_path, '//c')
self.assertRaises(ValueError, utils.split_path, '/a/c/')
self.assertRaises(ValueError, utils.split_path, '/a//')
self.assertRaises(ValueError, utils.split_path, '/a', 2)
self.assertRaises(ValueError, utils.split_path, '/a', 2, 3)
self.assertRaises(ValueError, utils.split_path, '/a', 2, 3, True)
self.assertEqual(utils.split_path('/a/c', 2), ['a', 'c'])
self.assertEqual(utils.split_path('/a/c/o', 3), ['a', 'c', 'o'])
self.assertRaises(ValueError, utils.split_path, '/a/c/o/r', 3, 3)
self.assertEqual(utils.split_path('/a/c/o/r', 3, 3, True),
['a', 'c', 'o/r'])
self.assertEqual(utils.split_path('/a/c', 2, 3, True),
['a', 'c', None])
self.assertRaises(ValueError, utils.split_path, '/a', 5, 4)
self.assertEqual(utils.split_path('/a/c/', 2), ['a', 'c'])
self.assertEqual(utils.split_path('/a/c/', 2, 3), ['a', 'c', ''])
try:
utils.split_path('o\nn e', 2)
except ValueError as err:
self.assertEqual(str(err), 'Invalid path: o%0An%20e')
try:
utils.split_path('o\nn e', 2, 3, True)
except ValueError as err:
self.assertEqual(str(err), 'Invalid path: o%0An%20e')
def test_validate_device_partition(self):
# Test swift.common.utils.validate_device_partition
utils.validate_device_partition('foo', 'bar')
self.assertRaises(ValueError,
utils.validate_device_partition, '', '')
self.assertRaises(ValueError,
utils.validate_device_partition, '', 'foo')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo', '')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo/bar', 'foo')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo', 'foo/bar')
self.assertRaises(ValueError,
utils.validate_device_partition, '.', 'foo')
self.assertRaises(ValueError,
utils.validate_device_partition, '..', 'foo')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo', '.')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo', '..')
try:
utils.validate_device_partition('o\nn e', 'foo')
except ValueError as err:
self.assertEqual(str(err), 'Invalid device: o%0An%20e')
try:
utils.validate_device_partition('foo', 'o\nn e')
except ValueError as err:
self.assertEqual(str(err), 'Invalid partition: o%0An%20e')
def test_NullLogger(self):
# Test swift.common.utils.NullLogger
sio = StringIO()
nl = utils.NullLogger()
nl.write('test')
self.assertEqual(sio.getvalue(), '')
def test_LoggerFileObject(self):
orig_stdout = sys.stdout
orig_stderr = sys.stderr
sio = StringIO()
handler = logging.StreamHandler(sio)
logger = logging.getLogger()
logger.addHandler(handler)
        lfo_stdout = utils.LoggerFileObject(logger)
        lfo_stderr = utils.LoggerFileObject(logger, 'STDERR')
print('test1')
self.assertEqual(sio.getvalue(), '')
sys.stdout = lfo_stdout
print('test2')
self.assertEqual(sio.getvalue(), 'STDOUT: test2\n')
sys.stderr = lfo_stderr
print('test4', file=sys.stderr)
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n')
sys.stdout = orig_stdout
print('test5')
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n')
print('test6', file=sys.stderr)
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\n')
sys.stderr = orig_stderr
print('test8')
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\n')
lfo_stdout.writelines(['a', 'b', 'c'])
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\nSTDOUT: a#012b#012c\n')
lfo_stdout.close()
lfo_stderr.close()
lfo_stdout.write('d')
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\nSTDOUT: a#012b#012c\nSTDOUT: d\n')
lfo_stdout.flush()
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\nSTDOUT: a#012b#012c\nSTDOUT: d\n')
for lfo in (lfo_stdout, lfo_stderr):
got_exc = False
try:
for line in lfo:
pass
except Exception:
got_exc = True
self.assertTrue(got_exc)
got_exc = False
try:
for line in lfo:
pass
except Exception:
got_exc = True
self.assertTrue(got_exc)
self.assertRaises(IOError, lfo.read)
self.assertRaises(IOError, lfo.read, 1024)
self.assertRaises(IOError, lfo.readline)
self.assertRaises(IOError, lfo.readline, 1024)
lfo.tell()
def test_LoggerFileObject_recursion(self):
crashy_calls = [0]
class CrashyLogger(logging.Handler):
def emit(self, record):
crashy_calls[0] += 1
try:
# Pretend to be trying to send to syslog, but syslogd is
# dead. We need the raise here to set sys.exc_info.
raise socket.error(errno.ENOTCONN, "This is an ex-syslog")
except socket.error:
self.handleError(record)
logger = logging.getLogger()
logger.addHandler(CrashyLogger())
# Set up some real file descriptors for stdio. If you run
# nosetests with "-s", you already have real files there, but
# otherwise they're StringIO objects.
#
# In any case, since capture_stdio() closes sys.stdin and friends,
# we'd want to set up some sacrificial files so as to not goof up
# the testrunner.
new_stdin = open(os.devnull, 'r+b')
new_stdout = open(os.devnull, 'w+b')
new_stderr = open(os.devnull, 'w+b')
with contextlib.closing(new_stdin), contextlib.closing(new_stdout), \
contextlib.closing(new_stderr):
# logging.raiseExceptions is set to False in test/__init__.py, but
# is True in Swift daemons, and the error doesn't manifest without
# it.
with mock.patch('sys.stdin', new_stdin), \
mock.patch('sys.stdout', new_stdout), \
mock.patch('sys.stderr', new_stderr), \
mock.patch.object(logging, 'raiseExceptions', True):
# Note: since stdio is hooked up to /dev/null in here, using
# pdb is basically impossible. Sorry about that.
utils.capture_stdio(logger)
logger.info("I like ham")
        self.assertTrue(crashy_calls[0] >= 1)
def test_parse_options(self):
# Get a file that is definitely on disk
with NamedTemporaryFile() as f:
conf_file = f.name
conf, options = utils.parse_options(test_args=[conf_file])
self.assertEqual(conf, conf_file)
# assert defaults
self.assertEqual(options['verbose'], False)
self.assertNotIn('once', options)
# assert verbose as option
conf, options = utils.parse_options(test_args=[conf_file, '-v'])
self.assertEqual(options['verbose'], True)
# check once option
conf, options = utils.parse_options(test_args=[conf_file],
once=True)
self.assertEqual(options['once'], False)
test_args = [conf_file, '--once']
conf, options = utils.parse_options(test_args=test_args, once=True)
self.assertEqual(options['once'], True)
# check options as arg parsing
test_args = [conf_file, 'once', 'plugin_name', 'verbose']
conf, options = utils.parse_options(test_args=test_args, once=True)
self.assertEqual(options['verbose'], True)
self.assertEqual(options['once'], True)
self.assertEqual(options['extra_args'], ['plugin_name'])
def test_parse_options_errors(self):
orig_stdout = sys.stdout
orig_stderr = sys.stderr
stdo = StringIO()
stde = StringIO()
utils.sys.stdout = stdo
utils.sys.stderr = stde
self.assertRaises(SystemExit, utils.parse_options, once=True,
test_args=[])
self.assertTrue('missing config' in stdo.getvalue())
# verify conf file must exist, context manager will delete temp file
with NamedTemporaryFile() as f:
conf_file = f.name
self.assertRaises(SystemExit, utils.parse_options, once=True,
test_args=[conf_file])
self.assertTrue('unable to locate' in stdo.getvalue())
# reset stdio
utils.sys.stdout = orig_stdout
utils.sys.stderr = orig_stderr
def test_dump_recon_cache(self):
testdir_base = mkdtemp()
testcache_file = os.path.join(testdir_base, 'cache.recon')
logger = utils.get_logger(None, 'server', log_route='server')
try:
submit_dict = {'key1': {'value1': 1, 'value2': 2}}
utils.dump_recon_cache(submit_dict, testcache_file, logger)
fd = open(testcache_file)
file_dict = json.loads(fd.readline())
fd.close()
self.assertEqual(submit_dict, file_dict)
# Use a nested entry
submit_dict = {'key1': {'key2': {'value1': 1, 'value2': 2}}}
result_dict = {'key1': {'key2': {'value1': 1, 'value2': 2},
'value1': 1, 'value2': 2}}
utils.dump_recon_cache(submit_dict, testcache_file, logger)
fd = open(testcache_file)
file_dict = json.loads(fd.readline())
fd.close()
self.assertEqual(result_dict, file_dict)
finally:
rmtree(testdir_base)
def test_dump_recon_cache_set_owner(self):
testdir_base = mkdtemp()
testcache_file = os.path.join(testdir_base, 'cache.recon')
logger = utils.get_logger(None, 'server', log_route='server')
try:
submit_dict = {'key1': {'value1': 1, 'value2': 2}}
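            # fake pwd.getpwnam() result carrying the uid that os.chown is
            # expected to be called with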
_ret = lambda: None
_ret.pw_uid = 100
_mock_getpwnam = MagicMock(return_value=_ret)
_mock_chown = mock.Mock()
with patch('os.chown', _mock_chown), \
patch('pwd.getpwnam', _mock_getpwnam):
utils.dump_recon_cache(submit_dict, testcache_file,
logger, set_owner="swift")
_mock_getpwnam.assert_called_once_with("swift")
self.assertEqual(_mock_chown.call_args[0][1], 100)
finally:
rmtree(testdir_base)
def test_dump_recon_cache_permission_denied(self):
testdir_base = mkdtemp()
testcache_file = os.path.join(testdir_base, 'cache.recon')
class MockLogger(object):
def __init__(self):
self._excs = []
def exception(self, message):
_junk, exc, _junk = sys.exc_info()
self._excs.append(exc)
logger = MockLogger()
try:
submit_dict = {'key1': {'value1': 1, 'value2': 2}}
with mock.patch(
'swift.common.utils.NamedTemporaryFile',
side_effect=IOError(13, 'Permission Denied')):
utils.dump_recon_cache(submit_dict, testcache_file, logger)
self.assertIsInstance(logger._excs[0], IOError)
finally:
rmtree(testdir_base)
def test_get_logger(self):
sio = StringIO()
logger = logging.getLogger('server')
logger.addHandler(logging.StreamHandler(sio))
logger = utils.get_logger(None, 'server', log_route='server')
logger.warning('test1')
self.assertEqual(sio.getvalue(), 'test1\n')
logger.debug('test2')
self.assertEqual(sio.getvalue(), 'test1\n')
logger = utils.get_logger({'log_level': 'DEBUG'}, 'server',
log_route='server')
logger.debug('test3')
self.assertEqual(sio.getvalue(), 'test1\ntest3\n')
        # Doesn't really test that the log facility is truly being used all
        # the way to syslog, but it exercises the code.
logger = utils.get_logger({'log_facility': 'LOG_LOCAL3'}, 'server',
log_route='server')
logger.warning('test4')
self.assertEqual(sio.getvalue(),
'test1\ntest3\ntest4\n')
# make sure debug doesn't log by default
logger.debug('test5')
self.assertEqual(sio.getvalue(),
'test1\ntest3\ntest4\n')
# make sure notice lvl logs by default
logger.notice('test6')
self.assertEqual(sio.getvalue(),
'test1\ntest3\ntest4\ntest6\n')
def test_get_logger_sysloghandler_plumbing(self):
orig_sysloghandler = utils.SysLogHandler
syslog_handler_args = []
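        # wrap SysLogHandler so we can capture the arguments every handler
        # is constructed with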
def syslog_handler_catcher(*args, **kwargs):
syslog_handler_args.append((args, kwargs))
return orig_sysloghandler(*args, **kwargs)
syslog_handler_catcher.LOG_LOCAL0 = orig_sysloghandler.LOG_LOCAL0
syslog_handler_catcher.LOG_LOCAL3 = orig_sysloghandler.LOG_LOCAL3
try:
utils.SysLogHandler = syslog_handler_catcher
utils.get_logger({
'log_facility': 'LOG_LOCAL3',
}, 'server', log_route='server')
expected_args = [((), {'address': '/dev/log',
'facility': orig_sysloghandler.LOG_LOCAL3})]
if not os.path.exists('/dev/log') or \
os.path.isfile('/dev/log') or \
os.path.isdir('/dev/log'):
# Since socket on OSX is in /var/run/syslog, there will be
# a fallback to UDP.
expected_args.append(
((), {'facility': orig_sysloghandler.LOG_LOCAL3}))
self.assertEqual(expected_args, syslog_handler_args)
syslog_handler_args = []
utils.get_logger({
'log_facility': 'LOG_LOCAL3',
'log_address': '/foo/bar',
}, 'server', log_route='server')
self.assertEqual([
((), {'address': '/foo/bar',
'facility': orig_sysloghandler.LOG_LOCAL3}),
# Second call is because /foo/bar didn't exist (and wasn't a
# UNIX domain socket).
((), {'facility': orig_sysloghandler.LOG_LOCAL3})],
syslog_handler_args)
# Using UDP with default port
syslog_handler_args = []
utils.get_logger({
'log_udp_host': 'syslog.funtimes.com',
}, 'server', log_route='server')
self.assertEqual([
((), {'address': ('syslog.funtimes.com',
logging.handlers.SYSLOG_UDP_PORT),
'facility': orig_sysloghandler.LOG_LOCAL0})],
syslog_handler_args)
# Using UDP with non-default port
syslog_handler_args = []
utils.get_logger({
'log_udp_host': 'syslog.funtimes.com',
'log_udp_port': '2123',
}, 'server', log_route='server')
self.assertEqual([
((), {'address': ('syslog.funtimes.com', 2123),
'facility': orig_sysloghandler.LOG_LOCAL0})],
syslog_handler_args)
finally:
utils.SysLogHandler = orig_sysloghandler
@reset_logger_state
def test_clean_logger_exception(self):
# setup stream logging
sio = StringIO()
logger = utils.get_logger(None)
handler = logging.StreamHandler(sio)
logger.logger.addHandler(handler)
def strip_value(sio):
sio.seek(0)
v = sio.getvalue()
sio.truncate(0)
return v
def log_exception(exc):
try:
raise exc
except (Exception, Timeout):
logger.exception('blah')
try:
# establish base case
self.assertEqual(strip_value(sio), '')
logger.info('test')
self.assertEqual(strip_value(sio), 'test\n')
self.assertEqual(strip_value(sio), '')
logger.info('test')
logger.info('test')
self.assertEqual(strip_value(sio), 'test\ntest\n')
self.assertEqual(strip_value(sio), '')
# test OSError
for en in (errno.EIO, errno.ENOSPC):
log_exception(OSError(en, 'my %s error message' % en))
log_msg = strip_value(sio)
self.assertNotIn('Traceback', log_msg)
self.assertTrue('my %s error message' % en in log_msg)
# unfiltered
log_exception(OSError())
self.assertTrue('Traceback' in strip_value(sio))
# test socket.error
log_exception(socket.error(errno.ECONNREFUSED,
'my error message'))
log_msg = strip_value(sio)
self.assertNotIn('Traceback', log_msg)
            self.assertNotIn('my error message', log_msg)
self.assertTrue('Connection refused' in log_msg)
log_exception(socket.error(errno.EHOSTUNREACH,
'my error message'))
log_msg = strip_value(sio)
self.assertNotIn('Traceback', log_msg)
self.assertNotIn('my error message', log_msg)
self.assertTrue('Host unreachable' in log_msg)
log_exception(socket.error(errno.ETIMEDOUT, 'my error message'))
log_msg = strip_value(sio)
self.assertNotIn('Traceback', log_msg)
self.assertNotIn('my error message', log_msg)
self.assertTrue('Connection timeout' in log_msg)
# unfiltered
log_exception(socket.error(0, 'my error message'))
log_msg = strip_value(sio)
self.assertTrue('Traceback' in log_msg)
self.assertTrue('my error message' in log_msg)
# test eventlet.Timeout
connection_timeout = ConnectionTimeout(42, 'my error message')
log_exception(connection_timeout)
log_msg = strip_value(sio)
self.assertNotIn('Traceback', log_msg)
self.assertTrue('ConnectionTimeout' in log_msg)
self.assertTrue('(42s)' in log_msg)
self.assertNotIn('my error message', log_msg)
connection_timeout.cancel()
message_timeout = MessageTimeout(42, 'my error message')
log_exception(message_timeout)
log_msg = strip_value(sio)
self.assertNotIn('Traceback', log_msg)
self.assertTrue('MessageTimeout' in log_msg)
self.assertTrue('(42s)' in log_msg)
self.assertTrue('my error message' in log_msg)
message_timeout.cancel()
# test unhandled
log_exception(Exception('my error message'))
log_msg = strip_value(sio)
self.assertTrue('Traceback' in log_msg)
self.assertTrue('my error message' in log_msg)
finally:
logger.logger.removeHandler(handler)
@reset_logger_state
def test_swift_log_formatter_max_line_length(self):
# setup stream logging
sio = StringIO()
logger = utils.get_logger(None)
handler = logging.StreamHandler(sio)
formatter = utils.SwiftLogFormatter(max_line_length=10)
handler.setFormatter(formatter)
logger.logger.addHandler(handler)
def strip_value(sio):
sio.seek(0)
v = sio.getvalue()
sio.truncate(0)
return v
try:
logger.info('12345')
self.assertEqual(strip_value(sio), '12345\n')
logger.info('1234567890')
self.assertEqual(strip_value(sio), '1234567890\n')
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '12 ... de\n')
formatter.max_line_length = 11
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '123 ... cde\n')
formatter.max_line_length = 0
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1234567890abcde\n')
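            # when the limit is too small to leave room for the ' ... '
            # marker, the line is simply truncated to the limit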
formatter.max_line_length = 1
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1\n')
formatter.max_line_length = 2
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '12\n')
formatter.max_line_length = 3
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '123\n')
formatter.max_line_length = 4
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1234\n')
formatter.max_line_length = 5
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '12345\n')
formatter.max_line_length = 6
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '123456\n')
formatter.max_line_length = 7
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1 ... e\n')
formatter.max_line_length = -10
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1234567890abcde\n')
finally:
logger.logger.removeHandler(handler)
@reset_logger_state
def test_swift_log_formatter(self):
# setup stream logging
sio = StringIO()
logger = utils.get_logger(None)
handler = logging.StreamHandler(sio)
handler.setFormatter(utils.SwiftLogFormatter())
logger.logger.addHandler(handler)
def strip_value(sio):
sio.seek(0)
v = sio.getvalue()
sio.truncate(0)
return v
try:
self.assertFalse(logger.txn_id)
logger.error('my error message')
log_msg = strip_value(sio)
self.assertTrue('my error message' in log_msg)
self.assertNotIn('txn', log_msg)
logger.txn_id = '12345'
logger.error('test')
log_msg = strip_value(sio)
self.assertTrue('txn' in log_msg)
self.assertTrue('12345' in log_msg)
# test txn in info message
self.assertEqual(logger.txn_id, '12345')
logger.info('test')
log_msg = strip_value(sio)
self.assertTrue('txn' in log_msg)
self.assertTrue('12345' in log_msg)
# test txn already in message
self.assertEqual(logger.txn_id, '12345')
logger.warning('test 12345 test')
self.assertEqual(strip_value(sio), 'test 12345 test\n')
# Test multi line collapsing
logger.error('my\nerror\nmessage')
log_msg = strip_value(sio)
self.assertTrue('my#012error#012message' in log_msg)
# test client_ip
self.assertFalse(logger.client_ip)
logger.error('my error message')
log_msg = strip_value(sio)
self.assertTrue('my error message' in log_msg)
self.assertNotIn('client_ip', log_msg)
logger.client_ip = '1.2.3.4'
logger.error('test')
log_msg = strip_value(sio)
self.assertTrue('client_ip' in log_msg)
self.assertTrue('1.2.3.4' in log_msg)
# test no client_ip on info message
self.assertEqual(logger.client_ip, '1.2.3.4')
logger.info('test')
log_msg = strip_value(sio)
self.assertNotIn('client_ip', log_msg)
self.assertNotIn('1.2.3.4', log_msg)
# test client_ip (and txn) already in message
self.assertEqual(logger.client_ip, '1.2.3.4')
logger.warning('test 1.2.3.4 test 12345')
self.assertEqual(strip_value(sio), 'test 1.2.3.4 test 12345\n')
finally:
logger.logger.removeHandler(handler)
def test_storage_directory(self):
self.assertEqual(utils.storage_directory('objects', '1', 'ABCDEF'),
'objects/1/DEF/ABCDEF')
def test_is_valid_ip(self):
self.assertTrue(is_valid_ip("127.0.0.1"))
self.assertTrue(is_valid_ip("10.0.0.1"))
ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "fe80:0:0:0:204:61ff:fe9d:f156"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "fe80::204:61ff:fe9d:f156"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "fe80:0000:0000:0000:0204:61ff:254.157.241.86"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "fe80:0:0:0:0204:61ff:254.157.241.86"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "fe80::204:61ff:254.157.241.86"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "fe80::"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "::1"
self.assertTrue(is_valid_ip(ipv6))
not_ipv6 = "3ffe:0b00:0000:0001:0000:0000:000a"
self.assertFalse(is_valid_ip(not_ipv6))
not_ipv6 = "1:2:3:4:5:6::7:8"
self.assertFalse(is_valid_ip(not_ipv6))
def test_is_valid_ipv4(self):
self.assertTrue(is_valid_ipv4("127.0.0.1"))
self.assertTrue(is_valid_ipv4("10.0.0.1"))
ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "fe80:0:0:0:204:61ff:fe9d:f156"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "fe80::204:61ff:fe9d:f156"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "fe80:0000:0000:0000:0204:61ff:254.157.241.86"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "fe80:0:0:0:0204:61ff:254.157.241.86"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "fe80::204:61ff:254.157.241.86"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "fe80::"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "::1"
self.assertFalse(is_valid_ipv4(ipv6))
not_ipv6 = "3ffe:0b00:0000:0001:0000:0000:000a"
self.assertFalse(is_valid_ipv4(not_ipv6))
not_ipv6 = "1:2:3:4:5:6::7:8"
self.assertFalse(is_valid_ipv4(not_ipv6))
def test_is_valid_ipv6(self):
self.assertFalse(is_valid_ipv6("127.0.0.1"))
self.assertFalse(is_valid_ipv6("10.0.0.1"))
ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "fe80:0:0:0:204:61ff:fe9d:f156"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "fe80::204:61ff:fe9d:f156"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "fe80:0000:0000:0000:0204:61ff:254.157.241.86"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "fe80:0:0:0:0204:61ff:254.157.241.86"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "fe80::204:61ff:254.157.241.86"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "fe80::"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "::1"
self.assertTrue(is_valid_ipv6(ipv6))
not_ipv6 = "3ffe:0b00:0000:0001:0000:0000:000a"
self.assertFalse(is_valid_ipv6(not_ipv6))
not_ipv6 = "1:2:3:4:5:6::7:8"
self.assertFalse(is_valid_ipv6(not_ipv6))
def test_expand_ipv6(self):
expanded_ipv6 = "fe80::204:61ff:fe9d:f156"
upper_ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156"
self.assertEqual(expanded_ipv6, utils.expand_ipv6(upper_ipv6))
omit_ipv6 = "fe80:0000:0000::0204:61ff:fe9d:f156"
self.assertEqual(expanded_ipv6, utils.expand_ipv6(omit_ipv6))
less_num_ipv6 = "fe80:0:00:000:0204:61ff:fe9d:f156"
self.assertEqual(expanded_ipv6, utils.expand_ipv6(less_num_ipv6))
def test_whataremyips(self):
myips = utils.whataremyips()
self.assertTrue(len(myips) > 1)
self.assertTrue('127.0.0.1' in myips)
def test_whataremyips_bind_to_all(self):
for any_addr in ('0.0.0.0', '0000:0000:0000:0000:0000:0000:0000:0000',
'::0', '::0000', '::',
# Wacky parse-error input produces all IPs
'I am a bear'):
myips = utils.whataremyips(any_addr)
self.assertTrue(len(myips) > 1)
self.assertTrue('127.0.0.1' in myips)
def test_whataremyips_bind_ip_specific(self):
self.assertEqual(['1.2.3.4'], utils.whataremyips('1.2.3.4'))
def test_whataremyips_error(self):
def my_interfaces():
return ['eth0']
def my_ifaddress_error(interface):
raise ValueError
with patch('netifaces.interfaces', my_interfaces), \
patch('netifaces.ifaddresses', my_ifaddress_error):
self.assertEqual(utils.whataremyips(), [])
def test_whataremyips_ipv6(self):
test_ipv6_address = '2001:6b0:dead:beef:2::32'
test_interface = 'eth0'
def my_ipv6_interfaces():
return ['eth0']
def my_ipv6_ifaddresses(interface):
return {AF_INET6:
[{'netmask': 'ffff:ffff:ffff:ffff::',
'addr': '%s%%%s' % (test_ipv6_address, test_interface)}]}
with patch('netifaces.interfaces', my_ipv6_interfaces), \
patch('netifaces.ifaddresses', my_ipv6_ifaddresses):
myips = utils.whataremyips()
self.assertEqual(len(myips), 1)
self.assertEqual(myips[0], test_ipv6_address)
def test_hash_path(self):
        # Yes, these tests are deliberately very fragile. We want to make sure
        # that if someone changes the results hash_path produces, they know it.
with mock.patch('swift.common.utils.HASH_PATH_PREFIX', ''):
self.assertEqual(utils.hash_path('a'),
'1c84525acb02107ea475dcd3d09c2c58')
self.assertEqual(utils.hash_path('a', 'c'),
'33379ecb053aa5c9e356c68997cbb59e')
self.assertEqual(utils.hash_path('a', 'c', 'o'),
'06fbf0b514e5199dfc4e00f42eb5ea83')
self.assertEqual(utils.hash_path('a', 'c', 'o', raw_digest=False),
'06fbf0b514e5199dfc4e00f42eb5ea83')
self.assertEqual(utils.hash_path('a', 'c', 'o', raw_digest=True),
'\x06\xfb\xf0\xb5\x14\xe5\x19\x9d\xfcN'
'\x00\xf4.\xb5\xea\x83')
self.assertRaises(ValueError, utils.hash_path, 'a', object='o')
utils.HASH_PATH_PREFIX = 'abcdef'
self.assertEqual(utils.hash_path('a', 'c', 'o', raw_digest=False),
'363f9b535bfb7d17a43a46a358afca0e')
def test_validate_hash_conf(self):
# no section causes InvalidHashPathConfigError
self._test_validate_hash_conf([], [], True)
# 'swift-hash' section is there but no options causes
# InvalidHashPathConfigError
self._test_validate_hash_conf(['swift-hash'], [], True)
# if we have the section and either of prefix or suffix,
# InvalidHashPathConfigError doesn't occur
self._test_validate_hash_conf(
['swift-hash'], ['swift_hash_path_prefix'], False)
self._test_validate_hash_conf(
['swift-hash'], ['swift_hash_path_suffix'], False)
# definitely, we have the section and both of them,
# InvalidHashPathConfigError doesn't occur
self._test_validate_hash_conf(
['swift-hash'],
['swift_hash_path_suffix', 'swift_hash_path_prefix'], False)
# But invalid section name should make an error even if valid
# options are there
self._test_validate_hash_conf(
['swift-hash-xxx'],
['swift_hash_path_suffix', 'swift_hash_path_prefix'], True)
def _test_validate_hash_conf(self, sections, options, should_raise_error):
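        # Helper: FakeConfigParser below pretends that only the given
        # sections/options exist, so we can check exactly when
        # validate_hash_conf() raises InvalidHashPathConfigError.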
class FakeConfigParser(object):
def read(self, conf_path):
return True
def get(self, section, option):
if section not in sections:
raise NoSectionError('section error')
elif option not in options:
raise NoOptionError('option error', 'this option')
else:
return 'some_option_value'
with mock.patch('swift.common.utils.HASH_PATH_PREFIX', ''), \
mock.patch('swift.common.utils.HASH_PATH_SUFFIX', ''), \
mock.patch('swift.common.utils.ConfigParser',
FakeConfigParser):
try:
utils.validate_hash_conf()
except utils.InvalidHashPathConfigError:
if not should_raise_error:
self.fail('validate_hash_conf should not raise an error')
else:
if should_raise_error:
self.fail('validate_hash_conf should raise an error')
def test_load_libc_function(self):
self.assertTrue(callable(
utils.load_libc_function('printf')))
self.assertTrue(callable(
utils.load_libc_function('some_not_real_function')))
self.assertRaises(AttributeError,
utils.load_libc_function, 'some_not_real_function',
fail_if_missing=True)
def test_readconf(self):
conf = '''[section1]
foo = bar
[section2]
log_name = yarr'''
# setup a real file
fd, temppath = tempfile.mkstemp(dir='/tmp')
with os.fdopen(fd, 'wb') as f:
f.write(conf)
make_filename = lambda: temppath
# setup a file stream
make_fp = lambda: StringIO(conf)
for conf_object_maker in (make_filename, make_fp):
conffile = conf_object_maker()
result = utils.readconf(conffile)
expected = {'__file__': conffile,
'log_name': None,
'section1': {'foo': 'bar'},
'section2': {'log_name': 'yarr'}}
self.assertEqual(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile, 'section1')
expected = {'__file__': conffile, 'log_name': 'section1',
'foo': 'bar'}
self.assertEqual(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile,
'section2').get('log_name')
expected = 'yarr'
self.assertEqual(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile, 'section1',
log_name='foo').get('log_name')
expected = 'foo'
self.assertEqual(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile, 'section1',
defaults={'bar': 'baz'})
expected = {'__file__': conffile, 'log_name': 'section1',
'foo': 'bar', 'bar': 'baz'}
self.assertEqual(result, expected)
self.assertRaisesRegexp(
ValueError, 'Unable to find section3 config section in.*',
utils.readconf, temppath, 'section3')
os.unlink(temppath)
self.assertRaises(IOError, utils.readconf, temppath)
def test_readconf_raw(self):
conf = '''[section1]
foo = bar
[section2]
log_name = %(yarr)s'''
# setup a real file
fd, temppath = tempfile.mkstemp(dir='/tmp')
with os.fdopen(fd, 'wb') as f:
f.write(conf)
make_filename = lambda: temppath
# setup a file stream
make_fp = lambda: StringIO(conf)
for conf_object_maker in (make_filename, make_fp):
conffile = conf_object_maker()
result = utils.readconf(conffile, raw=True)
expected = {'__file__': conffile,
'log_name': None,
'section1': {'foo': 'bar'},
'section2': {'log_name': '%(yarr)s'}}
self.assertEqual(result, expected)
os.unlink(temppath)
self.assertRaises(IOError, utils.readconf, temppath)
def test_readconf_dir(self):
config_dir = {
'server.conf.d/01.conf': """
[DEFAULT]
port = 8080
foo = bar
[section1]
name=section1
""",
'server.conf.d/section2.conf': """
[DEFAULT]
port = 8081
bar = baz
[section2]
name=section2
""",
'other-server.conf.d/01.conf': """
[DEFAULT]
port = 8082
[section3]
name=section3
"""
}
# strip indent from test config contents
config_dir = dict((f, dedent(c)) for (f, c) in config_dir.items())
with temptree(*zip(*config_dir.items())) as path:
conf_dir = os.path.join(path, 'server.conf.d')
conf = utils.readconf(conf_dir)
expected = {
'__file__': os.path.join(path, 'server.conf.d'),
'log_name': None,
'section1': {
'port': '8081',
'foo': 'bar',
'bar': 'baz',
'name': 'section1',
},
'section2': {
'port': '8081',
'foo': 'bar',
'bar': 'baz',
'name': 'section2',
},
}
self.assertEqual(conf, expected)
def test_readconf_dir_ignores_hidden_and_nondotconf_files(self):
config_dir = {
'server.conf.d/01.conf': """
[section1]
port = 8080
""",
'server.conf.d/.01.conf.swp': """
[section]
port = 8081
""",
'server.conf.d/01.conf-bak': """
[section]
port = 8082
""",
}
# strip indent from test config contents
config_dir = dict((f, dedent(c)) for (f, c) in config_dir.items())
with temptree(*zip(*config_dir.items())) as path:
conf_dir = os.path.join(path, 'server.conf.d')
conf = utils.readconf(conf_dir)
expected = {
'__file__': os.path.join(path, 'server.conf.d'),
'log_name': None,
'section1': {
'port': '8080',
},
}
self.assertEqual(conf, expected)
def test_drop_privileges(self):
user = getuser()
# over-ride os with mock
required_func_calls = ('setgroups', 'setgid', 'setuid', 'setsid',
'chdir', 'umask')
utils.os = MockOs(called_funcs=required_func_calls)
# exercise the code
utils.drop_privileges(user)
for func in required_func_calls:
self.assertTrue(utils.os.called_funcs[func])
import pwd
self.assertEqual(pwd.getpwnam(user)[5], utils.os.environ['HOME'])
groups = [g.gr_gid for g in grp.getgrall() if user in g.gr_mem]
groups.append(pwd.getpwnam(user).pw_gid)
self.assertEqual(set(groups), set(os.getgroups()))
# reset; test same args, OSError trying to get session leader
utils.os = MockOs(called_funcs=required_func_calls,
raise_funcs=('setsid',))
for func in required_func_calls:
self.assertFalse(utils.os.called_funcs.get(func, False))
utils.drop_privileges(user)
for func in required_func_calls:
self.assertTrue(utils.os.called_funcs[func])
def test_drop_privileges_no_call_setsid(self):
user = getuser()
# over-ride os with mock
required_func_calls = ('setgroups', 'setgid', 'setuid', 'chdir',
'umask')
bad_func_calls = ('setsid',)
utils.os = MockOs(called_funcs=required_func_calls,
raise_funcs=bad_func_calls)
# exercise the code
utils.drop_privileges(user, call_setsid=False)
for func in required_func_calls:
self.assertTrue(utils.os.called_funcs[func])
for func in bad_func_calls:
self.assertNotIn(func, utils.os.called_funcs)
@reset_logger_state
def test_capture_stdio(self):
# stubs
logger = utils.get_logger(None, 'dummy')
# mock utils system modules
_orig_sys = utils.sys
_orig_os = utils.os
try:
utils.sys = MockSys()
utils.os = MockOs()
# basic test
utils.capture_stdio(logger)
self.assertTrue(utils.sys.excepthook is not None)
self.assertEqual(utils.os.closed_fds, utils.sys.stdio_fds)
self.assertTrue(
isinstance(utils.sys.stdout, utils.LoggerFileObject))
self.assertTrue(
isinstance(utils.sys.stderr, utils.LoggerFileObject))
# reset; test same args, but exc when trying to close stdio
utils.os = MockOs(raise_funcs=('dup2',))
utils.sys = MockSys()
# test unable to close stdio
utils.capture_stdio(logger)
self.assertTrue(utils.sys.excepthook is not None)
self.assertEqual(utils.os.closed_fds, [])
self.assertTrue(
isinstance(utils.sys.stdout, utils.LoggerFileObject))
self.assertTrue(
isinstance(utils.sys.stderr, utils.LoggerFileObject))
# reset; test some other args
utils.os = MockOs()
utils.sys = MockSys()
logger = utils.get_logger(None, log_to_console=True)
# test console log
utils.capture_stdio(logger, capture_stdout=False,
capture_stderr=False)
self.assertTrue(utils.sys.excepthook is not None)
# when logging to console, stderr remains open
self.assertEqual(utils.os.closed_fds, utils.sys.stdio_fds[:2])
reset_loggers()
# stdio not captured
self.assertFalse(isinstance(utils.sys.stdout,
utils.LoggerFileObject))
self.assertFalse(isinstance(utils.sys.stderr,
utils.LoggerFileObject))
finally:
utils.sys = _orig_sys
utils.os = _orig_os
@reset_logger_state
def test_get_logger_console(self):
logger = utils.get_logger(None)
console_handlers = [h for h in logger.logger.handlers if
isinstance(h, logging.StreamHandler)]
self.assertFalse(console_handlers)
logger = utils.get_logger(None, log_to_console=True)
console_handlers = [h for h in logger.logger.handlers if
isinstance(h, logging.StreamHandler)]
self.assertTrue(console_handlers)
# make sure you can't have two console handlers
self.assertEqual(len(console_handlers), 1)
old_handler = console_handlers[0]
logger = utils.get_logger(None, log_to_console=True)
console_handlers = [h for h in logger.logger.handlers if
isinstance(h, logging.StreamHandler)]
self.assertEqual(len(console_handlers), 1)
new_handler = console_handlers[0]
self.assertNotEqual(new_handler, old_handler)
def verify_under_pseudo_time(
self, func, target_runtime_ms=1, *args, **kwargs):
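        # Run func under a fake clock: time() creeps forward 1ms per call and
        # sleep() just advances the clock, so the ratelimit tests can check
        # simulated elapsed time (within 100ms of target_runtime_ms) without
        # actually sleeping.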
curr_time = [42.0]
def my_time():
curr_time[0] += 0.001
return curr_time[0]
def my_sleep(duration):
curr_time[0] += 0.001
curr_time[0] += duration
with patch('time.time', my_time), \
patch('time.sleep', my_sleep), \
patch('eventlet.sleep', my_sleep):
start = time.time()
func(*args, **kwargs)
# make sure it's accurate to 10th of a second, converting the time
# difference to milliseconds, 100 milliseconds is 1/10 of a second
diff_from_target_ms = abs(
target_runtime_ms - ((time.time() - start) * 1000))
self.assertTrue(diff_from_target_ms < 100,
"Expected %d < 100" % diff_from_target_ms)
def test_ratelimit_sleep(self):
def testfunc():
running_time = 0
for i in range(100):
running_time = utils.ratelimit_sleep(running_time, -5)
self.verify_under_pseudo_time(testfunc, target_runtime_ms=1)
def testfunc():
running_time = 0
for i in range(100):
running_time = utils.ratelimit_sleep(running_time, 0)
self.verify_under_pseudo_time(testfunc, target_runtime_ms=1)
def testfunc():
running_time = 0
for i in range(50):
running_time = utils.ratelimit_sleep(running_time, 200)
self.verify_under_pseudo_time(testfunc, target_runtime_ms=250)
def test_ratelimit_sleep_with_incr(self):
def testfunc():
running_time = 0
vals = [5, 17, 0, 3, 11, 30,
40, 4, 13, 2, -1] * 2 # adds up to 248
total = 0
for i in vals:
running_time = utils.ratelimit_sleep(running_time,
500, incr_by=i)
total += i
self.assertEqual(248, total)
self.verify_under_pseudo_time(testfunc, target_runtime_ms=500)
def test_ratelimit_sleep_with_sleep(self):
def testfunc():
running_time = 0
sleeps = [0] * 7 + [.2] * 3 + [0] * 30
for i in sleeps:
running_time = utils.ratelimit_sleep(running_time, 40,
rate_buffer=1)
time.sleep(i)
self.verify_under_pseudo_time(testfunc, target_runtime_ms=900)
def test_urlparse(self):
parsed = utils.urlparse('http://127.0.0.1/')
self.assertEqual(parsed.scheme, 'http')
self.assertEqual(parsed.hostname, '127.0.0.1')
self.assertEqual(parsed.path, '/')
parsed = utils.urlparse('http://127.0.0.1:8080/')
self.assertEqual(parsed.port, 8080)
parsed = utils.urlparse('https://127.0.0.1/')
self.assertEqual(parsed.scheme, 'https')
parsed = utils.urlparse('http://[::1]/')
self.assertEqual(parsed.hostname, '::1')
parsed = utils.urlparse('http://[::1]:8080/')
self.assertEqual(parsed.hostname, '::1')
self.assertEqual(parsed.port, 8080)
parsed = utils.urlparse('www.example.com')
self.assertEqual(parsed.hostname, '')
def test_search_tree(self):
# file match & ext miss
with temptree(['asdf.conf', 'blarg.conf', 'asdf.cfg']) as t:
asdf = utils.search_tree(t, 'a*', '.conf')
self.assertEqual(len(asdf), 1)
self.assertEqual(asdf[0],
os.path.join(t, 'asdf.conf'))
# multi-file match & glob miss & sort
with temptree(['application.bin', 'apple.bin', 'apropos.bin']) as t:
app_bins = utils.search_tree(t, 'app*', 'bin')
self.assertEqual(len(app_bins), 2)
self.assertEqual(app_bins[0],
os.path.join(t, 'apple.bin'))
self.assertEqual(app_bins[1],
os.path.join(t, 'application.bin'))
# test file in folder & ext miss & glob miss
files = (
'sub/file1.ini',
'sub/file2.conf',
'sub.bin',
'bus.ini',
'bus/file3.ini',
)
with temptree(files) as t:
sub_ini = utils.search_tree(t, 'sub*', '.ini')
self.assertEqual(len(sub_ini), 1)
self.assertEqual(sub_ini[0],
os.path.join(t, 'sub/file1.ini'))
# test multi-file in folder & sub-folder & ext miss & glob miss
files = (
'folder_file.txt',
'folder/1.txt',
'folder/sub/2.txt',
'folder2/3.txt',
            'Folder3/4.txt',
            'folder.rc',
)
with temptree(files) as t:
folder_texts = utils.search_tree(t, 'folder*', '.txt')
self.assertEqual(len(folder_texts), 4)
f1 = os.path.join(t, 'folder_file.txt')
f2 = os.path.join(t, 'folder/1.txt')
f3 = os.path.join(t, 'folder/sub/2.txt')
f4 = os.path.join(t, 'folder2/3.txt')
for f in [f1, f2, f3, f4]:
self.assertTrue(f in folder_texts)
def test_search_tree_with_directory_ext_match(self):
files = (
'object-server/object-server.conf-base',
'object-server/1.conf.d/base.conf',
'object-server/1.conf.d/1.conf',
'object-server/2.conf.d/base.conf',
'object-server/2.conf.d/2.conf',
'object-server/3.conf.d/base.conf',
'object-server/3.conf.d/3.conf',
'object-server/4.conf.d/base.conf',
'object-server/4.conf.d/4.conf',
)
with temptree(files) as t:
conf_dirs = utils.search_tree(t, 'object-server', '.conf',
dir_ext='conf.d')
self.assertEqual(len(conf_dirs), 4)
for i in range(4):
conf_dir = os.path.join(t, 'object-server/%d.conf.d' % (i + 1))
self.assertTrue(conf_dir in conf_dirs)
def test_search_tree_conf_dir_with_named_conf_match(self):
files = (
'proxy-server/proxy-server.conf.d/base.conf',
'proxy-server/proxy-server.conf.d/pipeline.conf',
'proxy-server/proxy-noauth.conf.d/base.conf',
'proxy-server/proxy-noauth.conf.d/pipeline.conf',
)
with temptree(files) as t:
conf_dirs = utils.search_tree(t, 'proxy-server', 'noauth.conf',
dir_ext='noauth.conf.d')
self.assertEqual(len(conf_dirs), 1)
conf_dir = conf_dirs[0]
expected = os.path.join(t, 'proxy-server/proxy-noauth.conf.d')
self.assertEqual(conf_dir, expected)
def test_search_tree_conf_dir_pid_with_named_conf_match(self):
files = (
'proxy-server/proxy-server.pid.d',
'proxy-server/proxy-noauth.pid.d',
)
with temptree(files) as t:
pid_files = utils.search_tree(t, 'proxy-server',
exts=['noauth.pid', 'noauth.pid.d'])
self.assertEqual(len(pid_files), 1)
pid_file = pid_files[0]
expected = os.path.join(t, 'proxy-server/proxy-noauth.pid.d')
self.assertEqual(pid_file, expected)
def test_write_file(self):
with temptree([]) as t:
file_name = os.path.join(t, 'test')
utils.write_file(file_name, 'test')
with open(file_name, 'r') as f:
contents = f.read()
self.assertEqual(contents, 'test')
# and also subdirs
file_name = os.path.join(t, 'subdir/test2')
utils.write_file(file_name, 'test2')
with open(file_name, 'r') as f:
contents = f.read()
self.assertEqual(contents, 'test2')
# but can't over-write files
file_name = os.path.join(t, 'subdir/test2/test3')
self.assertRaises(IOError, utils.write_file, file_name,
'test3')
def test_remove_file(self):
with temptree([]) as t:
file_name = os.path.join(t, 'blah.pid')
# assert no raise
self.assertEqual(os.path.exists(file_name), False)
self.assertIsNone(utils.remove_file(file_name))
with open(file_name, 'w') as f:
f.write('1')
self.assertTrue(os.path.exists(file_name))
self.assertIsNone(utils.remove_file(file_name))
self.assertFalse(os.path.exists(file_name))
def test_human_readable(self):
self.assertEqual(utils.human_readable(0), '0')
self.assertEqual(utils.human_readable(1), '1')
self.assertEqual(utils.human_readable(10), '10')
self.assertEqual(utils.human_readable(100), '100')
self.assertEqual(utils.human_readable(999), '999')
self.assertEqual(utils.human_readable(1024), '1Ki')
self.assertEqual(utils.human_readable(1535), '1Ki')
self.assertEqual(utils.human_readable(1536), '2Ki')
self.assertEqual(utils.human_readable(1047552), '1023Ki')
self.assertEqual(utils.human_readable(1048063), '1023Ki')
self.assertEqual(utils.human_readable(1048064), '1Mi')
self.assertEqual(utils.human_readable(1048576), '1Mi')
self.assertEqual(utils.human_readable(1073741824), '1Gi')
self.assertEqual(utils.human_readable(1099511627776), '1Ti')
self.assertEqual(utils.human_readable(1125899906842624), '1Pi')
self.assertEqual(utils.human_readable(1152921504606846976), '1Ei')
self.assertEqual(utils.human_readable(1180591620717411303424), '1Zi')
self.assertEqual(utils.human_readable(1208925819614629174706176),
'1Yi')
self.assertEqual(utils.human_readable(1237940039285380274899124224),
'1024Yi')
def test_validate_sync_to(self):
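        # Every URL is exercised both with and without a realms config;
        # '//realm/cluster/...' style values can only be resolved when a
        # realms config is present, so without one they validate to
        # (None, None, None, None).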
fname = 'container-sync-realms.conf'
fcontents = '''
[US]
key = 9ff3b71c849749dbaec4ccdd3cbab62b
cluster_dfw1 = http://dfw1.host/v1/
'''
with temptree([fname], [fcontents]) as tempdir:
logger = FakeLogger()
fpath = os.path.join(tempdir, fname)
csr = ContainerSyncRealms(fpath, logger)
for realms_conf in (None, csr):
for goodurl, result in (
('http://1.1.1.1/v1/a/c',
(None, 'http://1.1.1.1/v1/a/c', None, None)),
('http://1.1.1.1:8080/a/c',
(None, 'http://1.1.1.1:8080/a/c', None, None)),
('http://2.2.2.2/a/c',
(None, 'http://2.2.2.2/a/c', None, None)),
('https://1.1.1.1/v1/a/c',
(None, 'https://1.1.1.1/v1/a/c', None, None)),
('//US/DFW1/a/c',
(None, 'http://dfw1.host/v1/a/c', 'US',
'9ff3b71c849749dbaec4ccdd3cbab62b')),
('//us/DFW1/a/c',
(None, 'http://dfw1.host/v1/a/c', 'US',
'9ff3b71c849749dbaec4ccdd3cbab62b')),
('//us/dfw1/a/c',
(None, 'http://dfw1.host/v1/a/c', 'US',
'9ff3b71c849749dbaec4ccdd3cbab62b')),
('//',
(None, None, None, None)),
('',
(None, None, None, None))):
if goodurl.startswith('//') and not realms_conf:
self.assertEqual(
utils.validate_sync_to(
goodurl, ['1.1.1.1', '2.2.2.2'], realms_conf),
(None, None, None, None))
else:
self.assertEqual(
utils.validate_sync_to(
goodurl, ['1.1.1.1', '2.2.2.2'], realms_conf),
result)
for badurl, result in (
('http://1.1.1.1',
('Path required in X-Container-Sync-To', None, None,
None)),
('httpq://1.1.1.1/v1/a/c',
('Invalid scheme \'httpq\' in X-Container-Sync-To, '
'must be "//", "http", or "https".', None, None,
None)),
('http://1.1.1.1/v1/a/c?query',
('Params, queries, and fragments not allowed in '
'X-Container-Sync-To', None, None, None)),
('http://1.1.1.1/v1/a/c#frag',
('Params, queries, and fragments not allowed in '
'X-Container-Sync-To', None, None, None)),
('http://1.1.1.1/v1/a/c?query#frag',
('Params, queries, and fragments not allowed in '
'X-Container-Sync-To', None, None, None)),
('http://1.1.1.1/v1/a/c?query=param',
('Params, queries, and fragments not allowed in '
'X-Container-Sync-To', None, None, None)),
('http://1.1.1.1/v1/a/c?query=param#frag',
('Params, queries, and fragments not allowed in '
'X-Container-Sync-To', None, None, None)),
('http://1.1.1.2/v1/a/c',
("Invalid host '1.1.1.2' in X-Container-Sync-To",
None, None, None)),
('//us/invalid/a/c',
("No cluster endpoint for 'us' 'invalid'", None,
None, None)),
('//invalid/dfw1/a/c',
("No realm key for 'invalid'", None, None, None)),
('//us/invalid1/a/',
("Invalid X-Container-Sync-To format "
"'//us/invalid1/a/'", None, None, None)),
('//us/invalid1/a',
("Invalid X-Container-Sync-To format "
"'//us/invalid1/a'", None, None, None)),
('//us/invalid1/',
("Invalid X-Container-Sync-To format "
"'//us/invalid1/'", None, None, None)),
('//us/invalid1',
("Invalid X-Container-Sync-To format "
"'//us/invalid1'", None, None, None)),
('//us/',
("Invalid X-Container-Sync-To format "
"'//us/'", None, None, None)),
('//us',
("Invalid X-Container-Sync-To format "
"'//us'", None, None, None))):
if badurl.startswith('//') and not realms_conf:
self.assertEqual(
utils.validate_sync_to(
badurl, ['1.1.1.1', '2.2.2.2'], realms_conf),
(None, None, None, None))
else:
self.assertEqual(
utils.validate_sync_to(
badurl, ['1.1.1.1', '2.2.2.2'], realms_conf),
result)
def test_TRUE_VALUES(self):
for v in utils.TRUE_VALUES:
self.assertEqual(v, v.lower())
def test_config_true_value(self):
orig_trues = utils.TRUE_VALUES
try:
utils.TRUE_VALUES = 'hello world'.split()
for val in 'hello world HELLO WORLD'.split():
self.assertTrue(utils.config_true_value(val) is True)
self.assertTrue(utils.config_true_value(True) is True)
self.assertTrue(utils.config_true_value('foo') is False)
self.assertTrue(utils.config_true_value(False) is False)
finally:
utils.TRUE_VALUES = orig_trues
def test_config_positive_int_value(self):
expectations = {
# value : expected,
'1': 1,
1: 1,
'2': 2,
'1024': 1024,
'0': ValueError,
'-1': ValueError,
'0x01': ValueError,
'asdf': ValueError,
None: ValueError,
0: ValueError,
-1: ValueError,
            '1.2': ValueError,  # a string expressing a float should be a ValueError
}
for value, expected in expectations.items():
try:
rv = utils.config_positive_int_value(value)
except Exception as e:
if e.__class__ is not expected:
raise
else:
self.assertEqual(
'Config option must be an positive int number, '
'not "%s".' % value, e.message)
else:
self.assertEqual(expected, rv)
def test_config_auto_int_value(self):
expectations = {
# (value, default) : expected,
('1', 0): 1,
(1, 0): 1,
('asdf', 0): ValueError,
('auto', 1): 1,
('AutO', 1): 1,
('Aut0', 1): ValueError,
(None, 1): 1,
}
for (value, default), expected in expectations.items():
try:
rv = utils.config_auto_int_value(value, default)
except Exception as e:
if e.__class__ is not expected:
raise
else:
self.assertEqual(expected, rv)
def test_streq_const_time(self):
self.assertTrue(utils.streq_const_time('abc123', 'abc123'))
self.assertFalse(utils.streq_const_time('a', 'aaaaa'))
self.assertFalse(utils.streq_const_time('ABC123', 'abc123'))
def test_quorum_size(self):
expected_sizes = {1: 1,
2: 1,
3: 2,
4: 2,
5: 3}
got_sizes = dict([(n, utils.quorum_size(n))
for n in expected_sizes])
self.assertEqual(expected_sizes, got_sizes)
def test_majority_size(self):
expected_sizes = {1: 1,
2: 2,
3: 2,
4: 3,
5: 3}
got_sizes = dict([(n, utils.majority_size(n))
for n in expected_sizes])
self.assertEqual(expected_sizes, got_sizes)
def test_rsync_ip_ipv4_localhost(self):
self.assertEqual(utils.rsync_ip('127.0.0.1'), '127.0.0.1')
def test_rsync_ip_ipv6_random_ip(self):
self.assertEqual(
utils.rsync_ip('fe80:0000:0000:0000:0202:b3ff:fe1e:8329'),
'[fe80:0000:0000:0000:0202:b3ff:fe1e:8329]')
def test_rsync_ip_ipv6_ipv4_compatible(self):
self.assertEqual(
utils.rsync_ip('::ffff:192.0.2.128'), '[::ffff:192.0.2.128]')
def test_rsync_module_interpolation(self):
fake_device = {'ip': '127.0.0.1', 'port': 11,
'replication_ip': '127.0.0.2', 'replication_port': 12,
'region': '1', 'zone': '2', 'device': 'sda1',
'meta': 'just_a_string'}
self.assertEqual(
utils.rsync_module_interpolation('{ip}', fake_device),
'127.0.0.1')
self.assertEqual(
utils.rsync_module_interpolation('{port}', fake_device),
'11')
self.assertEqual(
utils.rsync_module_interpolation('{replication_ip}', fake_device),
'127.0.0.2')
self.assertEqual(
utils.rsync_module_interpolation('{replication_port}',
fake_device),
'12')
self.assertEqual(
utils.rsync_module_interpolation('{region}', fake_device),
'1')
self.assertEqual(
utils.rsync_module_interpolation('{zone}', fake_device),
'2')
self.assertEqual(
utils.rsync_module_interpolation('{device}', fake_device),
'sda1')
self.assertEqual(
utils.rsync_module_interpolation('{meta}', fake_device),
'just_a_string')
self.assertEqual(
utils.rsync_module_interpolation('{replication_ip}::object',
fake_device),
'127.0.0.2::object')
self.assertEqual(
utils.rsync_module_interpolation('{ip}::container{port}',
fake_device),
'127.0.0.1::container11')
self.assertEqual(
utils.rsync_module_interpolation(
'{replication_ip}::object_{device}', fake_device),
'127.0.0.2::object_sda1')
self.assertEqual(
utils.rsync_module_interpolation(
'127.0.0.3::object_{replication_port}', fake_device),
'127.0.0.3::object_12')
self.assertRaises(ValueError, utils.rsync_module_interpolation,
'{replication_ip}::object_{deivce}', fake_device)
def test_fallocate_reserve(self):
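        # StatVFS stands in for os.fstatvfs: free space is f_frsize *
        # f_bavail bytes out of f_frsize * f_blocks total. FALLOCATE_RESERVE
        # may be an absolute byte count or a percentage, and the wrapper is
        # expected to raise ENOSPC whenever an allocation would leave free
        # space at or below that reserve.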
class StatVFS(object):
f_frsize = 1024
f_bavail = 1
f_blocks = 100
def fstatvfs(fd):
return StatVFS()
orig_FALLOCATE_RESERVE = utils.FALLOCATE_RESERVE
orig_fstatvfs = utils.os.fstatvfs
try:
fallocate = utils.FallocateWrapper(noop=True)
utils.os.fstatvfs = fstatvfs
# Make sure setting noop, which disables fallocate, also stops the
# fallocate_reserve check.
# Set the fallocate_reserve to 99% and request an object that is
# about 50% the size. With fallocate_reserve off this will succeed.
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('99%')
self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(500)), 0)
# Setting noop to False after the constructor allows us to use
# a noop fallocate syscall and still test fallocate_reserve.
fallocate.noop = False
# Want 1023 reserved, have 1024 * 1 free, so succeeds
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('1023')
StatVFS.f_frsize = 1024
StatVFS.f_bavail = 1
self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(0)), 0)
# Want 1023 reserved, have 512 * 2 free, so succeeds
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('1023')
StatVFS.f_frsize = 512
StatVFS.f_bavail = 2
self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(0)), 0)
# Want 1024 reserved, have 1024 * 1 free, so fails
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('1024')
StatVFS.f_frsize = 1024
StatVFS.f_bavail = 1
with self.assertRaises(OSError) as catcher:
fallocate(0, 1, 0, ctypes.c_uint64(0))
self.assertEqual(
str(catcher.exception),
'[Errno %d] FALLOCATE_RESERVE fail 1024 <= 1024'
% errno.ENOSPC)
self.assertEqual(catcher.exception.errno, errno.ENOSPC)
# Want 1024 reserved, have 512 * 2 free, so fails
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('1024')
StatVFS.f_frsize = 512
StatVFS.f_bavail = 2
with self.assertRaises(OSError) as catcher:
fallocate(0, 1, 0, ctypes.c_uint64(0))
self.assertEqual(
str(catcher.exception),
'[Errno %d] FALLOCATE_RESERVE fail 1024 <= 1024'
% errno.ENOSPC)
self.assertEqual(catcher.exception.errno, errno.ENOSPC)
# Want 2048 reserved, have 1024 * 1 free, so fails
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('2048')
StatVFS.f_frsize = 1024
StatVFS.f_bavail = 1
with self.assertRaises(OSError) as catcher:
fallocate(0, 1, 0, ctypes.c_uint64(0))
self.assertEqual(
str(catcher.exception),
'[Errno %d] FALLOCATE_RESERVE fail 1024 <= 2048'
% errno.ENOSPC)
self.assertEqual(catcher.exception.errno, errno.ENOSPC)
# Want 2048 reserved, have 512 * 2 free, so fails
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('2048')
StatVFS.f_frsize = 512
StatVFS.f_bavail = 2
with self.assertRaises(OSError) as catcher:
fallocate(0, 1, 0, ctypes.c_uint64(0))
self.assertEqual(
str(catcher.exception),
'[Errno %d] FALLOCATE_RESERVE fail 1024 <= 2048'
% errno.ENOSPC)
self.assertEqual(catcher.exception.errno, errno.ENOSPC)
# Want 1023 reserved, have 1024 * 1 free, but file size is 1, so
# fails
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('1023')
StatVFS.f_frsize = 1024
StatVFS.f_bavail = 1
with self.assertRaises(OSError) as catcher:
fallocate(0, 1, 0, ctypes.c_uint64(1))
self.assertEqual(
str(catcher.exception),
'[Errno %d] FALLOCATE_RESERVE fail 1023 <= 1023'
% errno.ENOSPC)
self.assertEqual(catcher.exception.errno, errno.ENOSPC)
# Want 1022 reserved, have 1024 * 1 free, and file size is 1, so
# succeeds
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('1022')
StatVFS.f_frsize = 1024
StatVFS.f_bavail = 1
self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(1)), 0)
# Want 1% reserved, have 100 bytes * 2/100 free, and file size is
# 99, so succeeds
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('1%')
StatVFS.f_frsize = 100
StatVFS.f_bavail = 2
StatVFS.f_blocks = 100
self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(99)), 0)
# Want 2% reserved, have 50 bytes * 2/50 free, and file size is 49,
# so succeeds
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('2%')
StatVFS.f_frsize = 50
StatVFS.f_bavail = 2
StatVFS.f_blocks = 50
self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(49)), 0)
# Want 100% reserved, have 100 * 100/100 free, and file size is 0,
# so fails.
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('100%')
StatVFS.f_frsize = 100
StatVFS.f_bavail = 100
StatVFS.f_blocks = 100
with self.assertRaises(OSError) as catcher:
fallocate(0, 1, 0, ctypes.c_uint64(0))
self.assertEqual(
str(catcher.exception),
'[Errno %d] FALLOCATE_RESERVE fail 100.0 <= 100.0'
% errno.ENOSPC)
self.assertEqual(catcher.exception.errno, errno.ENOSPC)
# Want 1% reserved, have 100 * 2/100 free, and file size is 101,
# so fails.
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('1%')
StatVFS.f_frsize = 100
StatVFS.f_bavail = 2
StatVFS.f_blocks = 100
with self.assertRaises(OSError) as catcher:
fallocate(0, 1, 0, ctypes.c_uint64(101))
self.assertEqual(
str(catcher.exception),
'[Errno %d] FALLOCATE_RESERVE fail 0.99 <= 1.0'
% errno.ENOSPC)
self.assertEqual(catcher.exception.errno, errno.ENOSPC)
            # Want 98% reserved, have 100 bytes * 99/100 free, and file size
            # is 100, so fails
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('98%')
StatVFS.f_frsize = 100
StatVFS.f_bavail = 99
StatVFS.f_blocks = 100
with self.assertRaises(OSError) as catcher:
fallocate(0, 1, 0, ctypes.c_uint64(100))
self.assertEqual(
str(catcher.exception),
'[Errno %d] FALLOCATE_RESERVE fail 98.0 <= 98.0'
% errno.ENOSPC)
self.assertEqual(catcher.exception.errno, errno.ENOSPC)
# Want 2% reserved, have 1000 bytes * 21/1000 free, and file size
# is 999, so succeeds.
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('2%')
StatVFS.f_frsize = 1000
StatVFS.f_bavail = 21
StatVFS.f_blocks = 1000
self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(999)), 0)
            # Want 2% reserved, have 1000 bytes * 21/1000 free, and file size
# is 1000, so fails.
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('2%')
StatVFS.f_frsize = 1000
StatVFS.f_bavail = 21
StatVFS.f_blocks = 1000
with self.assertRaises(OSError) as catcher:
fallocate(0, 1, 0, ctypes.c_uint64(1000))
self.assertEqual(
str(catcher.exception),
'[Errno %d] FALLOCATE_RESERVE fail 2.0 <= 2.0'
% errno.ENOSPC)
self.assertEqual(catcher.exception.errno, errno.ENOSPC)
finally:
utils.FALLOCATE_RESERVE = orig_FALLOCATE_RESERVE
utils.os.fstatvfs = orig_fstatvfs
def test_fallocate_func(self):
class FallocateWrapper(object):
def __init__(self):
self.last_call = None
def __call__(self, *args):
self.last_call = list(args)
self.last_call[-1] = self.last_call[-1].value
return 0
orig__sys_fallocate = utils._sys_fallocate
try:
utils._sys_fallocate = FallocateWrapper()
# Ensure fallocate calls _sys_fallocate even with 0 bytes
utils._sys_fallocate.last_call = None
utils.fallocate(1234, 0)
self.assertEqual(utils._sys_fallocate.last_call,
[1234, 1, 0, 0])
# Ensure fallocate calls _sys_fallocate even with negative bytes
utils._sys_fallocate.last_call = None
utils.fallocate(1234, -5678)
self.assertEqual(utils._sys_fallocate.last_call,
[1234, 1, 0, 0])
# Ensure fallocate calls _sys_fallocate properly with positive
# bytes
utils._sys_fallocate.last_call = None
utils.fallocate(1234, 1)
self.assertEqual(utils._sys_fallocate.last_call,
[1234, 1, 0, 1])
utils._sys_fallocate.last_call = None
utils.fallocate(1234, 10 * 1024 * 1024 * 1024)
self.assertEqual(utils._sys_fallocate.last_call,
[1234, 1, 0, 10 * 1024 * 1024 * 1024])
finally:
utils._sys_fallocate = orig__sys_fallocate
def test_generate_trans_id(self):
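        # Transaction ids are expected to look like 'tx' + 21 hex chars +
        # '-' + 10 hex digits encoding the whole-second timestamp, with any
        # caller-supplied suffix appended after that.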
fake_time = 1366428370.5163341
with patch.object(utils.time, 'time', return_value=fake_time):
trans_id = utils.generate_trans_id('')
self.assertEqual(len(trans_id), 34)
self.assertEqual(trans_id[:2], 'tx')
self.assertEqual(trans_id[23], '-')
self.assertEqual(int(trans_id[24:], 16), int(fake_time))
with patch.object(utils.time, 'time', return_value=fake_time):
trans_id = utils.generate_trans_id('-suffix')
self.assertEqual(len(trans_id), 41)
self.assertEqual(trans_id[:2], 'tx')
self.assertEqual(trans_id[34:], '-suffix')
self.assertEqual(trans_id[23], '-')
self.assertEqual(int(trans_id[24:34], 16), int(fake_time))
def test_get_trans_id_time(self):
ts = utils.get_trans_id_time('tx8c8bc884cdaf499bb29429aa9c46946e')
self.assertIsNone(ts)
ts = utils.get_trans_id_time('tx1df4ff4f55ea45f7b2ec2-0051720c06')
self.assertEqual(ts, 1366428678)
self.assertEqual(
time.asctime(time.gmtime(ts)) + ' UTC',
'Sat Apr 20 03:31:18 2013 UTC')
ts = utils.get_trans_id_time(
'tx1df4ff4f55ea45f7b2ec2-0051720c06-suffix')
self.assertEqual(ts, 1366428678)
self.assertEqual(
time.asctime(time.gmtime(ts)) + ' UTC',
'Sat Apr 20 03:31:18 2013 UTC')
ts = utils.get_trans_id_time('')
self.assertIsNone(ts)
ts = utils.get_trans_id_time('garbage')
self.assertIsNone(ts)
ts = utils.get_trans_id_time('tx1df4ff4f55ea45f7b2ec2-almostright')
self.assertIsNone(ts)
def test_config_fallocate_value(self):
fallocate_value, is_percent = utils.config_fallocate_value('10%')
self.assertEqual(fallocate_value, 10)
self.assertTrue(is_percent)
fallocate_value, is_percent = utils.config_fallocate_value('10')
self.assertEqual(fallocate_value, 10)
self.assertFalse(is_percent)
try:
fallocate_value, is_percent = utils.config_fallocate_value('ab%')
except ValueError as err:
exc = err
self.assertEqual(str(exc), 'Error: ab% is an invalid value for '
'fallocate_reserve.')
try:
fallocate_value, is_percent = utils.config_fallocate_value('ab')
except ValueError as err:
exc = err
self.assertEqual(str(exc), 'Error: ab is an invalid value for '
'fallocate_reserve.')
try:
fallocate_value, is_percent = utils.config_fallocate_value('1%%')
except ValueError as err:
exc = err
self.assertEqual(str(exc), 'Error: 1%% is an invalid value for '
'fallocate_reserve.')
try:
fallocate_value, is_percent = utils.config_fallocate_value('10.0')
except ValueError as err:
exc = err
self.assertEqual(str(exc), 'Error: 10.0 is an invalid value for '
'fallocate_reserve.')
fallocate_value, is_percent = utils.config_fallocate_value('10.5%')
self.assertEqual(fallocate_value, 10.5)
self.assertTrue(is_percent)
fallocate_value, is_percent = utils.config_fallocate_value('10.000%')
self.assertEqual(fallocate_value, 10.000)
self.assertTrue(is_percent)
def test_tpool_reraise(self):
with patch.object(utils.tpool, 'execute', lambda f: f()):
self.assertTrue(
utils.tpool_reraise(MagicMock(return_value='test1')), 'test1')
self.assertRaises(
Exception,
utils.tpool_reraise, MagicMock(side_effect=Exception('test2')))
self.assertRaises(
BaseException,
utils.tpool_reraise,
MagicMock(side_effect=BaseException('test3')))
def test_lock_file(self):
flags = os.O_CREAT | os.O_RDWR
with NamedTemporaryFile(delete=False) as nt:
nt.write("test string")
nt.flush()
nt.close()
with utils.lock_file(nt.name, unlink=False) as f:
self.assertEqual(f.read(), "test string")
# we have a lock, now let's try to get a newer one
fd = os.open(nt.name, flags)
self.assertRaises(IOError, fcntl.flock, fd,
fcntl.LOCK_EX | fcntl.LOCK_NB)
with utils.lock_file(nt.name, unlink=False, append=True) as f:
f.seek(0)
self.assertEqual(f.read(), "test string")
f.seek(0)
f.write("\nanother string")
f.flush()
f.seek(0)
self.assertEqual(f.read(), "test string\nanother string")
# we have a lock, now let's try to get a newer one
fd = os.open(nt.name, flags)
self.assertRaises(IOError, fcntl.flock, fd,
fcntl.LOCK_EX | fcntl.LOCK_NB)
with utils.lock_file(nt.name, timeout=3, unlink=False) as f:
try:
with utils.lock_file(
nt.name, timeout=1, unlink=False) as f:
self.assertTrue(
False, "Expected LockTimeout exception")
except LockTimeout:
pass
with utils.lock_file(nt.name, unlink=True) as f:
self.assertEqual(f.read(), "test string\nanother string")
# we have a lock, now let's try to get a newer one
fd = os.open(nt.name, flags)
self.assertRaises(
IOError, fcntl.flock, fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
self.assertRaises(OSError, os.remove, nt.name)
def test_lock_file_unlinked_after_open(self):
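        # Simulate another process unlinking (and, in the second case,
        # recreating) the lock file right after it is opened; lock_file is
        # expected to notice the stale inode and reopen, so the fd it yields
        # refers to a different inode than the original temp file.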
os_open = os.open
first_pass = [True]
def deleting_open(filename, flags):
# unlink the file after it's opened. once.
fd = os_open(filename, flags)
if first_pass[0]:
os.unlink(filename)
first_pass[0] = False
return fd
with NamedTemporaryFile(delete=False) as nt:
with mock.patch('os.open', deleting_open):
with utils.lock_file(nt.name, unlink=True) as f:
self.assertNotEqual(os.fstat(nt.fileno()).st_ino,
os.fstat(f.fileno()).st_ino)
first_pass = [True]
def recreating_open(filename, flags):
# unlink and recreate the file after it's opened
fd = os_open(filename, flags)
if first_pass[0]:
os.unlink(filename)
os.close(os_open(filename, os.O_CREAT | os.O_RDWR))
first_pass[0] = False
return fd
with NamedTemporaryFile(delete=False) as nt:
with mock.patch('os.open', recreating_open):
with utils.lock_file(nt.name, unlink=True) as f:
self.assertNotEqual(os.fstat(nt.fileno()).st_ino,
os.fstat(f.fileno()).st_ino)
def test_lock_file_held_on_unlink(self):
os_unlink = os.unlink
def flocking_unlink(filename):
# make sure the lock is held when we unlink
fd = os.open(filename, os.O_RDWR)
self.assertRaises(
IOError, fcntl.flock, fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
os.close(fd)
os_unlink(filename)
with NamedTemporaryFile(delete=False) as nt:
with mock.patch('os.unlink', flocking_unlink):
with utils.lock_file(nt.name, unlink=True):
pass
def test_lock_file_no_unlink_if_fail(self):
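        # Another fd grabs the flock first, so lock_file should hit
        # LockTimeout -- and must leave the file in place rather than
        # unlinking it on failure.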
os_open = os.open
with NamedTemporaryFile(delete=True) as nt:
def lock_on_open(filename, flags):
# lock the file on another fd after it's opened.
fd = os_open(filename, flags)
fd2 = os_open(filename, flags)
fcntl.flock(fd2, fcntl.LOCK_EX | fcntl.LOCK_NB)
return fd
try:
timedout = False
with mock.patch('os.open', lock_on_open):
with utils.lock_file(nt.name, unlink=False, timeout=0.01):
pass
except LockTimeout:
timedout = True
self.assertTrue(timedout)
self.assertTrue(os.path.exists(nt.name))
def test_ismount_path_does_not_exist(self):
tmpdir = mkdtemp()
try:
self.assertFalse(utils.ismount(os.path.join(tmpdir, 'bar')))
finally:
shutil.rmtree(tmpdir)
def test_ismount_path_not_mount(self):
tmpdir = mkdtemp()
try:
self.assertFalse(utils.ismount(tmpdir))
finally:
shutil.rmtree(tmpdir)
def test_ismount_path_error(self):
def _mock_os_lstat(path):
raise OSError(13, "foo")
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
# Raises exception with _raw -- see next test.
utils.ismount(tmpdir)
finally:
shutil.rmtree(tmpdir)
def test_ismount_raw_path_error(self):
def _mock_os_lstat(path):
raise OSError(13, "foo")
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
self.assertRaises(OSError, utils.ismount_raw, tmpdir)
finally:
shutil.rmtree(tmpdir)
def test_ismount_path_is_symlink(self):
tmpdir = mkdtemp()
try:
link = os.path.join(tmpdir, "tmp")
os.symlink("/tmp", link)
self.assertFalse(utils.ismount(link))
finally:
shutil.rmtree(tmpdir)
def test_ismount_path_is_root(self):
self.assertTrue(utils.ismount('/'))
def test_ismount_parent_path_error(self):
_os_lstat = os.lstat
def _mock_os_lstat(path):
if path.endswith(".."):
raise OSError(13, "foo")
else:
return _os_lstat(path)
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
# Raises exception with _raw -- see next test.
utils.ismount(tmpdir)
finally:
shutil.rmtree(tmpdir)
def test_ismount_raw_parent_path_error(self):
_os_lstat = os.lstat
def _mock_os_lstat(path):
if path.endswith(".."):
raise OSError(13, "foo")
else:
return _os_lstat(path)
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
self.assertRaises(OSError, utils.ismount_raw, tmpdir)
finally:
shutil.rmtree(tmpdir)
def test_ismount_successes_dev(self):
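        # This test and the next mock os.lstat so the stat fields of the
        # path and its parent directory disagree; ismount's device/inode
        # comparison should then report a mount point even though tmpdir is
        # not really one.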
_os_lstat = os.lstat
class MockStat(object):
def __init__(self, mode, dev, ino):
self.st_mode = mode
self.st_dev = dev
self.st_ino = ino
def _mock_os_lstat(path):
if path.endswith(".."):
parent = _os_lstat(path)
return MockStat(parent.st_mode, parent.st_dev + 1,
parent.st_ino)
else:
return _os_lstat(path)
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
self.assertTrue(utils.ismount(tmpdir))
finally:
shutil.rmtree(tmpdir)
def test_ismount_successes_ino(self):
_os_lstat = os.lstat
class MockStat(object):
def __init__(self, mode, dev, ino):
self.st_mode = mode
self.st_dev = dev
self.st_ino = ino
def _mock_os_lstat(path):
if path.endswith(".."):
return _os_lstat(path)
else:
parent_path = os.path.join(path, "..")
child = _os_lstat(path)
parent = _os_lstat(parent_path)
return MockStat(child.st_mode, parent.st_ino,
child.st_dev)
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
self.assertTrue(utils.ismount(tmpdir))
finally:
shutil.rmtree(tmpdir)
def test_parse_content_type(self):
self.assertEqual(utils.parse_content_type('text/plain'),
('text/plain', []))
self.assertEqual(utils.parse_content_type('text/plain;charset=utf-8'),
('text/plain', [('charset', 'utf-8')]))
self.assertEqual(
utils.parse_content_type('text/plain;hello="world";charset=utf-8'),
('text/plain', [('hello', '"world"'), ('charset', 'utf-8')]))
self.assertEqual(
utils.parse_content_type('text/plain; hello="world"; a=b'),
('text/plain', [('hello', '"world"'), ('a', 'b')]))
self.assertEqual(
utils.parse_content_type(r'text/plain; x="\""; a=b'),
('text/plain', [('x', r'"\""'), ('a', 'b')]))
self.assertEqual(
utils.parse_content_type(r'text/plain; x; a=b'),
('text/plain', [('x', ''), ('a', 'b')]))
self.assertEqual(
utils.parse_content_type(r'text/plain; x="\""; a'),
('text/plain', [('x', r'"\""'), ('a', '')]))
def test_override_bytes_from_content_type(self):
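        # The swift_bytes content-type parameter should override the
        # listing's byte count and be stripped from the stored content type;
        # a non-integer value is ignored for the count but still stripped.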
listing_dict = {
'bytes': 1234, 'hash': 'asdf', 'name': 'zxcv',
'content_type': 'text/plain; hello="world"; swift_bytes=15'}
utils.override_bytes_from_content_type(listing_dict,
logger=FakeLogger())
self.assertEqual(listing_dict['bytes'], 15)
self.assertEqual(listing_dict['content_type'],
'text/plain;hello="world"')
listing_dict = {
'bytes': 1234, 'hash': 'asdf', 'name': 'zxcv',
'content_type': 'text/plain; hello="world"; swift_bytes=hey'}
utils.override_bytes_from_content_type(listing_dict,
logger=FakeLogger())
self.assertEqual(listing_dict['bytes'], 1234)
self.assertEqual(listing_dict['content_type'],
'text/plain;hello="world"')
def test_extract_swift_bytes(self):
scenarios = {
# maps input value -> expected returned tuple
'': ('', None),
'text/plain': ('text/plain', None),
'text/plain; other=thing': ('text/plain;other=thing', None),
'text/plain; swift_bytes=123': ('text/plain', '123'),
'text/plain; other=thing;swift_bytes=123':
('text/plain;other=thing', '123'),
'text/plain; swift_bytes=123; other=thing':
('text/plain;other=thing', '123'),
'text/plain; swift_bytes=123; swift_bytes=456':
('text/plain', '456'),
'text/plain; swift_bytes=123; other=thing;swift_bytes=456':
('text/plain;other=thing', '456')}
for test_value, expected in scenarios.items():
self.assertEqual(expected, utils.extract_swift_bytes(test_value))
def test_clean_content_type(self):
subtests = {
'': '', 'text/plain': 'text/plain',
'text/plain; someother=thing': 'text/plain; someother=thing',
'text/plain; swift_bytes=123': 'text/plain',
'text/plain; someother=thing; swift_bytes=123':
'text/plain; someother=thing',
# Since Swift always tacks on the swift_bytes, clean_content_type()
# only strips swift_bytes if it's last. The next item simply shows
# that if for some other odd reason it's not last,
# clean_content_type() will not remove it from the header.
'text/plain; swift_bytes=123; someother=thing':
'text/plain; swift_bytes=123; someother=thing'}
for before, after in subtests.items():
self.assertEqual(utils.clean_content_type(before), after)
def test_get_valid_utf8_str(self):
def do_test(input_value, expected):
actual = utils.get_valid_utf8_str(input_value)
self.assertEqual(expected, actual)
self.assertIsInstance(actual, six.binary_type)
actual.decode('utf-8')
do_test(b'abc', b'abc')
do_test(u'abc', b'abc')
do_test(u'\uc77c\uc601', b'\xec\x9d\xbc\xec\x98\x81')
do_test(b'\xec\x9d\xbc\xec\x98\x81', b'\xec\x9d\xbc\xec\x98\x81')
# test some invalid UTF-8
do_test(b'\xec\x9d\xbc\xec\x98', b'\xec\x9d\xbc\xef\xbf\xbd')
# check surrogate pairs, too
        do_test(u'\U0001f0a1', b'\xf0\x9f\x82\xa1')
        do_test(u'\uD83C\uDCA1', b'\xf0\x9f\x82\xa1')
        do_test(b'\xf0\x9f\x82\xa1', b'\xf0\x9f\x82\xa1')
        do_test(b'\xed\xa0\xbc\xed\xb2\xa1', b'\xf0\x9f\x82\xa1')
def test_quote(self):
res = utils.quote('/v1/a/c3/subdirx/')
assert res == '/v1/a/c3/subdirx/'
res = utils.quote('/v1/a&b/c3/subdirx/')
assert res == '/v1/a%26b/c3/subdirx/'
res = utils.quote('/v1/a&b/c3/subdirx/', safe='&')
assert res == '%2Fv1%2Fa&b%2Fc3%2Fsubdirx%2F'
unicode_sample = u'\uc77c\uc601'
account = 'abc_' + unicode_sample
valid_utf8_str = utils.get_valid_utf8_str(account)
account = 'abc_' + unicode_sample.encode('utf-8')[::-1]
invalid_utf8_str = utils.get_valid_utf8_str(account)
self.assertEqual('abc_%EC%9D%BC%EC%98%81',
utils.quote(valid_utf8_str))
self.assertEqual('abc_%EF%BF%BD%EF%BF%BD%EC%BC%9D%EF%BF%BD',
utils.quote(invalid_utf8_str))
def test_get_hmac(self):
self.assertEqual(
utils.get_hmac('GET', '/path', 1, 'abc'),
'b17f6ff8da0e251737aa9e3ee69a881e3e092e2f')
def test_get_policy_index(self):
# Account has no information about a policy
req = Request.blank(
'/sda1/p/a',
environ={'REQUEST_METHOD': 'GET'})
res = Response()
self.assertIsNone(utils.get_policy_index(req.headers,
res.headers))
# The policy of a container can be specified by the response header
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'GET'})
res = Response(headers={'X-Backend-Storage-Policy-Index': '1'})
self.assertEqual('1', utils.get_policy_index(req.headers,
res.headers))
# The policy of an object to be created can be specified by the request
# header
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Backend-Storage-Policy-Index': '2'})
res = Response()
self.assertEqual('2', utils.get_policy_index(req.headers,
res.headers))
def test_get_log_line(self):
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD', 'REMOTE_ADDR': '1.2.3.4'})
res = Response()
trans_time = 1.2
additional_info = 'some information'
server_pid = 1234
exp_line = '1.2.3.4 - - [01/Jan/1970:02:46:41 +0000] "HEAD ' \
'/sda1/p/a/c/o" 200 - "-" "-" "-" 1.2000 "some information" 1234 -'
with mock.patch(
'time.gmtime',
mock.MagicMock(side_effect=[time.gmtime(10001.0)])):
with mock.patch(
'os.getpid', mock.MagicMock(return_value=server_pid)):
self.assertEqual(
exp_line,
utils.get_log_line(req, res, trans_time, additional_info))
def test_cache_from_env(self):
# should never get logging when swift.cache is found
env = {'swift.cache': 42}
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertEqual(42, utils.cache_from_env(env))
self.assertEqual(0, len(logger.get_lines_for_level('error')))
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertEqual(42, utils.cache_from_env(env, False))
self.assertEqual(0, len(logger.get_lines_for_level('error')))
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertEqual(42, utils.cache_from_env(env, True))
self.assertEqual(0, len(logger.get_lines_for_level('error')))
# check allow_none controls logging when swift.cache is not found
err_msg = 'ERROR: swift.cache could not be found in env!'
env = {}
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertIsNone(utils.cache_from_env(env))
self.assertTrue(err_msg in logger.get_lines_for_level('error'))
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertIsNone(utils.cache_from_env(env, False))
self.assertTrue(err_msg in logger.get_lines_for_level('error'))
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertIsNone(utils.cache_from_env(env, True))
self.assertEqual(0, len(logger.get_lines_for_level('error')))
def test_fsync_dir(self):
tempdir = None
fd = None
try:
tempdir = mkdtemp(dir='/tmp')
fd, temppath = tempfile.mkstemp(dir=tempdir)
_mock_fsync = mock.Mock()
_mock_close = mock.Mock()
with patch('swift.common.utils.fsync', _mock_fsync):
with patch('os.close', _mock_close):
utils.fsync_dir(tempdir)
self.assertTrue(_mock_fsync.called)
self.assertTrue(_mock_close.called)
self.assertTrue(isinstance(_mock_fsync.call_args[0][0], int))
self.assertEqual(_mock_fsync.call_args[0][0],
_mock_close.call_args[0][0])
# Not a directory - arg is file path
self.assertRaises(OSError, utils.fsync_dir, temppath)
logger = FakeLogger()
def _mock_fsync(fd):
raise OSError(errno.EBADF, os.strerror(errno.EBADF))
with patch('swift.common.utils.fsync', _mock_fsync):
with mock.patch('swift.common.utils.logging', logger):
utils.fsync_dir(tempdir)
self.assertEqual(1, len(logger.get_lines_for_level('warning')))
finally:
if fd is not None:
os.close(fd)
os.unlink(temppath)
if tempdir:
os.rmdir(tempdir)
def test_renamer_with_fsync_dir(self):
tempdir = None
try:
tempdir = mkdtemp(dir='/tmp')
# Simulate part of object path already existing
part_dir = os.path.join(tempdir, 'objects/1234/')
os.makedirs(part_dir)
obj_dir = os.path.join(part_dir, 'aaa', 'a' * 32)
obj_path = os.path.join(obj_dir, '1425276031.12345.data')
# Object dir had to be created
_m_os_rename = mock.Mock()
_m_fsync_dir = mock.Mock()
with patch('os.rename', _m_os_rename):
with patch('swift.common.utils.fsync_dir', _m_fsync_dir):
utils.renamer("fake_path", obj_path)
_m_os_rename.assert_called_once_with('fake_path', obj_path)
# fsync_dir on parents of all newly create dirs
self.assertEqual(_m_fsync_dir.call_count, 3)
# Object dir existed
_m_os_rename.reset_mock()
_m_fsync_dir.reset_mock()
with patch('os.rename', _m_os_rename):
with patch('swift.common.utils.fsync_dir', _m_fsync_dir):
utils.renamer("fake_path", obj_path)
_m_os_rename.assert_called_once_with('fake_path', obj_path)
# fsync_dir only on the leaf dir
self.assertEqual(_m_fsync_dir.call_count, 1)
finally:
if tempdir:
shutil.rmtree(tempdir)
def test_renamer_when_fsync_is_false(self):
_m_os_rename = mock.Mock()
_m_fsync_dir = mock.Mock()
_m_makedirs_count = mock.Mock(return_value=2)
with patch('os.rename', _m_os_rename):
with patch('swift.common.utils.fsync_dir', _m_fsync_dir):
with patch('swift.common.utils.makedirs_count',
_m_makedirs_count):
utils.renamer("fake_path", "/a/b/c.data", fsync=False)
_m_makedirs_count.assert_called_once_with("/a/b")
_m_os_rename.assert_called_once_with('fake_path', "/a/b/c.data")
self.assertFalse(_m_fsync_dir.called)
def test_makedirs_count(self):
tempdir = None
fd = None
try:
tempdir = mkdtemp(dir='/tmp')
os.makedirs(os.path.join(tempdir, 'a/b'))
# 4 new dirs created
dirpath = os.path.join(tempdir, 'a/b/1/2/3/4')
ret = utils.makedirs_count(dirpath)
self.assertEqual(ret, 4)
# no new dirs created - dir already exists
ret = utils.makedirs_count(dirpath)
self.assertEqual(ret, 0)
# path exists and is a file
fd, temppath = tempfile.mkstemp(dir=dirpath)
os.close(fd)
self.assertRaises(OSError, utils.makedirs_count, temppath)
finally:
if tempdir:
shutil.rmtree(tempdir)
def test_modify_priority(self):
pid = os.getpid()
logger = debug_logger()
called = {}
def _fake_setpriority(*args):
called['setpriority'] = args
def _fake_syscall(*args):
called['syscall'] = args
# Skip if the current architecture doesn't support changing I/O priority
try:
utils.NR_ioprio_set()
except OSError as e:
raise SkipTest(e)
with patch('swift.common.utils._libc_setpriority',
_fake_setpriority), \
patch('swift.common.utils._posix_syscall', _fake_syscall):
called = {}
# not set / default
utils.modify_priority({}, logger)
self.assertEqual(called, {})
called = {}
# just nice
utils.modify_priority({'nice_priority': '1'}, logger)
self.assertEqual(called, {'setpriority': (0, pid, 1)})
called = {}
# just ionice class uses default priority 0
utils.modify_priority({'ionice_class': 'IOPRIO_CLASS_RT'}, logger)
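# ioprio_set(2) is invoked as syscall(NR_ioprio_set, IOPRIO_WHO_PROCESS=1,
# pid, value) where value packs the class into the upper bits:
# (class << 13) | priority, with RT=1, BE=2 and IDLE=3.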
architecture = os.uname()[4]
arch_bits = platform.architecture()[0]
if architecture == 'x86_64' and arch_bits == '64bit':
self.assertEqual(called, {'syscall': (251, 1, pid, 1 << 13)})
elif architecture == 'aarch64' and arch_bits == '64bit':
self.assertEqual(called, {'syscall': (30, 1, pid, 1 << 13)})
else:
self.fail("Unexpected call: %r" % called)
called = {}
# just ionice priority is ignored
utils.modify_priority({'ionice_priority': '4'}, logger)
self.assertEqual(called, {})
called = {}
# bad ionice class
utils.modify_priority({'ionice_class': 'class_foo'}, logger)
self.assertEqual(called, {})
called = {}
# ionice class & priority
utils.modify_priority({
'ionice_class': 'IOPRIO_CLASS_BE',
'ionice_priority': '4',
}, logger)
if architecture == 'x86_64' and arch_bits == '64bit':
self.assertEqual(called, {
'syscall': (251, 1, pid, 2 << 13 | 4)
})
elif architecture == 'aarch64' and arch_bits == '64bit':
self.assertEqual(called, {
'syscall': (30, 1, pid, 2 << 13 | 4)
})
else:
self.fail("Unexpected call: %r" % called)
called = {}
# all
utils.modify_priority({
'nice_priority': '-15',
'ionice_class': 'IOPRIO_CLASS_IDLE',
'ionice_priority': '6',
}, logger)
if architecture == 'x86_64' and arch_bits == '64bit':
self.assertEqual(called, {
'setpriority': (0, pid, -15),
'syscall': (251, 1, pid, 3 << 13 | 6),
})
elif architecture == 'aarch64' and arch_bits == '64bit':
self.assertEqual(called, {
'setpriority': (0, pid, -15),
'syscall': (30, 1, pid, 3 << 13 | 6),
})
else:
self.fail("Unexpected call: %r" % called)
def test__NR_ioprio_set(self):
with patch('os.uname', return_value=('', '', '', '', 'x86_64')), \
patch('platform.architecture', return_value=('64bit', '')):
self.assertEqual(251, utils.NR_ioprio_set())
with patch('os.uname', return_value=('', '', '', '', 'x86_64')), \
patch('platform.architecture', return_value=('32bit', '')):
self.assertRaises(OSError, utils.NR_ioprio_set)
with patch('os.uname', return_value=('', '', '', '', 'aarch64')), \
patch('platform.architecture', return_value=('64bit', '')):
self.assertEqual(30, utils.NR_ioprio_set())
with patch('os.uname', return_value=('', '', '', '', 'aarch64')), \
patch('platform.architecture', return_value=('32bit', '')):
self.assertRaises(OSError, utils.NR_ioprio_set)
with patch('os.uname', return_value=('', '', '', '', 'alpha')), \
patch('platform.architecture', return_value=('64bit', '')):
self.assertRaises(OSError, utils.NR_ioprio_set)
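# The tests below exercise link_fd_to_path(), which uses linkat() to give
# a pathname to an anonymous O_TMPFILE descriptor, (re)creating missing
# parent directories and fsyncing them unless fsync=False.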
@requires_o_tmpfile_support
def test_link_fd_to_path_linkat_success(self):
tempdir = mkdtemp(dir='/tmp')
fd = os.open(tempdir, utils.O_TMPFILE | os.O_WRONLY)
data = "I'm whatever Gotham needs me to be"
_m_fsync_dir = mock.Mock()
try:
os.write(fd, data)
# fd is O_WRONLY
self.assertRaises(OSError, os.read, fd, 1)
file_path = os.path.join(tempdir, uuid4().hex)
with mock.patch('swift.common.utils.fsync_dir', _m_fsync_dir):
utils.link_fd_to_path(fd, file_path, 1)
with open(file_path, 'r') as f:
self.assertEqual(f.read(), data)
self.assertEqual(_m_fsync_dir.call_count, 2)
finally:
os.close(fd)
shutil.rmtree(tempdir)
@requires_o_tmpfile_support
def test_link_fd_to_path_target_exists(self):
tempdir = mkdtemp(dir='/tmp')
# Create and write to a file
fd, path = tempfile.mkstemp(dir=tempdir)
os.write(fd, "hello world")
os.fsync(fd)
os.close(fd)
self.assertTrue(os.path.exists(path))
fd = os.open(tempdir, utils.O_TMPFILE | os.O_WRONLY)
try:
os.write(fd, "bye world")
os.fsync(fd)
utils.link_fd_to_path(fd, path, 0, fsync=False)
# Original file should now have been overwritten
with open(path, 'r') as f:
self.assertEqual(f.read(), "bye world")
finally:
os.close(fd)
shutil.rmtree(tempdir)
@requires_o_tmpfile_support
def test_link_fd_to_path_errno_not_EEXIST_or_ENOENT(self):
_m_linkat = mock.Mock(
side_effect=IOError(errno.EACCES, os.strerror(errno.EACCES)))
with mock.patch('swift.common.utils.linkat', _m_linkat):
try:
utils.link_fd_to_path(0, '/path', 1)
except IOError as err:
self.assertEqual(err.errno, errno.EACCES)
else:
self.fail("Expecting IOError exception")
self.assertTrue(_m_linkat.called)
@requires_o_tmpfile_support
def test_linkat_race_dir_not_exists(self):
tempdir = mkdtemp(dir='/tmp')
target_dir = os.path.join(tempdir, uuid4().hex)
target_path = os.path.join(target_dir, uuid4().hex)
os.mkdir(target_dir)
fd = os.open(target_dir, utils.O_TMPFILE | os.O_WRONLY)
# Simulate the directory being deleted by another backend process
os.rmdir(target_dir)
self.assertFalse(os.path.exists(target_dir))
try:
utils.link_fd_to_path(fd, target_path, 1)
self.assertTrue(os.path.exists(target_dir))
self.assertTrue(os.path.exists(target_path))
finally:
os.close(fd)
shutil.rmtree(tempdir)
def test_safe_json_loads(self):
expectations = {
None: None,
'': None,
0: None,
1: None,
'"asdf"': 'asdf',
'[]': [],
'{}': {},
"{'foo': 'bar'}": None,
'{"foo": "bar"}': {'foo': 'bar'},
}
failures = []
for value, expected in expectations.items():
try:
result = utils.safe_json_loads(value)
except Exception as e:
# it's called safe; if it blows up, the test blows up
self.fail('%r caused safe method to throw %r!' % (
value, e))
try:
self.assertEqual(expected, result)
except AssertionError:
failures.append('%r => %r (expected %r)' % (
value, result, expected))
if failures:
self.fail('Invalid results from pure function:\n%s' %
'\n'.join(failures))
class ResellerConfReader(unittest.TestCase):
def setUp(self):
self.default_rules = {'operator_roles': ['admin', 'swiftoperator'],
'service_roles': [],
'require_group': ''}
def test_defaults(self):
conf = {}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_'])
self.assertEqual(options['AUTH_'], self.default_rules)
def test_same_as_default(self):
conf = {'reseller_prefix': 'AUTH',
'operator_roles': 'admin, swiftoperator'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_'])
self.assertEqual(options['AUTH_'], self.default_rules)
def test_single_blank_reseller(self):
conf = {'reseller_prefix': ''}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, [''])
self.assertEqual(options[''], self.default_rules)
def test_single_blank_reseller_with_conf(self):
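# A blank reseller prefix is spelled '' in the config, so its per-prefix
# options use the literal "''" key prefix, e.g. "''operator_roles".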
conf = {'reseller_prefix': '',
"''operator_roles": 'role1, role2'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, [''])
self.assertEqual(options[''].get('operator_roles'),
['role1', 'role2'])
self.assertEqual(options[''].get('service_roles'),
self.default_rules.get('service_roles'))
self.assertEqual(options[''].get('require_group'),
self.default_rules.get('require_group'))
def test_multiple_same_resellers(self):
conf = {'reseller_prefix': " '' , '' "}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, [''])
conf = {'reseller_prefix': '_, _'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['_'])
conf = {'reseller_prefix': 'AUTH, PRE2, AUTH, PRE2'}
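# Non-blank prefixes are normalized to end with '_' and duplicates are
# dropped while preserving first-seen order.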
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_', 'PRE2_'])
def test_several_resellers_with_conf(self):
conf = {'reseller_prefix': 'PRE1, PRE2',
'PRE1_operator_roles': 'role1, role2',
'PRE1_service_roles': 'role3, role4',
'PRE2_operator_roles': 'role5',
'PRE2_service_roles': 'role6',
'PRE2_require_group': 'pre2_group'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['PRE1_', 'PRE2_'])
self.assertEqual(set(['role1', 'role2']),
set(options['PRE1_'].get('operator_roles')))
self.assertEqual(['role5'],
options['PRE2_'].get('operator_roles'))
self.assertEqual(set(['role3', 'role4']),
set(options['PRE1_'].get('service_roles')))
self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
self.assertEqual('', options['PRE1_'].get('require_group'))
self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))
def test_several_resellers_first_blank(self):
conf = {'reseller_prefix': " '' , PRE2",
"''operator_roles": 'role1, role2',
"''service_roles": 'role3, role4',
'PRE2_operator_roles': 'role5',
'PRE2_service_roles': 'role6',
'PRE2_require_group': 'pre2_group'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['', 'PRE2_'])
self.assertEqual(set(['role1', 'role2']),
set(options[''].get('operator_roles')))
self.assertEqual(['role5'],
options['PRE2_'].get('operator_roles'))
self.assertEqual(set(['role3', 'role4']),
set(options[''].get('service_roles')))
self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
self.assertEqual('', options[''].get('require_group'))
self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))
def test_several_resellers_with_blank_comma(self):
conf = {'reseller_prefix': "AUTH , '', PRE2",
"''operator_roles": 'role1, role2',
"''service_roles": 'role3, role4',
'PRE2_operator_roles': 'role5',
'PRE2_service_roles': 'role6',
'PRE2_require_group': 'pre2_group'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_', '', 'PRE2_'])
self.assertEqual(set(['admin', 'swiftoperator']),
set(options['AUTH_'].get('operator_roles')))
self.assertEqual(set(['role1', 'role2']),
set(options[''].get('operator_roles')))
self.assertEqual(['role5'],
options['PRE2_'].get('operator_roles'))
self.assertEqual([],
options['AUTH_'].get('service_roles'))
self.assertEqual(set(['role3', 'role4']),
set(options[''].get('service_roles')))
self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
self.assertEqual('', options['AUTH_'].get('require_group'))
self.assertEqual('', options[''].get('require_group'))
self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))
def test_stray_comma(self):
conf = {'reseller_prefix': "AUTH ,, PRE2",
"''operator_roles": 'role1, role2',
"''service_roles": 'role3, role4',
'PRE2_operator_roles': 'role5',
'PRE2_service_roles': 'role6',
'PRE2_require_group': 'pre2_group'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_', 'PRE2_'])
self.assertEqual(set(['admin', 'swiftoperator']),
set(options['AUTH_'].get('operator_roles')))
self.assertEqual(['role5'],
options['PRE2_'].get('operator_roles'))
self.assertEqual([],
options['AUTH_'].get('service_roles'))
self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
self.assertEqual('', options['AUTH_'].get('require_group'))
self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))
def test_multiple_stray_commas_resellers(self):
conf = {'reseller_prefix': ' , , ,'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, [''])
self.assertEqual(options[''], self.default_rules)
def test_unprefixed_options(self):
conf = {'reseller_prefix': "AUTH , '', PRE2",
"operator_roles": 'role1, role2',
"service_roles": 'role3, role4',
'require_group': 'auth_blank_group',
'PRE2_operator_roles': 'role5',
'PRE2_service_roles': 'role6',
'PRE2_require_group': 'pre2_group'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_', '', 'PRE2_'])
self.assertEqual(set(['role1', 'role2']),
set(options['AUTH_'].get('operator_roles')))
self.assertEqual(set(['role1', 'role2']),
set(options[''].get('operator_roles')))
self.assertEqual(['role5'],
options['PRE2_'].get('operator_roles'))
self.assertEqual(set(['role3', 'role4']),
set(options['AUTH_'].get('service_roles')))
self.assertEqual(set(['role3', 'role4']),
set(options[''].get('service_roles')))
self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
self.assertEqual('auth_blank_group',
options['AUTH_'].get('require_group'))
self.assertEqual('auth_blank_group', options[''].get('require_group'))
self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))
class TestUnlinkOlder(unittest.TestCase):
def setUp(self):
self.tempdir = mkdtemp()
self.mtime = {}
self.ts = make_timestamp_iter()
def tearDown(self):
rmtree(self.tempdir, ignore_errors=True)
def touch(self, fpath, mtime=None):
self.mtime[fpath] = mtime or next(self.ts)
open(fpath, 'w')
@contextlib.contextmanager
def high_resolution_getmtime(self):
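# Real filesystem mtimes are too coarse to tell apart files touched in
# quick succession, so getmtime is patched to return the per-path
# timestamps recorded by touch().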
orig_getmtime = os.path.getmtime
def mock_getmtime(fpath):
mtime = self.mtime.get(fpath)
if mtime is None:
mtime = orig_getmtime(fpath)
return mtime
with mock.patch('os.path.getmtime', mock_getmtime):
yield
def test_unlink_older_than_path_not_exists(self):
path = os.path.join(self.tempdir, 'does-not-exist')
# just make sure it doesn't blow up
utils.unlink_older_than(path, next(self.ts))
def test_unlink_older_than_file(self):
path = os.path.join(self.tempdir, 'some-file')
self.touch(path)
with self.assertRaises(OSError) as ctx:
utils.unlink_older_than(path, next(self.ts))
self.assertEqual(ctx.exception.errno, errno.ENOTDIR)
def test_unlink_older_than_now(self):
self.touch(os.path.join(self.tempdir, 'test'))
with self.high_resolution_getmtime():
utils.unlink_older_than(self.tempdir, next(self.ts))
self.assertEqual([], os.listdir(self.tempdir))
def test_unlink_not_old_enough(self):
start = next(self.ts)
self.touch(os.path.join(self.tempdir, 'test'))
with self.high_resolution_getmtime():
utils.unlink_older_than(self.tempdir, start)
self.assertEqual(['test'], os.listdir(self.tempdir))
def test_unlink_mixed(self):
self.touch(os.path.join(self.tempdir, 'first'))
cutoff = next(self.ts)
self.touch(os.path.join(self.tempdir, 'second'))
with self.high_resolution_getmtime():
utils.unlink_older_than(self.tempdir, cutoff)
self.assertEqual(['second'], os.listdir(self.tempdir))
def test_unlink_paths(self):
paths = []
for item in ('first', 'second', 'third'):
path = os.path.join(self.tempdir, item)
self.touch(path)
paths.append(path)
# only unlink the first two paths; the third should survive
with self.high_resolution_getmtime():
utils.unlink_paths_older_than(paths[:2], next(self.ts))
self.assertEqual(['third'], os.listdir(self.tempdir))
def test_unlink_empty_paths(self):
# just make sure it doesn't blow up
utils.unlink_paths_older_than([], next(self.ts))
def test_unlink_not_exists_paths(self):
path = os.path.join(self.tempdir, 'does-not-exist')
# just make sure it doesn't blow up
utils.unlink_paths_older_than([path], next(self.ts))
class TestSwiftInfo(unittest.TestCase):
def tearDown(self):
utils._swift_info = {}
utils._swift_admin_info = {}
def test_register_swift_info(self):
utils.register_swift_info(foo='bar')
utils.register_swift_info(lorem='ipsum')
utils.register_swift_info('cap1', cap1_foo='cap1_bar')
utils.register_swift_info('cap1', cap1_lorem='cap1_ipsum')
self.assertTrue('swift' in utils._swift_info)
self.assertTrue('foo' in utils._swift_info['swift'])
self.assertEqual(utils._swift_info['swift']['foo'], 'bar')
self.assertTrue('lorem' in utils._swift_info['swift'])
self.assertEqual(utils._swift_info['swift']['lorem'], 'ipsum')
self.assertTrue('cap1' in utils._swift_info)
self.assertTrue('cap1_foo' in utils._swift_info['cap1'])
self.assertEqual(utils._swift_info['cap1']['cap1_foo'], 'cap1_bar')
self.assertTrue('cap1_lorem' in utils._swift_info['cap1'])
self.assertEqual(utils._swift_info['cap1']['cap1_lorem'], 'cap1_ipsum')
self.assertRaises(ValueError,
utils.register_swift_info, 'admin', foo='bar')
self.assertRaises(ValueError,
utils.register_swift_info, 'disallowed_sections',
disallowed_sections=None)
utils.register_swift_info('goodkey', foo='5.6')
self.assertRaises(ValueError,
utils.register_swift_info, 'bad.key', foo='5.6')
data = {'bad.key': '5.6'}
self.assertRaises(ValueError,
utils.register_swift_info, 'goodkey', **data)
def test_get_swift_info(self):
utils._swift_info = {'swift': {'foo': 'bar'},
'cap1': {'cap1_foo': 'cap1_bar'}}
utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}
info = utils.get_swift_info()
self.assertNotIn('admin', info)
self.assertTrue('swift' in info)
self.assertTrue('foo' in info['swift'])
self.assertEqual(utils._swift_info['swift']['foo'], 'bar')
self.assertTrue('cap1' in info)
self.assertTrue('cap1_foo' in info['cap1'])
self.assertEqual(utils._swift_info['cap1']['cap1_foo'], 'cap1_bar')
def test_get_swift_info_with_disallowed_sections(self):
utils._swift_info = {'swift': {'foo': 'bar'},
'cap1': {'cap1_foo': 'cap1_bar'},
'cap2': {'cap2_foo': 'cap2_bar'},
'cap3': {'cap3_foo': 'cap3_bar'}}
utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}
info = utils.get_swift_info(disallowed_sections=['cap1', 'cap3'])
self.assertNotIn('admin', info)
self.assertTrue('swift' in info)
self.assertTrue('foo' in info['swift'])
self.assertEqual(info['swift']['foo'], 'bar')
self.assertNotIn('cap1', info)
self.assertTrue('cap2' in info)
self.assertTrue('cap2_foo' in info['cap2'])
self.assertEqual(info['cap2']['cap2_foo'], 'cap2_bar')
self.assertNotIn('cap3', info)
def test_register_swift_admin_info(self):
utils.register_swift_info(admin=True, admin_foo='admin_bar')
utils.register_swift_info(admin=True, admin_lorem='admin_ipsum')
utils.register_swift_info('cap1', admin=True, ac1_foo='ac1_bar')
utils.register_swift_info('cap1', admin=True, ac1_lorem='ac1_ipsum')
self.assertTrue('swift' in utils._swift_admin_info)
self.assertTrue('admin_foo' in utils._swift_admin_info['swift'])
self.assertEqual(
utils._swift_admin_info['swift']['admin_foo'], 'admin_bar')
self.assertTrue('admin_lorem' in utils._swift_admin_info['swift'])
self.assertEqual(
utils._swift_admin_info['swift']['admin_lorem'], 'admin_ipsum')
self.assertTrue('cap1' in utils._swift_admin_info)
self.assertTrue('ac1_foo' in utils._swift_admin_info['cap1'])
self.assertEqual(
utils._swift_admin_info['cap1']['ac1_foo'], 'ac1_bar')
self.assertTrue('ac1_lorem' in utils._swift_admin_info['cap1'])
self.assertEqual(
utils._swift_admin_info['cap1']['ac1_lorem'], 'ac1_ipsum')
self.assertNotIn('swift', utils._swift_info)
self.assertNotIn('cap1', utils._swift_info)
def test_get_swift_admin_info(self):
utils._swift_info = {'swift': {'foo': 'bar'},
'cap1': {'cap1_foo': 'cap1_bar'}}
utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}
info = utils.get_swift_info(admin=True)
self.assertTrue('admin' in info)
self.assertTrue('admin_cap1' in info['admin'])
self.assertTrue('ac1_foo' in info['admin']['admin_cap1'])
self.assertEqual(info['admin']['admin_cap1']['ac1_foo'], 'ac1_bar')
self.assertTrue('swift' in info)
self.assertTrue('foo' in info['swift'])
self.assertEqual(utils._swift_info['swift']['foo'], 'bar')
self.assertTrue('cap1' in info)
self.assertTrue('cap1_foo' in info['cap1'])
self.assertEqual(utils._swift_info['cap1']['cap1_foo'], 'cap1_bar')
def test_get_swift_admin_info_with_disallowed_sections(self):
utils._swift_info = {'swift': {'foo': 'bar'},
'cap1': {'cap1_foo': 'cap1_bar'},
'cap2': {'cap2_foo': 'cap2_bar'},
'cap3': {'cap3_foo': 'cap3_bar'}}
utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}
info = utils.get_swift_info(
admin=True, disallowed_sections=['cap1', 'cap3'])
self.assertTrue('admin' in info)
self.assertTrue('admin_cap1' in info['admin'])
self.assertTrue('ac1_foo' in info['admin']['admin_cap1'])
self.assertEqual(info['admin']['admin_cap1']['ac1_foo'], 'ac1_bar')
self.assertTrue('disallowed_sections' in info['admin'])
self.assertTrue('cap1' in info['admin']['disallowed_sections'])
self.assertNotIn('cap2', info['admin']['disallowed_sections'])
self.assertTrue('cap3' in info['admin']['disallowed_sections'])
self.assertTrue('swift' in info)
self.assertTrue('foo' in info['swift'])
self.assertEqual(info['swift']['foo'], 'bar')
self.assertNotIn('cap1', info)
self.assertTrue('cap2' in info)
self.assertTrue('cap2_foo' in info['cap2'])
self.assertEqual(info['cap2']['cap2_foo'], 'cap2_bar')
self.assertNotIn('cap3', info)
def test_get_swift_admin_info_with_disallowed_sub_sections(self):
utils._swift_info = {'swift': {'foo': 'bar'},
'cap1': {'cap1_foo': 'cap1_bar',
'cap1_moo': 'cap1_baa'},
'cap2': {'cap2_foo': 'cap2_bar'},
'cap3': {'cap2_foo': 'cap2_bar'},
'cap4': {'a': {'b': {'c': 'c'},
'b.c': 'b.c'}}}
utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}
info = utils.get_swift_info(
admin=True, disallowed_sections=['cap1.cap1_foo', 'cap3',
'cap4.a.b.c'])
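# Dotted disallowed_sections entries descend into nested dicts:
# 'cap4.a.b.c' removes the nested 'c' key, while the literal flat key
# 'b.c' under 'a' is left untouched.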
self.assertNotIn('cap3', info)
self.assertEqual(info['cap1']['cap1_moo'], 'cap1_baa')
self.assertNotIn('cap1_foo', info['cap1'])
self.assertNotIn('c', info['cap4']['a']['b'])
self.assertEqual(info['cap4']['a']['b.c'], 'b.c')
def test_get_swift_info_with_unmatched_disallowed_sections(self):
cap1 = {'cap1_foo': 'cap1_bar',
'cap1_moo': 'cap1_baa'}
utils._swift_info = {'swift': {'foo': 'bar'},
'cap1': cap1}
# expect no exceptions
info = utils.get_swift_info(
disallowed_sections=['cap2.cap1_foo', 'cap1.no_match',
'cap1.cap1_foo.no_match.no_match'])
self.assertEqual(info['cap1'], cap1)
class TestFileLikeIter(unittest.TestCase):
def test_iter_file_iter(self):
in_iter = [b'abc', b'de', b'fghijk', b'l']
chunks = []
for chunk in utils.FileLikeIter(in_iter):
chunks.append(chunk)
self.assertEqual(chunks, in_iter)
def test_next(self):
in_iter = [b'abc', b'de', b'fghijk', b'l']
chunks = []
iter_file = utils.FileLikeIter(in_iter)
while True:
try:
chunk = next(iter_file)
except StopIteration:
break
chunks.append(chunk)
self.assertEqual(chunks, in_iter)
def test_read(self):
in_iter = [b'abc', b'de', b'fghijk', b'l']
iter_file = utils.FileLikeIter(in_iter)
self.assertEqual(iter_file.read(), b''.join(in_iter))
def test_read_with_size(self):
in_iter = [b'abc', b'de', b'fghijk', b'l']
chunks = []
iter_file = utils.FileLikeIter(in_iter)
while True:
chunk = iter_file.read(2)
if not chunk:
break
self.assertTrue(len(chunk) <= 2)
chunks.append(chunk)
self.assertEqual(b''.join(chunks), b''.join(in_iter))
def test_read_with_size_zero(self):
# makes little sense, but file supports it, so...
self.assertEqual(utils.FileLikeIter(b'abc').read(0), b'')
def test_readline(self):
in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n',
b'trailing.']
lines = []
iter_file = utils.FileLikeIter(in_iter)
while True:
line = iter_file.readline()
if not line:
break
lines.append(line)
self.assertEqual(
lines,
[v if v == b'trailing.' else v + b'\n'
for v in b''.join(in_iter).split(b'\n')])
def test_readline2(self):
self.assertEqual(
utils.FileLikeIter([b'abc', b'def\n']).readline(4),
b'abcd')
def test_readline3(self):
self.assertEqual(
utils.FileLikeIter([b'a' * 1111, b'bc\ndef']).readline(),
(b'a' * 1111) + b'bc\n')
def test_readline_with_size(self):
in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n',
b'trailing.']
lines = []
iter_file = utils.FileLikeIter(in_iter)
while True:
line = iter_file.readline(2)
if not line:
break
lines.append(line)
self.assertEqual(
lines,
[b'ab', b'c\n', b'd\n', b'ef', b'g\n', b'h\n', b'ij', b'\n', b'\n',
b'k\n', b'tr', b'ai', b'li', b'ng', b'.'])
def test_readlines(self):
in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n',
b'trailing.']
lines = utils.FileLikeIter(in_iter).readlines()
self.assertEqual(
lines,
[v if v == b'trailing.' else v + b'\n'
for v in b''.join(in_iter).split(b'\n')])
def test_readlines_with_size(self):
in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n',
b'trailing.']
iter_file = utils.FileLikeIter(in_iter)
lists_of_lines = []
while True:
lines = iter_file.readlines(2)
if not lines:
break
lists_of_lines.append(lines)
self.assertEqual(
lists_of_lines,
[[b'ab'], [b'c\n'], [b'd\n'], [b'ef'], [b'g\n'], [b'h\n'], [b'ij'],
[b'\n', b'\n'], [b'k\n'], [b'tr'], [b'ai'], [b'li'], [b'ng'],
[b'.']])
def test_close(self):
iter_file = utils.FileLikeIter([b'a', b'b', b'c'])
self.assertEqual(next(iter_file), b'a')
iter_file.close()
self.assertTrue(iter_file.closed)
self.assertRaises(ValueError, iter_file.next)
self.assertRaises(ValueError, iter_file.read)
self.assertRaises(ValueError, iter_file.readline)
self.assertRaises(ValueError, iter_file.readlines)
# Just make sure repeated close calls don't raise an Exception
iter_file.close()
self.assertTrue(iter_file.closed)
class TestStatsdLogging(unittest.TestCase):
def setUp(self):
def fake_getaddrinfo(host, port, *args):
# this is what a real getaddrinfo('localhost', port,
# socket.AF_INET) returned once
return [(socket.AF_INET, # address family
socket.SOCK_STREAM, # socket type
socket.IPPROTO_TCP, # socket protocol
'', # canonical name,
('127.0.0.1', port)), # socket address
(socket.AF_INET,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP,
'',
('127.0.0.1', port))]
self.real_getaddrinfo = utils.socket.getaddrinfo
self.getaddrinfo_patcher = mock.patch.object(
utils.socket, 'getaddrinfo', fake_getaddrinfo)
self.mock_getaddrinfo = self.getaddrinfo_patcher.start()
self.addCleanup(self.getaddrinfo_patcher.stop)
def test_get_logger_statsd_client_not_specified(self):
logger = utils.get_logger({}, 'some-name', log_route='some-route')
# white-box construction validation
self.assertIsNone(logger.logger.statsd_client)
def test_get_logger_statsd_client_defaults(self):
logger = utils.get_logger({'log_statsd_host': 'some.host.com'},
'some-name', log_route='some-route')
# white-box construction validation
self.assertTrue(isinstance(logger.logger.statsd_client,
utils.StatsdClient))
self.assertEqual(logger.logger.statsd_client._host, 'some.host.com')
self.assertEqual(logger.logger.statsd_client._port, 8125)
self.assertEqual(logger.logger.statsd_client._prefix, 'some-name.')
self.assertEqual(logger.logger.statsd_client._default_sample_rate, 1)
logger.set_statsd_prefix('some-name.more-specific')
self.assertEqual(logger.logger.statsd_client._prefix,
'some-name.more-specific.')
logger.set_statsd_prefix('')
self.assertEqual(logger.logger.statsd_client._prefix, '')
def test_get_logger_statsd_client_non_defaults(self):
logger = utils.get_logger({
'log_statsd_host': 'another.host.com',
'log_statsd_port': '9876',
'log_statsd_default_sample_rate': '0.75',
'log_statsd_sample_rate_factor': '0.81',
'log_statsd_metric_prefix': 'tomato.sauce',
}, 'some-name', log_route='some-route')
self.assertEqual(logger.logger.statsd_client._prefix,
'tomato.sauce.some-name.')
logger.set_statsd_prefix('some-name.more-specific')
self.assertEqual(logger.logger.statsd_client._prefix,
'tomato.sauce.some-name.more-specific.')
logger.set_statsd_prefix('')
self.assertEqual(logger.logger.statsd_client._prefix, 'tomato.sauce.')
self.assertEqual(logger.logger.statsd_client._host, 'another.host.com')
self.assertEqual(logger.logger.statsd_client._port, 9876)
self.assertEqual(logger.logger.statsd_client._default_sample_rate,
0.75)
self.assertEqual(logger.logger.statsd_client._sample_rate_factor,
0.81)
def test_ipv4_or_ipv6_hostname_defaults_to_ipv4(self):
def stub_getaddrinfo_both_ipv4_and_ipv6(host, port, family, *rest):
if family == socket.AF_INET:
return [(socket.AF_INET, 'blah', 'blah', 'blah',
('127.0.0.1', int(port)))]
elif family == socket.AF_INET6:
# Implemented so an incorrectly ordered implementation (IPv6
# then IPv4) would realistically fail.
return [(socket.AF_INET6, 'blah', 'blah', 'blah',
('::1', int(port), 0, 0))]
with mock.patch.object(utils.socket, 'getaddrinfo',
new=stub_getaddrinfo_both_ipv4_and_ipv6):
logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': '9876',
}, 'some-name', log_route='some-route')
statsd_client = logger.logger.statsd_client
self.assertEqual(statsd_client._sock_family, socket.AF_INET)
self.assertEqual(statsd_client._target, ('localhost', 9876))
got_sock = statsd_client._open_socket()
self.assertEqual(got_sock.family, socket.AF_INET)
def test_ipv4_instantiation_and_socket_creation(self):
logger = utils.get_logger({
'log_statsd_host': '127.0.0.1',
'log_statsd_port': '9876',
}, 'some-name', log_route='some-route')
statsd_client = logger.logger.statsd_client
self.assertEqual(statsd_client._sock_family, socket.AF_INET)
self.assertEqual(statsd_client._target, ('127.0.0.1', 9876))
got_sock = statsd_client._open_socket()
self.assertEqual(got_sock.family, socket.AF_INET)
def test_ipv6_instantiation_and_socket_creation(self):
# We have to check the given hostname or IP for IPv4/IPv6 on logger
# instantiation so we don't call getaddrinfo() too often and don't have
# to call bind() on our socket to detect IPv4/IPv6 on every send.
#
# This test uses the real getaddrinfo, so we patch over the mock to
# put the real one back. If we just stop the mock, then
# unittest.exit() blows up, but stacking real-fake-real works okay.
with mock.patch.object(utils.socket, 'getaddrinfo',
self.real_getaddrinfo):
logger = utils.get_logger({
'log_statsd_host': '::1',
'log_statsd_port': '9876',
}, 'some-name', log_route='some-route')
statsd_client = logger.logger.statsd_client
self.assertEqual(statsd_client._sock_family, socket.AF_INET6)
self.assertEqual(statsd_client._target, ('::1', 9876, 0, 0))
got_sock = statsd_client._open_socket()
self.assertEqual(got_sock.family, socket.AF_INET6)
def test_bad_hostname_instantiation(self):
with mock.patch.object(utils.socket, 'getaddrinfo',
side_effect=utils.socket.gaierror("whoops")):
logger = utils.get_logger({
'log_statsd_host': 'i-am-not-a-hostname-or-ip',
'log_statsd_port': '9876',
}, 'some-name', log_route='some-route')
statsd_client = logger.logger.statsd_client
self.assertEqual(statsd_client._sock_family, socket.AF_INET)
self.assertEqual(statsd_client._target,
('i-am-not-a-hostname-or-ip', 9876))
got_sock = statsd_client._open_socket()
self.assertEqual(got_sock.family, socket.AF_INET)
# Maybe the DNS server gets fixed in a bit and it starts working... or
# maybe the DNS record hadn't propagated yet. In any case, failed
# statsd sends will warn in the logs until the DNS failure or invalid
# IP address in the configuration is fixed.
def test_sending_ipv6(self):
def fake_getaddrinfo(host, port, *args):
# this is what a real getaddrinfo('::1', port,
# socket.AF_INET6) returned once
return [(socket.AF_INET6,
socket.SOCK_STREAM,
socket.IPPROTO_TCP,
'', ('::1', port, 0, 0)),
(socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP,
'',
('::1', port, 0, 0))]
with mock.patch.object(utils.socket, 'getaddrinfo', fake_getaddrinfo):
logger = utils.get_logger({
'log_statsd_host': '::1',
'log_statsd_port': '9876',
}, 'some-name', log_route='some-route')
statsd_client = logger.logger.statsd_client
fl = FakeLogger()
statsd_client.logger = fl
mock_socket = MockUdpSocket()
statsd_client._open_socket = lambda *_: mock_socket
logger.increment('tunafish')
self.assertEqual(fl.get_lines_for_level('warning'), [])
self.assertEqual(mock_socket.sent,
[(b'some-name.tunafish:1|c', ('::1', 9876, 0, 0))])
def test_no_exception_when_cant_send_udp_packet(self):
logger = utils.get_logger({'log_statsd_host': 'some.host.com'})
statsd_client = logger.logger.statsd_client
fl = FakeLogger()
statsd_client.logger = fl
mock_socket = MockUdpSocket(sendto_errno=errno.EPERM)
statsd_client._open_socket = lambda *_: mock_socket
logger.increment('tunafish')
expected = ["Error sending UDP message to ('some.host.com', 8125): "
"[Errno 1] test errno 1"]
self.assertEqual(fl.get_lines_for_level('warning'), expected)
def test_sample_rates(self):
logger = utils.get_logger({'log_statsd_host': 'some.host.com'})
mock_socket = MockUdpSocket()
# encapsulation? what's that?
statsd_client = logger.logger.statsd_client
self.assertTrue(statsd_client.random is random.random)
statsd_client._open_socket = lambda *_: mock_socket
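# The client only sends when random() < sample_rate, and sampled
# packets carry a '|@<rate>' suffix so the receiver can scale them up.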
statsd_client.random = lambda: 0.50001
logger.increment('tribbles', sample_rate=0.5)
self.assertEqual(len(mock_socket.sent), 0)
statsd_client.random = lambda: 0.49999
logger.increment('tribbles', sample_rate=0.5)
self.assertEqual(len(mock_socket.sent), 1)
payload = mock_socket.sent[0][0]
self.assertTrue(payload.endswith(b"|@0.5"))
def test_sample_rates_with_sample_rate_factor(self):
logger = utils.get_logger({
'log_statsd_host': 'some.host.com',
'log_statsd_default_sample_rate': '0.82',
'log_statsd_sample_rate_factor': '0.91',
})
effective_sample_rate = 0.82 * 0.91
mock_socket = MockUdpSocket()
# encapsulation? what's that?
statsd_client = logger.logger.statsd_client
self.assertTrue(statsd_client.random is random.random)
statsd_client._open_socket = lambda *_: mock_socket
statsd_client.random = lambda: effective_sample_rate + 0.001
logger.increment('tribbles')
self.assertEqual(len(mock_socket.sent), 0)
statsd_client.random = lambda: effective_sample_rate - 0.001
logger.increment('tribbles')
self.assertEqual(len(mock_socket.sent), 1)
payload = mock_socket.sent[0][0]
suffix = "|@%s" % effective_sample_rate
if six.PY3:
suffix = suffix.encode('utf-8')
self.assertTrue(payload.endswith(suffix), payload)
effective_sample_rate = 0.587 * 0.91
statsd_client.random = lambda: effective_sample_rate - 0.001
logger.increment('tribbles', sample_rate=0.587)
self.assertEqual(len(mock_socket.sent), 2)
payload = mock_socket.sent[1][0]
suffix = "|@%s" % effective_sample_rate
if six.PY3:
suffix = suffix.encode('utf-8')
self.assertTrue(payload.endswith(suffix), payload)
def test_timing_stats(self):
class MockController(object):
def __init__(self, status):
self.status = status
self.logger = self
self.args = ()
self.called = 'UNKNOWN'
def timing_since(self, *args):
self.called = 'timing'
self.args = args
@utils.timing_stats()
def METHOD(controller):
return Response(status=controller.status)
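# timing_stats() should report 'METHOD.timing' for responses it treats
# as non-errors (200, 404, 412, 416 below) and 'METHOD.errors.timing'
# otherwise (401), passing the elapsed time as the second argument.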
mock_controller = MockController(200)
METHOD(mock_controller)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.args[0], 'METHOD.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(404)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(mock_controller.args[0], 'METHOD.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(412)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(mock_controller.args[0], 'METHOD.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(416)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(mock_controller.args[0], 'METHOD.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(401)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(mock_controller.args[0], 'METHOD.errors.timing')
self.assertTrue(mock_controller.args[1] > 0)
class UnsafeXrange(object):
"""
Like xrange(limit), but with extra context switching to screw things up.
"""
def __init__(self, upper_bound):
self.current = 0
self.concurrent_calls = 0
self.upper_bound = upper_bound
self.concurrent_call = False
def __iter__(self):
return self
def next(self):
if self.concurrent_calls > 0:
self.concurrent_call = True
self.concurrent_calls += 1
try:
if self.current >= self.upper_bound:
raise StopIteration
else:
val = self.current
self.current += 1
eventlet.sleep() # yield control
return val
finally:
self.concurrent_calls -= 1
__next__ = next
class TestAffinityKeyFunction(unittest.TestCase):
def setUp(self):
self.nodes = [dict(id=0, region=1, zone=1),
dict(id=1, region=1, zone=2),
dict(id=2, region=2, zone=1),
dict(id=3, region=2, zone=2),
dict(id=4, region=3, zone=1),
dict(id=5, region=3, zone=2),
dict(id=6, region=4, zone=0),
dict(id=7, region=4, zone=1)]
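# affinity_key_function parses specs like 'r1z2=100': nodes matching the
# region (and optional zone) sort by the given value, lowest first, and
# non-matching nodes sort after every match in their original order.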
def test_single_region(self):
keyfn = utils.affinity_key_function("r3=1")
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([4, 5, 0, 1, 2, 3, 6, 7], ids)
def test_bogus_value(self):
self.assertRaises(ValueError,
utils.affinity_key_function, "r3")
self.assertRaises(ValueError,
utils.affinity_key_function, "r3=elephant")
def test_empty_value(self):
# Empty's okay, it just means no preference
keyfn = utils.affinity_key_function("")
self.assertTrue(callable(keyfn))
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([0, 1, 2, 3, 4, 5, 6, 7], ids)
def test_all_whitespace_value(self):
# Empty's okay, it just means no preference
keyfn = utils.affinity_key_function(" \n")
self.assertTrue(callable(keyfn))
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([0, 1, 2, 3, 4, 5, 6, 7], ids)
def test_with_zone_zero(self):
keyfn = utils.affinity_key_function("r4z0=1")
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([6, 0, 1, 2, 3, 4, 5, 7], ids)
def test_multiple(self):
keyfn = utils.affinity_key_function("r1=100, r4=200, r3z1=1")
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([4, 0, 1, 6, 7, 2, 3, 5], ids)
def test_more_specific_after_less_specific(self):
keyfn = utils.affinity_key_function("r2=100, r2z2=50")
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([3, 2, 0, 1, 4, 5, 6, 7], ids)
class TestAffinityLocalityPredicate(unittest.TestCase):
def setUp(self):
self.nodes = [dict(id=0, region=1, zone=1),
dict(id=1, region=1, zone=2),
dict(id=2, region=2, zone=1),
dict(id=3, region=2, zone=2),
dict(id=4, region=3, zone=1),
dict(id=5, region=3, zone=2),
dict(id=6, region=4, zone=0),
dict(id=7, region=4, zone=1)]
def test_empty(self):
pred = utils.affinity_locality_predicate('')
self.assertTrue(pred is None)
def test_region(self):
pred = utils.affinity_locality_predicate('r1')
self.assertTrue(callable(pred))
ids = [n['id'] for n in self.nodes if pred(n)]
self.assertEqual([0, 1], ids)
def test_zone(self):
pred = utils.affinity_locality_predicate('r1z1')
self.assertTrue(callable(pred))
ids = [n['id'] for n in self.nodes if pred(n)]
self.assertEqual([0], ids)
def test_multiple(self):
pred = utils.affinity_locality_predicate('r1, r3, r4z0')
self.assertTrue(callable(pred))
ids = [n['id'] for n in self.nodes if pred(n)]
self.assertEqual([0, 1, 4, 5, 6], ids)
def test_invalid(self):
self.assertRaises(ValueError,
utils.affinity_locality_predicate, 'falafel')
self.assertRaises(ValueError,
utils.affinity_locality_predicate, 'r8zQ')
self.assertRaises(ValueError,
utils.affinity_locality_predicate, 'r2d2')
self.assertRaises(ValueError,
utils.affinity_locality_predicate, 'r1z1=1')
class TestRateLimitedIterator(unittest.TestCase):
def run_under_pseudo_time(
self, func, *args, **kwargs):
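# Run func under a fake clock: time.time() ticks forward a millisecond
# per call and eventlet.sleep() advances it by the requested duration,
# so the iterator's ratelimiting sleeps cost no real wall-clock time.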
curr_time = [42.0]
def my_time():
curr_time[0] += 0.001
return curr_time[0]
def my_sleep(duration):
curr_time[0] += 0.001
curr_time[0] += duration
with patch('time.time', my_time), \
patch('eventlet.sleep', my_sleep):
return func(*args, **kwargs)
def test_rate_limiting(self):
def testfunc():
limited_iterator = utils.RateLimitedIterator(range(9999), 100)
got = []
started_at = time.time()
try:
while time.time() - started_at < 0.1:
got.append(next(limited_iterator))
except StopIteration:
pass
return got
got = self.run_under_pseudo_time(testfunc)
# it's 11, not 10, because ratelimiting doesn't apply to the very
# first element.
self.assertEqual(len(got), 11)
def test_rate_limiting_sometimes(self):
def testfunc():
limited_iterator = utils.RateLimitedIterator(
range(9999), 100,
ratelimit_if=lambda item: item % 23 != 0)
got = []
started_at = time.time()
try:
while time.time() - started_at < 0.5:
got.append(next(limited_iterator))
except StopIteration:
pass
return got
got = self.run_under_pseudo_time(testfunc)
# we'd get 51 without the ratelimit_if, but because 0, 23 and 46
# weren't subject to ratelimiting, we get 54 instead
self.assertEqual(len(got), 54)
def test_limit_after(self):
def testfunc():
limited_iterator = utils.RateLimitedIterator(
range(9999), 100, limit_after=5)
got = []
started_at = time.time()
try:
while time.time() - started_at < 0.1:
got.append(next(limited_iterator))
except StopIteration:
pass
return got
got = self.run_under_pseudo_time(testfunc)
# it's 16, not 15, because ratelimiting doesn't apply to the very
# first element.
self.assertEqual(len(got), 16)
class TestGreenthreadSafeIterator(unittest.TestCase):
def increment(self, iterable):
plus_ones = []
for n in iterable:
plus_ones.append(n + 1)
return plus_ones
def test_setup_works(self):
# it should work without concurrent access
self.assertEqual([0, 1, 2, 3], list(UnsafeXrange(4)))
iterable = UnsafeXrange(10)
pile = eventlet.GreenPile(2)
for _ in range(2):
pile.spawn(self.increment, iterable)
sorted([resp for resp in pile])
self.assertTrue(
iterable.concurrent_call, 'test setup is insufficiently crazy')
def test_access_is_serialized(self):
pile = eventlet.GreenPile(2)
unsafe_iterable = UnsafeXrange(10)
iterable = utils.GreenthreadSafeIterator(unsafe_iterable)
for _ in range(2):
pile.spawn(self.increment, iterable)
response = sorted(sum([resp for resp in pile], []))
self.assertEqual(list(range(1, 11)), response)
self.assertTrue(
not unsafe_iterable.concurrent_call, 'concurrent call occurred')
class TestStatsdLoggingDelegation(unittest.TestCase):
def setUp(self):
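# These tests exercise the real UDP path: bind a socket on an ephemeral
# localhost port and have a reader thread push each received datagram
# onto a queue for the assertStat/assertStatMatches helpers.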
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.bind(('localhost', 0))
self.port = self.sock.getsockname()[1]
self.queue = Queue()
self.reader_thread = threading.Thread(target=self.statsd_reader)
self.reader_thread.setDaemon(1)
self.reader_thread.start()
def tearDown(self):
# The "no-op when disabled" test doesn't set up a real logger, so
# create one here so we can tell the reader thread to stop.
if not getattr(self, 'logger', None):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
}, 'some-name')
self.logger.increment('STOP')
self.reader_thread.join(timeout=4)
self.sock.close()
del self.logger
def statsd_reader(self):
while True:
try:
payload = self.sock.recv(4096)
if payload and b'STOP' in payload:
return 42
self.queue.put(payload)
except Exception as e:
sys.stderr.write('statsd_reader thread: %r' % (e,))
break
def _send_and_get(self, sender_fn, *args, **kwargs):
"""
Because the client library may not actually send a packet with
sample_rate < 1, we keep trying until we get one through.
"""
got = None
while not got:
sender_fn(*args, **kwargs)
try:
got = self.queue.get(timeout=0.5)
except Empty:
pass
return got
def assertStat(self, expected, sender_fn, *args, **kwargs):
got = self._send_and_get(sender_fn, *args, **kwargs)
if six.PY3:
got = got.decode('utf-8')
return self.assertEqual(expected, got)
def assertStatMatches(self, expected_regexp, sender_fn, *args, **kwargs):
got = self._send_and_get(sender_fn, *args, **kwargs)
if six.PY3:
got = got.decode('utf-8')
return self.assertTrue(re.search(expected_regexp, got),
[got, expected_regexp])
def test_methods_are_no_ops_when_not_enabled(self):
logger = utils.get_logger({
# No "log_statsd_host" means "disabled"
'log_statsd_port': str(self.port),
}, 'some-name')
# Delegate methods are no-ops
self.assertIsNone(logger.update_stats('foo', 88))
self.assertIsNone(logger.update_stats('foo', 88, 0.57))
self.assertIsNone(logger.update_stats('foo', 88,
sample_rate=0.61))
self.assertIsNone(logger.increment('foo'))
self.assertIsNone(logger.increment('foo', 0.57))
self.assertIsNone(logger.increment('foo', sample_rate=0.61))
self.assertIsNone(logger.decrement('foo'))
self.assertIsNone(logger.decrement('foo', 0.57))
self.assertIsNone(logger.decrement('foo', sample_rate=0.61))
self.assertIsNone(logger.timing('foo', 88.048))
self.assertIsNone(logger.timing('foo', 88.57, 0.34))
self.assertIsNone(logger.timing('foo', 88.998, sample_rate=0.82))
self.assertIsNone(logger.timing_since('foo', 8938))
self.assertIsNone(logger.timing_since('foo', 8948, 0.57))
self.assertIsNone(logger.timing_since('foo', 849398,
sample_rate=0.61))
# Now, the queue should be empty (no UDP packets sent)
self.assertRaises(Empty, self.queue.get_nowait)
def test_delegate_methods_with_no_default_sample_rate(self):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
}, 'some-name')
self.assertStat('some-name.some.counter:1|c', self.logger.increment,
'some.counter')
self.assertStat('some-name.some.counter:-1|c', self.logger.decrement,
'some.counter')
self.assertStat('some-name.some.operation:4900.0|ms',
self.logger.timing, 'some.operation', 4.9 * 1000)
self.assertStatMatches('some-name\.another\.operation:\d+\.\d+\|ms',
self.logger.timing_since, 'another.operation',
time.time())
self.assertStat('some-name.another.counter:42|c',
self.logger.update_stats, 'another.counter', 42)
# Each call can override the sample_rate (also, bonus prefix test)
self.logger.set_statsd_prefix('pfx')
self.assertStat('pfx.some.counter:1|c|@0.972', self.logger.increment,
'some.counter', sample_rate=0.972)
self.assertStat('pfx.some.counter:-1|c|@0.972', self.logger.decrement,
'some.counter', sample_rate=0.972)
self.assertStat('pfx.some.operation:4900.0|ms|@0.972',
self.logger.timing, 'some.operation', 4.9 * 1000,
sample_rate=0.972)
self.assertStatMatches('pfx\.another\.op:\d+\.\d+\|ms|@0.972',
self.logger.timing_since, 'another.op',
time.time(), sample_rate=0.972)
self.assertStat('pfx.another.counter:3|c|@0.972',
self.logger.update_stats, 'another.counter', 3,
sample_rate=0.972)
# Can override sample_rate with non-keyword arg
self.logger.set_statsd_prefix('')
self.assertStat('some.counter:1|c|@0.939', self.logger.increment,
'some.counter', 0.939)
self.assertStat('some.counter:-1|c|@0.939', self.logger.decrement,
'some.counter', 0.939)
self.assertStat('some.operation:4900.0|ms|@0.939',
self.logger.timing, 'some.operation',
4.9 * 1000, 0.939)
self.assertStatMatches('another\.op:\d+\.\d+\|ms|@0.939',
self.logger.timing_since, 'another.op',
time.time(), 0.939)
self.assertStat('another.counter:3|c|@0.939',
self.logger.update_stats, 'another.counter', 3, 0.939)
def test_delegate_methods_with_default_sample_rate(self):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
'log_statsd_default_sample_rate': '0.93',
}, 'pfx')
self.assertStat('pfx.some.counter:1|c|@0.93', self.logger.increment,
'some.counter')
self.assertStat('pfx.some.counter:-1|c|@0.93', self.logger.decrement,
'some.counter')
self.assertStat('pfx.some.operation:4760.0|ms|@0.93',
self.logger.timing, 'some.operation', 4.76 * 1000)
self.assertStatMatches('pfx\.another\.op:\d+\.\d+\|ms|@0.93',
self.logger.timing_since, 'another.op',
time.time())
self.assertStat('pfx.another.counter:3|c|@0.93',
self.logger.update_stats, 'another.counter', 3)
# Each call can override the sample_rate
self.assertStat('pfx.some.counter:1|c|@0.9912', self.logger.increment,
'some.counter', sample_rate=0.9912)
self.assertStat('pfx.some.counter:-1|c|@0.9912', self.logger.decrement,
'some.counter', sample_rate=0.9912)
self.assertStat('pfx.some.operation:4900.0|ms|@0.9912',
self.logger.timing, 'some.operation', 4.9 * 1000,
sample_rate=0.9912)
self.assertStatMatches('pfx\.another\.op:\d+\.\d+\|ms|@0.9912',
self.logger.timing_since, 'another.op',
time.time(), sample_rate=0.9912)
self.assertStat('pfx.another.counter:3|c|@0.9912',
self.logger.update_stats, 'another.counter', 3,
sample_rate=0.9912)
# Can override sample_rate with non-keyword arg
self.logger.set_statsd_prefix('')
self.assertStat('some.counter:1|c|@0.987654', self.logger.increment,
'some.counter', 0.987654)
self.assertStat('some.counter:-1|c|@0.987654', self.logger.decrement,
'some.counter', 0.987654)
self.assertStat('some.operation:4900.0|ms|@0.987654',
self.logger.timing, 'some.operation',
4.9 * 1000, 0.987654)
self.assertStatMatches('another\.op:\d+\.\d+\|ms|@0.987654',
self.logger.timing_since, 'another.op',
time.time(), 0.987654)
self.assertStat('another.counter:3|c|@0.987654',
self.logger.update_stats, 'another.counter',
3, 0.987654)
def test_delegate_methods_with_metric_prefix(self):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
'log_statsd_metric_prefix': 'alpha.beta',
}, 'pfx')
self.assertStat('alpha.beta.pfx.some.counter:1|c',
self.logger.increment, 'some.counter')
self.assertStat('alpha.beta.pfx.some.counter:-1|c',
self.logger.decrement, 'some.counter')
self.assertStat('alpha.beta.pfx.some.operation:4760.0|ms',
self.logger.timing, 'some.operation', 4.76 * 1000)
self.assertStatMatches(
'alpha\.beta\.pfx\.another\.op:\d+\.\d+\|ms',
self.logger.timing_since, 'another.op', time.time())
self.assertStat('alpha.beta.pfx.another.counter:3|c',
self.logger.update_stats, 'another.counter', 3)
self.logger.set_statsd_prefix('')
self.assertStat('alpha.beta.some.counter:1|c|@0.9912',
self.logger.increment, 'some.counter',
sample_rate=0.9912)
self.assertStat('alpha.beta.some.counter:-1|c|@0.9912',
self.logger.decrement, 'some.counter', 0.9912)
self.assertStat('alpha.beta.some.operation:4900.0|ms|@0.9912',
self.logger.timing, 'some.operation', 4.9 * 1000,
sample_rate=0.9912)
self.assertStatMatches('alpha\.beta\.another\.op:\d+\.\d+\|ms|@0.9912',
self.logger.timing_since, 'another.op',
time.time(), sample_rate=0.9912)
self.assertStat('alpha.beta.another.counter:3|c|@0.9912',
self.logger.update_stats, 'another.counter', 3,
sample_rate=0.9912)
@reset_logger_state
def test_thread_locals(self):
logger = utils.get_logger(None)
# test the setter
logger.thread_locals = ('id', 'ip')
self.assertEqual(logger.thread_locals, ('id', 'ip'))
# reset
logger.thread_locals = (None, None)
self.assertEqual(logger.thread_locals, (None, None))
logger.txn_id = '1234'
logger.client_ip = '1.2.3.4'
self.assertEqual(logger.thread_locals, ('1234', '1.2.3.4'))
logger.txn_id = '5678'
logger.client_ip = '5.6.7.8'
self.assertEqual(logger.thread_locals, ('5678', '5.6.7.8'))
def test_no_fdatasync(self):
called = []
class NoFdatasync(object):
pass
def fsync(fd):
called.append(fd)
with patch('swift.common.utils.os', NoFdatasync()):
with patch('swift.common.utils.fsync', fsync):
utils.fdatasync(12345)
self.assertEqual(called, [12345])
def test_yes_fdatasync(self):
called = []
class YesFdatasync(object):
def fdatasync(self, fd):
called.append(fd)
with patch('swift.common.utils.os', YesFdatasync()):
utils.fdatasync(12345)
self.assertEqual(called, [12345])
def test_fsync_bad_fullsync(self):
class FCNTL(object):
F_FULLSYNC = 123
def fcntl(self, fd, op):
raise IOError(18)
with patch('swift.common.utils.fcntl', FCNTL()):
self.assertRaises(OSError, lambda: utils.fsync(12345))
def test_fsync_f_fullsync(self):
called = []
class FCNTL(object):
F_FULLSYNC = 123
def fcntl(self, fd, op):
called[:] = [fd, op]
return 0
with patch('swift.common.utils.fcntl', FCNTL()):
utils.fsync(12345)
self.assertEqual(called, [12345, 123])
def test_fsync_no_fullsync(self):
called = []
class FCNTL(object):
pass
def fsync(fd):
called.append(fd)
with patch('swift.common.utils.fcntl', FCNTL()):
with patch('os.fsync', fsync):
utils.fsync(12345)
self.assertEqual(called, [12345])
class TestAuditLocationGenerator(unittest.TestCase):
def test_drive_tree_access(self):
orig_listdir = utils.listdir
def _mock_utils_listdir(path):
if 'bad_part' in path:
raise OSError(errno.EACCES)
elif 'bad_suffix' in path:
raise OSError(errno.EACCES)
elif 'bad_hash' in path:
raise OSError(errno.EACCES)
else:
return orig_listdir(path)
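# audit_location_generator() walks <root>/<device>/<datadir>/<partition>/
# <suffix>/<hash>; the mocked listdir raises EACCES at whichever level
# carries the specially named entry, and that OSError should propagate.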
# Check raise on bad partition
tmpdir = mkdtemp()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
obj_path = os.path.join(data, "bad_part")
with open(obj_path, "w"):
pass
part1 = os.path.join(data, "partition1")
os.makedirs(part1)
part2 = os.path.join(data, "partition2")
os.makedirs(part2)
with patch('swift.common.utils.listdir', _mock_utils_listdir):
audit = lambda: list(utils.audit_location_generator(
tmpdir, "data", mount_check=False))
self.assertRaises(OSError, audit)
rmtree(tmpdir)
# Check raise on bad suffix
tmpdir = mkdtemp()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
part1 = os.path.join(data, "partition1")
os.makedirs(part1)
part2 = os.path.join(data, "partition2")
os.makedirs(part2)
obj_path = os.path.join(part1, "bad_suffix")
with open(obj_path, 'w'):
pass
suffix = os.path.join(part2, "suffix")
os.makedirs(suffix)
with patch('swift.common.utils.listdir', _mock_utils_listdir):
audit = lambda: list(utils.audit_location_generator(
tmpdir, "data", mount_check=False))
self.assertRaises(OSError, audit)
rmtree(tmpdir)
# Check raise on bad hash
tmpdir = mkdtemp()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
part1 = os.path.join(data, "partition1")
os.makedirs(part1)
suffix = os.path.join(part1, "suffix")
os.makedirs(suffix)
hash1 = os.path.join(suffix, "hash1")
os.makedirs(hash1)
obj_path = os.path.join(suffix, "bad_hash")
with open(obj_path, 'w'):
pass
with patch('swift.common.utils.listdir', _mock_utils_listdir):
audit = lambda: list(utils.audit_location_generator(
tmpdir, "data", mount_check=False))
self.assertRaises(OSError, audit)
rmtree(tmpdir)
def test_non_dir_drive(self):
with temptree([]) as tmpdir:
logger = FakeLogger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
# Create a file that represents a non-dir drive
open(os.path.join(tmpdir, 'asdf'), 'w')
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=False, logger=logger
)
self.assertEqual(list(locations), [])
self.assertEqual(1, len(logger.get_lines_for_level('warning')))
# Test without the logger
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=False
)
self.assertEqual(list(locations), [])
def test_mount_check_drive(self):
with temptree([]) as tmpdir:
logger = FakeLogger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
# Create a file that represents a non-dir drive
open(os.path.join(tmpdir, 'asdf'), 'w')
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=True, logger=logger
)
self.assertEqual(list(locations), [])
self.assertEqual(2, len(logger.get_lines_for_level('warning')))
# Test without the logger
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=True
)
self.assertEqual(list(locations), [])
def test_non_dir_contents(self):
with temptree([]) as tmpdir:
logger = FakeLogger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
with open(os.path.join(data, "partition1"), "w"):
pass
partition = os.path.join(data, "partition2")
os.makedirs(partition)
with open(os.path.join(partition, "suffix1"), "w"):
pass
suffix = os.path.join(partition, "suffix2")
os.makedirs(suffix)
with open(os.path.join(suffix, "hash1"), "w"):
pass
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=False, logger=logger
)
self.assertEqual(list(locations), [])
def test_find_objects(self):
with temptree([]) as tmpdir:
expected_objs = list()
logger = FakeLogger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
# Create a file that represents a non-dir drive
open(os.path.join(tmpdir, 'asdf'), 'w')
partition = os.path.join(data, "partition1")
os.makedirs(partition)
suffix = os.path.join(partition, "suffix")
os.makedirs(suffix)
hash_path = os.path.join(suffix, "hash")
os.makedirs(hash_path)
obj_path = os.path.join(hash_path, "obj1.db")
with open(obj_path, "w"):
pass
expected_objs.append((obj_path, 'drive', 'partition1'))
partition = os.path.join(data, "partition2")
os.makedirs(partition)
suffix = os.path.join(partition, "suffix2")
os.makedirs(suffix)
hash_path = os.path.join(suffix, "hash2")
os.makedirs(hash_path)
obj_path = os.path.join(hash_path, "obj2.db")
with open(obj_path, "w"):
pass
expected_objs.append((obj_path, 'drive', 'partition2'))
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=False, logger=logger
)
got_objs = list(locations)
self.assertEqual(len(got_objs), len(expected_objs))
self.assertEqual(sorted(got_objs), sorted(expected_objs))
self.assertEqual(1, len(logger.get_lines_for_level('warning')))
def test_ignore_metadata(self):
with temptree([]) as tmpdir:
logger = FakeLogger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
partition = os.path.join(data, "partition2")
os.makedirs(partition)
suffix = os.path.join(partition, "suffix2")
os.makedirs(suffix)
hash_path = os.path.join(suffix, "hash2")
os.makedirs(hash_path)
obj_path = os.path.join(hash_path, "obj1.dat")
with open(obj_path, "w"):
pass
meta_path = os.path.join(hash_path, "obj1.meta")
with open(meta_path, "w"):
pass
locations = utils.audit_location_generator(
tmpdir, "data", ".dat", mount_check=False, logger=logger
)
self.assertEqual(list(locations),
[(obj_path, "drive", "partition2")])
class TestGreenAsyncPile(unittest.TestCase):
def test_runs_everything(self):
def run_test():
tests_ran[0] += 1
return tests_ran[0]
tests_ran = [0]
pile = utils.GreenAsyncPile(3)
for x in range(3):
pile.spawn(run_test)
self.assertEqual(sorted(x for x in pile), [1, 2, 3])
def test_is_asynchronous(self):
def run_test(index):
events[index].wait()
return index
pile = utils.GreenAsyncPile(3)
for order in ((1, 2, 0), (0, 1, 2), (2, 1, 0), (0, 2, 1)):
events = [eventlet.event.Event(), eventlet.event.Event(),
eventlet.event.Event()]
for x in range(3):
pile.spawn(run_test, x)
for x in order:
events[x].send()
self.assertEqual(next(pile), x)
def test_next_when_empty(self):
def run_test():
pass
pile = utils.GreenAsyncPile(3)
pile.spawn(run_test)
self.assertIsNone(next(pile))
self.assertRaises(StopIteration, lambda: next(pile))
def test_waitall_timeout_timesout(self):
def run_test(sleep_duration):
eventlet.sleep(sleep_duration)
completed[0] += 1
return sleep_duration
completed = [0]
pile = utils.GreenAsyncPile(3)
pile.spawn(run_test, 0.1)
pile.spawn(run_test, 1.0)
self.assertEqual(pile.waitall(0.5), [0.1])
self.assertEqual(completed[0], 1)
def test_waitall_timeout_completes(self):
def run_test(sleep_duration):
eventlet.sleep(sleep_duration)
completed[0] += 1
return sleep_duration
completed = [0]
pile = utils.GreenAsyncPile(3)
pile.spawn(run_test, 0.1)
pile.spawn(run_test, 0.1)
self.assertEqual(pile.waitall(0.5), [0.1, 0.1])
self.assertEqual(completed[0], 2)
def test_waitfirst_only_returns_first(self):
def run_test(name):
eventlet.sleep(0)
completed.append(name)
return name
completed = []
pile = utils.GreenAsyncPile(3)
pile.spawn(run_test, 'first')
pile.spawn(run_test, 'second')
pile.spawn(run_test, 'third')
self.assertEqual(pile.waitfirst(0.5), completed[0])
# 3 still completed, but only the first was returned.
self.assertEqual(3, len(completed))
def test_wait_with_firstn(self):
def run_test(name):
eventlet.sleep(0)
completed.append(name)
return name
for first_n in [None] + list(range(6)):
completed = []
pile = utils.GreenAsyncPile(10)
for i in range(10):
pile.spawn(run_test, i)
actual = pile._wait(1, first_n)
expected_n = first_n if first_n else 10
self.assertEqual(completed[:expected_n], actual)
self.assertEqual(10, len(completed))
def test_pending(self):
pile = utils.GreenAsyncPile(3)
self.assertEqual(0, pile._pending)
for repeats in range(2):
# repeat to verify that pending will go again up after going down
for i in range(4):
pile.spawn(lambda: i)
self.assertEqual(4, pile._pending)
for i in range(3, -1, -1):
next(pile)
self.assertEqual(i, pile._pending)
# sanity check - the pile is empty
self.assertRaises(StopIteration, pile.next)
# pending remains 0
self.assertEqual(0, pile._pending)
class TestLRUCache(unittest.TestCase):
def test_maxsize(self):
@utils.LRUCache(maxsize=10)
def f(*args):
return math.sqrt(*args)
_orig_math_sqrt = math.sqrt
# setup cache [0-10)
for i in range(10):
self.assertEqual(math.sqrt(i), f(i))
self.assertEqual(f.size(), 10)
# validate cache [0-10)
with patch('math.sqrt'):
for i in range(10):
self.assertEqual(_orig_math_sqrt(i), f(i))
self.assertEqual(f.size(), 10)
# update cache [10-20)
for i in range(10, 20):
self.assertEqual(math.sqrt(i), f(i))
# cache size is fixed
self.assertEqual(f.size(), 10)
# validate cache [10-20)
with patch('math.sqrt'):
for i in range(10, 20):
self.assertEqual(_orig_math_sqrt(i), f(i))
# validate un-cached [0-10)
with patch('math.sqrt', new=None):
for i in range(10):
self.assertRaises(TypeError, f, i)
# cache unchanged
self.assertEqual(f.size(), 10)
with patch('math.sqrt'):
for i in range(10, 20):
self.assertEqual(_orig_math_sqrt(i), f(i))
self.assertEqual(f.size(), 10)
def test_maxtime(self):
@utils.LRUCache(maxtime=30)
def f(*args):
return math.sqrt(*args)
self.assertEqual(30, f.maxtime)
_orig_math_sqrt = math.sqrt
now = time.time()
the_future = now + 31
# setup cache [0-10)
with patch('time.time', lambda: now):
for i in range(10):
self.assertEqual(math.sqrt(i), f(i))
self.assertEqual(f.size(), 10)
# validate cache [0-10)
with patch('math.sqrt'):
for i in range(10):
self.assertEqual(_orig_math_sqrt(i), f(i))
self.assertEqual(f.size(), 10)
# validate expired [0-10)
with patch('math.sqrt', new=None):
with patch('time.time', lambda: the_future):
for i in range(10):
self.assertRaises(TypeError, f, i)
# validate repopulates [0-10)
with patch('time.time', lambda: the_future):
for i in range(10):
self.assertEqual(math.sqrt(i), f(i))
# reuses cache space
self.assertEqual(f.size(), 10)
def test_set_maxtime(self):
@utils.LRUCache(maxtime=30)
def f(*args):
return math.sqrt(*args)
self.assertEqual(30, f.maxtime)
self.assertEqual(2, f(4))
self.assertEqual(1, f.size())
# expire everything
f.maxtime = -1
# validate un-cached [0-10)
with patch('math.sqrt', new=None):
self.assertRaises(TypeError, f, 4)
def test_set_maxsize(self):
@utils.LRUCache(maxsize=10)
def f(*args):
return math.sqrt(*args)
for i in range(12):
f(i)
self.assertEqual(f.size(), 10)
f.maxsize = 4
for i in range(12):
f(i)
self.assertEqual(f.size(), 4)
class TestSpliterator(unittest.TestCase):
def test_string(self):
input_chunks = ["coun", "ter-", "b", "ra", "nch-mater",
"nit", "y-fungusy", "-nummular"]
si = utils.Spliterator(input_chunks)
self.assertEqual(''.join(si.take(8)), "counter-")
self.assertEqual(''.join(si.take(7)), "branch-")
self.assertEqual(''.join(si.take(10)), "maternity-")
self.assertEqual(''.join(si.take(8)), "fungusy-")
self.assertEqual(''.join(si.take(8)), "nummular")
def test_big_input_string(self):
input_chunks = ["iridium"]
si = utils.Spliterator(input_chunks)
self.assertEqual(''.join(si.take(2)), "ir")
self.assertEqual(''.join(si.take(1)), "i")
self.assertEqual(''.join(si.take(2)), "di")
self.assertEqual(''.join(si.take(1)), "u")
self.assertEqual(''.join(si.take(1)), "m")
def test_chunk_boundaries(self):
input_chunks = ["soylent", "green", "is", "people"]
si = utils.Spliterator(input_chunks)
self.assertEqual(''.join(si.take(7)), "soylent")
self.assertEqual(''.join(si.take(5)), "green")
self.assertEqual(''.join(si.take(2)), "is")
self.assertEqual(''.join(si.take(6)), "people")
def test_no_empty_strings(self):
input_chunks = ["soylent", "green", "is", "people"]
si = utils.Spliterator(input_chunks)
outputs = (list(si.take(7)) # starts and ends on chunk boundary
+ list(si.take(2)) # spans two chunks
+ list(si.take(3)) # begins but does not end chunk
+ list(si.take(2)) # ends but does not begin chunk
+ list(si.take(6))) # whole chunk + EOF
self.assertNotIn('', outputs)
def test_running_out(self):
input_chunks = ["not much"]
si = utils.Spliterator(input_chunks)
self.assertEqual(''.join(si.take(4)), "not ")
self.assertEqual(''.join(si.take(99)), "much") # short
self.assertEqual(''.join(si.take(4)), "")
self.assertEqual(''.join(si.take(4)), "")
def test_overlap(self):
input_chunks = ["one fish", "two fish", "red fish", "blue fish"]
si = utils.Spliterator(input_chunks)
t1 = si.take(20) # longer than first chunk
self.assertLess(len(next(t1)), 20) # it's not exhausted
t2 = si.take(20)
self.assertRaises(ValueError, next, t2)
def test_closing(self):
input_chunks = ["abcd", "efg", "hij"]
si = utils.Spliterator(input_chunks)
it = si.take(3) # shorter than first chunk
self.assertEqual(next(it), 'abc')
it.close()
self.assertEqual(list(si.take(20)), ['d', 'efg', 'hij'])
si = utils.Spliterator(input_chunks)
self.assertEqual(list(si.take(1)), ['a'])
it = si.take(1) # still shorter than first chunk
self.assertEqual(next(it), 'b')
it.close()
self.assertEqual(list(si.take(20)), ['cd', 'efg', 'hij'])
si = utils.Spliterator(input_chunks)
it = si.take(6) # longer than first chunk, shorter than first + second
self.assertEqual(next(it), 'abcd')
self.assertEqual(next(it), 'ef')
it.close()
self.assertEqual(list(si.take(20)), ['g', 'hij'])
si = utils.Spliterator(input_chunks)
self.assertEqual(list(si.take(2)), ['ab'])
it = si.take(3) # longer than rest of chunk
self.assertEqual(next(it), 'cd')
it.close()
self.assertEqual(list(si.take(20)), ['efg', 'hij'])
class TestParseContentRange(unittest.TestCase):
def test_good(self):
start, end, total = utils.parse_content_range("bytes 100-200/300")
self.assertEqual(start, 100)
self.assertEqual(end, 200)
self.assertEqual(total, 300)
def test_bad(self):
self.assertRaises(ValueError, utils.parse_content_range,
"100-300/500")
self.assertRaises(ValueError, utils.parse_content_range,
"bytes 100-200/aardvark")
self.assertRaises(ValueError, utils.parse_content_range,
"bytes bulbous-bouffant/4994801")
class TestParseContentDisposition(unittest.TestCase):
def test_basic_content_type(self):
name, attrs = utils.parse_content_disposition('text/plain')
self.assertEqual(name, 'text/plain')
self.assertEqual(attrs, {})
def test_content_type_with_charset(self):
name, attrs = utils.parse_content_disposition(
'text/plain; charset=UTF8')
self.assertEqual(name, 'text/plain')
self.assertEqual(attrs, {'charset': 'UTF8'})
def test_content_disposition(self):
name, attrs = utils.parse_content_disposition(
'form-data; name="somefile"; filename="test.html"')
self.assertEqual(name, 'form-data')
self.assertEqual(attrs, {'name': 'somefile', 'filename': 'test.html'})
def test_content_disposition_without_white_space(self):
name, attrs = utils.parse_content_disposition(
'form-data;name="somefile";filename="test.html"')
self.assertEqual(name, 'form-data')
self.assertEqual(attrs, {'name': 'somefile', 'filename': 'test.html'})
class TestIterMultipartMimeDocuments(unittest.TestCase):
def test_bad_start(self):
it = utils.iter_multipart_mime_documents(StringIO('blah'), 'unique')
exc = None
try:
next(it)
except MimeInvalid as err:
exc = err
self.assertTrue('invalid starting boundary' in str(exc))
self.assertTrue('--unique' in str(exc))
def test_empty(self):
it = utils.iter_multipart_mime_documents(StringIO('--unique'),
'unique')
fp = next(it)
self.assertEqual(fp.read(), '')
exc = None
try:
next(it)
except StopIteration as err:
exc = err
self.assertTrue(exc is not None)
def test_basic(self):
it = utils.iter_multipart_mime_documents(
StringIO('--unique\r\nabcdefg\r\n--unique--'), 'unique')
fp = next(it)
self.assertEqual(fp.read(), 'abcdefg')
exc = None
try:
next(it)
except StopIteration as err:
exc = err
self.assertTrue(exc is not None)
def test_basic2(self):
it = utils.iter_multipart_mime_documents(
StringIO('--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'),
'unique')
fp = next(it)
self.assertEqual(fp.read(), 'abcdefg')
fp = next(it)
self.assertEqual(fp.read(), 'hijkl')
exc = None
try:
next(it)
except StopIteration as err:
exc = err
self.assertTrue(exc is not None)
def test_tiny_reads(self):
it = utils.iter_multipart_mime_documents(
StringIO('--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'),
'unique')
fp = next(it)
self.assertEqual(fp.read(2), 'ab')
self.assertEqual(fp.read(2), 'cd')
self.assertEqual(fp.read(2), 'ef')
self.assertEqual(fp.read(2), 'g')
self.assertEqual(fp.read(2), '')
fp = next(it)
self.assertEqual(fp.read(), 'hijkl')
exc = None
try:
next(it)
except StopIteration as err:
exc = err
self.assertTrue(exc is not None)
def test_big_reads(self):
it = utils.iter_multipart_mime_documents(
StringIO('--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'),
'unique')
fp = next(it)
self.assertEqual(fp.read(65536), 'abcdefg')
self.assertEqual(fp.read(), '')
fp = next(it)
self.assertEqual(fp.read(), 'hijkl')
exc = None
try:
next(it)
except StopIteration as err:
exc = err
self.assertTrue(exc is not None)
def test_leading_crlfs(self):
it = utils.iter_multipart_mime_documents(
StringIO('\r\n\r\n\r\n--unique\r\nabcdefg\r\n'
'--unique\r\nhijkl\r\n--unique--'),
'unique')
fp = next(it)
self.assertEqual(fp.read(65536), 'abcdefg')
self.assertEqual(fp.read(), '')
fp = next(it)
self.assertEqual(fp.read(), 'hijkl')
self.assertRaises(StopIteration, it.next)
def test_broken_mid_stream(self):
# We go ahead and accept whatever is sent instead of rejecting the
# whole request, in case the partial form is still useful.
it = utils.iter_multipart_mime_documents(
StringIO('--unique\r\nabc'), 'unique')
fp = next(it)
self.assertEqual(fp.read(), 'abc')
exc = None
try:
next(it)
except StopIteration as err:
exc = err
self.assertTrue(exc is not None)
def test_readline(self):
it = utils.iter_multipart_mime_documents(
StringIO('--unique\r\nab\r\ncd\ref\ng\r\n--unique\r\nhi\r\n\r\n'
'jkl\r\n\r\n--unique--'), 'unique')
fp = next(it)
self.assertEqual(fp.readline(), 'ab\r\n')
self.assertEqual(fp.readline(), 'cd\ref\ng')
self.assertEqual(fp.readline(), '')
fp = next(it)
self.assertEqual(fp.readline(), 'hi\r\n')
self.assertEqual(fp.readline(), '\r\n')
self.assertEqual(fp.readline(), 'jkl\r\n')
exc = None
try:
next(it)
except StopIteration as err:
exc = err
self.assertTrue(exc is not None)
def test_readline_with_tiny_chunks(self):
it = utils.iter_multipart_mime_documents(
StringIO('--unique\r\nab\r\ncd\ref\ng\r\n--unique\r\nhi\r\n'
'\r\njkl\r\n\r\n--unique--'),
'unique',
read_chunk_size=2)
fp = next(it)
self.assertEqual(fp.readline(), 'ab\r\n')
self.assertEqual(fp.readline(), 'cd\ref\ng')
self.assertEqual(fp.readline(), '')
fp = next(it)
self.assertEqual(fp.readline(), 'hi\r\n')
self.assertEqual(fp.readline(), '\r\n')
self.assertEqual(fp.readline(), 'jkl\r\n')
exc = None
try:
next(it)
except StopIteration as err:
exc = err
self.assertTrue(exc is not None)
class TestParseMimeHeaders(unittest.TestCase):
def test_parse_mime_headers(self):
doc_file = BytesIO(b"""Content-Disposition: form-data; name="file_size"
Foo: Bar
NOT-title-cAsED: quux
Connexion: =?iso8859-1?q?r=E9initialis=E9e_par_l=27homologue?=
Status: =?utf-8?b?5byA5aeL6YCa6L+H5a+56LGh5aSN5Yi2?=
Latin-1: Resincronizaci\xf3n realizada con \xe9xito
Utf-8: \xd0\xba\xd0\xbe\xd0\xbd\xd1\x82\xd0\xb5\xd0\xb9\xd0\xbd\xd0\xb5\xd1\x80
This is the body
""")
headers = utils.parse_mime_headers(doc_file)
utf8 = u'\u043a\u043e\u043d\u0442\u0435\u0439\u043d\u0435\u0440'
if six.PY2:
utf8 = utf8.encode('utf-8')
expected_headers = {
'Content-Disposition': 'form-data; name="file_size"',
'Foo': "Bar",
'Not-Title-Cased': "quux",
# Encoded-word or non-ASCII values are treated just like any other
# bytestring (at least for now)
'Connexion': "=?iso8859-1?q?r=E9initialis=E9e_par_l=27homologue?=",
'Status': "=?utf-8?b?5byA5aeL6YCa6L+H5a+56LGh5aSN5Yi2?=",
'Latin-1': "Resincronizaci\xf3n realizada con \xe9xito",
'Utf-8': utf8,
}
self.assertEqual(expected_headers, headers)
self.assertEqual(b"This is the body\n", doc_file.read())
class FakeResponse(object):
def __init__(self, status, headers, body):
self.status = status
self.headers = HeaderKeyDict(headers)
self.body = StringIO(body)
def getheader(self, header_name):
return str(self.headers.get(header_name, ''))
def getheaders(self):
return self.headers.items()
def read(self, length=None):
return self.body.read(length)
def readline(self, length=None):
return self.body.readline(length)
class TestDocumentItersToHTTPResponseBody(unittest.TestCase):
def test_no_parts(self):
body = utils.document_iters_to_http_response_body(
iter([]), 'dontcare',
multipart=False, logger=FakeLogger())
self.assertEqual(body, '')
def test_single_part(self):
body = "time flies like an arrow; fruit flies like a banana"
doc_iters = [{'part_iter': iter(StringIO(body).read, '')}]
resp_body = ''.join(
utils.document_iters_to_http_response_body(
iter(doc_iters), 'dontcare',
multipart=False, logger=FakeLogger()))
self.assertEqual(resp_body, body)
def test_multiple_parts(self):
part1 = "two peanuts were walking down a railroad track"
part2 = "and one was a salted. ... peanut."
doc_iters = [{
'start_byte': 88,
'end_byte': 133,
'content_type': 'application/peanut',
'entity_length': 1024,
'part_iter': iter(StringIO(part1).read, ''),
}, {
'start_byte': 500,
'end_byte': 532,
'content_type': 'application/salted',
'entity_length': 1024,
'part_iter': iter(StringIO(part2).read, ''),
}]
resp_body = ''.join(
utils.document_iters_to_http_response_body(
iter(doc_iters), 'boundaryboundary',
multipart=True, logger=FakeLogger()))
self.assertEqual(resp_body, (
"--boundaryboundary\r\n" +
# This is a little too strict; we don't actually care that the
# headers are in this order, but the test is much more legible
# this way.
"Content-Type: application/peanut\r\n" +
"Content-Range: bytes 88-133/1024\r\n" +
"\r\n" +
part1 + "\r\n" +
"--boundaryboundary\r\n"
"Content-Type: application/salted\r\n" +
"Content-Range: bytes 500-532/1024\r\n" +
"\r\n" +
part2 + "\r\n" +
"--boundaryboundary--"))
def test_closed_part_iterator(self):
print('test')
useful_iter_mock = mock.MagicMock()
useful_iter_mock.__iter__.return_value = ['']
body_iter = utils.document_iters_to_http_response_body(
iter([{'part_iter': useful_iter_mock}]), 'dontcare',
multipart=False, logger=FakeLogger())
body = ''
for s in body_iter:
body += s
self.assertEqual(body, '')
useful_iter_mock.close.assert_called_once_with()
# Calling "close" on the mock will now raise an AttributeError
del useful_iter_mock.close
body_iter = utils.document_iters_to_http_response_body(
iter([{'part_iter': useful_iter_mock}]), 'dontcare',
multipart=False, logger=FakeLogger())
body = ''
for s in body_iter:
body += s
class TestPairs(unittest.TestCase):
def test_pairs(self):
items = [10, 20, 30, 40, 50, 60]
got_pairs = set(utils.pairs(items))
self.assertEqual(got_pairs,
set([(10, 20), (10, 30), (10, 40), (10, 50), (10, 60),
(20, 30), (20, 40), (20, 50), (20, 60),
(30, 40), (30, 50), (30, 60),
(40, 50), (40, 60),
(50, 60)]))
class TestSocketStringParser(unittest.TestCase):
def test_socket_string_parser(self):
default = 1337
addrs = [('1.2.3.4', '1.2.3.4', default),
('1.2.3.4:5000', '1.2.3.4', 5000),
('[dead:beef::1]', 'dead:beef::1', default),
('[dead:beef::1]:5000', 'dead:beef::1', 5000),
('example.com', 'example.com', default),
('example.com:5000', 'example.com', 5000),
('foo.1-2-3.bar.com:5000', 'foo.1-2-3.bar.com', 5000),
('1.2.3.4:10:20', None, None),
('dead:beef::1:5000', None, None)]
for addr, expected_host, expected_port in addrs:
if expected_host:
host, port = utils.parse_socket_string(addr, default)
self.assertEqual(expected_host, host)
self.assertEqual(expected_port, int(port))
else:
with self.assertRaises(ValueError):
utils.parse_socket_string(addr, default)
class TestHashForFileFunction(unittest.TestCase):
def setUp(self):
self.tempfilename = tempfile.mktemp()
def tearDown(self):
try:
os.unlink(self.tempfilename)
except OSError:
pass
def test_hash_for_file_smallish(self):
stub_data = 'some data'
with open(self.tempfilename, 'wb') as fd:
fd.write(stub_data)
with mock.patch('swift.common.utils.md5') as mock_md5:
mock_hasher = mock_md5.return_value
rv = utils.md5_hash_for_file(self.tempfilename)
self.assertTrue(mock_hasher.hexdigest.called)
self.assertEqual(rv, mock_hasher.hexdigest.return_value)
self.assertEqual([mock.call(stub_data)],
mock_hasher.update.call_args_list)
def test_hash_for_file_big(self):
num_blocks = 10
block_size = utils.MD5_BLOCK_READ_BYTES
truncate = 523
start_char = ord('a')
expected_blocks = [chr(i) * block_size
for i in range(start_char, start_char + num_blocks)]
full_data = ''.join(expected_blocks)
trimmed_data = full_data[:-truncate]
# sanity
self.assertEqual(len(trimmed_data), block_size * num_blocks - truncate)
with open(self.tempfilename, 'wb') as fd:
fd.write(trimmed_data)
with mock.patch('swift.common.utils.md5') as mock_md5:
mock_hasher = mock_md5.return_value
rv = utils.md5_hash_for_file(self.tempfilename)
self.assertTrue(mock_hasher.hexdigest.called)
self.assertEqual(rv, mock_hasher.hexdigest.return_value)
self.assertEqual(num_blocks, len(mock_hasher.update.call_args_list))
found_blocks = []
for i, (expected_block, call) in enumerate(zip(
expected_blocks, mock_hasher.update.call_args_list)):
args, kwargs = call
self.assertEqual(kwargs, {})
self.assertEqual(1, len(args))
block = args[0]
if i < num_blocks - 1:
self.assertEqual(block, expected_block)
else:
self.assertEqual(block, expected_block[:-truncate])
found_blocks.append(block)
self.assertEqual(''.join(found_blocks), trimmed_data)
def test_hash_for_file_empty(self):
with open(self.tempfilename, 'wb'):
pass
with mock.patch('swift.common.utils.md5') as mock_md5:
mock_hasher = mock_md5.return_value
rv = utils.md5_hash_for_file(self.tempfilename)
self.assertTrue(mock_hasher.hexdigest.called)
self.assertEqual(rv, mock_hasher.hexdigest.return_value)
self.assertEqual([], mock_hasher.update.call_args_list)
def test_hash_for_file_brittle(self):
data_to_expected_hash = {
'': 'd41d8cd98f00b204e9800998ecf8427e',
'some data': '1e50210a0202497fb79bc38b6ade6c34',
('a' * 4096 * 10)[:-523]: '06a41551609656c85f14f659055dc6d3',
}
# unlike some other places where the concrete implementation really
# matters for backwards compatibility these brittle tests are probably
# not needed or justified, if a future maintainer rips them out later
# they're probably doing the right thing
failures = []
for stub_data, expected_hash in data_to_expected_hash.items():
with open(self.tempfilename, 'wb') as fd:
fd.write(stub_data)
rv = utils.md5_hash_for_file(self.tempfilename)
try:
self.assertEqual(expected_hash, rv)
except AssertionError:
trim_cap = 80
if len(stub_data) > trim_cap:
stub_data = '%s...<truncated>' % stub_data[:trim_cap]
failures.append('hash for %r was %s instead of expected %s' % (
stub_data, rv, expected_hash))
if failures:
self.fail('Some data did not compute expected hash:\n' +
'\n'.join(failures))
if __name__ == '__main__':
unittest.main()
| [] | [] | ["TZ", "HOME"] | [] | ["TZ", "HOME"] | python | 2 | 0 | |
pkg/monitor/netstat.go | /*
*
* * Licensed to the Apache Software Foundation (ASF) under one or more
* * contributor license agreements. See the NOTICE file distributed with
* * this work for additional information regarding copyright ownership.
* * The ASF licenses this file to You under the Apache License, Version 2.0
* * (the "License"); you may not use this file except in compliance with
* * the License. You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
*
*/
package monitor
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"os/exec"
"strconv"
"strings"
"github.com/sirupsen/logrus"
"github.com/IceFireDB/IceFireDB-Proxy/pkg/netstat"
)
var ethInterface string
type NetworkStat struct {
RxBytes uint64
TxBytes uint64
}
type NetStatCallback func(stat *NetworkStat, err error)
var folder string
func init() {
ethInterface = "eth0"
if val := os.Getenv("MSP_ETH_INTERFACE_NAME"); len(val) > 0 {
ethInterface = val
}
folder = "/sys/class/net/" + ethInterface + "/statistics/"
cmd := exec.Command("ip", "-o", "-4", "route", "show", "to", "default")
var out bytes.Buffer
var stderr bytes.Buffer
cmd.Stdout = &out
cmd.Stderr = &stderr
err := cmd.Run()
if err != nil {
return
}
parts := strings.Split(strings.TrimSpace(out.String()), " ")
if len(parts) < 5 {
fmt.Println(fmt.Errorf("invalid result from \"ip -o -4 route show to default\": %s", out.String()))
return
}
ethInterface = strings.TrimSpace(parts[4])
folder = "/sys/class/net/" + ethInterface + "/statistics/"
}
func GetNetstat() (tcp map[string]int, err error) {
socks, err := netstat.TCPSocks(netstat.NoopFilter)
if err != nil {
logrus.Error("获取服务TCP连接失败 ", err)
return
}
tcp = make(map[string]int)
if len(socks) > 0 {
for _, value := range socks {
state := value.State.String()
tcp[state]++
}
}
return
}
func CurrentNetworkStatInputByte() float64 {
rxBytes, _ := ReadNumberFromFile(folder + "rx_bytes")
return rxBytes
}
func CurrentNetworkStatOutputByte() float64 {
txBytes, _ := ReadNumberFromFile(folder + "tx_bytes")
return txBytes
}
func ReadNumberFromFile(name string) (n float64, err error) {
out, err := ioutil.ReadFile(name)
if err != nil {
return 0, err
}
n, err = strconv.ParseFloat(strings.TrimSpace(string(out)), 64)
if err != nil {
return n, err
}
return n, nil
}
| ["\"MSP_ETH_INTERFACE_NAME\""] | [] | ["MSP_ETH_INTERFACE_NAME"] | [] | ["MSP_ETH_INTERFACE_NAME"] | go | 1 | 0 | |
netmiko/utilities.py | """Miscellaneous utility functions."""
from __future__ import print_function
from __future__ import unicode_literals
import sys
import io
import os
import serial.tools.list_ports
from netmiko._textfsm import _clitable as clitable
from netmiko._textfsm._clitable import CliTableError
from netmiko.py23_compat import text_type
# Dictionary mapping 'show run' for vendors with different command
SHOW_RUN_MAPPER = {
"juniper": "show configuration",
"juniper_junos": "show configuration",
"extreme": "show configuration",
"extreme_ers": "show running-config",
"extreme_exos": "show configuration",
"extreme_netiron": "show running-config",
"extreme_nos": "show running-config",
"extreme_slx": "show running-config",
"extreme_vdx": "show running-config",
"extreme_vsp": "show running-config",
"extreme_wing": "show running-config",
"hp_comware": "display current-configuration",
"huawei": "display current-configuration",
"fortinet": "show full-configuration",
"checkpoint": "show configuration",
"cisco_wlc": "show run-config",
"enterasys": "show running-config",
"dell_force10": "show running-config",
"avaya_vsp": "show running-config",
"avaya_ers": "show running-config",
"brocade_vdx": "show running-config",
"brocade_nos": "show running-config",
"brocade_fastiron": "show running-config",
"brocade_netiron": "show running-config",
"alcatel_aos": "show configuration snapshot",
}
# Expand SHOW_RUN_MAPPER to include '_ssh' key
new_dict = {}
for k, v in SHOW_RUN_MAPPER.items():
new_key = k + "_ssh"
new_dict[k] = v
new_dict[new_key] = v
SHOW_RUN_MAPPER = new_dict
# Default location of netmiko temp directory for netmiko tools
NETMIKO_BASE_DIR = "~/.netmiko"
def load_yaml_file(yaml_file):
"""Read YAML file."""
try:
import yaml
except ImportError:
sys.exit("Unable to import yaml module.")
try:
with io.open(yaml_file, "rt", encoding="utf-8") as fname:
return yaml.safe_load(fname)
except IOError:
sys.exit("Unable to open YAML file: {0}".format(yaml_file))
def load_devices(file_name=None):
"""Find and load .netmiko.yml file."""
yaml_devices_file = find_cfg_file(file_name)
return load_yaml_file(yaml_devices_file)
def find_cfg_file(file_name=None):
"""Look for .netmiko.yml in current dir, then ~/.netmiko.yml."""
base_file = ".netmiko.yml"
check_files = [base_file, os.path.expanduser("~") + "/" + base_file]
if file_name:
check_files.insert(0, file_name)
for test_file in check_files:
if os.path.isfile(test_file):
return test_file
raise IOError("{}: file not found in current dir or home dir.".format(base_file))
def display_inventory(my_devices):
"""Print out inventory devices and groups."""
inventory_groups = ["all"]
inventory_devices = []
for k, v in my_devices.items():
if isinstance(v, list):
inventory_groups.append(k)
elif isinstance(v, dict):
inventory_devices.append((k, v["device_type"]))
inventory_groups.sort()
inventory_devices.sort(key=lambda x: x[0])
print("\nDevices:")
print("-" * 40)
for a_device, device_type in inventory_devices:
device_type = " ({})".format(device_type)
print("{:<25}{:>15}".format(a_device, device_type))
print("\n\nGroups:")
print("-" * 40)
for a_group in inventory_groups:
print(a_group)
print()
def obtain_all_devices(my_devices):
"""Dynamically create 'all' group."""
new_devices = {}
for device_name, device_or_group in my_devices.items():
# Skip any groups
if not isinstance(device_or_group, list):
new_devices[device_name] = device_or_group
return new_devices
def obtain_netmiko_filename(device_name):
"""Create file name based on device_name."""
_, netmiko_full_dir = find_netmiko_dir()
return "{}/{}.txt".format(netmiko_full_dir, device_name)
def write_tmp_file(device_name, output):
file_name = obtain_netmiko_filename(device_name)
with open(file_name, "w") as f:
f.write(output)
return file_name
def ensure_dir_exists(verify_dir):
"""Ensure directory exists. Create if necessary."""
if not os.path.exists(verify_dir):
# Doesn't exist create dir
os.makedirs(verify_dir)
else:
# Exists
if not os.path.isdir(verify_dir):
# Not a dir, raise an exception
raise ValueError("{} is not a directory".format(verify_dir))
def find_netmiko_dir():
"""Check environment first, then default dir"""
try:
netmiko_base_dir = os.environ["NETMIKO_DIR"]
except KeyError:
netmiko_base_dir = NETMIKO_BASE_DIR
netmiko_base_dir = os.path.expanduser(netmiko_base_dir)
if netmiko_base_dir == "/":
raise ValueError("/ cannot be netmiko_base_dir")
netmiko_full_dir = "{}/tmp".format(netmiko_base_dir)
return (netmiko_base_dir, netmiko_full_dir)
def write_bytes(out_data, encoding="ascii"):
"""Write Python2 and Python3 compatible byte stream."""
if sys.version_info[0] >= 3:
if isinstance(out_data, type("")):
if encoding == "utf-8":
return out_data.encode("utf-8")
else:
return out_data.encode("ascii", "ignore")
elif isinstance(out_data, type(b"")):
return out_data
else:
if isinstance(out_data, type("")):
if encoding == "utf-8":
return out_data.encode("utf-8")
else:
return out_data.encode("ascii", "ignore")
elif isinstance(out_data, type(str(""))):
return out_data
msg = "Invalid value for out_data neither unicode nor byte string: {}".format(
out_data
)
raise ValueError(msg)
def check_serial_port(name):
"""returns valid COM Port."""
try:
cdc = next(serial.tools.list_ports.grep(name))
return cdc[0]
except StopIteration:
msg = "device {} not found. ".format(name)
msg += "available devices are: "
ports = list(serial.tools.list_ports.comports())
for p in ports:
msg += "{},".format(text_type(p))
raise ValueError(msg)
def get_template_dir():
"""Find and return the ntc-templates/templates dir."""
try:
template_dir = os.path.expanduser(os.environ["NET_TEXTFSM"])
index = os.path.join(template_dir, "index")
if not os.path.isfile(index):
# Assume only base ./ntc-templates specified
template_dir = os.path.join(template_dir, "templates")
except KeyError:
# Construct path ~/ntc-templates/templates
home_dir = os.path.expanduser("~")
template_dir = os.path.join(home_dir, "ntc-templates", "templates")
index = os.path.join(template_dir, "index")
if not os.path.isdir(template_dir) or not os.path.isfile(index):
msg = """
Valid ntc-templates not found, please install https://github.com/networktocode/ntc-templates
and then set the NET_TEXTFSM environment variable to point to the ./ntc-templates/templates
directory."""
raise ValueError(msg)
return os.path.abspath(template_dir)
def clitable_to_dict(cli_table):
"""Converts TextFSM cli_table object to list of dictionaries."""
objs = []
for row in cli_table:
temp_dict = {}
for index, element in enumerate(row):
temp_dict[cli_table.header[index].lower()] = element
objs.append(temp_dict)
return objs
def get_structured_data(raw_output, platform, command):
"""Convert raw CLI output to structured data using TextFSM template."""
template_dir = get_template_dir()
index_file = os.path.join(template_dir, "index")
textfsm_obj = clitable.CliTable(index_file, template_dir)
attrs = {"Command": command, "Platform": platform}
try:
# Parse output through template
textfsm_obj.ParseCmd(raw_output, attrs)
structured_data = clitable_to_dict(textfsm_obj)
output = raw_output if structured_data == [] else structured_data
return output
except CliTableError:
return raw_output
| [] | [] | ["NET_TEXTFSM", "NETMIKO_DIR"] | [] | ["NET_TEXTFSM", "NETMIKO_DIR"] | python | 2 | 0 | |
init.go | // Copyright (C) 2017. See AUTHORS.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package openssl is a light wrapper around OpenSSL for Go.
It strives to provide a near-drop-in replacement for the Go standard library
tls package, while allowing for:
Performance
OpenSSL is battle-tested and optimized C. While Go's built-in library shows
great promise, it is still young and in some places, inefficient. This simple
OpenSSL wrapper can often do at least 2x with the same cipher and protocol.
On my lappytop, I get the following benchmarking speeds:
BenchmarkSHA1Large_openssl 1000 2611282 ns/op 401.56 MB/s
BenchmarkSHA1Large_stdlib 500 3963983 ns/op 264.53 MB/s
BenchmarkSHA1Small_openssl 1000000 3476 ns/op 0.29 MB/s
BenchmarkSHA1Small_stdlib 5000000 550 ns/op 1.82 MB/s
BenchmarkSHA256Large_openssl 200 8085314 ns/op 129.69 MB/s
BenchmarkSHA256Large_stdlib 100 18948189 ns/op 55.34 MB/s
BenchmarkSHA256Small_openssl 1000000 4262 ns/op 0.23 MB/s
BenchmarkSHA256Small_stdlib 1000000 1444 ns/op 0.69 MB/s
BenchmarkOpenSSLThroughput 100000 21634 ns/op 47.33 MB/s
BenchmarkStdlibThroughput 50000 58974 ns/op 17.36 MB/s
Interoperability
Many systems support OpenSSL with a variety of plugins and modules for things,
such as hardware acceleration in embedded devices.
Greater flexibility and configuration
OpenSSL allows for far greater configuration of corner cases and backwards
compatibility (such as support of SSLv2). You shouldn't be using SSLv2 if you
can help but, but sometimes you can't help it.
Security
Yeah yeah, Heartbleed. But according to the author of the standard library's
TLS implementation, Go's TLS library is vulnerable to timing attacks. And
whether or not OpenSSL received the appropriate amount of scrutiny
pre-Heartbleed, it sure is receiving it now.
Usage
Starting an HTTP server that uses OpenSSL is very easy. It's as simple as:
log.Fatal(openssl.ListenAndServeTLS(
":8443", "my_server.crt", "my_server.key", myHandler))
Getting a net.Listener that uses OpenSSL is also easy:
ctx, err := openssl.NewCtxFromFiles("my_server.crt", "my_server.key")
if err != nil {
log.Fatal(err)
}
l, err := openssl.Listen("tcp", ":7777", ctx)
Making a client connection is straightforward too:
ctx, err := NewCtx()
if err != nil {
log.Fatal(err)
}
err = ctx.LoadVerifyLocations("/etc/ssl/certs/ca-certificates.crt", "")
if err != nil {
log.Fatal(err)
}
conn, err := openssl.Dial("tcp", "localhost:7777", ctx, 0)
Help wanted: To get this library to work with net/http's client, we
had to fork net/http. It would be nice if an alternate http client library
supported the generality needed to use OpenSSL instead of crypto/tls.
*/
package openssl
// #include "shim.h"
import "C"
import (
"errors"
"fmt"
"os"
"strings"
)
func init() {
if rc := C.X_shim_init(); rc != 0 {
panic(fmt.Errorf("X_shim_init failed with %d", rc))
}
if os.Getenv("DEBUG") == "1" {
fmt.Printf("OPENSSL_VERSION_NUMBER -> 0x%0x\nOPENSSL_VERSION_TEXT -> %s\n",
C.OPENSSL_VERSION_NUMBER, C.OPENSSL_VERSION_TEXT)
}
}
// errorFromErrorQueue needs to run in the same OS thread as the operation
// that caused the possible error
func errorFromErrorQueue() error {
var errs []string
for {
err := C.ERR_get_error()
if err == 0 {
break
}
errs = append(errs, fmt.Sprintf("%s:%s:%s",
C.GoString(C.ERR_lib_error_string(err)),
C.GoString(C.ERR_func_error_string(err)),
C.GoString(C.ERR_reason_error_string(err))))
}
return errors.New(fmt.Sprintf("SSL errors: %s", strings.Join(errs, "\n")))
}
| ["\"DEBUG\""] | [] | ["DEBUG"] | [] | ["DEBUG"] | go | 1 | 0 | |
agent/runners/acme.py | import asyncio
import json
import logging
import os
import sys
from aiohttp import ClientError
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) # noqa
from runners.agent_container import ( # noqa:E402
arg_parser,
create_agent_with_args,
AriesAgent,
)
from runners.support.utils import ( # noqa:E402
check_requires,
log_msg,
log_status,
log_timer,
prompt,
prompt_loop,
)
CRED_PREVIEW_TYPE = "https://didcomm.org/issue-credential/2.0/credential-preview"
SELF_ATTESTED = os.getenv("SELF_ATTESTED")
TAILS_FILE_COUNT = int(os.getenv("TAILS_FILE_COUNT", 100))
logging.basicConfig(level=logging.WARNING)
LOGGER = logging.getLogger(__name__)
class AcmeAgent(AriesAgent):
def __init__(
self,
ident: str,
http_port: int,
admin_port: int,
no_auto: bool = False,
**kwargs,
):
super().__init__(
ident,
http_port,
admin_port,
prefix="Acme",
no_auto=no_auto,
**kwargs,
)
self.connection_id = None
self._connection_ready = None
self.cred_state = {}
self.cred_attrs = {}
async def detect_connection(self):
await self._connection_ready
self._connection_ready = None
@property
def connection_ready(self):
return self._connection_ready.done() and self._connection_ready.result()
async def handle_oob_invitation(self, message):
pass
async def handle_connections(self, message):
print(
self.ident, "handle_connections", message["state"], message["rfc23_state"]
)
conn_id = message["connection_id"]
if (not self.connection_id) and message["rfc23_state"] == "invitation-sent":
print(self.ident, "set connection id", conn_id)
self.connection_id = conn_id
if (
message["connection_id"] == self.connection_id
and message["rfc23_state"] == "completed"
and (self._connection_ready and not self._connection_ready.done())
):
self.log("Connected")
self._connection_ready.set_result(True)
async def handle_issue_credential_v2_0(self, message):
state = message["state"]
cred_ex_id = message["cred_ex_id"]
prev_state = self.cred_state.get(cred_ex_id)
if prev_state == state:
return # ignore
self.cred_state[cred_ex_id] = state
self.log(f"Credential: state = {state}, cred_ex_id = {cred_ex_id}")
if state == "request-received":
# TODO issue credentials based on offer preview in cred ex record
pass
async def handle_issue_credential_v2_0_indy(self, message):
pass # employee id schema does not support revocation
async def handle_present_proof_v2_0(self, message):
state = message["state"]
pres_ex_id = message["pres_ex_id"]
self.log(f"Presentation: state = {state}, pres_ex_id = {pres_ex_id}")
if state == "presentation-received":
# TODO handle received presentations
pass
async def handle_basicmessages(self, message):
self.log("Received message:", message["content"])
async def main(args):
acme_agent = await create_agent_with_args(args, ident="acme")
try:
log_status(
"#1 Provision an agent and wallet, get back configuration details"
+ (
f" (Wallet type: {acme_agent.wallet_type})"
if acme_agent.wallet_type
else ""
)
)
agent = AcmeAgent(
"acme.agent",
acme_agent.start_port,
acme_agent.start_port + 1,
genesis_data=acme_agent.genesis_txns,
no_auto=acme_agent.no_auto,
tails_server_base_url=acme_agent.tails_server_base_url,
timing=acme_agent.show_timing,
multitenant=acme_agent.multitenant,
mediation=acme_agent.mediation,
wallet_type=acme_agent.wallet_type,
seed=acme_agent.seed,
)
acme_agent.public_did = True
# TODO: Create schema
acme_schema_name = "employee id schema"
acme_schema_attrs = ["employee_id", "name", "date", "position"]
await acme_agent.initialize(
the_agent=agent,
schema_name=acme_schema_name,
schema_attrs=acme_schema_attrs,
)
# generate an invitation for Alice
await acme_agent.generate_invitation(display_qr=True, wait=True)
options = (
" (1) Issue Credential\n"
" (2) Send Proof Request\n"
" (3) Send Message\n"
" (X) Exit?\n"
"[1/2/3/X]"
)
async for option in prompt_loop(options):
if option is not None:
option = option.strip()
if option is None or option in "xX":
break
elif option == "1":
log_status("#13 Issue credential offer to X")
# TODO credential offers
elif option == "2":
log_status("#20 Request proof of degree from alice")
# TODO presentation requests
elif option == "3":
msg = await prompt("Enter message: ")
await agent.admin_POST(
f"/connections/{agent.connection_id}/send-message", {"content": msg}
)
if acme_agent.show_timing:
timing = await acme_agent.agent.fetch_timing()
if timing:
for line in acme_agent.agent.format_timing(timing):
log_msg(line)
finally:
terminated = await acme_agent.terminate()
await asyncio.sleep(0.1)
if not terminated:
os._exit(1)
if __name__ == "__main__":
parser = arg_parser(ident="acme", port=8040)
args = parser.parse_args()
ENABLE_PYDEVD_PYCHARM = os.getenv("ENABLE_PYDEVD_PYCHARM", "").lower()
ENABLE_PYDEVD_PYCHARM = ENABLE_PYDEVD_PYCHARM and ENABLE_PYDEVD_PYCHARM not in (
"false",
"0",
)
PYDEVD_PYCHARM_HOST = os.getenv("PYDEVD_PYCHARM_HOST", "localhost")
PYDEVD_PYCHARM_CONTROLLER_PORT = int(
os.getenv("PYDEVD_PYCHARM_CONTROLLER_PORT", 5001)
)
if ENABLE_PYDEVD_PYCHARM:
try:
import pydevd_pycharm
print(
"Acme remote debugging to "
f"{PYDEVD_PYCHARM_HOST}:{PYDEVD_PYCHARM_CONTROLLER_PORT}"
)
pydevd_pycharm.settrace(
host=PYDEVD_PYCHARM_HOST,
port=PYDEVD_PYCHARM_CONTROLLER_PORT,
stdoutToServer=True,
stderrToServer=True,
suspend=False,
)
except ImportError:
print("pydevd_pycharm library was not found")
check_requires(args)
try:
asyncio.get_event_loop().run_until_complete(main(args))
except KeyboardInterrupt:
os._exit(1)
| [] | [] | ["ENABLE_PYDEVD_PYCHARM", "PYDEVD_PYCHARM_CONTROLLER_PORT", "TAILS_FILE_COUNT", "SELF_ATTESTED", "PYDEVD_PYCHARM_HOST"] | [] | ["ENABLE_PYDEVD_PYCHARM", "PYDEVD_PYCHARM_CONTROLLER_PORT", "TAILS_FILE_COUNT", "SELF_ATTESTED", "PYDEVD_PYCHARM_HOST"] | python | 5 | 0 | |
adapters/http_test.go | package adapters_test
import (
"net/http"
"testing"
"github.com/smartcontractkit/chainlink/adapters"
"github.com/smartcontractkit/chainlink/internal/cltest"
"github.com/smartcontractkit/chainlink/store/models"
"github.com/stretchr/testify/assert"
)
func TestHttpAdapters_NotAUrlError(t *testing.T) {
tests := []struct {
name string
adapter adapters.BaseAdapter
}{
{"HTTPGet", &adapters.HTTPGet{URL: cltest.WebURL("NotAURL")}},
{"HTTPPost", &adapters.HTTPPost{URL: cltest.WebURL("NotAURL")}},
}
for _, tt := range tests {
test := tt
t.Run(test.name, func(t *testing.T) {
t.Parallel()
result := test.adapter.Perform(models.RunResult{}, nil)
assert.Equal(t, models.JSON{}, result.Data)
assert.True(t, result.HasError())
})
}
}
func TestHttpGet_Perform(t *testing.T) {
cases := []struct {
name string
status int
want string
wantErrored bool
response string
}{
{"success", 200, "results!", false, `results!`},
{"success but error in body", 200, `{"error": "results!"}`, false, `{"error": "results!"}`},
{"success with HTML", 200, `<html>results!</html>`, false, `<html>results!</html>`},
{"not found", 400, "inputValue", true, `<html>so bad</html>`},
{"server error", 400, "inputValue", true, `Invalid request`},
}
for _, tt := range cases {
test := tt
t.Run(test.name, func(t *testing.T) {
t.Parallel()
input := cltest.RunResultWithResult("inputValue")
mock, cleanup := cltest.NewHTTPMockServer(t, test.status, "GET", test.response,
func(_ http.Header, body string) { assert.Equal(t, ``, body) })
defer cleanup()
hga := adapters.HTTPGet{URL: cltest.WebURL(mock.URL)}
result := hga.Perform(input, nil)
val, err := result.ResultString()
assert.NoError(t, err)
assert.Equal(t, test.want, val)
assert.Equal(t, test.wantErrored, result.HasError())
assert.Equal(t, false, result.Status.PendingBridge())
})
}
}
func TestHttpPost_Perform(t *testing.T) {
cases := []struct {
name string
status int
want string
wantErrored bool
response string
}{
{"success", 200, "results!", false, `results!`},
{"success but error in body", 200, `{"error": "results!"}`, false, `{"error": "results!"}`},
{"success with HTML", 200, `<html>results!</html>`, false, `<html>results!</html>`},
{"not found", 400, "inputVal", true, `<html>so bad</html>`},
{"server error", 500, "inputVal", true, `big error`},
}
for _, tt := range cases {
test := tt
t.Run(test.name, func(t *testing.T) {
t.Parallel()
input := cltest.RunResultWithResult("inputVal")
wantedBody := `{"result":"inputVal"}`
mock, cleanup := cltest.NewHTTPMockServer(t, test.status, "POST", test.response,
func(_ http.Header, body string) { assert.Equal(t, wantedBody, body) })
defer cleanup()
hpa := adapters.HTTPPost{URL: cltest.WebURL(mock.URL)}
result := hpa.Perform(input, nil)
val := result.Result()
assert.Equal(t, test.want, val.String())
assert.Equal(t, true, val.Exists())
assert.Equal(t, test.wantErrored, result.HasError())
assert.Equal(t, false, result.Status.PendingBridge())
})
}
}
| [] | [] | [] | [] | [] | go | null | null | null |
gm-center/gmc_api_gateway/app/api/auth.go | package api
import (
"encoding/json"
"fmt"
"gmc_api_gateway/app/db"
"gmc_api_gateway/app/model"
"net/http"
"os"
"strings"
"time"
"github.com/dgrijalva/jwt-go"
"github.com/labstack/echo/v4"
)
type jwtCustomClaims struct {
Name string `json:"name"`
Role string `json:"role"`
jwt.StandardClaims
}
func GetJWTSecret() string {
return os.Getenv("SIGNINGKEY")
}
func AuthenticateUser(id, password string) (bool, string) {
db := db.DbManager()
var user model.MemberWithPassword
idCheck := strings.Compare(id, "") != 0
passCheck := strings.Compare(password, "") != 0
if idCheck && passCheck {
if err := db.First(&user, model.MemberWithPassword{Member: model.Member{Id: id}, Password: password}).Error; err == nil {
return true, user.RoleName
}
}
return false, ""
}
func LoginUser(c echo.Context) (err error) {
var user model.User
Body := responseBody(c.Request().Body)
// fmt.Println("Body is : ", Body)
err = json.Unmarshal([]byte(Body), &user)
if err != nil {
c.String(http.StatusInternalServerError, "Invalid json provided")
return
}
fmt.Println("Body Value is : ", user)
fmt.Println("user email is : ", user.Id)
fmt.Println("user password is : ", user.Password)
loginResult, userRole := AuthenticateUser(user.Id, user.Password)
// fmt.Println("loginResult is : ", loginResult)
// fmt.Println("userRole is : ", userRole)
if loginResult {
accessToken, expire, err := generateAccessToken(user.Id, userRole)
fmt.Println("accessToken is : ", accessToken)
fmt.Println("expire is : ", expire)
fmt.Println("err is : ", err)
// cookieName := "gedgeAuth"
// if cookieName != "" {
// cookie := new(http.Cookie)
// cookie.Name = cookieName
// cookie.Value = accessToken
// cookie.Expires = expire
// c.SetCookie(cookie)
// }
if err != nil {
return c.JSON(http.StatusUnauthorized, err.Error())
}
fmt.Println("token is : ", accessToken)
return c.JSON(http.StatusOK, echo.Map{
"status": 200,
"access-token": accessToken,
"userRole": userRole,
})
}
return c.JSON(http.StatusUnauthorized, false)
}
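// generateAccessToken signs a JWT for the given user that expires 15 minutes after issuance.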
func generateAccessToken(userid string, userrole string) (string, time.Time, error) {
expirationTime := time.Now().Add(time.Minute * 15)
return generateToken(userid, userrole, expirationTime, []byte(GetJWTSecret()))
}
func generateToken(userid string, userrole string, expirationTime time.Time, secret []byte) (string, time.Time, error) {
claims := &jwtCustomClaims{
Name: userid,
Role: userrole,
StandardClaims: jwt.StandardClaims{
ExpiresAt: expirationTime.Unix(),
},
}
token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
tokenString, err := token.SignedString(secret)
if err != nil {
return "", time.Now(), err
}
return tokenString, expirationTime, nil
}
// func VerifyAccessToken(c echo.Context) (err error) {
// }
| ["\"SIGNINGKEY\""] | [] | ["SIGNINGKEY"] | [] | ["SIGNINGKEY"] | go | 1 | 0 | |
examples/optimization/layout_opt/hybrid_run.py | """
A prototype application of the distributed cross-entropy method to the wind optimization problem.
In this basic implementation, the number of turbines is fixed and the generative distribution is uncorrelated.
TODO:
+ Add boundary constraints / penalties
+ Add proximity constraints
+ Better order turbine locations
+ Investigate turbine number as an attribute
+ Investigate modeling parameter covariances
+ Investigate other distribution types
+ Investigate parameter transformations
+ Add solar
+ Add storage
+ Add cabling, etc
+ investigate organic approach
"""
import matplotlib as mpl
mpl.use('Agg')
import os
from dotenv import load_dotenv
import numpy as np
from matplotlib.animation import (
PillowWriter,
)
from matplotlib.lines import Line2D
from tools.optimization import (
setup_run,
DataRecorder
)
from hybrid.sites import make_circular_site, make_irregular_site, SiteInfo
from hybrid.log import opt_logger as logger
from hybrid.sites import locations
from hybrid.keys import set_developer_nrel_gov_key
from hybrid.layout.plot_tools import *
from parametrized_optimization_driver import ParametrizedOptimizationDriver
from hybrid_optimization_problem import HybridOptimizationProblem
from hybrid_parametrization import HybridParametrization
np.set_printoptions(precision=2, threshold=10000, linewidth=240)
# Set API key
load_dotenv()
NREL_API_KEY = os.getenv("NREL_API_KEY")
set_developer_nrel_gov_key(NREL_API_KEY) # Set this key manually here if you are not setting it using the .env
def run(default_config: {}) -> None:
config, output_path, run_name = setup_run(default_config)
recorder = DataRecorder.make_data_recorder(output_path)
max_evaluations = config['max_evaluations']
location_index = config['location']
location = locations[location_index]
site = config['site']
site_data = None
if site == 'circular':
site_data = make_circular_site(lat=location[0], lon=location[1], elev=location[2])
elif site == 'irregular':
site_data = make_irregular_site(lat=location[0], lon=location[1], elev=location[2])
else:
raise Exception("Unknown site '" + site + "'")
site_info = SiteInfo(site_data)
inner_problem = HybridOptimizationProblem(site_info, config['num_turbines'], config['solar_capacity'])
problem = HybridParametrization(inner_problem)
optimizer = ParametrizedOptimizationDriver(problem, recorder=recorder, **config['optimizer_config'])
figure = plt.figure(1)
axes = figure.add_subplot(111)
axes.set_aspect('equal')
plt.grid()
plt.tick_params(which='both', labelsize=15)
plt.xlabel('x (m)', fontsize=15)
plt.ylabel('y (m)', fontsize=15)
site_info.plot()
score, evaluation, best_solution = optimizer.central_solution()
score, evaluation = problem.objective(best_solution) if score is None else score
print(-1, ' ', score, evaluation)
print('setup 1')
num_substeps = 1
figure, axes = plt.subplots(dpi=200)
axes.set_aspect(1)
animation_writer = PillowWriter(2 * num_substeps)
animation_writer.setup(figure, os.path.join(output_path, 'trajectory.gif'), dpi=200)
print('setup 2')
_, _, central_solution = optimizer.central_solution()
print('setup 3')
bounds = problem.inner_problem.site_info.polygon.bounds
site_sw_bound = np.array([bounds[0], bounds[1]])
site_ne_bound = np.array([bounds[2], bounds[3]])
site_center = .5 * (site_sw_bound + site_ne_bound)
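    # Size the plot window from the site bounds, padded by 30% so the whole layout stays in frame.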
max_delta = max(bounds[2] - bounds[0], bounds[3] - bounds[1])
reach = (max_delta / 2) * 1.3
min_plot_bound = site_center - reach
max_plot_bound = site_center + reach
print('setup 4')
best_score, best_evaluation, best_solution = 0.0, 0.0, None
def plot_candidate(candidate):
nonlocal best_score, best_evaluation, best_solution
axes.cla()
axes.set(xlim=(min_plot_bound[0], max_plot_bound[0]), ylim=(min_plot_bound[1], max_plot_bound[1]))
wind_color = (153 / 255, 142 / 255, 195 / 255)
solar_color = (241 / 255, 163 / 255, 64 / 255)
central_color = (.5, .5, .5)
conforming_candidate, _, __ = problem.make_conforming_candidate_and_get_penalty(candidate)
problem.plot_candidate(conforming_candidate, figure, axes, central_color, central_color, alpha=.7)
if best_solution is not None:
conforming_best, _, __ = problem.make_conforming_candidate_and_get_penalty(best_solution)
problem.plot_candidate(conforming_best, figure, axes, wind_color, solar_color, alpha=1.0)
axes.set_xlabel('Best Solution AEP: {}'.format(best_evaluation))
else:
axes.set_xlabel('')
axes.legend([
Line2D([0], [0], color=wind_color, lw=8),
Line2D([0], [0], color=solar_color, lw=8),
Line2D([0], [0], color=central_color, lw=8),
],
['Wind Layout', 'Solar Layout', 'Mean Search Vector'],
loc='lower left')
animation_writer.grab_frame()
print('plot candidate')
plot_candidate(central_solution)
central_prev = central_solution
# TODO: make a smooth transition between points
# TODO: plot exclusion zones
print('begin')
while optimizer.num_evaluations() < max_evaluations:
print('step start')
logger.info("Starting step, num evals {}".format(optimizer.num_evaluations()))
optimizer.step()
print('step end')
proportion = min(1.0, optimizer.num_evaluations() / max_evaluations)
g = 1.0 * proportion
b = 1.0 - g
a = .5
color = (b, g, b)
best_score, best_evaluation, best_solution = optimizer.best_solution()
central_score, central_evaluation, central_solution = optimizer.central_solution()
a1 = optimizer.converter.convert_from(central_prev)
b1 = optimizer.converter.convert_from(central_solution)
a = np.array(a1, dtype=np.float64)
b = np.array(b1, dtype=np.float64)
for i in range(num_substeps):
p = (i + 1) / num_substeps
c = (1 - p) * a + p * b
candidate = optimizer.converter.convert_to(c)
plot_candidate(candidate)
central_prev = central_solution
print(optimizer.num_iterations(), ' ', optimizer.num_evaluations(), best_score, best_evaluation)
animation_writer.finish()
optimizer.close()
print("Results and animation written to " + os.path.abspath(output_path))
default_config = {
'name': 't2',
'location': 1,
'site': 'irregular',
'solar_capacity': 50000, # kW
'num_turbines': 50, #
'max_evaluations': 20,
'optimizer_config': {
'method': 'CMA-ES',
'nprocs': 1,
'generation_size': 5,
'selection_proportion': .33,
'prior_scale': 1.0,
'prior_params': {
# "grid_angle": {
# "mu": 0.1
# }
}
}
}
run(default_config)
| [] | [] | ["NREL_API_KEY"] | [] | ["NREL_API_KEY"] | python | 1 | 0 | |
enterprise/internal/batches/webhooks/github_test.go | package webhooks
import (
"bytes"
"context"
"database/sql"
"encoding/json"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"path"
"path/filepath"
"strings"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/sourcegraph/sourcegraph/cmd/frontend/webhooks"
"github.com/sourcegraph/sourcegraph/enterprise/internal/batches/sources"
"github.com/sourcegraph/sourcegraph/enterprise/internal/batches/store"
"github.com/sourcegraph/sourcegraph/enterprise/internal/batches/syncer"
ct "github.com/sourcegraph/sourcegraph/enterprise/internal/batches/testing"
btypes "github.com/sourcegraph/sourcegraph/enterprise/internal/batches/types"
"github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/extsvc"
"github.com/sourcegraph/sourcegraph/internal/httptestutil"
"github.com/sourcegraph/sourcegraph/internal/rcache"
"github.com/sourcegraph/sourcegraph/internal/repos"
"github.com/sourcegraph/sourcegraph/internal/repoupdater/protocol"
"github.com/sourcegraph/sourcegraph/internal/timeutil"
"github.com/sourcegraph/sourcegraph/internal/types"
"github.com/sourcegraph/sourcegraph/schema"
)
// Run from integration_test.go
func testGitHubWebhook(db *sql.DB, userID int32) func(*testing.T) {
return func(t *testing.T) {
now := timeutil.Now()
clock := func() time.Time { return now }
ctx := context.Background()
rcache.SetupForTest(t)
ct.TruncateTables(t, db, "changeset_events", "changesets")
cf, save := httptestutil.NewGitHubRecorderFactory(t, *update, "github-webhooks")
defer save()
secret := "secret"
token := os.Getenv("GITHUB_TOKEN")
if token == "" {
token = "no-GITHUB_TOKEN-set"
}
repoStore := database.Repos(db)
esStore := database.ExternalServices(db)
extSvc := &types.ExternalService{
Kind: extsvc.KindGitHub,
DisplayName: "GitHub",
Config: ct.MarshalJSON(t, &schema.GitHubConnection{
Url: "https://github.com",
Token: token,
Repos: []string{"sourcegraph/sourcegraph"},
Webhooks: []*schema.GitHubWebhook{{Org: "sourcegraph", Secret: secret}},
}),
}
err := esStore.Upsert(ctx, extSvc)
if err != nil {
			t.Fatal(err)
}
githubSrc, err := repos.NewGithubSource(extSvc, cf)
if err != nil {
			t.Fatal(err)
}
githubRepo, err := githubSrc.GetRepo(ctx, "sourcegraph/sourcegraph")
if err != nil {
t.Fatal(err)
}
err = repoStore.Create(ctx, githubRepo)
if err != nil {
t.Fatal(err)
}
s := store.NewWithClock(db, clock)
sourcer := sources.NewSourcer(cf)
spec := &btypes.BatchSpec{
NamespaceUserID: userID,
UserID: userID,
}
if err := s.CreateBatchSpec(ctx, spec); err != nil {
t.Fatal(err)
}
batchChange := &btypes.BatchChange{
Name: "Test batch changes",
Description: "Testing THE WEBHOOKS",
InitialApplierID: userID,
NamespaceUserID: userID,
LastApplierID: userID,
LastAppliedAt: clock(),
BatchSpecID: spec.ID,
}
err = s.CreateBatchChange(ctx, batchChange)
if err != nil {
t.Fatal(err)
}
// NOTE: Your sample payload should apply to a PR with the number matching below
changeset := &btypes.Changeset{
RepoID: githubRepo.ID,
ExternalID: "10156",
ExternalServiceType: githubRepo.ExternalRepo.ServiceType,
BatchChanges: []btypes.BatchChangeAssoc{{BatchChangeID: batchChange.ID}},
}
err = s.CreateChangeset(ctx, changeset)
if err != nil {
t.Fatal(err)
}
// Set up mocks to prevent the diffstat computation from trying to
// use a real gitserver, and so we can control what diff is used to
// create the diffstat.
state := ct.MockChangesetSyncState(&protocol.RepoInfo{
Name: "repo",
VCS: protocol.VCSInfo{URL: "https://example.com/repo/"},
})
defer state.Unmock()
src, err := sourcer.ForRepo(ctx, s, githubRepo)
if err != nil {
t.Fatal(err)
}
err = syncer.SyncChangeset(ctx, s, src, githubRepo, changeset)
if err != nil {
t.Fatal(err)
}
hook := NewGitHubWebhook(s)
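		// Each fixture file bundles recorded webhook payloads with the
		// changeset events they are expected to produce.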
fixtureFiles, err := filepath.Glob("testdata/fixtures/webhooks/github/*.json")
if err != nil {
t.Fatal(err)
}
for _, fixtureFile := range fixtureFiles {
_, name := path.Split(fixtureFile)
name = strings.TrimSuffix(name, ".json")
t.Run(name, func(t *testing.T) {
ct.TruncateTables(t, db, "changeset_events")
tc := loadWebhookTestCase(t, fixtureFile)
// Send all events twice to ensure we are idempotent
for i := 0; i < 2; i++ {
for _, event := range tc.Payloads {
handler := webhooks.GitHubWebhook{
ExternalServices: esStore,
}
hook.Register(&handler)
u := extsvc.WebhookURL(extsvc.TypeGitHub, extSvc.ID, "https://example.com/")
req, err := http.NewRequest("POST", u, bytes.NewReader(event.Data))
if err != nil {
t.Fatal(err)
}
req.Header.Set("X-Github-Event", event.PayloadType)
req.Header.Set("X-Hub-Signature", sign(t, event.Data, []byte(secret)))
rec := httptest.NewRecorder()
handler.ServeHTTP(rec, req)
resp := rec.Result()
if resp.StatusCode != http.StatusOK {
t.Fatalf("Non 200 code: %v", resp.StatusCode)
}
}
}
have, _, err := s.ListChangesetEvents(ctx, store.ListChangesetEventsOpts{})
if err != nil {
t.Fatal(err)
}
// Overwrite and format test case
if *update {
tc.ChangesetEvents = have
data, err := json.MarshalIndent(tc, " ", " ")
if err != nil {
t.Fatal(err)
}
err = ioutil.WriteFile(fixtureFile, data, 0666)
if err != nil {
t.Fatal(err)
}
}
opts := []cmp.Option{
cmpopts.IgnoreFields(btypes.ChangesetEvent{}, "CreatedAt"),
cmpopts.IgnoreFields(btypes.ChangesetEvent{}, "UpdatedAt"),
}
if diff := cmp.Diff(tc.ChangesetEvents, have, opts...); diff != "" {
t.Error(diff)
}
})
}
}
}
| [
"\"GITHUB_TOKEN\""
] | [] | [
"GITHUB_TOKEN"
] | [] | ["GITHUB_TOKEN"] | go | 1 | 0 | |
example/example.go | package main
import (
"net/http"
"os"
"github.com/davidbanham/recaptcha"
)
var recaptchaClient recaptcha.Client
func main() {
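	// Both RECAPTCHA_SECRET (used here) and RECAPTCHA_SITE_KEY (used when
	// rendering the form below) must be set in the environment.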
recaptchaClient = recaptcha.New(os.Getenv("RECAPTCHA_SECRET"))
http.HandleFunc("/", ServeForm)
http.HandleFunc("/verified", CheckResponse)
http.ListenAndServe(":8080", nil)
}
func ServeForm(w http.ResponseWriter, r *http.Request) {
w.Write([]byte(`<html>
<head>
<title>reCAPTCHA demo: Simple page</title>
<script src="https://www.google.com/recaptcha/api.js" async defer></script>
</head>
<body>
<form action="?" method="POST">
<div class="g-recaptcha" data-sitekey="` + os.Getenv("RECAPTCHA_SITE_KEY") + `"></div>
<br/>
<input type="submit" value="Submit">
</form>
</body>
</html>`))
}
func CheckResponse(w http.ResponseWriter, r *http.Request) {
r.ParseForm()
verified, err := recaptchaClient.Verify(r.FormValue("g-recaptcha-response"))
if err != nil {
w.WriteHeader(http.StatusBadRequest)
w.Write([]byte(err.Error()))
return
}
if verified {
w.Write([]byte("yeah!"))
} else {
w.Write([]byte("nah"))
}
}
| [
"\"RECAPTCHA_SECRET\"",
"\"RECAPTCHA_SITE_KEY\""
] | [] | [
"RECAPTCHA_SITE_KEY",
"RECAPTCHA_SECRET"
] | [] | ["RECAPTCHA_SITE_KEY", "RECAPTCHA_SECRET"] | go | 2 | 0 | |
watchmate_v2/manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'watchmate.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
misc/sync_worlds_toa.py | from selenium import webdriver
import pyodbc
import os
server = os.getenv('SQLCONNSTR_SERVER')
database = os.getenv('SQLCONNSTR_DATABASE')
username = os.getenv('SQLCONNSTR_USERNAME')
password = os.getenv('SQLCONNSTR_PASSWORD')
driver = '{ODBC Driver 17 for SQL Server}'
sqlConn = pyodbc.connect('DRIVER='+driver+';SERVER='+server +
';PORT=1433;DATABASE='+database+';UID='+username+';PWD=' + password+';TDS_VERSION=8.0')
sqlCursor = sqlConn.cursor()
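# Note: 'driver' is reused here for the Selenium WebDriver; the ODBC driver
# string above has already been baked into the connection string.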
driver = webdriver.Edge()
division = 1
if division == 0:
driver.get("https://theorangealliance.org/events/1819-CMP-HOU1")
else:
driver.get("https://theorangealliance.org/events/1819-CMP-HOU2")
teamEntries = driver.find_elements_by_tag_name("toa-team-item")
for teamEntry in teamEntries:
teamNumber = teamEntry.find_elements_by_tag_name("div")[1].text
teamName = teamEntry.find_elements_by_tag_name("span")[0].text
if "Team #" in teamName:
teamName = "["+teamNumber+"]"
print(teamNumber+", "+teamName)
# exists = False
# if len(sqlCursor.execute("SELECT * FROM Teams WHERE TeamNumber="+str(teamNumber)).fetchall()) > 0:
# exists = True
# if not exists:
# sqlCursor.execute(
# "INSERT Teams (TeamNumber, TeamName) VALUES ("+teamNumber+", "+"'"+teamName+"'"+")")
exists = False
if len(sqlCursor.execute("SELECT * FROM HoustonWorldChampionshipTeams WHERE TeamNumber="+str(teamNumber)).fetchall()) > 0:
exists = True
if not exists:
sqlCursor.execute(
"INSERT HoustonWorldChampionshipTeams (TeamNumber, Division) VALUES ("+teamNumber+", "+str(division)+")")
sqlConn.commit()
| [] | [] | [
"SQLCONNSTR_DATABASE",
"SQLCONNSTR_PASSWORD",
"SQLCONNSTR_SERVER",
"SQLCONNSTR_USERNAME"
] | [] | ["SQLCONNSTR_DATABASE", "SQLCONNSTR_PASSWORD", "SQLCONNSTR_SERVER", "SQLCONNSTR_USERNAME"] | python | 4 | 0 | |
tests/test_functional.py | import errno
from http import client as httplib
import logging
import multiprocessing
import os
import signal
import socket
import string
import subprocess
import sys
import time
import unittest
from waitress import server
from waitress.compat import WIN
from waitress.utilities import cleanup_unix_socket
dn = os.path.dirname
here = dn(__file__)
class NullHandler(logging.Handler): # pragma: no cover
"""A logging handler that swallows all emitted messages."""
def emit(self, record):
pass
def start_server(app, svr, queue, **kwargs): # pragma: no cover
"""Run a fixture application."""
logging.getLogger("waitress").addHandler(NullHandler())
try_register_coverage()
svr(app, queue, **kwargs).run()
def try_register_coverage(): # pragma: no cover
# Hack around multiprocessing exiting early and not triggering coverage's
# atexit handler by always registering a signal handler
if "COVERAGE_PROCESS_START" in os.environ:
def sigterm(*args):
sys.exit(0)
signal.signal(signal.SIGTERM, sigterm)
class FixtureTcpWSGIServer(server.TcpWSGIServer):
"""A version of TcpWSGIServer that relays back what it's bound to."""
family = socket.AF_INET # Testing
def __init__(self, application, queue, **kw): # pragma: no cover
        # Coverage doesn't see this as it's run in a separate process.
kw["host"] = "127.0.0.1"
kw["port"] = 0 # Bind to any available port.
super().__init__(application, **kw)
host, port = self.socket.getsockname()
if os.name == "nt":
host = "127.0.0.1"
queue.put((host, port))
class SubprocessTests:
exe = sys.executable
server = None
def start_subprocess(self, target, **kw):
# Spawn a server process.
self.queue = multiprocessing.Queue()
if "COVERAGE_RCFILE" in os.environ:
os.environ["COVERAGE_PROCESS_START"] = os.environ["COVERAGE_RCFILE"]
if not WIN:
ctx = multiprocessing.get_context("fork")
else:
ctx = multiprocessing.get_context("spawn")
self.proc = ctx.Process(
target=start_server,
args=(target, self.server, self.queue),
kwargs=kw,
)
self.proc.start()
if self.proc.exitcode is not None: # pragma: no cover
raise RuntimeError("%s didn't start" % str(target))
# Get the socket the server is listening on.
self.bound_to = self.queue.get(timeout=5)
self.sock = self.create_socket()
def stop_subprocess(self):
if self.proc.exitcode is None:
self.proc.terminate()
self.sock.close()
        # This gives us one FD back ...
self.proc.join()
self.proc.close()
self.queue.close()
self.queue.join_thread()
# The following is for the benefit of PyPy 3, for some reason it is
# holding on to some resources way longer than necessary causing tests
        # to fail with file descriptor exceeded errors on macOS which defaults
        # to 256 file descriptors per process. While we could use ulimit to
# increase the limits before running tests, this works as well and
# means we don't need to remember to do that.
import gc
gc.collect()
def assertline(self, line, status, reason, version):
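        """Assert that an HTTP status line matches the expected status, reason, and version."""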
v, s, r = (x.strip() for x in line.split(None, 2))
self.assertEqual(s, status.encode("latin-1"))
self.assertEqual(r, reason.encode("latin-1"))
self.assertEqual(v, version.encode("latin-1"))
def create_socket(self):
return socket.socket(self.server.family, socket.SOCK_STREAM)
def connect(self):
self.sock.connect(self.bound_to)
def make_http_connection(self):
raise NotImplementedError # pragma: no cover
def send_check_error(self, to_send):
self.sock.send(to_send)
class TcpTests(SubprocessTests):
server = FixtureTcpWSGIServer
def make_http_connection(self):
return httplib.HTTPConnection(*self.bound_to)
class SleepyThreadTests(TcpTests, unittest.TestCase):
    # test that sleepy thread doesn't block other requests
def setUp(self):
from tests.fixtureapps import sleepy
self.start_subprocess(sleepy.app)
def tearDown(self):
self.stop_subprocess()
def test_it(self):
getline = os.path.join(here, "fixtureapps", "getline.py")
cmds = (
[self.exe, getline, "http://%s:%d/sleepy" % self.bound_to],
[self.exe, getline, "http://%s:%d/" % self.bound_to],
)
r, w = os.pipe()
procs = []
for cmd in cmds:
procs.append(subprocess.Popen(cmd, stdout=w))
time.sleep(3)
for proc in procs:
if proc.returncode is not None: # pragma: no cover
proc.terminate()
proc.wait()
        # the notsleepy response should always be returned first (the sleepy
        # handler sleeps for 2 seconds before returning, so the notsleepy
        # response can be processed in the meantime)
result = os.read(r, 10000)
os.close(r)
os.close(w)
self.assertEqual(result, b"notsleepy returnedsleepy returned")
class EchoTests:
def setUp(self):
from tests.fixtureapps import echo
self.start_subprocess(
echo.app,
trusted_proxy="*",
trusted_proxy_count=1,
trusted_proxy_headers={"x-forwarded-for", "x-forwarded-proto"},
clear_untrusted_proxy_headers=True,
)
def tearDown(self):
self.stop_subprocess()
def _read_echo(self, fp):
from tests.fixtureapps import echo
line, headers, body = read_http(fp)
return line, headers, echo.parse_response(body)
def test_date_and_server(self):
to_send = b"GET / HTTP/1.0\r\nContent-Length: 0\r\n\r\n"
self.connect()
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
line, headers, echo = self._read_echo(fp)
self.assertline(line, "200", "OK", "HTTP/1.0")
self.assertEqual(headers.get("server"), "waitress")
self.assertTrue(headers.get("date"))
def test_bad_host_header(self):
# https://corte.si/posts/code/pathod/pythonservers/index.html
to_send = b"GET / HTTP/1.0\r\n Host: 0\r\n\r\n"
self.connect()
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
line, headers, response_body = read_http(fp)
self.assertline(line, "400", "Bad Request", "HTTP/1.0")
self.assertEqual(headers.get("server"), "waitress")
self.assertTrue(headers.get("date"))
def test_send_with_body(self):
to_send = b"GET / HTTP/1.0\r\nContent-Length: 5\r\n\r\n"
to_send += b"hello"
self.connect()
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
line, headers, echo = self._read_echo(fp)
self.assertline(line, "200", "OK", "HTTP/1.0")
self.assertEqual(echo.content_length, "5")
self.assertEqual(echo.body, b"hello")
def test_send_empty_body(self):
to_send = b"GET / HTTP/1.0\r\nContent-Length: 0\r\n\r\n"
self.connect()
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
line, headers, echo = self._read_echo(fp)
self.assertline(line, "200", "OK", "HTTP/1.0")
self.assertEqual(echo.content_length, "0")
self.assertEqual(echo.body, b"")
def test_multiple_requests_with_body(self):
orig_sock = self.sock
for x in range(3):
self.sock = self.create_socket()
self.test_send_with_body()
self.sock.close()
self.sock = orig_sock
def test_multiple_requests_without_body(self):
orig_sock = self.sock
for x in range(3):
self.sock = self.create_socket()
self.test_send_empty_body()
self.sock.close()
self.sock = orig_sock
def test_without_crlf(self):
data = b"Echo\r\nthis\r\nplease"
s = (
b"GET / HTTP/1.0\r\n"
b"Connection: close\r\n"
b"Content-Length: %d\r\n"
b"\r\n"
b"%s" % (len(data), data)
)
self.connect()
self.sock.send(s)
with self.sock.makefile("rb", 0) as fp:
line, headers, echo = self._read_echo(fp)
self.assertline(line, "200", "OK", "HTTP/1.0")
self.assertEqual(int(echo.content_length), len(data))
self.assertEqual(len(echo.body), len(data))
self.assertEqual(echo.body, (data))
def test_large_body(self):
# 1024 characters.
body = b"This string has 32 characters.\r\n" * 32
s = b"GET / HTTP/1.0\r\nContent-Length: %d\r\n\r\n%s" % (len(body), body)
self.connect()
self.sock.send(s)
with self.sock.makefile("rb", 0) as fp:
line, headers, echo = self._read_echo(fp)
self.assertline(line, "200", "OK", "HTTP/1.0")
self.assertEqual(echo.content_length, "1024")
self.assertEqual(echo.body, body)
def test_many_clients(self):
conns = []
for n in range(50):
h = self.make_http_connection()
h.request("GET", "/", headers={"Accept": "text/plain"})
conns.append(h)
responses = []
for h in conns:
response = h.getresponse()
self.assertEqual(response.status, 200)
responses.append(response)
for response in responses:
response.read()
for h in conns:
h.close()
def test_chunking_request_without_content(self):
header = b"GET / HTTP/1.1\r\nTransfer-Encoding: chunked\r\n\r\n"
self.connect()
self.sock.send(header)
self.sock.send(b"0\r\n\r\n")
with self.sock.makefile("rb", 0) as fp:
line, headers, echo = self._read_echo(fp)
self.assertline(line, "200", "OK", "HTTP/1.1")
self.assertEqual(echo.body, b"")
self.assertEqual(echo.content_length, "0")
self.assertFalse("transfer-encoding" in headers)
def test_chunking_request_with_content(self):
control_line = b"20;\r\n" # 20 hex = 32 dec
s = b"This string has 32 characters.\r\n"
expected = s * 12
header = b"GET / HTTP/1.1\r\nTransfer-Encoding: chunked\r\n\r\n"
self.connect()
self.sock.send(header)
with self.sock.makefile("rb", 0) as fp:
for n in range(12):
self.sock.send(control_line)
self.sock.send(s)
self.sock.send(b"\r\n") # End the chunk
self.sock.send(b"0\r\n\r\n")
line, headers, echo = self._read_echo(fp)
self.assertline(line, "200", "OK", "HTTP/1.1")
self.assertEqual(echo.body, expected)
self.assertEqual(echo.content_length, str(len(expected)))
self.assertFalse("transfer-encoding" in headers)
def test_broken_chunked_encoding(self):
control_line = b"20;\r\n" # 20 hex = 32 dec
s = b"This string has 32 characters.\r\n"
to_send = b"GET / HTTP/1.1\r\nTransfer-Encoding: chunked\r\n\r\n"
to_send += control_line + s + b"\r\n"
# garbage in input
to_send += b"garbage\r\n"
self.connect()
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
line, headers, response_body = read_http(fp)
# receiver caught garbage and turned it into a 400
self.assertline(line, "400", "Bad Request", "HTTP/1.1")
cl = int(headers["content-length"])
self.assertEqual(cl, len(response_body))
self.assertEqual(
sorted(headers.keys()),
["connection", "content-length", "content-type", "date", "server"],
)
self.assertEqual(headers["content-type"], "text/plain")
# connection has been closed
self.send_check_error(to_send)
self.assertRaises(ConnectionClosed, read_http, fp)
def test_broken_chunked_encoding_missing_chunk_end(self):
control_line = b"20;\r\n" # 20 hex = 32 dec
s = b"This string has 32 characters.\r\n"
to_send = b"GET / HTTP/1.1\r\nTransfer-Encoding: chunked\r\n\r\n"
to_send += control_line + s
# garbage in input
to_send += b"garbage"
self.connect()
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
line, headers, response_body = read_http(fp)
# receiver caught garbage and turned it into a 400
self.assertline(line, "400", "Bad Request", "HTTP/1.1")
cl = int(headers["content-length"])
self.assertEqual(cl, len(response_body))
self.assertTrue(b"Chunk not properly terminated" in response_body)
self.assertEqual(
sorted(headers.keys()),
["connection", "content-length", "content-type", "date", "server"],
)
self.assertEqual(headers["content-type"], "text/plain")
# connection has been closed
self.send_check_error(to_send)
self.assertRaises(ConnectionClosed, read_http, fp)
def test_keepalive_http_10(self):
# Handling of Keep-Alive within HTTP 1.0
data = b"Default: Don't keep me alive"
s = b"GET / HTTP/1.0\r\nContent-Length: %d\r\n\r\n%s" % (len(data), data)
self.connect()
self.sock.send(s)
response = httplib.HTTPResponse(self.sock)
response.begin()
self.assertEqual(int(response.status), 200)
connection = response.getheader("Connection", "")
# We sent no Connection: Keep-Alive header
# Connection: close (or no header) is default.
self.assertTrue(connection != "Keep-Alive")
def test_keepalive_http10_explicit(self):
# If header Connection: Keep-Alive is explicitly sent,
        # we want to keep the connection open, and we also need to return
# the corresponding header
data = b"Keep me alive"
s = (
b"GET / HTTP/1.0\r\n"
b"Connection: Keep-Alive\r\n"
b"Content-Length: %d\r\n"
b"\r\n"
b"%s" % (len(data), data)
)
self.connect()
self.sock.send(s)
response = httplib.HTTPResponse(self.sock)
response.begin()
self.assertEqual(int(response.status), 200)
connection = response.getheader("Connection", "")
self.assertEqual(connection, "Keep-Alive")
def test_keepalive_http_11(self):
# Handling of Keep-Alive within HTTP 1.1
# All connections are kept alive, unless stated otherwise
data = b"Default: Keep me alive"
s = b"GET / HTTP/1.1\r\nContent-Length: %d\r\n\r\n%s" % (len(data), data)
self.connect()
self.sock.send(s)
response = httplib.HTTPResponse(self.sock)
response.begin()
self.assertEqual(int(response.status), 200)
self.assertTrue(response.getheader("connection") != "close")
def test_keepalive_http11_explicit(self):
# Explicitly set keep-alive
data = b"Default: Keep me alive"
s = (
b"GET / HTTP/1.1\r\n"
b"Connection: keep-alive\r\n"
b"Content-Length: %d\r\n"
b"\r\n"
b"%s" % (len(data), data)
)
self.connect()
self.sock.send(s)
response = httplib.HTTPResponse(self.sock)
response.begin()
self.assertEqual(int(response.status), 200)
self.assertTrue(response.getheader("connection") != "close")
def test_keepalive_http11_connclose(self):
# specifying Connection: close explicitly
data = b"Don't keep me alive"
s = (
b"GET / HTTP/1.1\r\n"
b"Connection: close\r\n"
b"Content-Length: %d\r\n"
b"\r\n"
b"%s" % (len(data), data)
)
self.connect()
self.sock.send(s)
response = httplib.HTTPResponse(self.sock)
response.begin()
self.assertEqual(int(response.status), 200)
self.assertEqual(response.getheader("connection"), "close")
def test_proxy_headers(self):
to_send = (
b"GET / HTTP/1.0\r\n"
b"Content-Length: 0\r\n"
b"Host: www.google.com:8080\r\n"
b"X-Forwarded-For: 192.168.1.1\r\n"
b"X-Forwarded-Proto: https\r\n"
b"X-Forwarded-Port: 5000\r\n\r\n"
)
self.connect()
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
line, headers, echo = self._read_echo(fp)
self.assertline(line, "200", "OK", "HTTP/1.0")
self.assertEqual(headers.get("server"), "waitress")
self.assertTrue(headers.get("date"))
self.assertIsNone(echo.headers.get("X_FORWARDED_PORT"))
self.assertEqual(echo.headers["HOST"], "www.google.com:8080")
self.assertEqual(echo.scheme, "https")
self.assertEqual(echo.remote_addr, "192.168.1.1")
self.assertEqual(echo.remote_host, "192.168.1.1")
class PipeliningTests:
def setUp(self):
from tests.fixtureapps import echo
self.start_subprocess(echo.app_body_only)
def tearDown(self):
self.stop_subprocess()
def test_pipelining(self):
s = (
b"GET / HTTP/1.0\r\n"
b"Connection: %s\r\n"
b"Content-Length: %d\r\n"
b"\r\n"
b"%s"
)
to_send = b""
count = 25
for n in range(count):
body = b"Response #%d\r\n" % (n + 1)
if n + 1 < count:
conn = b"keep-alive"
else:
conn = b"close"
to_send += s % (conn, len(body), body)
self.connect()
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
for n in range(count):
expect_body = b"Response #%d\r\n" % (n + 1)
line = fp.readline() # status line
version, status, reason = (x.strip() for x in line.split(None, 2))
headers = parse_headers(fp)
length = int(headers.get("content-length")) or None
response_body = fp.read(length)
self.assertEqual(int(status), 200)
self.assertEqual(length, len(response_body))
self.assertEqual(response_body, expect_body)
class ExpectContinueTests:
def setUp(self):
from tests.fixtureapps import echo
self.start_subprocess(echo.app_body_only)
def tearDown(self):
self.stop_subprocess()
def test_expect_continue(self):
# specifying Connection: close explicitly
data = b"I have expectations"
to_send = (
b"GET / HTTP/1.1\r\n"
b"Connection: close\r\n"
b"Content-Length: %d\r\n"
b"Expect: 100-continue\r\n"
b"\r\n"
b"%s" % (len(data), data)
)
self.connect()
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
line = fp.readline() # continue status line
version, status, reason = (x.strip() for x in line.split(None, 2))
self.assertEqual(int(status), 100)
self.assertEqual(reason, b"Continue")
self.assertEqual(version, b"HTTP/1.1")
fp.readline() # blank line
line = fp.readline() # next status line
version, status, reason = (x.strip() for x in line.split(None, 2))
headers = parse_headers(fp)
length = int(headers.get("content-length")) or None
response_body = fp.read(length)
self.assertEqual(int(status), 200)
self.assertEqual(length, len(response_body))
self.assertEqual(response_body, data)
class BadContentLengthTests:
def setUp(self):
from tests.fixtureapps import badcl
self.start_subprocess(badcl.app)
def tearDown(self):
self.stop_subprocess()
def test_short_body(self):
# check to see if server closes connection when body is too short
# for cl header
to_send = (
b"GET /short_body HTTP/1.0\r\n"
b"Connection: Keep-Alive\r\n"
b"Content-Length: 0\r\n"
b"\r\n"
)
self.connect()
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
line = fp.readline() # status line
version, status, reason = (x.strip() for x in line.split(None, 2))
headers = parse_headers(fp)
content_length = int(headers.get("content-length"))
response_body = fp.read(content_length)
self.assertEqual(int(status), 200)
self.assertNotEqual(content_length, len(response_body))
self.assertEqual(len(response_body), content_length - 1)
self.assertEqual(response_body, b"abcdefghi")
# remote closed connection (despite keepalive header); not sure why
# first send succeeds
self.send_check_error(to_send)
self.assertRaises(ConnectionClosed, read_http, fp)
def test_long_body(self):
        # check server doesn't close connection when body is too long
# for cl header
to_send = (
b"GET /long_body HTTP/1.0\r\n"
b"Connection: Keep-Alive\r\n"
b"Content-Length: 0\r\n"
b"\r\n"
)
self.connect()
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
line = fp.readline() # status line
version, status, reason = (x.strip() for x in line.split(None, 2))
headers = parse_headers(fp)
content_length = int(headers.get("content-length")) or None
response_body = fp.read(content_length)
self.assertEqual(int(status), 200)
self.assertEqual(content_length, len(response_body))
self.assertEqual(response_body, b"abcdefgh")
# remote does not close connection (keepalive header)
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
line = fp.readline() # status line
version, status, reason = (x.strip() for x in line.split(None, 2))
headers = parse_headers(fp)
content_length = int(headers.get("content-length")) or None
response_body = fp.read(content_length)
self.assertEqual(int(status), 200)
class NoContentLengthTests:
def setUp(self):
from tests.fixtureapps import nocl
self.start_subprocess(nocl.app)
def tearDown(self):
self.stop_subprocess()
def test_http10_generator(self):
body = string.ascii_letters.encode("latin-1")
to_send = (
b"GET / HTTP/1.0\r\n"
b"Connection: Keep-Alive\r\n"
b"Content-Length: %d\r\n\r\n" % len(body)
)
to_send += body
self.connect()
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
line, headers, response_body = read_http(fp)
self.assertline(line, "200", "OK", "HTTP/1.0")
self.assertEqual(headers.get("content-length"), None)
self.assertEqual(headers.get("connection"), "close")
self.assertEqual(response_body, body)
# remote closed connection (despite keepalive header), because
# generators cannot have a content-length divined
self.send_check_error(to_send)
self.assertRaises(ConnectionClosed, read_http, fp)
def test_http10_list(self):
body = string.ascii_letters.encode("latin-1")
to_send = (
b"GET /list HTTP/1.0\r\n"
b"Connection: Keep-Alive\r\n"
b"Content-Length: %d\r\n\r\n" % len(body)
)
to_send += body
self.connect()
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
line, headers, response_body = read_http(fp)
self.assertline(line, "200", "OK", "HTTP/1.0")
self.assertEqual(headers["content-length"], str(len(body)))
self.assertEqual(headers.get("connection"), "Keep-Alive")
self.assertEqual(response_body, body)
# remote keeps connection open because it divined the content length
# from a length-1 list
self.sock.send(to_send)
line, headers, response_body = read_http(fp)
self.assertline(line, "200", "OK", "HTTP/1.0")
def test_http10_listlentwo(self):
body = string.ascii_letters.encode("latin-1")
to_send = (
b"GET /list_lentwo HTTP/1.0\r\n"
b"Connection: Keep-Alive\r\n"
b"Content-Length: %d\r\n\r\n" % len(body)
)
to_send += body
self.connect()
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
line, headers, response_body = read_http(fp)
self.assertline(line, "200", "OK", "HTTP/1.0")
self.assertEqual(headers.get("content-length"), None)
self.assertEqual(headers.get("connection"), "close")
self.assertEqual(response_body, body)
# remote closed connection (despite keepalive header), because
# lists of length > 1 cannot have their content length divined
self.send_check_error(to_send)
self.assertRaises(ConnectionClosed, read_http, fp)
def test_http11_generator(self):
body = string.ascii_letters
body = body.encode("latin-1")
to_send = b"GET / HTTP/1.1\r\nContent-Length: %d\r\n\r\n" % len(body)
to_send += body
self.connect()
self.sock.send(to_send)
with self.sock.makefile("rb") as fp:
line, headers, response_body = read_http(fp)
self.assertline(line, "200", "OK", "HTTP/1.1")
expected = b""
for chunk in chunks(body, 10):
expected += b"%s\r\n%s\r\n" % (
hex(len(chunk))[2:].upper().encode("latin-1"),
chunk,
)
expected += b"0\r\n\r\n"
self.assertEqual(response_body, expected)
# connection is always closed at the end of a chunked response
self.send_check_error(to_send)
self.assertRaises(ConnectionClosed, read_http, fp)
def test_http11_list(self):
body = string.ascii_letters.encode("latin-1")
to_send = b"GET /list HTTP/1.1\r\nContent-Length: %d\r\n\r\n" % len(body)
to_send += body
self.connect()
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
line, headers, response_body = read_http(fp)
self.assertline(line, "200", "OK", "HTTP/1.1")
self.assertEqual(headers["content-length"], str(len(body)))
self.assertEqual(response_body, body)
# remote keeps connection open because it divined the content length
# from a length-1 list
self.sock.send(to_send)
line, headers, response_body = read_http(fp)
self.assertline(line, "200", "OK", "HTTP/1.1")
def test_http11_listlentwo(self):
body = string.ascii_letters.encode("latin-1")
to_send = b"GET /list_lentwo HTTP/1.1\r\nContent-Length: %d\r\n\r\n" % len(body)
to_send += body
self.connect()
self.sock.send(to_send)
with self.sock.makefile("rb") as fp:
line, headers, response_body = read_http(fp)
self.assertline(line, "200", "OK", "HTTP/1.1")
expected = b""
for chunk in (body[:1], body[1:]):
expected += b"%s\r\n%s\r\n" % (
(hex(len(chunk))[2:].upper().encode("latin-1")),
chunk,
)
expected += b"0\r\n\r\n"
self.assertEqual(response_body, expected)
# connection is always closed at the end of a chunked response
self.send_check_error(to_send)
self.assertRaises(ConnectionClosed, read_http, fp)
class WriteCallbackTests:
def setUp(self):
from tests.fixtureapps import writecb
self.start_subprocess(writecb.app)
def tearDown(self):
self.stop_subprocess()
def test_short_body(self):
# check to see if server closes connection when body is too short
# for cl header
to_send = (
b"GET /short_body HTTP/1.0\r\n"
b"Connection: Keep-Alive\r\n"
b"Content-Length: 0\r\n"
b"\r\n"
)
self.connect()
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
line, headers, response_body = read_http(fp)
            # server trusts the content-length header (9)
self.assertline(line, "200", "OK", "HTTP/1.0")
cl = int(headers["content-length"])
self.assertEqual(cl, 9)
self.assertNotEqual(cl, len(response_body))
self.assertEqual(len(response_body), cl - 1)
self.assertEqual(response_body, b"abcdefgh")
# remote closed connection (despite keepalive header)
self.send_check_error(to_send)
self.assertRaises(ConnectionClosed, read_http, fp)
def test_long_body(self):
        # check server doesn't close connection when body is too long
# for cl header
to_send = (
b"GET /long_body HTTP/1.0\r\n"
b"Connection: Keep-Alive\r\n"
b"Content-Length: 0\r\n"
b"\r\n"
)
self.connect()
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
line, headers, response_body = read_http(fp)
content_length = int(headers.get("content-length")) or None
self.assertEqual(content_length, 9)
self.assertEqual(content_length, len(response_body))
self.assertEqual(response_body, b"abcdefghi")
# remote does not close connection (keepalive header)
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
line, headers, response_body = read_http(fp)
self.assertline(line, "200", "OK", "HTTP/1.0")
def test_equal_body(self):
        # check server doesn't close connection when body is equal to
# cl header
to_send = (
b"GET /equal_body HTTP/1.0\r\n"
b"Connection: Keep-Alive\r\n"
b"Content-Length: 0\r\n"
b"\r\n"
)
self.connect()
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
line, headers, response_body = read_http(fp)
content_length = int(headers.get("content-length")) or None
self.assertEqual(content_length, 9)
self.assertline(line, "200", "OK", "HTTP/1.0")
self.assertEqual(content_length, len(response_body))
self.assertEqual(response_body, b"abcdefghi")
# remote does not close connection (keepalive header)
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
line, headers, response_body = read_http(fp)
self.assertline(line, "200", "OK", "HTTP/1.0")
def test_no_content_length(self):
        # check what happens when there's no content-length header
to_send = (
b"GET /no_content_length HTTP/1.0\r\n"
b"Connection: Keep-Alive\r\n"
b"Content-Length: 0\r\n"
b"\r\n"
)
self.connect()
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
line = fp.readline() # status line
line, headers, response_body = read_http(fp)
content_length = headers.get("content-length")
self.assertEqual(content_length, None)
self.assertEqual(response_body, b"abcdefghi")
# remote closed connection (despite keepalive header)
self.send_check_error(to_send)
self.assertRaises(ConnectionClosed, read_http, fp)
class TooLargeTests:
toobig = 1050
def setUp(self):
from tests.fixtureapps import toolarge
self.start_subprocess(
toolarge.app, max_request_header_size=1000, max_request_body_size=1000
)
def tearDown(self):
self.stop_subprocess()
def test_request_headers_too_large_http11(self):
body = b""
bad_headers = b"X-Random-Header: 100\r\n" * int(self.toobig / 20)
to_send = b"GET / HTTP/1.1\r\nContent-Length: 0\r\n"
to_send += bad_headers
to_send += b"\r\n\r\n"
to_send += body
self.connect()
self.sock.send(to_send)
with self.sock.makefile("rb") as fp:
response_line, headers, response_body = read_http(fp)
self.assertline(
response_line, "431", "Request Header Fields Too Large", "HTTP/1.0"
)
self.assertEqual(headers["connection"], "close")
def test_request_body_too_large_with_wrong_cl_http10(self):
body = b"a" * self.toobig
to_send = b"GET / HTTP/1.0\r\nContent-Length: 5\r\n\r\n"
to_send += body
self.connect()
self.sock.send(to_send)
with self.sock.makefile("rb") as fp:
# first request succeeds (content-length 5)
line, headers, response_body = read_http(fp)
self.assertline(line, "200", "OK", "HTTP/1.0")
cl = int(headers["content-length"])
self.assertEqual(cl, len(response_body))
# server trusts the content-length header; no pipelining,
# so request fulfilled, extra bytes are thrown away
# connection has been closed
self.send_check_error(to_send)
self.assertRaises(ConnectionClosed, read_http, fp)
def test_request_body_too_large_with_wrong_cl_http10_keepalive(self):
body = b"a" * self.toobig
to_send = (
b"GET / HTTP/1.0\r\nContent-Length: 5\r\nConnection: Keep-Alive\r\n\r\n"
)
to_send += body
self.connect()
self.sock.send(to_send)
with self.sock.makefile("rb") as fp:
# first request succeeds (content-length 5)
line, headers, response_body = read_http(fp)
self.assertline(line, "200", "OK", "HTTP/1.0")
cl = int(headers["content-length"])
self.assertEqual(cl, len(response_body))
line, headers, response_body = read_http(fp)
self.assertline(line, "431", "Request Header Fields Too Large", "HTTP/1.0")
cl = int(headers["content-length"])
self.assertEqual(cl, len(response_body))
# connection has been closed
self.send_check_error(to_send)
self.assertRaises(ConnectionClosed, read_http, fp)
def test_request_body_too_large_with_no_cl_http10(self):
body = b"a" * self.toobig
to_send = b"GET / HTTP/1.0\r\n\r\n"
to_send += body
self.connect()
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
line, headers, response_body = read_http(fp)
self.assertline(line, "200", "OK", "HTTP/1.0")
cl = int(headers["content-length"])
self.assertEqual(cl, len(response_body))
# extra bytes are thrown away (no pipelining), connection closed
self.send_check_error(to_send)
self.assertRaises(ConnectionClosed, read_http, fp)
def test_request_body_too_large_with_no_cl_http10_keepalive(self):
body = b"a" * self.toobig
to_send = b"GET / HTTP/1.0\r\nConnection: Keep-Alive\r\n\r\n"
to_send += body
self.connect()
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
line, headers, response_body = read_http(fp)
# server trusts the content-length header (assumed zero)
self.assertline(line, "200", "OK", "HTTP/1.0")
cl = int(headers["content-length"])
self.assertEqual(cl, len(response_body))
line, headers, response_body = read_http(fp)
# next response overruns because the extra data appears to be
# header data
self.assertline(line, "431", "Request Header Fields Too Large", "HTTP/1.0")
cl = int(headers["content-length"])
self.assertEqual(cl, len(response_body))
# connection has been closed
self.send_check_error(to_send)
self.assertRaises(ConnectionClosed, read_http, fp)
def test_request_body_too_large_with_wrong_cl_http11(self):
body = b"a" * self.toobig
to_send = b"GET / HTTP/1.1\r\nContent-Length: 5\r\n\r\n"
to_send += body
self.connect()
self.sock.send(to_send)
with self.sock.makefile("rb") as fp:
# first request succeeds (content-length 5)
line, headers, response_body = read_http(fp)
self.assertline(line, "200", "OK", "HTTP/1.1")
cl = int(headers["content-length"])
self.assertEqual(cl, len(response_body))
# second response is an error response
line, headers, response_body = read_http(fp)
self.assertline(line, "431", "Request Header Fields Too Large", "HTTP/1.0")
cl = int(headers["content-length"])
self.assertEqual(cl, len(response_body))
# connection has been closed
self.send_check_error(to_send)
self.assertRaises(ConnectionClosed, read_http, fp)
def test_request_body_too_large_with_wrong_cl_http11_connclose(self):
body = b"a" * self.toobig
to_send = b"GET / HTTP/1.1\r\nContent-Length: 5\r\nConnection: close\r\n\r\n"
to_send += body
self.connect()
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
line, headers, response_body = read_http(fp)
# server trusts the content-length header (5)
self.assertline(line, "200", "OK", "HTTP/1.1")
cl = int(headers["content-length"])
self.assertEqual(cl, len(response_body))
# connection has been closed
self.send_check_error(to_send)
self.assertRaises(ConnectionClosed, read_http, fp)
def test_request_body_too_large_with_no_cl_http11(self):
body = b"a" * self.toobig
to_send = b"GET / HTTP/1.1\r\n\r\n"
to_send += body
self.connect()
self.sock.send(to_send)
with self.sock.makefile("rb") as fp:
# server trusts the content-length header (assumed 0)
line, headers, response_body = read_http(fp)
self.assertline(line, "200", "OK", "HTTP/1.1")
cl = int(headers["content-length"])
self.assertEqual(cl, len(response_body))
# server assumes pipelined requests due to http/1.1, and the first
# request was assumed c-l 0 because it had no content-length header,
# so entire body looks like the header of the subsequent request
# second response is an error response
line, headers, response_body = read_http(fp)
self.assertline(line, "431", "Request Header Fields Too Large", "HTTP/1.0")
cl = int(headers["content-length"])
self.assertEqual(cl, len(response_body))
# connection has been closed
self.send_check_error(to_send)
self.assertRaises(ConnectionClosed, read_http, fp)
def test_request_body_too_large_with_no_cl_http11_connclose(self):
body = b"a" * self.toobig
to_send = b"GET / HTTP/1.1\r\nConnection: close\r\n\r\n"
to_send += body
self.connect()
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
line, headers, response_body = read_http(fp)
# server trusts the content-length header (assumed 0)
self.assertline(line, "200", "OK", "HTTP/1.1")
cl = int(headers["content-length"])
self.assertEqual(cl, len(response_body))
# connection has been closed
self.send_check_error(to_send)
self.assertRaises(ConnectionClosed, read_http, fp)
def test_request_body_too_large_chunked_encoding(self):
control_line = b"20;\r\n" # 20 hex = 32 dec
s = b"This string has 32 characters.\r\n"
to_send = b"GET / HTTP/1.1\r\nTransfer-Encoding: chunked\r\n\r\n"
repeat = control_line + s
to_send += repeat * ((self.toobig // len(repeat)) + 1)
self.connect()
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
line, headers, response_body = read_http(fp)
# body bytes counter caught a max_request_body_size overrun
self.assertline(line, "413", "Request Entity Too Large", "HTTP/1.1")
cl = int(headers["content-length"])
self.assertEqual(cl, len(response_body))
self.assertEqual(headers["content-type"], "text/plain")
# connection has been closed
self.send_check_error(to_send)
self.assertRaises(ConnectionClosed, read_http, fp)
class InternalServerErrorTests:
def setUp(self):
from tests.fixtureapps import error
self.start_subprocess(error.app, expose_tracebacks=True)
def tearDown(self):
self.stop_subprocess()
def test_before_start_response_http_10(self):
to_send = b"GET /before_start_response HTTP/1.0\r\n\r\n"
self.connect()
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
line, headers, response_body = read_http(fp)
self.assertline(line, "500", "Internal Server Error", "HTTP/1.0")
cl = int(headers["content-length"])
self.assertEqual(cl, len(response_body))
self.assertTrue(response_body.startswith(b"Internal Server Error"))
self.assertEqual(headers["connection"], "close")
# connection has been closed
self.send_check_error(to_send)
self.assertRaises(ConnectionClosed, read_http, fp)
def test_before_start_response_http_11(self):
to_send = b"GET /before_start_response HTTP/1.1\r\n\r\n"
self.connect()
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
line, headers, response_body = read_http(fp)
self.assertline(line, "500", "Internal Server Error", "HTTP/1.1")
cl = int(headers["content-length"])
self.assertEqual(cl, len(response_body))
self.assertTrue(response_body.startswith(b"Internal Server Error"))
self.assertEqual(
sorted(headers.keys()),
["connection", "content-length", "content-type", "date", "server"],
)
# connection has been closed
self.send_check_error(to_send)
self.assertRaises(ConnectionClosed, read_http, fp)
def test_before_start_response_http_11_close(self):
to_send = b"GET /before_start_response HTTP/1.1\r\nConnection: close\r\n\r\n"
self.connect()
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
line, headers, response_body = read_http(fp)
self.assertline(line, "500", "Internal Server Error", "HTTP/1.1")
cl = int(headers["content-length"])
self.assertEqual(cl, len(response_body))
self.assertTrue(response_body.startswith(b"Internal Server Error"))
self.assertEqual(
sorted(headers.keys()),
["connection", "content-length", "content-type", "date", "server"],
)
self.assertEqual(headers["connection"], "close")
# connection has been closed
self.send_check_error(to_send)
self.assertRaises(ConnectionClosed, read_http, fp)
def test_after_start_response_http10(self):
to_send = b"GET /after_start_response HTTP/1.0\r\n\r\n"
self.connect()
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
line, headers, response_body = read_http(fp)
self.assertline(line, "500", "Internal Server Error", "HTTP/1.0")
cl = int(headers["content-length"])
self.assertEqual(cl, len(response_body))
self.assertTrue(response_body.startswith(b"Internal Server Error"))
self.assertEqual(
sorted(headers.keys()),
["connection", "content-length", "content-type", "date", "server"],
)
self.assertEqual(headers["connection"], "close")
# connection has been closed
self.send_check_error(to_send)
self.assertRaises(ConnectionClosed, read_http, fp)
def test_after_start_response_http11(self):
to_send = b"GET /after_start_response HTTP/1.1\r\n\r\n"
self.connect()
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
line, headers, response_body = read_http(fp)
self.assertline(line, "500", "Internal Server Error", "HTTP/1.1")
cl = int(headers["content-length"])
self.assertEqual(cl, len(response_body))
self.assertTrue(response_body.startswith(b"Internal Server Error"))
self.assertEqual(
sorted(headers.keys()),
["connection", "content-length", "content-type", "date", "server"],
)
# connection has been closed
self.send_check_error(to_send)
self.assertRaises(ConnectionClosed, read_http, fp)
def test_after_start_response_http11_close(self):
to_send = b"GET /after_start_response HTTP/1.1\r\nConnection: close\r\n\r\n"
self.connect()
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
line, headers, response_body = read_http(fp)
self.assertline(line, "500", "Internal Server Error", "HTTP/1.1")
cl = int(headers["content-length"])
self.assertEqual(cl, len(response_body))
self.assertTrue(response_body.startswith(b"Internal Server Error"))
self.assertEqual(
sorted(headers.keys()),
["connection", "content-length", "content-type", "date", "server"],
)
self.assertEqual(headers["connection"], "close")
# connection has been closed
self.send_check_error(to_send)
self.assertRaises(ConnectionClosed, read_http, fp)
def test_after_write_cb(self):
to_send = b"GET /after_write_cb HTTP/1.1\r\n\r\n"
self.connect()
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
line, headers, response_body = read_http(fp)
self.assertline(line, "200", "OK", "HTTP/1.1")
self.assertEqual(response_body, b"")
# connection has been closed
self.send_check_error(to_send)
self.assertRaises(ConnectionClosed, read_http, fp)
def test_in_generator(self):
to_send = b"GET /in_generator HTTP/1.1\r\n\r\n"
self.connect()
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
line, headers, response_body = read_http(fp)
self.assertline(line, "200", "OK", "HTTP/1.1")
self.assertEqual(response_body, b"")
# connection has been closed
self.send_check_error(to_send)
self.assertRaises(ConnectionClosed, read_http, fp)
class FileWrapperTests:
def setUp(self):
from tests.fixtureapps import filewrapper
self.start_subprocess(filewrapper.app)
def tearDown(self):
self.stop_subprocess()
def test_filelike_http11(self):
to_send = b"GET /filelike HTTP/1.1\r\n\r\n"
self.connect()
for t in range(0, 2):
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
line, headers, response_body = read_http(fp)
self.assertline(line, "200", "OK", "HTTP/1.1")
cl = int(headers["content-length"])
self.assertEqual(cl, len(response_body))
ct = headers["content-type"]
self.assertEqual(ct, "image/jpeg")
self.assertTrue(b"\377\330\377" in response_body)
# connection has not been closed
def test_filelike_nocl_http11(self):
to_send = b"GET /filelike_nocl HTTP/1.1\r\n\r\n"
self.connect()
for t in range(0, 2):
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
line, headers, response_body = read_http(fp)
self.assertline(line, "200", "OK", "HTTP/1.1")
cl = int(headers["content-length"])
self.assertEqual(cl, len(response_body))
ct = headers["content-type"]
self.assertEqual(ct, "image/jpeg")
self.assertTrue(b"\377\330\377" in response_body)
# connection has not been closed
def test_filelike_shortcl_http11(self):
to_send = b"GET /filelike_shortcl HTTP/1.1\r\n\r\n"
self.connect()
for t in range(0, 2):
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
line, headers, response_body = read_http(fp)
self.assertline(line, "200", "OK", "HTTP/1.1")
cl = int(headers["content-length"])
self.assertEqual(cl, 1)
self.assertEqual(cl, len(response_body))
ct = headers["content-type"]
self.assertEqual(ct, "image/jpeg")
self.assertTrue(b"\377" in response_body)
# connection has not been closed
def test_filelike_longcl_http11(self):
to_send = b"GET /filelike_longcl HTTP/1.1\r\n\r\n"
self.connect()
for t in range(0, 2):
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
line, headers, response_body = read_http(fp)
self.assertline(line, "200", "OK", "HTTP/1.1")
cl = int(headers["content-length"])
self.assertEqual(cl, len(response_body))
ct = headers["content-type"]
self.assertEqual(ct, "image/jpeg")
self.assertTrue(b"\377\330\377" in response_body)
# connection has not been closed
def test_notfilelike_http11(self):
to_send = b"GET /notfilelike HTTP/1.1\r\n\r\n"
self.connect()
for t in range(0, 2):
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
line, headers, response_body = read_http(fp)
self.assertline(line, "200", "OK", "HTTP/1.1")
cl = int(headers["content-length"])
self.assertEqual(cl, len(response_body))
ct = headers["content-type"]
self.assertEqual(ct, "image/jpeg")
self.assertTrue(b"\377\330\377" in response_body)
# connection has not been closed
def test_notfilelike_iobase_http11(self):
to_send = b"GET /notfilelike_iobase HTTP/1.1\r\n\r\n"
self.connect()
for t in range(0, 2):
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
line, headers, response_body = read_http(fp)
self.assertline(line, "200", "OK", "HTTP/1.1")
cl = int(headers["content-length"])
self.assertEqual(cl, len(response_body))
ct = headers["content-type"]
self.assertEqual(ct, "image/jpeg")
self.assertTrue(b"\377\330\377" in response_body)
# connection has not been closed
def test_notfilelike_nocl_http11(self):
to_send = b"GET /notfilelike_nocl HTTP/1.1\r\n\r\n"
self.connect()
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
line, headers, response_body = read_http(fp)
self.assertline(line, "200", "OK", "HTTP/1.1")
ct = headers["content-type"]
self.assertEqual(ct, "image/jpeg")
self.assertTrue(b"\377\330\377" in response_body)
# connection has been closed (no content-length)
self.send_check_error(to_send)
self.assertRaises(ConnectionClosed, read_http, fp)
def test_notfilelike_shortcl_http11(self):
to_send = b"GET /notfilelike_shortcl HTTP/1.1\r\n\r\n"
self.connect()
for t in range(0, 2):
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
line, headers, response_body = read_http(fp)
self.assertline(line, "200", "OK", "HTTP/1.1")
cl = int(headers["content-length"])
self.assertEqual(cl, 1)
self.assertEqual(cl, len(response_body))
ct = headers["content-type"]
self.assertEqual(ct, "image/jpeg")
self.assertTrue(b"\377" in response_body)
# connection has not been closed
def test_notfilelike_longcl_http11(self):
to_send = b"GET /notfilelike_longcl HTTP/1.1\r\n\r\n"
self.connect()
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
line, headers, response_body = read_http(fp)
self.assertline(line, "200", "OK", "HTTP/1.1")
cl = int(headers["content-length"])
self.assertEqual(cl, len(response_body) + 10)
ct = headers["content-type"]
self.assertEqual(ct, "image/jpeg")
self.assertTrue(b"\377\330\377" in response_body)
# connection has been closed
self.send_check_error(to_send)
self.assertRaises(ConnectionClosed, read_http, fp)
def test_filelike_http10(self):
to_send = b"GET /filelike HTTP/1.0\r\n\r\n"
self.connect()
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
line, headers, response_body = read_http(fp)
self.assertline(line, "200", "OK", "HTTP/1.0")
cl = int(headers["content-length"])
self.assertEqual(cl, len(response_body))
ct = headers["content-type"]
self.assertEqual(ct, "image/jpeg")
self.assertTrue(b"\377\330\377" in response_body)
# connection has been closed
self.send_check_error(to_send)
self.assertRaises(ConnectionClosed, read_http, fp)
def test_filelike_nocl_http10(self):
to_send = b"GET /filelike_nocl HTTP/1.0\r\n\r\n"
self.connect()
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
line, headers, response_body = read_http(fp)
self.assertline(line, "200", "OK", "HTTP/1.0")
cl = int(headers["content-length"])
self.assertEqual(cl, len(response_body))
ct = headers["content-type"]
self.assertEqual(ct, "image/jpeg")
self.assertTrue(b"\377\330\377" in response_body)
# connection has been closed
self.send_check_error(to_send)
self.assertRaises(ConnectionClosed, read_http, fp)
def test_notfilelike_http10(self):
to_send = b"GET /notfilelike HTTP/1.0\r\n\r\n"
self.connect()
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
line, headers, response_body = read_http(fp)
self.assertline(line, "200", "OK", "HTTP/1.0")
cl = int(headers["content-length"])
self.assertEqual(cl, len(response_body))
ct = headers["content-type"]
self.assertEqual(ct, "image/jpeg")
self.assertTrue(b"\377\330\377" in response_body)
# connection has been closed
self.send_check_error(to_send)
self.assertRaises(ConnectionClosed, read_http, fp)
def test_notfilelike_nocl_http10(self):
to_send = b"GET /notfilelike_nocl HTTP/1.0\r\n\r\n"
self.connect()
self.sock.send(to_send)
with self.sock.makefile("rb", 0) as fp:
line, headers, response_body = read_http(fp)
self.assertline(line, "200", "OK", "HTTP/1.0")
ct = headers["content-type"]
self.assertEqual(ct, "image/jpeg")
self.assertTrue(b"\377\330\377" in response_body)
# connection has been closed (no content-length)
self.send_check_error(to_send)
self.assertRaises(ConnectionClosed, read_http, fp)
class TcpEchoTests(EchoTests, TcpTests, unittest.TestCase):
pass
class TcpPipeliningTests(PipeliningTests, TcpTests, unittest.TestCase):
pass
class TcpExpectContinueTests(ExpectContinueTests, TcpTests, unittest.TestCase):
pass
class TcpBadContentLengthTests(BadContentLengthTests, TcpTests, unittest.TestCase):
pass
class TcpNoContentLengthTests(NoContentLengthTests, TcpTests, unittest.TestCase):
pass
class TcpWriteCallbackTests(WriteCallbackTests, TcpTests, unittest.TestCase):
pass
class TcpTooLargeTests(TooLargeTests, TcpTests, unittest.TestCase):
pass
class TcpInternalServerErrorTests(
InternalServerErrorTests, TcpTests, unittest.TestCase
):
pass
class TcpFileWrapperTests(FileWrapperTests, TcpTests, unittest.TestCase):
pass
if hasattr(socket, "AF_UNIX"):
class FixtureUnixWSGIServer(server.UnixWSGIServer):
"""A version of UnixWSGIServer that relays back what it's bound to."""
family = socket.AF_UNIX # Testing
def __init__(self, application, queue, **kw): # pragma: no cover
            # Coverage doesn't see this as it's run in a separate process.
# To permit parallel testing, use a PID-dependent socket.
kw["unix_socket"] = "/tmp/waitress.test-%d.sock" % os.getpid()
super().__init__(application, **kw)
queue.put(self.socket.getsockname())
class UnixTests(SubprocessTests):
server = FixtureUnixWSGIServer
def make_http_connection(self):
return UnixHTTPConnection(self.bound_to)
def stop_subprocess(self):
super().stop_subprocess()
cleanup_unix_socket(self.bound_to)
def send_check_error(self, to_send):
# Unlike inet domain sockets, Unix domain sockets can trigger a
            # 'Broken pipe' error when the socket is closed.
try:
self.sock.send(to_send)
except OSError as exc:
valid_errors = {errno.EPIPE, errno.ENOTCONN}
self.assertIn(get_errno(exc), valid_errors)
class UnixEchoTests(EchoTests, UnixTests, unittest.TestCase):
pass
class UnixPipeliningTests(PipeliningTests, UnixTests, unittest.TestCase):
pass
class UnixExpectContinueTests(ExpectContinueTests, UnixTests, unittest.TestCase):
pass
class UnixBadContentLengthTests(
BadContentLengthTests, UnixTests, unittest.TestCase
):
pass
class UnixNoContentLengthTests(NoContentLengthTests, UnixTests, unittest.TestCase):
pass
class UnixWriteCallbackTests(WriteCallbackTests, UnixTests, unittest.TestCase):
pass
class UnixTooLargeTests(TooLargeTests, UnixTests, unittest.TestCase):
pass
class UnixInternalServerErrorTests(
InternalServerErrorTests, UnixTests, unittest.TestCase
):
pass
class UnixFileWrapperTests(FileWrapperTests, UnixTests, unittest.TestCase):
pass
def parse_headers(fp):
"""Parses only RFC2822 headers from a file pointer."""
headers = {}
while True:
line = fp.readline()
if line in (b"\r\n", b"\n", b""):
break
line = line.decode("iso-8859-1")
name, value = line.strip().split(":", 1)
headers[name.lower().strip()] = value.lower().strip()
return headers
class UnixHTTPConnection(httplib.HTTPConnection):
"""Patched version of HTTPConnection that uses Unix domain sockets."""
def __init__(self, path):
httplib.HTTPConnection.__init__(self, "localhost")
self.path = path
def connect(self):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(self.path)
self.sock = sock
def close(self):
self.sock.close()
class ConnectionClosed(Exception):
pass
# stolen from gevent
def read_http(fp): # pragma: no cover
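    """Read a single HTTP response from fp, returning (status line, headers dict, body)."""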
try:
response_line = fp.readline()
except OSError as exc:
fp.close()
        # errno 104 is ECONNRESET on Linux; in WinSock, 10054 is WSAECONNRESET
if get_errno(exc) in (errno.ECONNABORTED, errno.ECONNRESET, 104, 10054):
raise ConnectionClosed
raise
if not response_line:
raise ConnectionClosed
header_lines = []
while True:
line = fp.readline()
        if line in (b"\r\n", b"\n", b""):
break
else:
header_lines.append(line)
headers = dict()
for x in header_lines:
x = x.strip()
if not x:
continue
key, value = x.split(b": ", 1)
key = key.decode("iso-8859-1").lower()
value = value.decode("iso-8859-1")
assert key not in headers, "%s header duplicated" % key
headers[key] = value
if "content-length" in headers:
num = int(headers["content-length"])
body = b""
left = num
while left > 0:
data = fp.read(left)
if not data:
break
body += data
left -= len(data)
else:
# read until EOF
body = fp.read()
return response_line, headers, body
# stolen from gevent
def get_errno(exc): # pragma: no cover
"""Get the error code out of socket.error objects.
socket.error in <2.5 does not have errno attribute
socket.error in 3.x does not allow indexing access
e.args[0] works for all.
There are cases when args[0] is not errno.
i.e. http://bugs.python.org/issue6471
Maybe there are cases when errno is set, but it is not the first argument?
"""
try:
if exc.errno is not None:
return exc.errno
except AttributeError:
pass
try:
return exc.args[0]
except IndexError:
return None
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i : i + n]
| [] | [] | [
"COVERAGE_PROCESS_START",
"COVERAGE_RCFILE"
] | [] | ["COVERAGE_PROCESS_START", "COVERAGE_RCFILE"] | python | 2 | 0 | |
python/pyarrow/tests/test_flight.py | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import base64
import os
import struct
import tempfile
import threading
import time
import traceback
import pytest
import pyarrow as pa
from pyarrow.compat import tobytes
from pyarrow.util import pathlib, find_free_port
try:
from pyarrow import flight
from pyarrow.flight import (
FlightClient, FlightServerBase,
ServerAuthHandler, ClientAuthHandler,
ServerMiddleware, ServerMiddlewareFactory,
ClientMiddleware, ClientMiddlewareFactory,
)
except ImportError:
flight = None
FlightClient, FlightServerBase = object, object
ServerAuthHandler, ClientAuthHandler = object, object
ServerMiddleware, ServerMiddlewareFactory = object, object
ClientMiddleware, ClientMiddlewareFactory = object, object
# Marks all of the tests in this module
# Ignore these with pytest ... -m 'not flight'
pytestmark = pytest.mark.flight
def test_import():
# So we see the ImportError somewhere
import pyarrow.flight # noqa
def resource_root():
"""Get the path to the test resources directory."""
if not os.environ.get("ARROW_TEST_DATA"):
raise RuntimeError("Test resources not found; set "
"ARROW_TEST_DATA to <repo root>/testing")
return pathlib.Path(os.environ["ARROW_TEST_DATA"]) / "flight"
def read_flight_resource(path):
"""Get the contents of a test resource file."""
root = resource_root()
if not root:
return None
try:
with (root / path).open("rb") as f:
return f.read()
except FileNotFoundError:
raise RuntimeError(
"Test resource {} not found; did you initialize the "
"test resource submodule?\n{}".format(root / path,
traceback.format_exc()))
def example_tls_certs():
"""Get the paths to test TLS certificates."""
return {
"root_cert": read_flight_resource("root-ca.pem"),
"certificates": [
flight.CertKeyPair(
cert=read_flight_resource("cert0.pem"),
key=read_flight_resource("cert0.key"),
),
flight.CertKeyPair(
cert=read_flight_resource("cert1.pem"),
key=read_flight_resource("cert1.key"),
),
]
}
def simple_ints_table():
data = [
pa.array([-10, -5, 0, 5, 10])
]
return pa.Table.from_arrays(data, names=['some_ints'])
def simple_dicts_table():
dict_values = pa.array(["foo", "baz", "quux"], type=pa.utf8())
data = [
pa.chunked_array([
pa.DictionaryArray.from_arrays([1, 0, None], dict_values),
pa.DictionaryArray.from_arrays([2, 1], dict_values)
])
]
return pa.Table.from_arrays(data, names=['some_dicts'])
class ConstantFlightServer(FlightServerBase):
"""A Flight server that always returns the same data.
See ARROW-4796: this server implementation will segfault if Flight
does not properly hold a reference to the Table object.
"""
def __init__(self, location=None, **kwargs):
super(ConstantFlightServer, self).__init__(location, **kwargs)
# Ticket -> Table
self.table_factories = {
b'ints': simple_ints_table,
b'dicts': simple_dicts_table,
}
def do_get(self, context, ticket):
# Return a fresh table, so that Flight is the only one keeping a
# reference.
table = self.table_factories[ticket.ticket]()
return flight.RecordBatchStream(table)
class MetadataFlightServer(FlightServerBase):
"""A Flight server that numbers incoming/outgoing data."""
def do_get(self, context, ticket):
data = [
pa.array([-10, -5, 0, 5, 10])
]
table = pa.Table.from_arrays(data, names=['a'])
return flight.GeneratorStream(
table.schema,
self.number_batches(table))
def do_put(self, context, descriptor, reader, writer):
counter = 0
expected_data = [-10, -5, 0, 5, 10]
while True:
try:
batch, buf = reader.read_chunk()
assert batch.equals(pa.RecordBatch.from_arrays(
[pa.array([expected_data[counter]])],
['a']
))
assert buf is not None
client_counter, = struct.unpack('<i', buf.to_pybytes())
assert counter == client_counter
writer.write(struct.pack('<i', counter))
counter += 1
except StopIteration:
return
@staticmethod
def number_batches(table):
for idx, batch in enumerate(table.to_batches()):
buf = struct.pack('<i', idx)
yield batch, buf
class EchoFlightServer(FlightServerBase):
"""A Flight server that returns the last data uploaded."""
def __init__(self, location=None, expected_schema=None, **kwargs):
super(EchoFlightServer, self).__init__(location, **kwargs)
self.last_message = None
self.expected_schema = expected_schema
def do_get(self, context, ticket):
return flight.RecordBatchStream(self.last_message)
def do_put(self, context, descriptor, reader, writer):
if self.expected_schema:
assert self.expected_schema == reader.schema
self.last_message = reader.read_all()
class EchoStreamFlightServer(EchoFlightServer):
"""An echo server that streams individual record batches."""
def do_get(self, context, ticket):
return flight.GeneratorStream(
self.last_message.schema,
self.last_message.to_batches(max_chunksize=1024))
def list_actions(self, context):
return []
def do_action(self, context, action):
if action.type == "who-am-i":
return iter([flight.Result(context.peer_identity())])
raise NotImplementedError
class GetInfoFlightServer(FlightServerBase):
"""A Flight server that tests GetFlightInfo."""
def get_flight_info(self, context, descriptor):
return flight.FlightInfo(
pa.schema([('a', pa.int32())]),
descriptor,
[
flight.FlightEndpoint(b'', ['grpc://test']),
flight.FlightEndpoint(
b'',
[flight.Location.for_grpc_tcp('localhost', 5005)],
),
],
-1,
-1,
)
def get_schema(self, context, descriptor):
info = self.get_flight_info(context, descriptor)
return flight.SchemaResult(info.schema)
class ListActionsFlightServer(FlightServerBase):
"""A Flight server that tests ListActions."""
@classmethod
def expected_actions(cls):
return [
("action-1", "description"),
("action-2", ""),
flight.ActionType("action-3", "more detail"),
]
def list_actions(self, context):
for action in self.expected_actions():
yield action
class ListActionsErrorFlightServer(FlightServerBase):
"""A Flight server that tests ListActions."""
def list_actions(self, context):
yield ("action-1", "")
yield "foo"
class CheckTicketFlightServer(FlightServerBase):
"""A Flight server that compares the given ticket to an expected value."""
def __init__(self, expected_ticket, location=None, **kwargs):
super(CheckTicketFlightServer, self).__init__(location, **kwargs)
self.expected_ticket = expected_ticket
def do_get(self, context, ticket):
assert self.expected_ticket == ticket.ticket
data1 = [pa.array([-10, -5, 0, 5, 10], type=pa.int32())]
table = pa.Table.from_arrays(data1, names=['a'])
return flight.RecordBatchStream(table)
    def do_put(self, context, descriptor, reader, writer):
self.last_message = reader.read_all()
class InvalidStreamFlightServer(FlightServerBase):
"""A Flight server that tries to return messages with differing schemas."""
schema = pa.schema([('a', pa.int32())])
def do_get(self, context, ticket):
data1 = [pa.array([-10, -5, 0, 5, 10], type=pa.int32())]
data2 = [pa.array([-10.0, -5.0, 0.0, 5.0, 10.0], type=pa.float64())]
        assert data1[0].type != data2[0].type
table1 = pa.Table.from_arrays(data1, names=['a'])
table2 = pa.Table.from_arrays(data2, names=['a'])
assert table1.schema == self.schema
return flight.GeneratorStream(self.schema, [table1, table2])
class SlowFlightServer(FlightServerBase):
"""A Flight server that delays its responses to test timeouts."""
def do_get(self, context, ticket):
return flight.GeneratorStream(pa.schema([('a', pa.int32())]),
self.slow_stream())
def do_action(self, context, action):
time.sleep(0.5)
return iter([])
@staticmethod
def slow_stream():
data1 = [pa.array([-10, -5, 0, 5, 10], type=pa.int32())]
yield pa.Table.from_arrays(data1, names=['a'])
# The second message should never get sent; the client should
# cancel before we send this
time.sleep(10)
yield pa.Table.from_arrays(data1, names=['a'])
class ErrorFlightServer(FlightServerBase):
"""A Flight server that uses all the Flight-specific errors."""
def do_action(self, context, action):
if action.type == "internal":
raise flight.FlightInternalError("foo")
elif action.type == "timedout":
raise flight.FlightTimedOutError("foo")
elif action.type == "cancel":
raise flight.FlightCancelledError("foo")
elif action.type == "unauthenticated":
raise flight.FlightUnauthenticatedError("foo")
elif action.type == "unauthorized":
raise flight.FlightUnauthorizedError("foo")
raise NotImplementedError
def list_flights(self, context, criteria):
yield flight.FlightInfo(
pa.schema([]),
flight.FlightDescriptor.for_path('/foo'),
[],
-1, -1
)
raise flight.FlightInternalError("foo")
class HttpBasicServerAuthHandler(ServerAuthHandler):
"""An example implementation of HTTP basic authentication."""
def __init__(self, creds):
super(HttpBasicServerAuthHandler, self).__init__()
self.creds = creds
def authenticate(self, outgoing, incoming):
buf = incoming.read()
auth = flight.BasicAuth.deserialize(buf)
if auth.username not in self.creds:
raise flight.FlightUnauthenticatedError("unknown user")
if self.creds[auth.username] != auth.password:
raise flight.FlightUnauthenticatedError("wrong password")
outgoing.write(tobytes(auth.username))
def is_valid(self, token):
if not token:
raise flight.FlightUnauthenticatedError("token not provided")
if token not in self.creds:
raise flight.FlightUnauthenticatedError("unknown user")
return token
class HttpBasicClientAuthHandler(ClientAuthHandler):
"""An example implementation of HTTP basic authentication."""
def __init__(self, username, password):
super(HttpBasicClientAuthHandler, self).__init__()
self.basic_auth = flight.BasicAuth(username, password)
self.token = None
def authenticate(self, outgoing, incoming):
auth = self.basic_auth.serialize()
outgoing.write(auth)
self.token = incoming.read()
def get_token(self):
return self.token
class TokenServerAuthHandler(ServerAuthHandler):
"""An example implementation of authentication via handshake."""
def __init__(self, creds):
super(TokenServerAuthHandler, self).__init__()
self.creds = creds
def authenticate(self, outgoing, incoming):
username = incoming.read()
password = incoming.read()
if username in self.creds and self.creds[username] == password:
outgoing.write(base64.b64encode(b'secret:' + username))
else:
raise flight.FlightUnauthenticatedError(
"invalid username/password")
def is_valid(self, token):
token = base64.b64decode(token)
if not token.startswith(b'secret:'):
raise flight.FlightUnauthenticatedError("invalid token")
return token[7:]
class TokenClientAuthHandler(ClientAuthHandler):
"""An example implementation of authentication via handshake."""
def __init__(self, username, password):
super(TokenClientAuthHandler, self).__init__()
self.username = username
self.password = password
self.token = b''
def authenticate(self, outgoing, incoming):
outgoing.write(self.username)
outgoing.write(self.password)
self.token = incoming.read()
def get_token(self):
return self.token
class HeaderServerMiddleware(ServerMiddleware):
"""Expose a per-call value to the RPC method body."""
def __init__(self, special_value):
self.special_value = special_value
class HeaderServerMiddlewareFactory(ServerMiddlewareFactory):
"""Expose a per-call hard-coded value to the RPC method body."""
def start_call(self, info, headers):
return HeaderServerMiddleware("right value")
class HeaderFlightServer(FlightServerBase):
"""Echo back the per-call hard-coded value."""
def do_action(self, context, action):
middleware = context.get_middleware("test")
if middleware:
return iter([flight.Result(middleware.special_value.encode())])
return iter([flight.Result("".encode())])
class SelectiveAuthServerMiddlewareFactory(ServerMiddlewareFactory):
"""Deny access to certain methods based on a header."""
def start_call(self, info, headers):
if info.method == flight.FlightMethod.LIST_ACTIONS:
# No auth needed
return
token = headers.get("x-auth-token")
if not token:
raise flight.FlightUnauthenticatedError("No token")
token = token[0]
if token != "password":
raise flight.FlightUnauthenticatedError("Invalid token")
return HeaderServerMiddleware(token)
class SelectiveAuthClientMiddlewareFactory(ClientMiddlewareFactory):
def start_call(self, info):
return SelectiveAuthClientMiddleware()
class SelectiveAuthClientMiddleware(ClientMiddleware):
def sending_headers(self):
return {
"x-auth-token": "password",
}
def test_flight_server_location_argument():
locations = [
None,
'grpc://localhost:0',
('localhost', find_free_port()),
]
for location in locations:
with FlightServerBase(location) as server:
assert isinstance(server, FlightServerBase)
def test_server_exit_reraises_exception():
with pytest.raises(ValueError):
with FlightServerBase():
raise ValueError()
@pytest.mark.slow
def test_client_wait_for_available():
location = ('localhost', find_free_port())
server = None
def serve():
        nonlocal server
time.sleep(0.5)
server = FlightServerBase(location)
server.serve()
client = FlightClient(location)
thread = threading.Thread(target=serve, daemon=True)
thread.start()
started = time.time()
client.wait_for_available(timeout=5)
elapsed = time.time() - started
assert elapsed >= 0.5
def test_flight_do_get_ints():
"""Try a simple do_get call."""
table = simple_ints_table()
with ConstantFlightServer() as server:
client = flight.connect(('localhost', server.port))
data = client.do_get(flight.Ticket(b'ints')).read_all()
assert data.equals(table)
@pytest.mark.pandas
def test_do_get_ints_pandas():
"""Try a simple do_get call."""
table = simple_ints_table()
with ConstantFlightServer() as server:
client = flight.connect(('localhost', server.port))
data = client.do_get(flight.Ticket(b'ints')).read_pandas()
assert list(data['some_ints']) == table.column(0).to_pylist()
def test_flight_do_get_dicts():
table = simple_dicts_table()
with ConstantFlightServer() as server:
client = flight.connect(('localhost', server.port))
data = client.do_get(flight.Ticket(b'dicts')).read_all()
assert data.equals(table)
def test_flight_do_get_ticket():
"""Make sure Tickets get passed to the server."""
data1 = [pa.array([-10, -5, 0, 5, 10], type=pa.int32())]
table = pa.Table.from_arrays(data1, names=['a'])
with CheckTicketFlightServer(expected_ticket=b'the-ticket') as server:
client = flight.connect(('localhost', server.port))
data = client.do_get(flight.Ticket(b'the-ticket')).read_all()
assert data.equals(table)
def test_flight_get_info():
"""Make sure FlightEndpoint accepts string and object URIs."""
with GetInfoFlightServer() as server:
client = FlightClient(('localhost', server.port))
info = client.get_flight_info(flight.FlightDescriptor.for_command(b''))
assert info.total_records == -1
assert info.total_bytes == -1
assert info.schema == pa.schema([('a', pa.int32())])
assert len(info.endpoints) == 2
assert len(info.endpoints[0].locations) == 1
assert info.endpoints[0].locations[0] == flight.Location('grpc://test')
assert info.endpoints[1].locations[0] == \
flight.Location.for_grpc_tcp('localhost', 5005)
def test_flight_get_schema():
"""Make sure GetSchema returns correct schema."""
with GetInfoFlightServer() as server:
client = FlightClient(('localhost', server.port))
info = client.get_schema(flight.FlightDescriptor.for_command(b''))
assert info.schema == pa.schema([('a', pa.int32())])
def test_list_actions():
"""Make sure the return type of ListActions is validated."""
# ARROW-6392
with ListActionsErrorFlightServer() as server:
client = FlightClient(('localhost', server.port))
with pytest.raises(
flight.FlightServerError,
match=("TypeError: Results of list_actions must be "
"ActionType or tuple")
):
list(client.list_actions())
with ListActionsFlightServer() as server:
client = FlightClient(('localhost', server.port))
assert list(client.list_actions()) == \
ListActionsFlightServer.expected_actions()
class ConvenienceServer(FlightServerBase):
"""
Server for testing various implementation conveniences (auto-boxing, etc.)
"""
@property
def simple_action_results(self):
return [b'foo', b'bar', b'baz']
def do_action(self, context, action):
if action.type == 'simple-action':
return iter(self.simple_action_results)
elif action.type == 'echo':
return iter([action.body])
elif action.type == 'bad-action':
return iter(['foo'])
def test_do_action_result_convenience():
with ConvenienceServer() as server:
client = FlightClient(('localhost', server.port))
# do_action as action type without body
results = [x.body for x in client.do_action('simple-action')]
assert results == server.simple_action_results
# do_action with tuple of type and body
body = b'the-body'
results = [x.body for x in client.do_action(('echo', body))]
assert results == [body]
def test_nicer_server_exceptions():
with ConvenienceServer() as server:
client = FlightClient(('localhost', server.port))
with pytest.raises(flight.FlightServerError,
match="TypeError: a bytes-like object is required"):
list(client.do_action('bad-action'))
def test_get_port():
"""Make sure port() works."""
server = GetInfoFlightServer("grpc://localhost:0")
try:
assert server.port > 0
finally:
server.shutdown()
@pytest.mark.skipif(os.name == 'nt',
reason="Unix sockets can't be tested on Windows")
def test_flight_domain_socket():
"""Try a simple do_get call over a Unix domain socket."""
with tempfile.NamedTemporaryFile() as sock:
sock.close()
location = flight.Location.for_grpc_unix(sock.name)
with ConstantFlightServer(location=location):
client = FlightClient(location)
reader = client.do_get(flight.Ticket(b'ints'))
table = simple_ints_table()
assert reader.schema.equals(table.schema)
data = reader.read_all()
assert data.equals(table)
reader = client.do_get(flight.Ticket(b'dicts'))
table = simple_dicts_table()
assert reader.schema.equals(table.schema)
data = reader.read_all()
assert data.equals(table)
@pytest.mark.slow
def test_flight_large_message():
"""Try sending/receiving a large message via Flight.
See ARROW-4421: by default, gRPC won't allow us to send messages >
4MiB in size.
"""
data = pa.Table.from_arrays([
pa.array(range(0, 10 * 1024 * 1024))
], names=['a'])
with EchoFlightServer(expected_schema=data.schema) as server:
client = FlightClient(('localhost', server.port))
writer, _ = client.do_put(flight.FlightDescriptor.for_path('test'),
data.schema)
# Write a single giant chunk
writer.write_table(data, 10 * 1024 * 1024)
writer.close()
result = client.do_get(flight.Ticket(b'')).read_all()
assert result.equals(data)
def test_flight_generator_stream():
"""Try downloading a flight of RecordBatches in a GeneratorStream."""
data = pa.Table.from_arrays([
pa.array(range(0, 10 * 1024))
], names=['a'])
with EchoStreamFlightServer() as server:
client = FlightClient(('localhost', server.port))
writer, _ = client.do_put(flight.FlightDescriptor.for_path('test'),
data.schema)
writer.write_table(data)
writer.close()
result = client.do_get(flight.Ticket(b'')).read_all()
assert result.equals(data)
def test_flight_invalid_generator_stream():
"""Try streaming data with mismatched schemas."""
with InvalidStreamFlightServer() as server:
client = FlightClient(('localhost', server.port))
with pytest.raises(pa.ArrowException):
client.do_get(flight.Ticket(b'')).read_all()
def test_timeout_fires():
"""Make sure timeouts fire on slow requests."""
# Do this in a separate thread so that if it fails, we don't hang
# the entire test process
with SlowFlightServer() as server:
client = FlightClient(('localhost', server.port))
action = flight.Action("", b"")
options = flight.FlightCallOptions(timeout=0.2)
# gRPC error messages change based on version, so don't look
# for a particular error
with pytest.raises(flight.FlightTimedOutError):
list(client.do_action(action, options=options))
def test_timeout_passes():
"""Make sure timeouts do not fire on fast requests."""
with ConstantFlightServer() as server:
client = FlightClient(('localhost', server.port))
options = flight.FlightCallOptions(timeout=5.0)
client.do_get(flight.Ticket(b'ints'), options=options).read_all()
basic_auth_handler = HttpBasicServerAuthHandler(creds={
b"test": b"p4ssw0rd",
})
token_auth_handler = TokenServerAuthHandler(creds={
b"test": b"p4ssw0rd",
})
@pytest.mark.slow
def test_http_basic_unauth():
"""Test that auth fails when not authenticated."""
with EchoStreamFlightServer(auth_handler=basic_auth_handler) as server:
client = FlightClient(('localhost', server.port))
action = flight.Action("who-am-i", b"")
with pytest.raises(flight.FlightUnauthenticatedError,
match=".*unauthenticated.*"):
list(client.do_action(action))
def test_http_basic_auth():
"""Test a Python implementation of HTTP basic authentication."""
with EchoStreamFlightServer(auth_handler=basic_auth_handler) as server:
client = FlightClient(('localhost', server.port))
action = flight.Action("who-am-i", b"")
client.authenticate(HttpBasicClientAuthHandler('test', 'p4ssw0rd'))
identity = next(client.do_action(action))
assert identity.body.to_pybytes() == b'test'
def test_http_basic_auth_invalid_password():
"""Test that auth fails with the wrong password."""
with EchoStreamFlightServer(auth_handler=basic_auth_handler) as server:
client = FlightClient(('localhost', server.port))
action = flight.Action("who-am-i", b"")
with pytest.raises(flight.FlightUnauthenticatedError,
match=".*wrong password.*"):
client.authenticate(HttpBasicClientAuthHandler('test', 'wrong'))
next(client.do_action(action))
def test_token_auth():
"""Test an auth mechanism that uses a handshake."""
with EchoStreamFlightServer(auth_handler=token_auth_handler) as server:
client = FlightClient(('localhost', server.port))
action = flight.Action("who-am-i", b"")
client.authenticate(TokenClientAuthHandler('test', 'p4ssw0rd'))
identity = next(client.do_action(action))
assert identity.body.to_pybytes() == b'test'
def test_token_auth_invalid():
"""Test an auth mechanism that uses a handshake."""
with EchoStreamFlightServer(auth_handler=token_auth_handler) as server:
client = FlightClient(('localhost', server.port))
with pytest.raises(flight.FlightUnauthenticatedError):
client.authenticate(TokenClientAuthHandler('test', 'wrong'))
def test_location_invalid():
"""Test constructing invalid URIs."""
with pytest.raises(pa.ArrowInvalid, match=".*Cannot parse URI:.*"):
flight.connect("%")
with pytest.raises(pa.ArrowInvalid, match=".*Cannot parse URI:.*"):
ConstantFlightServer("%")
def test_location_unknown_scheme():
"""Test creating locations for unknown schemes."""
assert flight.Location("s3://foo").uri == b"s3://foo"
assert flight.Location("https://example.com/bar.parquet").uri == \
b"https://example.com/bar.parquet"
@pytest.mark.slow
@pytest.mark.requires_testing_data
def test_tls_fails():
"""Make sure clients cannot connect when cert verification fails."""
certs = example_tls_certs()
with ConstantFlightServer(tls_certificates=certs["certificates"]) as s:
# Ensure client doesn't connect when certificate verification
# fails (this is a slow test since gRPC does retry a few times)
client = FlightClient("grpc+tls://localhost:" + str(s.port))
# gRPC error messages change based on version, so don't look
# for a particular error
with pytest.raises(flight.FlightUnavailableError):
client.do_get(flight.Ticket(b'ints')).read_all()
@pytest.mark.requires_testing_data
def test_tls_do_get():
"""Try a simple do_get call over TLS."""
table = simple_ints_table()
certs = example_tls_certs()
with ConstantFlightServer(tls_certificates=certs["certificates"]) as s:
client = FlightClient(('localhost', s.port),
tls_root_certs=certs["root_cert"])
data = client.do_get(flight.Ticket(b'ints')).read_all()
assert data.equals(table)
@pytest.mark.requires_testing_data
def test_tls_override_hostname():
"""Check that incorrectly overriding the hostname fails."""
certs = example_tls_certs()
with ConstantFlightServer(tls_certificates=certs["certificates"]) as s:
client = flight.connect(('localhost', s.port),
tls_root_certs=certs["root_cert"],
override_hostname="fakehostname")
with pytest.raises(flight.FlightUnavailableError):
client.do_get(flight.Ticket(b'ints'))
def test_flight_do_get_metadata():
"""Try a simple do_get call with metadata."""
data = [
pa.array([-10, -5, 0, 5, 10])
]
table = pa.Table.from_arrays(data, names=['a'])
batches = []
with MetadataFlightServer() as server:
client = FlightClient(('localhost', server.port))
reader = client.do_get(flight.Ticket(b''))
idx = 0
while True:
try:
batch, metadata = reader.read_chunk()
batches.append(batch)
server_idx, = struct.unpack('<i', metadata.to_pybytes())
assert idx == server_idx
idx += 1
except StopIteration:
break
data = pa.Table.from_batches(batches)
assert data.equals(table)
def test_flight_do_put_metadata():
"""Try a simple do_put call with metadata."""
data = [
pa.array([-10, -5, 0, 5, 10])
]
table = pa.Table.from_arrays(data, names=['a'])
with MetadataFlightServer() as server:
client = FlightClient(('localhost', server.port))
writer, metadata_reader = client.do_put(
flight.FlightDescriptor.for_path(''),
table.schema)
with writer:
for idx, batch in enumerate(table.to_batches(max_chunksize=1)):
metadata = struct.pack('<i', idx)
writer.write_with_metadata(batch, metadata)
buf = metadata_reader.read()
assert buf is not None
server_idx, = struct.unpack('<i', buf.to_pybytes())
assert idx == server_idx
@pytest.mark.slow
def test_cancel_do_get():
"""Test canceling a DoGet operation on the client side."""
with ConstantFlightServer() as server:
client = FlightClient(('localhost', server.port))
reader = client.do_get(flight.Ticket(b'ints'))
reader.cancel()
with pytest.raises(flight.FlightCancelledError, match=".*Cancel.*"):
reader.read_chunk()
@pytest.mark.slow
def test_cancel_do_get_threaded():
"""Test canceling a DoGet operation from another thread."""
with SlowFlightServer() as server:
client = FlightClient(('localhost', server.port))
reader = client.do_get(flight.Ticket(b'ints'))
read_first_message = threading.Event()
stream_canceled = threading.Event()
result_lock = threading.Lock()
raised_proper_exception = threading.Event()
def block_read():
reader.read_chunk()
read_first_message.set()
stream_canceled.wait(timeout=5)
try:
reader.read_chunk()
except flight.FlightCancelledError:
with result_lock:
raised_proper_exception.set()
thread = threading.Thread(target=block_read, daemon=True)
thread.start()
read_first_message.wait(timeout=5)
reader.cancel()
stream_canceled.set()
thread.join(timeout=1)
with result_lock:
assert raised_proper_exception.is_set()
def test_roundtrip_types():
"""Make sure serializable types round-trip."""
ticket = flight.Ticket("foo")
assert ticket == flight.Ticket.deserialize(ticket.serialize())
desc = flight.FlightDescriptor.for_command("test")
assert desc == flight.FlightDescriptor.deserialize(desc.serialize())
desc = flight.FlightDescriptor.for_path("a", "b", "test.arrow")
assert desc == flight.FlightDescriptor.deserialize(desc.serialize())
info = flight.FlightInfo(
pa.schema([('a', pa.int32())]),
desc,
[
flight.FlightEndpoint(b'', ['grpc://test']),
flight.FlightEndpoint(
b'',
[flight.Location.for_grpc_tcp('localhost', 5005)],
),
],
-1,
-1,
)
info2 = flight.FlightInfo.deserialize(info.serialize())
assert info.schema == info2.schema
assert info.descriptor == info2.descriptor
assert info.total_bytes == info2.total_bytes
assert info.total_records == info2.total_records
assert info.endpoints == info2.endpoints
def test_roundtrip_errors():
"""Ensure that Flight errors propagate from server to client."""
with ErrorFlightServer() as server:
client = FlightClient(('localhost', server.port))
with pytest.raises(flight.FlightInternalError, match=".*foo.*"):
list(client.do_action(flight.Action("internal", b"")))
with pytest.raises(flight.FlightTimedOutError, match=".*foo.*"):
list(client.do_action(flight.Action("timedout", b"")))
with pytest.raises(flight.FlightCancelledError, match=".*foo.*"):
list(client.do_action(flight.Action("cancel", b"")))
with pytest.raises(flight.FlightUnauthenticatedError, match=".*foo.*"):
list(client.do_action(flight.Action("unauthenticated", b"")))
with pytest.raises(flight.FlightUnauthorizedError, match=".*foo.*"):
list(client.do_action(flight.Action("unauthorized", b"")))
with pytest.raises(flight.FlightInternalError, match=".*foo.*"):
list(client.list_flights())
def test_do_put_independent_read_write():
"""Ensure that separate threads can read/write on a DoPut."""
# ARROW-6063: previously this would cause gRPC to abort when the
# writer was closed (due to simultaneous reads), or would hang
# forever.
data = [
pa.array([-10, -5, 0, 5, 10])
]
table = pa.Table.from_arrays(data, names=['a'])
with MetadataFlightServer() as server:
client = FlightClient(('localhost', server.port))
writer, metadata_reader = client.do_put(
flight.FlightDescriptor.for_path(''),
table.schema)
count = [0]
def _reader_thread():
while metadata_reader.read() is not None:
count[0] += 1
thread = threading.Thread(target=_reader_thread)
thread.start()
batches = table.to_batches(max_chunksize=1)
with writer:
for idx, batch in enumerate(batches):
metadata = struct.pack('<i', idx)
writer.write_with_metadata(batch, metadata)
# Causes the server to stop writing and end the call
writer.done_writing()
# Thus reader thread will break out of loop
thread.join()
# writer.close() won't segfault since reader thread has
# stopped
assert count[0] == len(batches)
def test_server_middleware_same_thread():
"""Ensure that server middleware run on the same thread as the RPC."""
with HeaderFlightServer(middleware={
"test": HeaderServerMiddlewareFactory(),
}) as server:
client = FlightClient(('localhost', server.port))
results = list(client.do_action(flight.Action(b"test", b"")))
assert len(results) == 1
value = results[0].body.to_pybytes()
assert b"right value" == value
def test_middleware_reject():
"""Test rejecting an RPC with server middleware."""
with HeaderFlightServer(middleware={
"test": SelectiveAuthServerMiddlewareFactory(),
}) as server:
client = FlightClient(('localhost', server.port))
# The middleware allows this through without auth.
with pytest.raises(pa.ArrowNotImplementedError):
list(client.list_actions())
# But not anything else.
with pytest.raises(flight.FlightUnauthenticatedError):
list(client.do_action(flight.Action(b"", b"")))
client = FlightClient(
('localhost', server.port),
middleware=[SelectiveAuthClientMiddlewareFactory()]
)
response = next(client.do_action(flight.Action(b"", b"")))
assert b"password" == response.body.to_pybytes()
| [] | [] | [
"ARROW_TEST_DATA"
] | [] | ["ARROW_TEST_DATA"] | python | 1 | 0 | |
providers/azuread/azuread_provider.go | // Copyright 2019 The Terraformer Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package azuread
import (
"errors"
"log"
"os"
"github.com/GoogleCloudPlatform/terraformer/terraformutils"
)
type AzureADProvider struct { //nolint
terraformutils.Provider
tenantID string
clientID string
clientSecret string
}
func (p *AzureADProvider) setEnvConfig() error {
tenantID := os.Getenv("ARM_TENANT_ID")
if tenantID == "" {
return errors.New("please set ARM_TENANT_ID in your environment")
}
clientID := os.Getenv("ARM_CLIENT_ID")
if clientID == "" {
return errors.New("please set ARM_CLIENT_ID in your environment")
}
clientSecret := os.Getenv("ARM_CLIENT_SECRET")
if clientSecret == "" {
return errors.New("please set ARM_CLIENT_SECRET in your environment")
}
p.tenantID = tenantID
p.clientID = clientID
p.clientSecret = clientSecret
return nil
}
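// Illustrative shell setup (placeholder values only):
//   export ARM_TENANT_ID=<tenant-id>
//   export ARM_CLIENT_ID=<client-id>
//   export ARM_CLIENT_SECRET=<client-secret>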
func (p *AzureADProvider) Init(args []string) error {
err := p.setEnvConfig()
if err != nil {
log.Println(err.Error())
return err
}
return nil
}
func (p *AzureADProvider) GetName() string {
return "azuread"
}
func (p *AzureADProvider) GetProviderData(arg ...string) map[string]interface{} {
return map[string]interface{}{}
}
func (AzureADProvider) GetResourceConnections() map[string]map[string][]string {
return map[string]map[string][]string{}
}
func (p *AzureADProvider) GetSupportedService() map[string]terraformutils.ServiceGenerator {
return map[string]terraformutils.ServiceGenerator{
"user": &UserServiceGenerator{},
"application": &ApplicationServiceGenerator{},
"group": &GroupServiceGenerator{},
"service_principal": &ServicePrincipalServiceGenerator{},
"app_role_assignment": &AppRoleAssignmentServiceGenerator{},
}
}
func (p *AzureADProvider) InitService(serviceName string, verbose bool) error {
var isSupported bool
if _, isSupported = p.GetSupportedService()[serviceName]; !isSupported {
return errors.New("azuread: " + serviceName + " not supported service")
}
p.Service = p.GetSupportedService()[serviceName]
p.Service.SetName(serviceName)
p.Service.SetVerbose(verbose)
p.Service.SetProviderName(p.GetName())
p.Service.SetArgs(map[string]interface{}{
"tenant_id": p.tenantID,
"client_id": p.clientID,
"client_secret": p.clientSecret,
})
return nil
}
| [
"\"ARM_TENANT_ID\"",
"\"ARM_CLIENT_ID\"",
"\"ARM_CLIENT_SECRET\""
] | [] | [
"ARM_CLIENT_ID",
"ARM_CLIENT_SECRET",
"ARM_TENANT_ID"
] | [] | ["ARM_CLIENT_ID", "ARM_CLIENT_SECRET", "ARM_TENANT_ID"] | go | 3 | 0 | |
setup.py | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="seafarer",
version="0.0.1",
author="Kairsten Fay",
author_email="[email protected]",
description="An additional layer of abstraction for seaborn and matplotlib plots",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/kairstenfay/seafarer",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
) | [] | [] | [] | [] | [] | python | null | null | null |
rcon/extended_commands.py | import random
import os
import re
from datetime import datetime, timedelta
import logging
import socket
from rcon.cache_utils import ttl_cache, invalidates, get_redis_client
from rcon.commands import HLLServerError, ServerCtl, CommandFailedError
from rcon.steam_utils import get_player_country_code, get_player_has_bans
STEAMID = "steam_id_64"
NAME = "name"
ROLE = "role"
logger = logging.getLogger(__name__)
class Rcon(ServerCtl):
settings = (
("team_switch_cooldown", int),
("autobalance_threshold", int),
("idle_autokick_time", int),
("max_ping_autokick", int),
("queue_length", int),
("vip_slots_num", int),
("autobalance_enabled", bool),
("votekick_enabled", bool),
("votekick_threshold", str),
)
slots_regexp = re.compile(r"^\d{1,3}/\d{2,3}$")
map_regexp = re.compile(r"^(\w+_?)+$")
chat_regexp = re.compile(
r"CHAT\[((Team)|(Unit))\]\[(.*)\(((Allies)|(Axis))/(\d+)\)\]: (.*)"
)
player_info_pattern = r"(.*)\(((Allies)|(Axis))/(\d+)\)"
player_info_regexp = re.compile(r"(.*)\(((Allies)|(Axis))/(\d+)\)")
MAX_SERV_NAME_LEN = 1024 # I totally made up that number. Unable to test
    log_time_regexp = re.compile(r".*\((\d+)\).*")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def get_playerids(self, as_dict=False):
raw_list = super().get_playerids()
player_list = []
player_dict = {}
for playerinfo in raw_list:
name, steamid = playerinfo.rsplit(":", 1)
name = name[:-1]
steamid = steamid[1:]
player_dict[name] = steamid
player_list.append((name, steamid))
return player_dict if as_dict else player_list
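    # Each raw entry is expected to look roughly like "Some Name : 76561198...",
    # which the split above turns into ("Some Name", "76561198...") pairs
    # (illustrative format, inferred from the parsing logic).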
def get_vips_count(self):
players = self.get_playerids()
vips = {v["steam_id_64"] for v in self.get_vip_ids()}
vip_count = 0
for _, steamid in players:
if steamid in vips:
vip_count += 1
return vip_count
@ttl_cache(ttl=60 * 60 * 24, cache_falsy=False)
def get_player_info(self, player):
try:
try:
raw = super().get_player_info(player)
name, steam_id_64 = raw.split("\n")
except CommandFailedError:
name = player
steam_id_64 = self.get_playerids(as_dict=True).get(name)
if not steam_id_64:
return {}
country = get_player_country_code(steam_id_64)
steam_bans = get_player_has_bans(steam_id_64)
except (CommandFailedError, ValueError):
            # Using debug instead of exception as it's way too spammy
            logger.debug("Can't get player info for %s", player)
            # logger.exception("Can't get player info for %s", player)
return {}
name = name.split(": ", 1)[-1]
steam_id = steam_id_64.split(": ", 1)[-1]
if name != player:
logger.error(
"get_player_info('%s') returned for a different name: %s %s",
player,
name,
steam_id,
)
return {}
return {
NAME: name,
STEAMID: steam_id,
"country": country,
"steam_bans": steam_bans,
}
@ttl_cache(ttl=60 * 60 * 24)
def get_admin_ids(self):
res = super().get_admin_ids()
admins = []
for item in res:
steam_id_64, role, name = item.split(" ", 2)
admins.append({STEAMID: steam_id_64, NAME: name[1:-1], ROLE: role})
return admins
def get_online_console_admins(self):
admins = self.get_admin_ids()
players = self.get_players()
online = []
admins_ids = set(a["steam_id_64"] for a in admins)
for player in players:
if player["steam_id_64"] in admins_ids:
online.append(player["name"])
return online
def do_add_admin(self, steam_id_64, role, name):
with invalidates(Rcon.get_admin_ids):
return super().do_add_admin(steam_id_64, role, name)
def do_remove_admin(self, steam_id_64):
with invalidates(Rcon.get_admin_ids):
return super().do_remove_admin(steam_id_64)
@ttl_cache(ttl=5)
def get_players(self):
        # TODO refactor to use get_playerids. Also batch call to steam API and find a way to cleverly cache the steam results
names = super().get_players()
players = []
for n in names:
player = {NAME: n}
player.update(self.get_player_info(n))
players.append(player)
return players
@ttl_cache(ttl=60)
def get_perma_bans(self):
return super().get_perma_bans()
@ttl_cache(ttl=60)
def get_temp_bans(self):
res = super().get_temp_bans()
logger.debug(res)
return res
def _struct_ban(self, ban, type_):
# name, time = ban.split(', banned on ')
# '76561197984877751 : nickname "Dr.WeeD" banned for 2 hours on 2020.12.03-12.40.08 for "None" by admin "test"'
steamd_id_64, rest = ban.split(" :", 1)
name = None
reason = None
by = None
date = None
if "nickname" in rest:
name = rest.split('" banned', 1)[0]
name = name.split(' nickname "', 1)[-1]
groups = re.match(".*(\d{4}\.\d{2}\.\d{2}-\d{2}\.\d{2}.\d{2}) (.*)", ban)
if groups and groups.groups():
date = groups.group(1)
try:
reason = groups.group(2)
except:
logger.error("Unable to extract reason from ban")
by = ban.split(" by admin ", -1)[-1]
return {
"type": type_,
"name": name,
"steam_id_64": steamd_id_64,
# TODO FIX
"timestamp": None,
"ban_time": date,
"reason": reason,
"by": by.replace('"', ""),
"raw": ban,
}
def get_bans(self):
temp_bans = [self._struct_ban(b, "temp") for b in self.get_temp_bans()]
bans = [self._struct_ban(b, "perma") for b in self.get_perma_bans()]
# Most recent first
bans.reverse()
return temp_bans + bans
def do_unban(self, steam_id_64):
bans = self.get_bans()
type_to_func = {
"temp": self.do_remove_temp_ban,
"perma": self.do_remove_perma_ban,
}
for b in bans:
if b.get("steam_id_64") == steam_id_64:
type_to_func[b["type"]](b["raw"])
def get_ban(self, steam_id_64):
"""
get all bans from steam_id_64
@param steam_id_64: steam_id_64 of a user
@return: a array of bans
"""
bans = self.get_bans()
return list(filter(lambda x: x.get("steam_id_64") == steam_id_64, bans))
@ttl_cache(ttl=60 * 60)
def get_vip_ids(self):
res = super().get_vip_ids()
l = []
for item in res:
try:
steam_id_64, name = item.split(" ", 1)
name = name.replace('"', "")
name = name.replace("\n", "")
name = name.strip()
except ValueError:
self._reconnect()
raise
l.append(dict(zip((STEAMID, NAME), (steam_id_64, name))))
return sorted(l, key=lambda d: d[NAME])
def do_remove_vip(self, steam_id_64):
with invalidates(Rcon.get_vip_ids):
return super().do_remove_vip(steam_id_64)
def do_add_vip(self, name, steam_id_64):
with invalidates(Rcon.get_vip_ids):
return super().do_add_vip(steam_id_64, name)
def do_remove_all_vips(self):
vips = self.get_vip_ids()
for vip in vips:
try:
self.do_remove_vip(vip["steam_id_64"])
except (CommandFailedError, ValueError):
self._reconnect()
raise
return "SUCCESS"
@ttl_cache(ttl=60)
def get_next_map(self):
current = self.get_map()
current = current.replace("_RESTART", "")
rotation = self.get_map_rotation()
try:
next_id = rotation.index(current)
next_id += 1
if next_id == len(rotation):
next_id = 0
return rotation[next_id]
except ValueError:
logger.error(
"Can't find %s in rotation, assuming next map as first map of rotation",
current,
)
return rotation[0]
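    # Illustrative behaviour with a hypothetical rotation ["map_a", "map_b"]: if the
    # current map is "map_b" (or "map_b_RESTART"), get_next_map() wraps and returns "map_a".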
def set_map(self, map_name):
with invalidates(Rcon.get_map):
res = super().set_map(map_name)
if res != "SUCCESS":
raise CommandFailedError(res)
@ttl_cache(ttl=10)
def get_map(self):
current_map = super().get_map()
if not self.map_regexp.match(current_map):
raise CommandFailedError("Server returned wrong data")
return current_map
@ttl_cache(ttl=60 * 60)
def get_name(self):
name = super().get_name()
if len(name) > self.MAX_SERV_NAME_LEN:
raise CommandFailedError("Server returned wrong data")
return name
@ttl_cache(ttl=60 * 60)
def get_team_switch_cooldown(self):
return int(super().get_team_switch_cooldown())
def set_team_switch_cooldown(self, minutes):
with invalidates(Rcon.get_team_switch_cooldown):
return super().set_team_switch_cooldown(minutes)
@ttl_cache(ttl=60 * 60)
def get_autobalance_threshold(self):
return int(super().get_autobalance_threshold())
def set_autobalance_threshold(self, max_diff):
with invalidates(Rcon.get_autobalance_threshold):
return super().set_autobalance_threshold(max_diff)
@ttl_cache(ttl=60 * 60)
def get_idle_autokick_time(self):
return int(super().get_idle_autokick_time())
def set_idle_autokick_time(self, minutes):
with invalidates(Rcon.get_idle_autokick_time):
return super().set_idle_autokick_time(minutes)
@ttl_cache(ttl=60 * 60)
def get_max_ping_autokick(self):
return int(super().get_max_ping_autokick())
def set_max_ping_autokick(self, max_ms):
with invalidates(Rcon.get_max_ping_autokick):
return super().set_max_ping_autokick(max_ms)
@ttl_cache(ttl=60 * 60)
def get_queue_length(self):
return int(super().get_queue_length())
def set_queue_length(self, num):
with invalidates(Rcon.get_queue_length):
return super().set_queue_length(num)
@ttl_cache(ttl=60 * 60)
def get_vip_slots_num(self):
return super().get_vip_slots_num()
def set_vip_slots_num(self, num):
with invalidates(Rcon.get_vip_slots_num):
return super().set_vip_slots_num(num)
def get_welcome_message(self):
red = get_redis_client()
msg = red.get("WELCOME_MESSAGE")
if msg:
return msg.decode()
return msg
def set_welcome_message(self, msg, save=True):
from rcon.broadcast import format_message
prev = None
try:
red = get_redis_client()
if save:
prev = red.getset("WELCOME_MESSAGE", msg)
else:
prev = red.get("WELCOME_MESSAGE")
red.expire("WELCOME_MESSAGE", 60 * 60 * 24 * 7)
except Exception:
logger.exception("Can't save message in redis: %s", msg)
try:
formatted = format_message(self, msg)
except Exception:
logger.exception("Unable to format message")
formatted = msg
super().set_welcome_message(formatted)
return prev.decode() if prev else ""
def get_broadcast_message(self):
red = get_redis_client()
msg = red.get("BROADCAST_MESSAGE")
        if isinstance(msg, bytes):
return msg.decode()
return msg
def set_broadcast(self, msg, save=True):
from rcon.broadcast import format_message
prev = None
try:
red = get_redis_client()
if save:
prev = red.getset("BROADCAST_MESSAGE", msg)
else:
prev = red.get("BROADCAST_MESSAGE")
red.expire("BROADCAST_MESSAGE", 60 * 30)
except Exception:
logger.exception("Can't save message in redis: %s", msg)
try:
formatted = format_message(self, msg)
except Exception:
logger.exception("Unable to format message")
formatted = msg
super().set_broadcast(formatted)
return prev.decode() if prev else ""
@ttl_cache(ttl=20)
def get_slots(self):
res = super().get_slots()
if not self.slots_regexp.match(res):
raise CommandFailedError("Server returned crap")
return res
@ttl_cache(ttl=5, cache_falsy=False)
def get_status(self):
slots = self.get_slots()
return {
"name": self.get_name(),
"map": self.get_map(),
"nb_players": slots,
"short_name": os.getenv("SERVER_SHORT_NAME", None) or "HLL Rcon",
"player_count": slots.split("/")[0],
}
@ttl_cache(ttl=60 * 60 * 24)
def get_maps(self):
return super().get_maps()
def get_server_settings(self):
settings = {}
for name, type_ in self.settings:
try:
settings[name] = type_(getattr(self, f"get_{name}")())
except:
logger.exception("Failed to retrieve settings %s", name)
raise
return settings
def do_save_setting(self, name, value):
if not name in dict(self.settings):
raise ValueError(f"'{name}' can't be save with this method")
return getattr(self, f"set_{name}")(value)
def _convert_relative_time(self, from_, time_str):
time, unit = time_str.split(" ")
if unit == "ms":
return from_ - timedelta(milliseconds=int(time))
if unit == "sec":
return from_ - timedelta(seconds=float(time))
if unit == "min":
minutes, seconds = time.split(":")
return from_ - timedelta(minutes=float(minutes), seconds=float(seconds))
if unit == "hours":
hours, minutes, seconds = time.split(":")
return from_ - timedelta(
hours=int(hours), minutes=int(minutes), seconds=int(seconds)
)
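    # e.g. _convert_relative_time(now, "30 sec") -> now - 30s, and
    #      _convert_relative_time(now, "2:30 min") -> now - 2min30s (illustrative values).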
@staticmethod
def _extract_time(time_str):
groups = Rcon.log_time_regexp.match(time_str)
if not groups:
raise ValueError("Unable to extract time from '%s'", time_str)
try:
return datetime.fromtimestamp(int(groups.group(1)))
except (ValueError, TypeError) as e:
raise ValueError("Time '%s' is not a valid integer", time_str) from e
@ttl_cache(ttl=2)
def get_structured_logs(
self, since_min_ago, filter_action=None, filter_player=None
):
try:
raw = super().get_logs(since_min_ago)
except socket.timeout:
# The hll server just hangs when there are no logs for the requested time
raw = ""
return self.parse_logs(raw, filter_action, filter_player)
@ttl_cache(ttl=60 * 60)
def get_profanities(self):
return super().get_profanities()
@ttl_cache(ttl=60 * 60)
def get_autobalance_enabled(self):
return super().get_autobalance_enabled() == "on"
@ttl_cache(ttl=60 * 60)
def get_votekick_enabled(self):
return super().get_votekick_enabled() == "on"
@ttl_cache(ttl=60 * 60)
def get_votekick_threshold(self):
res = super().get_votekick_threshold()
if isinstance(res, str):
return res.strip()
return res
def set_autobalance_enabled(self, bool_):
with invalidates(self.get_autobalance_enabled):
return super().set_autobalance_enabled("on" if bool_ else "off")
def set_votekick_enabled(self, bool_):
with invalidates(self.get_votekick_enabled):
return super().set_votekick_enabled("on" if bool_ else "off")
def set_votekick_threshold(self, threshold_pairs):
# Todo use proper data structure
with invalidates(self.get_votekick_threshold):
res = super().set_votekick_threshold(threshold_pairs)
print(f"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! {res}")
logger.error("Threshold res %s", res)
if res.lower().startswith("error"):
logger.error("Unable to set votekick threshold: %s", res)
raise CommandFailedError(res)
def do_reset_votekick_threshold(self):
with invalidates(self.get_votekick_threshold):
return super().do_reset_votekick_threshold()
def set_profanities(self, profanities):
current = self.get_profanities()
with invalidates(self.get_profanities):
removed = set(current) - set(profanities)
added = set(profanities) - set(current)
if removed:
self.do_unban_profanities(list(removed))
if added:
self.do_ban_profanities(list(added))
return profanities
def do_unban_profanities(self, profanities):
if not isinstance(profanities, list):
profanities = [profanities]
with invalidates(self.get_profanities):
return super().do_unban_profanities(",".join(profanities))
def do_ban_profanities(self, profanities):
if not isinstance(profanities, list):
profanities = [profanities]
with invalidates(self.get_profanities):
return super().do_ban_profanities(",".join(profanities))
def do_kick(self, player, reason):
with invalidates(Rcon.get_players):
return super().do_kick(player, reason)
def do_temp_ban(
self, player=None, steam_id_64=None, duration_hours=2, reason="", admin_name=""
):
if player and player in super().get_players():
            # When banning a player by steam id, if he is currently in game he won't be banned immediately
steam_id_64 = None
with invalidates(Rcon.get_players, Rcon.get_temp_bans):
return super().do_temp_ban(
player, steam_id_64, duration_hours, reason, admin_name
)
def do_remove_temp_ban(self, ban_log):
with invalidates(Rcon.get_temp_bans):
return super().do_remove_temp_ban(ban_log)
def do_remove_perma_ban(self, ban_log):
with invalidates(Rcon.get_perma_bans):
return super().do_remove_perma_ban(ban_log)
def do_perma_ban(self, player=None, steam_id_64=None, reason="", admin_name=""):
if player and player in super().get_players():
            # When banning a player by steam id, if he is currently in game he won't be banned immediately
steam_id_64 = None
with invalidates(Rcon.get_players, Rcon.get_perma_bans):
return super().do_perma_ban(player, steam_id_64, reason, admin_name)
@ttl_cache(60 * 5)
def get_map_rotation(self):
l = super().get_map_rotation()
for map_ in l:
if not self.map_regexp.match(map_):
raise CommandFailedError("Server return wrong data")
return l
def do_add_map_to_rotation(self, map_name):
return self.do_add_maps_to_rotation([map_name])
def do_remove_map_from_rotation(self, map_name):
return self.do_remove_maps_from_rotation([map_name])
def do_remove_maps_from_rotation(self, maps):
with invalidates(Rcon.get_map_rotation):
for map_name in maps:
super().do_remove_map_from_rotation(map_name)
return "SUCCESS"
def do_add_maps_to_rotation(self, maps):
with invalidates(Rcon.get_map_rotation):
for map_name in maps:
super().do_add_map_to_rotation(map_name)
return "SUCCESS"
def do_randomize_map_rotation(self, maps=None):
maps = maps or self.get_maps()
current = self.get_map_rotation()
random.shuffle(maps)
for m in maps:
if m in current:
self.do_remove_map_from_rotation(m)
self.do_add_map_to_rotation(m)
return maps
def set_maprotation(self, rotation):
if not rotation:
raise CommandFailedError("Empty rotation")
rotation = list(rotation)
logger.info("Apply map rotation %s", rotation)
current = self.get_map_rotation()
if rotation == current:
logger.debug("Map rotation is the same, nothing to do")
return current
with invalidates(Rcon.get_map_rotation):
if len(current) == 1:
logger.info("Current rotation is a single map")
for idx, m in enumerate(rotation):
if m not in current:
self.do_add_map_to_rotation(m)
if m in current and idx != 0:
self.do_remove_map_from_rotation(m)
self.do_add_map_to_rotation(m)
if current[0] not in rotation:
                    self.do_remove_map_from_rotation(current[0])
return rotation
first = rotation.pop(0)
to_remove = set(current) - {first}
if to_remove == set(current):
self.do_add_map_to_rotation(first)
self.do_remove_maps_from_rotation(to_remove)
self.do_add_maps_to_rotation(rotation)
return [first] + rotation
@ttl_cache(ttl=60 * 2)
def get_scoreboard(self, minutes=180, sort="ratio"):
logs = self.get_structured_logs(minutes, "KILL")
scoreboard = []
for player in logs["players"]:
if not player:
continue
kills = 0
death = 0
for log in logs["logs"]:
if log["player"] == player:
kills += 1
elif log["player2"] == player:
death += 1
if kills == 0 and death == 0:
continue
scoreboard.append(
{
"player": player,
"(real) kills": kills,
"(real) death": death,
"ratio": kills / max(death, 1),
}
)
scoreboard = sorted(scoreboard, key=lambda o: o[sort], reverse=True)
for o in scoreboard:
o["ratio"] = "%.2f" % o["ratio"]
return scoreboard
@ttl_cache(ttl=60 * 2)
def get_teamkills_boards(self, sort="TK Minutes"):
logs = self.get_structured_logs(180)
scoreboard = []
for player in logs["players"]:
if not player:
continue
first_timestamp = float("inf")
last_timestamp = 0
tk = 0
death_by_tk = 0
for log in logs["logs"]:
if log["player"] == player or log["player2"] == player:
first_timestamp = min(log["timestamp_ms"], first_timestamp)
last_timestamp = max(log["timestamp_ms"], last_timestamp)
if log["action"] == "TEAM KILL":
if log["player"] == player:
tk += 1
elif log["player2"] == player:
death_by_tk += 1
if tk == 0 and death_by_tk == 0:
continue
scoreboard.append(
{
"player": player,
"Teamkills": tk,
"Death by TK": death_by_tk,
"Estimated play time (minutes)": (last_timestamp - first_timestamp)
// 1000
// 60,
"TK Minutes": tk
/ max((last_timestamp - first_timestamp) // 1000 // 60, 1),
}
)
scoreboard = sorted(scoreboard, key=lambda o: o[sort], reverse=True)
for o in scoreboard:
o["TK Minutes"] = "%.2f" % o["TK Minutes"]
return scoreboard
@staticmethod
def parse_logs(raw, filter_action=None, filter_player=None):
synthetic_actions = [
"CHAT[Allies]",
"CHAT[Axis]",
"CHAT",
"VOTE STARTED",
"VOTE COMPLETED",
]
now = datetime.now()
res = []
actions = set()
players = set()
for line in raw.split("\n"):
if not line:
continue
try:
time, rest = line.split("] ", 1)
# time = self._convert_relative_time(now, time[1:])
time = Rcon._extract_time(time[1:])
                sub_content = action = player = player2 = weapon = steam_id_64_1 = steam_id_64_2 = None
content = rest
if rest.startswith("DISCONNECTED") or rest.startswith("CONNECTED"):
action, content = rest.split(" ", 1)
elif rest.startswith("KILL") or rest.startswith("TEAM KILL"):
action, content = rest.split(": ", 1)
elif rest.startswith("CHAT"):
match = Rcon.chat_regexp.match(rest)
groups = match.groups()
scope = groups[0]
side = groups[4]
player = groups[3]
steam_id_64_1 = groups[-2]
action = f"CHAT[{side}][{scope}]"
sub_content = groups[-1]
# import ipdb; ipdb.set_trace()
content = f"{player}: {sub_content} ({steam_id_64_1})"
elif rest.startswith("VOTE"):
# [15:49 min (1606998428)] VOTE Player [[fr]ELsass_blitz] Started a vote of type (PVR_Kick_Abuse) against [拢儿]. VoteID: [1]
action = "VOTE"
if rest.startswith("VOTE Player") and " against " in rest.lower():
action = "VOTE STARTED"
groups = re.match(
r"VOTE Player \[(.*)\].* against \[(.*)\]\. VoteID: \[\d+\]",
rest,
)
player = groups[1]
player2 = groups[2]
elif rest.startswith("VOTE Player") and "voted" in rest.lower():
groups = re.match(r"VOTE Player \[(.*)\] voted.*", rest)
player = groups[1]
elif "completed" in rest.lower():
action = "VOTE COMPLETED"
elif "kick" in rest.lower():
action = "VOTE COMPLETED"
groups = re.match(r"VOTE Vote Kick \{(.*)\}.*", rest)
player = groups[1]
else:
player = ""
player2 = None
sub_content = rest.split("VOTE")[-1]
content = rest.split("VOTE")[-1]
elif rest.upper().startswith("PLAYER"):
action = "CAMERA"
_, content = rest.split(" ", 1)
matches = re.match("\[(.*)\s{1}\((\d+)\)\]", content)
if matches and len(matches.groups()) == 2:
player, steam_id_64_1 = matches.groups()
_, sub_content = content.rsplit("]", 1)
else:
logger.error("Unable to parse line: %s", line)
else:
logger.error("Unkown type line: '%s'", line)
continue
if action in {"CONNECTED", "DISCONNECTED"}:
player = content
if action in {"KILL", "TEAM KILL"}:
parts = re.split(Rcon.player_info_pattern + r" -> ", content, 1)
player = parts[1]
steam_id_64_1 = parts[-2]
player2 = parts[-1]
player2, weapon = player2.rsplit(" with ", 1)
player2, *_, steam_id_64_2 = Rcon.player_info_regexp.match(
player2
).groups()
players.add(player)
players.add(player2)
actions.add(action)
except:
logger.exception("Invalid line: '%s'", line)
continue
if filter_action and not action.startswith(filter_action):
continue
if filter_player and filter_player not in line:
continue
res.append(
{
"version": 1,
"timestamp_ms": int(time.timestamp() * 1000),
"relative_time_ms": (time - now).total_seconds() * 1000,
"raw": line,
"line_without_time": rest,
"action": action,
"player": player,
"steam_id_64_1": steam_id_64_1,
"player2": player2,
"steam_id_64_2": steam_id_64_2,
"weapon": weapon,
"message": content,
"sub_content": sub_content,
}
)
res.reverse()
return {
"actions": list(actions) + synthetic_actions,
"players": list(players),
"logs": res,
}
if __name__ == "__main__":
from rcon.settings import SERVER_INFO
r = Rcon(SERVER_INFO)
print(r.get_map_rotation())
print(r.do_randomize_map_rotation())
print(r.get_map_rotation())
| [] | [] | [
"SERVER_SHORT_NAME"
] | [] | ["SERVER_SHORT_NAME"] | python | 1 | 0 | |
src/main/java/com/murphy1/foodlogger/services/impl/NutritionixServiceImpl.java | package com.murphy1.foodlogger.services.impl;
import com.murphy1.foodlogger.model.NutritionixBaseProduct;
import com.murphy1.foodlogger.model.NutritionixDetailedProduct;
import com.murphy1.foodlogger.services.NutritionixService;
import lombok.extern.slf4j.Slf4j;
import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.entity.ByteArrayEntity;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.util.EntityUtils;
import org.springframework.stereotype.Service;
import org.json.*;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
import java.util.List;
@Service
@Slf4j
public class NutritionixServiceImpl implements NutritionixService {
private String nutritionixInstantSearch = "https://trackapi.nutritionix.com/v2/search/instant?query=";
private String nutritionixNutrientsSearch = "https://trackapi.nutritionix.com/v2/natural/nutrients";
// Headers
private String contentType = "application/json";
private String appId = System.getenv("APP_ID");
private String appKey = System.getenv("APP_KEY");
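    // Nutritionix credentials are read once from the APP_ID and APP_KEY environment
    // variables when this service bean is constructed.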
@Override
public List<NutritionixBaseProduct> searchQuery(String query){
//replace spaces in the query with '+'
String[] strArray = query.split(" ");
String newQuery = "";
if (strArray.length == 1){
newQuery = query;
}else{
newQuery = strArray[0];
for (int i = 1; i < strArray.length; i++){
newQuery = newQuery.concat("+"+strArray[i]);
}
}
HttpClient httpClient = HttpClientBuilder.create().build();
HttpGet getRequest = new HttpGet(nutritionixInstantSearch + newQuery);
getRequest.setHeader("Content-Type", contentType);
getRequest.setHeader("x-app-id", appId);
getRequest.setHeader("x-app-key", appKey);
String str = "";
try{
HttpResponse response = httpClient.execute(getRequest);
HttpEntity entity = response.getEntity();
str = EntityUtils.toString(entity);
}catch (IOException e){
e.printStackTrace();
}
JSONObject jsonObject = new JSONObject(str);
JSONArray common = (JSONArray) jsonObject.get("common");
List<NutritionixBaseProduct> productList = new ArrayList<>();
// only return top 20 searches to maintain speed
for (int i = 0; i < 20; i++){
NutritionixBaseProduct product = new NutritionixBaseProduct();
try{
product.setFood_name(formatJson((common.getJSONObject(i).getString("food_name"))));
product.setServing_unit(common.getJSONObject(i).getString("serving_unit"));
product.setTag_name(common.getJSONObject(i).getString("tag_name"));
product.setServing_qty(common.getJSONObject(i).getInt("serving_qty"));
product.setTag_id(common.getJSONObject(i).getInt("tag_id"));
productList.add(product);
}catch (JSONException e){
log.info("Error thrown as one field was returned as null from the Nutritionix API!");
}
}
return productList;
}
@Override
public NutritionixDetailedProduct getDetailedProduct(String query) throws UnsupportedEncodingException {
HttpClient httpClient = HttpClientBuilder.create().build();
HttpPost postRequest = new HttpPost(nutritionixNutrientsSearch);
String s = "query";
String body = "{\""+s+"\" : \""+query+"\"}";
HttpEntity entity = new ByteArrayEntity(body.getBytes("UTF-8"));
postRequest.setEntity(entity);
postRequest.setHeader("Content-Type", contentType);
postRequest.setHeader("x-app-id", appId);
postRequest.setHeader("x-app-key", appKey);
String responseEntityString = "";
try{
HttpResponse response = httpClient.execute(postRequest);
HttpEntity responseEntity = response.getEntity();
responseEntityString = EntityUtils.toString(responseEntity);
}catch (IOException e){
e.printStackTrace();
}
JSONObject jsonObject = new JSONObject(responseEntityString);
JSONArray foods = (JSONArray) jsonObject.get("foods");
NutritionixDetailedProduct product = new NutritionixDetailedProduct();
product.setFoodName(formatJson(foods.getJSONObject(0).getString("food_name")));
product.setServingQuantity(foods.getJSONObject(0).getInt("serving_qty"));
product.setServingUnit(foods.getJSONObject(0).getString("serving_unit"));
product.setServingWeightGrams(foods.getJSONObject(0).getInt("serving_weight_grams"));
try{
product.setCalories(foods.getJSONObject(0).getInt("nf_calories"));
}catch (JSONException e){
log.info("Error thrown from Nutritionix API! Food has no calories!");
}
try{
product.setTotalFat(foods.getJSONObject(0).getInt("nf_total_fat"));
}catch (JSONException e){
log.info("Error thrown from Nutritionix API! Food has no total fat!");
}
try{
product.setSaturatedFat(foods.getJSONObject(0).getInt("nf_saturated_fat"));
}catch (JSONException e){
log.info("Error thrown from Nutritionix API! Food has no saturated fat!");
}
try{
product.setCholesterol(foods.getJSONObject(0).getInt("nf_cholesterol"));
}catch (JSONException e){
log.info("Error thrown from Nutritionix API! Food has no cholesterol!");
}
try{
product.setSodium(foods.getJSONObject(0).getInt("nf_sodium"));
}catch (JSONException e){
log.info("Error thrown from Nutritionix API! Food has no sodium!");
}
try{
product.setTotalCarbs(foods.getJSONObject(0).getInt("nf_total_carbohydrate"));
}catch (JSONException e){
log.info("Error thrown from Nutritionix API! Food has no carbs!");
}
try{
product.setDietaryFiber(foods.getJSONObject(0).getInt("nf_dietary_fiber"));
}catch (JSONException e){
log.info("Error thrown from Nutritionix API! Food has no fiber!");
}
try{
product.setSugars(foods.getJSONObject(0).getInt("nf_sugars"));
}catch (JSONException e){
log.info("Error thrown from Nutritionix API! Food has no sugars!");
}
try{
product.setProtein(foods.getJSONObject(0).getInt("nf_protein"));
}catch (JSONException e){
log.info("Error thrown from Nutritionix API! Food has no protein!");
}
try{
product.setPotassium(foods.getJSONObject(0).getInt("nf_potassium"));
}catch (JSONException e){
log.info("Error thrown from Nutritionix API! Food has no potassium!");
}
return product;
}
private String formatJson(String unformattedString){
        // Capitalizes the first letter of a value from the JSON response before it is shown to the user
String unfStr = unformattedString;
return unfStr.substring(0, 1).toUpperCase() + unfStr.substring(1);
}
}
| [
"\"APP_ID\"",
"\"APP_KEY\""
] | [] | [
"APP_ID",
"APP_KEY"
] | [] | ["APP_ID", "APP_KEY"] | java | 2 | 0 | |
manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'insta2.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
env/vars.go | package env
import (
"fmt"
"os"
"github.com/joho/godotenv"
)
var _ = godotenv.Load(".env.example") // Load environment variables from the ".env.example" file
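// ConnectionString is assembled in the go-sql-driver/mysql DSN format:
// "user:pass@tcp(host:port)/dbname?charset=utf8mb4&parseTime=True&loc=Local",
// with each piece taken from the environment variables loaded above.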
var (
ConnectionString = fmt.Sprintf("%s:%s@tcp(%s:%s)/%s?charset=utf8mb4&parseTime=True&loc=Local",
os.Getenv("user"),
os.Getenv("pass"),
os.Getenv("host"),
os.Getenv("port"),
os.Getenv("db_name"))
)
const AllowedCORSDomain = "http://localhost"
| [
"\"user\"",
"\"pass\"",
"\"host\"",
"\"port\"",
"\"db_name\""
] | [] | [
"pass",
"host",
"db_name",
"user",
"port"
] | [] | ["pass", "host", "db_name", "user", "port"] | go | 5 | 0 | |
experiments_transformers/generate_data.py | # -*- coding: utf-8 -*-
import os
import requests
import json
import time
import random
import itertools
import numpy as np
from tqdm import tqdm
from preprocessing import (
read_ts_dataset,
normalize_dataset,
moving_windows_preprocessing,
denormalize,
)
NUM_CORES = 7
def notify_slack(msg, webhook=None):
if webhook is None:
webhook = os.environ.get("webhook_slack")
if webhook is not None:
try:
requests.post(webhook, json.dumps({"text": msg}))
except:
print("Error while notifying slack")
print(msg)
else:
print("NO WEBHOOK FOUND")
# Preprocessing parameters
with open("parameters.json") as f:
PARAMETERS = json.load(f)
NORMALIZATION_METHOD = PARAMETERS["normalization_method"]
PAST_HISTORY_FACTOR = PARAMETERS[
"past_history_factor"
] # past_history = forecast_horizon * past_history_factor
# This variable stores the urls of each dataset.
with open("../data/datasets.json") as f:
DATASETS = json.load(f)
DATASET_NAMES = [d for d in list(DATASETS.keys())]
def generate_dataset(args):
dataset, norm_method, past_history_factor = args
train_url = DATASETS[dataset]["train"]
test_url = DATASETS[dataset]["test"]
if not os.path.exists("../data/{}/train.csv".format(dataset)) or not os.path.exists(
"../data/{}/test.csv".format(dataset)
):
if not os.path.exists("../data/{}".format(dataset)):
os.system("mkdir -p ../data/{}".format(dataset))
os.system("wget -O ../data/{}/train.csv {}".format(dataset, train_url))
os.system("wget -O ../data/{}/test.csv {}".format(dataset, test_url))
if not os.path.exists(
"../data/{}/{}/{}/".format(dataset, norm_method, past_history_factor)
):
os.system(
"mkdir -p ../data/{}/{}/{}/".format(
dataset, norm_method, past_history_factor
)
)
# Read data
train = read_ts_dataset("../data/{}/train.csv".format(dataset))
test = read_ts_dataset("../data/{}/test.csv".format(dataset))
forecast_horizon = test.shape[1]
print(
dataset,
{
"Number of time series": train.shape[0],
"Max length": np.max([ts.shape[0] for ts in train]),
"Min length": np.min([ts.shape[0] for ts in train]),
"Forecast Horizon": forecast_horizon,
},
)
# Normalize data
train, test, norm_params = normalize_dataset(
train, test, norm_method, dtype="float32"
)
norm_params_json = [{k: float(p[k]) for k in p} for p in norm_params]
norm_params_json = json.dumps(norm_params_json)
with open("../data/{}/{}/norm_params.json".format(dataset, norm_method), "w") as f:
f.write(norm_params_json)
# Format training and test input/output data using the moving window strategy
past_history = int(forecast_horizon * past_history_factor)
x_train, y_train, x_test, y_test = moving_windows_preprocessing(
train, test, past_history, forecast_horizon, np.float32, n_cores=NUM_CORES
)
y_test_denorm = np.copy(y_test)
i = 0
for nparams in norm_params:
if len(train[i]) < past_history:
continue
y_test_denorm[i] = denormalize(y_test[i], nparams, method=norm_method)
i += 1
print("TRAINING DATA")
print("Input shape", x_train.shape)
print("Output_shape", y_train.shape)
print()
print("TEST DATA")
print("Input shape", x_test.shape)
print("Output_shape", y_test.shape)
np.save(
"../data/{}/{}/{}/x_train.np".format(dataset, norm_method, past_history_factor),
x_train,
)
np.save(
"../data/{}/{}/{}/y_train.np".format(dataset, norm_method, past_history_factor),
y_train,
)
np.save(
"../data/{}/{}/{}/x_test.np".format(dataset, norm_method, past_history_factor),
x_test,
)
np.save(
"../data/{}/{}/{}/y_test.np".format(dataset, norm_method, past_history_factor),
y_test,
)
np.save(
"../data/{}/{}/{}/y_test_denorm.np".format(
dataset, norm_method, past_history_factor
),
y_test_denorm,
)
params = [
(dataset, norm_method, past_history_factor)
for dataset, norm_method, past_history_factor in itertools.product(
DATASET_NAMES, NORMALIZATION_METHOD, PAST_HISTORY_FACTOR
)
]
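# Every combination of dataset, normalization method and past-history factor is
# preprocessed; the loop below is currently restricted to the "SolarEnergy" dataset.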
for i, args in tqdm(enumerate(params)):
t0 = time.time()
dataset, norm_method, past_history_factor = args
if dataset != "SolarEnergy":
continue
generate_dataset(args)
notify_slack(
"[{}/{}] Generated dataset {} with {} normalization and past history factor of {} ({:.2f} s)".format(
i, len(params), dataset, norm_method, past_history_factor, time.time() - t0
)
)
print(
"[{}/{}] Generated dataset {} with {} normalization and past history factor of {} ({:.2f} s)".format(
i, len(params), dataset, norm_method, past_history_factor, time.time() - t0
)
)
| [] | [] | [
"webhook_slack"
] | [] | ["webhook_slack"] | python | 1 | 0 | |
roles/lib_zabbix/library/zbx_template.py | #!/usr/bin/env python
'''
Ansible module for template
'''
# vim: expandtab:tabstop=4:shiftwidth=4
#
# Zabbix template ansible module
#
#
# Copyright 2015 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is in place because each module looks similar to each other.
# These need duplicate code as their behavior is very similar
# but different for each zabbix class.
# pylint: disable=duplicate-code
# pylint: disable=import-error
from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection
def exists(content, key='result'):
    ''' Check if key exists in content and the size of content[key] > 0
'''
if not content.has_key(key):
return False
if not content[key]:
return False
return True
def main():
''' Ansible module for template
'''
module = AnsibleModule(
argument_spec=dict(
zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
zbx_debug=dict(default=False, type='bool'),
name=dict(default=None, type='str'),
state=dict(default='present', type='str'),
),
#supports_check_mode=True
)
zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
module.params['zbx_user'],
module.params['zbx_password'],
module.params['zbx_debug']))
#Set the instance and the template for the rest of the calls
zbx_class_name = 'template'
idname = 'templateid'
tname = module.params['name']
state = module.params['state']
# get a template, see if it exists
content = zapi.get_content(zbx_class_name,
'get',
{'search': {'host': tname},
'selectParentTemplates': 'templateid',
'selectGroups': 'groupid',
'selectApplications': 'applicationid',
'selectDiscoveries': 'extend',
})
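    # The select* parameters ask the Zabbix API to embed the related objects
    # (parent templates, groups, applications, discoveries) in the same response.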
if state == 'list':
module.exit_json(changed=False, results=content['result'], state="list")
if state == 'absent':
if not exists(content):
module.exit_json(changed=False, state="absent")
if not tname:
module.exit_json(failed=True,
changed=False,
                              results='Must specify a template name.',
state="absent")
content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]])
module.exit_json(changed=True, results=content['result'], state="absent")
if state == 'present':
params = {'groups': module.params.get('groups', [{'groupid': '1'}]),
'host': tname,
}
if not exists(content):
# if we didn't find it, create it
content = zapi.get_content(zbx_class_name, 'create', params)
module.exit_json(changed=True, results=content['result'], state='present')
# already exists, we need to update it
# let's compare properties
differences = {}
zab_results = content['result'][0]
for key, value in params.items():
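        # Zabbix returns most values as strings, so each configured value is compared
        # both in raw and stringified form; 'templates' is matched against the
        # 'parentTemplates' key of the existing object.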
if key == 'templates' and zab_results.has_key('parentTemplates'):
if zab_results['parentTemplates'] != value:
differences[key] = value
elif zab_results[key] != str(value) and zab_results[key] != value:
differences[key] = value
if not differences:
module.exit_json(changed=False, results=content['result'], state="present")
# We have differences and need to update
differences[idname] = zab_results[idname]
content = zapi.get_content(zbx_class_name, 'update', differences)
module.exit_json(changed=True, results=content['result'], state="present")
module.exit_json(failed=True,
changed=False,
results='Unknown state passed. %s' % state,
state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. This are required
from ansible.module_utils.basic import *
main()
| [] | [] | [
"ZABBIX_USER",
"ZABBIX_PASSWORD"
] | [] | ["ZABBIX_USER", "ZABBIX_PASSWORD"] | python | 2 | 0 | |
main.go | package main
import (
"fmt"
"net"
"net/http"
"os"
"os/signal"
"sync"
"sync/atomic"
"syscall"
"time"
)
const (
defaultPort = "8080"
)
// connManager keeps a record of active connections and their states
type connManager struct {
activeConns map[net.Conn]*atomic.Value
mu sync.Mutex
}
// setState is a callback called from the http server when the connection state changes
func (cm *connManager) setState(nc net.Conn, state http.ConnState) {
cm.mu.Lock()
defer cm.mu.Unlock()
if cm.activeConns == nil {
cm.activeConns = make(map[net.Conn]*atomic.Value)
}
switch state {
case http.StateNew:
cm.activeConns[nc] = &atomic.Value{}
case http.StateHijacked, http.StateClosed:
delete(cm.activeConns, nc)
}
if v, ok := cm.activeConns[nc]; ok {
v.Store(state)
}
}
// closeIdleConns closes idle connections and reports if there are still
// any in-flight connections
func (cm *connManager) closeIdleConns() bool {
cm.mu.Lock()
defer cm.mu.Unlock()
inflight := false
for nc, v := range cm.activeConns {
state, ok := v.Load().(http.ConnState)
if !ok || state == http.StateNew || state == http.StateActive {
inflight = true
continue
}
nc.Close()
delete(cm.activeConns, nc)
}
return inflight
}
type handler struct{}
func newHandler() *handler {
return &handler{}
}
func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
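	// The optional "wait" form value simulates a slow response (default 10µs) so the
	// graceful-shutdown handling of in-flight connections can be exercised.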
d, err := time.ParseDuration(r.FormValue("wait"))
if err != nil {
d = 10 * time.Microsecond
}
fmt.Fprintf(w, "hello ")
time.Sleep(d)
fmt.Fprintf(w, "world!\n")
}
func main() {
// Register signal handler for SIGTERM and SIGINT
signals := make(chan os.Signal, 1)
signal.Notify(signals, syscall.SIGTERM, syscall.SIGINT)
connMgr := new(connManager)
port := os.Getenv("PORT")
if port == "" {
port = defaultPort
}
addr := fmt.Sprintf(":%s", port)
server := http.Server{
Addr: addr,
Handler: newHandler(),
ConnState: connMgr.setState, // register callback when the connection state changes
}
listener, err := net.Listen("tcp", addr)
if err != nil {
fmt.Fprintln(os.Stderr, "error starting server:", err)
os.Exit(1)
}
errCh := make(chan error, 1)
go func() {
errCh <- server.Serve(listener)
}()
fmt.Println("Serving on port: " + port)
select {
case err := <-errCh:
fmt.Fprintln(os.Stderr, "error starting server:", err)
os.Exit(1)
case <-signals:
// It is required that the listener is closed as soon as the signal is
// received to prevent any new traffic from getting in
listener.Close()
// busy loop until all connections are closed
ticker := time.NewTicker(10 * time.Millisecond)
for {
if stillActive := connMgr.closeIdleConns(); !stillActive {
return
}
<-ticker.C
}
}
}
| [
"\"PORT\""
] | [] | [
"PORT"
] | [] | ["PORT"] | go | 1 | 0 | |
internal/protoc/downloader.go | // Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package protoc
import (
"archive/zip"
"bytes"
"context"
"crypto/sha512"
"encoding/base64"
"fmt"
"io/ioutil"
"net/http"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strings"
"sync"
"time"
"github.com/gofrs/flock"
"github.com/uber/prototool/internal/file"
"github.com/uber/prototool/internal/settings"
"github.com/uber/prototool/internal/vars"
"go.uber.org/multierr"
"go.uber.org/zap"
)
const (
fileLockRetryDelay = 250 * time.Millisecond
fileLockTimeout = 10 * time.Second
)
type downloader struct {
lock sync.RWMutex
logger *zap.Logger
cachePath string
protocURL string
config settings.Config
// the looked-up and verified to exist base path
cachedBasePath string
// If set, Prototool will invoke protoc and include
// the well-known-types, from the configured binPath
// and wktPath.
protocBinPath string
protocWKTPath string
}
func newDownloader(config settings.Config, options ...DownloaderOption) (*downloader, error) {
downloader := &downloader{
config: config,
logger: zap.NewNop(),
}
for _, option := range options {
option(downloader)
}
if downloader.config.Compile.ProtobufVersion == "" {
downloader.config.Compile.ProtobufVersion = vars.DefaultProtocVersion
}
if downloader.protocBinPath != "" || downloader.protocWKTPath != "" {
if downloader.protocURL != "" {
return nil, fmt.Errorf("cannot use protoc-url in combination with either protoc-bin-path or protoc-wkt-path")
}
if downloader.protocBinPath == "" || downloader.protocWKTPath == "" {
return nil, fmt.Errorf("both protoc-bin-path and protoc-wkt-path must be set")
}
cleanBinPath := filepath.Clean(downloader.protocBinPath)
if _, err := os.Stat(cleanBinPath); os.IsNotExist(err) {
return nil, err
}
cleanWKTPath := filepath.Clean(downloader.protocWKTPath)
if _, err := os.Stat(cleanWKTPath); os.IsNotExist(err) {
return nil, err
}
protobufPath := filepath.Join(cleanWKTPath, "google", "protobuf")
info, err := os.Stat(protobufPath)
if os.IsNotExist(err) {
return nil, err
}
if !info.IsDir() {
return nil, fmt.Errorf("%q is not a valid well-known types directory", protobufPath)
}
downloader.protocBinPath = cleanBinPath
downloader.protocWKTPath = cleanWKTPath
}
return downloader, nil
}
func (d *downloader) Download() (string, error) {
d.lock.RLock()
cachedBasePath := d.cachedBasePath
d.lock.RUnlock()
if cachedBasePath != "" {
return cachedBasePath, nil
}
return d.cache()
}
func (d *downloader) ProtocPath() (string, error) {
if d.protocBinPath != "" {
return d.protocBinPath, nil
}
basePath, err := d.Download()
if err != nil {
return "", err
}
return filepath.Join(basePath, "bin", "protoc"), nil
}
func (d *downloader) WellKnownTypesIncludePath() (string, error) {
if d.protocWKTPath != "" {
return d.protocWKTPath, nil
}
basePath, err := d.Download()
if err != nil {
return "", err
}
return filepath.Join(basePath, "include"), nil
}
func (d *downloader) Delete() error {
basePath, err := d.getBasePathNoVersionOSARCH()
if err != nil {
return err
}
d.cachedBasePath = ""
d.logger.Debug("deleting", zap.String("path", basePath))
return os.RemoveAll(basePath)
}
func (d *downloader) cache() (_ string, retErr error) {
if d.protocBinPath != "" {
return d.protocBinPath, nil
}
d.lock.Lock()
defer d.lock.Unlock()
basePath, err := d.getBasePath()
if err != nil {
return "", err
}
lock, err := newFlock(basePath)
if err != nil {
return "", err
}
if err := flockLock(lock); err != nil {
return "", err
}
defer func() { retErr = multierr.Append(retErr, flockUnlock(lock)) }()
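	// With the cache locked, reuse an already extracted protoc if it passes the
	// version check and download it otherwise; the flock keeps concurrent prototool
	// invocations from racing on the same cache directory.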
if err := d.checkDownloaded(basePath); err != nil {
if err := d.download(basePath); err != nil {
return "", err
}
if err := d.checkDownloaded(basePath); err != nil {
return "", err
}
d.logger.Debug("protobuf downloaded", zap.String("path", basePath))
} else {
d.logger.Debug("protobuf already downloaded", zap.String("path", basePath))
}
d.cachedBasePath = basePath
return basePath, nil
}
func (d *downloader) checkDownloaded(basePath string) error {
buffer := bytes.NewBuffer(nil)
cmd := exec.Command(filepath.Join(basePath, "bin", "protoc"), "--version")
cmd.Stdout = buffer
if err := cmd.Run(); err != nil {
return err
}
if d.protocURL != "" {
// skip version check since we do not know the version
return nil
}
output := strings.TrimSpace(buffer.String())
d.logger.Debug("output from protoc --version", zap.String("output", output))
expected := fmt.Sprintf("libprotoc %s", d.config.Compile.ProtobufVersion)
if output != expected {
return fmt.Errorf("expected %s from protoc --version, got %s", expected, output)
}
return nil
}
func (d *downloader) download(basePath string) (retErr error) {
return d.downloadInternal(basePath, runtime.GOOS, runtime.GOARCH)
}
func (d *downloader) downloadInternal(basePath string, goos string, goarch string) (retErr error) {
data, err := d.getDownloadData(goos, goarch)
if err != nil {
return err
}
// this is a working but hacky unzip
// there must be a library for this
// we don't properly copy directories, modification times, etc
readerAt := bytes.NewReader(data)
zipReader, err := zip.NewReader(readerAt, int64(len(data)))
if err != nil {
return err
}
for _, file := range zipReader.File {
fileMode := file.Mode()
d.logger.Debug("found protobuf file in zip", zap.String("fileName", file.Name), zap.Any("fileMode", fileMode))
if fileMode.IsDir() {
continue
}
readCloser, err := file.Open()
if err != nil {
return err
}
defer func() {
retErr = multierr.Append(retErr, readCloser.Close())
}()
fileData, err := ioutil.ReadAll(readCloser)
if err != nil {
return err
}
writeFilePath := filepath.Join(basePath, file.Name)
if err := os.MkdirAll(filepath.Dir(writeFilePath), 0755); err != nil {
return err
}
writeFile, err := os.OpenFile(writeFilePath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, fileMode)
if err != nil {
return err
}
defer func() {
retErr = multierr.Append(retErr, writeFile.Close())
}()
if _, err := writeFile.Write(fileData); err != nil {
return err
}
d.logger.Debug("wrote protobuf file", zap.String("path", writeFilePath))
}
return nil
}
func (d *downloader) getDownloadData(goos string, goarch string) (_ []byte, retErr error) {
url, err := d.getProtocURL(goos, goarch)
if err != nil {
return nil, err
}
defer func() {
if retErr == nil {
d.logger.Debug("downloaded protobuf zip file", zap.String("url", url))
}
}()
switch {
case strings.HasPrefix(url, "file://"):
return ioutil.ReadFile(strings.TrimPrefix(url, "file://"))
case strings.HasPrefix(url, "http://"), strings.HasPrefix(url, "https://"):
response, err := http.Get(url)
if err != nil || response.StatusCode != http.StatusOK {
// if there is not given protocURL, we tried to
// download this from GitHub Releases, so add
// extra context to the error message
if d.protocURL == "" {
return nil, fmt.Errorf("error downloading %s: %v\nMake sure GitHub Releases has a proper protoc zip file of the form protoc-VERSION-OS-ARCH.zip at https://github.com/protocolbuffers/protobuf/releases/v%s\nNote that many micro versions do not have this, and no version before 3.0.0-beta-2 has this", url, err, d.config.Compile.ProtobufVersion)
}
return nil, err
}
defer func() {
if response.Body != nil {
retErr = multierr.Append(retErr, response.Body.Close())
}
}()
return ioutil.ReadAll(response.Body)
default:
return nil, fmt.Errorf("unknown url, can only handle http, https, file: %s", url)
}
}
func (d *downloader) getProtocURL(goos string, goarch string) (string, error) {
if d.protocURL != "" {
return d.protocURL, nil
}
_, unameM, err := getUnameSUnameMPaths(goos, goarch)
if err != nil {
return "", err
}
protocS, err := getProtocSPath(goos)
if err != nil {
return "", err
}
// Protoc download URLs for release candidates don't use the hyphen in the version part of the URL
re := regexp.MustCompile(`(rc)-(\d+$)`)
version := re.ReplaceAllString(d.config.Compile.ProtobufVersion, "$1$2")
return fmt.Sprintf(
"https://github.com/protocolbuffers/protobuf/releases/download/v%s/protoc-%s-%s-%s.zip",
version,
d.config.Compile.ProtobufVersion,
protocS,
unameM,
), nil
}
func (d *downloader) getBasePath() (string, error) {
basePathNoVersion, err := d.getBasePathNoVersion()
if err != nil {
return "", err
}
return filepath.Join(basePathNoVersion, d.getBasePathVersionPart()), nil
}
func (d *downloader) getBasePathNoVersionOSARCH() (string, error) {
basePath := d.cachePath
var err error
if basePath == "" {
basePath, err = getDefaultBasePathNoOSARCH()
if err != nil {
return "", err
}
} else {
basePath, err = file.AbsClean(basePath)
if err != nil {
return "", err
}
}
if err := file.CheckAbs(basePath); err != nil {
return "", err
}
return basePath, nil
}
func (d *downloader) getBasePathNoVersion() (string, error) {
basePath := d.cachePath
var err error
if basePath == "" {
basePath, err = getDefaultBasePath()
if err != nil {
return "", err
}
} else {
basePath, err = file.AbsClean(basePath)
if err != nil {
return "", err
}
}
if err := file.CheckAbs(basePath); err != nil {
return "", err
}
return filepath.Join(basePath, "protobuf"), nil
}
func (d *downloader) getBasePathVersionPart() string {
if d.protocURL != "" {
// we don't know the version or what is going on here
hash := sha512.New()
_, _ = hash.Write([]byte(d.protocURL))
return base64.URLEncoding.EncodeToString(hash.Sum(nil))
}
return d.config.Compile.ProtobufVersion
}
func getDefaultBasePath() (string, error) {
return getDefaultBasePathInternal(runtime.GOOS, runtime.GOARCH, os.Getenv)
}
func getDefaultBasePathInternal(goos string, goarch string, getenvFunc func(string) string) (string, error) {
basePathNoOSARCH, err := getDefaultBasePathInternalNoOSARCH(goos, goarch, getenvFunc)
if err != nil {
return "", err
}
unameS, unameM, err := getUnameSUnameMPaths(goos, goarch)
if err != nil {
return "", err
}
return filepath.Join(basePathNoOSARCH, unameS, unameM), nil
}
func getDefaultBasePathNoOSARCH() (string, error) {
return getDefaultBasePathInternalNoOSARCH(runtime.GOOS, runtime.GOARCH, os.Getenv)
}
func getDefaultBasePathInternalNoOSARCH(goos string, goarch string, getenvFunc func(string) string) (string, error) {
unameS, _, err := getUnameSUnameMPaths(goos, goarch)
if err != nil {
return "", err
}
xdgCacheHome := getenvFunc("XDG_CACHE_HOME")
if xdgCacheHome != "" {
return filepath.Join(xdgCacheHome, "prototool"), nil
}
home := getenvFunc("HOME")
if home == "" {
return "", fmt.Errorf("HOME is not set")
}
switch unameS {
case "Darwin":
return filepath.Join(home, "Library", "Caches", "prototool"), nil
case "Linux":
return filepath.Join(home, ".cache", "prototool"), nil
default:
return "", fmt.Errorf("invalid value for uname -s: %v", unameS)
}
}
func getProtocSPath(goos string) (string, error) {
switch goos {
case "darwin":
return "osx", nil
case "linux":
return "linux", nil
default:
return "", fmt.Errorf("unsupported value for runtime.GOOS: %v", goos)
}
}
func getUnameSUnameMPaths(goos string, goarch string) (string, string, error) {
var unameS string
switch goos {
case "darwin":
unameS = "Darwin"
case "linux":
unameS = "Linux"
default:
return "", "", fmt.Errorf("unsupported value for runtime.GOOS: %v", goos)
}
var unameM string
switch goarch {
case "amd64":
unameM = "x86_64"
case "arm64":
unameM = "x86_64"
default:
return "", "", fmt.Errorf("unsupported value for runtime.GOARCH: %v", goarch)
}
return unameS, unameM, nil
}
func newFlock(basePath string) (*flock.Flock, error) {
fileLockPath := basePath + ".lock"
// mkdir is atomic
if err := os.MkdirAll(filepath.Dir(fileLockPath), 0755); err != nil {
return nil, err
}
return flock.New(fileLockPath), nil
}
func flockLock(lock *flock.Flock) error {
ctx, cancel := context.WithTimeout(context.Background(), fileLockTimeout)
defer cancel()
locked, err := lock.TryLockContext(ctx, fileLockRetryDelay)
if err != nil {
return fmt.Errorf("error acquiring file lock at %s - if you think this is in error, remove %s: %v", lock.Path(), lock.Path(), err)
}
if !locked {
return fmt.Errorf("could not acquire file lock at %s after %v - if you think this is in error, remove %s", lock.Path(), fileLockTimeout, lock.Path())
}
return nil
}
func flockUnlock(lock *flock.Flock) error {
if err := lock.Unlock(); err != nil {
return fmt.Errorf("error unlocking file lock at %s: %v", lock.Path(), err)
}
return nil
}
| [] | [] | [] | [] | [] | go | 0 | 0 | |
activity-write-daemon/main.go | package main
import (
"os"
"log"
"encoding/json"
"activity-write-daemon/messaging"
"activity-write-daemon/persistence"
"activity-write-daemon/parsers"
"activity-write-daemon/models"
)
func storeEvent(activity *models.Activity) {
log.Printf("Event %s is being processed..\n", activity.ID)
isStored := persistence.PushEvent(activity)
if isStored {
log.Printf("Event stored?: %s\n", activity.ID)
}
}
func publishProjectionMsg(activity *models.Activity) error {
mqStr1 := os.Getenv("READ_MQ_CONN_STR")
mqStr2 := os.Getenv("READ_MQ_SLAVE_CONN_STR")
exchangeName := os.Getenv("ACTIVITY_EXCHANGE_NAME")
serializedData, serializeErr := json.Marshal(activity)
if serializeErr != nil {
return serializeErr
}
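	// Forward the serialized activity to the read-side exchange; both the primary and
	// the slave broker connection strings are passed, presumably for failover.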
err := messaging.PublishEvent(mqStr1, mqStr2, exchangeName, string(serializedData))
return err
}
// main is the application's composition root.
func main() {
log.Println("GO Social Write Daemon Starting....")
mqConnStr := os.Getenv("WRITE_MQ_CONN_STR")
queueName := os.Getenv("WRITE_API_QUEUE_NAME")
msgs, err := messaging.ConsumeQueue(mqConnStr, queueName)
if err != nil {
log.Fatal("Daemon cannot consume messages")
}
forever := make(chan bool)
go func() {
for d := range msgs {
activity, err := parsers.ParseActivityJson(d.Body)
if err != nil {
log.Printf("Error %v\n", err)
log.Printf("This event cannot be unmarshalled: %s\n", d.Body)
break
}
storeEvent(activity)
err = publishProjectionMsg(activity)
if err != nil {
log.Printf("Publishing to projection exchange failed: %s \n", activity.ID)
				break
}
d.Ack(false)
}
}()
log.Printf(" [*] Waiting for messages. To exit press CTRL+C")
<-forever
} | [
"\"READ_MQ_CONN_STR\"",
"\"READ_MQ_SLAVE_CONN_STR\"",
"\"ACTIVITY_EXCHANGE_NAME\"",
"\"WRITE_MQ_CONN_STR\"",
"\"WRITE_API_QUEUE_NAME\""
] | [] | [
"WRITE_API_QUEUE_NAME",
"READ_MQ_CONN_STR",
"WRITE_MQ_CONN_STR",
"ACTIVITY_EXCHANGE_NAME",
"READ_MQ_SLAVE_CONN_STR"
] | [] | ["WRITE_API_QUEUE_NAME", "READ_MQ_CONN_STR", "WRITE_MQ_CONN_STR", "ACTIVITY_EXCHANGE_NAME", "READ_MQ_SLAVE_CONN_STR"] | go | 5 | 0 | |
ImageNet/train_imagenet_resnetnew_lp.py | # Copyright (c) HP-NTU Digital Manufacturing Corporate Lab, Nanyang Technological University, Singapore.
#
# This source code is licensed under the Apache-2.0 license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
# import torchvision.models as models
import models_lp as models
# import numpy as np
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ZeroBN ImageNet Training')
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet18)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=128, type=int,
metavar='N',
help='mini-batch size (default: 128), this is the total '
'batch size of all GPUs on the current node when '
'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)',
dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--world-size', default=-1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed', action='store_true',
help='Use multi-processing distributed training to launch '
'N processes per node, which has N GPUs. This is the '
'fastest way to use PyTorch for either single node or '
'multi node data parallel training')
parser.add_argument('--save', default='./logs', type=str, help='the folder to save checkpoint')
parser.add_argument('--syncbn', default=1, type=int, help='if need sync bn')
parser.add_argument('--sparsity-regularization', '-sr', dest='sr', action='store_true',
help='train with channel sparsity regularization')
parser.add_argument('--ssr', type=float, default=0.0001,
help='scale sparse rate (default: 0.0001)')
parser.add_argument('--zerobn', type=int, default=30, help='epoch at which BN scaling factors start being zeroed')
parser.add_argument('--prune_ratio', type=float, default=0.7, help='prune ratio')
parser.add_argument('--our', type=int, default=0, help='If zerobn method')
parser.add_argument('--interval', type=int, default=2, help='interval (in epochs) between zeroing and recovery phases')
parser.add_argument('--autostart_zerobn', '-auto', dest='auto', action='store_true',
help='auto start zerobn')
best_acc1 = 0
skip_list = [4, 5, 8, 11, 14, 15, 18, 21, 24, 27, 28, 31, 34, 37, 40, 43, 46, 47, 50, 53] # = id + 1
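# BatchNorm layers whose 1-based position matches an entry in skip_list are never
# zeroed; presumably these are the BN layers on ResNet-50 downsample/shortcut paths
# that must keep their channels intact.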
def zeroBN(model, args):
total = 0
count = 0
for m in model.modules():
if isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.SyncBatchNorm):
count = count + 1
if count not in skip_list:
total += m.weight.data.shape[0]
count = 0
bn = torch.zeros(total)
index = 0
for m in model.modules():
if isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.SyncBatchNorm):
count = count + 1
if count not in skip_list:
size = m.weight.data.shape[0]
bn[index:(index + size)] = m.weight.data.abs().clone()
index += size
count = 0
y, i = torch.sort(bn)
thre_index = int(total * args.prune_ratio)
thre = y[thre_index]
for m in model.modules():
if isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.SyncBatchNorm):
# tmp = m.weight.data.abs()
count = count + 1
if count not in skip_list:
k = m.weight.data.abs() - thre
k = k + k.abs()
k = k.sign()
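                # k is now a 0/1 mask: (|w| - thre) + |(|w| - thre)| is positive only
                # where |w| exceeds the global threshold, so sign() yields 1 there and
                # 0 elsewhere; multiplying zeroes the pruned channels' scale and bias.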
m.weight.data = m.weight.data * k
m.bias.data = m.bias.data * k
nonzero = torch.nonzero(k).size()[0]
all = k.size()[0]
if 1.0 * nonzero / all < 0.25:
m.weight.data = m.weight.data * 0
m.bias.data = m.bias.data * 0
count = 0
def main(times):
args = parser.parse_args()
args.save = os.path.join(args.save, str(times))
assert 'resnet50_new' in args.arch
args.mode = args.interval + 1
if args.auto:
        print('auto start zerobn')
args.zerobn = int(args.prune_ratio * (-args.epochs/3) + args.epochs*2/3)
args.zerobn = (args.zerobn//10)*10
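        # Linear schedule: prune_ratio 0 starts zeroing at ~2/3 of training and
        # prune_ratio 1 at ~1/3; the start epoch is rounded down to a multiple of 10.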
args.auto = False
print(args.zerobn)
if not os.path.exists(args.save):
os.makedirs(args.save)
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
# args.times = times
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
global best_acc1
args.gpu = gpu
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
# create model
if args.pretrained:
print("=> using pre-trained model '{}'".format(args.arch))
model = models.__dict__[args.arch](pretrained=True)
else:
print("=> creating model '{}'".format(args.arch))
model = models.__dict__[args.arch]()
if not torch.cuda.is_available():
print('using CPU, this will be slow')
elif args.distributed:
# For multiprocessing distributed, DistributedDataParallel constructor
# should always set the single device scope, otherwise,
# DistributedDataParallel will use all available devices.
if args.syncbn == 1:
model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
# When using a single GPU per process and per
# DistributedDataParallel, we need to divide the batch size
# ourselves based on the total number of GPUs we have
args.batch_size = int(args.batch_size / ngpus_per_node)
args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True)
else:
model.cuda()
# DistributedDataParallel will divide and allocate batch_size to all
# available GPUs if device_ids are not set
model = torch.nn.parallel.DistributedDataParallel(model, find_unused_parameters=True)
elif args.gpu is not None:
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
else:
# DataParallel will divide and allocate batch_size to all available GPUs
if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
model.features = torch.nn.DataParallel(model.features)
model.cuda()
else:
model = torch.nn.DataParallel(model).cuda()
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
if args.gpu is None:
checkpoint = torch.load(args.resume)
else:
# Map model to be loaded to specified single gpu.
loc = 'cuda:{}'.format(args.gpu)
checkpoint = torch.load(args.resume, map_location=loc)
args.start_epoch = checkpoint['epoch']
best_acc1 = checkpoint['best_acc1']
if args.gpu is not None:
# best_acc1 may be from a checkpoint from a different GPU
best_acc1 = best_acc1.to(args.gpu)
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
# Data loading code
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
num_workers=args.workers, pin_memory=True, sampler=train_sampler)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
if args.evaluate:
validate(val_loader, model, criterion, args)
return
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
adjust_learning_rate(optimizer, epoch, args)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch, args)
# evaluate on validation set
acc1 = validate(val_loader, model, criterion, args)
if args.our == 1:
if epoch == args.zerobn:
best_acc1 = 0.0
if epoch <= args.zerobn or epoch % args.interval == args.mode or epoch == args.epochs - 1:
print('zero epoch:\t', epoch)
# remember best acc@1 and save checkpoint
is_best = acc1 > best_acc1
best_acc1 = max(acc1, best_acc1)
# if args.times == 0:
if not args.multiprocessing_distributed or (args.multiprocessing_distributed
and args.rank % ngpus_per_node == 0):
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_acc1': best_acc1,
'optimizer': optimizer.state_dict(),
}, is_best, args.save)
else:
is_best = acc1 > best_acc1
best_acc1 = max(acc1, best_acc1)
# if args.times == 0:
if not args.multiprocessing_distributed or (args.multiprocessing_distributed
and args.rank % ngpus_per_node == 0):
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_acc1': best_acc1,
'optimizer': optimizer.state_dict(),
}, is_best, args.save)
def train(train_loader, model, criterion, optimizer, epoch, args):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(train_loader),
[batch_time, data_time, losses, top1, top5],
prefix="Epoch: [{}]".format(epoch))
# switch to train mode
model.train()
end = time.time()
for i, (images, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
if torch.cuda.is_available():
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
if args.sr:
count = 0
for m in model.modules():
if isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.SyncBatchNorm):
count = count + 1
if count not in skip_list:
m.weight.grad.data.add_(args.ssr * torch.sign(m.weight.data)) # L1
count = 0
optimizer.step()
if args.our and epoch >= args.zerobn and (epoch % args.interval == args.mode or epoch == args.epochs - 1):
zeroBN(model, args)
if epoch == args.zerobn:
args.mode = epoch % args.interval
zeroBN(model, args)
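        # From args.zerobn onward the sub-threshold BN channels are re-zeroed every
        # `interval` epochs (args.mode records the matching phase) and once more on the
        # final epoch, letting the network recover in the epochs in between.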
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
def validate(val_loader, model, criterion, args):
batch_time = AverageMeter('Time', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(val_loader),
[batch_time, losses, top1, top5],
prefix='Test: ')
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (images, target) in enumerate(val_loader):
# print(images)
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
if torch.cuda.is_available():
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
# TODO: this should also be done with the ProgressMeter
print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
def save_checkpoint(state, is_best, folder):
filename = os.path.join(folder, 'checkpoint.pth.tar')
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, os.path.join(folder, 'model_best.pth.tar'))
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print('\t'.join(entries))
def _get_batch_fmtstr(self, num_batches):
num_digits = len(str(num_batches // 1))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def adjust_learning_rate(optimizer, epoch, args):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
ad = int(args.epochs / 3)
lr = args.lr * (0.1 ** (epoch // ad))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
main(0)
| [] | [] | [
"RANK",
"WORLD_SIZE"
] | [] | ["RANK", "WORLD_SIZE"] | python | 2 | 0 | |
Sentinel.pyt | # -*- coding: UTF-8 -*-
# Copyright 2018 Esri Deutschland GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: Tons of!!!
VERSION=20180622
import arcpy,os,sys
HERE=os.path.dirname(__file__); sys.path.append(os.path.join(HERE,"lib"))
try: reload(sensub) # With this, changes to the module's .py file become effective on a toolbox Refresh (F5) from within ArcGIS, i.e. without having to restart ArcGIS.
except NameError: import sensub
sensub.arcpy, sensub.THERE, sensub.SYMDIR = arcpy, HERE, os.path.join(HERE,"lyr")
ARCMAP = arcpy.sys.executable.endswith("ArcMap.exe")
if ARCMAP:
MXD, CME = arcpy.mapping.MapDocument("CURRENT"), "CopiedMapExtent"
sensub.MXD, sensub.CME = MXD, CME
DHUSUSR = arcpy.Parameter("DHUSUSR", "DHuS user name", datatype="GPString")
DHUSPWD = arcpy.Parameter("DHUSPWD", "DHuS password", datatype="GPStringHidden")
DHUSALT = arcpy.Parameter("DHUSALT", "DHuS alternative site", datatype="GPString", parameterType="Optional")
DHUSALT.filter.type="ValueList"; DHUSALT.filter.list=["CODE-DE"]
class Toolbox (object):
def __init__ (self):
"""Initialize the toolbox (toolbox name is the name of the .pyt file)."""
self.label = "Sentinel"
self.tools = [Search,Download] # Keep in sync with toolbox tool class names (see below)!
self.alias = ""
class Search (object):
"""WHERE does this python docstring appear? In ArcCatalog? Elsewhere??"""
i=dict() # Map parameter name to parameter index; provides 'parameter by name'.
w=dict() # Forward warning messages from updateParameters to updateMessages. (Is there a better way to accomplish this?)
WGS84 = arcpy.SpatialReference("WGS 1984")
def __init__ (self):
"""Initialize the tool (tool name is the name of the class)."""
self.label = "Search DHuS catalog" # Displayed name.
self.description = "Search Data Hub Services' (DHuS) product catalog for Sentinel-2 products (L1C, or L2A where available) according to given criteria (in particular spatiotemporal constraints and cloud cover limit)."
self.canRunInBackground = False
if ARCMAP: # Dispose well-known broken in_memory layers:
try:
for df in arcpy.mapping.ListDataFrames(MXD):
for lyr in arcpy.mapping.ListLayers(MXD, CME+"*", df):
if lyr.isBroken: arcpy.mapping.RemoveLayer(df, lyr)
except RuntimeError: pass # "Object: CreateObject cannot open map document"! This happens after having edited the "Item Description..." (enforces to restart ArcGIS)!
def getParameterInfo (self): # Why arcpy always calls getParameterInfo multiple times (up to seven times when a tool is called using arcpy.ImportToolbox("P:/ath/to/File.pyt"))?? Instantiation of each toolbox tool class happens even oftener.
"""Prepare parameter definitions."""
params=[DHUSUSR,DHUSPWD,DHUSALT]
PROCLEVEL = arcpy.Parameter("PROCLEVEL", "Processing level", datatype="GPString")
PROCLEVEL.filter.type="ValueList"; PROCLEVEL.filter.list=["1C","2A"]; params.append(PROCLEVEL)
SENSINGMIN = arcpy.Parameter("SENSINGMIN", "Sensing earliest date", datatype="GPDate"); params.append(SENSINGMIN)
params.append(arcpy.Parameter("SENSINGMAX", "Sensing latest date", datatype="GPDate", parameterType="Optional"))
AOIENV = arcpy.Parameter("AOIENV", "Area Of Interest (AOI) Envelope in decimal degrees", datatype="GPEnvelope"); params.append(AOIENV)
aoiMap = arcpy.Parameter("aoiMap", "Use current map extent for AOI Envelope", datatype="GPBoolean", parameterType="Optional"); params.append(aoiMap)
if not ARCMAP: aoiMap.enabled=False
params.append(arcpy.Parameter("aoiLayer", "Use layer extent for AOI Envelope", datatype="GPLayer", parameterType="Optional"))
# OVERLAPMIN = arcpy.Parameter("OVERLAPMIN", "Minimum AOI overlap percentage", datatype="GPLong", parameterType="Optional")
# OVERLAPMIN.filter.type="Range"; OVERLAPMIN.filter.list=[1,100]; params.append(OVERLAPMIN)
CLOUDYMAX = arcpy.Parameter("CLOUDYMAX", "Maximum cloud cover percentage", datatype="GPLong", parameterType="Optional")
CLOUDYMAX.filter.type="Range"; CLOUDYMAX.filter.list=[0,100]; params.append(CLOUDYMAX)
FGDB = arcpy.Parameter("FGDB", "File geodatabase holding the search results catalog", datatype="DEWorkspace")
FGDB.filter.list=["Local Database"]; params.append(FGDB)
CATNAME = arcpy.Parameter("CATNAME", "Name of the local search results catalog", datatype="GPString"); params.append(CATNAME)
OPMERGE = arcpy.Parameter("OPMERGE", "Merge new finds with ones from previous searches", datatype="GPBoolean", parameterType="Optional"); params.append(OPMERGE)
ROWSMAX = arcpy.Parameter("ROWSMAX", "Maximum count of search result rows", datatype="GPLong", parameterType="Optional")
ROWSMAX.filter.type="Range"; ROWSMAX.filter.list=[1,5000]; params.append(ROWSMAX)
params.append(arcpy.Parameter("PRODCAT_", datatype="DERasterCatalog", symbology=sensub.dep("Product.lyr"), parameterType="Derived", direction="Output")) # Why direction must/can be specified when "Derived" implicitly enforces "Output"??
# params.append(arcpy.Parameter("aoiTmp", datatype="DEFeatureClass", symbology=sensub.dep(CME+".lyr"), parameterType="Derived", direction="Output"))
# params.append(arcpy.Parameter("debug", "debug", datatype="GPString", parameterType="Optional"))
# Preset:
sensub.recall(self, params, ["aoiMap","aoiLayer"])
if PROCLEVEL.value is None: PROCLEVEL.value="1C"
if SENSINGMIN.value is None: SENSINGMIN.value = sensub.dayStart(datetime.date.today() - datetime.timedelta(days=30))
if AOIENV.value is None: AOIENV.value = sensub.AOIDEMO
aoiMap.value=False # Paranoid.
# if OVERLAPMIN.value is None: OVERLAPMIN.value=1
if CLOUDYMAX.value is None: CLOUDYMAX.value=50
if CATNAME.value is None: CATNAME.value="Product"
if OPMERGE.value is None: OPMERGE.value=True
if ROWSMAX.value is None: ROWSMAX.value=25
return params
def updateParameters (self, params):
"""Modify the values and properties of parameters before internal validation is performed. This method is called whenever a parameter has been changed."""
dhusAlt,PROCLEVEL = params[self.i["DHUSALT"]].value, params[self.i["PROCLEVEL"]]
if not DHUSALT.hasBeenValidated:
if dhusAlt=="CODE-DE":
PROCLEVEL.value="1C"; PROCLEVEL.enabled=False
else: PROCLEVEL.enabled=True
AOIENV,aoiLayer,aoiMap = params[self.i["AOIENV"]], params[self.i["aoiLayer"]], params[self.i["aoiMap"]]
if not AOIENV.hasBeenValidated:
aoiLayer.enabled = AOIENV.enabled = True # Why GPEnvelope's widget allows "Clear" when not being enabled??
if ARCMAP: aoiMap.enabled=True
aoiMap.value = False
elif not aoiMap.hasBeenValidated:
if aoiMap.value and ARCMAP:
e = MXD.activeDataFrame.extent
pe = sensub.projectExtent(self, e, "aoiMap", "Active data frame")
if pe:
params[self.i["AOIENV"]].value = pe
tmpName = "%s%d" % (CME, arcpy.mapping.ListDataFrames(MXD).index(MXD.activeDataFrame))
tmpSource = os.path.join("in_memory",tmpName)
fresh=False
if not arcpy.Exists(tmpSource):
arcpy.CreateFeatureclass_management("in_memory", tmpName, "POLYGON", spatial_reference=self.WGS84)
fresh=True
ll = arcpy.mapping.ListLayers(MXD, tmpName, MXD.activeDataFrame)
if not ll: arcpy.mapping.AddLayer(MXD.activeDataFrame, arcpy.mapping.Layer(tmpSource), "TOP") # Most notably: Placed above all group layers.
if fresh or not ll: arcpy.ApplySymbologyFromLayer_management(tmpName, sensub.dep(CME+".lyr"))
if not fresh: # Dispose previous CME beforehand:
with arcpy.da.UpdateCursor(tmpSource,"OID@") as rows:
for row in rows: rows.deleteRow()
with arcpy.da.InsertCursor(tmpSource,"SHAPE@") as rows: rows.insertRow([e.polygon])
if not fresh: arcpy.RefreshActiveView()
AOIENV.enabled = aoiLayer.enabled = False
else: aoiMap.value=False
else: AOIENV.enabled = aoiLayer.enabled = True
elif not aoiLayer.hasBeenValidated:
if aoiLayer.value:
dismiss=False
if not aoiLayer.valueAsText.endswith(".lyr") and ARCMAP:
dfLayers = (lyr.name for lyr in arcpy.mapping.ListLayers(MXD, data_frame=MXD.activeDataFrame))
if aoiLayer.valueAsText not in dfLayers:
self.w["aoiLayer"]="Layer not found in active data frame, nothing copied over."
dismiss=True
if not dismiss:
if hasattr(aoiLayer.value,"dataSource") and aoiLayer.value.dataSource: # "Basemap" has no dataSource attribute! And 'geoprocessing Layer object' has no supports() function.
d = arcpy.Describe(aoiLayer.value.dataSource)
if d.dataType=="FeatureDataset": self.w["aoiLayer"]="FeatureDataset found, nothing copied over."
else:
pe = sensub.projectExtent(self, d.extent, "aoiLayer", "Data source")
if pe: params[self.i["AOIENV"]].value = pe
else: self.w["aoiLayer"]="Data source info not found, nothing copied over."
# else: aoiLayer.value="" # Silently dismiss.
AOIENV.enabled = aoiMap.enabled = False
else: # Release other:
AOIENV.enabled=True
if ARCMAP: aoiMap.enabled=True
CATNAME = params[self.i["CATNAME"]]
if not CATNAME.hasBeenValidated: CATNAME.value = arcpy.ValidateTableName(CATNAME.value, params[self.i["FGDB"]].value)
def updateMessages (self, params):
"""Modify the messages created by internal validation for each tool parameter. This method is called after internal validation."""
for k in self.w.keys(): params[self.i[k]].setWarningMessage(self.w.pop(k))
SENSINGMIN,SENSINGMAX = params[self.i["SENSINGMIN"]], params[self.i["SENSINGMAX"]]
if SENSINGMIN.value:
sensub.enforceDateOnly(SENSINGMIN)
S2first,present = datetime.date(2015,6,28), datetime.date.today()
if SENSINGMIN.value.date()<S2first:
SENSINGMIN.setWarningMessage("Earliest image from Sentinel-2 is dated "+S2first.isoformat())
SENSINGMIN.value = sensub.dayStart(S2first)
elif SENSINGMIN.value.date()>present:
SENSINGMIN.setWarningMessage("Sensing earliest date cannot lie in the future.")
SENSINGMIN.value = sensub.dayStart(present)
sensub.enforceDateOnly(SENSINGMAX)
if SENSINGMIN.value and SENSINGMAX.value and SENSINGMIN.value.date()>=SENSINGMAX.value.date():
SENSINGMAX.setWarningMessage("Sensing latest date must not be before or equal to Sensing earliest date.")
SENSINGMAX.value = SENSINGMIN.value + datetime.timedelta(days=1)
AOIENV = params[self.i["AOIENV"]]
if AOIENV.value:
e = AOIENV.value
if (e.XMax-e.XMin)>10 or (e.YMax-e.YMin)>10: AOIENV.setErrorMessage("Must be within an area described by 10° of longitude and 10° of latitude.") # DHuS OpenSearch limitation.
# aoiLayer = params[self.i["aoiLayer"]]
# if aoiLayer.hasWarning() or aoiLayer.hasError(): # Release all:
# aoiLayer.enabled = AOIENV.enabled = True
# if ARCMAP: params[self.i["aoiMap"]].enabled=True
FGDB = params[self.i["FGDB"]]
if FGDB.valueAsText and FGDB.valueAsText.endswith(".lyr"): FGDB.setErrorMessage("Not a workspace.") # Why DEWorkspace validates a .lyr file as a Workspace??
OPMERGE = params[self.i["OPMERGE"]]
if not OPMERGE.value: OPMERGE.setWarningMessage("Without Merge, existing finds from previous searches will be deleted from the local search results catalog.")
def execute (self, params, messages):
"""Apply the tool."""
sensub.memorize(params)
FGDB,CATNAME = params[self.i["FGDB"]], params[self.i["CATNAME"]]
prodCat,fresh = os.path.join(FGDB.valueAsText,CATNAME.valueAsText), False
sensub.setEnv("PRODCAT", prodCat) # Preset for Download tool.
e = params[self.i["AOIENV"]].value; aoiEnv = "%f %f %f %f" % (e.XMin,e.YMin,e.XMax,e.YMax)
sensub.auth(params[self.i["DHUSUSR"]].value, params[self.i["DHUSPWD"]].value, params[self.i["DHUSALT"]].value)
finds = sensub.search(params[self.i["PROCLEVEL"]].value, params[self.i["SENSINGMIN"]].value, params[self.i["SENSINGMAX"]].value, aoiEnv, 1, params[self.i["CLOUDYMAX"]].value, params[self.i["ROWSMAX"]].value) # OVERLAPMIN currently not implemented, set to a fixed dummy value.
if not finds: return
if not arcpy.Exists(prodCat):
arcpy.AddWarning(prodCat+": does not yet exist, creating on the fly...")
SRS = arcpy.SpatialReference("WGS 1984 Web Mercator (Auxiliary Sphere)")
arcpy.CreateRasterCatalog_management(FGDB.value, CATNAME.value, SRS, SRS, raster_management_type="MANAGED")
arcpy.AddField_management(prodCat,"SensingDate","DATE")
arcpy.AddField_management(prodCat,"CloudCover","FLOAT")
arcpy.AddField_management(prodCat,"Size","TEXT",field_length=12)
arcpy.AddField_management(prodCat,"Added","DATE")
arcpy.AddField_management(prodCat,"Marked","SHORT")
arcpy.AddField_management(prodCat,"Downloaded","DATE")
arcpy.AddField_management(prodCat,"Found","DATE")
arcpy.AddField_management(prodCat,"Title","TEXT",field_length=80)
arcpy.AddField_management(prodCat,"UUID","TEXT",field_length=36)
arcpy.AddField_management(prodCat,"MD5","TEXT",field_length=32)
for fieldName in "UUID","Name","Title","SensingDate","CloudCover","Size","Added","Marked","Downloaded","Found","MD5": arcpy.AddIndex_management(prodCat,fieldName,fieldName)
fresh=True
truncated=False
if not fresh and not params[self.i["OPMERGE"]].value:
arcpy.AddWarning("Merge option not chosen, therefore deleting all existing finds (that originate from previous searches).")
arcpy.TruncateTable_management(prodCat); truncated=True
allFinds,exclusion = set(finds.iterkeys()), set()
if not truncated: # Exclude already existent products from re-fetching their previews:
existent=set()
with arcpy.da.SearchCursor(prodCat, "UUID", where_clause="UUID IN "+sensub.sql(allFinds), sql_clause=("DISTINCT",None)) as rows:
for r in rows: existent.add(r[0])
exclusion = allFinds.intersection(existent)
newFinds = allFinds.difference(exclusion)
newCount,exclusionCount = len(newFinds), len(exclusion)
arcpy.AddMessage("New finds: %d, already existent in local catalog: %d" % (newCount, exclusionCount))
if newCount>0:
arcpy.AddMessage("Fetching metadata, tile preview(s) for...")
tmpDir = os.path.join(os.path.realpath(os.environ["TEMP"]), "previews") + datetime.datetime.now().strftime("%Y%m%d%H%M%S") # What about module "tempfile"?
os.mkdir(tmpDir); toRemove=set()
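# Sketch of a tempfile-based alternative to the line above (an assumption, not part of the original tool):
#   import tempfile
#   tmpDir = tempfile.mkdtemp(prefix="previews")  # unique directory under the system temp location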
for p,UUID in enumerate(newFinds,1):
Title,SensingDate,CloudCover,Size = finds[UUID]
arcpy.AddMessage("...new find %d/%d (%s)," % (p,newCount, SensingDate))
tiles,urlFormat = sensub.prodTiles(Title,UUID,SensingDate)
tileCount = len(tiles)
for t,(tileName,previewPath) in enumerate(tiles.items(),1):
arcpy.AddMessage(" tile %d/%d (%s)" % (t,tileCount,tileName))
preview,issue,severity = sensub.download(urlFormat%previewPath, tmpDir, "%s.%s.jp2"%(UUID,tileName), slim=True)
if not issue or issue=="already exists":
toRemove.add(preview); auxPath=preview+".aux.xml"
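# The PAM sidecar written below declares pixel value 0 as NoData for all three bands, presumably so the
# black collar around each tile preview renders transparent in ArcGIS (interpretation, not an original comment).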
if not os.path.exists(auxPath):
with open(auxPath,"w") as aux: aux.write("<PAMDataset><PAMRasterBand band='1'><NoDataValue>0</NoDataValue></PAMRasterBand><PAMRasterBand band='2'><NoDataValue>0</NoDataValue></PAMRasterBand><PAMRasterBand band='3'><NoDataValue>0</NoDataValue></PAMRasterBand></PAMDataset>")
toRemove.add(auxPath)
arcpy.AddMessage("Appending new tile preview(s)...")
arcpy.env.rasterStatistics = "NONE" # I.e. no persisted histogram stretching. Alternatively, choose "STATISTICS 1 1 (0 255)".
arcpy.WorkspaceToRasterCatalog_management(tmpDir, prodCat)
for f in toRemove: os.remove(f)
os.rmdir(tmpDir)
arcpy.AddMessage("Updating attributes...")
when = datetime.datetime.now()
wcl = "UUID IS NULL"
if exclusionCount>0: wcl += " OR UUID IN "+sensub.sql(exclusion)
with arcpy.da.UpdateCursor(prodCat, ("UUID","Name","Title","SensingDate","CloudCover","Size","Added","Marked","Found"), where_clause=wcl) as rows:
for r in rows:
if r[0]: r[8]=when # Found again.
else: # New find:
UUID,tileName,sfx = r[1].split(".")
if finds.has_key(UUID): # Paranoid... maybe there are some relicts from a previous unhandled cancellation/crash?
Title,SensingDate,CloudCover,Size = finds[UUID]
r = [UUID, "%s %s"%(tileName,SensingDate.date().isoformat()), Title,SensingDate,CloudCover,Size,when,None,when]
rows.updateRow(r)
params[self.i["PRODCAT_"]].value = prodCat
#HOWTO "Reload Cache" from arcpy? Is comtypes needed for this?
class Download (object):
i=dict() # Map parameter name to parameter index; provides 'parameter by name'.
import collections
modes = collections.OrderedDict([
("CartOnly", "Cart-only (no raster data)"),
("Full", "Full product (cart in parallel)"),
("ImgSel", "Image selection (bare raster)")]) # Provide displayName (label) by mode name.
probs = collections.OrderedDict([("CLD","Cloud"), ("SNW", "Snow/Ice")])
prbNames = probs.keys()
indices = collections.OrderedDict([
("NDWI","NDWI(McFeeters)*"),
("MNDWI","*"),
("nNDVI","-NDVI*"),
("nNDVI_GREEN","-NDVI-GREEN*"),
("SWI",None),
("WRI","*"),
("NWIgreen","NWI(green)*"),
("NWIblue","NWI(blue)*"),
("MBWI",None),
("WI2015","*"),
("AWEInsh","*"),
("AWEIsh","*"),
("SBM2m3_6p2m8p6m11p6m12p2", u"SBM(2•3—6²•8⁶•11⁶•12²)")]) #•⁰¹²³⁴⁵⁶⁷⁸⁹⁺⁻—₀₁₂₃₄₅₆₇₈₉⋅Πρᵢ↑xᵢ
idxFilterable = list()
for n,dn in indices.iteritems():
if dn is None: indices[n]=n
elif dn=="*": indices[n]=n+"*"
if indices[n].endswith("*"): idxFilterable.append(n)
idxNames = indices.keys(); idxNames.reverse()
images = collections.OrderedDict([
("B01","Coastal aerosol, 443nm(20nm), 60m"),
("B02","Blue, 490nm(65nm), 10m"),
("B03","Green, 560nm(35nm), 10m"),
("B04","Red, 665nm(30nm), 10m"),
("B05","Vegetation (red edge), 705nm(15nm), 20m"),
("B06","Vegetation (red edge), 740nm(15nm), 20m"),
("B07","Vegetation (red edge), 783nm(20nm), 20m"),
("B08","NIR (broad), 842nm(115nm), 10m"),
("B8A","Vegetation (red edge), 865nm(20nm), 20m"),
("B09","Water vapour, 945nm(20nm), 60m"),
("B10","(L1C-only) SWIR (cirrus), 1380nm(30nm), 60m"),
("B11","SWIR (snow/ice/cloud), 1610nm(90nm), 20m"),
("B12","SWIR (snow/ice/cloud), 2190nm(180nm), 20m"),
("TCI","Natural color composite (3•8 bit), 10m"),
# L2A-only:
("TCI_20m",None),
("TCI_60m",None),
("CLD","Cloud confidence, 20m"),
("CLD_60m",None),
("SNW","Snow/ice confidence, 20m"),
("SNW_60m",None),
("SCL","Scene Classification, 20m"),
("SCL_60m",None),
("AOT","Aerosol Optical Thickness (at 550nm), 10m"),
("AOT_20m",None),
("AOT_60m",None),
("WVP","Water Vapour, 10m"),
("WVP_20m",None),
("WVP_60m",None),
# ("VIS","(not documented), 20m"), # Extincted with PSD14.5!
("B02_20m",None),
("B02_60m",None),
("B03_20m",None),
("B03_60m",None),
("B04_20m",None),
("B04_60m",None),
("B05_60m",None),
("B06_60m",None),
("B07_60m",None),
("B8A_60m",None),
("B11_60m",None),
("B12_60m",None)])
imgNames = images.keys()
outNames,plainFileBased = ["MSIL1C"], ["TCI1C","TCI","SCL","SNW","CLD"]
outNames += plainFileBased
arcVersion, saEnabled = arcpy.GetInstallInfo()["Version"], False
bmfUtility = True if arcVersion>="10.5" else False # With lower arcVersion, the expression parser of BandArithmeticFunction is too picky.
if ARCMAP:
try:
arcpy.sa.Int(1) # Is there a better way to check whether sa is enabled (not to be confused with "available")?
saEnabled = True
except RuntimeError as err:
if not "ERROR 000824" in err.message: raise
def __init__ (self):
self.label = "Download Marked packages" # Displayed name.
self.description = "For each Marked entry in the local product catalog, download the respective raster data package."
self.canRunInBackground = False
def getParameterInfo (self):
params=[DHUSUSR,DHUSPWD,DHUSALT]
params.append(arcpy.Parameter("PRODCAT", "Product catalog where Marked rows denote download", datatype="DERasterCatalog"))
params.append(arcpy.Parameter("RASTERDIR", "Directory to store downloads", datatype="DEFolder"))
OPMODE = arcpy.Parameter("OPMODE", "Operation mode", datatype="GPString", parameterType="Optional")
OPMODE.filter.type="ValueList"; OPMODE.filter.list=self.modes.values(); params.append(OPMODE)
UNZIP = arcpy.Parameter("UNZIP", "Unzip .zip after download", datatype="GPBoolean", parameterType="Optional"); params.append(UNZIP)
catName=" L2A additions (masks, filters, index selection)"
for n,dn in self.probs.iteritems():
params.append(arcpy.Parameter(n+"MSK", "Create %s mask layer (according to threshold)"%dn, category=catName, datatype="GPBoolean", parameterType="Optional"))
params.append(arcpy.Parameter(n+"FLT", "Apply %s filter to selected filterable* indices (according to threshold)"%dn, category=catName, datatype="GPBoolean", parameterType="Optional"))
threshold = arcpy.Parameter(n+"THR", "%s threshold (probability percentage)"%dn, category=catName, datatype="GPLong", parameterType="Optional")
threshold.filter.type="Range"; threshold.filter.list=[1,100]; params.append(threshold)
for n,dn in self.indices.iteritems(): params.append(arcpy.Parameter(n, dn, category=catName, datatype="GPBoolean", parameterType="Optional"))
catName="Image selection"
for n,dn in self.images.iteritems():
dspName = n if dn is None else "%s: %s"%(n,dn)
params.append(arcpy.Parameter(n, dspName, category=catName, datatype="GPBoolean", parameterType="Optional"))
if n=="TCI": catName="L2A-only images"
for on in self.outNames: params.append(arcpy.Parameter(on+"_", datatype="DERasterDataset", multiValue=True, symbology=sensub.dep(on+".lyr"), parameterType="Derived", direction="Output"))
# Preset:
sensub.recall(self,params)
if OPMODE.value is None: OPMODE.value=self.modes["CartOnly"]
if UNZIP.value is None: UNZIP.value=True
if params[self.i["CLDTHR"]].value is None: params[self.i["CLDTHR"]].value=40
if params[self.i["SNWTHR"]].value is None: params[self.i["SNWTHR"]].value=1
for n in ["TCI"]: #"B04","B03","B02","NDWI"
I = params[self.i[n]]
if I.value is None: I.value=True
return params
def updateParameters (self, params):
OPMODE = params[self.i["OPMODE"]]
isFull = True if OPMODE.value==self.modes["Full"] else False
if not OPMODE.hasBeenValidated:
params[self.i["UNZIP"]].enabled = True if isFull else False
for n in "CLDMSK","SNWMSK": params[self.i[n]].enabled = isFull and self.saEnabled
for n in "CLDFLT","SNWFLT": params[self.i[n]].enabled = isFull and self.saEnabled and self.bmfUtility
for n in self.idxNames: params[self.i[n]].enabled = isFull and self.bmfUtility
isImgSel = True if OPMODE.value==self.modes["ImgSel"] else False
for n in self.imgNames: params[self.i[n]].enabled=isImgSel
for n in self.prbNames:
mskORflt=False
for o in "MSK","FLT":
p = params[self.i[n+o]]
mskORflt = mskORflt or (p.value and p.enabled)
params[self.i[n+"THR"]].enabled = mskORflt and isFull
def updateMessages (self, params):
RASTERDIR = params[self.i["RASTERDIR"]]
if RASTERDIR.value:
rp,MAXLEN = os.path.realpath(RASTERDIR.valueAsText), 11
if len(rp)>MAXLEN: RASTERDIR.setErrorMessage("%s: Path too long (max. %d characters, incl. drive letter and dir. sep.)." % (rp,MAXLEN))
OPMODE,errMsg = params[self.i["OPMODE"]], "%s is empty (see list below), please select at least one of them."
if OPMODE.value==self.modes["ImgSel"] and not sensub.anySelected(self,params,self.imgNames): OPMODE.setErrorMessage(errMsg%"Image selection")
elif OPMODE.value==self.modes["Full"] and not sensub.anySelected(self,params,self.idxFilterable):
for n in self.prbNames:
FLT = params[self.i[n+"FLT"]]
if FLT.value: FLT.setErrorMessage(errMsg%"Selection of filterable* indices")
def execute (self, params, messages):
sensub.memorize(params)
prodCat = params[self.i["PRODCAT"]].valueAsText
catName = os.path.basename(prodCat)
sensub.setEnv("CATNAME", catName); sensub.setEnv("FGDB", os.path.dirname(prodCat)) # Preset for Search tool.
rasterDir = os.path.realpath(params[self.i["RASTERDIR"]].valueAsText)
m,opMode = params[self.i["OPMODE"]].value, dict()
for k in self.modes.keys(): opMode[k] = True if m==self.modes[k] else False
unzip,out,sym, msk,flt,thr,briefName,symName = params[self.i["UNZIP"]].value, dict(), dict(), dict(),dict(),dict(),dict(),dict()
for n,dn in self.probs.iteritems(): msk[n],flt[n],thr[n],briefName[n],symName[n] = params[self.i[n+"MSK"]].value, params[self.i[n+"FLT"]].value, params[self.i[n+"THR"]].value, n+"_20m", dn.replace("/","")
for on in self.outNames: out[on]=list()
toRestore = list()
try:
# cursor = arcpy.da.SearchCursor if opMode["CartOnly"] else arcpy.da.UpdateCursor
with arcpy.da.UpdateCursor(prodCat, ["Name","SensingDate","Size","Marked","Downloaded","Title","UUID","MD5"], where_clause="UUID IN (SELECT DISTINCT UUID FROM "+catName+" WHERE Marked>0)", sql_clause=(None,"ORDER BY Marked DESC")) as rows:
if not sensub.hasNext(rows):
arcpy.AddWarning("Nothing Marked for download!")
return
arcpy.AddMessage("Processing Marked item(s)...")
prodMemo = dict()
if not opMode["ImgSel"]:
dldone = dlskipped = dlfailed = missed = 0
cartName = os.path.join(rasterDir, datetime.datetime.now().strftime("Cart.%Y-%m-%d_%H.%M.%S.xml"))
cartPart = cartName+sensub.PARTIAL
cartFile = open(cartPart,"w")
sensub.flushline('<?xml version="1.0" encoding="UTF-8" standalone="no"?>\n<metalink xmlns="urn:ietf:params:xml:ns:metalink">', cartFile)
dhusAlt = params[self.i["DHUSALT"]].value
sensub.auth(params[self.i["DHUSUSR"]].value, params[self.i["DHUSPWD"]].value, dhusAlt)
if opMode["Full"] and unzip:
if ARCMAP:
for ln in ["Group","BOA","Gray"]+self.plainFileBased:
if self.arcVersion>="10.6" or ln!="BOA": sym[ln] = arcpy.mapping.Layer(sensub.dep(ln+".lyr"))
sensub.SYMGRP,symPath, = sym["Group"], dict()
for sn in ["Index"]+symName.values(): symPath[sn]=sensub.dep(sn+".lyr")
sensub.SYMRFT = symPath,"dummy-11.tif" # For RFT-based layers.
for lyr in arcpy.mapping.ListLayers(MXD, data_frame=MXD.activeDataFrame):
if lyr.visible and lyr.longName.find("\\")<0: # Only top-level needed.
lyr.visible=False; toRestore.append(lyr) # Hack to minimize ArcMap's annoying intermediate redraws. Why does ArcMap always perform redrawing even if the added layer (or its holding group layer) has visible=False (which means that any redrawing is totally useless anyway)? And why is there no arcpy.mapping.PauseDrawing() available?
import re
for r in rows:
Name,SensingDate,Size,Marked,Downloaded,Title,UUID,MD5 = r
L2A = sensub.isL2A(Title)
if L2A and dhusAlt=="CODE-DE":
arcpy.AddWarning("# %s: CODE-DE does not provide any L2A products!"%Title); continue
update,procBaseline,PSD13 = False, sensub.baselineNumber(Title), len(Title)==78 # Title length of a product that complies with PSD version < 14.
if not opMode["ImgSel"]:
processed,md5sum,issue,severity = (None,MD5,None,0) if not prodMemo.has_key(UUID) else prodMemo.get(UUID)
if not processed: # Yet unprocessed single-tile package, or first tile occurrence of a multi-tile package (scene) or of a dupes set (should not happen):
arcpy.AddMessage("# %s, %s (%s)" % (Size, Title, UUID))
if not md5sum: md5sum,issue,severity = sensub.md5sum(UUID)
if not md5sum:
arcpy.AddWarning(" => Missed."); missed += 1
else:
m4int = filename,md5sum,url = Title+".zip", md5sum, sensub.SITE["SAFEZIP"]%UUID
sensub.flushline(" <file name='%s'><hash type='MD5'>%s</hash><url>%s</url></file>" % m4int, cartFile)
if opMode["Full"]:
outcome,issue,severity = sensub.download(url, rasterDir, filename, md5sum, unzip, Title+".SAFE")
if not issue or (issue=="already exists" and outcome is not None):
if not issue: dldone += 1
if unzip: # ILLUSTRATION OF PRESENTATION VARIANTS:
# ( ) = Built-in Function Template
# + = Within Group Layer
# • = Nonexistent SAFE Package Situation
#
# Y: Product Level
# |__X: ArcGIS Version
# \
# Z: PSD Version
#
# 2A
# \
# •------------•------------•
# |\ |\ |\
# | \ | \ | \
# | TCI+---------TCI+--------(2A)+TCI
# | |\ | |\ | |\
# 1C | | \ | | \ | | \
# \| | TCI+---------TCI+---------TCI+
# PSD13-(1C)-|--|----(1C)-|--|----(1C) | |
# \ | | \ | | \ | |
# \| | \| | \| |
# PSD14-TCI1C|-------(1C)-|-------(1C) |
# \ | \ | \ |
# \| \| \|
# PSD14.5-•------------•------------•
# | | |
# 10.4.1 10.5.1 10.6
# 10.5
safeDir,mtdName = outcome; mtdFull=os.path.join(safeDir,mtdName)
if PSD13 or (self.arcVersion>="10.5.1" and not L2A): out["MSIL1C"].append(os.path.join(mtdFull,"Multispectral-10m")) # Built-in function template as part of built-in L1C raster product support.
else: # Remaining PSD14-related cases:
with open(mtdFull) as f:
tci = re.search(r"GRANULE/[^/]+/IMG_DATA/(R10m/(L2A_)?)?T\w+_TCI(_10m)?", f.read())
if tci:
relPath = tci.group(0)
if L2A: relPath = relPath.replace("IMG_DATA/R10m","%s",1).replace("TCI_10m","%s",1)
imgFull = os.path.join(safeDir, relPath+".jp2")
if not L2A: out["TCI1C"].append(imgFull) # PSD14 not supported with ArcGIS version < 10.5.1
else: # Grouping of various L2A layers:
if ARCMAP:
X,reference,refMain,grpName = dict(), (None,None), dict(), re.sub(".+(L2A_)(.+)_N.+_(T\d{1,2}[A-Z]{3}_).+", r"\3\2", Title) # Naming convention similar to L2A .jp2 file names.
if self.arcVersion>="10.6" and procBaseline<="0206": reference = sensub.insertIntoGroup(os.path.join(mtdFull,"BOA Reflectance-10m"), reference, grpName, sym["BOA"], "BOA 10m") # Incorporate built-in L2A raster product demo.
for n in "TCI_10m","SCL_20m","SNW_20m","CLD_20m":
X[n] = sensub.imgPath(imgFull,n,procBaseline)
reference = refMain[n] = sensub.insertIntoGroup(X[n], reference, grpName, sym[n[:3]])
# For the following, ignore self.saEnabled, since a currently missing Spatial Analyst license can be (re-)enabled by the user at any time:
for n,dn in self.probs.iteritems():
if msk[n]: sensub.insertIntoGroup(("mask",(X[briefName[n]],thr[n],symName[n])), refMain[briefName[n]], grpName, altName=dn)
if self.bmfUtility:
reference,anyIndex,B = refMain["SCL_20m"], False, dict()
for bn in "02","03","04","05","06","07","08","11","12":
name = "B"+bn
B[bn] = sensub.imgPath(imgFull, name, label=self.images[name])
for n in self.idxNames:
showIndex = params[self.i[n]].value
if showIndex: anyIndex=True
reference = sensub.insertIntoGroup((n,(B,X,(flt,thr))), reference, grpName, altName=self.indices[n], skip = not showIndex)
if anyIndex: sensub.insertIntoGroup(B["08"], refMain["TCI_10m"], grpName, sym["Gray"]) # For visual (water) index assessment.
if severity==1:
arcpy.AddWarning(" => Skipped."); dlskipped += 1
elif severity>1:
if issue.startswith("cannot reach"): missed += 1
else: dlfailed += 1
processed = datetime.datetime.now()
if not MD5 and md5sum:
r[7]=md5sum; update=True # Cache it to avoid potentially redundant checksum calls.
if opMode["Full"] and not issue:
r[4]=processed; update=True
prodMemo[UUID] = processed,md5sum,issue,severity
elif Marked:
tileName = Name.split()[0]
arcpy.AddMessage("# %s, %s," % (Title,tileName))
if not prodMemo.has_key(UUID): prodMemo[UUID] = sensub.prodTiles(Title,UUID,SensingDate,False,L2A,procBaseline)
tiles,urlFormat = prodMemo.get(UUID)
if urlFormat is None: arcpy.AddWarning(" => Missed.")
else:
tileDir = os.path.join(rasterDir, Title, tileName)
if not os.path.exists(tileDir): os.makedirs(tileDir)
any = {"downloaded":False}
for on in self.plainFileBased: any[on]=False
for n,dn in self.images.iteritems():
if params[self.i[n]].value:
i = self.imgNames.index(n)
if n=="TCI" and PSD13: arcpy.AddWarning(" %s: not available for older products (PSD<14)."%n)
elif n=="B10" and L2A: pass #arcpy.AddWarning(" %s: not available for L2A products."%n)
elif i<14 or L2A:
arcpy.AddMessage(" %s" % n)
pathFormat = tiles[tileName]
imgPath = sensub.imgPath(pathFormat, n, procBaseline, L2A, dn)
if L2A: imgPath=sensub.plain2nodes(imgPath) # Catch-up.
imgFull,issue,severity = sensub.download(urlFormat % imgPath, tileDir, n+".jp2")
if not issue or (issue=="already exists" and imgFull is not None):
for on in self.plainFileBased:
if not any[on] and ((not L2A and on=="TCI1C" and n=="TCI") or (L2A and n.startswith(on))):
out[on].append(imgFull); any[on]=True; break # Highest resolution rules.
if not issue: any["downloaded"]=True
if any["downloaded"]:
r[4]=datetime.datetime.now(); update=True
if update: rows.updateRow(r)
if not opMode["ImgSel"]:
sensub.flushline("</metalink>",cartFile); cartFile.close()
os.rename(cartPart,cartName); arcpy.AddMessage(cartName)
summary = "Missed %s" % missed
if opMode["Full"]: summary = "Downloaded %s, Skipped %s, Failed %s, %s" % (dldone,dlskipped,dlfailed, summary)
arcpy.AddMessage(summary)
for on in self.outNames: params[self.i[on+"_"]].value = ";".join(out[on])
finally:
if toRestore:
for lyr in toRestore: lyr.visible=True
arcpy.RefreshTOC(); arcpy.RefreshActiveView()
| [] | [] | [
"TEMP"
] | [] | ["TEMP"] | python | 1 | 0 | |
internal/models/boil_main_test.go | // Code generated by SQLBoiler 4.2.0 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package models
import (
"database/sql"
"flag"
"fmt"
"math/rand"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/spf13/viper"
"github.com/volatiletech/sqlboiler/v4/boil"
)
var flagDebugMode = flag.Bool("test.sqldebug", false, "Turns on debug mode for SQL statements")
var flagConfigFile = flag.String("test.config", "", "Overrides the default config")
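// Example (hypothetical paths): the generated tests can be pointed at an explicit config
// and run with SQL statement logging via the flags declared above:
//
//	go test ./internal/models -args -test.config=/path/to/sqlboiler.toml -test.sqldebug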
const outputDirDepth = 2
var (
dbMain tester
)
type tester interface {
setup() error
conn() (*sql.DB, error)
teardown() error
}
func TestMain(m *testing.M) {
if dbMain == nil {
fmt.Println("no dbMain tester interface was ready")
os.Exit(-1)
}
rand.Seed(time.Now().UnixNano())
flag.Parse()
var err error
// Load configuration
err = initViper()
if err != nil {
fmt.Println("unable to load config file")
os.Exit(-2)
}
// Set DebugMode so we can see generated sql statements
boil.DebugMode = *flagDebugMode
if err = dbMain.setup(); err != nil {
fmt.Println("Unable to execute setup:", err)
os.Exit(-4)
}
conn, err := dbMain.conn()
if err != nil {
fmt.Println("failed to get connection:", err)
}
var code int
boil.SetDB(conn)
code = m.Run()
if err = dbMain.teardown(); err != nil {
fmt.Println("Unable to execute teardown:", err)
os.Exit(-5)
}
os.Exit(code)
}
func initViper() error {
if flagConfigFile != nil && *flagConfigFile != "" {
viper.SetConfigFile(*flagConfigFile)
if err := viper.ReadInConfig(); err != nil {
return err
}
return nil
}
var err error
viper.SetConfigName("sqlboiler")
configHome := os.Getenv("XDG_CONFIG_HOME")
homePath := os.Getenv("HOME")
wd, err := os.Getwd()
if err != nil {
wd = strings.Repeat("../", outputDirDepth)
} else {
wd = wd + strings.Repeat("/..", outputDirDepth)
}
configPaths := []string{wd}
if len(configHome) > 0 {
configPaths = append(configPaths, filepath.Join(configHome, "sqlboiler"))
} else {
configPaths = append(configPaths, filepath.Join(homePath, ".config/sqlboiler"))
}
for _, p := range configPaths {
viper.AddConfigPath(p)
}
// Ignore errors here, fall back to defaults and validation to provide errs
_ = viper.ReadInConfig()
viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
viper.AutomaticEnv()
return nil
}
| [
"\"XDG_CONFIG_HOME\"",
"\"HOME\""
] | [] | [
"HOME",
"XDG_CONFIG_HOME"
] | [] | ["HOME", "XDG_CONFIG_HOME"] | go | 2 | 0 | |
lib/srv/desktop/rdp/rdpclient/client.go | //go:build desktop_access_rdp
// +build desktop_access_rdp
/*
Copyright 2021 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rdpclient
// Some implementation details that don't belong in the public godoc:
// This package wraps a Rust library based on https://crates.io/crates/rdp-rs.
//
// The Rust library is statically-compiled and called via CGO.
// The Go code sends and receives the CGO versions of Rust RDP events
// https://docs.rs/rdp-rs/0.1.0/rdp/core/event/index.html and translates them
// to the desktop protocol versions.
//
// The flow is roughly this:
// Go Rust
// ==============================================
// rdpclient.New -----------------> connect_rdp
// *connected*
//
// *register output callback*
// -----------------> read_rdp_output
// handleBitmap <----------------
// handleBitmap <----------------
// handleBitmap <----------------
// *output streaming continues...*
//
// *user input messages*
// InputMessage(MouseMove) ------> write_rdp_pointer
// InputMessage(MouseButton) ----> write_rdp_pointer
// InputMessage(KeyboardButton) -> write_rdp_keyboard
// *user input continues...*
//
// *connection closed (client or server side)*
// Wait -----------------> close_rdp
//
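//
// A minimal usage sketch (hypothetical caller, config fields elided):
//
//	c, err := rdpclient.New(ctx, cfg) // reads username/screen size from the TDP conn, then connects
//	if err != nil { /* handle error */ }
//	defer c.Close()
//	err = c.Wait() // blocks until the RDP session ends and the streaming goroutines finish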
/*
// Flags to include the static Rust library.
#cgo linux,386 LDFLAGS: -L${SRCDIR}/target/i686-unknown-linux-gnu/release
#cgo linux,amd64 LDFLAGS: -L${SRCDIR}/target/x86_64-unknown-linux-gnu/release
#cgo linux,arm LDFLAGS: -L${SRCDIR}/target/arm-unknown-linux-gnueabihf/release
#cgo linux,arm64 LDFLAGS: -L${SRCDIR}/target/aarch64-unknown-linux-gnu/release
#cgo linux LDFLAGS: -l:librdp_client.a -lpthread -ldl -lm
#cgo darwin,amd64 LDFLAGS: -L${SRCDIR}/target/x86_64-apple-darwin/release
#cgo darwin,arm64 LDFLAGS: -L${SRCDIR}/target/aarch64-apple-darwin/release
#cgo darwin LDFLAGS: -framework CoreFoundation -framework Security -lrdp_client -lpthread -ldl -lm
#include <librdprs.h>
*/
import "C"
import (
"context"
"errors"
"fmt"
"image"
"os"
"runtime/cgo"
"sync"
"sync/atomic"
"time"
"unsafe"
"github.com/gravitational/teleport/lib/srv/desktop/tdp"
"github.com/gravitational/trace"
"github.com/sirupsen/logrus"
)
func init() {
// initialize the Rust logger by setting $RUST_LOG based
// on the logrus log level
// (unless RUST_LOG is already explicitly set, then we
// assume the user knows what they want)
if rl := os.Getenv("RUST_LOG"); rl == "" {
var rustLogLevel string
switch l := logrus.GetLevel(); l {
case logrus.TraceLevel:
rustLogLevel = "trace"
case logrus.DebugLevel:
rustLogLevel = "debug"
case logrus.InfoLevel:
rustLogLevel = "info"
case logrus.WarnLevel:
rustLogLevel = "warn"
default:
rustLogLevel = "error"
}
os.Setenv("RUST_LOG", rustLogLevel)
}
C.init()
}
// Client is the RDP client.
type Client struct {
cfg Config
// Parameters read from the TDP stream.
clientWidth, clientHeight uint16
username string
// RDP client on the Rust side.
rustClient *C.Client
// Synchronization point to prevent input messages from being forwarded
// until the connection is established.
// Used with sync/atomic, 0 means false, 1 means true.
readyForInput uint32
// wg is used to wait for the input/output streaming
// goroutines to complete
wg sync.WaitGroup
closeOnce sync.Once
clientActivityMu sync.RWMutex
clientLastActive time.Time
}
// New creates and connects a new Client based on cfg.
func New(ctx context.Context, cfg Config) (*Client, error) {
if err := cfg.checkAndSetDefaults(); err != nil {
return nil, err
}
c := &Client{
cfg: cfg,
readyForInput: 0,
}
if err := c.readClientUsername(); err != nil {
return nil, trace.Wrap(err)
}
if err := cfg.AuthorizeFn(c.username); err != nil {
return nil, trace.Wrap(err)
}
if err := c.readClientSize(); err != nil {
return nil, trace.Wrap(err)
}
if err := c.connect(ctx); err != nil {
return nil, trace.Wrap(err)
}
c.start()
return c, nil
}
func (c *Client) readClientUsername() error {
for {
msg, err := c.cfg.Conn.InputMessage()
if err != nil {
return trace.Wrap(err)
}
u, ok := msg.(tdp.ClientUsername)
if !ok {
c.cfg.Log.Debugf("Expected ClientUsername message, got %T", msg)
continue
}
c.cfg.Log.Debugf("Got RDP username %q", u.Username)
c.username = u.Username
return nil
}
}
func (c *Client) readClientSize() error {
for {
msg, err := c.cfg.Conn.InputMessage()
if err != nil {
return trace.Wrap(err)
}
s, ok := msg.(tdp.ClientScreenSpec)
if !ok {
c.cfg.Log.Debugf("Expected ClientScreenSpec message, got %T", msg)
continue
}
c.cfg.Log.Debugf("Got RDP screen size %dx%d", s.Width, s.Height)
c.clientWidth = uint16(s.Width)
c.clientHeight = uint16(s.Height)
return nil
}
}
func (c *Client) connect(ctx context.Context) error {
userCertDER, userKeyDER, err := c.cfg.GenerateUserCert(ctx, c.username, c.cfg.CertTTL)
if err != nil {
return trace.Wrap(err)
}
// Addr and username strings only need to be valid for the duration of
// C.connect_rdp. They are copied on the Rust side and can be freed here.
addr := C.CString(c.cfg.Addr)
defer C.free(unsafe.Pointer(addr))
username := C.CString(c.username)
defer C.free(unsafe.Pointer(username))
res := C.connect_rdp(
addr,
username,
// cert length and bytes.
C.uint32_t(len(userCertDER)),
(*C.uint8_t)(unsafe.Pointer(&userCertDER[0])),
// key length and bytes.
C.uint32_t(len(userKeyDER)),
(*C.uint8_t)(unsafe.Pointer(&userKeyDER[0])),
// screen size.
C.uint16_t(c.clientWidth),
C.uint16_t(c.clientHeight),
)
if err := cgoError(res.err); err != nil {
return trace.Wrap(err)
}
c.rustClient = res.client
return nil
}
// start kicks off goroutines for input/output streaming and returns right
// away. Use Wait to wait for them to finish.
func (c *Client) start() {
// Video output streaming worker goroutine.
c.wg.Add(1)
go func() {
defer c.wg.Done()
defer c.Close()
defer c.cfg.Log.Info("RDP output streaming finished")
h := cgo.NewHandle(c)
defer h.Delete()
// C.read_rdp_output blocks for the duration of the RDP connection and
// calls handle_bitmap repeatedly with the incoming bitmaps.
if err := cgoError(C.read_rdp_output(c.rustClient, C.uintptr_t(h))); err != nil {
c.cfg.Log.Warningf("Failed reading RDP output frame: %v", err)
}
}()
// User input streaming worker goroutine.
c.wg.Add(1)
go func() {
defer c.wg.Done()
defer c.Close()
defer c.cfg.Log.Info("RDP input streaming finished")
// Remember mouse coordinates to send them with all CGOPointer events.
var mouseX, mouseY uint32
for {
msg, err := c.cfg.Conn.InputMessage()
if err != nil {
c.cfg.Log.Warningf("Failed reading RDP input message: %v", err)
return
}
if atomic.LoadUint32(&c.readyForInput) == 0 {
// Input not allowed yet, drop the message.
continue
}
c.UpdateClientActivity()
switch m := msg.(type) {
case tdp.MouseMove:
mouseX, mouseY = m.X, m.Y
if err := cgoError(C.write_rdp_pointer(
c.rustClient,
C.CGOMousePointerEvent{
x: C.uint16_t(m.X),
y: C.uint16_t(m.Y),
button: C.PointerButtonNone,
wheel: C.PointerWheelNone,
},
)); err != nil {
c.cfg.Log.Warningf("Failed forwarding RDP input message: %v", err)
return
}
case tdp.MouseButton:
// Map the button to a C enum value.
var button C.CGOPointerButton
switch m.Button {
case tdp.LeftMouseButton:
button = C.PointerButtonLeft
case tdp.RightMouseButton:
button = C.PointerButtonRight
case tdp.MiddleMouseButton:
button = C.PointerButtonMiddle
default:
button = C.PointerButtonNone
}
if err := cgoError(C.write_rdp_pointer(
c.rustClient,
C.CGOMousePointerEvent{
x: C.uint16_t(mouseX),
y: C.uint16_t(mouseY),
button: uint32(button),
down: m.State == tdp.ButtonPressed,
wheel: C.PointerWheelNone,
},
)); err != nil {
c.cfg.Log.Warningf("Failed forwarding RDP input message: %v", err)
return
}
case tdp.MouseWheel:
var wheel C.CGOPointerWheel
switch m.Axis {
case tdp.VerticalWheelAxis:
wheel = C.PointerWheelVertical
case tdp.HorizontalWheelAxis:
wheel = C.PointerWheelHorizontal
// TDP positive scroll deltas move towards top-left.
// RDP positive scroll deltas move towards top-right.
//
// Fix the scroll direction to match TDP, it's inverted for
// horizontal scroll in RDP.
m.Delta = -m.Delta
default:
wheel = C.PointerWheelNone
}
if err := cgoError(C.write_rdp_pointer(
c.rustClient,
C.CGOMousePointerEvent{
x: C.uint16_t(mouseX),
y: C.uint16_t(mouseY),
button: C.PointerButtonNone,
wheel: uint32(wheel),
wheel_delta: C.int16_t(m.Delta),
},
)); err != nil {
c.cfg.Log.Warningf("Failed forwarding RDP input message: %v", err)
return
}
case tdp.KeyboardButton:
if err := cgoError(C.write_rdp_keyboard(
c.rustClient,
C.CGOKeyboardEvent{
code: C.uint16_t(m.KeyCode),
down: m.State == tdp.ButtonPressed,
},
)); err != nil {
c.cfg.Log.Warningf("Failed forwarding RDP input message: %v", err)
return
}
default:
c.cfg.Log.Warningf("Skipping unimplemented desktop protocol message type %T", msg)
}
}
}()
}
//export handle_bitmap
func handle_bitmap(handle C.uintptr_t, cb C.CGOBitmap) C.CGOError {
return cgo.Handle(handle).Value().(*Client).handleBitmap(cb)
}
func (c *Client) handleBitmap(cb C.CGOBitmap) C.CGOError {
// Notify the input forwarding goroutine that we're ready for input.
// Input can only be sent after connection was established, which we infer
// from the fact that a bitmap was sent.
atomic.StoreUint32(&c.readyForInput, 1)
data := C.GoBytes(unsafe.Pointer(cb.data_ptr), C.int(cb.data_len))
// Convert BGRA to RGBA. It's likely due to Windows using uint32 values for
// pixels (ARGB) and encoding them as big endian. The image.RGBA type uses
// a byte slice with 4-byte segments representing pixels (RGBA).
//
// Also, always force Alpha value to 100% (opaque). On some Windows
// versions it's sent as 0% after decompression for some reason.
for i := 0; i < len(data); i += 4 {
data[i], data[i+2], data[i+3] = data[i+2], data[i], 255
}
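	// For example, a BGRA pixel {0x10, 0x20, 0x30, 0x00} becomes RGBA {0x30, 0x20, 0x10, 0xFF} after the swap above.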
img := image.NewNRGBA(image.Rectangle{
Min: image.Pt(int(cb.dest_left), int(cb.dest_top)),
Max: image.Pt(int(cb.dest_right)+1, int(cb.dest_bottom)+1),
})
copy(img.Pix, data)
if err := c.cfg.Conn.OutputMessage(tdp.NewPNG(img, c.cfg.Encoder)); err != nil {
return C.CString(fmt.Sprintf("failed to send PNG frame %v: %v", img.Rect, err))
}
return nil
}
// Wait blocks until the client disconnects and runs the cleanup.
func (c *Client) Wait() error {
c.wg.Wait()
// Let the Rust side free its data.
C.free_rdp(c.rustClient)
return nil
}
// Close shuts down the client and closes any existing connections.
// It is safe to call multiple times, from multiple goroutines.
// Calls other than the first one are no-ops.
func (c *Client) Close() {
c.closeOnce.Do(func() {
if err := cgoError(C.close_rdp(c.rustClient)); err != nil {
c.cfg.Log.Warningf("Error closing RDP connection: %v", err)
}
})
}
// GetClientLastActive returns the time of the last recorded activity.
// For RDP, "activity" is defined as user-input messages
// (mouse move, button press, etc.)
func (c *Client) GetClientLastActive() time.Time {
c.clientActivityMu.RLock()
defer c.clientActivityMu.RUnlock()
return c.clientLastActive
}
// UpdateClientActivity updates the client activity timestamp.
func (c *Client) UpdateClientActivity() {
c.clientActivityMu.Lock()
c.clientLastActive = time.Now().UTC()
c.clientActivityMu.Unlock()
}
// cgoError converts from a CGO-originated error to a Go error, copying the
// error string and releasing the CGO data.
func cgoError(s C.CGOError) error {
if s == nil {
return nil
}
gs := C.GoString(s)
C.free_rust_string(s)
return errors.New(gs)
}
//export free_go_string
func free_go_string(s *C.char) {
C.free(unsafe.Pointer(s))
}
| [
"\"RUST_LOG\""
] | [] | [
"RUST_LOG"
] | [] | ["RUST_LOG"] | go | 1 | 0 |